-rw-r--r--  .github/ISSUE_TEMPLATE/feature_request.md | 2
-rw-r--r--  .github/workflows/ci.yml | 9
-rw-r--r--  .github/workflows/nightly.yml | 13
-rw-r--r--  LICENSE | 2
-rw-r--r--  Makefile | 24
-rw-r--r--  README.md | 2
-rw-r--r--  bin/README.md | 7
-rw-r--r--  build.bat | 8
-rw-r--r--  core/compress/common.odin | 196
-rw-r--r--  core/compress/gzip/example.odin | 70
-rw-r--r--  core/compress/gzip/gzip.odin | 313
-rw-r--r--  core/compress/zlib/example.odin | 42
-rw-r--r--  core/compress/zlib/zlib.odin | 606
-rw-r--r--  core/fmt/fmt.odin | 17
-rw-r--r--  core/image/common.odin | 204
-rw-r--r--  core/image/png/example.odin | 327
-rw-r--r--  core/image/png/helpers.odin | 516
-rw-r--r--  core/image/png/png.odin | 1657
-rw-r--r--  core/intrinsics/intrinsics.odin | 82
-rw-r--r--  core/math/rand/rand.odin | 4
-rw-r--r--  core/mem/alloc.odin | 2
-rw-r--r--  core/mem/mem.odin | 1
-rw-r--r--  core/odin/ast/ast.odin | 2
-rw-r--r--  core/odin/parser/parse_files.odin | 2
-rw-r--r--  core/odin/parser/parser.odin | 76
-rw-r--r--  core/odin/tokenizer/token.odin | 2
-rw-r--r--  core/odin/tokenizer/tokenizer.odin | 9
-rw-r--r--  core/os/os2/errors.odin | 61
-rw-r--r--  core/os/os2/file_stream.odin | 23
-rw-r--r--  core/os/os2/file_util.odin | 1
-rw-r--r--  core/os/os2/file_windows.odin | 20
-rw-r--r--  core/os/os2/pipe_windows.odin | 2
-rw-r--r--  core/os/os2/stat_windows.odin | 6
-rw-r--r--  core/os/os2/temp_file_windows.odin | 4
-rw-r--r--  core/os/os_freebsd.odin | 2
-rw-r--r--  core/os/os_linux.odin | 6
-rw-r--r--  core/runtime/core.odin | 4
-rw-r--r--  core/runtime/internal.odin | 41
-rw-r--r--  core/runtime/udivmod128.odin | 2
-rw-r--r--  core/strings/builder.odin | 2
-rw-r--r--  core/sync/sync2/atomic.odin | 84
-rw-r--r--  core/sync/sync2/channel.odin | 886
-rw-r--r--  core/sync/sync2/channel_unix.odin | 17
-rw-r--r--  core/sync/sync2/channel_windows.odin | 34
-rw-r--r--  core/sync/sync2/extended.odin | 54
-rw-r--r--  core/sync/sync2/primitives.odin | 116
-rw-r--r--  core/sync/sync2/primitives_atomic.odin | 79
-rw-r--r--  core/sync/sync2/primitives_pthreads.odin | 81
-rw-r--r--  core/sync/sync2/primitives_windows.odin | 82
-rw-r--r--  core/testing/runner.odin | 8
-rw-r--r--  core/testing/runner_other.odin | 8
-rw-r--r--  core/testing/runner_windows.odin | 191
-rw-r--r--  core/testing/testing.odin | 16
-rw-r--r--  core/time/time.odin | 9
-rw-r--r--  core/unicode/tables.odin | 10
-rw-r--r--  examples/demo/demo.odin | 36
-rw-r--r--  examples/demo_insert_semicolon/demo.odin | 10
-rw-r--r--  src/build_settings.cpp | 70
-rw-r--r--  src/check_builtin.cpp | 135
-rw-r--r--  src/check_decl.cpp | 34
-rw-r--r--  src/check_expr.cpp | 548
-rw-r--r--  src/check_stmt.cpp | 125
-rw-r--r--  src/check_type.cpp | 104
-rw-r--r--  src/checker.cpp | 324
-rw-r--r--  src/checker.hpp | 2
-rw-r--r--  src/checker_builtin_procs.hpp | 14
-rw-r--r--  src/docs_writer.cpp | 3
-rw-r--r--  src/entity.cpp | 2
-rw-r--r--  src/llvm_abi.cpp | 203
-rw-r--r--  src/llvm_backend.cpp | 2487
-rw-r--r--  src/llvm_backend.hpp | 26
-rw-r--r--  src/llvm_backend_opt.cpp | 131
-rw-r--r--  src/main.cpp | 213
-rw-r--r--  src/parser.cpp | 334
-rw-r--r--  src/parser.hpp | 49
-rw-r--r--  src/parser_pos.cpp | 331
-rw-r--r--  src/thread_pool.cpp | 3
-rw-r--r--  src/tokenizer.cpp | 221
-rw-r--r--  src/types.cpp | 66
79 files changed, 8065 insertions(+), 3450 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index df81be2be..33d7e7a71 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -7,6 +7,8 @@ assignees: ''
---
+# PLEASE POST THIS IN THE DISCUSSION TAB UNDER "PROPOSALS" OR "IDEAS/REQUESTS"
+
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f9abf219f..faf5ecd29 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -23,7 +23,7 @@ jobs:
- name: Download LLVM and setup PATH
run: |
brew install llvm@11
- echo "/usr/local/opt/llvm/bin" >> $GITHUB_PATH
+ echo "/usr/local/opt/llvm@11/bin" >> $GITHUB_PATH
TMP_PATH=$(xcrun --show-sdk-path)/user/include
echo "CPATH=$TMP_PATH" >> $GITHUB_ENV
- name: build odin
@@ -38,13 +38,6 @@ jobs:
runs-on: windows-latest
steps:
- uses: actions/checkout@v1
- - name: Download and unpack LLVM bins
- shell: powershell
- run: |
- cd bin
- $ProgressPreference = "SilentlyContinue";
- Invoke-WebRequest -Uri https://github.com/odin-lang/Odin/releases/download/llvm-windows/llvm-binaries.zip -OutFile llvm-binaries.zip
- 7z x llvm-binaries.zip > $null
- name: build Odin
shell: cmd
run: |
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index 7c4b42b1e..3d58a8fd8 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -10,15 +10,6 @@ jobs:
runs-on: windows-latest
steps:
- uses: actions/checkout@v1
- - name: Install cURL
- run: choco install curl
- - name: Download and unpack LLVM bins
- shell: cmd
- run: |
- cd bin
- curl -sL https://github.com/odin-lang/Odin/releases/download/llvm-windows/llvm-binaries.zip --output llvm-binaries.zip
- 7z x llvm-binaries.zip > nul
- rm -f llvm-binaries.zip
- name: build Odin
shell: cmd
run: |
@@ -72,8 +63,8 @@ jobs:
- uses: actions/checkout@v1
- name: Download LLVM and setup PATH
run: |
- brew install llvm
- echo "/usr/local/opt/llvm/bin" >> $GITHUB_PATH
+ brew install llvm@11
+ echo "/usr/local/opt/llvm@11/bin" >> $GITHUB_PATH
TMP_PATH=$(xcrun --show-sdk-path)/user/include
echo "CPATH=$TMP_PATH" >> $GITHUB_ENV
- name: build odin
diff --git a/LICENSE b/LICENSE
index e9e75e569..8ee9b17d6 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2016-2020 Ginger Bill. All rights reserved.
+Copyright (c) 2016-2021 Ginger Bill. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
diff --git a/Makefile b/Makefile
index 0425d17e0..5cdf1e467 100644
--- a/Makefile
+++ b/Makefile
@@ -8,13 +8,31 @@ CC=clang
OS=$(shell uname)
ifeq ($(OS), Darwin)
+ LLVM_CONFIG=llvm-config
+ ifneq ($(shell llvm-config --version | grep '^11\.'),)
+ LLVM_CONFIG=llvm-config
+ else
+ $(error "Requirement: llvm-config must be version 11")
+ endif
+
LDFLAGS:=$(LDFLAGS) -liconv
- CFLAGS:=$(CFLAGS) $(shell llvm-config --cxxflags --ldflags)
+ CFLAGS:=$(CFLAGS) $(shell $(LLVM_CONFIG) --cxxflags --ldflags)
LDFLAGS:=$(LDFLAGS) -lLLVM-C
endif
ifeq ($(OS), Linux)
- CFLAGS:=$(CFLAGS) $(shell llvm-config-11 --cxxflags --ldflags)
- LDFLAGS:=$(LDFLAGS) $(shell llvm-config-11 --libs core native --system-libs)
+ LLVM_CONFIG=llvm-config-11
+ ifneq ($(shell which llvm-config-11 2>/dev/null),)
+ LLVM_CONFIG=llvm-config-11
+ else
+ ifneq ($(shell llvm-config --version | grep '^11\.'),)
+ LLVM_CONFIG=llvm-config
+ else
+ $(error "Requirement: llvm-config must be version 11")
+ endif
+ endif
+
+ CFLAGS:=$(CFLAGS) $(shell $(LLVM_CONFIG) --cxxflags --ldflags)
+ LDFLAGS:=$(LDFLAGS) $(shell $(LLVM_CONFIG) --libs core native --system-libs)
endif
all: debug demo
diff --git a/README.md b/README.md
index 23a969271..a555f1dc8 100644
--- a/README.md
+++ b/README.md
@@ -100,7 +100,6 @@ In addition, the following platform-specific steps are necessary:
- Windows
* Have Visual Studio installed (MSVC 2010 or later, for the linker)
- * Have a copy of `opt.exe` and `llc.exe` in `Odin/bin`. Pre-built Windows binaries can be found [here](https://github.com/odin-lang/Odin/releases/tag/llvm-windows) and *must* be explicitly copied
* Open a valid command prompt:
* **Basic:** run the `x64 Native Tools Command Prompt for VS2017` shortcut bundled with VS 2017, or
* **Advanced:** run `vcvarsall.bat x64` from a blank `cmd` session
@@ -128,7 +127,6 @@ Please read the [Getting Started Guide](https://github.com/odin-lang/Odin/wiki#g
- Windows
* x86-64/amd64
* MSVC 2010 installed (C++11 support)
- * [LLVM binaries](https://github.com/odin-lang/Odin/releases/tag/llvm-windows) for `opt.exe`, `llc.exe`, and `lld-link.exe`
* Requires MSVC's link.exe as the linker
* run `vcvarsall.bat` to setup the path
diff --git a/bin/README.md b/bin/README.md
index 24a19a3c4..9e35adc23 100644
--- a/bin/README.md
+++ b/bin/README.md
@@ -2,13 +2,12 @@
## Setup
-Odin only supports x86-64 at the moment (64-bit), relies on LLVM for code generation and an external linker.
+Odin currently supports x86-64 and ARM64 (64-bit), relies on LLVM for code generation, and uses an external linker.
In addition, the following platform-specific steps are necessary:
- Windows
* Have Visual Studio installed (MSVC 2010 or later, for the linker)
- * Have a copy of `opt.exe` and `llc.exe` in `Odin/bin`. Pre-built Windows binaries can be found [here](https://github.com/odin-lang/Odin/releases/tag/llvm-windows) and *must* be explicitly copied
* Open a valid command prompt:
* **Basic:** run the `x64 Native Tools Command Prompt for VS2017` shortcut bundled with VS 2017, or
* **Advanced:** run `vcvarsall.bat x64` from a blank `cmd` session
@@ -19,12 +18,12 @@ In addition, the following platform-specific steps are necessary:
* Make sure the LLVM binaries and the linker are added to your `$PATH` environmental variable
- GNU/Linux
- * Have LLVM installed (opt/llc)
* Have Clang installed (version X.X or later, for linking)
* Make sure the LLVM binaries and the linker are added to your `$PATH` environmental variable
Then build the compiler by calling `build.bat` (Windows) or `make` (Linux/MacOS). This will automatically run the demo program if successful.
-**Notes for Linux:**: The compiler currently relies on the `core` and `shared` library collection being relative to the compiler executable. Installing the compiler in the usual sense (to `/usr/local/bin` or similar) is therefore not as straight forward as you need to make sure the mentioned libraries are available. As a result, it is recommended to simply explicitly invoke the compiler with `/path/to/odin` in your preferred build system, or add `/path/to/odin` to `$PATH`.
+**Notes for \*Nix Systems:** By default, the compiler relies on the `core` and `shared` library collections being relative to the compiler executable. Installing the compiler in the usual sense (to `/usr/local/bin` or similar) is therefore not as straightforward, as you need to make sure those collections are available. It is recommended to either invoke the compiler with `/path/to/odin` in your preferred build system, or `set ODIN_ROOT=/path/to/odin_root`.
+
Please read the [Getting Started Guide](https://github.com/odin-lang/Odin/wiki#getting-started-with-odin) for more information.
diff --git a/build.bat b/build.bat
index c6a634aeb..038f02866 100644
--- a/build.bat
+++ b/build.bat
@@ -2,8 +2,12 @@
setlocal EnableDelayedExpansion
-set curr_year=%DATE:~-4%
-set curr_month=%DATE:~3,2%
+for /f "usebackq tokens=1,2 delims=,=- " %%i in (`wmic os get LocalDateTime /value`) do @if %%i==LocalDateTime (
+ set CURR_DATE_TIME=%%j
+)
+
+set curr_year=%CURR_DATE_TIME:~0,4%
+set curr_month=%CURR_DATE_TIME:~4,2%
:: Make sure this is a decent name and not generic
set exe_name=odin.exe
diff --git a/core/compress/common.odin b/core/compress/common.odin
new file mode 100644
index 000000000..a0e092643
--- /dev/null
+++ b/core/compress/common.odin
@@ -0,0 +1,196 @@
+package compress
+
+import "core:io"
+import "core:image"
+
+Error :: union {
+ General_Error,
+ Deflate_Error,
+ ZLIB_Error,
+ GZIP_Error,
+ ZIP_Error,
+ /*
+	This is here because png.load will return this type of error union,
+ as it may involve an I/O error, a Deflate error, etc.
+ */
+ image.Error,
+}
+
+General_Error :: enum {
+ File_Not_Found,
+ Cannot_Open_File,
+ File_Too_Short,
+ Stream_Too_Short,
+ Output_Too_Short,
+ Unknown_Compression_Method,
+ Checksum_Failed,
+ Incompatible_Options,
+ Unimplemented,
+}
+
+GZIP_Error :: enum {
+ Invalid_GZIP_Signature,
+ Reserved_Flag_Set,
+ Invalid_Extra_Data,
+ Original_Name_Too_Long,
+ Comment_Too_Long,
+ Payload_Length_Invalid,
+ Payload_CRC_Invalid,
+}
+
+ZIP_Error :: enum {
+ Invalid_ZIP_File_Signature,
+ Unexpected_Signature,
+ Insert_Next_Disk,
+ Expected_End_of_Central_Directory_Record,
+}
+
+ZLIB_Error :: enum {
+ Unsupported_Window_Size,
+ FDICT_Unsupported,
+ Unsupported_Compression_Level,
+ Code_Buffer_Malformed,
+}
+
+Deflate_Error :: enum {
+ Huffman_Bad_Sizes,
+ Huffman_Bad_Code_Lengths,
+ Inflate_Error,
+ Bad_Distance,
+ Bad_Huffman_Code,
+ Len_Nlen_Mismatch,
+ BType_3,
+}
+
+// General context for ZLIB, LZW, etc.
+Context :: struct {
+ code_buffer: u32,
+ num_bits: i8,
+ /*
+ num_bits will be set to -100 if the buffer is malformed
+ */
+ eof: b8,
+
+ input: io.Stream,
+ output: io.Stream,
+ bytes_written: i64,
+ // Used to update hash as we write instead of all at once
+ rolling_hash: u32,
+
+ // Sliding window buffer. Size must be a power of two.
+ window_size: i64,
+ last: ^[dynamic]byte,
+}
+
+// Stream helpers
+/*
+ TODO: These need to be optimized.
+
+ Streams should really only check if a certain method is available once, perhaps even during setup.
+
+ Bit and byte readers may be merged so that reading bytes will grab them from the bit buffer first.
+ This simplifies end-of-stream handling where bits may be left in the bit buffer.
+*/
+
+read_data :: #force_inline proc(c: ^Context, $T: typeid) -> (res: T, err: io.Error) {
+ b := make([]u8, size_of(T), context.temp_allocator);
+ r, e1 := io.to_reader(c.input);
+ _, e2 := io.read(r, b);
+ if !e1 || e2 != .None {
+ return T{}, e2;
+ }
+
+ res = (^T)(raw_data(b))^;
+ return res, .None;
+}
+
+read_u8 :: #force_inline proc(z: ^Context) -> (res: u8, err: io.Error) {
+ return read_data(z, u8);
+}
+
+peek_data :: #force_inline proc(c: ^Context, $T: typeid) -> (res: T, err: io.Error) {
+ // Get current position to read from.
+ curr, e1 := c.input->impl_seek(0, .Current);
+ if e1 != .None {
+ return T{}, e1;
+ }
+ r, e2 := io.to_reader_at(c.input);
+ if !e2 {
+ return T{}, .Empty;
+ }
+ b := make([]u8, size_of(T), context.temp_allocator);
+ _, e3 := io.read_at(r, b, curr);
+ if e3 != .None {
+ return T{}, .Empty;
+ }
+
+ res = (^T)(raw_data(b))^;
+ return res, .None;
+}
+
+// Sliding window read back
+peek_back_byte :: proc(c: ^Context, offset: i64) -> (res: u8, err: io.Error) {
+ // Look back into the sliding window.
+ return c.last[offset % c.window_size], .None;
+}
+
+// Generalized bit reader LSB
+refill_lsb :: proc(z: ^Context, width := i8(24)) {
+ for {
+ if z.num_bits > width {
+ break;
+ }
+ if z.code_buffer == 0 && z.num_bits == -1 {
+ z.num_bits = 0;
+ }
+ if z.code_buffer >= 1 << uint(z.num_bits) {
+ // Code buffer is malformed.
+ z.num_bits = -100;
+ return;
+ }
+ c, err := read_u8(z);
+ if err != .None {
+ // This is fine at the end of the file.
+ z.num_bits = -42;
+ z.eof = true;
+ return;
+ }
+ z.code_buffer |= (u32(c) << u8(z.num_bits));
+ z.num_bits += 8;
+ }
+}
+
+consume_bits_lsb :: #force_inline proc(z: ^Context, width: u8) {
+ z.code_buffer >>= width;
+ z.num_bits -= i8(width);
+}
+
+peek_bits_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
+ if z.num_bits < i8(width) {
+ refill_lsb(z);
+ }
+ // assert(z.num_bits >= i8(width));
+ return z.code_buffer & ~(~u32(0) << width);
+}
+
+peek_bits_no_refill_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
+ assert(z.num_bits >= i8(width));
+ return z.code_buffer & ~(~u32(0) << width);
+}
+
+read_bits_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
+ k := peek_bits_lsb(z, width);
+ consume_bits_lsb(z, width);
+ return k;
+}
+
+read_bits_no_refill_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
+ k := peek_bits_no_refill_lsb(z, width);
+ consume_bits_lsb(z, width);
+ return k;
+}
+
+discard_to_next_byte_lsb :: proc(z: ^Context) {
+ discard := u8(z.num_bits & 7);
+ consume_bits_lsb(z, discard);
+}
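The procedures above form the LSB-first bit reader that the ZLIB and GZIP decoders later in this change are built on: refill_lsb tops up code_buffer from the input stream, peek_bits_lsb masks out the requested bits, and consume_bits_lsb shifts them away. Below is a minimal stand-alone sketch of driving the reader directly over a bytes.Reader, wired up the same way zlib.odin sets up its Context; the input bytes and the expected output are illustrative only.

package bit_reader_example

import "core:compress"
import "core:bytes"
import "core:fmt"

main :: proc() {
	// Enough input that refill_lsb never runs out mid-read below.
	data := []u8{0xAB, 0xCD, 0xEF, 0x01, 0x23, 0x45};

	r := bytes.Reader{};
	bytes.reader_init(&r, data);

	ctx := compress.Context{
		input = bytes.reader_to_stream(&r),
	};

	// Read four 4-bit groups, least significant bits first.
	// Prints "b a d c": the low nibble of each byte comes out first.
	for _ in 0..<4 {
		fmt.printf("%x ", compress.read_bits_lsb(&ctx, 4));
	}
	fmt.println();
}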
diff --git a/core/compress/gzip/example.odin b/core/compress/gzip/example.odin
new file mode 100644
index 000000000..54576c380
--- /dev/null
+++ b/core/compress/gzip/example.odin
@@ -0,0 +1,70 @@
+//+ignore
+package gzip
+
+import "core:compress/gzip"
+import "core:bytes"
+import "core:os"
+
+// Small GZIP file with fextra, fname and fcomment present.
+@private
+TEST: []u8 = {
+ 0x1f, 0x8b, 0x08, 0x1c, 0xcb, 0x3b, 0x3a, 0x5a,
+ 0x02, 0x03, 0x07, 0x00, 0x61, 0x62, 0x03, 0x00,
+ 0x63, 0x64, 0x65, 0x66, 0x69, 0x6c, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x00, 0x54, 0x68, 0x69, 0x73,
+ 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x2b, 0x48,
+ 0xac, 0xcc, 0xc9, 0x4f, 0x4c, 0x01, 0x00, 0x15,
+ 0x6a, 0x2c, 0x42, 0x07, 0x00, 0x00, 0x00,
+};
+
+main :: proc() {
+ // Set up output buffer.
+ buf: bytes.Buffer;
+ defer bytes.buffer_destroy(&buf);
+
+ stdout :: proc(s: string) {
+ os.write_string(os.stdout, s);
+ }
+ stderr :: proc(s: string) {
+ os.write_string(os.stderr, s);
+ }
+
+ args := os.args;
+
+ if len(args) < 2 {
+ stderr("No input file specified.\n");
+ err := gzip.load(TEST, &buf);
+		if err == nil {
+ stdout("Displaying test vector: ");
+ stdout(bytes.buffer_to_string(&buf));
+ stdout("\n");
+ }
+ }
+
+ // The rest are all files.
+ args = args[1:];
+ err: gzip.Error;
+
+ for file in args {
+ if file == "-" {
+ // Read from stdin
+ s := os.stream_from_handle(os.stdin);
+ err = gzip.load(s, &buf);
+ } else {
+ err = gzip.load(file, &buf);
+ }
+ if err != nil {
+			if err == E_General.File_Not_Found {
+ stderr("File not found: ");
+ stderr(file);
+ stderr("\n");
+ os.exit(1);
+ }
+ stderr("GZIP returned an error.\n");
+ os.exit(2);
+ }
+ stdout(bytes.buffer_to_string(&buf));
+ }
+ os.exit(0);
+}
diff --git a/core/compress/gzip/gzip.odin b/core/compress/gzip/gzip.odin
new file mode 100644
index 000000000..2b5e513c7
--- /dev/null
+++ b/core/compress/gzip/gzip.odin
@@ -0,0 +1,313 @@
+package gzip
+
+import "core:compress/zlib"
+import "core:compress"
+import "core:os"
+import "core:io"
+import "core:bytes"
+import "core:hash"
+
+/*
+
+ This package implements support for the GZIP file format v4.3,
+ as specified in RFC 1952.
+
+	It is implemented in such a way that it lends itself naturally
+	to being used as the input to a complementary TAR implementation.
+
+*/
+
+Magic :: enum u16le {
+ GZIP = 0x8b << 8 | 0x1f,
+}
+
+Header :: struct #packed {
+ magic: Magic,
+ compression_method: Compression,
+ flags: Header_Flags,
+ modification_time: u32le,
+ xfl: Compression_Flags,
+ os: OS,
+}
+#assert(size_of(Header) == 10);
+
+Header_Flag :: enum u8 {
+ // Order is important
+ text = 0,
+ header_crc = 1,
+ extra = 2,
+ name = 3,
+ comment = 4,
+ reserved_1 = 5,
+ reserved_2 = 6,
+ reserved_3 = 7,
+}
+Header_Flags :: distinct bit_set[Header_Flag; u8];
+
+OS :: enum u8 {
+ FAT = 0,
+ Amiga = 1,
+ VMS = 2,
+ Unix = 3,
+ VM_CMS = 4,
+ Atari_TOS = 5,
+ HPFS = 6,
+ Macintosh = 7,
+ Z_System = 8,
+ CP_M = 9,
+ TOPS_20 = 10,
+ NTFS = 11,
+ QDOS = 12,
+ Acorn_RISCOS = 13,
+ _Unknown = 14,
+ Unknown = 255,
+}
+OS_Name :: #partial [OS]string{
+ .FAT = "FAT",
+ .Amiga = "Amiga",
+ .VMS = "VMS/OpenVMS",
+ .Unix = "Unix",
+ .VM_CMS = "VM/CMS",
+ .Atari_TOS = "Atari TOS",
+ .HPFS = "HPFS",
+ .Macintosh = "Macintosh",
+ .Z_System = "Z-System",
+ .CP_M = "CP/M",
+ .TOPS_20 = "TOPS-20",
+ .NTFS = "NTFS",
+ .QDOS = "QDOS",
+ .Acorn_RISCOS = "Acorn RISCOS",
+ .Unknown = "Unknown",
+};
+
+Compression :: enum u8 {
+ DEFLATE = 8,
+}
+
+Compression_Flags :: enum u8 {
+ Maximum_Compression = 2,
+ Fastest_Compression = 4,
+}
+
+Error :: compress.Error;
+E_General :: compress.General_Error;
+E_GZIP :: compress.GZIP_Error;
+E_ZLIB :: compress.ZLIB_Error;
+E_Deflate :: compress.Deflate_Error;
+
+load_from_slice :: proc(slice: []u8, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
+
+ r := bytes.Reader{};
+ bytes.reader_init(&r, slice);
+ stream := bytes.reader_to_stream(&r);
+
+ err = load_from_stream(stream, buf, allocator);
+
+ return err;
+}
+
+load_from_file :: proc(filename: string, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
+ data, ok := os.read_entire_file(filename, allocator);
+ defer delete(data);
+
+ err = E_General.File_Not_Found;
+ if ok {
+ err = load_from_slice(data, buf, allocator);
+ }
+ return;
+}
+
+load_from_stream :: proc(stream: io.Stream, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
+ ctx := compress.Context{
+ input = stream,
+ };
+ buf := buf;
+ ws := bytes.buffer_to_stream(buf);
+ ctx.output = ws;
+
+ header, e := compress.read_data(&ctx, Header);
+ if e != .None {
+ return E_General.File_Too_Short;
+ }
+
+ if header.magic != .GZIP {
+ return E_GZIP.Invalid_GZIP_Signature;
+ }
+ if header.compression_method != .DEFLATE {
+ return E_General.Unknown_Compression_Method;
+ }
+
+ if header.os >= ._Unknown {
+ header.os = .Unknown;
+ }
+
+ if .reserved_1 in header.flags || .reserved_2 in header.flags || .reserved_3 in header.flags {
+ return E_GZIP.Reserved_Flag_Set;
+ }
+
+ // printf("signature: %v\n", header.magic);
+ // printf("compression: %v\n", header.compression_method);
+ // printf("flags: %v\n", header.flags);
+ // printf("modification time: %v\n", time.unix(i64(header.modification_time), 0));
+ // printf("xfl: %v (%v)\n", header.xfl, int(header.xfl));
+ // printf("os: %v\n", OS_Name[header.os]);
+
+ if .extra in header.flags {
+ xlen, e_extra := compress.read_data(&ctx, u16le);
+ if e_extra != .None {
+ return E_General.Stream_Too_Short;
+ }
+ // printf("Extra data present (%v bytes)\n", xlen);
+ if xlen < 4 {
+ // Minimum length is 2 for ID + 2 for a field length, if set to zero.
+ return E_GZIP.Invalid_Extra_Data;
+ }
+
+ field_id: [2]u8;
+ field_length: u16le;
+ field_error: io.Error;
+
+ for xlen >= 4 {
+ // println("Parsing Extra field(s).");
+ field_id, field_error = compress.read_data(&ctx, [2]u8);
+ if field_error != .None {
+ // printf("Parsing Extra returned: %v\n", field_error);
+ return E_General.Stream_Too_Short;
+ }
+ xlen -= 2;
+
+ field_length, field_error = compress.read_data(&ctx, u16le);
+ if field_error != .None {
+ // printf("Parsing Extra returned: %v\n", field_error);
+ return E_General.Stream_Too_Short;
+ }
+ xlen -= 2;
+
+ if xlen <= 0 {
+ // We're not going to try and recover by scanning for a ZLIB header.
+ // Who knows what else is wrong with this file.
+ return E_GZIP.Invalid_Extra_Data;
+ }
+
+ // printf(" Field \"%v\" of length %v found: ", string(field_id[:]), field_length);
+ if field_length > 0 {
+ field_data := make([]u8, field_length, context.temp_allocator);
+ _, field_error = ctx.input->impl_read(field_data);
+ if field_error != .None {
+ // printf("Parsing Extra returned: %v\n", field_error);
+ return E_General.Stream_Too_Short;
+ }
+ xlen -= field_length;
+
+ // printf("%v\n", string(field_data));
+ }
+
+ if xlen != 0 {
+ return E_GZIP.Invalid_Extra_Data;
+ }
+ }
+ }
+
+ if .name in header.flags {
+ // Should be enough.
+ name: [1024]u8;
+ b: [1]u8;
+ i := 0;
+ name_error: io.Error;
+
+ for i < len(name) {
+ _, name_error = ctx.input->impl_read(b[:]);
+ if name_error != .None {
+ return E_General.Stream_Too_Short;
+ }
+			if b[0] == 0 {
+ break;
+ }
+ name[i] = b[0];
+ i += 1;
+ if i >= len(name) {
+ return E_GZIP.Original_Name_Too_Long;
+ }
+ }
+ // printf("Original filename: %v\n", string(name[:i]));
+ }
+
+ if .comment in header.flags {
+ // Should be enough.
+ comment: [1024]u8;
+ b: [1]u8;
+ i := 0;
+ comment_error: io.Error;
+
+ for i < len(comment) {
+ _, comment_error = ctx.input->impl_read(b[:]);
+ if comment_error != .None {
+ return E_General.Stream_Too_Short;
+ }
+			if b[0] == 0 {
+ break;
+ }
+ comment[i] = b[0];
+ i += 1;
+ if i >= len(comment) {
+ return E_GZIP.Comment_Too_Long;
+ }
+ }
+ // printf("Comment: %v\n", string(comment[:i]));
+ }
+
+ if .header_crc in header.flags {
+ crc16: [2]u8;
+ crc_error: io.Error;
+ _, crc_error = ctx.input->impl_read(crc16[:]);
+ if crc_error != .None {
+ return E_General.Stream_Too_Short;
+ }
+ /*
+ We don't actually check the CRC16 (lower 2 bytes of CRC32 of header data until the CRC field).
+ If we find a gzip file in the wild that sets this field, we can add proper support for it.
+ */
+ }
+
+ /*
+ We should have arrived at the ZLIB payload.
+ */
+
+ zlib_error := zlib.inflate_raw(&ctx);
+
+ // fmt.printf("ZLIB returned: %v\n", zlib_error);
+
+ if zlib_error != nil {
+ return zlib_error;
+ }
+
+ /*
+ Read CRC32 using the ctx bit reader because zlib may leave bytes in there.
+ */
+ compress.discard_to_next_byte_lsb(&ctx);
+
+ payload_crc_b: [4]u8;
+ payload_len_b: [4]u8;
+ for i in 0..3 {
+ payload_crc_b[i] = u8(compress.read_bits_lsb(&ctx, 8));
+ }
+ payload_crc := transmute(u32le)payload_crc_b;
+ for i in 0..3 {
+ payload_len_b[i] = u8(compress.read_bits_lsb(&ctx, 8));
+ }
+ payload_len := int(transmute(u32le)payload_len_b);
+
+ payload := bytes.buffer_to_bytes(buf);
+ crc32 := u32le(hash.crc32(payload));
+
+ if crc32 != payload_crc {
+ return E_GZIP.Payload_CRC_Invalid;
+ }
+
+ if len(payload) != payload_len {
+ return E_GZIP.Payload_Length_Invalid;
+ }
+ return nil;
+}
+
+load :: proc{load_from_file, load_from_slice, load_from_stream};
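Magic is declared as a u16le enum, so the familiar 0x1f, 0x8b signature bytes compare equal to it once they are read as a little-endian u16, which is what load_from_stream does through compress.read_data. A small sketch of just that byte-order relationship; it only hard-codes the two signature bytes for illustration.

package gzip_magic_example

import "core:compress/gzip"
import "core:fmt"

main :: proc() {
	// A GZIP stream starts with the bytes 0x1f, 0x8b on disk; read as a
	// little-endian u16 that is 0x8b1f, which is how Magic.GZIP is defined.
	sig := []u8{0x1f, 0x8b};
	magic := (^u16le)(raw_data(sig))^;

	assert(gzip.Magic(magic) == .GZIP);
	fmt.printf("magic: %x\n", magic);
}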
diff --git a/core/compress/zlib/example.odin b/core/compress/zlib/example.odin
new file mode 100644
index 000000000..9af61e4b3
--- /dev/null
+++ b/core/compress/zlib/example.odin
@@ -0,0 +1,42 @@
+//+ignore
+package zlib
+
+import "core:compress/zlib"
+import "core:bytes"
+import "core:fmt"
+
+main :: proc() {
+
+ ODIN_DEMO := []u8{
+ 120, 156, 101, 144, 77, 110, 131, 48, 16, 133, 215, 204, 41, 158, 44,
+ 69, 73, 32, 148, 182, 75, 35, 14, 208, 125, 47, 96, 185, 195, 143,
+ 130, 13, 50, 38, 81, 84, 101, 213, 75, 116, 215, 43, 246, 8, 53,
+ 82, 126, 8, 181, 188, 152, 153, 111, 222, 147, 159, 123, 165, 247, 170,
+ 98, 24, 213, 88, 162, 198, 244, 157, 243, 16, 186, 115, 44, 75, 227,
+ 5, 77, 115, 72, 137, 222, 117, 122, 179, 197, 39, 69, 161, 170, 156,
+ 50, 144, 5, 68, 130, 4, 49, 126, 127, 190, 191, 144, 34, 19, 57,
+ 69, 74, 235, 209, 140, 173, 242, 157, 155, 54, 158, 115, 162, 168, 12,
+ 181, 239, 246, 108, 17, 188, 174, 242, 224, 20, 13, 199, 198, 235, 250,
+ 194, 166, 129, 86, 3, 99, 157, 172, 37, 230, 62, 73, 129, 151, 252,
+ 70, 211, 5, 77, 31, 104, 188, 160, 113, 129, 215, 59, 205, 22, 52,
+ 123, 160, 83, 142, 255, 242, 89, 123, 93, 149, 200, 50, 188, 85, 54,
+ 252, 18, 248, 192, 238, 228, 235, 198, 86, 224, 118, 224, 176, 113, 166,
+ 112, 67, 106, 227, 159, 122, 215, 88, 95, 110, 196, 123, 205, 183, 224,
+ 98, 53, 8, 104, 213, 234, 201, 147, 7, 248, 192, 14, 170, 29, 25,
+ 171, 15, 18, 59, 138, 112, 63, 23, 205, 110, 254, 136, 109, 78, 231,
+ 63, 234, 138, 133, 204,
+ };
+
+ buf: bytes.Buffer;
+
+ // We can pass ", true" to inflate a raw DEFLATE stream instead of a ZLIB wrapped one.
+ err := zlib.inflate(ODIN_DEMO, &buf);
+ defer bytes.buffer_destroy(&buf);
+
+ if err != nil {
+ fmt.printf("\nError: %v\n", err);
+ }
+ s := bytes.buffer_to_string(&buf);
+ fmt.printf("Input: %v bytes, output (%v bytes):\n%v\n", len(ODIN_DEMO), len(s), s);
+ assert(len(s) == 438);
+}
diff --git a/core/compress/zlib/zlib.odin b/core/compress/zlib/zlib.odin
new file mode 100644
index 000000000..bc19c37ef
--- /dev/null
+++ b/core/compress/zlib/zlib.odin
@@ -0,0 +1,606 @@
+package zlib
+
+import "core:compress"
+
+import "core:mem"
+import "core:io"
+import "core:bytes"
+import "core:hash"
+/*
+ zlib.inflate decompresses a ZLIB stream passed in as a []u8 or io.Stream.
+ Returns: Error.
+*/
+
+Context :: compress.Context;
+
+Compression_Method :: enum u8 {
+ DEFLATE = 8,
+ Reserved = 15,
+}
+
+Compression_Level :: enum u8 {
+ Fastest = 0,
+ Fast = 1,
+ Default = 2,
+ Maximum = 3,
+}
+
+Options :: struct {
+ window_size: u16,
+ level: u8,
+}
+
+Error :: compress.Error;
+E_General :: compress.General_Error;
+E_ZLIB :: compress.ZLIB_Error;
+E_Deflate :: compress.Deflate_Error;
+
+DEFLATE_MAX_CHUNK_SIZE :: 65535;
+DEFLATE_MAX_LITERAL_SIZE :: 65535;
+DEFLATE_MAX_DISTANCE :: 32768;
+DEFLATE_MAX_LENGTH :: 258;
+
+HUFFMAN_MAX_BITS :: 16;
+HUFFMAN_FAST_BITS :: 9;
+HUFFMAN_FAST_MASK :: ((1 << HUFFMAN_FAST_BITS) - 1);
+
+Z_LENGTH_BASE := [31]u16{
+ 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,
+ 67,83,99,115,131,163,195,227,258,0,0,
+};
+
+Z_LENGTH_EXTRA := [31]u8{
+ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0,
+};
+
+Z_DIST_BASE := [32]u16{
+ 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
+ 257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0,
+};
+
+Z_DIST_EXTRA := [32]u8{
+ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,0,0,
+};
+
+Z_LENGTH_DEZIGZAG := []u8{
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
+};
+
+Z_FIXED_LENGTH := [288]u8{
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,
+};
+
+Z_FIXED_DIST := [32]u8{
+ 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+};
+
+/*
+ Accelerate all cases in default tables.
+*/
+ZFAST_BITS :: 9;
+ZFAST_MASK :: ((1 << ZFAST_BITS) - 1);
+
+/*
+ ZLIB-style Huffman encoding.
+ JPEG packs from left, ZLIB from right. We can't share code.
+*/
+Huffman_Table :: struct {
+ fast: [1 << ZFAST_BITS]u16,
+ firstcode: [16]u16,
+ maxcode: [17]int,
+ firstsymbol: [16]u16,
+ size: [288]u8,
+ value: [288]u16,
+};
+
+// Implementation starts here
+
+z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
+ assert(bits <= 16);
+ // NOTE: Can optimize with llvm.bitreverse.i64 or some bit twiddling
+ // by reversing all of the bits and masking out the unneeded ones.
+ r = n;
+ r = ((r & 0xAAAA) >> 1) | ((r & 0x5555) << 1);
+ r = ((r & 0xCCCC) >> 2) | ((r & 0x3333) << 2);
+ r = ((r & 0xF0F0) >> 4) | ((r & 0x0F0F) << 4);
+ r = ((r & 0xFF00) >> 8) | ((r & 0x00FF) << 8);
+
+ r >>= (16 - bits);
+ return;
+}
+
+write_byte :: #force_inline proc(z: ^Context, c: u8) -> (err: io.Error) #no_bounds_check {
+ c := c;
+ buf := transmute([]u8)mem.Raw_Slice{data=&c, len=1};
+ z.rolling_hash = hash.adler32(buf, z.rolling_hash);
+
+ _, e := z.output->impl_write(buf);
+ if e != .None {
+ return e;
+ }
+ z.last[z.bytes_written % z.window_size] = c;
+
+ z.bytes_written += 1;
+ return .None;
+}
+
+allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_Table, err: Error) {
+
+ z = new(Huffman_Table, allocator);
+ return z, nil;
+}
+
+build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
+ sizes: [HUFFMAN_MAX_BITS+1]int;
+ next_code: [HUFFMAN_MAX_BITS]int;
+
+ k := int(0);
+
+ mem.zero_slice(sizes[:]);
+ mem.zero_slice(z.fast[:]);
+
+ for v, _ in code_lengths {
+ sizes[v] += 1;
+ }
+ sizes[0] = 0;
+
+ for i in 1..16 {
+ if sizes[i] > (1 << uint(i)) {
+ return E_Deflate.Huffman_Bad_Sizes;
+ }
+ }
+ code := int(0);
+
+ for i in 1..<16 {
+ next_code[i] = code;
+ z.firstcode[i] = u16(code);
+ z.firstsymbol[i] = u16(k);
+ code = code + sizes[i];
+ if sizes[i] != 0 {
+ if (code - 1 >= (1 << u16(i))) {
+ return E_Deflate.Huffman_Bad_Code_Lengths;
+ }
+ }
+ z.maxcode[i] = code << (16 - uint(i));
+ code <<= 1;
+ k += int(sizes[i]);
+ }
+
+ z.maxcode[16] = 0x10000; // Sentinel
+ c: int;
+
+ for v, ci in code_lengths {
+ if v != 0 {
+ c = next_code[v] - int(z.firstcode[v]) + int(z.firstsymbol[v]);
+ fastv := u16((u16(v) << 9) | u16(ci));
+ z.size[c] = u8(v);
+ z.value[c] = u16(ci);
+ if (v <= ZFAST_BITS) {
+ j := z_bit_reverse(u16(next_code[v]), v);
+ for j < (1 << ZFAST_BITS) {
+ z.fast[j] = fastv;
+ j += (1 << v);
+ }
+ }
+ next_code[v] += 1;
+ }
+ }
+ return nil;
+}
+
+decode_huffman_slowpath :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
+
+ r = 0;
+ err = nil;
+
+ k: int;
+ s: u8;
+
+ code := u16(compress.peek_bits_lsb(z, 16));
+
+ k = int(z_bit_reverse(code, 16));
+
+ #no_bounds_check for s = HUFFMAN_FAST_BITS+1; ; {
+ if k < t.maxcode[s] {
+ break;
+ }
+ s += 1;
+ }
+ if (s >= 16) {
+ return 0, E_Deflate.Bad_Huffman_Code;
+ }
+ // code size is s, so:
+ b := (k >> (16-s)) - int(t.firstcode[s]) + int(t.firstsymbol[s]);
+ if b >= size_of(t.size) {
+ return 0, E_Deflate.Bad_Huffman_Code;
+ }
+ if t.size[b] != s {
+ return 0, E_Deflate.Bad_Huffman_Code;
+ }
+
+ compress.consume_bits_lsb(z, s);
+
+ r = t.value[b];
+ return r, nil;
+}
+
+decode_huffman :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
+
+ if z.num_bits < 16 {
+ if z.num_bits == -100 {
+ return 0, E_ZLIB.Code_Buffer_Malformed;
+ }
+ compress.refill_lsb(z);
+ if z.eof {
+ return 0, E_General.Stream_Too_Short;
+ }
+ }
+ #no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK];
+ if b != 0 {
+ s := u8(b >> ZFAST_BITS);
+ compress.consume_bits_lsb(z, s);
+ return b & 511, nil;
+ }
+ return decode_huffman_slowpath(z, t);
+}
+
+parse_huffman_block :: proc(z: ^Context, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
+ #no_bounds_check for {
+ value, e := decode_huffman(z, z_repeat);
+ if e != nil {
+			return e;
+ }
+ if value < 256 {
+ e := write_byte(z, u8(value));
+ if e != .None {
+ return E_General.Output_Too_Short;
+ }
+ } else {
+ if value == 256 {
+ // End of block
+ return nil;
+ }
+
+ value -= 257;
+ length := Z_LENGTH_BASE[value];
+ if Z_LENGTH_EXTRA[value] > 0 {
+ length += u16(compress.read_bits_lsb(z, Z_LENGTH_EXTRA[value]));
+ }
+
+ value, e = decode_huffman(z, z_offset);
+ if e != nil {
+ return E_Deflate.Bad_Huffman_Code;
+ }
+
+ distance := Z_DIST_BASE[value];
+ if Z_DIST_EXTRA[value] > 0 {
+ distance += u16(compress.read_bits_lsb(z, Z_DIST_EXTRA[value]));
+ }
+
+ if z.bytes_written < i64(distance) {
+ // Distance is longer than we've decoded so far.
+ return E_Deflate.Bad_Distance;
+ }
+
+ offset := i64(z.bytes_written - i64(distance));
+ /*
+ These might be sped up with a repl_byte call that copies
+ from the already written output more directly, and that
+				updates the Adler checksum once after.
+
+ That way we'd suffer less Stream vtable overhead.
+ */
+ if distance == 1 {
+ /*
+ Replicate the last outputted byte, length times.
+ */
+ if length > 0 {
+ b, e := compress.peek_back_byte(z, offset);
+ if e != .None {
+ return E_General.Output_Too_Short;
+ }
+ #no_bounds_check for _ in 0..<length {
+ write_byte(z, b);
+ }
+ }
+ } else {
+ if length > 0 {
+ #no_bounds_check for _ in 0..<length {
+ b, e := compress.peek_back_byte(z, offset);
+ if e != .None {
+ return E_General.Output_Too_Short;
+ }
+ write_byte(z, b);
+ offset += 1;
+ }
+ }
+ }
+ }
+ }
+}
+
+inflate_from_stream :: proc(using ctx: ^Context, raw := false, allocator := context.allocator) -> (err: Error) #no_bounds_check {
+ /*
+ ctx.input must be an io.Stream backed by an implementation that supports:
+ - read
+ - size
+
+ ctx.output must be an io.Stream backed by an implementation that supports:
+ - write
+
+ raw determines whether the ZLIB header is processed, or we're inflating a raw
+ DEFLATE stream.
+ */
+
+ if !raw {
+ data_size := io.size(ctx.input);
+ if data_size < 6 {
+ return E_General.Stream_Too_Short;
+ }
+
+ cmf, _ := compress.read_u8(ctx);
+
+ method := Compression_Method(cmf & 0xf);
+ if method != .DEFLATE {
+ return E_General.Unknown_Compression_Method;
+ }
+
+ cinfo := (cmf >> 4) & 0xf;
+ if cinfo > 7 {
+ return E_ZLIB.Unsupported_Window_Size;
+ }
+ ctx.window_size = 1 << (cinfo + 8);
+
+ flg, _ := compress.read_u8(ctx);
+
+ fcheck := flg & 0x1f;
+ fcheck_computed := (cmf << 8 | flg) & 0x1f;
+ if fcheck != fcheck_computed {
+ return E_General.Checksum_Failed;
+ }
+
+ fdict := (flg >> 5) & 1;
+ /*
+ We don't handle built-in dictionaries for now.
+ They're application specific and PNG doesn't use them.
+ */
+ if fdict != 0 {
+ return E_ZLIB.FDICT_Unsupported;
+ }
+
+ // flevel := Compression_Level((flg >> 6) & 3);
+ /*
+ Inflate can consume bits belonging to the Adler checksum.
+ We pass the entire stream to Inflate and will unget bytes if we need to
+ at the end to compare checksums.
+ */
+
+ // Seed the Adler32 rolling checksum.
+ ctx.rolling_hash = 1;
+ }
+
+ // Parse ZLIB stream without header.
+ err = inflate_raw(ctx);
+ if err != nil {
+ return err;
+ }
+
+ if !raw {
+ compress.discard_to_next_byte_lsb(ctx);
+
+ adler32 := compress.read_bits_lsb(ctx, 8) << 24 | compress.read_bits_lsb(ctx, 8) << 16 | compress.read_bits_lsb(ctx, 8) << 8 | compress.read_bits_lsb(ctx, 8);
+ if ctx.rolling_hash != u32(adler32) {
+ return E_General.Checksum_Failed;
+ }
+ }
+ return nil;
+}
+
+// @(optimization_mode="speed")
+inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) -> (err: Error) #no_bounds_check {
+ final := u32(0);
+ type := u32(0);
+
+ z.num_bits = 0;
+ z.code_buffer = 0;
+
+ z_repeat: ^Huffman_Table;
+ z_offset: ^Huffman_Table;
+ codelength_ht: ^Huffman_Table;
+
+ z_repeat, err = allocate_huffman_table(allocator=context.allocator);
+ if err != nil {
+ return err;
+ }
+ z_offset, err = allocate_huffman_table(allocator=context.allocator);
+ if err != nil {
+ return err;
+ }
+ codelength_ht, err = allocate_huffman_table(allocator=context.allocator);
+ if err != nil {
+ return err;
+ }
+ defer free(z_repeat);
+ defer free(z_offset);
+ defer free(codelength_ht);
+
+ if z.window_size == 0 {
+ z.window_size = DEFLATE_MAX_DISTANCE;
+ }
+
+ // Allocate rolling window buffer.
+ last_b := mem.make_dynamic_array_len_cap([dynamic]u8, z.window_size, z.window_size, allocator);
+ z.last = &last_b;
+ defer delete(last_b);
+
+ for {
+ final = compress.read_bits_lsb(z, 1);
+ type = compress.read_bits_lsb(z, 2);
+
+ // fmt.printf("Final: %v | Type: %v\n", final, type);
+
+ switch type {
+ case 0:
+ // Uncompressed block
+
+ // Discard bits until next byte boundary
+ compress.discard_to_next_byte_lsb(z);
+
+ uncompressed_len := i16(compress.read_bits_lsb(z, 16));
+ length_check := i16(compress.read_bits_lsb(z, 16));
+
+ // fmt.printf("LEN: %v, ~LEN: %v, NLEN: %v, ~NLEN: %v\n", uncompressed_len, ~uncompressed_len, length_check, ~length_check);
+
+
+ if ~uncompressed_len != length_check {
+ return E_Deflate.Len_Nlen_Mismatch;
+ }
+
+ /*
+ TODO: Maybe speed this up with a stream-to-stream copy (read_from)
+ and a single Adler32 update after.
+ */
+ #no_bounds_check for uncompressed_len > 0 {
+ compress.refill_lsb(z);
+ lit := compress.read_bits_lsb(z, 8);
+ write_byte(z, u8(lit));
+ uncompressed_len -= 1;
+ }
+ case 3:
+ return E_Deflate.BType_3;
+ case:
+ // log.debugf("Err: %v | Final: %v | Type: %v\n", err, final, type);
+ if type == 1 {
+ // Use fixed code lengths.
+ err = build_huffman(z_repeat, Z_FIXED_LENGTH[:]);
+ if err != nil {
+ return err;
+ }
+ err = build_huffman(z_offset, Z_FIXED_DIST[:]);
+ if err != nil {
+ return err;
+ }
+ } else {
+ lencodes: [286+32+137]u8;
+ codelength_sizes: [19]u8;
+
+ //i: u32;
+ n: u32;
+
+ compress.refill_lsb(z, 14);
+ hlit := compress.read_bits_no_refill_lsb(z, 5) + 257;
+ hdist := compress.read_bits_no_refill_lsb(z, 5) + 1;
+ hclen := compress.read_bits_no_refill_lsb(z, 4) + 4;
+ ntot := hlit + hdist;
+
+ #no_bounds_check for i in 0..<hclen {
+ s := compress.read_bits_lsb(z, 3);
+ codelength_sizes[Z_LENGTH_DEZIGZAG[i]] = u8(s);
+ }
+ err = build_huffman(codelength_ht, codelength_sizes[:]);
+ if err != nil {
+ return err;
+ }
+
+ n = 0;
+ c: u16;
+
+ for n < ntot {
+ c, err = decode_huffman(z, codelength_ht);
+ if err != nil {
+ return err;
+ }
+
+ if c < 0 || c >= 19 {
+ return E_Deflate.Huffman_Bad_Code_Lengths;
+ }
+ if c < 16 {
+ lencodes[n] = u8(c);
+ n += 1;
+ } else {
+ fill := u8(0);
+ compress.refill_lsb(z, 7);
+ switch c {
+ case 16:
+ c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3);
+ if n == 0 {
+ return E_Deflate.Huffman_Bad_Code_Lengths;
+ }
+ fill = lencodes[n - 1];
+ case 17:
+ c = u16(compress.read_bits_no_refill_lsb(z, 3) + 3);
+ case 18:
+ c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11);
+ case:
+ return E_Deflate.Huffman_Bad_Code_Lengths;
+ }
+
+ if ntot - n < u32(c) {
+ return E_Deflate.Huffman_Bad_Code_Lengths;
+ }
+
+ nc := n + u32(c);
+ #no_bounds_check for ; n < nc; n += 1 {
+ lencodes[n] = fill;
+ }
+ }
+ }
+
+ if n != ntot {
+ return E_Deflate.Huffman_Bad_Code_Lengths;
+ }
+
+ err = build_huffman(z_repeat, lencodes[:hlit]);
+ if err != nil {
+ return err;
+ }
+
+ err = build_huffman(z_offset, lencodes[hlit:ntot]);
+ if err != nil {
+ return err;
+ }
+ }
+ err = parse_huffman_block(z, z_repeat, z_offset);
+ // log.debugf("Err: %v | Final: %v | Type: %v\n", err, final, type);
+ if err != nil {
+ return err;
+ }
+ }
+ if final == 1 {
+ break;
+ }
+ }
+ return nil;
+}
+
+inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
+ ctx := Context{};
+
+ r := bytes.Reader{};
+ bytes.reader_init(&r, input);
+ rs := bytes.reader_to_stream(&r);
+ ctx.input = rs;
+
+ buf := buf;
+ ws := bytes.buffer_to_stream(buf);
+ ctx.output = ws;
+
+ err = inflate_from_stream(&ctx, raw);
+
+ return err;
+}
+
+inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
+ return inflate_from_byte_array(input, buf, true);
+}
+
+inflate :: proc{inflate_from_stream, inflate_from_byte_array};
+inflate_raw :: proc{inflate_from_stream_raw, inflate_from_byte_array_raw};
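build_huffman depends on z_bit_reverse because DEFLATE packs Huffman codes starting at the least significant bit, so each canonical code is bit-reversed before it indexes the fast lookup table. A tiny sketch of what that reversal does, with hand-picked values (z_bit_reverse is not marked private in this change, so it is reachable from outside the package):

package bit_reverse_example

import "core:compress/zlib"
import "core:fmt"

main :: proc() {
	// Reverse the low 4 bits: 1011 becomes 1101.
	assert(zlib.z_bit_reverse(0b1011, 4) == 0b1101);
	// Reverse the low 3 bits: 001 becomes 100.
	assert(zlib.z_bit_reverse(0b001, 3) == 0b100);

	fmt.println("z_bit_reverse behaves as expected.");
}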
diff --git a/core/fmt/fmt.odin b/core/fmt/fmt.odin
index 6de6b0245..3b3716a15 100644
--- a/core/fmt/fmt.odin
+++ b/core/fmt/fmt.odin
@@ -641,9 +641,9 @@ fmt_write_padding :: proc(fi: ^Info, width: int) {
return;
}
- pad_byte: byte = '0';
- if fi.space {
- pad_byte = ' ';
+ pad_byte: byte = ' ';
+ if !fi.space {
+ pad_byte = '0';
}
for i := 0; i < width; i += 1 {
@@ -1908,17 +1908,6 @@ fmt_value :: proc(fi: ^Info, v: any, verb: rune) {
}
}
-
- handle_relative_pointer :: proc(ptr: ^$T) -> rawptr where intrinsics.type_is_integer(T) {
- if ptr^ == 0 {
- return nil;
- }
- when intrinsics.type_is_unsigned(T) {
- return rawptr(uintptr(ptr) + uintptr(ptr^));
- } else {
- return rawptr(uintptr(ptr) + uintptr(i64(ptr^)));
- }
- }
}
fmt_complex :: proc(fi: ^Info, c: complex128, bits: int, verb: rune) {
diff --git a/core/image/common.odin b/core/image/common.odin
new file mode 100644
index 000000000..9024ec769
--- /dev/null
+++ b/core/image/common.odin
@@ -0,0 +1,204 @@
+package image
+
+import "core:bytes"
+import "core:mem"
+
+Image :: struct {
+ width: int,
+ height: int,
+ channels: int,
+ depth: u8,
+ pixels: bytes.Buffer,
+ /*
+ Some image loaders/writers can return/take an optional background color.
+ For convenience, we return them as u16 so we don't need to switch on the type
+ in our viewer, and can just test against nil.
+ */
+ background: Maybe([3]u16),
+ sidecar: any,
+}
+
+/*
+ IMPORTANT: `.do_not_expand_*` options currently skip handling of the `alpha_*` options,
+ therefore Gray+Alpha will be returned as such even if you add `.alpha_drop_if_present`,
+ and `.alpha_add_if_missing` and keyed transparency will likewise be ignored.
+
+ The same goes for indexed images. This will be remedied in a near future update.
+*/
+
+/*
+Image_Option:
+ `.info`
+ This option behaves as `.return_ihdr` and `.do_not_decompress_image` and can be used
+ to gather an image's dimensions and color information.
+
+ `.return_header`
+ Fill out img.sidecar.header with the image's format-specific header struct.
+ If we only care about the image specs, we can set `.return_header` +
+ `.do_not_decompress_image`, or `.info`, which works as if both of these were set.
+
+ `.return_metadata`
+ Returns all chunks not needed to decode the data.
+ It also returns the header as if `.return_header` was set.
+
+ `.do_not_decompress_image`
+ Skip decompressing IDAT chunk, defiltering and the rest.
+
+ `.do_not_expand_grayscale`
+ Do not turn grayscale (+ Alpha) images into RGB(A).
+ Returns just the 1 or 2 channels present, although 1, 2 and 4 bit are still scaled to 8-bit.
+
+ `.do_not_expand_indexed`
+ Do not turn indexed (+ Alpha) images into RGB(A).
+ Returns just the 1 or 2 (with `tRNS`) channels present.
+ Make sure to use `return_metadata` to also return the palette chunk so you can recolor it yourself.
+
+ `.do_not_expand_channels`
+ Applies both `.do_not_expand_grayscale` and `.do_not_expand_indexed`.
+
+ `.alpha_add_if_missing`
+ If the image has no alpha channel, it'll add one set to max(type).
+ Turns RGB into RGBA and Gray into Gray+Alpha
+
+ `.alpha_drop_if_present`
+ If the image has an alpha channel, drop it.
+ You may want to use `.alpha_premultiply` in this case.
+
+ NOTE: For PNG, this also skips handling of the tRNS chunk, if present,
+ unless you select `alpha_premultiply`.
+ In this case it'll premultiply the specified pixels in question only,
+ as the others are implicitly fully opaque.
+
+ `.alpha_premultiply`
+ If the image has an alpha channel, returns image data as follows:
+		RGB *= A, Gray *= A
+
+ `.blend_background`
+ If a bKGD chunk is present in a PNG, we normally just set `img.background`
+ with its value and leave it up to the application to decide how to display the image,
+ as per the PNG specification.
+
+ With `.blend_background` selected, we blend the image against the background
+ color. As this negates the use for an alpha channel, we'll drop it _unless_
+ you also specify `.alpha_add_if_missing`.
+
+ Options that don't apply to an image format will be ignored by their loader.
+*/
+
+Option :: enum {
+ info = 0,
+ do_not_decompress_image,
+ return_header,
+ return_metadata,
+ alpha_add_if_missing,
+ alpha_drop_if_present,
+ alpha_premultiply,
+ blend_background,
+ // Unimplemented
+ do_not_expand_grayscale,
+ do_not_expand_indexed,
+ do_not_expand_channels,
+}
+Options :: distinct bit_set[Option];
+
+Error :: enum {
+ Invalid_PNG_Signature,
+ IHDR_Not_First_Chunk,
+ IHDR_Corrupt,
+ IDAT_Missing,
+ IDAT_Must_Be_Contiguous,
+ IDAT_Corrupt,
+ PNG_Does_Not_Adhere_to_Spec,
+ PLTE_Encountered_Unexpectedly,
+ PLTE_Invalid_Length,
+ TRNS_Encountered_Unexpectedly,
+ BKGD_Invalid_Length,
+ Invalid_Image_Dimensions,
+ Unknown_Color_Type,
+ Invalid_Color_Bit_Depth_Combo,
+ Unknown_Filter_Method,
+ Unknown_Interlace_Method,
+ Requested_Channel_Not_Present,
+ Post_Processing_Error,
+}
+
+/*
+ Functions to help with image buffer calculations
+*/
+
+compute_buffer_size :: proc(width, height, channels, depth: int, extra_row_bytes := int(0)) -> (size: int) {
+
+ size = ((((channels * width * depth) + 7) >> 3) + extra_row_bytes) * height;
+ return;
+}
+
+/*
+ For when you have an RGB(A) image, but want a particular channel.
+*/
+
+Channel :: enum u8 {
+ R = 1,
+ G = 2,
+ B = 3,
+ A = 4,
+}
+
+return_single_channel :: proc(img: ^Image, channel: Channel) -> (res: ^Image, ok: bool) {
+
+ ok = false;
+ t: bytes.Buffer;
+
+ idx := int(channel);
+
+ if img.channels == 2 && idx == 4 {
+ // Alpha requested, which in a two channel image is index 2: G.
+ idx = 2;
+ }
+
+ if idx > img.channels {
+ return {}, false;
+ }
+
+ switch(img.depth) {
+ case 8:
+ buffer_size := compute_buffer_size(img.width, img.height, 1, 8);
+ t = bytes.Buffer{};
+ resize(&t.buf, buffer_size);
+
+ i := bytes.buffer_to_bytes(&img.pixels);
+ o := bytes.buffer_to_bytes(&t);
+
+ for len(i) > 0 {
+ o[0] = i[idx];
+ i = i[img.channels:];
+ o = o[1:];
+ }
+ case 16:
+ buffer_size := compute_buffer_size(img.width, img.height, 2, 8);
+ t = bytes.Buffer{};
+ resize(&t.buf, buffer_size);
+
+ i := mem.slice_data_cast([]u16, img.pixels.buf[:]);
+ o := mem.slice_data_cast([]u16, t.buf[:]);
+
+ for len(i) > 0 {
+ o[0] = i[idx];
+ i = i[img.channels:];
+ o = o[1:];
+ }
+ case 1, 2, 4:
+ // We shouldn't see this case, as the loader already turns these into 8-bit.
+ return {}, false;
+ }
+
+ res = new(Image);
+ res.width = img.width;
+ res.height = img.height;
+ res.channels = 1;
+ res.depth = img.depth;
+ res.pixels = t;
+ res.background = img.background;
+ res.sidecar = img.sidecar;
+
+ return res, true;
+}
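compute_buffer_size rounds each row up to whole bytes before multiplying by the height, which matters for depths below 8 bits. A quick sketch with hand-checked values; the dimensions are arbitrary.

package buffer_size_example

import "core:image"
import "core:fmt"

main :: proc() {
	// 100x10 RGB at 8 bits per channel: ((3*100*8)+7)>>3 = 300 bytes per row, 3000 bytes total.
	assert(image.compute_buffer_size(100, 10, 3, 8) == 3000);

	// 4x2 single-channel 1-bit image: a 4-pixel row still occupies a full byte, so 2 bytes total.
	assert(image.compute_buffer_size(4, 2, 1, 1) == 2);

	fmt.println("compute_buffer_size behaves as expected.");
}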
diff --git a/core/image/png/example.odin b/core/image/png/example.odin
new file mode 100644
index 000000000..59a4cfd42
--- /dev/null
+++ b/core/image/png/example.odin
@@ -0,0 +1,327 @@
+//+ignore
+package png
+
+import "core:compress"
+import "core:image"
+import "core:image/png"
+import "core:bytes"
+import "core:fmt"
+
+// For PPM writer
+import "core:mem"
+import "core:os"
+
+main :: proc() {
+ file: string;
+
+ options := image.Options{};
+ err: compress.Error;
+ img: ^image.Image;
+
+ file = "../../../misc/logo-slim.png";
+
+ img, err = png.load(file, options);
+ defer png.destroy(img);
+
+ if err != nil {
+ fmt.printf("Trying to read PNG file %v returned %v\n", file, err);
+ } else {
+ v: png.Info;
+ ok: bool;
+
+ fmt.printf("Image: %vx%vx%v, %v-bit.\n", img.width, img.height, img.channels, img.depth);
+
+ if v, ok = img.sidecar.(png.Info); ok {
+ // Handle ancillary chunks as you wish.
+ // We provide helper functions for a few types.
+ for c in v.chunks {
+ #partial switch (c.header.type) {
+ case .tIME:
+ t, _ := png.core_time(c);
+ fmt.printf("[tIME]: %v\n", t);
+ case .gAMA:
+ fmt.printf("[gAMA]: %v\n", png.gamma(c));
+ case .pHYs:
+ phys := png.phys(c);
+ if phys.unit == .Meter {
+ xm := f32(img.width) / f32(phys.ppu_x);
+ ym := f32(img.height) / f32(phys.ppu_y);
+ dpi_x, dpi_y := png.phys_to_dpi(phys);
+ fmt.printf("[pHYs] Image resolution is %v x %v pixels per meter.\n", phys.ppu_x, phys.ppu_y);
+ fmt.printf("[pHYs] Image resolution is %v x %v DPI.\n", dpi_x, dpi_y);
+ fmt.printf("[pHYs] Image dimensions are %v x %v meters.\n", xm, ym);
+ } else {
+ fmt.printf("[pHYs] x: %v, y: %v pixels per unknown unit.\n", phys.ppu_x, phys.ppu_y);
+ }
+ case .iTXt, .zTXt, .tEXt:
+ res, ok_text := png.text(c);
+ if ok_text {
+ if c.header.type == .iTXt {
+ fmt.printf("[iTXt] %v (%v:%v): %v\n", res.keyword, res.language, res.keyword_localized, res.text);
+ } else {
+ fmt.printf("[tEXt/zTXt] %v: %v\n", res.keyword, res.text);
+ }
+ }
+ defer png.text_destroy(res);
+ case .bKGD:
+ fmt.printf("[bKGD] %v\n", img.background);
+ case .eXIf:
+ res, ok_exif := png.exif(c);
+ if ok_exif {
+ /*
+ Other than checking the signature and byte order, we don't handle Exif data.
+ If you wish to interpret it, pass it to an Exif parser.
+ */
+ fmt.printf("[eXIf] %v\n", res);
+ }
+ case .PLTE:
+ plte, plte_ok := png.plte(c);
+ if plte_ok {
+ fmt.printf("[PLTE] %v\n", plte);
+ } else {
+ fmt.printf("[PLTE] Error\n");
+ }
+ case .hIST:
+ res, ok_hist := png.hist(c);
+ if ok_hist {
+ fmt.printf("[hIST] %v\n", res);
+ }
+ case .cHRM:
+ res, ok_chrm := png.chrm(c);
+ if ok_chrm {
+ fmt.printf("[cHRM] %v\n", res);
+ }
+ case .sPLT:
+ res, ok_splt := png.splt(c);
+ if ok_splt {
+ fmt.printf("[sPLT] %v\n", res);
+ }
+ png.splt_destroy(res);
+ case .sBIT:
+ if res, ok_sbit := png.sbit(c); ok_sbit {
+ fmt.printf("[sBIT] %v\n", res);
+ }
+ case .iCCP:
+ res, ok_iccp := png.iccp(c);
+ if ok_iccp {
+ fmt.printf("[iCCP] %v\n", res);
+ }
+ png.iccp_destroy(res);
+ case .sRGB:
+ if res, ok_srgb := png.srgb(c); ok_srgb {
+ fmt.printf("[sRGB] Rendering intent: %v\n", res);
+ }
+ case:
+ type := c.header.type;
+ name := png.chunk_type_to_name(&type);
+ fmt.printf("[%v]: %v\n", name, c.data);
+ }
+ }
+ }
+ }
+
+ if err == nil && .do_not_decompress_image not_in options && .info not_in options {
+ if ok := write_image_as_ppm("out.ppm", img); ok {
+ fmt.println("Saved decoded image.");
+ } else {
+ fmt.println("Error saving out.ppm.");
+ fmt.println(img);
+ }
+ }
+}
+
+// Crappy PPM writer used during testing. Don't use in production.
+write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: bool) {
+
+ _bg :: proc(bg: Maybe([3]u16), x, y: int, high := true) -> (res: [3]u16) {
+ if v, ok := bg.?; ok {
+ res = v;
+ } else {
+ if high {
+ l := u16(30 * 256 + 30);
+
+ if (x & 4 == 0) ~ (y & 4 == 0) {
+ res = [3]u16{l, 0, l};
+ } else {
+ res = [3]u16{l >> 1, 0, l >> 1};
+ }
+ } else {
+ if (x & 4 == 0) ~ (y & 4 == 0) {
+ res = [3]u16{30, 30, 30};
+ } else {
+ res = [3]u16{15, 15, 15};
+ }
+ }
+ }
+ return;
+ }
+
+ // profiler.timed_proc();
+ using image;
+ using os;
+
+ flags: int = O_WRONLY|O_CREATE|O_TRUNC;
+
+ img := image;
+
+	// PPM 16-bit images are big endian
+ when ODIN_ENDIAN == "little" {
+ if img.depth == 16 {
+ // The pixel components are in Big Endian. Let's byteswap back.
+ input := mem.slice_data_cast([]u16, img.pixels.buf[:]);
+ output := mem.slice_data_cast([]u16be, img.pixels.buf[:]);
+ #no_bounds_check for v, i in input {
+ output[i] = u16be(v);
+ }
+ }
+ }
+
+ pix := bytes.buffer_to_bytes(&img.pixels);
+
+ if len(pix) == 0 || len(pix) < image.width * image.height * int(image.channels) {
+ return false;
+ }
+
+ mode: int = 0;
+ when ODIN_OS == "linux" || ODIN_OS == "darwin" {
+ // NOTE(justasd): 644 (owner read, write; group read; others read)
+ mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+ }
+
+ fd, err := open(filename, flags, mode);
+ if err != 0 {
+ return false;
+ }
+ defer close(fd);
+
+ write_string(fd,
+ fmt.tprintf("P6\n%v %v\n%v\n", width, height, (1 << depth -1)),
+ );
+
+ if channels == 3 {
+ // We don't handle transparency here...
+ write_ptr(fd, raw_data(pix), len(pix));
+ } else {
+ bpp := depth == 16 ? 2 : 1;
+ bytes_needed := width * height * 3 * bpp;
+
+ op := bytes.Buffer{};
+ bytes.buffer_init_allocator(&op, bytes_needed, bytes_needed);
+ defer bytes.buffer_destroy(&op);
+
+ if channels == 1 {
+ if depth == 16 {
+ assert(len(pix) == width * height * 2);
+ p16 := mem.slice_data_cast([]u16, pix);
+ o16 := mem.slice_data_cast([]u16, op.buf[:]);
+ #no_bounds_check for len(p16) != 0 {
+ r := u16(p16[0]);
+ o16[0] = r;
+ o16[1] = r;
+ o16[2] = r;
+ p16 = p16[1:];
+ o16 = o16[3:];
+ }
+ } else {
+ o := 0;
+ for i := 0; i < len(pix); i += 1 {
+ r := pix[i];
+ op.buf[o ] = r;
+ op.buf[o+1] = r;
+ op.buf[o+2] = r;
+ o += 3;
+ }
+ }
+ write_ptr(fd, raw_data(op.buf), len(op.buf));
+ } else if channels == 2 {
+ if depth == 16 {
+ p16 := mem.slice_data_cast([]u16, pix);
+ o16 := mem.slice_data_cast([]u16, op.buf[:]);
+
+ bgcol := img.background;
+
+ #no_bounds_check for len(p16) != 0 {
+ r := f64(u16(p16[0]));
+ bg: f64;
+ if bgcol != nil {
+ v := bgcol.([3]u16)[0];
+ bg = f64(v);
+ }
+ a := f64(u16(p16[1])) / 65535.0;
+ l := (a * r) + (1 - a) * bg;
+
+ o16[0] = u16(l);
+ o16[1] = u16(l);
+ o16[2] = u16(l);
+
+ p16 = p16[2:];
+ o16 = o16[3:];
+ }
+ } else {
+ o := 0;
+ for i := 0; i < len(pix); i += 2 {
+ r := pix[i]; a := pix[i+1]; a1 := f32(a) / 255.0;
+ c := u8(f32(r) * a1);
+ op.buf[o ] = c;
+ op.buf[o+1] = c;
+ op.buf[o+2] = c;
+ o += 3;
+ }
+ }
+ write_ptr(fd, raw_data(op.buf), len(op.buf));
+ } else if channels == 4 {
+ if depth == 16 {
+ p16 := mem.slice_data_cast([]u16be, pix);
+ o16 := mem.slice_data_cast([]u16be, op.buf[:]);
+
+ #no_bounds_check for len(p16) != 0 {
+
+ bg := _bg(img.background, 0, 0);
+ r := f32(p16[0]);
+ g := f32(p16[1]);
+ b := f32(p16[2]);
+ a := f32(p16[3]) / 65535.0;
+
+ lr := (a * r) + (1 - a) * f32(bg[0]);
+ lg := (a * g) + (1 - a) * f32(bg[1]);
+ lb := (a * b) + (1 - a) * f32(bg[2]);
+
+ o16[0] = u16be(lr);
+ o16[1] = u16be(lg);
+ o16[2] = u16be(lb);
+
+ p16 = p16[4:];
+ o16 = o16[3:];
+ }
+ } else {
+ o := 0;
+
+ for i := 0; i < len(pix); i += 4 {
+
+ x := (i / 4) % width;
+ y := i / width / 4;
+
+ _b := _bg(img.background, x, y, false);
+ bgcol := [3]u8{u8(_b[0]), u8(_b[1]), u8(_b[2])};
+
+ r := f32(pix[i]);
+ g := f32(pix[i+1]);
+ b := f32(pix[i+2]);
+ a := f32(pix[i+3]) / 255.0;
+
+ lr := u8(f32(r) * a + (1 - a) * f32(bgcol[0]));
+ lg := u8(f32(g) * a + (1 - a) * f32(bgcol[1]));
+ lb := u8(f32(b) * a + (1 - a) * f32(bgcol[2]));
+ op.buf[o ] = lr;
+ op.buf[o+1] = lg;
+ op.buf[o+2] = lb;
+ o += 3;
+ }
+ }
+ write_ptr(fd, raw_data(op.buf), len(op.buf));
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
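The example above decodes the full image; when only the dimensions and color information are wanted, the .info option documented in core/image/common.odin behaves as .return_header plus .do_not_decompress_image. A minimal sketch, reusing the same in-tree test file path as the example above:

package png_info_example

import "core:image"
import "core:image/png"
import "core:fmt"

main :: proc() {
	// .info parses the header but skips decompressing the pixel data.
	img, err := png.load("../../../misc/logo-slim.png", image.Options{.info});
	defer png.destroy(img);

	if err != nil {
		fmt.printf("png.load returned %v\n", err);
		return;
	}
	fmt.printf("%v x %v, %v channel(s), %v-bit\n", img.width, img.height, img.channels, img.depth);
}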
diff --git a/core/image/png/helpers.odin b/core/image/png/helpers.odin
new file mode 100644
index 000000000..0975d1d87
--- /dev/null
+++ b/core/image/png/helpers.odin
@@ -0,0 +1,516 @@
+package png
+
+import "core:image"
+import "core:compress/zlib"
+import coretime "core:time"
+import "core:strings"
+import "core:bytes"
+import "core:mem"
+
+/*
+ These are a few useful utility functions to work with PNG images.
+*/
+
+/*
+ Cleanup of image-specific data.
+ There are other helpers for cleanup of PNG-specific data.
+ Those are named *_destroy, where * is the name of the helper.
+*/
+
+destroy :: proc(img: ^Image) {
+ if img == nil {
+ /*
+ Nothing to do.
+ Load must've returned with an error.
+ */
+ return;
+ }
+
+ bytes.buffer_destroy(&img.pixels);
+
+ /*
+ We don't need to do anything for the individual chunks.
+ They're allocated on the temp allocator, as is info.chunks
+
+ See read_chunk.
+ */
+ free(img);
+}
+
+/*
+ Chunk helpers
+*/
+
+gamma :: proc(c: Chunk) -> f32 {
+ assert(c.header.type == .gAMA);
+ res := (^gAMA)(raw_data(c.data))^;
+ when true {
+ // Returns the wrong result on old backend
+ // Fixed for -llvm-api
+ return f32(res.gamma_100k) / 100_000.0;
+ } else {
+ return f32(u32(res.gamma_100k)) / 100_000.0;
+ }
+}
+
+INCHES_PER_METER :: 1000.0 / 25.4;
+
+phys :: proc(c: Chunk) -> pHYs {
+ assert(c.header.type == .pHYs);
+ res := (^pHYs)(raw_data(c.data))^;
+ return res;
+}
+
+phys_to_dpi :: proc(p: pHYs) -> (x_dpi, y_dpi: f32) {
+ return f32(p.ppu_x) / INCHES_PER_METER, f32(p.ppu_y) / INCHES_PER_METER;
+}
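+
+/*
+	Sketch: converting a pHYs chunk `c` (assumed to be of type .pHYs) to DPI:
+
+		x_dpi, y_dpi := phys_to_dpi(phys(c));
+*/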
+
+time :: proc(c: Chunk) -> tIME {
+ assert(c.header.type == .tIME);
+ res := (^tIME)(raw_data(c.data))^;
+ return res;
+}
+
+core_time :: proc(c: Chunk) -> (t: coretime.Time, ok: bool) {
+ png_time := time(c);
+ using png_time;
+ return coretime.datetime_to_time(
+ int(year), int(month), int(day),
+ int(hour), int(minute), int(second),
+ );
+}
+
+text :: proc(c: Chunk) -> (res: Text, ok: bool) {
+ #partial switch c.header.type {
+ case .tEXt:
+ ok = true;
+
+ fields := bytes.split(s=c.data, sep=[]u8{0}, allocator=context.temp_allocator);
+ if len(fields) == 2 {
+ res.keyword = strings.clone(string(fields[0]));
+ res.text = strings.clone(string(fields[1]));
+ } else {
+ ok = false;
+ }
+ return;
+ case .zTXt:
+ ok = true;
+
+ fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator);
+ if len(fields) != 3 || len(fields[1]) != 0 {
+ // Compression method must be 0=Deflate, which thanks to the split above turns
+ // into an empty slice
+ ok = false; return;
+ }
+
+ // Set up ZLIB context and decompress text payload.
+ buf: bytes.Buffer;
+ zlib_error := zlib.inflate_from_byte_array(fields[2], &buf);
+ defer bytes.buffer_destroy(&buf);
+ if zlib_error != nil {
+ ok = false; return;
+ }
+
+ res.keyword = strings.clone(string(fields[0]));
+ res.text = strings.clone(bytes.buffer_to_string(&buf));
+ return;
+ case .iTXt:
+ ok = true;
+
+ s := string(c.data);
+ null := strings.index_byte(s, 0);
+ if null == -1 {
+ ok = false; return;
+ }
+ if len(c.data) < null + 4 {
+ // At a minimum, including the \0 following the keyword, we require 5 more bytes.
+ ok = false; return;
+ }
+ res.keyword = strings.clone(string(c.data[:null]));
+ rest := c.data[null+1:];
+
+ compression_flag := rest[:1][0];
+ if compression_flag > 1 {
+ ok = false; return;
+ }
+ compression_method := rest[1:2][0];
+ if compression_flag == 1 && compression_method > 0 {
+ // Only Deflate is supported
+ ok = false; return;
+ }
+ rest = rest[2:];
+
+ // We now expect an optional language keyword and translated keyword, both followed by a \0
+ null = strings.index_byte(string(rest), 0);
+ if null == -1 {
+ ok = false; return;
+ }
+ res.language = strings.clone(string(rest[:null]));
+ rest = rest[null+1:];
+
+ null = strings.index_byte(string(rest), 0);
+ if null == -1 {
+ ok = false; return;
+ }
+ res.keyword_localized = strings.clone(string(rest[:null]));
+ rest = rest[null+1:];
+ if compression_flag == 0 {
+ res.text = strings.clone(string(rest));
+ } else {
+ // Set up ZLIB context and decompress text payload.
+ buf: bytes.Buffer;
+ zlib_error := zlib.inflate_from_byte_array(rest, &buf);
+ defer bytes.buffer_destroy(&buf);
+ if zlib_error != nil {
+ ok = false; return;
+ }
+
+ res.text = strings.clone(bytes.buffer_to_string(&buf));
+ }
+ return;
+ case:
+ // PNG text helper called with an unrecognized chunk type.
+ ok = false; return;
+ }
+}
+
+text_destroy :: proc(text: Text) {
+ delete(text.keyword);
+ delete(text.keyword_localized);
+ delete(text.language);
+ delete(text.text);
+}
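+
+/*
+	Sketch: pulling text chunks out of an `Info` (a hypothetical `info` obtained by
+	loading with `.return_metadata`) and cleaning up after use:
+
+		for c in info.chunks {
+			#partial switch c.header.type {
+			case .tEXt, .zTXt, .iTXt:
+				if t, t_ok := text(c); t_ok {
+					// Use t.keyword and t.text here.
+					text_destroy(t);
+				}
+			}
+		}
+*/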
+
+iccp :: proc(c: Chunk) -> (res: iCCP, ok: bool) {
+ ok = true;
+
+ fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator);
+
+ if len(fields[0]) < 1 || len(fields[0]) > 79 {
+ // Invalid profile name
+ ok = false; return;
+ }
+
+ if len(fields[1]) != 0 {
+ // Compression method should be a zero, which the split turned into an empty slice.
+ ok = false; return;
+ }
+
+ // Set up ZLIB context and decompress iCCP payload
+ buf: bytes.Buffer;
+ zlib_error := zlib.inflate_from_byte_array(fields[2], &buf);
+ if zlib_error != nil {
+ bytes.buffer_destroy(&buf);
+ ok = false; return;
+ }
+
+ res.name = strings.clone(string(fields[0]));
+ res.profile = bytes.buffer_to_bytes(&buf);
+
+ return;
+}
+
+iccp_destroy :: proc(i: iCCP) {
+	delete(i.name);
+	delete(i.profile);
+}
+
+srgb :: proc(c: Chunk) -> (res: sRGB, ok: bool) {
+ ok = true;
+
+ if c.header.type != .sRGB || len(c.data) != 1 {
+ return {}, false;
+ }
+
+ res.intent = sRGB_Rendering_Intent(c.data[0]);
+ if res.intent > max(sRGB_Rendering_Intent) {
+ ok = false; return;
+ }
+ return;
+}
+
+plte :: proc(c: Chunk) -> (res: PLTE, ok: bool) {
+ if c.header.type != .PLTE {
+ return {}, false;
+ }
+
+ i := 0; j := 0; ok = true;
+ for j < int(c.header.length) {
+ res.entries[i] = {c.data[j], c.data[j+1], c.data[j+2]};
+ i += 1; j += 3;
+ }
+ res.used = u16(i);
+ return;
+}
+
+splt :: proc(c: Chunk) -> (res: sPLT, ok: bool) {
+ if c.header.type != .sPLT {
+ return {}, false;
+ }
+ ok = true;
+
+ fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=2, allocator=context.temp_allocator);
+ if len(fields) != 2 {
+ return {}, false;
+ }
+
+ res.depth = fields[1][0];
+ if res.depth != 8 && res.depth != 16 {
+ return {}, false;
+ }
+
+ data := fields[1][1:];
+ count: int;
+
+ if res.depth == 8 {
+ if len(data) % 6 != 0 {
+ return {}, false;
+ }
+ count = len(data) / 6;
+ if count > 256 {
+ return {}, false;
+ }
+
+ res.entries = mem.slice_data_cast([][4]u8, data);
+ } else { // res.depth == 16
+ if len(data) % 10 != 0 {
+ return {}, false;
+ }
+ count = len(data) / 10;
+ if count > 256 {
+ return {}, false;
+ }
+
+ res.entries = mem.slice_data_cast([][4]u16, data);
+ }
+
+ res.name = strings.clone(string(fields[0]));
+ res.used = u16(count);
+
+ return;
+}
+
+splt_destroy :: proc(s: sPLT) {
+ delete(s.name);
+}
+
+sbit :: proc(c: Chunk) -> (res: [4]u8, ok: bool) {
+ /*
+ Returns [4]u8 with the significant bits in each channel.
+ A channel will contain zero if not applicable to the PNG color type.
+ */
+
+ if len(c.data) < 1 || len(c.data) > 4 {
+ ok = false; return;
+ }
+ ok = true;
+
+ for i := 0; i < len(c.data); i += 1 {
+ res[i] = c.data[i];
+ }
+ return;
+
+}
+
+hist :: proc(c: Chunk) -> (res: hIST, ok: bool) {
+ if c.header.type != .hIST {
+ return {}, false;
+ }
+ if c.header.length & 1 == 1 || c.header.length > 512 {
+ // The entries are u16be, so the length must be even.
+		// At most 256 entries may be present.
+ return {}, false;
+ }
+
+ ok = true;
+ data := mem.slice_data_cast([]u16be, c.data);
+ i := 0;
+ for len(data) > 0 {
+		// hIST entries are u16be; we unpack them to the machine's native format.
+ res.entries[i] = u16(data[0]);
+ i += 1; data = data[1:];
+ }
+ res.used = u16(i);
+ return;
+}
+
+chrm :: proc(c: Chunk) -> (res: cHRM, ok: bool) {
+ ok = true;
+ if c.header.length != size_of(cHRM_Raw) {
+ return {}, false;
+ }
+ chrm := (^cHRM_Raw)(raw_data(c.data))^;
+
+ res.w.x = f32(chrm.w.x) / 100_000.0;
+ res.w.y = f32(chrm.w.y) / 100_000.0;
+ res.r.x = f32(chrm.r.x) / 100_000.0;
+ res.r.y = f32(chrm.r.y) / 100_000.0;
+ res.g.x = f32(chrm.g.x) / 100_000.0;
+ res.g.y = f32(chrm.g.y) / 100_000.0;
+ res.b.x = f32(chrm.b.x) / 100_000.0;
+ res.b.y = f32(chrm.b.y) / 100_000.0;
+ return;
+}
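+
+/*
+	Sketch: an sRGB-like image stores the D65 white point in cHRM as x = 31270,
+	y = 32900, which `chrm` scales down to 0.3127 and 0.3290.
+*/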
+
+exif :: proc(c: Chunk) -> (res: Exif, ok: bool) {
+
+ ok = true;
+
+ if len(c.data) < 4 {
+ ok = false; return;
+ }
+
+ if c.data[0] == 'M' && c.data[1] == 'M' {
+ res.byte_order = .big_endian;
+ if c.data[2] != 0 || c.data[3] != 42 {
+ ok = false; return;
+ }
+ } else if c.data[0] == 'I' && c.data[1] == 'I' {
+ res.byte_order = .little_endian;
+ if c.data[2] != 42 || c.data[3] != 0 {
+ ok = false; return;
+ }
+ } else {
+ ok = false; return;
+ }
+
+ res.data = c.data;
+ return;
+}
+
+/*
+ General helper functions
+*/
+
+compute_buffer_size :: image.compute_buffer_size;
+
+/*
+ PNG save helpers
+*/
+
+when false {
+
+ make_chunk :: proc(c: any, t: Chunk_Type) -> (res: Chunk) {
+
+ data: []u8;
+ if v, ok := c.([]u8); ok {
+ data = v;
+ } else {
+ data = mem.any_to_bytes(c);
+ }
+
+ res.header.length = u32be(len(data));
+ res.header.type = t;
+ res.data = data;
+
+ // CRC the type
+ crc := hash.crc32(mem.any_to_bytes(res.header.type));
+ // Extend the CRC with the data
+ res.crc = u32be(hash.crc32(data, crc));
+ return;
+ }
+
+ write_chunk :: proc(fd: os.Handle, chunk: Chunk) {
+ c := chunk;
+ // Write length + type
+ os.write_ptr(fd, &c.header, 8);
+ // Write data
+ os.write_ptr(fd, mem.raw_data(c.data), int(c.header.length));
+ // Write CRC32
+ os.write_ptr(fd, &c.crc, 4);
+ }
+
+ write_image_as_png :: proc(filename: string, image: Image) -> (err: Error) {
+ profiler.timed_proc();
+ using image;
+ using os;
+ flags: int = O_WRONLY|O_CREATE|O_TRUNC;
+
+ if len(image.pixels) == 0 || len(image.pixels) < image.width * image.height * int(image.channels) {
+ return E_PNG.Invalid_Image_Dimensions;
+ }
+
+ mode: int = 0;
+ when ODIN_OS == "linux" || ODIN_OS == "darwin" {
+ // NOTE(justasd): 644 (owner read, write; group read; others read)
+ mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+ }
+
+ fd, fderr := open(filename, flags, mode);
+ if fderr != 0 {
+ return E_General.Cannot_Open_File;
+ }
+ defer close(fd);
+
+ magic := Signature;
+
+ write_ptr(fd, &magic, 8);
+
+ ihdr := IHDR{
+ width = u32be(width),
+ height = u32be(height),
+ bit_depth = depth,
+ compression_method = 0,
+ filter_method = 0,
+ interlace_method = .None,
+ };
+
+ switch channels {
+ case 1: ihdr.color_type = Color_Type{};
+ case 2: ihdr.color_type = Color_Type{.Alpha};
+ case 3: ihdr.color_type = Color_Type{.Color};
+ case 4: ihdr.color_type = Color_Type{.Color, .Alpha};
+ case:// Unhandled
+ return E_PNG.Unknown_Color_Type;
+ }
+ h := make_chunk(ihdr, .IHDR);
+ write_chunk(fd, h);
+
+ bytes_needed := width * height * int(channels) + height;
+ filter_bytes := mem.make_dynamic_array_len_cap([dynamic]u8, bytes_needed, bytes_needed, context.allocator);
+ defer delete(filter_bytes);
+
+ i := 0; j := 0;
+ // Add a filter byte 0 per pixel row
+ for y := 0; y < height; y += 1 {
+ filter_bytes[j] = 0; j += 1;
+ for x := 0; x < width; x += 1 {
+ for z := 0; z < channels; z += 1 {
+ filter_bytes[j+z] = image.pixels[i+z];
+ }
+ i += channels; j += channels;
+ }
+ }
+ assert(j == bytes_needed);
+
+ a: []u8 = filter_bytes[:];
+
+ out_buf: ^[dynamic]u8;
+ defer free(out_buf);
+
+ ctx := zlib.ZLIB_Context{
+ in_buf = &a,
+ out_buf = out_buf,
+ };
+ err = zlib.write_zlib_stream_from_memory(&ctx);
+
+ b: []u8;
+ if err == nil {
+ b = ctx.out_buf[:];
+ } else {
+ return err;
+ }
+
+ idat := make_chunk(b, .IDAT);
+
+ write_chunk(fd, idat);
+
+ iend := make_chunk([]u8{}, .IEND);
+ write_chunk(fd, iend);
+
+ return nil;
+ }
+}
diff --git a/core/image/png/png.odin b/core/image/png/png.odin
new file mode 100644
index 000000000..7762f0106
--- /dev/null
+++ b/core/image/png/png.odin
@@ -0,0 +1,1657 @@
+package png
+
+import "core:compress"
+import "core:compress/zlib"
+import "core:image"
+
+import "core:os"
+import "core:strings"
+import "core:hash"
+import "core:bytes"
+import "core:io"
+import "core:mem"
+import "core:intrinsics"
+
+Error :: compress.Error;
+E_General :: compress.General_Error;
+E_PNG :: image.Error;
+E_Deflate :: compress.Deflate_Error;
+
+Image :: image.Image;
+Options :: image.Options;
+
+Signature :: enum u64be {
+ // 0x89504e470d0a1a0a
+ PNG = 0x89 << 56 | 'P' << 48 | 'N' << 40 | 'G' << 32 | '\r' << 24 | '\n' << 16 | 0x1a << 8 | '\n',
+}
+
+Info :: struct {
+ header: IHDR,
+ chunks: [dynamic]Chunk,
+}
+
+Chunk_Header :: struct #packed {
+ length: u32be,
+ type: Chunk_Type,
+}
+
+Chunk :: struct #packed {
+ header: Chunk_Header,
+ data: []byte,
+ crc: u32be,
+}
+
+Chunk_Type :: enum u32be {
+ // IHDR must come first in a file
+ IHDR = 'I' << 24 | 'H' << 16 | 'D' << 8 | 'R',
+ // PLTE must precede the first IDAT chunk
+ PLTE = 'P' << 24 | 'L' << 16 | 'T' << 8 | 'E',
+ bKGD = 'b' << 24 | 'K' << 16 | 'G' << 8 | 'D',
+ tRNS = 't' << 24 | 'R' << 16 | 'N' << 8 | 'S',
+ IDAT = 'I' << 24 | 'D' << 16 | 'A' << 8 | 'T',
+
+ iTXt = 'i' << 24 | 'T' << 16 | 'X' << 8 | 't',
+ tEXt = 't' << 24 | 'E' << 16 | 'X' << 8 | 't',
+ zTXt = 'z' << 24 | 'T' << 16 | 'X' << 8 | 't',
+
+ iCCP = 'i' << 24 | 'C' << 16 | 'C' << 8 | 'P',
+ pHYs = 'p' << 24 | 'H' << 16 | 'Y' << 8 | 's',
+ gAMA = 'g' << 24 | 'A' << 16 | 'M' << 8 | 'A',
+ tIME = 't' << 24 | 'I' << 16 | 'M' << 8 | 'E',
+
+ sPLT = 's' << 24 | 'P' << 16 | 'L' << 8 | 'T',
+ sRGB = 's' << 24 | 'R' << 16 | 'G' << 8 | 'B',
+ hIST = 'h' << 24 | 'I' << 16 | 'S' << 8 | 'T',
+ cHRM = 'c' << 24 | 'H' << 16 | 'R' << 8 | 'M',
+ sBIT = 's' << 24 | 'B' << 16 | 'I' << 8 | 'T',
+
+ /*
+ eXIf tags are not part of the core spec, but have been ratified
+ in v1.5.0 of the PNG Ext register.
+
+ We will provide unprocessed chunks to the caller if `.return_metadata` is set.
+ Applications are free to implement an Exif decoder.
+ */
+ eXIf = 'e' << 24 | 'X' << 16 | 'I' << 8 | 'f',
+
+ // PNG files must end with IEND
+ IEND = 'I' << 24 | 'E' << 16 | 'N' << 8 | 'D',
+
+ /*
+		Xcode sometimes produces "PNG" files that don't adhere to the PNG spec.
+ We recognize them only in order to avoid doing further work on them.
+
+ Some tools like PNG Defry may be able to repair them, but we're not
+ going to reward Apple for producing proprietary broken files purporting
+ to be PNGs by supporting them.
+
+ */
+ iDOT = 'i' << 24 | 'D' << 16 | 'O' << 8 | 'T',
+	CgBI = 'C' << 24 | 'g' << 16 | 'B' << 8 | 'I',
+}
+
+IHDR :: struct #packed {
+ width: u32be,
+ height: u32be,
+ bit_depth: u8,
+ color_type: Color_Type,
+ compression_method: u8,
+ filter_method: u8,
+ interlace_method: Interlace_Method,
+}
+IHDR_SIZE :: size_of(IHDR);
+#assert (IHDR_SIZE == 13);
+
+Color_Value :: enum u8 {
+ Paletted = 0, // 1 << 0 = 1
+ Color = 1, // 1 << 1 = 2
+ Alpha = 2, // 1 << 2 = 4
+}
+Color_Type :: distinct bit_set[Color_Value; u8];
+
+Interlace_Method :: enum u8 {
+ None = 0,
+ Adam7 = 1,
+}
+
+Row_Filter :: enum u8 {
+ None = 0,
+ Sub = 1,
+ Up = 2,
+ Average = 3,
+ Paeth = 4,
+};
+
+PLTE_Entry :: [3]u8;
+
+PLTE :: struct #packed {
+ entries: [256]PLTE_Entry,
+ used: u16,
+}
+
+hIST :: struct #packed {
+ entries: [256]u16,
+ used: u16,
+}
+
+sPLT :: struct #packed {
+ name: string,
+ depth: u8,
+ entries: union {
+ [][4]u8,
+ [][4]u16,
+ },
+ used: u16,
+}
+
+// Other chunks
+tIME :: struct #packed {
+ year: u16be,
+ month: u8,
+ day: u8,
+ hour: u8,
+ minute: u8,
+ second: u8,
+};
+#assert(size_of(tIME) == 7);
+
+CIE_1931_Raw :: struct #packed {
+ x: u32be,
+ y: u32be,
+}
+
+CIE_1931 :: struct #packed {
+ x: f32,
+ y: f32,
+}
+
+cHRM_Raw :: struct #packed {
+ w: CIE_1931_Raw,
+ r: CIE_1931_Raw,
+ g: CIE_1931_Raw,
+ b: CIE_1931_Raw,
+}
+#assert(size_of(cHRM_Raw) == 32);
+
+cHRM :: struct #packed {
+ w: CIE_1931,
+ r: CIE_1931,
+ g: CIE_1931,
+ b: CIE_1931,
+}
+#assert(size_of(cHRM) == 32);
+
+gAMA :: struct {
+ gamma_100k: u32be, // Gamma * 100k
+};
+#assert(size_of(gAMA) == 4);
+
+pHYs :: struct #packed {
+ ppu_x: u32be,
+ ppu_y: u32be,
+ unit: pHYs_Unit,
+};
+#assert(size_of(pHYs) == 9);
+
+pHYs_Unit :: enum u8 {
+ Unknown = 0,
+ Meter = 1,
+};
+
+Text :: struct {
+ keyword: string,
+ keyword_localized: string,
+ language: string,
+ text: string,
+};
+
+Exif :: struct {
+ byte_order: enum {
+ little_endian,
+ big_endian,
+ },
+ data: []u8,
+}
+
+iCCP :: struct {
+ name: string,
+ profile: []u8,
+}
+
+sRGB_Rendering_Intent :: enum u8 {
+ Perceptual = 0,
+ Relative_colorimetric = 1,
+ Saturation = 2,
+ Absolute_colorimetric = 3,
+}
+
+sRGB :: struct #packed {
+ intent: sRGB_Rendering_Intent,
+}
+
+ADAM7_X_ORIG := []int{ 0,4,0,2,0,1,0 };
+ADAM7_Y_ORIG := []int{ 0,0,4,0,2,0,1 };
+ADAM7_X_SPACING := []int{ 8,8,4,4,2,2,1 };
+ADAM7_Y_SPACING := []int{ 8,8,8,4,4,2,2 };
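+
+/*
+	Sketch: the dimensions of Adam7 sub-image `p` (0-based pass index) follow from
+	these tables, as used further down:
+
+		x := (width  - ADAM7_X_ORIG[p] + ADAM7_X_SPACING[p] - 1) / ADAM7_X_SPACING[p];
+		y := (height - ADAM7_Y_ORIG[p] + ADAM7_Y_SPACING[p] - 1) / ADAM7_Y_SPACING[p];
+*/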
+
+// Implementation starts here
+
+read_chunk :: proc(ctx: ^compress.Context) -> (Chunk, Error) {
+
+ chunk := Chunk{};
+
+ ch, e := compress.read_data(ctx, Chunk_Header);
+ if e != .None {
+ return {}, E_General.Stream_Too_Short;
+ }
+ chunk.header = ch;
+
+ data := make([]u8, ch.length, context.temp_allocator);
+ _, e2 := ctx.input->impl_read(data);
+ if e2 != .None {
+ return {}, E_General.Stream_Too_Short;
+ }
+ chunk.data = data;
+
+ // Compute CRC over chunk type + data
+ type := (^[4]byte)(&ch.type)^;
+ computed_crc := hash.crc32(type[:]);
+ computed_crc = hash.crc32(data, computed_crc);
+
+ crc, e3 := compress.read_data(ctx, u32be);
+ if e3 != .None {
+ return {}, E_General.Stream_Too_Short;
+ }
+ chunk.crc = crc;
+
+ if chunk.crc != u32be(computed_crc) {
+ return {}, E_General.Checksum_Failed;
+ }
+ return chunk, nil;
+}
+
+read_header :: proc(ctx: ^compress.Context) -> (IHDR, Error) {
+
+ c, e := read_chunk(ctx);
+ if e != nil {
+ return {}, e;
+ }
+
+ header := (^IHDR)(raw_data(c.data))^;
+ // Validate IHDR
+ using header;
+ if width == 0 || height == 0 {
+ return {}, E_PNG.Invalid_Image_Dimensions;
+ }
+
+ if compression_method != 0 {
+ return {}, E_General.Unknown_Compression_Method;
+ }
+
+ if filter_method != 0 {
+ return {}, E_PNG.Unknown_Filter_Method;
+ }
+
+ if interlace_method != .None && interlace_method != .Adam7 {
+ return {}, E_PNG.Unknown_Interlace_Method;
+ }
+
+ switch (transmute(u8)color_type) {
+ case 0:
+ /*
+ Grayscale.
+ Allowed bit depths: 1, 2, 4, 8 and 16.
+ */
+ allowed := false;
+ for i in ([]u8{1, 2, 4, 8, 16}) {
+ if bit_depth == i {
+ allowed = true;
+ break;
+ }
+ }
+ if !allowed {
+ return {}, E_PNG.Invalid_Color_Bit_Depth_Combo;
+ }
+ case 2, 4, 6:
+ /*
+ RGB, Grayscale+Alpha, RGBA.
+ Allowed bit depths: 8 and 16
+ */
+ if bit_depth != 8 && bit_depth != 16 {
+ return {}, E_PNG.Invalid_Color_Bit_Depth_Combo;
+ }
+ case 3:
+ /*
+ Paletted. PLTE chunk must appear.
+ Allowed bit depths: 1, 2, 4 and 8.
+ */
+ allowed := false;
+ for i in ([]u8{1, 2, 4, 8}) {
+ if bit_depth == i {
+ allowed = true;
+ break;
+ }
+ }
+ if !allowed {
+ return {}, E_PNG.Invalid_Color_Bit_Depth_Combo;
+ }
+
+ case:
+ return {}, E_PNG.Unknown_Color_Type;
+ }
+
+ return header, nil;
+}
+
+chunk_type_to_name :: proc(type: ^Chunk_Type) -> string {
+ t := transmute(^u8)type;
+ return strings.string_from_ptr(t, 4);
+}
+
+load_from_slice :: proc(slice: []u8, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ r := bytes.Reader{};
+ bytes.reader_init(&r, slice);
+ stream := bytes.reader_to_stream(&r);
+
+ /*
+ TODO: Add a flag to tell the PNG loader that the stream is backed by a slice.
+ This way the stream reader could avoid the copy into the temp memory returned by it,
+ and instead return a slice into the original memory that's already owned by the caller.
+ */
+ img, err = load_from_stream(stream, options, allocator);
+
+ return img, err;
+}
+
+load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ data, ok := os.read_entire_file(filename, allocator);
+ defer delete(data);
+
+ if ok {
+ img, err = load_from_slice(data, options, allocator);
+ return;
+ } else {
+ img = new(Image);
+ return img, E_General.File_Not_Found;
+ }
+}
+
+load_from_stream :: proc(stream: io.Stream, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ options := options;
+ if .info in options {
+ options |= {.return_metadata, .do_not_decompress_image};
+ options -= {.info};
+ }
+
+ if .alpha_drop_if_present in options && .alpha_add_if_missing in options {
+ return {}, E_General.Incompatible_Options;
+ }
+
+ if .do_not_expand_channels in options {
+ options |= {.do_not_expand_grayscale, .do_not_expand_indexed};
+ }
+
+ if img == nil {
+ img = new(Image);
+ }
+
+ img.sidecar = nil;
+
+ ctx := compress.Context{
+ input = stream,
+ };
+
+ signature, io_error := compress.read_data(&ctx, Signature);
+ if io_error != .None || signature != .PNG {
+ return img, E_PNG.Invalid_PNG_Signature;
+ }
+
+ idat: []u8;
+ idat_b: bytes.Buffer;
+ idat_length := u32be(0);
+ defer bytes.buffer_destroy(&idat_b);
+
+ c: Chunk;
+ ch: Chunk_Header;
+ e: io.Error;
+
+ header: IHDR;
+ info: Info;
+ info.chunks.allocator = context.temp_allocator;
+
+ // State to ensure correct chunk ordering.
+ seen_ihdr := false; first := true;
+ seen_plte := false;
+ seen_bkgd := false;
+ seen_trns := false;
+ seen_idat := false;
+ seen_iend := false;
+
+ _plte := PLTE{};
+ trns := Chunk{};
+
+ final_image_channels := 0;
+
+ read_error: io.Error;
+ // 12 bytes is the size of a chunk with a zero-length payload.
+ for (read_error == .None && !seen_iend) {
+ // Peek at next chunk's length and type.
+ // TODO: Some streams may not provide seek/read_at
+
+ ch, e = compress.peek_data(&ctx, Chunk_Header);
+ if e != .None {
+ return img, E_General.Stream_Too_Short;
+ }
+ // name := chunk_type_to_name(&ch.type); // Only used for debug prints during development.
+
+ #partial switch(ch.type) {
+ case .IHDR:
+ if seen_ihdr || !first {
+ return {}, E_PNG.IHDR_Not_First_Chunk;
+ }
+ seen_ihdr = true;
+
+ header, err = read_header(&ctx);
+ if err != nil {
+ return img, err;
+ }
+
+ if .Paletted in header.color_type {
+ // Color type 3
+ img.channels = 1;
+ final_image_channels = 3;
+ img.depth = 8;
+ } else if .Color in header.color_type {
+ // Color image without a palette
+ img.channels = 3;
+ final_image_channels = 3;
+ img.depth = header.bit_depth;
+ } else {
+ // Grayscale
+ img.channels = 1;
+ final_image_channels = 1;
+ img.depth = header.bit_depth;
+ }
+
+ if .Alpha in header.color_type {
+ img.channels += 1;
+ final_image_channels += 1;
+ }
+
+ if img.channels == 0 || img.depth == 0 {
+ return {}, E_PNG.IHDR_Corrupt;
+ }
+
+ img.width = int(header.width);
+ img.height = int(header.height);
+
+ using header;
+ h := IHDR{
+ width = width,
+ height = height,
+ bit_depth = bit_depth,
+ color_type = color_type,
+ compression_method = compression_method,
+ filter_method = filter_method,
+ interlace_method = interlace_method,
+ };
+ info.header = h;
+ case .PLTE:
+ seen_plte = true;
+ // PLTE must appear before IDAT and can't appear for color types 0, 4.
+ ct := transmute(u8)info.header.color_type;
+ if seen_idat || ct == 0 || ct == 4 {
+ return img, E_PNG.PLTE_Encountered_Unexpectedly;
+ }
+
+ c, err = read_chunk(&ctx);
+ if err != nil {
+ return img, err;
+ }
+
+ if c.header.length % 3 != 0 || c.header.length > 768 {
+ return img, E_PNG.PLTE_Invalid_Length;
+ }
+ plte_ok: bool;
+ _plte, plte_ok = plte(c);
+ if !plte_ok {
+ return img, E_PNG.PLTE_Invalid_Length;
+ }
+
+ if .return_metadata in options {
+ append(&info.chunks, c);
+ }
+ case .IDAT:
+ // If we only want image metadata and don't want the pixel data, we can early out.
+ if .return_metadata not_in options && .do_not_decompress_image in options {
+ img.channels = final_image_channels;
+ img.sidecar = info;
+ return img, nil;
+ }
+ // There must be at least 1 IDAT, contiguous if more.
+ if seen_idat {
+ return img, E_PNG.IDAT_Must_Be_Contiguous;
+ }
+
+ if idat_length > 0 {
+ return img, E_PNG.IDAT_Must_Be_Contiguous;
+ }
+
+ next := ch.type;
+ for next == .IDAT {
+ c, err = read_chunk(&ctx);
+ if err != nil {
+ return img, err;
+ }
+
+ bytes.buffer_write(&idat_b, c.data);
+ idat_length += c.header.length;
+
+ ch, e = compress.peek_data(&ctx, Chunk_Header);
+ if e != .None {
+ return img, E_General.Stream_Too_Short;
+ }
+ next = ch.type;
+ }
+ idat = bytes.buffer_to_bytes(&idat_b);
+ if int(idat_length) != len(idat) {
+ return {}, E_PNG.IDAT_Corrupt;
+ }
+ seen_idat = true;
+ case .IEND:
+ c, err = read_chunk(&ctx);
+ if err != nil {
+ return img, err;
+ }
+ seen_iend = true;
+ case .bKGD:
+
+ // TODO: Make sure that 16-bit bKGD + tRNS chunks return u16 instead of u16be
+
+ c, err = read_chunk(&ctx);
+ if err != nil {
+ return img, err;
+ }
+ seen_bkgd = true;
+ if .return_metadata in options {
+ append(&info.chunks, c);
+ }
+
+ ct := transmute(u8)info.header.color_type;
+ switch(ct) {
+ case 3: // Indexed color
+ if c.header.length != 1 {
+ return {}, E_PNG.BKGD_Invalid_Length;
+ }
+ col := _plte.entries[c.data[0]];
+ img.background = [3]u16{
+ u16(col[0]) << 8 | u16(col[0]),
+ u16(col[1]) << 8 | u16(col[1]),
+ u16(col[2]) << 8 | u16(col[2]),
+ };
+ case 0, 4: // Grayscale, with and without Alpha
+ if c.header.length != 2 {
+ return {}, E_PNG.BKGD_Invalid_Length;
+ }
+ col := u16(mem.slice_data_cast([]u16be, c.data[:])[0]);
+ img.background = [3]u16{col, col, col};
+ case 2, 6: // Color, with and without Alpha
+ if c.header.length != 6 {
+ return {}, E_PNG.BKGD_Invalid_Length;
+ }
+ col := mem.slice_data_cast([]u16be, c.data[:]);
+ img.background = [3]u16{u16(col[0]), u16(col[1]), u16(col[2])};
+ }
+ case .tRNS:
+ c, err = read_chunk(&ctx);
+ if err != nil {
+ return img, err;
+ }
+
+ if .Alpha in info.header.color_type {
+ return img, E_PNG.TRNS_Encountered_Unexpectedly;
+ }
+
+ if .return_metadata in options {
+ append(&info.chunks, c);
+ }
+
+ /*
+				This makes the image one with transparency, so bump the final channel
+				count here, even though we leave img.channels alone for the defilterer's
+				sake. If we return early because the user just cares about metadata,
+				we'll set img.channels to 'final_image_channels'.
+ */
+
+ final_image_channels += 1;
+
+ seen_trns = true;
+ if info.header.bit_depth < 8 && .Paletted not_in info.header.color_type {
+ // Rescale tRNS data so key matches intensity
+ dsc := depth_scale_table;
+ scale := dsc[info.header.bit_depth];
+ if scale != 1 {
+ key := mem.slice_data_cast([]u16be, c.data)[0] * u16be(scale);
+ c.data = []u8{0, u8(key & 255)};
+ }
+ }
+ trns = c;
+		case .iDOT, .CgBI:
+ /*
+				iPhone PNG bastardization that doesn't adhere to the spec and has a broken IDAT chunk.
+				We're not going to add support for it. If you have the misfortune of coming
+				across one of these files, use a utility to defry it.
+ */
+ return img, E_PNG.PNG_Does_Not_Adhere_to_Spec;
+ case:
+ // Unhandled type
+ c, err = read_chunk(&ctx);
+ if err != nil {
+ return img, err;
+ }
+ if .return_metadata in options {
+				// NOTE: Chunk data is currently allocated on the temp allocator.
+ append(&info.chunks, c);
+ }
+
+ first = false;
+ }
+ }
+
+ if .return_header in options || .return_metadata in options {
+ img.sidecar = info;
+ }
+ if .do_not_decompress_image in options {
+ img.channels = final_image_channels;
+ return img, nil;
+ }
+
+ if !seen_idat {
+ return img, E_PNG.IDAT_Missing;
+ }
+
+ buf: bytes.Buffer;
+ zlib_error := zlib.inflate(idat, &buf);
+ defer bytes.buffer_destroy(&buf);
+
+ if zlib_error != nil {
+ return {}, zlib_error;
+ } else {
+ /*
+			Let's calculate the expected size of the IDAT based on its dimensions,
+			and whether or not it's interlaced.
+ */
+ expected_size: int;
+ buf_len := len(buf.buf);
+
+ if header.interlace_method != .Adam7 {
+ expected_size = compute_buffer_size(int(header.width), int(header.height), int(img.channels), int(header.bit_depth), 1);
+ } else {
+ /*
+ Because Adam7 divides the image up into sub-images, and each scanline must start
+ with a filter byte, Adam7 interlaced images can have a larger raw size.
+ */
+ for p := 0; p < 7; p += 1 {
+ x := (int(header.width) - ADAM7_X_ORIG[p] + ADAM7_X_SPACING[p] - 1) / ADAM7_X_SPACING[p];
+ y := (int(header.height) - ADAM7_Y_ORIG[p] + ADAM7_Y_SPACING[p] - 1) / ADAM7_Y_SPACING[p];
+ if (x > 0 && y > 0) {
+ expected_size += compute_buffer_size(int(x), int(y), int(img.channels), int(header.bit_depth), 1);
+ }
+ }
+ }
+
+ if expected_size != buf_len {
+ return {}, E_PNG.IDAT_Corrupt;
+ }
+ }
+
+ /*
+		The defilterer just cares about the raw number of image channels present,
+		so we leave img.channels at that raw count for now and update it to the
+		final channel count we return to the user after defiltering.
+ */
+ defilter_error := defilter(img, &buf, &header, options);
+ if defilter_error != nil {
+ bytes.buffer_destroy(&img.pixels);
+ return {}, defilter_error;
+ }
+
+ /*
+		Now we'll handle the recoloring of paletted images, apply any tRNS chunks,
+		and expand grayscale images to RGB(A).
+
+ For the sake of convenience we return only RGB(A) images. In the future we
+ may supply an option to return Gray/Gray+Alpha as-is, in which case RGB(A)
+ will become the default.
+ */
+
+ if .Paletted in header.color_type && .do_not_expand_indexed in options {
+ return img, nil;
+ }
+ if .Color not_in header.color_type && .do_not_expand_grayscale in options {
+ return img, nil;
+ }
+
+
+ raw_image_channels := img.channels;
+ out_image_channels := 3;
+
+ /*
+		To give ourselves fewer options to test, we'll knock out
+ `.blend_background` and `seen_bkgd` if we haven't seen both.
+ */
+ if !(seen_bkgd && .blend_background in options) {
+ options -= {.blend_background};
+ seen_bkgd = false;
+ }
+
+ if seen_trns || .Alpha in info.header.color_type || .alpha_add_if_missing in options {
+ out_image_channels = 4;
+ }
+
+ if .alpha_drop_if_present in options {
+ out_image_channels = 3;
+ }
+
+ if seen_bkgd && .blend_background in options && .alpha_add_if_missing not_in options {
+ out_image_channels = 3;
+ }
+
+ add_alpha := (seen_trns && .alpha_drop_if_present not_in options) || (.alpha_add_if_missing in options);
+ premultiply := .alpha_premultiply in options || seen_bkgd;
+
+ img.channels = out_image_channels;
+
+ if .Paletted in header.color_type {
+ temp := img.pixels;
+ defer bytes.buffer_destroy(&temp);
+
+ // We need to create a new image buffer
+ dest_raw_size := compute_buffer_size(int(header.width), int(header.height), out_image_channels, 8);
+ t := bytes.Buffer{};
+ resize(&t.buf, dest_raw_size);
+
+ i := 0; j := 0;
+
+ // If we don't have transparency or drop it without applying it, we can do this:
+ if (!seen_trns || (seen_trns && .alpha_drop_if_present in options && .alpha_premultiply not_in options)) && .alpha_add_if_missing not_in options {
+ for h := 0; h < int(img.height); h += 1 {
+ for w := 0; w < int(img.width); w += 1 {
+ c := _plte.entries[temp.buf[i]];
+ t.buf[j ] = c.r;
+ t.buf[j+1] = c.g;
+ t.buf[j+2] = c.b;
+ i += 1; j += 3;
+ }
+ }
+ } else if add_alpha || .alpha_drop_if_present in options {
+ bg := [3]f32{0, 0, 0};
+ if premultiply && seen_bkgd {
+ c16 := img.background.([3]u16);
+ bg = [3]f32{f32(c16.r), f32(c16.g), f32(c16.b)};
+ }
+
+ no_alpha := (.alpha_drop_if_present in options || premultiply) && .alpha_add_if_missing not_in options;
+ blend_background := seen_bkgd && .blend_background in options;
+
+ for h := 0; h < int(img.height); h += 1 {
+ for w := 0; w < int(img.width); w += 1 {
+ index := temp.buf[i];
+
+ c := _plte.entries[index];
+ a := int(index) < len(trns.data) ? trns.data[index] : 255;
+ alpha := f32(a) / 255.0;
+
+ if blend_background {
+ c.r = u8((1.0 - alpha) * bg[0] + f32(c.r) * alpha);
+ c.g = u8((1.0 - alpha) * bg[1] + f32(c.g) * alpha);
+ c.b = u8((1.0 - alpha) * bg[2] + f32(c.b) * alpha);
+ a = 255;
+ } else if premultiply {
+ c.r = u8(f32(c.r) * alpha);
+ c.g = u8(f32(c.g) * alpha);
+ c.b = u8(f32(c.b) * alpha);
+ }
+
+ t.buf[j ] = c.r;
+ t.buf[j+1] = c.g;
+ t.buf[j+2] = c.b;
+ i += 1;
+
+ if no_alpha {
+ j += 3;
+ } else {
+ t.buf[j+3] = u8(a);
+ j += 4;
+ }
+ }
+ }
+ } else {
+ unreachable();
+ }
+
+ img.pixels = t;
+
+ } else if img.depth == 16 {
+ // Check if we need to do something.
+ if raw_image_channels == out_image_channels {
+ // If we have 3 in and 3 out, or 4 in and 4 out without premultiplication...
+ if raw_image_channels == 4 && .alpha_premultiply not_in options && !seen_bkgd {
+ // Then we're done.
+ return img, nil;
+ }
+ }
+
+ temp := img.pixels;
+ defer bytes.buffer_destroy(&temp);
+
+ // We need to create a new image buffer
+ dest_raw_size := compute_buffer_size(int(header.width), int(header.height), out_image_channels, 16);
+ t := bytes.Buffer{};
+ resize(&t.buf, dest_raw_size);
+
+ p16 := mem.slice_data_cast([]u16, temp.buf[:]);
+ o16 := mem.slice_data_cast([]u16, t.buf[:]);
+
+ switch (raw_image_channels) {
+ case 1:
+ // Gray without Alpha. Might have tRNS alpha.
+ key := u16(0);
+ if seen_trns {
+ key = mem.slice_data_cast([]u16, trns.data)[0];
+ }
+
+ for len(p16) > 0 {
+ r := p16[0];
+
+ alpha := u16(1); // Default to full opaque
+
+ if seen_trns {
+ if r == key {
+ if seen_bkgd {
+ c := img.background.([3]u16);
+ r = c[0];
+ } else {
+ alpha = 0; // Keyed transparency
+ }
+ }
+ }
+
+ if premultiply {
+ o16[0] = r * alpha;
+ o16[1] = r * alpha;
+ o16[2] = r * alpha;
+ } else {
+ o16[0] = r;
+ o16[1] = r;
+ o16[2] = r;
+ }
+
+ if out_image_channels == 4 {
+ o16[3] = alpha * 65535;
+ }
+
+ p16 = p16[1:];
+ o16 = o16[out_image_channels:];
+ }
+ case 2:
+			// Gray with alpha; we shouldn't have a tRNS chunk.
+ bg := f32(0.0);
+ if seen_bkgd {
+ bg = f32(img.background.([3]u16)[0]);
+ }
+
+ for len(p16) > 0 {
+ r := p16[0];
+ if seen_bkgd {
+ alpha := f32(p16[1]) / f32(65535);
+ c := u16(f32(r) * alpha + (1.0 - alpha) * bg);
+ o16[0] = c;
+ o16[1] = c;
+ o16[2] = c;
+ /*
+ After BG blending, the pixel is now fully opaque.
+ Update the value we'll write to the output alpha.
+ */
+ p16[1] = 65535;
+ } else if premultiply {
+ alpha := p16[1];
+ c := u16(f32(r) * f32(alpha) / f32(65535));
+ o16[0] = c;
+ o16[1] = c;
+ o16[2] = c;
+ } else {
+ o16[0] = r;
+ o16[1] = r;
+ o16[2] = r;
+ }
+
+ if out_image_channels == 4 {
+ o16[3] = p16[1];
+ }
+
+ p16 = p16[2:];
+ o16 = o16[out_image_channels:];
+ }
+ case 3:
+ /*
+ Color without Alpha.
+ We may still have a tRNS chunk or `.alpha_add_if_missing`.
+ */
+
+ key: []u16;
+ if seen_trns {
+ key = mem.slice_data_cast([]u16, trns.data);
+ }
+
+ for len(p16) > 0 {
+ r := p16[0];
+ g := p16[1];
+ b := p16[2];
+
+ alpha := u16(1); // Default to full opaque
+
+ if seen_trns {
+ if r == key[0] && g == key[1] && b == key[2] {
+ if seen_bkgd {
+ c := img.background.([3]u16);
+ r = c[0];
+ g = c[1];
+ b = c[2];
+ } else {
+ alpha = 0; // Keyed transparency
+ }
+ }
+ }
+
+ if premultiply {
+ o16[0] = r * alpha;
+ o16[1] = g * alpha;
+ o16[2] = b * alpha;
+ } else {
+ o16[0] = r;
+ o16[1] = g;
+ o16[2] = b;
+ }
+
+ if out_image_channels == 4 {
+ o16[3] = alpha * 65535;
+ }
+
+ p16 = p16[3:];
+ o16 = o16[out_image_channels:];
+ }
+ case 4:
+ // Color with Alpha, can't have tRNS.
+ for len(p16) > 0 {
+ r := p16[0];
+ g := p16[1];
+ b := p16[2];
+ a := p16[3];
+
+ if seen_bkgd {
+ alpha := f32(a) / 65535.0;
+ c := img.background.([3]u16);
+ rb := f32(c[0]) * (1.0 - alpha);
+ gb := f32(c[1]) * (1.0 - alpha);
+ bb := f32(c[2]) * (1.0 - alpha);
+
+ o16[0] = u16(f32(r) * alpha + rb);
+ o16[1] = u16(f32(g) * alpha + gb);
+ o16[2] = u16(f32(b) * alpha + bb);
+ /*
+ After BG blending, the pixel is now fully opaque.
+ Update the value we'll write to the output alpha.
+ */
+ a = 65535;
+ } else if premultiply {
+ alpha := f32(a) / 65535.0;
+ o16[0] = u16(f32(r) * alpha);
+ o16[1] = u16(f32(g) * alpha);
+ o16[2] = u16(f32(b) * alpha);
+ } else {
+ o16[0] = r;
+ o16[1] = g;
+ o16[2] = b;
+ }
+
+ if out_image_channels == 4 {
+ o16[3] = a;
+ }
+
+ p16 = p16[4:];
+ o16 = o16[out_image_channels:];
+ }
+ case:
+			unreachable("We should never see # channels other than 1-4 inclusive.");
+ }
+
+ img.pixels = t;
+ img.channels = out_image_channels;
+
+ } else if img.depth == 8 {
+ // Check if we need to do something.
+ if raw_image_channels == out_image_channels {
+ // If we have 3 in and 3 out, or 4 in and 4 out without premultiplication...
+ if !premultiply {
+ // Then we're done.
+ return img, nil;
+ }
+ }
+
+ temp := img.pixels;
+ defer bytes.buffer_destroy(&temp);
+
+ // We need to create a new image buffer
+ dest_raw_size := compute_buffer_size(int(header.width), int(header.height), out_image_channels, 8);
+ t := bytes.Buffer{};
+ resize(&t.buf, dest_raw_size);
+
+ p := mem.slice_data_cast([]u8, temp.buf[:]);
+ o := mem.slice_data_cast([]u8, t.buf[:]);
+
+ switch (raw_image_channels) {
+ case 1:
+ // Gray without Alpha. Might have tRNS alpha.
+ key := u8(0);
+ if seen_trns {
+ key = u8(mem.slice_data_cast([]u16be, trns.data)[0]);
+ }
+
+ for len(p) > 0 {
+ r := p[0];
+ alpha := u8(1);
+
+ if seen_trns {
+ if r == key {
+ if seen_bkgd {
+ bc := img.background.([3]u16);
+ r = u8(bc[0]);
+ } else {
+ alpha = 0; // Keyed transparency
+ }
+ }
+ if premultiply {
+ r *= alpha;
+ }
+ }
+ o[0] = r;
+ o[1] = r;
+ o[2] = r;
+
+ if out_image_channels == 4 {
+ o[3] = alpha * 255;
+ }
+
+ p = p[1:];
+ o = o[out_image_channels:];
+ }
+ case 2:
+			// Gray with alpha; we shouldn't have a tRNS chunk.
+ bg := f32(0.0);
+ if seen_bkgd {
+ bg = f32(img.background.([3]u16)[0]);
+ }
+
+ for len(p) > 0 {
+ r := p[0];
+ if seen_bkgd {
+ alpha := f32(p[1]) / f32(255);
+ c := u8(f32(r) * alpha + (1.0 - alpha) * bg);
+ o[0] = c;
+ o[1] = c;
+ o[2] = c;
+ /*
+ After BG blending, the pixel is now fully opaque.
+ Update the value we'll write to the output alpha.
+ */
+ p[1] = 255;
+ } else if .alpha_premultiply in options {
+ alpha := p[1];
+ c := u8(f32(r) * f32(alpha) / f32(255));
+ o[0] = c;
+ o[1] = c;
+ o[2] = c;
+ } else {
+ o[0] = r;
+ o[1] = r;
+ o[2] = r;
+ }
+
+ if out_image_channels == 4 {
+ o[3] = p[1];
+ }
+
+ p = p[2:];
+ o = o[out_image_channels:];
+ }
+ case 3:
+ // Color without Alpha. We may still have a tRNS chunk
+ key: []u8;
+ if seen_trns {
+ /*
+ For 8-bit images, the tRNS chunk still contains a triple in u16be.
+ We use only the low byte in this case.
+ */
+ key = []u8{trns.data[1], trns.data[3], trns.data[5]};
+ }
+
+ for len(p) > 0 {
+ r := p[0];
+ g := p[1];
+ b := p[2];
+
+ alpha := u8(1); // Default to full opaque
+
+ if seen_trns {
+ if r == key[0] && g == key[1] && b == key[2] {
+ if seen_bkgd {
+ c := img.background.([3]u16);
+ r = u8(c[0]);
+ g = u8(c[1]);
+ b = u8(c[2]);
+ } else {
+ alpha = 0; // Keyed transparency
+ }
+ }
+
+ if premultiply {
+ r *= alpha;
+ g *= alpha;
+ b *= alpha;
+ }
+ }
+
+ o[0] = r;
+ o[1] = g;
+ o[2] = b;
+
+ if out_image_channels == 4 {
+ o[3] = alpha * 255;
+ }
+
+ p = p[3:];
+ o = o[out_image_channels:];
+ }
+ case 4:
+ // Color with Alpha, can't have tRNS.
+ for len(p) > 0 {
+ r := p[0];
+ g := p[1];
+ b := p[2];
+ a := p[3];
+ if seen_bkgd {
+ alpha := f32(a) / 255.0;
+ c := img.background.([3]u16);
+ rb := f32(c[0]) * (1.0 - alpha);
+ gb := f32(c[1]) * (1.0 - alpha);
+ bb := f32(c[2]) * (1.0 - alpha);
+
+ o[0] = u8(f32(r) * alpha + rb);
+ o[1] = u8(f32(g) * alpha + gb);
+ o[2] = u8(f32(b) * alpha + bb);
+ /*
+ After BG blending, the pixel is now fully opaque.
+ Update the value we'll write to the output alpha.
+ */
+ a = 255;
+ } else if premultiply {
+ alpha := f32(a) / 255.0;
+ o[0] = u8(f32(r) * alpha);
+ o[1] = u8(f32(g) * alpha);
+ o[2] = u8(f32(b) * alpha);
+ } else {
+ o[0] = r;
+ o[1] = g;
+ o[2] = b;
+ }
+
+ if out_image_channels == 4 {
+ o[3] = a;
+ }
+
+ p = p[4:];
+ o = o[out_image_channels:];
+ }
+ case:
+			unreachable("We should never see # channels other than 1-4 inclusive.");
+ }
+
+ img.pixels = t;
+ img.channels = out_image_channels;
+
+ } else {
+ /*
+ This may change if we ever don't expand 1, 2 and 4 bit images. But, those raw
+ returns will likely bypass this processing pipeline.
+ */
+ unreachable("We should never see bit depths other than 8, 16 and 'Paletted' here.");
+ }
+
+ return img, nil;
+}
+
+
+filter_paeth :: #force_inline proc(left, up, up_left: u8) -> u8 {
+ aa, bb, cc := i16(left), i16(up), i16(up_left);
+ p := aa + bb - cc;
+ pa := abs(p - aa);
+ pb := abs(p - bb);
+ pc := abs(p - cc);
+ if pa <= pb && pa <= pc {
+ return left;
+ }
+ if pb <= pc {
+ return up;
+ }
+ return up_left;
+}
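+
+/*
+	Worked example: filter_paeth(10, 20, 15) computes p = 10 + 20 - 15 = 15,
+	giving pa = 5, pb = 5 and pc = 0, so the up-left neighbour (15) is chosen.
+*/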
+
+Filter_Params :: struct #packed {
+ src: []u8,
+ dest: []u8,
+ width: int,
+ height: int,
+ depth: int,
+ channels: int,
+ rescale: bool,
+}
+
+depth_scale_table :: []u8{0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01};
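+
+/*
+	Indexed by bit depth, these factors replicate a 1, 2 or 4 bit sample across 8 bits:
+	e.g. a 2-bit sample of 3 becomes 3 * 0x55 = 0xff, and a 4-bit 0xA becomes 0xAA.
+*/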
+
+// @(optimization_mode="speed")
+defilter_8 :: proc(params: ^Filter_Params) -> (ok: bool) {
+
+ using params;
+ row_stride := channels * width;
+
+ // TODO: See about doing a Duff's #unroll where practicable
+
+ // Apron so we don't need to special case first rows.
+ up := make([]u8, row_stride, context.temp_allocator);
+ ok = true;
+
+ for _ in 0..<height {
+ nk := row_stride - channels;
+
+ filter := Row_Filter(src[0]); src = src[1:];
+ switch(filter) {
+ case .None:
+ copy(dest, src[:row_stride]);
+ case .Sub:
+ for i := 0; i < channels; i += 1 {
+ dest[i] = src[i];
+ }
+ for k := 0; k < nk; k += 1 {
+ dest[channels+k] = (src[channels+k] + dest[k]) & 255;
+ }
+ case .Up:
+ for k := 0; k < row_stride; k += 1 {
+ dest[k] = (src[k] + up[k]) & 255;
+ }
+ case .Average:
+ for i := 0; i < channels; i += 1 {
+ avg := up[i] >> 1;
+ dest[i] = (src[i] + avg) & 255;
+ }
+ for k := 0; k < nk; k += 1 {
+ avg := u8((u16(up[channels+k]) + u16(dest[k])) >> 1);
+ dest[channels+k] = (src[channels+k] + avg) & 255;
+ }
+ case .Paeth:
+ for i := 0; i < channels; i += 1 {
+ paeth := filter_paeth(0, up[i], 0);
+ dest[i] = (src[i] + paeth) & 255;
+ }
+ for k := 0; k < nk; k += 1 {
+ paeth := filter_paeth(dest[k], up[channels+k], up[k]);
+ dest[channels+k] = (src[channels+k] + paeth) & 255;
+ }
+ case:
+ return false;
+ }
+
+ src = src[row_stride:];
+ up = dest;
+ dest = dest[row_stride:];
+ }
+ return;
+}
+
+// @(optimization_mode="speed")
+defilter_less_than_8 :: proc(params: ^Filter_Params) -> (ok: bool) #no_bounds_check {
+
+ using params;
+ ok = true;
+
+ row_stride_in := ((channels * width * depth) + 7) >> 3;
+ row_stride_out := channels * width;
+
+ // Store defiltered bytes rightmost so we can widen in-place.
+ row_offset := row_stride_out - row_stride_in;
+ // Save original dest because we'll need it for the bit widening.
+ orig_dest := dest;
+
+ // TODO: See about doing a Duff's #unroll where practicable
+
+ // Apron so we don't need to special case first rows.
+ up := make([]u8, row_stride_out, context.temp_allocator);
+
+ #no_bounds_check for _ in 0..<height {
+ nk := row_stride_in - channels;
+
+ dest = dest[row_offset:];
+
+ filter := Row_Filter(src[0]); src = src[1:];
+ switch filter {
+ case .None:
+ copy(dest, src[:row_stride_in]);
+ case .Sub:
+ for i in 0..channels {
+ dest[i] = src[i];
+ }
+ for k in 0..nk {
+ dest[channels+k] = (src[channels+k] + dest[k]) & 255;
+ }
+ case .Up:
+ for k in 0..row_stride_in {
+ dest[k] = (src[k] + up[k]) & 255;
+ }
+ case .Average:
+ for i in 0..channels {
+ avg := up[i] >> 1;
+ dest[i] = (src[i] + avg) & 255;
+ }
+ for k in 0..nk {
+ avg := u8((u16(up[channels+k]) + u16(dest[k])) >> 1);
+ dest[channels+k] = (src[channels+k] + avg) & 255;
+ }
+ case .Paeth:
+ for i in 0..channels {
+ paeth := filter_paeth(0, up[i], 0);
+ dest[i] = (src[i] + paeth) & 255;
+ }
+ for k in 0..nk {
+ paeth := filter_paeth(dest[k], up[channels+k], up[k]);
+ dest[channels+k] = (src[channels+k] + paeth) & 255;
+ }
+ case:
+ return false;
+ }
+
+ src = src [row_stride_in:];
+ up = dest;
+ dest = dest[row_stride_in:];
+ }
+
+ // Let's expand the bits
+ dest = orig_dest;
+
+ // Don't rescale the bits if we're a paletted image.
+ dsc := depth_scale_table;
+ scale := rescale ? dsc[depth] : 1;
+
+ /*
+ For sBIT support we should probably set scale to 1 and mask the significant bits.
+		Separately, do we want to support packed pixels? I.e. defiltering only, no expansion?
+ If so, all we have to do is call defilter_8 for that case and not set img.depth to 8.
+ */
+
+ for j := 0; j < height; j += 1 {
+ src = dest[row_offset:];
+
+ switch depth {
+ case 4:
+ k := row_stride_out;
+ for ; k >= 2; k -= 2 {
+ c := src[0];
+ dest[0] = scale * (c >> 4);
+ dest[1] = scale * (c & 15);
+ dest = dest[2:]; src = src[1:];
+ }
+ if k > 0 {
+ c := src[0];
+ dest[0] = scale * (c >> 4);
+ dest = dest[1:];
+ }
+ case 2:
+ k := row_stride_out;
+ for ; k >= 4; k -= 4 {
+ c := src[0];
+ dest[0] = scale * ((c >> 6) );
+ dest[1] = scale * ((c >> 4) & 3);
+ dest[2] = scale * ((c >> 2) & 3);
+ dest[3] = scale * ((c ) & 3);
+ dest = dest[4:]; src = src[1:];
+ }
+ if k > 0 {
+ c := src[0];
+ dest[0] = scale * ((c >> 6) );
+ if k > 1 {
+ dest[1] = scale * ((c >> 4) & 3);
+ }
+ if k > 2 {
+ dest[2] = scale * ((c >> 2) & 3);
+ }
+ dest = dest[k:];
+ }
+ case 1:
+ k := row_stride_out;
+ for ; k >= 8; k -= 8 {
+ c := src[0];
+ dest[0] = scale * ((c >> 7) );
+ dest[1] = scale * ((c >> 6) & 1);
+ dest[2] = scale * ((c >> 5) & 1);
+ dest[3] = scale * ((c >> 4) & 1);
+ dest[4] = scale * ((c >> 3) & 1);
+ dest[5] = scale * ((c >> 2) & 1);
+ dest[6] = scale * ((c >> 1) & 1);
+ dest[7] = scale * ((c ) & 1);
+ dest = dest[8:]; src = src[1:];
+ }
+ if k > 0 {
+ c := src[0];
+ dest[0] = scale * ((c >> 7) );
+ if k > 1 {
+ dest[1] = scale * ((c >> 6) & 1);
+ }
+ if k > 2 {
+ dest[2] = scale * ((c >> 5) & 1);
+ }
+ if k > 3 {
+ dest[3] = scale * ((c >> 4) & 1);
+ }
+ if k > 4 {
+ dest[4] = scale * ((c >> 3) & 1);
+ }
+ if k > 5 {
+ dest[5] = scale * ((c >> 2) & 1);
+ }
+ if k > 6 {
+ dest[6] = scale * ((c >> 1) & 1);
+ }
+ dest = dest[k:];
+
+ }
+
+ }
+ }
+
+ return;
+}
+
+// @(optimization_mode="speed")
+defilter_16 :: proc(params: ^Filter_Params) -> (ok: bool) {
+
+ using params;
+ ok = true;
+
+ stride := channels * 2;
+ row_stride := width * stride;
+
+ // TODO: See about doing a Duff's #unroll where practicable
+ // Apron so we don't need to special case first rows.
+ up := make([]u8, row_stride, context.temp_allocator);
+
+ for y := 0; y < height; y += 1 {
+ nk := row_stride - stride;
+
+ filter := Row_Filter(src[0]); src = src[1:];
+ switch filter {
+ case .None:
+ copy(dest, src[:row_stride]);
+ case .Sub:
+ for i := 0; i < stride; i += 1 {
+ dest[i] = src[i];
+ }
+ for k := 0; k < nk; k += 1 {
+ dest[stride+k] = (src[stride+k] + dest[k]) & 255;
+ }
+ case .Up:
+ for k := 0; k < row_stride; k += 1 {
+ dest[k] = (src[k] + up[k]) & 255;
+ }
+ case .Average:
+ for i := 0; i < stride; i += 1 {
+ avg := up[i] >> 1;
+ dest[i] = (src[i] + avg) & 255;
+ }
+ for k := 0; k < nk; k += 1 {
+ avg := u8((u16(up[stride+k]) + u16(dest[k])) >> 1);
+ dest[stride+k] = (src[stride+k] + avg) & 255;
+ }
+ case .Paeth:
+ for i := 0; i < stride; i += 1 {
+ paeth := filter_paeth(0, up[i], 0);
+ dest[i] = (src[i] + paeth) & 255;
+ }
+ for k := 0; k < nk; k += 1 {
+ paeth := filter_paeth(dest[k], up[stride+k], up[k]);
+ dest[stride+k] = (src[stride+k] + paeth) & 255;
+ }
+ case:
+ return false;
+ }
+
+ src = src[row_stride:];
+ up = dest;
+ dest = dest[row_stride:];
+ }
+
+ return;
+}
+
+defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^IHDR, options: Options) -> (err: compress.Error) {
+ input := bytes.buffer_to_bytes(filter_bytes);
+ width := int(header.width);
+ height := int(header.height);
+ channels := int(img.channels);
+ depth := int(header.bit_depth);
+ rescale := .Color not_in header.color_type;
+
+ bytes_per_channel := depth == 16 ? 2 : 1;
+
+ num_bytes := compute_buffer_size(width, height, channels, depth == 16 ? 16 : 8);
+ resize(&img.pixels.buf, num_bytes);
+
+ filter_ok: bool;
+
+ if header.interlace_method != .Adam7 {
+ params := Filter_Params{
+ src = input,
+ width = width,
+ height = height,
+ channels = channels,
+ depth = depth,
+ rescale = rescale,
+ dest = img.pixels.buf[:],
+ };
+
+ if depth == 8 {
+ filter_ok = defilter_8(&params);
+ } else if depth < 8 {
+ filter_ok = defilter_less_than_8(&params);
+ img.depth = 8;
+ } else {
+ filter_ok = defilter_16(&params);
+ }
+ if !filter_ok {
+ // Caller will destroy buffer for us.
+ return E_PNG.Unknown_Filter_Method;
+ }
+ } else {
+ /*
+			For deinterlacing we need to make a temporary buffer, defilter part of the image,
+ and copy that back into the actual output buffer.
+ */
+
+ for p := 0; p < 7; p += 1 {
+ i,j,x,y: int;
+ x = (width - ADAM7_X_ORIG[p] + ADAM7_X_SPACING[p] - 1) / ADAM7_X_SPACING[p];
+ y = (height - ADAM7_Y_ORIG[p] + ADAM7_Y_SPACING[p] - 1) / ADAM7_Y_SPACING[p];
+ if (x > 0 && y > 0) {
+ temp: bytes.Buffer;
+ temp_len := compute_buffer_size(x, y, channels, depth == 16 ? 16 : 8);
+ resize(&temp.buf, temp_len);
+
+ params := Filter_Params{
+ src = input,
+ width = x,
+ height = y,
+ channels = channels,
+ depth = depth,
+ rescale = rescale,
+ dest = temp.buf[:],
+ };
+
+ if depth == 8 {
+ filter_ok = defilter_8(&params);
+ } else if depth < 8 {
+ filter_ok = defilter_less_than_8(&params);
+ img.depth = 8;
+ } else {
+ filter_ok = defilter_16(&params);
+ }
+
+ if !filter_ok {
+ // Caller will destroy buffer for us.
+ return E_PNG.Unknown_Filter_Method;
+ }
+
+ t := temp.buf[:];
+ for j = 0; j < y; j += 1 {
+ for i = 0; i < x; i += 1 {
+ out_y := j * ADAM7_Y_SPACING[p] + ADAM7_Y_ORIG[p];
+ out_x := i * ADAM7_X_SPACING[p] + ADAM7_X_ORIG[p];
+
+ out_off := out_y * width * channels * bytes_per_channel;
+ out_off += out_x * channels * bytes_per_channel;
+
+ for z := 0; z < channels * bytes_per_channel; z += 1 {
+ img.pixels.buf[out_off + z] = t[z];
+ }
+ t = t[channels * bytes_per_channel:];
+ }
+ }
+ bytes.buffer_destroy(&temp);
+ input_stride := compute_buffer_size(x, y, channels, depth, 1);
+ input = input[input_stride:];
+ }
+ }
+ }
+ when ODIN_ENDIAN == "little" {
+ if img.depth == 16 {
+ // The pixel components are in Big Endian. Let's byteswap.
+ input := mem.slice_data_cast([]u16be, img.pixels.buf[:]);
+ output := mem.slice_data_cast([]u16 , img.pixels.buf[:]);
+ #no_bounds_check for v, i in input {
+ output[i] = u16(v);
+ }
+ }
+ }
+
+ return nil;
+}
+
+load :: proc{load_from_file, load_from_slice, load_from_stream};
diff --git a/core/intrinsics/intrinsics.odin b/core/intrinsics/intrinsics.odin
index ac916a693..60b595aab 100644
--- a/core/intrinsics/intrinsics.odin
+++ b/core/intrinsics/intrinsics.odin
@@ -12,7 +12,33 @@ volatile_store :: proc(dst: ^$T, val: T) -> T ---
// Trapping
debug_trap :: proc() ---
-trap :: proc() -> ! ---
+trap :: proc() -> ! ---
+
+// Instructions
+
+alloca :: proc(size, align: int) -> ^u8 ---
+cpu_relax :: proc() ---
+read_cycle_counter :: proc() -> i64 ---
+
+count_ones :: proc(x: $T) -> T where type_is_integer(T) ---
+count_zeros :: proc(x: $T) -> T where type_is_integer(T) ---
+count_trailing_zeros :: proc(x: $T) -> T where type_is_integer(T) ---
+count_leading_zeros :: proc(x: $T) -> T where type_is_integer(T) ---
+reverse_bits :: proc(x: $T) -> T where type_is_integer(T) ---
+byte_swap :: proc(x: $T) -> T where type_is_integer(T) || type_is_float(T) ---
+
+overflow_add :: proc(lhs, rhs: $T) -> (T, bool) #optional_ok ---
+overflow_sub :: proc(lhs, rhs: $T) -> (T, bool) #optional_ok ---
+overflow_mul :: proc(lhs, rhs: $T) -> (T, bool) #optional_ok ---
+
+fixed_point_mul :: proc(lhs, rhs: $T, #const scale: uint) -> T where type_is_integer(T) ---
+fixed_point_div :: proc(lhs, rhs: $T, #const scale: uint) -> T where type_is_integer(T) ---
+fixed_point_mul_sat :: proc(lhs, rhs: $T, #const scale: uint) -> T where type_is_integer(T) ---
+fixed_point_div_sat :: proc(lhs, rhs: $T, #const scale: uint) -> T where type_is_integer(T) ---
+
+// Compiler Hints
+expect :: proc(val, expected_val: T) -> T ---
+
// Atomics
atomic_fence :: proc() ---
@@ -67,36 +93,25 @@ atomic_xchg_rel :: proc(dst; ^$T, val: T) -> T ---
atomic_xchg_acqrel :: proc(dst; ^$T, val: T) -> T ---
atomic_xchg_relaxed :: proc(dst; ^$T, val: T) -> T ---
-atomic_cxchg :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchg_acq :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchg_rel :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchg_acqrel :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchg_relaxed :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchg_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchg_failacq :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchg_acq_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchg_acqrel_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-
-atomic_cxchgweak :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchgweak_acq :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchgweak_rel :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchgweak_acqrel :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchgweak_relaxed :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchgweak_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchgweak_failacq :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchgweak_acq_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-atomic_cxchgweak_acqrel_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
-
-// Instructions
-
-alloca :: proc(size, align: int) -> ^u8 ---
-cpu_relax :: proc() ---
-read_cycle_counter :: proc() -> i64 ---
-
-
-// Compiler Hints
-expect :: proc(val, expected_val: T) -> T ---
-
+atomic_cxchg :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchg_acq :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchg_rel :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchg_acqrel :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchg_relaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchg_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchg_failacq :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchg_acq_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchg_acqrel_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+
+atomic_cxchgweak :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchgweak_acq :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchgweak_rel :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchgweak_acqrel :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchgweak_relaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchgweak_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchgweak_failacq :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchgweak_acq_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_cxchgweak_acqrel_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
// Constant type tests
@@ -144,6 +159,7 @@ type_is_simd_vector :: proc($T: typeid) -> bool ---
type_has_nil :: proc($T: typeid) -> bool ---
type_is_specialization_of :: proc($T, $S: typeid) -> bool ---
+type_is_variant_of :: proc($U, $V: typeid) -> bool where type_is_union(U) ---
type_has_field :: proc($T: typeid, $name: string) -> bool ---
@@ -159,5 +175,5 @@ type_polymorphic_record_parameter_value :: proc($T: typeid, index: int) -> $V --
type_field_index_of :: proc($T: typeid, $name: string) -> uintptr ---
-type_equal_proc :: proc($T: typeid) -> (equal: proc "contextless" (rawptr, rawptr) -> bool) ---
-type_hasher_proc :: proc($T: typeid) -> (hasher: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr) ---
+type_equal_proc :: proc($T: typeid) -> (equal: proc "contextless" (rawptr, rawptr) -> bool) where type_is_comparable(T) ---
+type_hasher_proc :: proc($T: typeid) -> (hasher: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr) where type_is_comparable(T) ---
diff --git a/core/math/rand/rand.odin b/core/math/rand/rand.odin
index 4f6e7474f..f5558bb8c 100644
--- a/core/math/rand/rand.odin
+++ b/core/math/rand/rand.odin
@@ -6,9 +6,9 @@ Rand :: struct {
}
-@(private, static)
+@(private)
_GLOBAL_SEED_DATA := 1234567890;
-@(private, static)
+@(private)
global_rand := create(u64(uintptr(&_GLOBAL_SEED_DATA)));
set_global_seed :: proc(seed: u64) {
diff --git a/core/mem/alloc.odin b/core/mem/alloc.odin
index 0df68255f..0da7a9708 100644
--- a/core/mem/alloc.odin
+++ b/core/mem/alloc.odin
@@ -22,7 +22,7 @@ Allocator_Mode_Set :: distinct bit_set[Allocator_Mode];
Allocator_Query_Info :: runtime.Allocator_Query_Info;
/*
Allocator_Query_Info :: struct {
- pointer: Maybe(rawptr),
+ pointer: rawptr,
size: Maybe(int),
alignment: Maybe(int),
}
diff --git a/core/mem/mem.odin b/core/mem/mem.odin
index ddf9e9637..ecf232557 100644
--- a/core/mem/mem.odin
+++ b/core/mem/mem.odin
@@ -142,6 +142,7 @@ slice_ptr :: proc(ptr: ^$T, len: int) -> []T {
byte_slice :: #force_inline proc "contextless" (data: rawptr, len: int) -> []byte {
return transmute([]u8)Raw_Slice{data=data, len=max(len, 0)};
}
+@(deprecated="use byte_slice")
slice_ptr_to_bytes :: proc(data: rawptr, len: int) -> []byte {
return transmute([]u8)Raw_Slice{data=data, len=max(len, 0)};
}
diff --git a/core/odin/ast/ast.odin b/core/odin/ast/ast.odin
index 0d015f9bb..cf2cdeacc 100644
--- a/core/odin/ast/ast.odin
+++ b/core/odin/ast/ast.odin
@@ -69,7 +69,7 @@ File :: struct {
pkg: ^Package,
fullpath: string,
- src: []byte,
+ src: string,
docs: ^Comment_Group,
diff --git a/core/odin/parser/parse_files.odin b/core/odin/parser/parse_files.odin
index 99275777c..f622c9781 100644
--- a/core/odin/parser/parse_files.odin
+++ b/core/odin/parser/parse_files.odin
@@ -39,7 +39,7 @@ collect_package :: proc(path: string) -> (pkg: ^ast.Package, success: bool) {
}
file := ast.new(ast.File, NO_POS, NO_POS);
file.pkg = pkg;
- file.src = src;
+ file.src = string(src);
file.fullpath = fullpath;
pkg.files[fullpath] = file;
}
diff --git a/core/odin/parser/parser.odin b/core/odin/parser/parser.odin
index 51bb3a261..890ebe86d 100644
--- a/core/odin/parser/parser.odin
+++ b/core/odin/parser/parser.odin
@@ -8,10 +8,21 @@ import "core:fmt"
Warning_Handler :: #type proc(pos: tokenizer.Pos, fmt: string, args: ..any);
Error_Handler :: #type proc(pos: tokenizer.Pos, fmt: string, args: ..any);
+Flag :: enum u32 {
+ Optional_Semicolons,
+}
+
+Flags :: distinct bit_set[Flag; u32];
+
+
Parser :: struct {
file: ^ast.File,
tok: tokenizer.Tokenizer,
+	// If .Optional_Semicolons is set, semicolons are completely optional as statement terminators;
+	// this is different from .Insert_Semicolon in tok.flags

+ flags: Flags,
+
warn: Warning_Handler,
err: Error_Handler,
@@ -100,8 +111,9 @@ end_pos :: proc(tok: tokenizer.Token) -> tokenizer.Pos {
return pos;
}
-default_parser :: proc() -> Parser {
+default_parser :: proc(flags := Flags{}) -> Parser {
return Parser {
+ flags = flags,
err = default_error_handler,
warn = default_warning_handler,
};
@@ -128,6 +140,10 @@ parse_file :: proc(p: ^Parser, file: ^ast.File) -> bool {
p.line_comment = nil;
}
+ if .Optional_Semicolons in p.flags {
+ p.tok.flags += {.Insert_Semicolon};
+ }
+
p.file = file;
tokenizer.init(&p.tok, file.src, file.fullpath, p.err);
if p.tok.ch <= 0 {
@@ -400,6 +416,11 @@ is_semicolon_optional_for_node :: proc(p: ^Parser, node: ^ast.Node) -> bool {
if node == nil {
return false;
}
+
+ if .Optional_Semicolons in p.flags {
+ return true;
+ }
+
switch n in node.derived {
case ast.Empty_Stmt, ast.Block_Stmt:
return true;
@@ -439,14 +460,34 @@ is_semicolon_optional_for_node :: proc(p: ^Parser, node: ^ast.Node) -> bool {
return false;
}
+expect_semicolon_newline_error :: proc(p: ^Parser, token: tokenizer.Token, s: ^ast.Node) {
+ if .Optional_Semicolons not_in p.flags && .Insert_Semicolon in p.tok.flags && token.text == "\n" {
+ #partial switch token.kind {
+ case .Close_Brace:
+ case .Close_Paren:
+ case .Else:
+ return;
+ }
+ if is_semicolon_optional_for_node(p, s) {
+ return;
+ }
+
+ tok := token;
+ tok.pos.column -= 1;
+ error(p, tok.pos, "expected ';', got newline");
+ }
+}
+
expect_semicolon :: proc(p: ^Parser, node: ^ast.Node) -> bool {
if allow_token(p, .Semicolon) {
+ expect_semicolon_newline_error(p, p.prev_tok, node);
return true;
}
prev := p.prev_tok;
if prev.kind == .Semicolon {
+ expect_semicolon_newline_error(p, p.prev_tok, node);
return true;
}
@@ -615,7 +656,7 @@ parse_if_stmt :: proc(p: ^Parser) -> ^ast.If_Stmt {
cond = parse_expr(p, false);
} else {
init = parse_simple_stmt(p, nil);
- if allow_token(p, .Semicolon) {
+ if parse_control_statement_semicolon_separator(p) {
cond = parse_expr(p, false);
} else {
cond = convert_stmt_to_expr(p, init, "boolean expression");
@@ -668,6 +709,18 @@ parse_if_stmt :: proc(p: ^Parser) -> ^ast.If_Stmt {
return if_stmt;
}
+parse_control_statement_semicolon_separator :: proc(p: ^Parser) -> bool {
+ tok := peek_token(p);
+ if tok.kind != .Open_Brace {
+ return allow_token(p, .Semicolon);
+ }
+ if tok.text == ";" {
+ return allow_token(p, .Semicolon);
+ }
+ return false;
+
+}
+
parse_for_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
if p.curr_proc == nil {
error(p, p.curr_tok.pos, "you cannot use a for statement in the file scope");
@@ -716,7 +769,7 @@ parse_for_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
}
}
- if !is_range && allow_token(p, .Semicolon) {
+ if !is_range && parse_control_statement_semicolon_separator(p) {
init = cond;
cond = nil;
if p.curr_tok.kind != .Semicolon {
@@ -820,7 +873,7 @@ parse_switch_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
tag = parse_simple_stmt(p, {Stmt_Allow_Flag.In});
if as, ok := tag.derived.(ast.Assign_Stmt); ok && as.op.kind == .In {
is_type_switch = true;
- } else if allow_token(p, .Semicolon) {
+ } else if parse_control_statement_semicolon_separator(p) {
init = tag;
tag = nil;
if p.curr_tok.kind != .Open_Brace {
@@ -831,6 +884,7 @@ parse_switch_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
}
+ skip_possible_newline(p);
open := expect_token(p, .Open_Brace);
for p.curr_tok.kind == .Case {
@@ -958,6 +1012,7 @@ parse_foreign_block :: proc(p: ^Parser, tok: tokenizer.Token) -> ^ast.Foreign_Bl
defer p.in_foreign_block = prev_in_foreign_block;
p.in_foreign_block = true;
+ skip_possible_newline_for_literal(p);
open := expect_token(p, .Open_Brace);
for p.curr_tok.kind != .Close_Brace && p.curr_tok.kind != .EOF {
decl := parse_foreign_block_decl(p);
@@ -1287,7 +1342,7 @@ token_precedence :: proc(p: ^Parser, kind: tokenizer.Token_Kind) -> int {
#partial switch kind {
case .Question, .If, .When:
return 1;
- case .Ellipsis, .Range_Half:
+ case .Ellipsis, .Range_Half, .Range_Full:
if !p.allow_range {
return 0;
}
@@ -2234,6 +2289,8 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
}
body: ^ast.Stmt;
+ skip_possible_newline_for_literal(p);
+
if allow_token(p, .Undef) {
body = nil;
if where_token.kind != .Invalid {
@@ -2406,6 +2463,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
p.expr_level = where_prev_level;
}
+ skip_possible_newline_for_literal(p);
expect_token(p, .Open_Brace);
fields, name_count = parse_field_list(p, .Close_Brace, ast.Field_Flags_Struct);
close := expect_token(p, .Close_Brace);
@@ -2474,6 +2532,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
variants: [dynamic]^ast.Expr;
+ skip_possible_newline_for_literal(p);
expect_token_after(p, .Open_Brace, "union");
for p.curr_tok.kind != .Close_Brace && p.curr_tok.kind != .EOF {
@@ -2504,6 +2563,8 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
if p.curr_tok.kind != .Open_Brace {
base_type = parse_type(p);
}
+
+ skip_possible_newline_for_literal(p);
open := expect_token(p, .Open_Brace);
fields := parse_elem_list(p);
close := expect_token(p, .Close_Brace);
@@ -2602,6 +2663,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
}
}
+ skip_possible_newline_for_literal(p);
open := expect_token(p, .Open_Brace);
asm_string := parse_expr(p, false);
expect_token(p, .Comma);
@@ -2812,7 +2874,7 @@ parse_atom_expr :: proc(p: ^Parser, value: ^ast.Expr, lhs: bool) -> (operand: ^a
open := expect_token(p, .Open_Bracket);
#partial switch p.curr_tok.kind {
- case .Colon, .Ellipsis, .Range_Half:
+ case .Colon, .Ellipsis, .Range_Half, .Range_Full:
// NOTE(bill): Do not err yet
break;
case:
@@ -2820,7 +2882,7 @@ parse_atom_expr :: proc(p: ^Parser, value: ^ast.Expr, lhs: bool) -> (operand: ^a
}
#partial switch p.curr_tok.kind {
- case .Ellipsis, .Range_Half:
+ case .Ellipsis, .Range_Half, .Range_Full:
error(p, p.curr_tok.pos, "expected a colon, not a range");
fallthrough;
case .Colon:
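Usage sketch (not part of the diff) for the new parser flags, assuming the call site imports core:odin/parser:

	p := parser.default_parser({.Optional_Semicolons});
	// the zero value keeps the existing behaviour:
	q := parser.default_parser();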
diff --git a/core/odin/tokenizer/token.odin b/core/odin/tokenizer/token.odin
index 1b37bae23..88908d7f8 100644
--- a/core/odin/tokenizer/token.odin
+++ b/core/odin/tokenizer/token.odin
@@ -107,6 +107,7 @@ Token_Kind :: enum u32 {
Comma, // ,
Ellipsis, // ..
Range_Half, // ..<
+ Range_Full, // ..=
Back_Slash, // \
B_Operator_End,
@@ -233,6 +234,7 @@ tokens := [Token_Kind.COUNT]string {
",",
"..",
"..<",
+ "..=",
"\\",
"",
diff --git a/core/odin/tokenizer/tokenizer.odin b/core/odin/tokenizer/tokenizer.odin
index b1b446192..e0cc6dcd3 100644
--- a/core/odin/tokenizer/tokenizer.odin
+++ b/core/odin/tokenizer/tokenizer.odin
@@ -14,7 +14,7 @@ Flags :: distinct bit_set[Flag; u32];
Tokenizer :: struct {
// Immutable data
path: string,
- src: []byte,
+ src: string,
err: Error_Handler,
flags: Flags,
@@ -31,7 +31,7 @@ Tokenizer :: struct {
error_count: int,
}
-init :: proc(t: ^Tokenizer, src: []byte, path: string, err: Error_Handler = default_error_handler) {
+init :: proc(t: ^Tokenizer, src: string, path: string, err: Error_Handler = default_error_handler) {
t.src = src;
t.err = err;
t.ch = ' ';
@@ -87,7 +87,7 @@ advance_rune :: proc(using t: ^Tokenizer) {
case r == 0:
error(t, t.offset, "illegal character NUL");
case r >= utf8.RUNE_SELF:
- r, w = utf8.decode_rune(src[read_offset:]);
+ r, w = utf8.decode_rune_in_string(src[read_offset:]);
if r == utf8.RUNE_ERROR && w == 1 {
error(t, t.offset, "illegal UTF-8 encoding");
} else if r == utf8.RUNE_BOM && offset > 0 {
@@ -623,6 +623,9 @@ scan :: proc(t: ^Tokenizer) -> Token {
if t.ch == '<' {
advance_rune(t);
kind = .Range_Half;
+ } else if t.ch == '=' {
+ advance_rune(t);
+ kind = .Range_Full;
}
}
}
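Sketch (not part of the diff) of the new ..= token in source, assuming the parser accepts Range_Full wherever Range_Half is accepted outside of slice expressions:

	for i in 0..=9 {
		// iterates 0 through 9 inclusive; 0..<10 is the half-open equivalent
	}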
diff --git a/core/os/os2/errors.odin b/core/os/os2/errors.odin
index 00cd600a8..2fc49deed 100644
--- a/core/os/os2/errors.odin
+++ b/core/os/os2/errors.odin
@@ -1,11 +1,8 @@
package os2
-Platform_Error_Min_Bits :: 32;
+import "core:io"
-Error :: enum u64 {
- None = 0,
-
- // General Errors
+General_Error :: enum u32 {
Invalid_Argument,
Permission_Denied,
@@ -13,42 +10,19 @@ Error :: enum u64 {
Not_Exist,
Closed,
- // Timeout Errors
Timeout,
+}
- // I/O Errors
- // EOF is the error returned by `read` when no more input is available
- EOF,
-
- // Unexpected_EOF means that EOF was encountered in the middle of reading a fixed-sized block of data
- Unexpected_EOF,
-
- // Short_Write means that a write accepted fewer bytes than requested but failed to return an explicit error
- Short_Write,
-
- // Invalid_Write means that a write returned an impossible count
- Invalid_Write,
-
- // Short_Buffer means that a read required a longer buffer than was provided
- Short_Buffer,
-
- // No_Progress is returned by some implementations of `io.Reader` when many calls
- // to `read` have failed to return any data or error.
- // This is usually a signed of a broken `io.Reader` implementation
- No_Progress,
-
- Invalid_Whence,
- Invalid_Offset,
- Invalid_Unread,
-
- Negative_Read,
- Negative_Write,
- Negative_Count,
- Buffer_Full,
+Platform_Error :: struct {
+ err: i32,
+}
- // Platform Specific Errors
- Platform_Minimum = 1<<Platform_Error_Min_Bits,
+Error :: union {
+ General_Error,
+ io.Error,
+ Platform_Error,
}
+#assert(size_of(Error) == size_of(u64));
Path_Error :: struct {
op: string,
@@ -83,20 +57,17 @@ link_error_delete :: proc(lerr: Maybe(Link_Error)) {
is_platform_error :: proc(ferr: Error) -> (err: i32, ok: bool) {
- if ferr >= .Platform_Minimum {
- err = i32(u64(ferr)>>Platform_Error_Min_Bits);
- ok = true;
+ v: Platform_Error;
+ if v, ok = ferr.(Platform_Error); ok {
+ err = v.err;
}
return;
}
-error_from_platform_error :: proc(errno: i32) -> Error {
- return Error(u64(errno) << Platform_Error_Min_Bits);
-}
error_string :: proc(ferr: Error) -> string {
- #partial switch ferr {
- case .None: return "";
+ switch ferr {
+ case nil: return "";
case .Invalid_Argument: return "invalid argument";
case .Permission_Denied: return "permission denied";
case .Exist: return "file already exists";
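Handling sketch (not part of the diff) for the union-based Error; err is a hypothetical value:

	err: Error = General_Error.Not_Exist;
	switch in err {
	case General_Error:  // e.g. .Not_Exist, .Permission_Denied
	case io.Error:       // e.g. .EOF, .Short_Write (now shared with core:io)
	case Platform_Error: // carries an OS-specific error code
	case:                // nil means no error
	}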
diff --git a/core/os/os2/file_stream.odin b/core/os/os2/file_stream.odin
index 6877faea4..52f5b30e9 100644
--- a/core/os/os2/file_stream.odin
+++ b/core/os/os2/file_stream.odin
@@ -10,23 +10,14 @@ file_to_stream :: proc(fd: Handle) -> (s: io.Stream) {
@(private)
error_to_io_error :: proc(ferr: Error) -> io.Error {
- #partial switch ferr {
- case .None: return .None;
- case .EOF: return .EOF;
- case .Unexpected_EOF: return .Unexpected_EOF;
- case .Short_Write: return .Short_Write;
- case .Invalid_Write: return .Invalid_Write;
- case .Short_Buffer: return .Short_Buffer;
- case .No_Progress: return .No_Progress;
- case .Invalid_Whence: return .Invalid_Whence;
- case .Invalid_Offset: return .Invalid_Offset;
- case .Invalid_Unread: return .Invalid_Unread;
- case .Negative_Read: return .Negative_Read;
- case .Negative_Write: return .Negative_Write;
- case .Negative_Count: return .Negative_Count;
- case .Buffer_Full: return .Buffer_Full;
+ if ferr == nil {
+ return .None;
}
- return .Unknown;
+ err, ok := ferr.(io.Error);
+ if !ok {
+ err = .Unknown;
+ }
+ return err;
}
diff --git a/core/os/os2/file_util.odin b/core/os/os2/file_util.odin
index 435eba3ab..db6842cf8 100644
--- a/core/os/os2/file_util.odin
+++ b/core/os/os2/file_util.odin
@@ -1,6 +1,7 @@
package os2
import "core:mem"
+import "core:io"
import "core:strconv"
import "core:unicode/utf8"
diff --git a/core/os/os2/file_windows.odin b/core/os/os2/file_windows.odin
index 97fe6b3d9..5e87d80a4 100644
--- a/core/os/os2/file_windows.odin
+++ b/core/os/os2/file_windows.odin
@@ -5,19 +5,19 @@ import "core:io"
import "core:time"
_create :: proc(name: string) -> (Handle, Error) {
- return 0, .None;
+ return 0, nil;
}
_open :: proc(name: string) -> (Handle, Error) {
- return 0, .None;
+ return 0, nil;
}
_open_file :: proc(name: string, flag: int, perm: File_Mode) -> (Handle, Error) {
- return 0, .None;
+ return 0, nil;
}
_close :: proc(fd: Handle) -> Error {
- return .None;
+ return nil;
}
_name :: proc(fd: Handle, allocator := context.allocator) -> string {
@@ -58,11 +58,11 @@ _file_size :: proc(fd: Handle) -> (n: i64, err: Error) {
_sync :: proc(fd: Handle) -> Error {
- return .None;
+ return nil;
}
_flush :: proc(fd: Handle) -> Error {
- return .None;
+ return nil;
}
_truncate :: proc(fd: Handle, size: i64) -> Maybe(Path_Error) {
@@ -92,20 +92,20 @@ _read_link :: proc(name: string) -> (string, Maybe(Path_Error)) {
_chdir :: proc(fd: Handle) -> Error {
- return .None;
+ return nil;
}
_chmod :: proc(fd: Handle, mode: File_Mode) -> Error {
- return .None;
+ return nil;
}
_chown :: proc(fd: Handle, uid, gid: int) -> Error {
- return .None;
+ return nil;
}
_lchown :: proc(name: string, uid, gid: int) -> Error {
- return .None;
+ return nil;
}
diff --git a/core/os/os2/pipe_windows.odin b/core/os/os2/pipe_windows.odin
index 68adb6c3b..04750bf88 100644
--- a/core/os/os2/pipe_windows.odin
+++ b/core/os/os2/pipe_windows.odin
@@ -6,7 +6,7 @@ import win32 "core:sys/windows"
_pipe :: proc() -> (r, w: Handle, err: Error) {
p: [2]win32.HANDLE;
if !win32.CreatePipe(&p[0], &p[1], nil, 0) {
- return 0, 0, error_from_platform_error(i32(win32.GetLastError()));
+ return 0, 0, Platform_Error{i32(win32.GetLastError())};
}
return Handle(p[0]), Handle(p[1]), nil;
}
diff --git a/core/os/os2/stat_windows.odin b/core/os/os2/stat_windows.odin
index ed739b894..48811340a 100644
--- a/core/os/os2/stat_windows.odin
+++ b/core/os/os2/stat_windows.odin
@@ -40,7 +40,7 @@ _same_file :: proc(fi1, fi2: File_Info) -> bool {
_stat_errno :: proc(errno: win32.DWORD) -> Path_Error {
- return Path_Error{err = error_from_platform_error(i32(errno))};
+ return Path_Error{err = Platform_Error{i32(errno)}};
}
@@ -89,7 +89,7 @@ internal_stat :: proc(name: string, create_file_attributes: u32, allocator := co
fd: win32.WIN32_FIND_DATAW;
sh := win32.FindFirstFileW(wname, &fd);
if sh == win32.INVALID_HANDLE_VALUE {
- e = Path_Error{err = error_from_platform_error(i32(win32.GetLastError()))};
+ e = Path_Error{err = Platform_Error{i32(win32.GetLastError())}};
return;
}
win32.FindClose(sh);
@@ -99,7 +99,7 @@ internal_stat :: proc(name: string, create_file_attributes: u32, allocator := co
h := win32.CreateFileW(wname, 0, 0, nil, win32.OPEN_EXISTING, create_file_attributes, nil);
if h == win32.INVALID_HANDLE_VALUE {
- e = Path_Error{err = error_from_platform_error(i32(win32.GetLastError()))};
+ e = Path_Error{err = Platform_Error{i32(win32.GetLastError())}};
return;
}
defer win32.CloseHandle(h);
diff --git a/core/os/os2/temp_file_windows.odin b/core/os/os2/temp_file_windows.odin
index 19dca1b04..dd050ab48 100644
--- a/core/os/os2/temp_file_windows.odin
+++ b/core/os/os2/temp_file_windows.odin
@@ -4,11 +4,11 @@ package os2
import win32 "core:sys/windows"
_create_temp :: proc(dir, pattern: string) -> (Handle, Error) {
- return 0, .None;
+ return 0, nil;
}
_mkdir_temp :: proc(dir, pattern: string, allocator := context.allocator) -> (string, Error) {
- return "", .None;
+ return "", nil;
}
_temp_dir :: proc(allocator := context.allocator) -> string {
diff --git a/core/os/os_freebsd.odin b/core/os/os_freebsd.odin
index 137c6f864..2afa8bd14 100644
--- a/core/os/os_freebsd.odin
+++ b/core/os/os_freebsd.odin
@@ -10,7 +10,7 @@ import "core:c"
Handle :: distinct i32;
File_Time :: distinct u64;
Errno :: distinct i32;
-Syscall :: distinct int;
+Syscall :: distinct i32;
INVALID_HANDLE :: ~Handle(0);
diff --git a/core/os/os_linux.odin b/core/os/os_linux.odin
index dd0914f40..7569909d7 100644
--- a/core/os/os_linux.odin
+++ b/core/os/os_linux.odin
@@ -11,7 +11,7 @@ import "core:strconv"
Handle :: distinct i32;
File_Time :: distinct u64;
Errno :: distinct i32;
-Syscall :: distinct int;
+Syscall :: distinct i32;
INVALID_HANDLE :: ~Handle(0);
@@ -269,7 +269,7 @@ SYS_GETTID: Syscall : 186;
foreign libc {
@(link_name="__errno_location") __errno_location :: proc() -> ^int ---;
- @(link_name="syscall") syscall :: proc(number: Syscall, #c_vararg args: ..any) -> int ---;
+ @(link_name="syscall") syscall :: proc(number: Syscall, #c_vararg args: ..any) -> i32 ---;
@(link_name="open") _unix_open :: proc(path: cstring, flags: c.int, mode: c.int) -> Handle ---;
@(link_name="close") _unix_close :: proc(fd: Handle) -> c.int ---;
@@ -595,7 +595,7 @@ exit :: proc "contextless" (code: int) -> ! {
}
current_thread_id :: proc "contextless" () -> int {
- return syscall(SYS_GETTID);
+ return cast(int)syscall(SYS_GETTID);
}
dlopen :: proc(filename: string, flags: int) -> rawptr {
diff --git a/core/runtime/core.odin b/core/runtime/core.odin
index 0033aad9a..cb526ed2d 100644
--- a/core/runtime/core.odin
+++ b/core/runtime/core.odin
@@ -32,6 +32,7 @@ Calling_Convention :: enum u8 {
Fast_Call = 5,
None = 6,
+ Naked = 7,
}
Type_Info_Enum_Value :: distinct i64;
@@ -120,6 +121,9 @@ Type_Info_Union :: struct {
variants: []^Type_Info,
tag_offset: uintptr,
tag_type: ^Type_Info,
+
+	equal: Equal_Proc, // set only when the union has .Comparable set but does not have .Simple_Compare set
+
custom_align: bool,
no_nil: bool,
maybe: bool,
diff --git a/core/runtime/internal.odin b/core/runtime/internal.odin
index 0e128567a..8a7b22ca4 100644
--- a/core/runtime/internal.odin
+++ b/core/runtime/internal.odin
@@ -105,17 +105,9 @@ mem_copy :: proc "contextless" (dst, src: rawptr, len: int) -> rawptr {
if src == nil {
return dst;
}
+
// NOTE(bill): This _must_ be implemented like C's memmove
- foreign _ {
- when size_of(rawptr) == 8 {
- @(link_name="llvm.memmove.p0i8.p0i8.i64")
- llvm_memmove :: proc "none" (dst, src: rawptr, len: int, is_volatile: bool = false) ---;
- } else {
- @(link_name="llvm.memmove.p0i8.p0i8.i32")
- llvm_memmove :: proc "none" (dst, src: rawptr, len: int, is_volatile: bool = false) ---;
- }
- }
- llvm_memmove(dst, src, len);
+ intrinsics.mem_copy(dst, src, len);
return dst;
}
@@ -123,17 +115,9 @@ mem_copy_non_overlapping :: proc "contextless" (dst, src: rawptr, len: int) -> r
if src == nil {
return dst;
}
+
// NOTE(bill): This _must_ be implemented like C's memcpy
- foreign _ {
- when size_of(rawptr) == 8 {
- @(link_name="llvm.memcpy.p0i8.p0i8.i64")
- llvm_memcpy :: proc "none" (dst, src: rawptr, len: int, is_volatile: bool = false) ---;
- } else {
- @(link_name="llvm.memcpy.p0i8.p0i8.i32")
- llvm_memcpy :: proc "none" (dst, src: rawptr, len: int, is_volatile: bool = false) ---;
- }
- }
- llvm_memcpy(dst, src, len);
+ intrinsics.mem_copy_non_overlapping(dst, src, len);
return dst;
}
@@ -409,11 +393,6 @@ string_decode_rune :: #force_inline proc "contextless" (s: string) -> (rune, int
return rune(s0&MASK4)<<18 | rune(b1&MASKX)<<12 | rune(b2&MASKX)<<6 | rune(b3&MASKX), 4;
}
-@(default_calling_convention = "none")
-foreign {
- @(link_name="llvm.sqrt.f32") _sqrt_f32 :: proc(x: f32) -> f32 ---
- @(link_name="llvm.sqrt.f64") _sqrt_f64 :: proc(x: f64) -> f64 ---
-}
abs_f16 :: #force_inline proc "contextless" (x: f16) -> f16 {
return -x if x < 0 else x;
}
@@ -445,27 +424,27 @@ max_f64 :: proc(a, b: f64) -> f64 {
abs_complex32 :: #force_inline proc "contextless" (x: complex32) -> f16 {
r, i := real(x), imag(x);
- return f16(_sqrt_f32(f32(r*r + i*i)));
+ return f16(intrinsics.sqrt(f32(r*r + i*i)));
}
abs_complex64 :: #force_inline proc "contextless" (x: complex64) -> f32 {
r, i := real(x), imag(x);
- return _sqrt_f32(r*r + i*i);
+ return intrinsics.sqrt(r*r + i*i);
}
abs_complex128 :: #force_inline proc "contextless" (x: complex128) -> f64 {
r, i := real(x), imag(x);
- return _sqrt_f64(r*r + i*i);
+ return intrinsics.sqrt(r*r + i*i);
}
abs_quaternion64 :: #force_inline proc "contextless" (x: quaternion64) -> f16 {
r, i, j, k := real(x), imag(x), jmag(x), kmag(x);
- return f16(_sqrt_f32(f32(r*r + i*i + j*j + k*k)));
+ return f16(intrinsics.sqrt(f32(r*r + i*i + j*j + k*k)));
}
abs_quaternion128 :: #force_inline proc "contextless" (x: quaternion128) -> f32 {
r, i, j, k := real(x), imag(x), jmag(x), kmag(x);
- return _sqrt_f32(r*r + i*i + j*j + k*k);
+ return intrinsics.sqrt(r*r + i*i + j*j + k*k);
}
abs_quaternion256 :: #force_inline proc "contextless" (x: quaternion256) -> f64 {
r, i, j, k := real(x), imag(x), jmag(x), kmag(x);
- return _sqrt_f64(r*r + i*i + j*j + k*k);
+ return intrinsics.sqrt(r*r + i*i + j*j + k*k);
}
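Sketch (not part of the diff) of the intrinsics the runtime now forwards to directly, assuming import "intrinsics":

	src := [4]u8{1, 2, 3, 4};
	dst: [4]u8;
	intrinsics.mem_copy(&dst, &src, size_of(src));                 // memmove semantics
	intrinsics.mem_copy_non_overlapping(&dst, &src, size_of(src)); // memcpy semantics
	assert(intrinsics.sqrt(f64(4)) == 2);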
diff --git a/core/runtime/udivmod128.odin b/core/runtime/udivmod128.odin
index e4b7380d3..fff856ab6 100644
--- a/core/runtime/udivmod128.odin
+++ b/core/runtime/udivmod128.odin
@@ -11,7 +11,7 @@ udivmod128 :: proc "c" (a, b: u128, rem: ^u128) -> u128 {
q, r: [2]u64 = ---, ---;
sr: u32 = 0;
- low :: ODIN_ENDIAN == "big" ? 1 : 0;
+ low :: 1 when ODIN_ENDIAN == "big" else 0;
high :: 1 - low;
U64_BITS :: 8*size_of(u64);
U128_BITS :: 8*size_of(u128);
diff --git a/core/strings/builder.odin b/core/strings/builder.odin
index dd7fd4f1e..843f79381 100644
--- a/core/strings/builder.odin
+++ b/core/strings/builder.odin
@@ -221,7 +221,7 @@ pop_rune :: proc(b: ^Builder) -> (r: rune, width: int) {
}
-@(private, static)
+@(private)
DIGITS_LOWER := "0123456789abcdefx";
write_quoted_string :: proc{
diff --git a/core/sync/sync2/atomic.odin b/core/sync/sync2/atomic.odin
index 1f8e2f3a8..fa86ec352 100644
--- a/core/sync/sync2/atomic.odin
+++ b/core/sync/sync2/atomic.odin
@@ -2,78 +2,76 @@ package sync2
import "intrinsics"
-// TODO(bill): Is this even a good design? The intrinsics seem to be more than good enough and just as clean
-
cpu_relax :: intrinsics.cpu_relax;
-atomic_fence :: intrinsics.atomic_fence;
-atomic_fence_acq :: intrinsics.atomic_fence_acq;
-atomic_fence_rel :: intrinsics.atomic_fence_rel;
-atomic_fence_acqrel :: intrinsics.atomic_fence_acqrel;
+atomic_fence :: intrinsics.atomic_fence;
+atomic_fence_acquire :: intrinsics.atomic_fence_acq;
+atomic_fence_release :: intrinsics.atomic_fence_rel;
+atomic_fence_acqrel :: intrinsics.atomic_fence_acqrel;
atomic_store :: intrinsics.atomic_store;
-atomic_store_rel :: intrinsics.atomic_store_rel;
+atomic_store_release :: intrinsics.atomic_store_rel;
atomic_store_relaxed :: intrinsics.atomic_store_relaxed;
atomic_store_unordered :: intrinsics.atomic_store_unordered;
atomic_load :: intrinsics.atomic_load;
-atomic_load_acq :: intrinsics.atomic_load_acq;
+atomic_load_acquire :: intrinsics.atomic_load_acq;
atomic_load_relaxed :: intrinsics.atomic_load_relaxed;
atomic_load_unordered :: intrinsics.atomic_load_unordered;
atomic_add :: intrinsics.atomic_add;
-atomic_add_acq :: intrinsics.atomic_add_acq;
-atomic_add_rel :: intrinsics.atomic_add_rel;
+atomic_add_acquire :: intrinsics.atomic_add_acq;
+atomic_add_release :: intrinsics.atomic_add_rel;
atomic_add_acqrel :: intrinsics.atomic_add_acqrel;
atomic_add_relaxed :: intrinsics.atomic_add_relaxed;
atomic_sub :: intrinsics.atomic_sub;
-atomic_sub_acq :: intrinsics.atomic_sub_acq;
-atomic_sub_rel :: intrinsics.atomic_sub_rel;
+atomic_sub_acquire :: intrinsics.atomic_sub_acq;
+atomic_sub_release :: intrinsics.atomic_sub_rel;
atomic_sub_acqrel :: intrinsics.atomic_sub_acqrel;
atomic_sub_relaxed :: intrinsics.atomic_sub_relaxed;
atomic_and :: intrinsics.atomic_and;
-atomic_and_acq :: intrinsics.atomic_and_acq;
-atomic_and_rel :: intrinsics.atomic_and_rel;
+atomic_and_acquire :: intrinsics.atomic_and_acq;
+atomic_and_release :: intrinsics.atomic_and_rel;
atomic_and_acqrel :: intrinsics.atomic_and_acqrel;
atomic_and_relaxed :: intrinsics.atomic_and_relaxed;
atomic_nand :: intrinsics.atomic_nand;
-atomic_nand_acq :: intrinsics.atomic_nand_acq;
-atomic_nand_rel :: intrinsics.atomic_nand_rel;
+atomic_nand_acquire :: intrinsics.atomic_nand_acq;
+atomic_nand_release :: intrinsics.atomic_nand_rel;
atomic_nand_acqrel :: intrinsics.atomic_nand_acqrel;
atomic_nand_relaxed :: intrinsics.atomic_nand_relaxed;
atomic_or :: intrinsics.atomic_or;
-atomic_or_acq :: intrinsics.atomic_or_acq;
-atomic_or_rel :: intrinsics.atomic_or_rel;
+atomic_or_acquire :: intrinsics.atomic_or_acq;
+atomic_or_release :: intrinsics.atomic_or_rel;
atomic_or_acqrel :: intrinsics.atomic_or_acqrel;
atomic_or_relaxed :: intrinsics.atomic_or_relaxed;
atomic_xor :: intrinsics.atomic_xor;
-atomic_xor_acq :: intrinsics.atomic_xor_acq;
-atomic_xor_rel :: intrinsics.atomic_xor_rel;
+atomic_xor_acquire :: intrinsics.atomic_xor_acq;
+atomic_xor_release :: intrinsics.atomic_xor_rel;
atomic_xor_acqrel :: intrinsics.atomic_xor_acqrel;
atomic_xor_relaxed :: intrinsics.atomic_xor_relaxed;
-atomic_xchg :: intrinsics.atomic_xchg;
-atomic_xchg_acq :: intrinsics.atomic_xchg_acq;
-atomic_xchg_rel :: intrinsics.atomic_xchg_rel;
-atomic_xchg_acqrel :: intrinsics.atomic_xchg_acqrel;
-atomic_xchg_relaxed :: intrinsics.atomic_xchg_relaxed;
+atomic_exchange :: intrinsics.atomic_xchg;
+atomic_exchange_acquire :: intrinsics.atomic_xchg_acq;
+atomic_exchange_release :: intrinsics.atomic_xchg_rel;
+atomic_exchange_acqrel :: intrinsics.atomic_xchg_acqrel;
+atomic_exchange_relaxed :: intrinsics.atomic_xchg_relaxed;
-atomic_cxchg :: intrinsics.atomic_cxchg;
-atomic_cxchg_acq :: intrinsics.atomic_cxchg_acq;
-atomic_cxchg_rel :: intrinsics.atomic_cxchg_rel;
-atomic_cxchg_acqrel :: intrinsics.atomic_cxchg_acqrel;
-atomic_cxchg_relaxed :: intrinsics.atomic_cxchg_relaxed;
-atomic_cxchg_failrelaxed :: intrinsics.atomic_cxchg_failrelaxed;
-atomic_cxchg_failacq :: intrinsics.atomic_cxchg_failacq;
-atomic_cxchg_acq_failrelaxed :: intrinsics.atomic_cxchg_acq_failrelaxed;
-atomic_cxchg_acqrel_failrelaxed :: intrinsics.atomic_cxchg_acqrel_failrelaxed;
+atomic_compare_exchange_strong :: intrinsics.atomic_cxchg;
+atomic_compare_exchange_strong_acquire :: intrinsics.atomic_cxchg_acq;
+atomic_compare_exchange_strong_release :: intrinsics.atomic_cxchg_rel;
+atomic_compare_exchange_strong_acqrel :: intrinsics.atomic_cxchg_acqrel;
+atomic_compare_exchange_strong_relaxed :: intrinsics.atomic_cxchg_relaxed;
+atomic_compare_exchange_strong_failrelaxed :: intrinsics.atomic_cxchg_failrelaxed;
+atomic_compare_exchange_strong_failacquire :: intrinsics.atomic_cxchg_failacq;
+atomic_compare_exchange_strong_acquire_failrelaxed :: intrinsics.atomic_cxchg_acq_failrelaxed;
+atomic_compare_exchange_strong_acqrel_failrelaxed :: intrinsics.atomic_cxchg_acqrel_failrelaxed;
-atomic_cxchgweak :: intrinsics.atomic_cxchgweak;
-atomic_cxchgweak_acq :: intrinsics.atomic_cxchgweak_acq;
-atomic_cxchgweak_rel :: intrinsics.atomic_cxchgweak_rel;
-atomic_cxchgweak_acqrel :: intrinsics.atomic_cxchgweak_acqrel;
-atomic_cxchgweak_relaxed :: intrinsics.atomic_cxchgweak_relaxed;
-atomic_cxchgweak_failrelaxed :: intrinsics.atomic_cxchgweak_failrelaxed;
-atomic_cxchgweak_failacq :: intrinsics.atomic_cxchgweak_failacq;
-atomic_cxchgweak_acq_failrelaxed :: intrinsics.atomic_cxchgweak_acq_failrelaxed;
-atomic_cxchgweak_acqrel_failrelaxed :: intrinsics.atomic_cxchgweak_acqrel_failrelaxed;
+atomic_compare_exchange_weak :: intrinsics.atomic_cxchgweak;
+atomic_compare_exchange_weak_acquire :: intrinsics.atomic_cxchgweak_acq;
+atomic_compare_exchange_weak_release :: intrinsics.atomic_cxchgweak_rel;
+atomic_compare_exchange_weak_acqrel :: intrinsics.atomic_cxchgweak_acqrel;
+atomic_compare_exchange_weak_relaxed :: intrinsics.atomic_cxchgweak_relaxed;
+atomic_compare_exchange_weak_failrelaxed :: intrinsics.atomic_cxchgweak_failrelaxed;
+atomic_compare_exchange_weak_failacquire :: intrinsics.atomic_cxchgweak_failacq;
+atomic_compare_exchange_weak_acquire_failrelaxed :: intrinsics.atomic_cxchgweak_acq_failrelaxed;
+atomic_compare_exchange_weak_acqrel_failrelaxed :: intrinsics.atomic_cxchgweak_acqrel_failrelaxed;
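Usage sketch (not part of the diff) for the renamed procedures; flag is a hypothetical i32:

	flag: i32;
	atomic_store_release(&flag, 1);      // was atomic_store_rel
	if atomic_load_acquire(&flag) == 1 { // was atomic_load_acq
		_, swapped := atomic_compare_exchange_strong(&flag, 1, 2); // was atomic_cxchg
		assert(swapped);
	}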
diff --git a/core/sync/sync2/channel.odin b/core/sync/sync2/channel.odin
deleted file mode 100644
index fc30d8280..000000000
--- a/core/sync/sync2/channel.odin
+++ /dev/null
@@ -1,886 +0,0 @@
-package sync2
-
-// TODO(bill): The Channel implementation needs a complete rewrite for this new package sync design
-// Especially how the `select` things work
-
-import "core:mem"
-import "core:time"
-import "core:math/rand"
-
-_, _ :: time, rand;
-
-Channel_Direction :: enum i8 {
- Both = 0,
- Send = +1,
- Recv = -1,
-}
-
-Channel :: struct(T: typeid, Direction := Channel_Direction.Both) {
- using _internal: ^Raw_Channel,
-}
-
-channel_init :: proc(ch: ^$C/Channel($T, $D), cap := 0, allocator := context.allocator) {
- context.allocator = allocator;
- ch._internal = raw_channel_create(size_of(T), align_of(T), cap);
- return;
-}
-
-channel_make :: proc($T: typeid, cap := 0, allocator := context.allocator) -> (ch: Channel(T, .Both)) {
- context.allocator = allocator;
- ch._internal = raw_channel_create(size_of(T), align_of(T), cap);
- return;
-}
-
-channel_make_send :: proc($T: typeid, cap := 0, allocator := context.allocator) -> (ch: Channel(T, .Send)) {
- context.allocator = allocator;
- ch._internal = raw_channel_create(size_of(T), align_of(T), cap);
- return;
-}
-channel_make_recv :: proc($T: typeid, cap := 0, allocator := context.allocator) -> (ch: Channel(T, .Recv)) {
- context.allocator = allocator;
- ch._internal = raw_channel_create(size_of(T), align_of(T), cap);
- return;
-}
-
-channel_destroy :: proc(ch: $C/Channel($T, $D)) {
- raw_channel_destroy(ch._internal);
-}
-
-channel_as_send :: proc(ch: $C/Channel($T, .Both)) -> (res: Channel(T, .Send)) {
- res._internal = ch._internal;
- return;
-}
-
-channel_as_recv :: proc(ch: $C/Channel($T, .Both)) -> (res: Channel(T, .Recv)) {
- res._internal = ch._internal;
- return;
-}
-
-
-channel_len :: proc(ch: $C/Channel($T, $D)) -> int {
- return ch._internal.len if ch._internal != nil else 0;
-}
-channel_cap :: proc(ch: $C/Channel($T, $D)) -> int {
- return ch._internal.cap if ch._internal != nil else 0;
-}
-
-
-channel_send :: proc(ch: $C/Channel($T, $D), msg: T, loc := #caller_location) where D >= .Both {
- msg := msg;
- _ = raw_channel_send_impl(ch._internal, &msg, /*block*/true, loc);
-}
-channel_try_send :: proc(ch: $C/Channel($T, $D), msg: T, loc := #caller_location) -> bool where D >= .Both {
- msg := msg;
- return raw_channel_send_impl(ch._internal, &msg, /*block*/false, loc);
-}
-
-channel_recv :: proc(ch: $C/Channel($T, $D), loc := #caller_location) -> (msg: T) where D <= .Both {
- c := ch._internal;
- if c == nil {
- panic(message="cannot recv message; channel is nil", loc=loc);
- }
- mutex_lock(&c.mutex);
- raw_channel_recv_impl(c, &msg, loc);
- mutex_unlock(&c.mutex);
- return;
-}
-channel_try_recv :: proc(ch: $C/Channel($T, $D), loc := #caller_location) -> (msg: T, ok: bool) where D <= .Both {
- c := ch._internal;
- if c != nil && mutex_try_lock(&c.mutex) {
- if c.len > 0 {
- raw_channel_recv_impl(c, &msg, loc);
- ok = true;
- }
- mutex_unlock(&c.mutex);
- }
- return;
-}
-channel_try_recv_ptr :: proc(ch: $C/Channel($T, $D), msg: ^T, loc := #caller_location) -> (ok: bool) where D <= .Both {
- res: T;
- res, ok = channel_try_recv(ch, loc);
- if ok && msg != nil {
- msg^ = res;
- }
- return;
-}
-
-
-channel_is_nil :: proc(ch: $C/Channel($T, $D)) -> bool {
- return ch._internal == nil;
-}
-channel_is_open :: proc(ch: $C/Channel($T, $D)) -> bool {
- c := ch._internal;
- return c != nil && !c.closed;
-}
-
-
-channel_eq :: proc(a, b: $C/Channel($T, $D)) -> bool {
- return a._internal == b._internal;
-}
-channel_ne :: proc(a, b: $C/Channel($T, $D)) -> bool {
- return a._internal != b._internal;
-}
-
-
-channel_can_send :: proc(ch: $C/Channel($T, $D)) -> (ok: bool) where D >= .Both {
- return raw_channel_can_send(ch._internal);
-}
-channel_can_recv :: proc(ch: $C/Channel($T, $D)) -> (ok: bool) where D <= .Both {
- return raw_channel_can_recv(ch._internal);
-}
-
-
-channel_peek :: proc(ch: $C/Channel($T, $D)) -> int {
- c := ch._internal;
- if c == nil {
- return -1;
- }
- if atomic_load(&c.closed) {
- return -1;
- }
- return atomic_load(&c.len);
-}
-
-
-channel_close :: proc(ch: $C/Channel($T, $D), loc := #caller_location) {
- raw_channel_close(ch._internal, loc);
-}
-
-
-channel_iterator :: proc(ch: $C/Channel($T, $D)) -> (msg: T, ok: bool) where D <= .Both {
- c := ch._internal;
- if c == nil {
- return;
- }
-
- if !c.closed || c.len > 0 {
- msg, ok = channel_recv(ch), true;
- }
- return;
-}
-channel_drain :: proc(ch: $C/Channel($T, $D)) where D >= .Both {
- raw_channel_drain(ch._internal);
-}
-
-
-channel_move :: proc(dst: $C1/Channel($T, $D1) src: $C2/Channel(T, $D2)) where D1 <= .Both, D2 >= .Both {
- for msg in channel_iterator(src) {
- channel_send(dst, msg);
- }
-}
-
-
-Raw_Channel_Wait_Queue :: struct {
- next: ^Raw_Channel_Wait_Queue,
- state: ^uintptr,
-}
-
-
-Raw_Channel :: struct {
- closed: bool,
- ready: bool, // ready to recv
- data_offset: u16, // data is stored at the end of this data structure
- elem_size: u32,
- len, cap: int,
- read, write: int,
- mutex: Mutex,
- cond: Cond,
- allocator: mem.Allocator,
-
- sendq: ^Raw_Channel_Wait_Queue,
- recvq: ^Raw_Channel_Wait_Queue,
-}
-
-raw_channel_wait_queue_insert :: proc(head: ^^Raw_Channel_Wait_Queue, val: ^Raw_Channel_Wait_Queue) {
- val.next = head^;
- head^ = val;
-}
-raw_channel_wait_queue_remove :: proc(head: ^^Raw_Channel_Wait_Queue, val: ^Raw_Channel_Wait_Queue) {
- p := head;
- for p^ != nil && p^ != val {
- p = &p^.next;
- }
- if p != nil {
- p^ = p^.next;
- }
-}
-
-
-raw_channel_create :: proc(elem_size, elem_align: int, cap := 0) -> ^Raw_Channel {
- assert(int(u32(elem_size)) == elem_size);
-
- s := size_of(Raw_Channel);
- s = mem.align_forward_int(s, elem_align);
- data_offset := uintptr(s);
- s += elem_size * max(cap, 1);
-
- a := max(elem_align, align_of(Raw_Channel));
-
- c := (^Raw_Channel)(mem.alloc(s, a));
- if c == nil {
- return nil;
- }
-
- c.data_offset = u16(data_offset);
- c.elem_size = u32(elem_size);
- c.len, c.cap = 0, max(cap, 0);
- c.read, c.write = 0, 0;
- c.allocator = context.allocator;
- c.closed = false;
-
- return c;
-}
-
-
-raw_channel_destroy :: proc(c: ^Raw_Channel) {
- if c == nil {
- return;
- }
- context.allocator = c.allocator;
- atomic_store(&c.closed, true);
- free(c);
-}
-
-raw_channel_close :: proc(c: ^Raw_Channel, loc := #caller_location) {
- if c == nil {
- panic(message="cannot close nil channel", loc=loc);
- }
- mutex_lock(&c.mutex);
- defer mutex_unlock(&c.mutex);
- atomic_store(&c.closed, true);
-
- // Release readers and writers
- raw_channel_wait_queue_broadcast(c.recvq);
- raw_channel_wait_queue_broadcast(c.sendq);
- cond_broadcast(&c.cond);
-}
-
-
-
-raw_channel_send_impl :: proc(c: ^Raw_Channel, msg: rawptr, block: bool, loc := #caller_location) -> bool {
- send :: proc(c: ^Raw_Channel, src: rawptr) {
- data := uintptr(c) + uintptr(c.data_offset);
- dst := data + uintptr(c.write * int(c.elem_size));
- mem.copy(rawptr(dst), src, int(c.elem_size));
- c.len += 1;
- c.write = (c.write + 1) % max(c.cap, 1);
- }
-
- switch {
- case c == nil:
- panic(message="cannot send message; channel is nil", loc=loc);
- case c.closed:
- panic(message="cannot send message; channel is closed", loc=loc);
- }
-
- mutex_lock(&c.mutex);
- defer mutex_unlock(&c.mutex);
-
- if c.cap > 0 {
- if !block && c.len >= c.cap {
- return false;
- }
-
- for c.len >= c.cap {
- cond_wait(&c.cond, &c.mutex);
- }
- } else if c.len > 0 { // TODO(bill): determine correct behaviour
- if !block {
- return false;
- }
- cond_wait(&c.cond, &c.mutex);
- } else if c.len == 0 && !block {
- return false;
- }
-
- send(c, msg);
- cond_signal(&c.cond);
- raw_channel_wait_queue_signal(c.recvq);
-
- return true;
-}
-
-raw_channel_recv_impl :: proc(c: ^Raw_Channel, res: rawptr, loc := #caller_location) {
- recv :: proc(c: ^Raw_Channel, dst: rawptr, loc := #caller_location) {
- if c.len < 1 {
- panic(message="cannot recv message; channel is empty", loc=loc);
- }
- c.len -= 1;
-
- data := uintptr(c) + uintptr(c.data_offset);
- src := data + uintptr(c.read * int(c.elem_size));
- mem.copy(dst, rawptr(src), int(c.elem_size));
- c.read = (c.read + 1) % max(c.cap, 1);
- }
-
- if c == nil {
- panic(message="cannot recv message; channel is nil", loc=loc);
- }
- atomic_store(&c.ready, true);
- for c.len < 1 {
- raw_channel_wait_queue_signal(c.sendq);
- cond_wait(&c.cond, &c.mutex);
- }
- atomic_store(&c.ready, false);
- recv(c, res, loc);
- if c.cap > 0 {
- if c.len == c.cap - 1 {
- // NOTE(bill): Only signal on the last one
- cond_signal(&c.cond);
- }
- } else {
- cond_signal(&c.cond);
- }
-}
-
-
-raw_channel_can_send :: proc(c: ^Raw_Channel) -> (ok: bool) {
- if c == nil {
- return false;
- }
- mutex_lock(&c.mutex);
- switch {
- case c.closed:
- ok = false;
- case c.cap > 0:
- ok = c.ready && c.len < c.cap;
- case:
- ok = c.ready && c.len == 0;
- }
- mutex_unlock(&c.mutex);
- return;
-}
-raw_channel_can_recv :: proc(c: ^Raw_Channel) -> (ok: bool) {
- if c == nil {
- return false;
- }
- mutex_lock(&c.mutex);
- ok = c.len > 0;
- mutex_unlock(&c.mutex);
- return;
-}
-
-
-raw_channel_drain :: proc(c: ^Raw_Channel) {
- if c == nil {
- return;
- }
- mutex_lock(&c.mutex);
- c.len = 0;
- c.read = 0;
- c.write = 0;
- mutex_unlock(&c.mutex);
-}
-
-
-
-MAX_SELECT_CHANNELS :: 64;
-SELECT_MAX_TIMEOUT :: max(time.Duration);
-
-Select_Command :: enum {
- Recv,
- Send,
-}
-
-Select_Channel :: struct {
- channel: ^Raw_Channel,
- command: Select_Command,
-}
-
-
-
-select :: proc(channels: ..Select_Channel) -> (index: int) {
- return select_timeout(SELECT_MAX_TIMEOUT, ..channels);
-}
-select_timeout :: proc(timeout: time.Duration, channels: ..Select_Channel) -> (index: int) {
- switch len(channels) {
- case 0:
- panic("sync: select with no channels");
- }
-
- assert(len(channels) <= MAX_SELECT_CHANNELS);
-
- backing: [MAX_SELECT_CHANNELS]int;
- queues: [MAX_SELECT_CHANNELS]Raw_Channel_Wait_Queue;
- candidates := backing[:];
- cap := len(channels);
- candidates = candidates[:cap];
-
- count := u32(0);
- for c, i in channels {
- if c.channel == nil {
- continue;
- }
- switch c.command {
- case .Recv:
- if raw_channel_can_recv(c.channel) {
- candidates[count] = i;
- count += 1;
- }
- case .Send:
- if raw_channel_can_send(c.channel) {
- candidates[count] = i;
- count += 1;
- }
- }
- }
-
- if count == 0 {
- wait_state: uintptr = 0;
- for _, i in channels {
- q := &queues[i];
- q.state = &wait_state;
- }
-
- for c, i in channels {
- if c.channel == nil {
- continue;
- }
- q := &queues[i];
- switch c.command {
- case .Recv: raw_channel_wait_queue_insert(&c.channel.recvq, q);
- case .Send: raw_channel_wait_queue_insert(&c.channel.sendq, q);
- }
- }
- raw_channel_wait_queue_wait_on(&wait_state, timeout);
- for c, i in channels {
- if c.channel == nil {
- continue;
- }
- q := &queues[i];
- switch c.command {
- case .Recv: raw_channel_wait_queue_remove(&c.channel.recvq, q);
- case .Send: raw_channel_wait_queue_remove(&c.channel.sendq, q);
- }
- }
-
- for c, i in channels {
- switch c.command {
- case .Recv:
- if raw_channel_can_recv(c.channel) {
- candidates[count] = i;
- count += 1;
- }
- case .Send:
- if raw_channel_can_send(c.channel) {
- candidates[count] = i;
- count += 1;
- }
- }
- }
- if count == 0 && timeout == SELECT_MAX_TIMEOUT {
- index = -1;
- return;
- }
-
- assert(count != 0);
- }
-
- t := time.now();
- r := rand.create(transmute(u64)t);
- i := rand.uint32(&r);
-
- index = candidates[i % count];
- return;
-}
-
-select_recv :: proc(channels: ..^Raw_Channel) -> (index: int) {
- switch len(channels) {
- case 0:
- panic("sync: select with no channels");
- }
-
- assert(len(channels) <= MAX_SELECT_CHANNELS);
-
- backing: [MAX_SELECT_CHANNELS]int;
- queues: [MAX_SELECT_CHANNELS]Raw_Channel_Wait_Queue;
- candidates := backing[:];
- cap := len(channels);
- candidates = candidates[:cap];
-
- count := u32(0);
- for c, i in channels {
- if raw_channel_can_recv(c) {
- candidates[count] = i;
- count += 1;
- }
- }
-
- if count == 0 {
- state: uintptr;
- for c, i in channels {
- q := &queues[i];
- q.state = &state;
- raw_channel_wait_queue_insert(&c.recvq, q);
- }
- raw_channel_wait_queue_wait_on(&state, SELECT_MAX_TIMEOUT);
- for c, i in channels {
- q := &queues[i];
- raw_channel_wait_queue_remove(&c.recvq, q);
- }
-
- for c, i in channels {
- if raw_channel_can_recv(c) {
- candidates[count] = i;
- count += 1;
- }
- }
- assert(count != 0);
- }
-
- t := time.now();
- r := rand.create(transmute(u64)t);
- i := rand.uint32(&r);
-
- index = candidates[i % count];
- return;
-}
-
-select_recv_msg :: proc(channels: ..$C/Channel($T, $D)) -> (msg: T, index: int) {
- switch len(channels) {
- case 0:
- panic("sync: select with no channels");
- }
-
- assert(len(channels) <= MAX_SELECT_CHANNELS);
-
- queues: [MAX_SELECT_CHANNELS]Raw_Channel_Wait_Queue;
- candidates: [MAX_SELECT_CHANNELS]int;
-
- count := u32(0);
- for c, i in channels {
- if raw_channel_can_recv(c) {
- candidates[count] = i;
- count += 1;
- }
- }
-
- if count == 0 {
- state: uintptr;
- for c, i in channels {
- q := &queues[i];
- q.state = &state;
- raw_channel_wait_queue_insert(&c.recvq, q);
- }
- raw_channel_wait_queue_wait_on(&state, SELECT_MAX_TIMEOUT);
- for c, i in channels {
- q := &queues[i];
- raw_channel_wait_queue_remove(&c.recvq, q);
- }
-
- for c, i in channels {
- if raw_channel_can_recv(c) {
- candidates[count] = i;
- count += 1;
- }
- }
- assert(count != 0);
- }
-
- t := time.now();
- r := rand.create(transmute(u64)t);
- i := rand.uint32(&r);
-
- index = candidates[i % count];
- msg = channel_recv(channels[index]);
-
- return;
-}
-
-select_send_msg :: proc(msg: $T, channels: ..$C/Channel(T, $D)) -> (index: int) {
- switch len(channels) {
- case 0:
- panic("sync: select with no channels");
- }
-
- assert(len(channels) <= MAX_SELECT_CHANNELS);
-
- backing: [MAX_SELECT_CHANNELS]int;
- queues: [MAX_SELECT_CHANNELS]Raw_Channel_Wait_Queue;
- candidates := backing[:];
- cap := len(channels);
- candidates = candidates[:cap];
-
- count := u32(0);
- for c, i in channels {
- if raw_channel_can_recv(c) {
- candidates[count] = i;
- count += 1;
- }
- }
-
- if count == 0 {
- state: uintptr;
- for c, i in channels {
- q := &queues[i];
- q.state = &state;
- raw_channel_wait_queue_insert(&c.recvq, q);
- }
- raw_channel_wait_queue_wait_on(&state, SELECT_MAX_TIMEOUT);
- for c, i in channels {
- q := &queues[i];
- raw_channel_wait_queue_remove(&c.recvq, q);
- }
-
- for c, i in channels {
- if raw_channel_can_recv(c) {
- candidates[count] = i;
- count += 1;
- }
- }
- assert(count != 0);
- }
-
- t := time.now();
- r := rand.create(transmute(u64)t);
- i := rand.uint32(&r);
-
- index = candidates[i % count];
-
- if msg != nil {
- channel_send(channels[index], msg);
- }
-
- return;
-}
-
-select_send :: proc(channels: ..^Raw_Channel) -> (index: int) {
- switch len(channels) {
- case 0:
- panic("sync: select with no channels");
- }
-
- assert(len(channels) <= MAX_SELECT_CHANNELS);
- candidates: [MAX_SELECT_CHANNELS]int;
- queues: [MAX_SELECT_CHANNELS]Raw_Channel_Wait_Queue;
-
- count := u32(0);
- for c, i in channels {
- if raw_channel_can_send(c) {
- candidates[count] = i;
- count += 1;
- }
- }
-
- if count == 0 {
- state: uintptr;
- for c, i in channels {
- q := &queues[i];
- q.state = &state;
- raw_channel_wait_queue_insert(&c.sendq, q);
- }
- raw_channel_wait_queue_wait_on(&state, SELECT_MAX_TIMEOUT);
- for c, i in channels {
- q := &queues[i];
- raw_channel_wait_queue_remove(&c.sendq, q);
- }
-
- for c, i in channels {
- if raw_channel_can_send(c) {
- candidates[count] = i;
- count += 1;
- }
- }
- assert(count != 0);
- }
-
- t := time.now();
- r := rand.create(transmute(u64)t);
- i := rand.uint32(&r);
-
- index = candidates[i % count];
- return;
-}
-
-select_try :: proc(channels: ..Select_Channel) -> (index: int) {
- switch len(channels) {
- case 0:
- panic("sync: select with no channels");
- }
-
- assert(len(channels) <= MAX_SELECT_CHANNELS);
-
- backing: [MAX_SELECT_CHANNELS]int;
- candidates := backing[:];
- cap := len(channels);
- candidates = candidates[:cap];
-
- count := u32(0);
- for c, i in channels {
- switch c.command {
- case .Recv:
- if raw_channel_can_recv(c.channel) {
- candidates[count] = i;
- count += 1;
- }
- case .Send:
- if raw_channel_can_send(c.channel) {
- candidates[count] = i;
- count += 1;
- }
- }
- }
-
- if count == 0 {
- index = -1;
- return;
- }
-
- t := time.now();
- r := rand.create(transmute(u64)t);
- i := rand.uint32(&r);
-
- index = candidates[i % count];
- return;
-}
-
-
-select_try_recv :: proc(channels: ..^Raw_Channel) -> (index: int) {
- switch len(channels) {
- case 0:
- index = -1;
- return;
- case 1:
- index = -1;
- if raw_channel_can_recv(channels[0]) {
- index = 0;
- }
- return;
- }
-
- assert(len(channels) <= MAX_SELECT_CHANNELS);
- candidates: [MAX_SELECT_CHANNELS]int;
-
- count := u32(0);
- for c, i in channels {
- if raw_channel_can_recv(c) {
- candidates[count] = i;
- count += 1;
- }
- }
-
- if count == 0 {
- index = -1;
- return;
- }
-
- t := time.now();
- r := rand.create(transmute(u64)t);
- i := rand.uint32(&r);
-
- index = candidates[i % count];
- return;
-}
-
-
-select_try_send :: proc(channels: ..^Raw_Channel) -> (index: int) #no_bounds_check {
- switch len(channels) {
- case 0:
- return -1;
- case 1:
- if raw_channel_can_send(channels[0]) {
- return 0;
- }
- return -1;
- }
-
- assert(len(channels) <= MAX_SELECT_CHANNELS);
- candidates: [MAX_SELECT_CHANNELS]int;
-
- count := u32(0);
- for c, i in channels {
- if raw_channel_can_send(c) {
- candidates[count] = i;
- count += 1;
- }
- }
-
- if count == 0 {
- index = -1;
- return;
- }
-
- t := time.now();
- r := rand.create(transmute(u64)t);
- i := rand.uint32(&r);
-
- index = candidates[i % count];
- return;
-}
-
-select_try_recv_msg :: proc(channels: ..$C/Channel($T, $D)) -> (msg: T, index: int) {
- switch len(channels) {
- case 0:
- index = -1;
- return;
- case 1:
- ok: bool;
- if msg, ok = channel_try_recv(channels[0]); ok {
- index = 0;
- }
- return;
- }
-
- assert(len(channels) <= MAX_SELECT_CHANNELS);
- candidates: [MAX_SELECT_CHANNELS]int;
-
- count := u32(0);
- for c, i in channels {
- if channel_can_recv(c) {
- candidates[count] = i;
- count += 1;
- }
- }
-
- if count == 0 {
- index = -1;
- return;
- }
-
- t := time.now();
- r := rand.create(transmute(u64)t);
- i := rand.uint32(&r);
-
- index = candidates[i % count];
- msg = channel_recv(channels[index]);
- return;
-}
-
-select_try_send_msg :: proc(msg: $T, channels: ..$C/Channel(T, $D)) -> (index: int) {
- index = -1;
- switch len(channels) {
- case 0:
- return;
- case 1:
- if channel_try_send(channels[0], msg) {
- index = 0;
- }
- return;
- }
-
-
- assert(len(channels) <= MAX_SELECT_CHANNELS);
- candidates: [MAX_SELECT_CHANNELS]int;
-
- count := u32(0);
- for c, i in channels {
- if raw_channel_can_send(c) {
- candidates[count] = i;
- count += 1;
- }
- }
-
- if count == 0 {
- index = -1;
- return;
- }
-
- t := time.now();
- r := rand.create(transmute(u64)t);
- i := rand.uint32(&r);
-
- index = candidates[i % count];
- channel_send(channels[index], msg);
- return;
-}
-
diff --git a/core/sync/sync2/channel_unix.odin b/core/sync/sync2/channel_unix.odin
deleted file mode 100644
index 7429b67db..000000000
--- a/core/sync/sync2/channel_unix.odin
+++ /dev/null
@@ -1,17 +0,0 @@
-//+build linux, darwin, freebsd
-//+private
-package sync2
-
-import "core:time"
-
-raw_channel_wait_queue_wait_on :: proc(state: ^uintptr, timeout: time.Duration) {
- // stub
-}
-
-raw_channel_wait_queue_signal :: proc(q: ^Raw_Channel_Wait_Queue) {
- // stub
-}
-
-raw_channel_wait_queue_broadcast :: proc(q: ^Raw_Channel_Wait_Queue) {
- // stub
-}
diff --git a/core/sync/sync2/channel_windows.odin b/core/sync/sync2/channel_windows.odin
deleted file mode 100644
index e365506c8..000000000
--- a/core/sync/sync2/channel_windows.odin
+++ /dev/null
@@ -1,34 +0,0 @@
-//+build windows
-//+private
-package sync2
-
-import win32 "core:sys/windows"
-import "core:time"
-
-raw_channel_wait_queue_wait_on :: proc(state: ^uintptr, timeout: time.Duration) {
- ms: win32.DWORD = win32.INFINITE;
- if max(time.Duration) != SELECT_MAX_TIMEOUT {
- ms = win32.DWORD((max(time.duration_nanoseconds(timeout), 0) + 999999)/1000000);
- }
-
- v := atomic_load(state);
- for v == 0 {
- win32.WaitOnAddress(state, &v, size_of(state^), ms);
- v = atomic_load(state);
- }
- atomic_store(state, 0);
-}
-
-raw_channel_wait_queue_signal :: proc(q: ^Raw_Channel_Wait_Queue) {
- for x := q; x != nil; x = x.next {
- atomic_add(x.state, 1);
- win32.WakeByAddressSingle(x.state);
- }
-}
-
-raw_channel_wait_queue_broadcast :: proc(q: ^Raw_Channel_Wait_Queue) {
- for x := q; x != nil; x = x.next {
- atomic_add(x.state, 1);
- win32.WakeByAddressAll(x.state);
- }
-}
diff --git a/core/sync/sync2/extended.odin b/core/sync/sync2/extended.odin
index 3f44a172a..06051c822 100644
--- a/core/sync/sync2/extended.odin
+++ b/core/sync/sync2/extended.odin
@@ -122,6 +122,36 @@ barrier_wait :: proc(b: ^Barrier) -> (is_leader: bool) {
}
+Auto_Reset_Event :: struct {
+ // status == 0: Event is reset and no threads are waiting
+ // status == 1: Event is signaled
+ // status == -N: Event is reset and N threads are waiting
+ status: i32,
+ sema: Sema,
+}
+
+auto_reset_event_signal :: proc(e: ^Auto_Reset_Event) {
+ old_status := atomic_load_relaxed(&e.status);
+ for {
+ new_status := old_status + 1 if old_status < 1 else 1;
+ if _, ok := atomic_compare_exchange_weak_release(&e.status, old_status, new_status); ok {
+ break;
+ }
+
+ if old_status < 0 {
+ sema_post(&e.sema);
+ }
+ }
+}
+
+auto_reset_event_wait :: proc(e: ^Auto_Reset_Event) {
+ old_status := atomic_sub_acquire(&e.status, 1);
+ if old_status < 1 {
+ sema_wait(&e.sema);
+ }
+}
+
+
Ticket_Mutex :: struct {
ticket: uint,
@@ -130,7 +160,7 @@ Ticket_Mutex :: struct {
ticket_mutex_lock :: #force_inline proc(m: ^Ticket_Mutex) {
ticket := atomic_add_relaxed(&m.ticket, 1);
- for ticket != atomic_load_acq(&m.serving) {
+ for ticket != atomic_load_acquire(&m.serving) {
cpu_relax();
}
}
@@ -142,23 +172,23 @@ ticket_mutex_unlock :: #force_inline proc(m: ^Ticket_Mutex) {
Benaphore :: struct {
- counter: int,
+ counter: i32,
sema: Sema,
}
benaphore_lock :: proc(b: ^Benaphore) {
- if atomic_add_acq(&b.counter, 1) > 1 {
+ if atomic_add_acquire(&b.counter, 1) > 1 {
sema_wait(&b.sema);
}
}
benaphore_try_lock :: proc(b: ^Benaphore) -> bool {
- v, _ := atomic_cxchg_acq(&b.counter, 1, 0);
+ v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0);
return v == 0;
}
benaphore_unlock :: proc(b: ^Benaphore) {
- if atomic_sub_rel(&b.counter, 1) > 0 {
+ if atomic_sub_release(&b.counter, 1) > 0 {
sema_post(&b.sema);
}
}
@@ -166,13 +196,13 @@ benaphore_unlock :: proc(b: ^Benaphore) {
Recursive_Benaphore :: struct {
counter: int,
owner: int,
- recursion: int,
+ recursion: i32,
sema: Sema,
}
recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
tid := runtime.current_thread_id();
- if atomic_add_acq(&b.counter, 1) > 1 {
+ if atomic_add_acquire(&b.counter, 1) > 1 {
if tid != b.owner {
sema_wait(&b.sema);
}
@@ -185,10 +215,10 @@ recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
recursive_benaphore_try_lock :: proc(b: ^Recursive_Benaphore) -> bool {
tid := runtime.current_thread_id();
if b.owner == tid {
- atomic_add_acq(&b.counter, 1);
+ atomic_add_acquire(&b.counter, 1);
}
- if v, _ := atomic_cxchg_acq(&b.counter, 1, 0); v != 0 {
+ if v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0); v != 0 {
return false;
}
// inside the lock
@@ -205,7 +235,7 @@ recursive_benaphore_unlock :: proc(b: ^Recursive_Benaphore) {
if recursion == 0 {
b.owner = 0;
}
- if atomic_sub_rel(&b.counter, 1) > 0 {
+ if atomic_sub_release(&b.counter, 1) > 0 {
if recursion == 0 {
sema_post(&b.sema);
}
@@ -223,7 +253,7 @@ Once :: struct {
}
once_do :: proc(o: ^Once, fn: proc()) {
- if atomic_load_acq(&o.done) == false {
+ if atomic_load_acquire(&o.done) == false {
_once_do_slow(o, fn);
}
}
@@ -234,6 +264,6 @@ _once_do_slow :: proc(o: ^Once, fn: proc()) {
defer mutex_unlock(&o.m);
if !o.done {
fn();
- atomic_store_rel(&o.done, true);
+ atomic_store_release(&o.done, true);
}
}
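Sketch (not part of the diff) of the intended Auto_Reset_Event pairing, with the two calls assumed to run on different threads:

	ev: Auto_Reset_Event;

	// signalling thread:
	auto_reset_event_signal(&ev); // sets status to 1, or releases one blocked waiter

	// waiting thread:
	auto_reset_event_wait(&ev);   // consumes the signal, or blocks on the internal Sema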
diff --git a/core/sync/sync2/primitives.odin b/core/sync/sync2/primitives.odin
index dd6688a50..1ed83f706 100644
--- a/core/sync/sync2/primitives.odin
+++ b/core/sync/sync2/primitives.odin
@@ -1,7 +1,6 @@
package sync2
import "core:time"
-import "core:runtime"
// A Mutex is a mutual exclusion lock
// The zero value for a Mutex is an unlocked mutex
@@ -26,6 +25,18 @@ mutex_try_lock :: proc(m: ^Mutex) -> bool {
return _mutex_try_lock(m);
}
+// Example:
+//
+// if mutex_guard(&m) {
+// ...
+// }
+//
+@(deferred_in=mutex_unlock)
+mutex_guard :: proc(m: ^Mutex) -> bool {
+ mutex_lock(m);
+ return true;
+}
+
// A RW_Mutex is a reader/writer mutual exclusion lock
// The lock can be held by any arbitrary number of readers or a single writer
// The zero value for a RW_Mutex is an unlocked mutex
@@ -66,61 +77,65 @@ rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
return _rw_mutex_try_shared_lock(rw);
}
+// Example:
+//
+// if rw_mutex_guard(&m) {
+// ...
+// }
+//
+@(deferred_in=rw_mutex_unlock)
+rw_mutex_guard :: proc(m: ^RW_Mutex) -> bool {
+ rw_mutex_lock(m);
+ return true;
+}
+
+// Example:
+//
+// if rw_mutex_shared_guard(&m) {
+// ...
+// }
+//
+@(deferred_in=rw_mutex_shared_unlock)
+rw_mutex_shared_guard :: proc(m: ^RW_Mutex) -> bool {
+ rw_mutex_shared_lock(m);
+ return true;
+}
+
+
// A Recursive_Mutex is a recursive mutual exclusion lock
// The zero value for a Recursive_Mutex is an unlocked mutex
//
// A Recursive_Mutex must not be copied after first use
Recursive_Mutex :: struct {
- // TODO(bill): Is this implementation too lazy?
- // Can this be made to work on all OSes without construction and destruction, i.e. Zero is Initialized
- // CRITICAL_SECTION would be a perfect candidate for this on Windows but that cannot be "dumb"
-
- owner: int,
- recursion: int,
- mutex: Mutex,
+ impl: _Recursive_Mutex,
}
recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
- tid := runtime.current_thread_id();
- if tid != m.owner {
- mutex_lock(&m.mutex);
- }
- // inside the lock
- m.owner = tid;
- m.recursion += 1;
+ _recursive_mutex_lock(m);
}
recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
- tid := runtime.current_thread_id();
- assert(tid == m.owner);
- m.recursion -= 1;
- recursion := m.recursion;
- if recursion == 0 {
- m.owner = 0;
- }
- if recursion == 0 {
- mutex_unlock(&m.mutex);
- }
- // outside the lock
-
+ _recursive_mutex_unlock(m);
}
recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
- tid := runtime.current_thread_id();
- if m.owner == tid {
- return mutex_try_lock(&m.mutex);
- }
- if !mutex_try_lock(&m.mutex) {
- return false;
- }
- // inside the lock
- m.owner = tid;
- m.recursion += 1;
- return true;
+ return _recursive_mutex_try_lock(m);
}
+// Example:
+//
+// if recursive_mutex_guard(&m) {
+// ...
+// }
+//
+@(deferred_in=recursive_mutex_unlock)
+recursive_mutex_guard :: proc(m: ^Recursive_Mutex) -> bool {
+ recursive_mutex_lock(m);
+ return true;
+}
+
// Cond implements a condition variable, a rendezvous point for threads
// waiting for or signalling the occurrence of an event
@@ -153,33 +168,14 @@ cond_broadcast :: proc(c: ^Cond) {
//
// A Sema must not be copied after first use
Sema :: struct {
- // TODO(bill): Is this implementation too lazy?
- // Can this be made to work on all OSes without construction and destruction, i.e. Zero is Initialized
-
- mutex: Mutex,
- cond: Cond,
- count: int,
+ impl: _Sema,
}
sema_wait :: proc(s: ^Sema) {
- mutex_lock(&s.mutex);
- defer mutex_unlock(&s.mutex);
-
- for s.count == 0 {
- cond_wait(&s.cond, &s.mutex);
- }
-
- s.count -= 1;
- if s.count > 0 {
- cond_signal(&s.cond);
- }
+ _sema_wait(s);
}
sema_post :: proc(s: ^Sema, count := 1) {
- mutex_lock(&s.mutex);
- defer mutex_unlock(&s.mutex);
-
- s.count += count;
- cond_signal(&s.cond);
+ _sema_post(s, count);
}
diff --git a/core/sync/sync2/primitives_atomic.odin b/core/sync/sync2/primitives_atomic.odin
index 610ab7ee0..7043f8c84 100644
--- a/core/sync/sync2/primitives_atomic.odin
+++ b/core/sync/sync2/primitives_atomic.odin
@@ -5,6 +5,7 @@ package sync2
when !#config(ODIN_SYNC_USE_PTHREADS, true) {
import "core:time"
+import "core:runtime"
_Mutex_State :: enum i32 {
Unlocked = 0,
@@ -160,6 +161,54 @@ _rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
}
+_Recursive_Mutex :: struct {
+ owner: int,
+ recursion: int,
+ mutex: Mutex,
+}
+
+_recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
+ tid := runtime.current_thread_id();
+ if tid != m.impl.owner {
+ mutex_lock(&m.impl.mutex);
+ }
+ // inside the lock
+ m.impl.owner = tid;
+ m.impl.recursion += 1;
+}
+
+_recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
+ tid := runtime.current_thread_id();
+ assert(tid == m.impl.owner);
+ m.impl.recursion -= 1;
+ recursion := m.impl.recursion;
+ if recursion == 0 {
+ m.impl.owner = 0;
+ }
+ if recursion == 0 {
+ mutex_unlock(&m.impl.mutex);
+ }
+ // outside the lock
+
+}
+
+_recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
+ tid := runtime.current_thread_id();
+ if m.impl.owner == tid {
+ return mutex_try_lock(&m.impl.mutex);
+ }
+ if !mutex_try_lock(&m.impl.mutex) {
+ return false;
+ }
+ // inside the lock
+ m.impl.owner = tid;
+ m.impl.recursion += 1;
+ return true;
+}
+
+
+
+
Queue_Item :: struct {
next: ^Queue_Item,
@@ -240,5 +289,35 @@ _cond_broadcast :: proc(c: ^Cond) {
}
}
+_Sema :: struct {
+ mutex: Mutex,
+ cond: Cond,
+ count: int,
+}
+
+_sema_wait :: proc(s: ^Sema) {
+ mutex_lock(&s.impl.mutex);
+ defer mutex_unlock(&s.impl.mutex);
+
+ for s.impl.count == 0 {
+ cond_wait(&s.impl.cond, &s.impl.mutex);
+ }
+
+ s.impl.count -= 1;
+ if s.impl.count > 0 {
+ cond_signal(&s.impl.cond);
+ }
+}
+
+_sema_post :: proc(s: ^Sema, count := 1) {
+ mutex_lock(&s.impl.mutex);
+ defer mutex_unlock(&s.impl.mutex);
+
+ s.impl.count += count;
+ cond_signal(&s.impl.cond);
+}
+
+
+
} // !ODIN_SYNC_USE_PTHREADS
diff --git a/core/sync/sync2/primitives_pthreads.odin b/core/sync/sync2/primitives_pthreads.odin
index e85cff7fc..5fd43d871 100644
--- a/core/sync/sync2/primitives_pthreads.odin
+++ b/core/sync/sync2/primitives_pthreads.odin
@@ -5,6 +5,7 @@ package sync2
when #config(ODIN_SYNC_USE_PTHREADS, true) {
import "core:time"
+import "core:runtime"
import "core:sys/unix"
_Mutex_State :: enum i32 {
@@ -83,7 +84,7 @@ _rw_mutex_shared_lock :: proc(rw: ^RW_Mutex) {
state := atomic_load(&rw.impl.state);
for state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
ok: bool;
- state, ok = atomic_cxchgweak(&rw.impl.state, state, state + RW_Mutex_State_Reader);
+ state, ok = atomic_compare_exchange_weak(&rw.impl.state, state, state + RW_Mutex_State_Reader);
if ok {
return;
}
@@ -106,7 +107,7 @@ _rw_mutex_shared_unlock :: proc(rw: ^RW_Mutex) {
_rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
state := atomic_load(&rw.impl.state);
if state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
- _, ok := atomic_cxchg(&rw.impl.state, state, state + RW_Mutex_State_Reader);
+ _, ok := atomic_compare_exchange_strong(&rw.impl.state, state, state + RW_Mutex_State_Reader);
if ok {
return true;
}
@@ -120,6 +121,53 @@ _rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
return false;
}
+
+_Recursive_Mutex :: struct {
+ owner: int,
+ recursion: int,
+ mutex: Mutex,
+}
+
+_recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
+ tid := runtime.current_thread_id();
+ if tid != m.impl.owner {
+ mutex_lock(&m.impl.mutex);
+ }
+ // inside the lock
+ m.impl.owner = tid;
+ m.impl.recursion += 1;
+}
+
+_recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
+ tid := runtime.current_thread_id();
+ assert(tid == m.impl.owner);
+ m.impl.recursion -= 1;
+ recursion := m.impl.recursion;
+ if recursion == 0 {
+ m.impl.owner = 0;
+ }
+ if recursion == 0 {
+ mutex_unlock(&m.impl.mutex);
+ }
+ // outside the lock
+
+}
+
+_recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
+ tid := runtime.current_thread_id();
+ if m.impl.owner == tid {
+ return mutex_try_lock(&m.impl.mutex);
+ }
+ if !mutex_try_lock(&m.impl.mutex) {
+ return false;
+ }
+ // inside the lock
+ m.impl.owner = tid;
+ m.impl.recursion += 1;
+ return true;
+}
+
+
_Cond :: struct {
pthread_cond: unix.pthread_cond_t,
}
@@ -150,5 +198,34 @@ _cond_broadcast :: proc(c: ^Cond) {
assert(err == 0);
}
+_Sema :: struct {
+ mutex: Mutex,
+ cond: Cond,
+ count: int,
+}
+
+_sema_wait :: proc(s: ^Sema) {
+ mutex_lock(&s.impl.mutex);
+ defer mutex_unlock(&s.impl.mutex);
+
+ for s.impl.count == 0 {
+ cond_wait(&s.impl.cond, &s.impl.mutex);
+ }
+
+ s.impl.count -= 1;
+ if s.impl.count > 0 {
+ cond_signal(&s.impl.cond);
+ }
+}
+
+_sema_post :: proc(s: ^Sema, count := 1) {
+ mutex_lock(&s.impl.mutex);
+ defer mutex_unlock(&s.impl.mutex);
+
+ s.impl.count += count;
+ cond_signal(&s.impl.cond);
+}
+
+
} // ODIN_SYNC_USE_PTHREADS
diff --git a/core/sync/sync2/primitives_windows.odin b/core/sync/sync2/primitives_windows.odin
index 02b6cd733..219af0162 100644
--- a/core/sync/sync2/primitives_windows.odin
+++ b/core/sync/sync2/primitives_windows.odin
@@ -50,6 +50,56 @@ _rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
}
+_Recursive_Mutex :: struct {
+ owner: u32,
+ claim_count: i32,
+}
+
+_recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
+ tid := win32.GetCurrentThreadId();
+ for {
+ prev_owner := atomic_compare_exchange_strong_acquire(&m.impl.owner, tid, 0);
+ switch prev_owner {
+ case 0, tid:
+ m.impl.claim_count += 1;
+ // inside the lock
+ return;
+ }
+
+ win32.WaitOnAddress(
+ &m.impl.owner,
+ &prev_owner,
+ size_of(prev_owner),
+ win32.INFINITE,
+ );
+ }
+}
+
+_recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
+ m.impl.claim_count -= 1;
+ if m.impl.claim_count != 0 {
+ return;
+ }
+ atomic_exchange_release(&m.impl.owner, 0);
+ win32.WakeByAddressSingle(&m.impl.owner);
+ // outside the lock
+
+}
+
+_recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
+ tid := win32.GetCurrentThreadId();
+ prev_owner := atomic_compare_exchange_strong_acquire(&m.impl.owner, tid, 0);
+ switch prev_owner {
+ case 0, tid:
+ m.impl.claim_count += 1;
+ // inside the lock
+ return true;
+ }
+ return false;
+}
+
+
+
_Cond :: struct {
cond: win32.CONDITION_VARIABLE,
@@ -71,3 +121,35 @@ _cond_signal :: proc(c: ^Cond) {
_cond_broadcast :: proc(c: ^Cond) {
win32.WakeAllConditionVariable(&c.impl.cond);
}
+
+
+_Sema :: struct {
+ count: i32,
+}
+
+_sema_wait :: proc(s: ^Sema) {
+ for {
+ original_count := s.impl.count;
+ for original_count == 0 {
+ win32.WaitOnAddress(
+ &s.impl.count,
+ &original_count,
+ size_of(original_count),
+ win32.INFINITE,
+ );
+ original_count = s.impl.count;
+ }
+ if original_count == atomic_compare_exchange_strong(&s.impl.count, original_count-1, original_count) {
+ return;
+ }
+ }
+}
+
+_sema_post :: proc(s: ^Sema, count := 1) {
+ atomic_add(&s.impl.count, i32(count));
+ if count == 1 {
+ win32.WakeByAddressSingle(&s.impl.count);
+ } else {
+ win32.WakeByAddressAll(&s.impl.count);
+ }
+}
diff --git a/core/testing/runner.odin b/core/testing/runner.odin
index efeaa04f6..e3286988c 100644
--- a/core/testing/runner.odin
+++ b/core/testing/runner.odin
@@ -3,7 +3,6 @@ package testing
import "core:io"
import "core:os"
-import "core:strings"
import "core:slice"
reset_t :: proc(t: ^T) {
@@ -55,12 +54,9 @@ runner :: proc(internal_tests: []Internal_Test) -> bool {
logf(t, "[Test: %s]", it.name);
- // TODO(bill): Catch panics
- {
- it.p(t);
- }
+ run_internal_test(t, it);
- if t.error_count != 0 {
+ if failed(t) {
logf(t, "[%s : FAILURE]", it.name);
} else {
logf(t, "[%s : SUCCESS]", it.name);
diff --git a/core/testing/runner_other.odin b/core/testing/runner_other.odin
new file mode 100644
index 000000000..0bd95e10a
--- /dev/null
+++ b/core/testing/runner_other.odin
@@ -0,0 +1,8 @@
+//+private
+//+build !windows
+package testing
+
+run_internal_test :: proc(t: ^T, it: Internal_Test) {
+ // TODO(bill): Catch panics on other platforms
+ it.p(t);
+}
diff --git a/core/testing/runner_windows.odin b/core/testing/runner_windows.odin
new file mode 100644
index 000000000..d8633f703
--- /dev/null
+++ b/core/testing/runner_windows.odin
@@ -0,0 +1,191 @@
+//+private
+//+build windows
+package testing
+
+import win32 "core:sys/windows"
+import "core:runtime"
+import "intrinsics"
+
+
+Sema :: struct {
+ count: i32,
+}
+
+sema_reset :: proc "contextless" (s: ^Sema) {
+ intrinsics.atomic_store(&s.count, 0);
+}
+sema_wait :: proc "contextless" (s: ^Sema) {
+ for {
+ original_count := s.count;
+ for original_count == 0 {
+ win32.WaitOnAddress(
+ &s.count,
+ &original_count,
+ size_of(original_count),
+ win32.INFINITE,
+ );
+ original_count = s.count;
+ }
+ if original_count == intrinsics.atomic_cxchg(&s.count, original_count-1, original_count) {
+ return;
+ }
+ }
+}
+
+sema_post :: proc "contextless" (s: ^Sema, count := 1) {
+ intrinsics.atomic_add(&s.count, i32(count));
+ if count == 1 {
+ win32.WakeByAddressSingle(&s.count);
+ } else {
+ win32.WakeByAddressAll(&s.count);
+ }
+}
+
+
+Thread_Proc :: #type proc(^Thread);
+
+MAX_USER_ARGUMENTS :: 8;
+
+Thread :: struct {
+ using specific: Thread_Os_Specific,
+ procedure: Thread_Proc,
+
+ t: ^T,
+ it: Internal_Test,
+ success: bool,
+
+ init_context: Maybe(runtime.Context),
+
+ creation_allocator: runtime.Allocator,
+}
+
+Thread_Os_Specific :: struct {
+ win32_thread: win32.HANDLE,
+ win32_thread_id: win32.DWORD,
+	done: bool, // set to true by the thread entry procedure once it has returned
+}
+
+thread_create :: proc(procedure: Thread_Proc) -> ^Thread {
+ __windows_thread_entry_proc :: proc "stdcall" (t_: rawptr) -> win32.DWORD {
+ t := (^Thread)(t_);
+ context = runtime.default_context();
+ c := context;
+ if ic, ok := t.init_context.?; ok {
+ c = ic;
+ }
+ context = c;
+
+ t.procedure(t);
+
+ if t.init_context == nil {
+ if context.temp_allocator.data == &runtime.global_default_temp_allocator_data {
+ runtime.default_temp_allocator_destroy(auto_cast context.temp_allocator.data);
+ }
+ }
+
+ intrinsics.atomic_store(&t.done, true);
+ return 0;
+ }
+
+
+ thread := new(Thread);
+ if thread == nil {
+ return nil;
+ }
+ thread.creation_allocator = context.allocator;
+
+ win32_thread_id: win32.DWORD;
+ win32_thread := win32.CreateThread(nil, 0, __windows_thread_entry_proc, thread, win32.CREATE_SUSPENDED, &win32_thread_id);
+ if win32_thread == nil {
+ free(thread, thread.creation_allocator);
+ return nil;
+ }
+ thread.procedure = procedure;
+ thread.win32_thread = win32_thread;
+ thread.win32_thread_id = win32_thread_id;
+ thread.init_context = context;
+
+ return thread;
+}
+
+thread_start :: proc "contextless" (thread: ^Thread) {
+ win32.ResumeThread(thread.win32_thread);
+}
+
+thread_join_and_destroy :: proc(thread: ^Thread) {
+ if thread.win32_thread != win32.INVALID_HANDLE {
+ win32.WaitForSingleObject(thread.win32_thread, win32.INFINITE);
+ win32.CloseHandle(thread.win32_thread);
+ thread.win32_thread = win32.INVALID_HANDLE;
+ }
+ free(thread, thread.creation_allocator);
+}
+
+thread_terminate :: proc "contextless" (thread: ^Thread, exit_code: int) {
+ win32.TerminateThread(thread.win32_thread, u32(exit_code));
+}
+
+
+
+
+global_threaded_runner_semaphore: Sema;
+global_exception_handler: rawptr;
+global_current_thread: ^Thread;
+global_current_t: ^T;
+
+run_internal_test :: proc(t: ^T, it: Internal_Test) {
+ thread := thread_create(proc(thread: ^Thread) {
+ exception_handler_proc :: proc "stdcall" (ExceptionInfo: ^win32.EXCEPTION_POINTERS) -> win32.LONG {
+ switch ExceptionInfo.ExceptionRecord.ExceptionCode {
+ case
+ win32.EXCEPTION_DATATYPE_MISALIGNMENT,
+ win32.EXCEPTION_BREAKPOINT,
+ win32.EXCEPTION_ACCESS_VIOLATION,
+ win32.EXCEPTION_ILLEGAL_INSTRUCTION,
+ win32.EXCEPTION_ARRAY_BOUNDS_EXCEEDED,
+ win32.EXCEPTION_STACK_OVERFLOW:
+
+ sema_post(&global_threaded_runner_semaphore);
+ return win32.EXCEPTION_EXECUTE_HANDLER;
+ }
+
+ return win32.EXCEPTION_CONTINUE_SEARCH;
+ }
+ global_exception_handler = win32.AddVectoredExceptionHandler(0, exception_handler_proc);
+
+ context.assertion_failure_proc = proc(prefix, message: string, loc: runtime.Source_Code_Location) {
+ errorf(t=global_current_t, format="%s %s", args={prefix, message}, loc=loc);
+ intrinsics.trap();
+ };
+
+ thread.it.p(thread.t);
+
+ thread.success = true;
+ sema_post(&global_threaded_runner_semaphore);
+ });
+
+ sema_reset(&global_threaded_runner_semaphore);
+ global_current_t = t;
+
+ t._fail_now = proc() -> ! {
+ intrinsics.trap();
+ };
+
+ thread.t = t;
+ thread.it = it;
+ thread.success = false;
+
+ thread_start(thread);
+
+ sema_wait(&global_threaded_runner_semaphore);
+ thread_terminate(thread, int(!thread.success));
+ thread_join_and_destroy(thread);
+
+ win32.RemoveVectoredExceptionHandler(global_exception_handler);
+
+ if !thread.success && t.error_count == 0 {
+ t.error_count += 1;
+ }
+
+ return;
+}
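The new Windows runner executes each test on its own thread behind a vectored exception handler and a semaphore, so a crashing test is recorded as a failure rather than taking down the whole run. A sketch of the kind of test this contains, assuming the usual @(test) attribute picked up by the test runner:

    package pkg_tests

    import "core:testing"

    @(test)
    deliberately_crashes :: proc(t: ^testing.T) {
        p: ^int;
        _ = p^; // access violation: the handler posts the runner semaphore,
                // the worker thread is terminated, and the test counts as failed
    }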
diff --git a/core/testing/testing.odin b/core/testing/testing.odin
index a431d8575..ec47ca4d4 100644
--- a/core/testing/testing.odin
+++ b/core/testing/testing.odin
@@ -25,16 +25,21 @@ T :: struct {
w: io.Writer,
cleanups: [dynamic]Internal_Cleanup,
+
+ _fail_now: proc() -> !,
}
error :: proc(t: ^T, args: ..any, loc := #caller_location) {
- log(t=t, args=args, loc=loc);
+ fmt.wprintf(t.w, "%v: ", loc);
+ fmt.wprintln(t.w, ..args);
t.error_count += 1;
}
errorf :: proc(t: ^T, format: string, args: ..any, loc := #caller_location) {
- logf(t=t, format=format, args=args, loc=loc);
+ fmt.wprintf(t.w, "%v: ", loc);
+ fmt.wprintf(t.w, format, ..args);
+ fmt.wprintln(t.w);
t.error_count += 1;
}
@@ -43,6 +48,13 @@ fail :: proc(t: ^T) {
t.error_count += 1;
}
+fail_now :: proc(t: ^T) {
+ fail(t);
+ if t._fail_now != nil {
+ t._fail_now();
+ }
+}
+
failed :: proc(t: ^T) -> bool {
return t.error_count != 0;
}
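With the changes above, error and errorf print the caller location directly, fail_now aborts the current test through the runner-provided _fail_now hook, and failed reports whether any error has been recorded. A hedged sketch of a test using them; parse_port is an illustrative stand-in for code under test:

    package pkg_tests

    import "core:testing"

    parse_port :: proc(s: string) -> (port: int, ok: bool) {
        if s == "8080" {
            return 8080, true;
        }
        return 0, false;
    }

    @(test)
    parse_port_test :: proc(t: ^testing.T) {
        port, ok := parse_port("8080");
        if !ok {
            testing.fail_now(t); // nothing below makes sense without a value
        }
        if port != 8080 {
            testing.errorf(t, "expected 8080, got %d", port);
        }
        if testing.failed(t) {
            testing.error(t, "parse_port behaved unexpectedly");
        }
    }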
diff --git a/core/time/time.odin b/core/time/time.odin
index 00d7e529a..c75549b17 100644
--- a/core/time/time.odin
+++ b/core/time/time.odin
@@ -262,19 +262,18 @@ datetime_to_time :: proc(year, month, day, hour, minute, second: int, nsec := in
return;
}
+ ok = true;
+
_y := year - 1970;
_m := month - 1;
_d := day - 1;
- if _m < 0 || _m > 11 {
+ if month < 1 || month > 12 {
_m %= 12; ok = false;
}
- if _d < 0 || _m > 30 {
+ if day < 1 || day > 31 {
_d %= 31; ok = false;
}
- if _m < 0 || _m > 11 {
- _m %= 12; ok = false;
- }
s := i64(0);
div, mod := divmod(_y, 400);
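The fix above sets ok up front and validates month against 1..12 and day against 1..31, instead of checking the month twice and comparing the day against the month variable. A small sketch of the observable behaviour, assuming the procedure returns (Time, ok: bool) as the hunk header suggests:

    package time_sketch

    import "core:fmt"
    import "core:time"

    main :: proc() {
        t, ok := time.datetime_to_time(2021, 5, 9, 12, 30, 0);
        fmt.println(t, ok);  // ok == true for an in-range date

        _, bad := time.datetime_to_time(2021, 13, 40, 0, 0, 0);
        fmt.println(bad);    // false: month 13 and day 40 are out of range
    }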
diff --git a/core/unicode/tables.odin b/core/unicode/tables.odin
index bb858fd04..ff4793402 100644
--- a/core/unicode/tables.odin
+++ b/core/unicode/tables.odin
@@ -12,7 +12,6 @@ package unicode
@(private) pLo :: pLl | pLu; // a letter that is neither upper nor lower case.
@(private) pLmask :: pLo;
-@(static)
char_properties := [MAX_LATIN1+1]u8{
0x00 = pC, // '\x00'
0x01 = pC, // '\x01'
@@ -273,7 +272,6 @@ char_properties := [MAX_LATIN1+1]u8{
};
-@(static)
alpha_ranges := [?]i32{
0x00d8, 0x00f6,
0x00f8, 0x01f5,
@@ -429,7 +427,6 @@ alpha_ranges := [?]i32{
0xffda, 0xffdc,
};
-@(static)
alpha_singlets := [?]i32{
0x00aa,
0x00b5,
@@ -465,7 +462,6 @@ alpha_singlets := [?]i32{
0xfe74,
};
-@(static)
space_ranges := [?]i32{
0x0009, 0x000d, // tab and newline
0x0020, 0x0020, // space
@@ -481,7 +477,6 @@ space_ranges := [?]i32{
0xfeff, 0xfeff,
};
-@(static)
unicode_spaces := [?]i32{
0x0009, // tab
0x000a, // LF
@@ -499,7 +494,6 @@ unicode_spaces := [?]i32{
0xfeff, // unknown
};
-@(static)
to_upper_ranges := [?]i32{
0x0061, 0x007a, 468, // a-z A-Z
0x00e0, 0x00f6, 468,
@@ -538,7 +532,6 @@ to_upper_ranges := [?]i32{
0xff41, 0xff5a, 468,
};
-@(static)
to_upper_singlets := [?]i32{
0x00ff, 621,
0x0101, 499,
@@ -882,7 +875,6 @@ to_upper_singlets := [?]i32{
0x1ff3, 509,
};
-@(static)
to_lower_ranges := [?]i32{
0x0041, 0x005a, 532, // A-Z a-z
0x00c0, 0x00d6, 532, // - -
@@ -922,7 +914,6 @@ to_lower_ranges := [?]i32{
0xff21, 0xff3a, 532, // - -
};
-@(static)
to_lower_singlets := [?]i32{
0x0100, 501,
0x0102, 501,
@@ -1259,7 +1250,6 @@ to_lower_singlets := [?]i32{
0x1ffc, 491,
};
-@(static)
to_title_singlets := [?]i32{
0x01c4, 501,
0x01c6, 499,
diff --git a/examples/demo/demo.odin b/examples/demo/demo.odin
index 6817b15a4..af67df3e9 100644
--- a/examples/demo/demo.odin
+++ b/examples/demo/demo.odin
@@ -1352,8 +1352,8 @@ bit_set_type :: proc() {
d: Days;
d = {Sunday, Monday};
- e := d | WEEKEND;
- e |= {Monday};
+ e := d + WEEKEND;
+ e += {Monday};
fmt.println(d, e);
ok := Saturday in e; // `in` is only allowed for `map` and `bit_set` types
@@ -1372,12 +1372,12 @@ bit_set_type :: proc() {
fmt.println(typeid_of(type_of(x))); // bit_set[A..Z]
fmt.println(typeid_of(type_of(y))); // bit_set[0..8; u16]
- incl(&x, 'F');
+ x += {'F'};
assert('F' in x);
- excl(&x, 'F');
+ x -= {'F'};
assert('F' not_in x);
- y |= {1, 4, 2};
+ y += {1, 4, 2};
assert(2 in y);
}
{
@@ -1760,8 +1760,6 @@ range_statements_with_multiple_return_values :: proc() {
soa_struct_layout :: proc() {
- // IMPORTANT NOTE(bill, 2019-11-03): This feature is subject to be changed/removed
- // NOTE(bill): Most likely #soa [N]T
fmt.println("\n#SOA Struct Layout");
{
@@ -1858,6 +1856,30 @@ soa_struct_layout :: proc() {
fmt.println(cap(d));
fmt.println(d[:]);
}
+ { // soa_zip and soa_unzip
+ fmt.println("\nsoa_zip and soa_unzip");
+
+ x := []i32{1, 3, 9};
+ y := []f32{2, 4, 16};
+ z := []b32{true, false, true};
+
+ // produce an #soa slice from the normal slices passed
+ s := soa_zip(a=x, b=y, c=z);
+
+ // iterate over the #soa slice
+ for v, i in s {
+ fmt.println(v, i); // exactly the same as s[i]
+ // NOTE: 'v' is NOT a temporary value but has a specialized addressing mode
+ // which means that when accessing v.a etc, it does the correct transformation
+ // internally:
+ // s[i].a === s.a[i]
+ fmt.println(v.a, v.b, v.c);
+ }
+
+ // Recover the slices from the #soa slice
+ a, b, c := soa_unzip(s);
+ fmt.println(a, b, c);
+ }
}
constant_literal_expressions :: proc() {
diff --git a/examples/demo_insert_semicolon/demo.odin b/examples/demo_insert_semicolon/demo.odin
index ae677e5a9..3fbae274e 100644
--- a/examples/demo_insert_semicolon/demo.odin
+++ b/examples/demo_insert_semicolon/demo.odin
@@ -1347,8 +1347,8 @@ bit_set_type :: proc() {
d: Days
d = {Sunday, Monday}
- e := d | WEEKEND
- e |= {Monday}
+ e := d + WEEKEND
+ e += {Monday}
fmt.println(d, e)
ok := Saturday in e // `in` is only allowed for `map` and `bit_set` types
@@ -1367,12 +1367,12 @@ bit_set_type :: proc() {
fmt.println(typeid_of(type_of(x))) // bit_set[A..Z]
fmt.println(typeid_of(type_of(y))) // bit_set[0..8; u16]
- incl(&x, 'F')
+ x += {'F'};
assert('F' in x)
- excl(&x, 'F')
+ x -= {'F'};
assert('F' not_in x)
- y |= {1, 4, 2}
+ y += {1, 4, 2}
assert(2 in y)
}
{
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index 82ed24f83..0207221bc 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -173,8 +173,6 @@ struct BuildContext {
String resource_filepath;
String pdb_filepath;
bool has_resource;
- String opt_flags;
- String llc_flags;
String link_flags;
String extra_linker_flags;
String microarch;
@@ -202,18 +200,24 @@ struct BuildContext {
bool disallow_do;
bool insert_semicolon;
+
bool ignore_warnings;
bool warnings_as_errors;
+ bool show_error_line;
bool use_subsystem_windows;
bool ignore_microsoft_magic;
bool linker_map_file;
+ bool use_separate_modules;
+
u32 cmd_doc_flags;
Array<String> extra_packages;
QueryDataSetSettings query_data_set_settings;
+ StringSet test_names;
+
gbAffinity affinity;
isize thread_count;
@@ -744,6 +748,9 @@ String get_fullpath_core(gbAllocator a, String path) {
return path_to_fullpath(a, res);
}
+bool show_error_line(void) {
+ return build_context.show_error_line;
+}
void init_build_context(TargetMetrics *cross_target) {
@@ -806,22 +813,12 @@ void init_build_context(TargetMetrics *cross_target) {
bc->word_size = metrics->word_size;
bc->max_align = metrics->max_align;
bc->link_flags = str_lit(" ");
- bc->opt_flags = str_lit(" ");
- gbString llc_flags = gb_string_make_reserve(heap_allocator(), 64);
- if (bc->ODIN_DEBUG) {
- // llc_flags = gb_string_appendc(llc_flags, "-debug-compile ");
- }
-
// NOTE(zangent): The linker flags to set the build architecture are different
// across OSs. It doesn't make sense to allocate extra data on the heap
// here, so I just #defined the linker flags to keep things concise.
if (bc->metrics.arch == TargetArch_amd64) {
- if (bc->microarch.len == 0) {
- llc_flags = gb_string_appendc(llc_flags, "-march=x86-64 ");
- }
-
switch (bc->metrics.os) {
case TargetOs_windows:
bc->link_flags = str_lit("/machine:x64 ");
@@ -836,10 +833,6 @@ void init_build_context(TargetMetrics *cross_target) {
break;
}
} else if (bc->metrics.arch == TargetArch_386) {
- if (bc->microarch.len == 0) {
- llc_flags = gb_string_appendc(llc_flags, "-march=x86 ");
- }
-
switch (bc->metrics.os) {
case TargetOs_windows:
bc->link_flags = str_lit("/machine:x86 ");
@@ -856,10 +849,6 @@ void init_build_context(TargetMetrics *cross_target) {
break;
}
} else if (bc->metrics.arch == TargetArch_arm64) {
- if (bc->microarch.len == 0) {
- llc_flags = gb_string_appendc(llc_flags, "-march=arm64 ");
- }
-
switch (bc->metrics.os) {
case TargetOs_darwin:
bc->link_flags = str_lit("-arch arm64 ");
@@ -872,50 +861,9 @@ void init_build_context(TargetMetrics *cross_target) {
gb_printf_err("Compiler Error: Unsupported architecture\n");;
gb_exit(1);
}
- llc_flags = gb_string_appendc(llc_flags, " ");
-
bc->optimization_level = gb_clamp(bc->optimization_level, 0, 3);
- gbString opt_flags = gb_string_make_reserve(heap_allocator(), 64);
-
- if (bc->microarch.len != 0) {
- opt_flags = gb_string_appendc(opt_flags, "-march=");
- opt_flags = gb_string_append_length(opt_flags, bc->microarch.text, bc->microarch.len);
- opt_flags = gb_string_appendc(opt_flags, " ");
-
- // llc_flags = gb_string_appendc(opt_flags, "-march=");
- // llc_flags = gb_string_append_length(llc_flags, bc->microarch.text, bc->microarch.len);
- // llc_flags = gb_string_appendc(llc_flags, " ");
- }
-
-
- if (bc->optimization_level != 0) {
- opt_flags = gb_string_append_fmt(opt_flags, "-O%d ", bc->optimization_level);
- // NOTE(lachsinc): The following options were previously passed during call
- // to opt in main.cpp:exec_llvm_opt().
- // -die: Dead instruction elimination
- // -memcpyopt: MemCpy optimization
- }
- if (bc->ODIN_DEBUG == false) {
- opt_flags = gb_string_appendc(opt_flags, "-mem2reg -die ");
- }
-
-
-
-
-
- // NOTE(lachsinc): This optimization option was previously required to get
- // around an issue in fmt.odin. Thank bp for tracking it down! Leaving for now until the issue
- // is resolved and confirmed by Bill. Maybe it should be readded in non-debug builds.
- // if (bc->ODIN_DEBUG == false) {
- // opt_flags = gb_string_appendc(opt_flags, "-mem2reg ");
- // }
-
- bc->opt_flags = make_string_c(opt_flags);
- bc->llc_flags = make_string_c(llc_flags);
-
-
#undef LINK_FLAG_X64
#undef LINK_FLAG_386
}
diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index 00963487f..1acb9732f 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -87,7 +87,7 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
case BuiltinProc_DIRECTIVE: {
ast_node(bd, BasicDirective, ce->proc);
- String name = bd->name;
+ String name = bd->name.string;
if (name == "defined") {
break;
}
@@ -124,7 +124,7 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
case BuiltinProc_DIRECTIVE: {
ast_node(bd, BasicDirective, ce->proc);
- String name = bd->name;
+ String name = bd->name.string;
if (name == "location") {
if (ce->args.count > 1) {
error(ce->args[0], "'#location' expects either 0 or 1 arguments, got %td", ce->args.count);
@@ -1509,6 +1509,10 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
{
Type *bt = base_type(x.type);
+ if (are_types_identical(bt, t_f16)) {
+ add_package_dependency(c, "runtime", "min_f16");
+ add_package_dependency(c, "runtime", "max_f16");
+ }
if (are_types_identical(bt, t_f32)) {
add_package_dependency(c, "runtime", "min_f32");
add_package_dependency(c, "runtime", "max_f32");
@@ -2017,11 +2021,92 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
}
}
+ operand->mode = Addressing_OptionalOk;
+ operand->type = default_type(x.type);
+ }
+ break;
+
+ case BuiltinProc_sqrt:
+ {
+ Operand x = {};
+ check_expr(c, &x, ce->args[0]);
+ if (x.mode == Addressing_Invalid) {
+ return false;
+ }
+ if (!is_type_float(x.type)) {
+ gbString xts = type_to_string(x.type);
+ error(x.expr, "Expected a floating point value for '%.*s', got %s", LIT(builtin_procs[id].name), xts);
+ gb_string_free(xts);
+ return false;
+ }
+
+ if (x.mode == Addressing_Constant) {
+ f64 v = exact_value_to_f64(x.value);
+
+ operand->mode = Addressing_Constant;
+ operand->type = x.type;
+ operand->value = exact_value_float(gb_sqrt(v));
+ break;
+ }
operand->mode = Addressing_Value;
- operand->type = make_optional_ok_type(default_type(x.type), false); // Just reusing this procedure, it's not optional
+ operand->type = default_type(x.type);
}
break;
+ case BuiltinProc_mem_copy:
+ case BuiltinProc_mem_copy_non_overlapping:
+ {
+ operand->mode = Addressing_NoValue;
+ operand->type = t_invalid;
+
+ Operand dst = {};
+ Operand src = {};
+ Operand len = {};
+ check_expr(c, &dst, ce->args[0]);
+ check_expr(c, &src, ce->args[1]);
+ check_expr(c, &len, ce->args[2]);
+ if (dst.mode == Addressing_Invalid) {
+ return false;
+ }
+ if (src.mode == Addressing_Invalid) {
+ return false;
+ }
+ if (len.mode == Addressing_Invalid) {
+ return false;
+ }
+
+
+ if (!is_type_pointer(dst.type)) {
+ gbString str = type_to_string(dst.type);
+ error(dst.expr, "Expected a pointer value for '%.*s', got %s", LIT(builtin_procs[id].name), str);
+ gb_string_free(str);
+ return false;
+ }
+ if (!is_type_pointer(src.type)) {
+ gbString str = type_to_string(src.type);
+ error(src.expr, "Expected a pointer value for '%.*s', got %s", LIT(builtin_procs[id].name), str);
+ gb_string_free(str);
+ return false;
+ }
+ if (!is_type_integer(len.type)) {
+ gbString str = type_to_string(len.type);
+ error(len.expr, "Expected an integer value for the number of bytes for '%.*s', got %s", LIT(builtin_procs[id].name), str);
+ gb_string_free(str);
+ return false;
+ }
+
+ if (len.mode == Addressing_Constant) {
+ i64 n = exact_value_to_i64(len.value);
+ if (n < 0) {
+ gbString str = expr_to_string(len.expr);
+ error(len.expr, "Expected a non-negative integer value for the number of bytes for '%.*s', got %s", LIT(builtin_procs[id].name), str);
+ gb_string_free(str);
+ }
+ }
+ }
+ break;
+
+
case BuiltinProc_atomic_fence:
case BuiltinProc_atomic_fence_acq:
case BuiltinProc_atomic_fence_rel:
@@ -2149,8 +2234,8 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
check_assignment(c, &x, elem, builtin_name);
check_assignment(c, &y, elem, builtin_name);
- operand->mode = Addressing_Value;
- operand->type = make_optional_ok_type(elem, /*typed*/false);
+ operand->mode = Addressing_OptionalOk;
+ operand->type = elem;
break;
}
break;
@@ -2425,6 +2510,46 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
}
break;
+ case BuiltinProc_type_is_variant_of:
+ {
+ if (operand->mode != Addressing_Type) {
+ error(operand->expr, "Expected a type for '%.*s'", LIT(builtin_name));
+ operand->mode = Addressing_Invalid;
+ operand->type = t_invalid;
+ return false;
+ }
+
+
+ Type *u = operand->type;
+
+ if (!is_type_union(u)) {
+ error(operand->expr, "Expected a union type for '%.*s'", LIT(builtin_name));
+ operand->mode = Addressing_Invalid;
+ operand->type = t_invalid;
+ return false;
+ }
+
+ Type *v = check_type(c, ce->args[1]);
+
+ u = base_type(u);
+ GB_ASSERT(u->kind == Type_Union);
+
+ bool is_variant = false;
+
+ for_array(i, u->Union.variants) {
+ Type *vt = u->Union.variants[i];
+ if (are_types_identical(v, vt)) {
+ is_variant = true;
+ break;
+ }
+ }
+
+ operand->mode = Addressing_Constant;
+ operand->type = t_untyped_bool;
+ operand->value = exact_value_bool(is_variant);
+ }
+ break;
+
case BuiltinProc_type_struct_field_count:
operand->value = exact_value_i64(0);
if (operand->mode != Addressing_Type) {
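The checker additions above cover sqrt (constant-folded when its operand is constant), mem_copy and mem_copy_non_overlapping (two pointers plus an integer byte count, with constant negative lengths rejected), and type_is_variant_of (an untyped boolean constant). A sketch of the Odin-side view, assuming these builtins surface through the intrinsics package under the same names, imported the way runner_windows.odin above imports it:

    package intrinsics_sketch

    import "core:fmt"
    import "intrinsics"

    Payload :: union {int, f64, string}

    main :: proc() {
        // constant operand, so the checker folds this to a constant
        SQRT2 :: intrinsics.sqrt(f64(2));
        fmt.println(SQRT2);

        // mem_copy(dst, src, len): pointers plus a byte count
        src := [4]u8{1, 2, 3, 4};
        dst: [4]u8;
        intrinsics.mem_copy(&dst, &src, size_of(src));
        fmt.println(dst);

        // evaluated by the checker to an untyped boolean constant
        fmt.println(intrinsics.type_is_variant_of(Payload, f64));  // true
        fmt.println(intrinsics.type_is_variant_of(Payload, bool)); // false
    }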
diff --git a/src/check_decl.cpp b/src/check_decl.cpp
index f008317ad..0aef40546 100644
--- a/src/check_decl.cpp
+++ b/src/check_decl.cpp
@@ -289,17 +289,6 @@ void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr, Type *def)
if (decl != nullptr) {
AttributeContext ac = {};
check_decl_attributes(ctx, decl->attributes, type_decl_attribute, &ac);
- if (ac.atom_op_table != nullptr) {
- Type *bt = base_type(e->type);
- switch (bt->kind) {
- case Type_Struct:
- bt->Struct.atom_op_table = ac.atom_op_table;
- break;
- default:
- error(e->token, "Only struct types can have custom atom operations");
- break;
- }
- }
}
@@ -352,16 +341,17 @@ void override_entity_in_scope(Entity *original_entity, Entity *new_entity) {
string_map_set(&found_scope->elements, original_name, new_entity);
+ original_entity->flags |= EntityFlag_Overridden;
original_entity->type = new_entity->type;
+ original_entity->aliased_of = new_entity;
if (original_entity->identifier == nullptr) {
original_entity->identifier = new_entity->identifier;
}
if (original_entity->identifier != nullptr &&
original_entity->identifier->kind == Ast_Ident) {
- original_entity->identifier->Ident.entity = nullptr;
+ original_entity->identifier->Ident.entity = new_entity;
}
- original_entity->flags |= EntityFlag_Overridden;
// IMPORTANT NOTE(bill, 2021-04-10): copy only the variants
// This is most likely NEVER required, but it does not at all hurt to keep
@@ -375,6 +365,7 @@ void override_entity_in_scope(Entity *original_entity, Entity *new_entity) {
void check_const_decl(CheckerContext *ctx, Entity *e, Ast *type_expr, Ast *init, Type *named_type) {
GB_ASSERT(e->type == nullptr);
GB_ASSERT(e->kind == Entity_Constant);
+ init = unparen_expr(init);
if (e->flags & EntityFlag_Visited) {
e->type = t_invalid;
@@ -408,6 +399,18 @@ void check_const_decl(CheckerContext *ctx, Entity *e, Ast *type_expr, Ast *init,
e->kind = Entity_TypeName;
e->type = nullptr;
+ if (entity != nullptr && entity->type != nullptr &&
+ is_type_polymorphic_record_unspecialized(entity->type)) {
+ DeclInfo *decl = decl_info_of_entity(e);
+ if (decl != nullptr) {
+ if (decl->attributes.count > 0) {
+ error(decl->attributes[0], "Constant alias declarations cannot have attributes");
+ }
+ }
+
+ override_entity_in_scope(e, entity);
+ return;
+ }
check_type_decl(ctx, e, ctx->decl->init_expr, named_type);
return;
}
@@ -896,10 +899,9 @@ void check_global_variable_decl(CheckerContext *ctx, Entity *&e, Ast *type_expr,
e->Variable.thread_local_model = ac.thread_local_model;
e->Variable.is_export = ac.is_export;
+ e->flags &= ~EntityFlag_Static;
if (ac.is_static) {
- e->flags |= EntityFlag_Static;
- } else {
- e->flags &= ~EntityFlag_Static;
+ error(e->token, "@(static) is not supported for global variables, nor required");
}
ac.link_name = handle_link_name(ctx, e->token, ac.link_name, ac.link_prefix);
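check_const_decl now treats a constant declaration whose initializer resolves to an unspecialized polymorphic record as an alias, overriding the entity instead of declaring a distinct type, and rejects attributes on such aliases. A hedged sketch of what that appears to permit:

    package alias_sketch

    import "core:fmt"

    Table :: struct($Key: typeid, $Value: typeid) {
        keys:   [dynamic]Key,
        values: [dynamic]Value,
    }

    // Constant alias of the unspecialized polymorphic struct; after the
    // override it names the same entity as Table, so it can be specialized.
    Dict :: Table;

    main :: proc() {
        d: Dict(string, int);
        append(&d.keys,   "answer");
        append(&d.values, 42);
        fmt.println(d.keys[0], d.values[0]);
    }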
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index 6c4a78e53..81fe3baa9 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -621,7 +621,9 @@ i64 check_distance_between_types(CheckerContext *c, Operand *operand, Type *type
}
PolyProcData poly_proc_data = {};
if (check_polymorphic_procedure_assignment(c, operand, type, operand->expr, &poly_proc_data)) {
- add_entity_use(c, operand->expr, poly_proc_data.gen_entity);
+ Entity *e = poly_proc_data.gen_entity;
+ add_type_and_value(c->info, operand->expr, Addressing_Value, e->type, {});
+ add_entity_use(c, operand->expr, e);
return 4;
}
}
@@ -1113,6 +1115,7 @@ bool check_cycle(CheckerContext *c, Entity *curr, bool report) {
error(curr->token, "\t%.*s refers to", LIT(curr->token.string));
}
error(curr->token, "\t%.*s", LIT(curr->token.string));
+ curr->type = t_invalid;
}
return true;
}
@@ -1130,7 +1133,7 @@ Entity *check_ident(CheckerContext *c, Operand *o, Ast *n, Type *named_type, Typ
Entity *e = scope_lookup(c->scope, name);
if (e == nullptr) {
if (is_blank_ident(name)) {
- error(n, "'_' cannot be used as a value type");
+ error(n, "'_' cannot be used as a value");
} else {
error(n, "Undeclared name: %.*s", LIT(name));
}
@@ -1141,6 +1144,9 @@ Entity *check_ident(CheckerContext *c, Operand *o, Ast *n, Type *named_type, Typ
}
return nullptr;
}
+
+ GB_ASSERT((e->flags & EntityFlag_Overridden) == 0);
+
if (e->parent_proc_decl != nullptr &&
e->parent_proc_decl != c->curr_proc_decl) {
if (e->kind == Entity_Variable) {
@@ -1195,8 +1201,6 @@ Entity *check_ident(CheckerContext *c, Operand *o, Ast *n, Type *named_type, Typ
if (e->state == EntityState_Unresolved) {
check_entity_decl(c, e, nullptr, named_type);
}
-
-
if (e->type == nullptr) {
// TODO(bill): Which is correct? return or compiler_error?
// compiler_error("How did this happen? type: %s; identifier: %.*s\n", type_to_string(e->type), LIT(name));
@@ -2212,6 +2216,10 @@ void check_shift(CheckerContext *c, Operand *x, Operand *y, Ast *node, Type *typ
return;
}
+ if (is_type_untyped(y->type)) {
+ convert_to_typed(c, y, t_uint);
+ }
+
x->mode = Addressing_Value;
}
@@ -2379,9 +2387,15 @@ bool check_cast_internal(CheckerContext *c, Operand *x, Type *type) {
if (core_type(bt)->kind == Type_Basic) {
if (check_representable_as_constant(c, x->value, bt, &x->value)) {
return true;
- } else if (is_type_pointer(type) && check_is_castable_to(c, x, type)) {
- return true;
+ } else if (check_is_castable_to(c, x, type)) {
+ if (is_type_pointer(type)) {
+ return true;
+ }
}
+ } else if (check_is_castable_to(c, x, type)) {
+ x->value = {};
+ x->mode = Addressing_Value;
+ return true;
}
} else if (check_is_castable_to(c, x, type)) {
if (x->mode != Addressing_Constant) {
@@ -2391,6 +2405,9 @@ bool check_cast_internal(CheckerContext *c, Operand *x, Type *type) {
} else if (is_type_union(type)) {
x->mode = Addressing_Value;
}
+ if (x->mode == Addressing_Value) {
+ x->value = {};
+ }
return true;
}
return false;
@@ -2503,6 +2520,10 @@ bool check_binary_array_expr(CheckerContext *c, Token op, Operand *x, Operand *y
return false;
}
+bool is_ise_expr(Ast *node) {
+ node = unparen_expr(node);
+ return node->kind == Ast_ImplicitSelectorExpr;
+}
void check_binary_expr(CheckerContext *c, Operand *x, Ast *node, Type *type_hint, bool use_lhs_as_type_hint=false) {
GB_ASSERT(node->kind == Ast_BinaryExpr);
@@ -2520,8 +2541,14 @@ void check_binary_expr(CheckerContext *c, Operand *x, Ast *node, Type *type_hint
case Token_CmpEq:
case Token_NotEq: {
// NOTE(bill): Allow comparisons between types
- check_expr_or_type(c, x, be->left, type_hint);
- check_expr_or_type(c, y, be->right, x->type);
+ if (is_ise_expr(be->left)) {
+ // Evaluate the right before the left for a '.X' expression
+ check_expr_or_type(c, y, be->right, type_hint);
+ check_expr_or_type(c, x, be->left, y->type);
+ } else {
+ check_expr_or_type(c, x, be->left, type_hint);
+ check_expr_or_type(c, y, be->right, x->type);
+ }
bool xt = x->mode == Addressing_Type;
bool yt = y->mode == Addressing_Type;
// If only one is a type, this is an error
@@ -2628,11 +2655,22 @@ void check_binary_expr(CheckerContext *c, Operand *x, Ast *node, Type *type_hint
return;
default:
- check_expr_with_type_hint(c, x, be->left, type_hint);
- if (use_lhs_as_type_hint) {
- check_expr_with_type_hint(c, y, be->right, x->type);
+ if (is_ise_expr(be->left)) {
+ // Evaluate the right before the left for a '.X' expression
+ check_expr_or_type(c, y, be->right, type_hint);
+
+ if (use_lhs_as_type_hint) { // RHS in this case
+ check_expr_or_type(c, x, be->left, y->type);
+ } else {
+ check_expr_with_type_hint(c, x, be->left, type_hint);
+ }
} else {
- check_expr_with_type_hint(c, y, be->right, type_hint);
+ check_expr_with_type_hint(c, x, be->left, type_hint);
+ if (use_lhs_as_type_hint) {
+ check_expr_with_type_hint(c, y, be->right, x->type);
+ } else {
+ check_expr_with_type_hint(c, y, be->right, type_hint);
+ }
}
break;
}
@@ -2852,13 +2890,33 @@ void update_expr_type(CheckerContext *c, Ast *e, Type *type, bool final) {
if (token_is_comparison(be->op.kind)) {
// NOTE(bill): Do nothing as the types are fine
} else if (token_is_shift(be->op.kind)) {
- update_expr_type(c, be->left, type, final);
+ update_expr_type(c, be->left, type, final);
} else {
update_expr_type(c, be->left, type, final);
update_expr_type(c, be->right, type, final);
}
case_end;
+ case_ast_node(te, TernaryIfExpr, e);
+ if (old.value.kind != ExactValue_Invalid) {
+ // See above note in UnaryExpr case
+ break;
+ }
+
+ update_expr_type(c, te->x, type, final);
+ update_expr_type(c, te->y, type, final);
+ case_end;
+
+ case_ast_node(te, TernaryWhenExpr, e);
+ if (old.value.kind != ExactValue_Invalid) {
+ // See above note in UnaryExpr case
+ break;
+ }
+
+ update_expr_type(c, te->x, type, final);
+ update_expr_type(c, te->y, type, final);
+ case_end;
+
case_ast_node(pe, ParenExpr, e);
update_expr_type(c, pe->expr, type, final);
case_end;
@@ -3146,8 +3204,8 @@ void convert_to_typed(CheckerContext *c, Operand *operand, Type *target_type) {
break;
}
- operand->type = target_type;
update_expr_type(c, operand->expr, target_type, true);
+ operand->type = target_type;
}
bool check_index_value(CheckerContext *c, bool open_range, Ast *index_value, i64 max_count, i64 *value, Type *type_hint=nullptr) {
@@ -3896,6 +3954,16 @@ bool check_assignment_arguments(CheckerContext *ctx, Array<Operand> const &lhs,
add_type_and_value(&c->checker->info, o.expr, o.mode, tuple, o.value);
}
+ if (o.mode == Addressing_OptionalOk && expr->kind == Ast_TypeAssertion) {
+ // NOTE(bill): Used only for optimizations in the backend
+ if (is_blank_ident(lhs[0].expr)) {
+ expr->TypeAssertion.ignores[0] = true;
+ }
+ if (is_blank_ident(lhs[1].expr)) {
+ expr->TypeAssertion.ignores[1] = true;
+ }
+ }
+
array_add(operands, val0);
array_add(operands, val1);
optional_ok = true;
@@ -4010,6 +4078,16 @@ bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize lhs_count,
add_type_and_value(&c->checker->info, o.expr, o.mode, tuple, o.value);
}
+ if (o.mode == Addressing_OptionalOk && expr->kind == Ast_TypeAssertion) {
+ // NOTE(bill): Used only for optimizations in the backend
+ if (is_blank_ident(lhs[0]->token)) {
+ expr->TypeAssertion.ignores[0] = true;
+ }
+ if (is_blank_ident(lhs[1]->token)) {
+ expr->TypeAssertion.ignores[1] = true;
+ }
+ }
+
array_add(operands, val0);
array_add(operands, val1);
optional_ok = true;
@@ -4034,6 +4112,16 @@ bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize lhs_count,
}
+bool is_expr_constant_zero(Ast *expr) {
+ GB_ASSERT(expr != nullptr);
+ auto v = exact_value_to_integer(expr->tav.value);
+ if (v.kind == ExactValue_Integer) {
+ return big_int_cmp_zero(&v.value_integer) == 0;
+ }
+ return false;
+}
+
+
CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
ast_node(ce, CallExpr, call);
GB_ASSERT(is_type_proc(proc_type));
@@ -4203,8 +4291,13 @@ CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
if (show_error) {
check_assignment(c, &o, t, str_lit("argument"));
}
- err = CallArgumentError_WrongTypes;
+ // TODO(bill, 2021-05-05): Is this incorrect logic to only fail if there is ambiguity for definite?
+ if (o.mode == Addressing_Invalid) {
+ err = CallArgumentError_WrongTypes;
+ }
}
+ } else if (show_error) {
+ check_assignment(c, &o, t, str_lit("argument"));
}
score += s;
@@ -4220,7 +4313,10 @@ CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
if (o.mode == Addressing_Type && is_type_typeid(e->type)) {
add_type_info_type(c, o.type);
add_type_and_value(c->info, o.expr, Addressing_Value, e->type, exact_value_typeid(o.type));
+ } else if (show_error && is_type_untyped(o.type)) {
+ update_expr_type(c, o.expr, t, true);
}
+
}
if (variadic) {
@@ -4258,6 +4354,8 @@ CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
check_assignment(c, &o, t, str_lit("argument"));
}
err = CallArgumentError_WrongTypes;
+ } else if (show_error) {
+ check_assignment(c, &o, t, str_lit("argument"));
}
score += s;
if (is_type_any(elem)) {
@@ -4266,6 +4364,8 @@ CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
if (o.mode == Addressing_Type && is_type_typeid(t)) {
add_type_info_type(c, o.type);
add_type_and_value(c->info, o.expr, Addressing_Value, t, exact_value_typeid(o.type));
+ } else if (show_error && is_type_untyped(o.type)) {
+ update_expr_type(c, o.expr, t, true);
}
}
}
@@ -4480,6 +4580,8 @@ CALL_ARGUMENT_CHECKER(check_named_call_arguments) {
err = CallArgumentError_NoneConstantParameter;
}
}
+ } else if (show_error) {
+ check_assignment(c, o, e->type, str_lit("procedure argument"));
}
score += s;
}
@@ -5414,7 +5516,7 @@ ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *call, Ast *pr
if (proc != nullptr &&
proc->kind == Ast_BasicDirective) {
ast_node(bd, BasicDirective, proc);
- String name = bd->name;
+ String name = bd->name.string;
if (name == "location" || name == "assert" || name == "panic" || name == "defined" || name == "config" || name == "load") {
operand->mode = Addressing_Builtin;
operand->builtin_id = BuiltinProc_DIRECTIVE;
@@ -5475,6 +5577,15 @@ ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *call, Ast *pr
if (operand->mode == Addressing_Type) {
Type *t = operand->type;
if (is_type_polymorphic_record(t)) {
+ if (!is_type_named(t)) {
+ gbString s = expr_to_string(operand->expr);
+ error(call, "Illegal use of an unnamed polymorphic record, %s", s);
+ gb_string_free(s);
+ operand->mode = Addressing_Invalid;
+ operand->type = t_invalid;
+ return Expr_Expr;
+ }
+
auto err = check_polymorphic_record_type(c, operand, call);
if (err == 0) {
Ast *ident = operand->expr;
@@ -5509,9 +5620,15 @@ ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *call, Ast *pr
}
check_expr(c, operand, arg);
if (operand->mode != Addressing_Invalid) {
- check_cast(c, operand, t);
+ if (is_type_polymorphic(t)) {
+ error(call, "A polymorphic type cannot be used in a type conversion");
+ } else {
+ // NOTE(bill): Otherwise the compiler can override the polymorphic type
+ // as it assumes it is determining the type
+ check_cast(c, operand, t);
+ }
}
-
+ operand->type = t;
break;
}
}
@@ -5661,7 +5778,7 @@ void check_expr_with_type_hint(CheckerContext *c, Operand *o, Ast *e, Type *t) {
break;
case Addressing_Type:
if (t == nullptr || !is_type_typeid(t)) {
- err_str = "is not an expression but a type";
+ err_str = "is not an expression but a type; in this context it is ambiguous";
}
break;
case Addressing_Builtin:
@@ -5849,8 +5966,9 @@ bool check_range(CheckerContext *c, Ast *node, Operand *x, Operand *y, ExactValu
TokenKind op = Token_Lt;
switch (ie->op.kind) {
- case Token_Ellipsis: op = Token_LtEq; break;
- case Token_RangeHalf: op = Token_Lt; break;
+ case Token_Ellipsis: op = Token_LtEq; break; // ..
+ case Token_RangeFull: op = Token_LtEq; break; // ..=
+ case Token_RangeHalf: op = Token_Lt; break; // ..<
default: error(ie->op, "Invalid range operator"); break;
}
bool ok = compare_exact_values(op, a, b);
@@ -5861,7 +5979,7 @@ bool check_range(CheckerContext *c, Ast *node, Operand *x, Operand *y, ExactValu
}
ExactValue inline_for_depth = exact_value_sub(b, a);
- if (ie->op.kind == Token_Ellipsis) {
+ if (ie->op.kind != Token_RangeHalf) {
inline_for_depth = exact_value_increment_one(inline_for_depth);
}
@@ -5896,6 +6014,88 @@ bool check_is_operand_compound_lit_constant(CheckerContext *c, Operand *o) {
}
+bool attempt_implicit_selector_expr(CheckerContext *c, Operand *o, AstImplicitSelectorExpr *ise, Type *th) {
+ if (is_type_enum(th)) {
+ Type *enum_type = base_type(th);
+ GB_ASSERT(enum_type->kind == Type_Enum);
+
+ String name = ise->selector->Ident.token.string;
+
+ Entity *e = scope_lookup_current(enum_type->Enum.scope, name);
+ if (e == nullptr) {
+ return false;
+ }
+ GB_ASSERT(are_types_identical(base_type(e->type), enum_type));
+ GB_ASSERT(e->kind == Entity_Constant);
+ o->value = e->Constant.value;
+ o->mode = Addressing_Constant;
+ o->type = e->type;
+ return true;
+ }
+ bool show_error = true;
+ if (is_type_union(th)) {
+ Type *union_type = base_type(th);
+ isize enum_count = 0;
+ Type *et = nullptr;
+
+ auto operands = array_make<Operand>(temporary_allocator(), 0, union_type->Union.variants.count);
+
+ for_array(i, union_type->Union.variants) {
+ Type *vt = union_type->Union.variants[i];
+
+ Operand x = {};
+ if (attempt_implicit_selector_expr(c, &x, ise, vt)) {
+ array_add(&operands, x);
+ }
+ }
+
+ if (operands.count == 1) {
+ *o = operands[0];
+ return true;
+ }
+ }
+ return false;
+}
+
+ExprKind check_implicit_selector_expr(CheckerContext *c, Operand *o, Ast *node, Type *type_hint) {
+ ast_node(ise, ImplicitSelectorExpr, node);
+
+ o->type = t_invalid;
+ o->expr = node;
+ o->mode = Addressing_Invalid;
+
+ Type *th = type_hint;
+
+ if (th == nullptr) {
+ gbString str = expr_to_string(node);
+ error(node, "Cannot determine type for implicit selector expression '%s'", str);
+ gb_string_free(str);
+ return Expr_Expr;
+ }
+ o->type = th;
+ Type *enum_type = th;
+
+ bool ok = attempt_implicit_selector_expr(c, o, ise, th);
+ if (!ok) {
+ String name = ise->selector->Ident.token.string;
+
+ if (is_type_enum(th)) {
+ gbString typ = type_to_string(th);
+ error(node, "Undeclared name %.*s for type '%s'", LIT(name), typ);
+ gb_string_free(typ);
+ } else {
+ gbString typ = type_to_string(th);
+ gbString str = expr_to_string(node);
+ error(node, "Invalid type '%s' for implicit selector expression '%s'", typ, str);
+ gb_string_free(str);
+ gb_string_free(typ);
+ }
+ }
+
+ o->expr = node;
+ return Expr_Expr;
+}
+
ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type *type_hint) {
u32 prev_state_flags = c->state_flags;
defer (c->state_flags = prev_state_flags);
@@ -5991,13 +6191,14 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
case_ast_node(bd, BasicDirective, node);
o->mode = Addressing_Constant;
- if (bd->name == "file") {
+ String name = bd->name.string;
+ if (name == "file") {
o->type = t_untyped_string;
o->value = exact_value_string(get_file_path_string(bd->token.pos.file_id));
- } else if (bd->name == "line") {
+ } else if (name == "line") {
o->type = t_untyped_integer;
o->value = exact_value_i64(bd->token.pos.line);
- } else if (bd->name == "procedure") {
+ } else if (name == "procedure") {
if (c->curr_proc_decl == nullptr) {
error(node, "#procedure may only be used within procedures");
o->type = t_untyped_string;
@@ -6006,7 +6207,7 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
o->type = t_untyped_string;
o->value = exact_value_string(c->proc_name);
}
- } else if (bd->name == "caller_location") {
+ } else if (name == "caller_location") {
init_core_source_code_location(c->checker);
error(node, "#caller_location may only be used as a default argument parameter");
o->type = t_source_code_location;
@@ -6061,88 +6262,6 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
o->type = type;
case_end;
- case_ast_node(te, TernaryExpr, node);
- Operand cond = {Addressing_Invalid};
- check_expr(c, &cond, te->cond);
- node->viral_state_flags |= te->cond->viral_state_flags;
-
- if (cond.mode != Addressing_Invalid && !is_type_boolean(cond.type)) {
- error(te->cond, "Non-boolean condition in if expression");
- }
-
- Operand x = {Addressing_Invalid};
- Operand y = {Addressing_Invalid};
- check_expr_or_type(c, &x, te->x, type_hint);
- node->viral_state_flags |= te->x->viral_state_flags;
-
- if (te->y != nullptr) {
- check_expr_or_type(c, &y, te->y, type_hint);
- node->viral_state_flags |= te->y->viral_state_flags;
- } else {
- error(node, "A ternary expression must have an else clause");
- return kind;
- }
-
- if (x.type == nullptr || x.type == t_invalid ||
- y.type == nullptr || y.type == t_invalid) {
- return kind;
- }
-
- if (x.mode == Addressing_Type && y.mode == Addressing_Type &&
- cond.mode == Addressing_Constant && is_type_boolean(cond.type)) {
- o->mode = Addressing_Type;
- if (cond.value.value_bool) {
- o->type = x.type;
- o->expr = x.expr;
- } else {
- o->type = y.type;
- o->expr = y.expr;
- }
- return Expr_Expr;
- }
-
- convert_to_typed(c, &x, y.type);
- if (x.mode == Addressing_Invalid) {
- return kind;
- }
- convert_to_typed(c, &y, x.type);
- if (y.mode == Addressing_Invalid) {
- x.mode = Addressing_Invalid;
- return kind;
- }
-
- if (!ternary_compare_types(x.type, y.type)) {
- gbString its = type_to_string(x.type);
- gbString ets = type_to_string(y.type);
- error(node, "Mismatched types in ternary expression, %s vs %s", its, ets);
- gb_string_free(ets);
- gb_string_free(its);
- return kind;
- }
-
- Type *type = x.type;
- if (is_type_untyped_nil(type) || is_type_untyped_undef(type)) {
- type = y.type;
- }
-
- o->type = type;
- o->mode = Addressing_Value;
-
- if (cond.mode == Addressing_Constant && is_type_boolean(cond.type) &&
- x.mode == Addressing_Constant &&
- y.mode == Addressing_Constant) {
-
- o->mode = Addressing_Constant;
-
- if (cond.value.value_bool) {
- o->value = x.value;
- } else {
- o->value = y.value;
- }
- }
-
- case_end;
-
case_ast_node(te, TernaryIfExpr, node);
Operand cond = {Addressing_Invalid};
check_expr(c, &cond, te->cond);
@@ -6255,7 +6374,7 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
if (cl->type->ArrayType.tag != nullptr) {
Ast *tag = cl->type->ArrayType.tag;
GB_ASSERT(tag->kind == Ast_BasicDirective);
- String name = tag->BasicDirective.name;
+ String name = tag->BasicDirective.name.string;
if (name == "soa") {
error(node, "#soa arrays are not supported for compound literals");
return kind;
@@ -6267,7 +6386,7 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
if (cl->elems.count > 0) {
Ast *tag = cl->type->DynamicArrayType.tag;
GB_ASSERT(tag->kind == Ast_BasicDirective);
- String name = tag->BasicDirective.name;
+ String name = tag->BasicDirective.name.string;
if (name == "soa") {
error(node, "#soa arrays are not supported for compound literals");
return kind;
@@ -7279,10 +7398,11 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
case_end;
case_ast_node(ue, UnaryExpr, node);
- Ast *prev_unary_address_hint = c->unary_address_hint;
- c->unary_address_hint = unparen_expr(node);
- check_expr_base(c, o, ue->expr, type_hint);
- c->unary_address_hint = prev_unary_address_hint;
+ Type *th = type_hint;
+ if (ue->op.kind == Token_And) {
+ th = type_deref(th);
+ }
+ check_expr_base(c, o, ue->expr, th);
node->viral_state_flags |= ue->expr->viral_state_flags;
if (o->mode == Addressing_Invalid) {
@@ -7444,68 +7564,7 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
case_ast_node(ise, ImplicitSelectorExpr, node);
- o->type = t_invalid;
- o->expr = node;
- o->mode = Addressing_Invalid;
-
- Type *th = type_hint;
-
- if (th == nullptr) {
- gbString str = expr_to_string(node);
- error(node, "Cannot determine type for implicit selector expression '%s'", str);
- gb_string_free(str);
- return Expr_Expr;
- }
- o->type = th;
- Type *enum_type = th;
-
- if (!is_type_enum(th)) {
- bool show_error = true;
- if (is_type_union(th)) {
- Type *union_type = base_type(th);
- isize enum_count = 0;
- Type *et = nullptr;
- for_array(i, union_type->Union.variants) {
- Type *vt = union_type->Union.variants[i];
- if (is_type_enum(vt)) {
- enum_count += 1;
- et = vt;
- }
- }
- if (enum_count == 1) {
- show_error = false;
- enum_type = et;
- }
- }
-
- if (show_error) {
- gbString typ = type_to_string(th);
- gbString str = expr_to_string(node);
- error(node, "Invalid type '%s' for implicit selector expression '%s'", typ, str);
- gb_string_free(str);
- gb_string_free(typ);
- return Expr_Expr;
- }
- }
- GB_ASSERT(ise->selector->kind == Ast_Ident);
- String name = ise->selector->Ident.token.string;
-
- enum_type = base_type(enum_type);
- GB_ASSERT(enum_type->kind == Type_Enum);
- Entity *e = scope_lookup_current(enum_type->Enum.scope, name);
- if (e == nullptr) {
- gbString typ = type_to_string(th);
- error(node, "Undeclared name %.*s for type '%s'", LIT(name), typ);
- gb_string_free(typ);
- return Expr_Expr;
- }
- GB_ASSERT(are_types_identical(base_type(e->type), enum_type));
- GB_ASSERT(e->kind == Entity_Constant);
- o->value = e->Constant.value;
- o->mode = Addressing_Constant;
- o->type = e->type;
-
- return Expr_Expr;
+ return check_implicit_selector_expr(c, o, node, type_hint);
case_end;
case_ast_node(ie, IndexExpr, node);
@@ -7542,47 +7601,6 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
return Expr_Expr;
}
- if (t->kind == Type_Struct) {
- TypeAtomOpTable *atom_op_table = t->Struct.atom_op_table;
- if (atom_op_table != nullptr) {
- if (atom_op_table->op[TypeAtomOp_index_set]) {
- if (c->assignment_lhs_hint == node) {
- o->mode = Addressing_AtomOpAssign;
- o->type = o->type;
- o->expr = node;
- return kind;
- }
- }
- if (atom_op_table->op[TypeAtomOp_index_get]) {
- Entity *e = atom_op_table->op[TypeAtomOp_index_get];
- if (ie->index == nullptr) {
- gbString str = expr_to_string(o->expr);
- error(o->expr, "Missing index for '%s'", str);
- gb_string_free(str);
- o->mode = Addressing_Invalid;
- o->expr = node;
- return kind;
- }
-
- GB_ASSERT(e->identifier != nullptr);
- Ast *proc_ident = clone_ast(e->identifier);
-
- auto args = array_make<Ast *>(heap_allocator(), 2);
- args[0] = ie->expr;
- args[1] = ie->index;
-
- GB_ASSERT(c->file != nullptr);
- Ast *fake_call = ast_call_expr(c->file, proc_ident, args, ie->open, ie->close, {});
- check_expr_base(c, o, fake_call, type_hint);
- AtomOpMapEntry entry = {TypeAtomOp_index_get, fake_call};
- map_set(&c->info->atom_op_map, hash_pointer(node), entry);
- o->expr = node;
- return kind;
- }
- }
- }
-
-
i64 max_count = -1;
bool valid = check_set_index_data(o, t, is_ptr, &max_count, o->type);
@@ -7721,37 +7739,6 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
if (is_type_soa_struct(t)) {
valid = true;
o->type = make_soa_struct_slice(c, nullptr, nullptr, t->Struct.soa_elem);
- } else {
- TypeAtomOpTable *atom_op_table = t->Struct.atom_op_table;
- if (atom_op_table != nullptr && atom_op_table->op[TypeAtomOp_slice]) {
- Entity *e = atom_op_table->op[TypeAtomOp_slice];
- GB_ASSERT(e->identifier != nullptr);
- Ast *proc_ident = clone_ast(e->identifier);
-
- Ast *expr = se->expr;
- if (o->mode == Addressing_Variable) {
- expr = ast_unary_expr(c->file, {Token_And, STR_LIT("&")}, expr);
- } else if (is_type_pointer(o->type)) {
- // Okay
- } else {
- gbString str = expr_to_string(node);
- error(node, "Cannot slice '%s', value is not addressable", str);
- gb_string_free(str);
- o->mode = Addressing_Invalid;
- o->expr = node;
- return kind;
- }
- auto args = array_make<Ast *>(heap_allocator(), 1);
- args[0] = expr;
-
-
- GB_ASSERT(c->file != nullptr);
- Ast *fake_call = ast_call_expr(c->file, proc_ident, args, se->open, se->close, {});
- check_expr_base(c, o, fake_call, type_hint);
- AtomOpMapEntry entry = {TypeAtomOp_slice, fake_call};
- map_set(&c->info->atom_op_map, hash_pointer(node), entry);
- valid = true;
- }
}
break;
@@ -7780,10 +7767,7 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
return kind;
}
- o->mode = Addressing_Value;
-
if (se->low == nullptr && se->high != nullptr) {
- // error(se->interval0, "1st index is required if a 2nd index is specified");
// It is okay to continue as it will assume the 1st index is zero
}
@@ -7818,6 +7802,16 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
}
}
+ if (max_count < 0) {
+ if (o->mode == Addressing_Constant) {
+ gbString s = expr_to_string(se->expr);
+ error(se->expr, "Cannot slice constant value '%s'", s);
+ gb_string_free(s);
+ }
+ }
+
+ o->mode = Addressing_Value;
+
if (is_type_string(t) && max_count >= 0) {
bool all_constant = true;
for (isize i = 0; i < gb_count_of(nodes); i++) {
@@ -8158,7 +8152,7 @@ gbString write_expr_to_string(gbString str, Ast *node, bool shorthand) {
case_ast_node(bd, BasicDirective, node);
str = gb_string_append_rune(str, '#');
- str = string_append_string(str, bd->name);
+ str = string_append_string(str, bd->name.string);
case_end;
case_ast_node(ud, Undef, node);
@@ -8222,20 +8216,22 @@ gbString write_expr_to_string(gbString str, Ast *node, bool shorthand) {
str = write_expr_to_string(str, be->right, shorthand);
case_end;
- case_ast_node(te, TernaryExpr, node);
- str = write_expr_to_string(str, te->cond, shorthand);
- str = gb_string_appendc(str, " ? ");
- str = write_expr_to_string(str, te->x, shorthand);
- str = gb_string_appendc(str, " : ");
- str = write_expr_to_string(str, te->y, shorthand);
- case_end;
-
case_ast_node(te, TernaryIfExpr, node);
- str = write_expr_to_string(str, te->x, shorthand);
- str = gb_string_appendc(str, " if ");
- str = write_expr_to_string(str, te->cond, shorthand);
- str = gb_string_appendc(str, " else ");
- str = write_expr_to_string(str, te->y, shorthand);
+ TokenPos x = ast_token(te->x).pos;
+ TokenPos cond = ast_token(te->cond).pos;
+ if (x < cond) {
+ str = write_expr_to_string(str, te->x, shorthand);
+ str = gb_string_appendc(str, " if ");
+ str = write_expr_to_string(str, te->cond, shorthand);
+ str = gb_string_appendc(str, " else ");
+ str = write_expr_to_string(str, te->y, shorthand);
+ } else {
+ str = write_expr_to_string(str, te->cond, shorthand);
+ str = gb_string_appendc(str, " ? ");
+ str = write_expr_to_string(str, te->x, shorthand);
+ str = gb_string_appendc(str, " : ");
+ str = write_expr_to_string(str, te->y, shorthand);
+ }
case_end;
case_ast_node(te, TernaryWhenExpr, node);
@@ -8525,8 +8521,18 @@ gbString write_expr_to_string(gbString str, Ast *node, bool shorthand) {
case_ast_node(st, StructType, node);
str = gb_string_appendc(str, "struct ");
+ if (st->polymorphic_params) {
+ str = gb_string_append_rune(str, '(');
+ str = write_expr_to_string(str, st->polymorphic_params, shorthand);
+ str = gb_string_appendc(str, ") ");
+ }
if (st->is_packed) str = gb_string_appendc(str, "#packed ");
if (st->is_raw_union) str = gb_string_appendc(str, "#raw_union ");
+ if (st->align) {
+ str = gb_string_appendc(str, "#align ");
+ str = write_expr_to_string(str, st->align, shorthand);
+ str = gb_string_append_rune(str, ' ');
+ }
str = gb_string_append_rune(str, '{');
if (shorthand) {
str = gb_string_appendc(str, "...");
@@ -8539,6 +8545,18 @@ gbString write_expr_to_string(gbString str, Ast *node, bool shorthand) {
case_ast_node(st, UnionType, node);
str = gb_string_appendc(str, "union ");
+ if (st->polymorphic_params) {
+ str = gb_string_append_rune(str, '(');
+ str = write_expr_to_string(str, st->polymorphic_params, shorthand);
+ str = gb_string_appendc(str, ") ");
+ }
+ if (st->no_nil) str = gb_string_appendc(str, "#no_nil ");
+ if (st->maybe) str = gb_string_appendc(str, "#maybe ");
+ if (st->align) {
+ str = gb_string_appendc(str, "#align ");
+ str = write_expr_to_string(str, st->align, shorthand);
+ str = gb_string_append_rune(str, ' ');
+ }
str = gb_string_append_rune(str, '{');
if (shorthand) {
str = gb_string_appendc(str, "...");
diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index a3c9a529c..7772b5c97 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -7,7 +7,7 @@ bool is_diverging_stmt(Ast *stmt) {
return false;
}
if (expr->CallExpr.proc->kind == Ast_BasicDirective) {
- String name = expr->CallExpr.proc->BasicDirective.name;
+ String name = expr->CallExpr.proc->BasicDirective.name.string;
return name == "panic";
}
Ast *proc = unparen_expr(expr->CallExpr.proc);
@@ -939,6 +939,7 @@ void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) {
TokenKind upper_op = Token_Invalid;
switch (be->op.kind) {
case Token_Ellipsis: upper_op = Token_GtEq; break;
+ case Token_RangeFull: upper_op = Token_GtEq; break;
case Token_RangeHalf: upper_op = Token_Gt; break;
default: GB_PANIC("Invalid range operator"); break;
}
@@ -960,9 +961,44 @@ void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) {
Operand b1 = rhs;
check_comparison(ctx, &a1, &b1, Token_LtEq);
- add_constant_switch_case(ctx, &seen, lhs);
- if (upper_op == Token_GtEq) {
- add_constant_switch_case(ctx, &seen, rhs);
+ if (is_type_enum(x.type)) {
+ // TODO(bill): Fix this logic so it's fast!!!
+
+ i64 v0 = exact_value_to_i64(lhs.value);
+ i64 v1 = exact_value_to_i64(rhs.value);
+ Operand v = {};
+ v.mode = Addressing_Constant;
+ v.type = x.type;
+ v.expr = x.expr;
+
+ Type *bt = base_type(x.type);
+ GB_ASSERT(bt->kind == Type_Enum);
+ for (i64 vi = v0; vi <= v1; vi++) {
+ if (upper_op != Token_GtEq && vi == v1) {
+ break;
+ }
+
+ bool found = false;
+ for_array(j, bt->Enum.fields) {
+ Entity *f = bt->Enum.fields[j];
+ GB_ASSERT(f->kind == Entity_Constant);
+
+ i64 fv = exact_value_to_i64(f->Constant.value);
+ if (fv == vi) {
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ v.value = exact_value_i64(vi);
+ add_constant_switch_case(ctx, &seen, v);
+ }
+ }
+ } else {
+ add_constant_switch_case(ctx, &seen, lhs);
+ if (upper_op == Token_GtEq) {
+ add_constant_switch_case(ctx, &seen, rhs);
+ }
}
if (is_type_string(x.type)) {
@@ -1400,6 +1436,28 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
gbString expr_str = expr_to_string(operand.expr);
error(node, "Expression is not used: '%s'", expr_str);
gb_string_free(expr_str);
+ if (operand.expr->kind == Ast_BinaryExpr) {
+ ast_node(be, BinaryExpr, operand.expr);
+ if (be->op.kind != Token_CmpEq) {
+ break;
+ }
+
+ switch (be->left->tav.mode) {
+ case Addressing_Context:
+ case Addressing_Variable:
+ case Addressing_MapIndex:
+ case Addressing_SoaVariable:
+ {
+ gbString lhs = expr_to_string(be->left);
+ gbString rhs = expr_to_string(be->right);
+ error_line("\tSuggestion: Did you mean to do an assignment?\n", lhs, rhs);
+ error_line("\t '%s = %s;'\n", lhs, rhs);
+ gb_string_free(rhs);
+ gb_string_free(lhs);
+ }
+ break;
+ }
+ }
break;
}
@@ -1454,53 +1512,6 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
auto lhs_to_ignore = array_make<bool>(temporary_allocator(), lhs_count);
isize max = gb_min(lhs_count, rhs_count);
- // NOTE(bill, 2020-05-02): This is an utter hack to get these custom atom operations working
- // correctly for assignments
- for (isize i = 0; i < max; i++) {
- if (lhs_operands[i].mode == Addressing_AtomOpAssign) {
- Operand lhs = lhs_operands[i];
-
- Type *t = base_type(lhs.type);
- GB_ASSERT(t->kind == Type_Struct);
- ast_node(ie, IndexExpr, unparen_expr(lhs.expr));
-
- TypeAtomOpTable *atom_op_table = t->Struct.atom_op_table;
- GB_ASSERT(atom_op_table->op[TypeAtomOp_index_set] != nullptr);
- Entity *e = atom_op_table->op[TypeAtomOp_index_set];
-
- GB_ASSERT(e->identifier != nullptr);
- Ast *proc_ident = clone_ast(e->identifier);
- GB_ASSERT(ctx->file != nullptr);
-
-
- TypeAndValue tv = type_and_value_of_expr(ie->expr);
- Ast *expr = ie->expr;
- if (is_type_pointer(tv.type)) {
- // Okay
- } else if (tv.mode == Addressing_Variable) {
- // NOTE(bill): Hack it to take the address instead
- expr = ast_unary_expr(ctx->file, {Token_And, STR_LIT("&")}, ie->expr);
- } else {
- continue;
- }
-
- auto args = array_make<Ast *>(heap_allocator(), 3);
- args[0] = expr;
- args[1] = ie->index;
- args[2] = rhs_operands[i].expr;
-
- Ast *fake_call = ast_call_expr(ctx->file, proc_ident, args, ie->open, ie->close, {});
- Operand fake_operand = {};
- fake_operand.expr = lhs.expr;
- check_expr_base(ctx, &fake_operand, fake_call, nullptr);
- AtomOpMapEntry entry = {TypeAtomOp_index_set, fake_call};
- map_set(&ctx->info->atom_op_map, hash_pointer(lhs.expr), entry);
-
- lhs_to_ignore[i] = true;
-
- }
- }
-
for (isize i = 0; i < max; i++) {
if (lhs_to_ignore[i]) {
continue;
@@ -1526,8 +1537,8 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
}
Operand lhs = {Addressing_Invalid};
Operand rhs = {Addressing_Invalid};
- Ast binary_expr = {Ast_BinaryExpr};
- ast_node(be, BinaryExpr, &binary_expr);
+ Ast *binary_expr = alloc_ast_node(node->file, Ast_BinaryExpr);
+ ast_node(be, BinaryExpr, binary_expr);
be->op = op;
be->op.kind = cast(TokenKind)(cast(i32)be->op.kind - (Token_AddEq - Token_Add));
// NOTE(bill): Only use the first one will be used
@@ -1535,7 +1546,7 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
be->right = as->rhs[0];
check_expr(ctx, &lhs, as->lhs[0]);
- check_binary_expr(ctx, &rhs, &binary_expr, nullptr, true);
+ check_binary_expr(ctx, &rhs, binary_expr, nullptr, true);
if (rhs.mode == Addressing_Invalid) {
return;
}
@@ -1632,7 +1643,11 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
} else {
for (isize i = 0; i < result_count; i++) {
Entity *e = pt->results->Tuple.variables[i];
- check_assignment(ctx, &operands[i], e->type, str_lit("return statement"));
+ Operand *o = &operands[i];
+ check_assignment(ctx, o, e->type, str_lit("return statement"));
+ if (is_type_untyped(o->type)) {
+ update_expr_type(ctx, o->expr, e->type, true);
+ }
}
}
case_end;
diff --git a/src/check_type.cpp b/src/check_type.cpp
index aef1ddc7a..419904876 100644
--- a/src/check_type.cpp
+++ b/src/check_type.cpp
@@ -322,19 +322,6 @@ void add_polymorphic_record_entity(CheckerContext *ctx, Ast *node, Type *named_t
array_add(&array, e);
map_set(&ctx->checker->info.gen_types, hash_pointer(original_type), array);
}
-
- {
- Type *dst_bt = base_type(named_type);
- Type *src_bt = base_type(original_type);
- if ((dst_bt != nullptr && src_bt != nullptr) &&
- (dst_bt->kind == src_bt->kind)){
- if (dst_bt->kind == Type_Struct) {
- if (dst_bt->Struct.atom_op_table == nullptr) {
- dst_bt->Struct.atom_op_table = src_bt->Struct.atom_op_table;
- }
- }
- }
- }
}
Type *check_record_polymorphic_params(CheckerContext *ctx, Ast *polymorphic_params,
@@ -944,6 +931,7 @@ void check_bit_set_type(CheckerContext *c, Type *type, Type *named_type, Ast *no
switch (be->op.kind) {
case Token_Ellipsis:
+ case Token_RangeFull:
if (upper - lower >= bits) {
error(bs->elem, "bit_set range is greater than %lld bits, %lld bits are required", bits, (upper-lower+1));
}
@@ -1203,10 +1191,15 @@ ParameterValue handle_parameter_value(CheckerContext *ctx, Type *in_type, Type *
if (allow_caller_location &&
expr->kind == Ast_BasicDirective &&
- expr->BasicDirective.name == "caller_location") {
+ expr->BasicDirective.name.string == "caller_location") {
init_core_source_code_location(ctx->checker);
param_value.kind = ParameterValue_Location;
o.type = t_source_code_location;
+
+ if (in_type) {
+ check_assignment(ctx, &o, in_type, str_lit("parameter value"));
+ }
+
} else {
if (in_type) {
check_expr_with_type_hint(ctx, &o, expr, in_type);
@@ -1214,6 +1207,11 @@ ParameterValue handle_parameter_value(CheckerContext *ctx, Type *in_type, Type *
check_expr(ctx, &o, expr);
}
+ if (in_type) {
+ check_assignment(ctx, &o, in_type, str_lit("parameter value"));
+ }
+
+
if (is_operand_nil(o)) {
param_value.kind = ParameterValue_Nil;
} else if (o.mode != Addressing_Constant) {
@@ -1221,16 +1219,7 @@ ParameterValue handle_parameter_value(CheckerContext *ctx, Type *in_type, Type *
param_value.kind = ParameterValue_Constant;
param_value.value = exact_value_procedure(expr);
} else {
- Entity *e = nullptr;
- // if (o.mode == Addressing_Value && is_type_proc(o.type)) {
- if (o.mode == Addressing_Value || o.mode == Addressing_Variable) {
- Operand x = {};
- if (expr->kind == Ast_Ident) {
- e = check_ident(ctx, &x, expr, nullptr, nullptr, false);
- } else if (expr->kind == Ast_SelectorExpr) {
- e = check_selector(ctx, &x, expr, nullptr);
- }
- }
+ Entity *e = entity_from_expr(o.expr);
if (e != nullptr) {
if (e->kind == Entity_Procedure) {
@@ -1253,8 +1242,11 @@ ParameterValue handle_parameter_value(CheckerContext *ctx, Type *in_type, Type *
} else if (allow_caller_location && o.mode == Addressing_Context) {
param_value.kind = ParameterValue_Value;
param_value.ast_value = expr;
+ } else if (o.value.kind != ExactValue_Invalid) {
+ param_value.kind = ParameterValue_Constant;
+ param_value.value = o.value;
} else {
- error(expr, "Default parameter must be a constant");
+ error(expr, "Default parameter must be a constant, %d", o.mode);
}
}
} else {
@@ -1267,12 +1259,14 @@ ParameterValue handle_parameter_value(CheckerContext *ctx, Type *in_type, Type *
}
}
- if (in_type) {
- check_assignment(ctx, &o, in_type, str_lit("parameter value"));
+ if (out_type_) {
+ if (in_type != nullptr) {
+ *out_type_ = in_type;
+ } else {
+ *out_type_ = default_type(o.type);
+ }
}
- if (out_type_) *out_type_ = default_type(o.type);
-
return param_value;
}
@@ -1389,6 +1383,9 @@ Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_params, bool *is
}
}
}
+
+
+
if (type == nullptr) {
error(param, "Invalid parameter type");
type = t_invalid;
@@ -1408,6 +1405,21 @@ Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_params, bool *is
type = t_invalid;
}
+ if (is_type_polymorphic(type)) {
+ switch (param_value.kind) {
+ case ParameterValue_Invalid:
+ case ParameterValue_Constant:
+ case ParameterValue_Nil:
+ break;
+ case ParameterValue_Location:
+ case ParameterValue_Value:
+ gbString str = type_to_string(type);
+ error(params[i], "A default value for a parameter must not be a polymorphic constant type, got %s", str);
+ gb_string_free(str);
+ break;
+ }
+ }
+
if (p->flags&FieldFlag_c_vararg) {
if (p->type == nullptr ||
@@ -2109,6 +2121,12 @@ void add_map_key_type_dependencies(CheckerContext *ctx, Type *key) {
Entity *field = key->Struct.fields[i];
add_map_key_type_dependencies(ctx, field->type);
}
+ } else if (key->kind == Type_Union) {
+ add_package_dependency(ctx, "runtime", "default_hasher_n");
+ for_array(i, key->Union.variants) {
+ Type *v = key->Union.variants[i];
+ add_map_key_type_dependencies(ctx, v);
+ }
} else if (key->kind == Type_EnumeratedArray) {
add_package_dependency(ctx, "runtime", "default_hasher_n");
add_map_key_type_dependencies(ctx, key->EnumeratedArray.elem);
@@ -2511,16 +2529,6 @@ bool check_type_internal(CheckerContext *ctx, Ast *e, Type **type, Type *named_t
return true;
}
}
-
- // if (ctx->type_level == 0 && entity->state == EntityState_InProgress) {
- // error(entity->token, "Illegal declaration cycle of `%.*s`", LIT(entity->token.string));
- // for_array(j, *ctx->type_path) {
- // Entity *k = (*ctx->type_path)[j];
- // error(k->token, "\t%.*s refers to", LIT(k->token.string));
- // }
- // error(entity->token, "\t%.*s", LIT(entity->token.string));
- // *type = t_invalid;
- // }
return true;
}
@@ -2703,7 +2711,7 @@ bool check_type_internal(CheckerContext *ctx, Ast *e, Type **type, Type *named_t
bool is_partial = false;
if (at->tag != nullptr) {
GB_ASSERT(at->tag->kind == Ast_BasicDirective);
- String name = at->tag->BasicDirective.name;
+ String name = at->tag->BasicDirective.name.string;
if (name == "partial") {
is_partial = true;
} else {
@@ -2737,7 +2745,7 @@ bool check_type_internal(CheckerContext *ctx, Ast *e, Type **type, Type *named_t
if (at->tag != nullptr) {
GB_ASSERT(at->tag->kind == Ast_BasicDirective);
- String name = at->tag->BasicDirective.name;
+ String name = at->tag->BasicDirective.name.string;
if (name == "soa") {
*type = make_soa_struct_fixed(ctx, e, at->elem, elem, count, generic_type);
} else if (name == "simd") {
@@ -2762,7 +2770,7 @@ bool check_type_internal(CheckerContext *ctx, Ast *e, Type **type, Type *named_t
if (at->tag != nullptr) {
GB_ASSERT(at->tag->kind == Ast_BasicDirective);
- String name = at->tag->BasicDirective.name;
+ String name = at->tag->BasicDirective.name.string;
if (name == "soa") {
*type = make_soa_struct_slice(ctx, e, at->elem, elem);
} else {
@@ -2782,7 +2790,7 @@ bool check_type_internal(CheckerContext *ctx, Ast *e, Type **type, Type *named_t
Type *elem = check_type(ctx, dat->elem);
if (dat->tag != nullptr) {
GB_ASSERT(dat->tag->kind == Ast_BasicDirective);
- String name = dat->tag->BasicDirective.name;
+ String name = dat->tag->BasicDirective.name.string;
if (name == "soa") {
*type = make_soa_struct_dynamic_array(ctx, e, dat->elem, elem);
} else {
@@ -2883,16 +2891,6 @@ bool check_type_internal(CheckerContext *ctx, Ast *e, Type **type, Type *named_t
}
case_end;
- case_ast_node(te, TernaryExpr, e);
- Operand o = {};
- check_expr_or_type(ctx, &o, e);
- if (o.mode == Addressing_Type) {
- *type = o.type;
- set_base_type(named_type, *type);
- return true;
- }
- case_end;
-
case_ast_node(te, TernaryIfExpr, e);
Operand o = {};
check_expr_or_type(ctx, &o, e);
diff --git a/src/checker.cpp b/src/checker.cpp
index 878435d67..21ca4c398 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -675,55 +675,44 @@ Entity *add_global_entity(Entity *entity, Scope *scope=builtin_pkg->scope) {
return entity;
}
-void add_global_constant(String name, Type *type, ExactValue value) {
+void add_global_constant(char const *name, Type *type, ExactValue value) {
Entity *entity = alloc_entity(Entity_Constant, nullptr, make_token_ident(name), type);
entity->Constant.value = value;
add_global_entity(entity);
}
-void add_global_string_constant(String name, String value) {
+void add_global_string_constant(char const *name, String const &value) {
add_global_constant(name, t_untyped_string, exact_value_string(value));
}
+void add_global_bool_constant(char const *name, bool value) {
+ add_global_constant(name, t_untyped_bool, exact_value_bool(value));
+}
void add_global_type_entity(String name, Type *type) {
add_global_entity(alloc_entity_type_name(nullptr, make_token_ident(name), type));
}
-
-void init_universal(void) {
- BuildContext *bc = &build_context;
-
- // NOTE(bill): No need to free these
- // gbAllocator a = heap_allocator();
+AstPackage *create_builtin_package(char const *name) {
gbAllocator a = permanent_allocator();
+ AstPackage *pkg = gb_alloc_item(a, AstPackage);
+ pkg->name = make_string_c(name);
+ pkg->kind = Package_Normal;
- builtin_pkg = gb_alloc_item(a, AstPackage);
- builtin_pkg->name = str_lit("builtin");
- builtin_pkg->kind = Package_Normal;
-
- builtin_pkg->scope = create_scope(nullptr);
- builtin_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global | ScopeFlag_Builtin;
- builtin_pkg->scope->pkg = builtin_pkg;
-
- intrinsics_pkg = gb_alloc_item(a, AstPackage);
- intrinsics_pkg->name = str_lit("intrinsics");
- intrinsics_pkg->kind = Package_Normal;
-
- intrinsics_pkg->scope = create_scope(nullptr);
- intrinsics_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global | ScopeFlag_Builtin;
- intrinsics_pkg->scope->pkg = intrinsics_pkg;
-
- config_pkg = gb_alloc_item(a, AstPackage);
- config_pkg->name = str_lit("config");
- config_pkg->kind = Package_Normal;
+ pkg->scope = create_scope(nullptr);
+ pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global | ScopeFlag_Builtin;
+ pkg->scope->pkg = pkg;
+ return pkg;
+}
- config_pkg->scope = create_scope(nullptr);
- config_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global | ScopeFlag_Builtin;
- config_pkg->scope->pkg = config_pkg;
+void init_universal(void) {
+ BuildContext *bc = &build_context;
+ builtin_pkg = create_builtin_package("builtin");
+ intrinsics_pkg = create_builtin_package("intrinsics");
+ config_pkg = create_builtin_package("config");
// Types
for (isize i = 0; i < gb_count_of(basic_types); i++) {
@@ -740,23 +729,23 @@ void init_universal(void) {
}
// Constants
- add_global_constant(str_lit("true"), t_untyped_bool, exact_value_bool(true));
- add_global_constant(str_lit("false"), t_untyped_bool, exact_value_bool(false));
-
add_global_entity(alloc_entity_nil(str_lit("nil"), t_untyped_nil));
+ add_global_bool_constant("true", true);
+ add_global_bool_constant("false", false);
+
// TODO(bill): Set through flags in the compiler
- add_global_string_constant(str_lit("ODIN_OS"), bc->ODIN_OS);
- add_global_string_constant(str_lit("ODIN_ARCH"), bc->ODIN_ARCH);
- add_global_string_constant(str_lit("ODIN_ENDIAN"), bc->ODIN_ENDIAN);
- add_global_string_constant(str_lit("ODIN_VENDOR"), bc->ODIN_VENDOR);
- add_global_string_constant(str_lit("ODIN_VERSION"), bc->ODIN_VERSION);
- add_global_string_constant(str_lit("ODIN_ROOT"), bc->ODIN_ROOT);
- add_global_constant(str_lit("ODIN_DEBUG"), t_untyped_bool, exact_value_bool(bc->ODIN_DEBUG));
- add_global_constant(str_lit("ODIN_DISABLE_ASSERT"), t_untyped_bool, exact_value_bool(bc->ODIN_DISABLE_ASSERT));
- add_global_constant(str_lit("ODIN_DEFAULT_TO_NIL_ALLOCATOR"), t_untyped_bool, exact_value_bool(bc->ODIN_DEFAULT_TO_NIL_ALLOCATOR));
- add_global_constant(str_lit("ODIN_NO_DYNAMIC_LITERALS"), t_untyped_bool, exact_value_bool(bc->no_dynamic_literals));
- add_global_constant(str_lit("ODIN_TEST"), t_untyped_bool, exact_value_bool(bc->command_kind == Command_test));
+ add_global_string_constant("ODIN_OS", bc->ODIN_OS);
+ add_global_string_constant("ODIN_ARCH", bc->ODIN_ARCH);
+ add_global_string_constant("ODIN_ENDIAN", bc->ODIN_ENDIAN);
+ add_global_string_constant("ODIN_VENDOR", bc->ODIN_VENDOR);
+ add_global_string_constant("ODIN_VERSION", bc->ODIN_VERSION);
+ add_global_string_constant("ODIN_ROOT", bc->ODIN_ROOT);
+ add_global_bool_constant("ODIN_DEBUG", bc->ODIN_DEBUG);
+ add_global_bool_constant("ODIN_DISABLE_ASSERT", bc->ODIN_DISABLE_ASSERT);
+ add_global_bool_constant("ODIN_DEFAULT_TO_NIL_ALLOCATOR", bc->ODIN_DEFAULT_TO_NIL_ALLOCATOR);
+ add_global_bool_constant("ODIN_NO_DYNAMIC_LITERALS", bc->no_dynamic_literals);
+ add_global_bool_constant("ODIN_TEST", bc->command_kind == Command_test);
// Builtin Procedures
@@ -801,8 +790,6 @@ void init_universal(void) {
}
GB_ASSERT(type != nullptr);
-
-
Entity *entity = alloc_entity_constant(nullptr, make_token_ident(name), type, value);
entity->state = EntityState_Resolved;
if (scope_insert(config_pkg->scope, entity)) {
@@ -960,7 +947,11 @@ Entity *entity_of_node(Ast *expr) {
expr = unparen_expr(expr);
switch (expr->kind) {
case_ast_node(ident, Ident, expr);
- return ident->entity;
+ Entity *e = ident->entity;
+ if (e && e->flags & EntityFlag_Overridden) {
+ // GB_PANIC("use of an overriden entity: %.*s", LIT(e->token.string));
+ }
+ return e;
case_end;
case_ast_node(se, SelectorExpr, expr);
Ast *s = unselector_expr(se->selector);
@@ -973,7 +964,6 @@ Entity *entity_of_node(Ast *expr) {
return nullptr;
}
-
DeclInfo *decl_info_of_entity(Entity *e) {
if (e != nullptr) {
return e->decl_info;
@@ -1870,7 +1860,8 @@ void generate_minimum_dependency_set(Checker *c, Entity *start) {
continue;
}
- if (e->file == nullptr || !e->file->is_test) {
+ // if (e->file == nullptr || !e->file->is_test) {
+ if (e->file == nullptr) {
continue;
}
@@ -2675,206 +2666,6 @@ DECL_ATTRIBUTE_PROC(type_decl_attribute) {
if (name == "private") {
// NOTE(bill): Handled elsewhere `check_collect_value_decl`
return true;
- } else if (name == "index_get") {
- if (value != nullptr) {
- Operand o = {};
- check_expr_or_type(c, &o, value);
- Entity *e = entity_of_node(value);
- if (e != nullptr && e->kind == Entity_Procedure) {
- if (ac->deferred_procedure.entity != nullptr) {
- error(elem, "Previous usage of the '%.*s' attribute", LIT(name));
- }
-
- bool valid = true;
-
- {
- Type *pt = base_type(e->type);
- GB_ASSERT(pt->kind == Type_Proc);
-
- if (pt->Proc.result_count == 0) {
- error(value, "'%s' attribute must return something", LIT(name));
- valid = false;
- }
-
- if (pt->Proc.param_count < 2) {
- error(value, "'%s' attribute must allow for 2 parameters", LIT(name));
- valid = false;
- } else {
- isize minimum_param_count = 0;
- for_array(i, pt->Proc.params->Tuple.variables) {
- Entity *param = pt->Proc.params->Tuple.variables[i];
- if (param->kind == Entity_Variable) {
- if (param->Variable.param_value.kind == ParameterValue_Invalid) {
- minimum_param_count += 1;
- } else {
- break;
- }
- } else if (param->kind == Entity_Constant) {
- minimum_param_count += 1;
- } else {
- break;
- }
- }
-
- if (minimum_param_count > 2) {
- error(value, "'%s' attribute must allow for at a minimum 2 parameters", LIT(name));
- valid = false;
- }
- }
- }
-
- if (valid) {
- if (ac->atom_op_table == nullptr) {
- ac->atom_op_table = gb_alloc_item(permanent_allocator(), TypeAtomOpTable);
- }
- ac->atom_op_table->op[TypeAtomOp_index_get] = e;
- }
- return true;
- }
- }
- error(elem, "Expected a procedure entity for '%.*s'", LIT(name));
- return false;
- } else if (name == "index_set") {
- if (value != nullptr) {
- Operand o = {};
- check_expr_or_type(c, &o, value);
- Entity *e = entity_of_node(value);
- if (e != nullptr && e->kind == Entity_Procedure) {
- if (ac->deferred_procedure.entity != nullptr) {
- error(elem, "Previous usage of the '%.*s' attribute", LIT(name));
- }
-
- bool valid = true;
-
- {
- Type *pt = base_type(e->type);
- GB_ASSERT(pt->kind == Type_Proc);
-
- if (pt->Proc.param_count < 3) {
- error(value, "'%s' attribute must allow for 3 parameters", LIT(name));
- valid = false;
- } else {
- isize minimum_param_count = 0;
- for_array(i, pt->Proc.params->Tuple.variables) {
- Entity *param = pt->Proc.params->Tuple.variables[i];
- if (param->kind == Entity_Variable) {
- if (param->Variable.param_value.kind == ParameterValue_Invalid) {
- minimum_param_count += 1;
- } else {
- break;
- }
- } else if (param->kind == Entity_Constant) {
- minimum_param_count += 1;
- } else {
- break;
- }
- }
-
- if (minimum_param_count > 3) {
- error(value, "'%s' attribute must allow for at a minimum 3 parameters", LIT(name));
- valid = false;
- }
- }
-
- if (pt->Proc.variadic || pt->Proc.c_vararg) {
- error(value, "'%s' attribute does not allow variadic procedures", LIT(name));
- valid = false;
- }
- }
-
- if (valid) {
- if (ac->atom_op_table == nullptr) {
- ac->atom_op_table = gb_alloc_item(permanent_allocator(), TypeAtomOpTable);
- }
- ac->atom_op_table->op[TypeAtomOp_index_set] = e;
- }
- return true;
- }
- }
- error(elem, "Expected a procedure entity for '%.*s'", LIT(name));
- return false;
- } else if (name == "slice") {
- if (value != nullptr) {
- Operand o = {};
- check_expr_or_type(c, &o, value);
- Entity *e = entity_of_node(value);
- if (e != nullptr && e->kind == Entity_Procedure) {
- if (ac->deferred_procedure.entity != nullptr) {
- error(elem, "Previous usage of the '%.*s' attribute", LIT(name));
- }
-
- bool valid = true;
-
- {
- Type *pt = base_type(e->type);
- GB_ASSERT(pt->kind == Type_Proc);
-
- if (pt->Proc.param_count < 1) {
- error(value, "'%s' attribute must allow for 1 parameter", LIT(name));
- valid = false;
- } else {
- isize minimum_param_count = 0;
- for_array(i, pt->Proc.params->Tuple.variables) {
- Entity *param = pt->Proc.params->Tuple.variables[i];
- if (param->kind == Entity_Variable) {
- if (param->Variable.param_value.kind == ParameterValue_Invalid) {
- minimum_param_count += 1;
- } else {
- break;
- }
- } else if (param->kind == Entity_Constant) {
- minimum_param_count += 1;
- } else {
- break;
- }
- }
-
- if (minimum_param_count > 1) {
- error(value, "'%s' attribute must allow for at a minimum 1 parameter", LIT(name));
- valid = false;
- }
- {
- Entity *param = pt->Proc.params->Tuple.variables[0];
- Type *param_type = base_type(param->type);
- if (is_type_pointer(param_type) && !is_type_rawptr(param_type)) {
- // okay
- } else {
- error(value, "'%s' attribute's first parameter must be a pointer", LIT(name));
- valid = false;
- }
-
- }
- }
-
- if (pt->Proc.variadic || pt->Proc.c_vararg) {
- error(value, "'%s' attribute does not allow variadic procedures", LIT(name));
- valid = false;
- }
-
- if (pt->Proc.result_count != 1) {
- error(value, "'%s' attribute must return 1 result", LIT(name));
- valid = false;
- } else {
- Type *rt = pt->Proc.results->Tuple.variables[0]->type;
- rt = base_type(rt);
- if (!is_type_slice(rt)) {
- error(value, "'%s' attribute must return a slice", LIT(name));
- valid = false;
- }
- }
- }
-
- if (valid) {
- if (ac->atom_op_table == nullptr) {
- ac->atom_op_table = gb_alloc_item(permanent_allocator(), TypeAtomOpTable);
- }
- ac->atom_op_table->op[TypeAtomOp_slice] = e;
- }
- return true;
- }
- }
- error(elem, "Expected a procedure entity for '%.*s'", LIT(name));
- return false;
}
return false;
}
@@ -4498,6 +4289,38 @@ void check_unchecked_bodies(Checker *c) {
}
}
+void check_test_names(Checker *c) {
+ if (build_context.test_names.entries.count == 0) {
+ return;
+ }
+
+ AstPackage *pkg = c->info.init_package;
+ Scope *s = pkg->scope;
+
+ for_array(i, build_context.test_names.entries) {
+ String name = build_context.test_names.entries[i].value;
+ Entity *e = scope_lookup(s, name);
+ if (e == nullptr) {
+ Token tok = {};
+ if (pkg->files.count != 0) {
+ tok = pkg->files[0]->tokens[0];
+ }
+ error(tok, "Unable to find the test '%.*s' in 'package %.*s' ", LIT(name), LIT(pkg->name));
+ }
+ }
+
+ for (isize i = 0; i < c->info.testing_procedures.count; /**/) {
+ Entity *e = c->info.testing_procedures[i];
+ String name = e->token.string;
+ if (!string_set_exists(&build_context.test_names, name)) {
+ array_ordered_remove(&c->info.testing_procedures, i);
+ } else {
+ i += 1;
+ }
+ }
+
+}
+
void check_parsed_files(Checker *c) {
#define TIME_SECTION(str) do { if (build_context.show_more_timings) timings_start_section(&global_timings, str_lit(str)); } while (0)
@@ -4572,6 +4395,9 @@ void check_parsed_files(Checker *c) {
TIME_SECTION("generate minimum dependency set");
generate_minimum_dependency_set(c, c->info.entry_point);
+ TIME_SECTION("check test names");
+ check_test_names(c);
+
TIME_SECTION("calculate global init order");
// Calculate initialization order of global variables
calculate_global_init_order(c);
diff --git a/src/checker.hpp b/src/checker.hpp
index 4ff72717d..38628ed51 100644
--- a/src/checker.hpp
+++ b/src/checker.hpp
@@ -114,7 +114,6 @@ struct AttributeContext {
String deprecated_message;
DeferredProcedure deferred_procedure;
u32 optimization_mode; // ProcedureOptimizationMode
- struct TypeAtomOpTable *atom_op_table;
};
AttributeContext make_attribute_context(String link_prefix) {
@@ -335,7 +334,6 @@ struct CheckerContext {
Scope * polymorphic_scope;
Ast *assignment_lhs_hint;
- Ast *unary_address_hint;
};
struct Checker {
diff --git a/src/checker_builtin_procs.hpp b/src/checker_builtin_procs.hpp
index 98ef5180b..b69bacd30 100644
--- a/src/checker_builtin_procs.hpp
+++ b/src/checker_builtin_procs.hpp
@@ -56,6 +56,11 @@ enum BuiltinProcId {
BuiltinProc_overflow_sub,
BuiltinProc_overflow_mul,
+ BuiltinProc_sqrt,
+
+ BuiltinProc_mem_copy,
+ BuiltinProc_mem_copy_non_overlapping,
+
BuiltinProc_volatile_store,
BuiltinProc_volatile_load,
@@ -197,6 +202,8 @@ BuiltinProc__type_simple_boolean_end,
BuiltinProc_type_is_specialization_of,
+ BuiltinProc_type_is_variant_of,
+
BuiltinProc_type_struct_field_count,
BuiltinProc_type_proc_parameter_count,
@@ -276,6 +283,11 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
{STR_LIT("overflow_sub"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("overflow_mul"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
+ {STR_LIT("sqrt"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
+
+ {STR_LIT("mem_copy"), 3, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
+ {STR_LIT("mem_copy_non_overlapping"), 3, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
+
{STR_LIT("volatile_store"), 2, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
{STR_LIT("volatile_load"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
@@ -415,6 +427,8 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
{STR_LIT("type_is_specialization_of"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
+ {STR_LIT("type_is_variant_of"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
+
{STR_LIT("type_struct_field_count"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_proc_parameter_count"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
diff --git a/src/docs_writer.cpp b/src/docs_writer.cpp
index f4fd02376..03ea25930 100644
--- a/src/docs_writer.cpp
+++ b/src/docs_writer.cpp
@@ -697,6 +697,9 @@ OdinDocTypeIndex odin_doc_type(OdinDocWriter *w, Type *type) {
case ProcCC_None:
calling_convention = str_lit("none");
break;
+ case ProcCC_Naked:
+ calling_convention = str_lit("naked");
+ break;
case ProcCC_InlineAsm:
calling_convention = str_lit("inline-assembly");
break;
diff --git a/src/entity.cpp b/src/entity.cpp
index d1f4c78e6..460f4ec6d 100644
--- a/src/entity.cpp
+++ b/src/entity.cpp
@@ -126,6 +126,8 @@ struct Entity {
Entity * using_parent;
Ast * using_expr;
+ Entity * aliased_of;
+
lbModule * code_gen_module;
lbProcedure *code_gen_procedure;
diff --git a/src/llvm_abi.cpp b/src/llvm_abi.cpp
index 65e3b2c58..aba85ae83 100644
--- a/src/llvm_abi.cpp
+++ b/src/llvm_abi.cpp
@@ -10,21 +10,35 @@ struct lbArgType {
LLVMTypeRef cast_type; // Optional
LLVMTypeRef pad_type; // Optional
LLVMAttributeRef attribute; // Optional
+ LLVMAttributeRef align_attribute; // Optional
};
+
+i64 lb_sizeof(LLVMTypeRef type);
+i64 lb_alignof(LLVMTypeRef type);
+
lbArgType lb_arg_type_direct(LLVMTypeRef type, LLVMTypeRef cast_type, LLVMTypeRef pad_type, LLVMAttributeRef attr) {
- return lbArgType{lbArg_Direct, type, cast_type, pad_type, attr};
+ return lbArgType{lbArg_Direct, type, cast_type, pad_type, attr, nullptr};
}
lbArgType lb_arg_type_direct(LLVMTypeRef type) {
return lb_arg_type_direct(type, nullptr, nullptr, nullptr);
}
lbArgType lb_arg_type_indirect(LLVMTypeRef type, LLVMAttributeRef attr) {
- return lbArgType{lbArg_Indirect, type, nullptr, nullptr, attr};
+ return lbArgType{lbArg_Indirect, type, nullptr, nullptr, attr, nullptr};
+}
+
+lbArgType lb_arg_type_indirect_byval(LLVMContextRef c, LLVMTypeRef type) {
+ i64 alignment = lb_alignof(type);
+ alignment = gb_max(alignment, 8);
+
+ LLVMAttributeRef byval_attr = lb_create_enum_attribute_with_type(c, "byval", type);
+ LLVMAttributeRef align_attr = lb_create_enum_attribute(c, "align", alignment);
+ return lbArgType{lbArg_Indirect, type, nullptr, nullptr, byval_attr, align_attr};
}
lbArgType lb_arg_type_ignore(LLVMTypeRef type) {
- return lbArgType{lbArg_Ignore, type, nullptr, nullptr, nullptr};
+ return lbArgType{lbArg_Ignore, type, nullptr, nullptr, nullptr, nullptr};
}
struct lbFunctionType {
@@ -121,6 +135,9 @@ void lb_add_function_type_attributes(LLVMValueRef fn, lbFunctionType *ft, ProcCa
if (arg->attribute) {
LLVMAddAttributeAtIndex(fn, arg_index+1, arg->attribute);
}
+ if (arg->align_attribute) {
+ LLVMAddAttributeAtIndex(fn, arg_index+1, arg->align_attribute);
+ }
arg_index++;
}
@@ -145,8 +162,6 @@ void lb_add_function_type_attributes(LLVMValueRef fn, lbFunctionType *ft, ProcCa
}
-i64 lb_sizeof(LLVMTypeRef type);
-i64 lb_alignof(LLVMTypeRef type);
i64 lb_sizeof(LLVMTypeRef type) {
LLVMTypeKind kind = LLVMGetTypeKind(type);
@@ -271,110 +286,6 @@ i64 lb_alignof(LLVMTypeRef type) {
return 1;
}
-#if 0
-Type *lb_abi_to_odin_type(lbModule *m, LLVMTypeRef type, bool is_return, u32 level = 0) {
- Type **found = map_get(&m->llvm_types, hash_pointer(type));
- if (found) {
- return *found;
- }
- GB_ASSERT_MSG(level < 64, "%s %d", LLVMPrintTypeToString(type), is_return);
-
- LLVMTypeKind kind = LLVMGetTypeKind(type);
- switch (kind) {
- case LLVMVoidTypeKind:
- return nullptr;
- case LLVMIntegerTypeKind:
- {
- unsigned w = LLVMGetIntTypeWidth(type);
- if (w == 1) {
- return t_llvm_bool;
- }
- unsigned bytes = (w + 7)/8;
- switch (bytes) {
- case 1: return t_u8;
- case 2: return t_u16;
- case 4: return t_u32;
- case 8: return t_u64;
- case 16: return t_u128;
- }
- GB_PANIC("Unhandled integer type");
- }
- case LLVMFloatTypeKind:
- return t_f32;
- case LLVMDoubleTypeKind:
- return t_f64;
- case LLVMPointerTypeKind:
- {
- LLVMTypeRef elem = LLVMGetElementType(type);
- if (lb_is_type_kind(elem, LLVMFunctionTypeKind)) {
- unsigned param_count = LLVMCountParamTypes(elem);
- LLVMTypeRef *params = gb_alloc_array(heap_allocator(), LLVMTypeRef, param_count);
- defer (gb_free(heap_allocator(), params));
- LLVMGetParamTypes(elem, params);
-
- Type **param_types = gb_alloc_array(heap_allocator(), Type *, param_count);
- defer (gb_free(heap_allocator(), param_types));
-
- for (unsigned i = 0; i < param_count; i++) {
- param_types[i] = lb_abi_to_odin_type(m, params[i], false, level+1);
- }
-
- LLVMTypeRef ret = LLVMGetReturnType(elem);
- Type *ret_type = lb_abi_to_odin_type(m, ret, true, level+1);
-
- bool is_c_vararg = !!LLVMIsFunctionVarArg(elem);
- return alloc_type_proc_from_types(param_types, param_count, ret_type, is_c_vararg);
- }
- return alloc_type_pointer(lb_abi_to_odin_type(m, elem, false, level+1));
- }
- case LLVMFunctionTypeKind:
- GB_PANIC("LLVMFunctionTypeKind should not be seen on its own");
- break;
-
- case LLVMStructTypeKind:
- {
- unsigned field_count = LLVMCountStructElementTypes(type);
- Type **fields = gb_alloc_array(heap_allocator(), Type *, field_count);
- for (unsigned i = 0; i < field_count; i++) {
- LLVMTypeRef field_type = LLVMStructGetTypeAtIndex(type, i);
- if (lb_is_type_kind(field_type, LLVMPointerTypeKind) && level > 0) {
- fields[i] = t_rawptr;
- } else {
- fields[i] = lb_abi_to_odin_type(m, field_type, false, level+1);
- }
- }
- if (is_return) {
- return alloc_type_tuple_from_field_types(fields, field_count, !!LLVMIsPackedStruct(type), false);
- } else {
- return alloc_type_struct_from_field_types(fields, field_count, !!LLVMIsPackedStruct(type));
- }
- }
- break;
- case LLVMArrayTypeKind:
- {
-
- i64 count = LLVMGetArrayLength(type);
- Type *elem = lb_abi_to_odin_type(m, LLVMGetElementType(type), false, level+1);
- return alloc_type_array(elem, count);
- }
- break;
-
- case LLVMX86_MMXTypeKind:
- return t_vector_x86_mmx;
- case LLVMVectorTypeKind:
- {
- i64 count = LLVMGetVectorSize(type);
- Type *elem = lb_abi_to_odin_type(m, LLVMGetElementType(type), false, level+1);
- return alloc_type_simd_vector(count, elem);
- }
-
- }
- GB_PANIC("Unhandled type for lb_abi_to_odin_type -> %s", LLVMPrintTypeToString(type));
-
- return 0;
-}
-#endif
-
#define LB_ABI_INFO(name) lbFunctionType *name(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count, LLVMTypeRef return_type, bool return_is_defined, ProcCallingConvention calling_convention)
typedef LB_ABI_INFO(lbAbiInfoType);
@@ -432,7 +343,7 @@ namespace lbAbi386 {
if (sz == 0) {
args[i] = lb_arg_type_ignore(t);
} else {
- args[i] = lb_arg_type_indirect(t, lb_create_enum_attribute(c, "byval"));
+ args[i] = lb_arg_type_indirect(t, nullptr);
}
} else {
args[i] = non_struct(c, t, false);
@@ -452,7 +363,7 @@ namespace lbAbi386 {
case 4: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 32), nullptr, nullptr);
case 8: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 64), nullptr, nullptr);
}
- LLVMAttributeRef attr = lb_create_enum_attribute(c, "sret");
+ LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", return_type);
return lb_arg_type_indirect(return_type, attr);
}
return non_struct(c, return_type, true);
@@ -523,8 +434,14 @@ namespace lbAbiAmd64SysV {
switch (reg_class) {
case RegClass_SSEFs:
case RegClass_SSEFv:
+ case RegClass_SSEDs:
case RegClass_SSEDv:
return true;
+ case RegClass_SSEInt8:
+ case RegClass_SSEInt16:
+ case RegClass_SSEInt32:
+ case RegClass_SSEInt64:
+ return true;
}
return false;
}
@@ -610,9 +527,9 @@ namespace lbAbiAmd64SysV {
if (is_mem_cls(cls, attribute_kind)) {
LLVMAttributeRef attribute = nullptr;
if (attribute_kind == Amd64TypeAttribute_ByVal) {
- attribute = lb_create_enum_attribute(c, "byval");
+ return lb_arg_type_indirect_byval(c, type);
} else if (attribute_kind == Amd64TypeAttribute_StructRect) {
- attribute = lb_create_enum_attribute(c, "sret");
+ attribute = lb_create_enum_attribute_with_type(c, "sret", type);
}
return lb_arg_type_indirect(type, attribute);
} else {
@@ -642,30 +559,48 @@ namespace lbAbiAmd64SysV {
return reg_classes;
}
- void unify(Array<RegClass> *cls, i64 i, RegClass newv) {
- RegClass &oldv = (*cls)[i];
+ void unify(Array<RegClass> *cls, i64 i, RegClass const newv) {
+ RegClass const oldv = (*cls)[i];
if (oldv == newv) {
return;
- } else if (oldv == RegClass_NoClass) {
- oldv = newv;
+ }
+
+ RegClass to_write = newv;
+ if (oldv == RegClass_NoClass) {
+ to_write = newv;
} else if (newv == RegClass_NoClass) {
return;
} else if (oldv == RegClass_Memory || newv == RegClass_Memory) {
- return;
- } else if (oldv == RegClass_Int || newv == RegClass_Int) {
- return;
- } else if (oldv == RegClass_X87 || oldv == RegClass_X87Up || oldv == RegClass_ComplexX87 ||
- newv == RegClass_X87 || newv == RegClass_X87Up || newv == RegClass_ComplexX87) {
- oldv = RegClass_Memory;
- } else {
- oldv = newv;
+ to_write = RegClass_Memory;
+ } else if (oldv == RegClass_Int || newv == RegClass_Int) {
+ to_write = RegClass_Int;
+ } else if (oldv == RegClass_X87 || oldv == RegClass_X87Up || oldv == RegClass_ComplexX87) {
+ to_write = RegClass_Memory;
+ } else if (newv == RegClass_X87 || newv == RegClass_X87Up || newv == RegClass_ComplexX87) {
+ to_write = RegClass_Memory;
+ } else if (newv == RegClass_SSEUp) {
+ switch (oldv) {
+ case RegClass_SSEFv:
+ case RegClass_SSEFs:
+ case RegClass_SSEDv:
+ case RegClass_SSEDs:
+ case RegClass_SSEInt8:
+ case RegClass_SSEInt16:
+ case RegClass_SSEInt32:
+ case RegClass_SSEInt64:
+ return;
+ }
}
+
+ (*cls)[i] = to_write;
}
void fixup(LLVMTypeRef t, Array<RegClass> *cls) {
i64 i = 0;
i64 e = cls->count;
- if (e > 2 && (lb_is_type_kind(t, LLVMStructTypeKind) || lb_is_type_kind(t, LLVMArrayTypeKind))) {
+ if (e > 2 && (lb_is_type_kind(t, LLVMStructTypeKind) ||
+ lb_is_type_kind(t, LLVMArrayTypeKind) ||
+ lb_is_type_kind(t, LLVMVectorTypeKind))) {
RegClass &oldv = (*cls)[i];
if (is_sse(oldv)) {
for (i++; i < e; i++) {
@@ -709,8 +644,8 @@ namespace lbAbiAmd64SysV {
unsigned llvec_len(Array<RegClass> const &reg_classes, isize offset) {
unsigned len = 1;
- for (isize i = offset+1; i < reg_classes.count; i++) {
- if (reg_classes[offset] != RegClass_SSEFv && reg_classes[i] != RegClass_SSEUp) {
+ for (isize i = offset; i < reg_classes.count; i++) {
+ if (reg_classes[i] != RegClass_SSEUp) {
break;
}
len++;
@@ -721,7 +656,7 @@ namespace lbAbiAmd64SysV {
LLVMTypeRef llreg(LLVMContextRef c, Array<RegClass> const &reg_classes) {
auto types = array_make<LLVMTypeRef>(heap_allocator(), 0, reg_classes.count);
- for_array(i, reg_classes) {
+ for (isize i = 0; i < reg_classes.count; /**/) {
RegClass reg_class = reg_classes[i];
switch (reg_class) {
case RegClass_Int:
@@ -763,7 +698,7 @@ namespace lbAbiAmd64SysV {
break;
}
- unsigned vec_len = llvec_len(reg_classes, i);
+ unsigned vec_len = llvec_len(reg_classes, i+1);
LLVMTypeRef vec_type = LLVMVectorType(elem_type, vec_len * elems_per_word);
array_add(&types, vec_type);
i += vec_len;
@@ -779,9 +714,9 @@ namespace lbAbiAmd64SysV {
default:
GB_PANIC("Unhandled RegClass");
}
+ i += 1;
}
- GB_ASSERT(types.count != 0);
if (types.count == 1) {
return types[0];
}
@@ -893,7 +828,7 @@ namespace lbAbiAmd64SysV {
if (sz == 0) {
args[i] = lb_arg_type_ignore(t);
} else {
- args[i] = lb_arg_type_indirect(t, lb_create_enum_attribute(c, "byval"));
+ args[i] = lb_arg_type_indirect_byval(c, t);
}
} else {
args[i] = non_struct(c, t);
@@ -913,7 +848,7 @@ namespace lbAbiAmd64SysV {
case 4: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 32), nullptr, nullptr);
case 8: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 64), nullptr, nullptr);
}
- LLVMAttributeRef attr = lb_create_enum_attribute(c, "sret");
+ LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", return_type);
return lb_arg_type_indirect(return_type, attr);
} else if (build_context.metrics.os == TargetOs_windows && lb_is_type_kind(return_type, LLVMIntegerTypeKind) && lb_sizeof(return_type) == 16) {
return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 128), nullptr, nullptr);
@@ -1063,7 +998,7 @@ namespace lbAbiArm64 {
}
return lb_arg_type_direct(type, cast_type, nullptr, nullptr);
} else {
- LLVMAttributeRef attr = lb_create_enum_attribute(c, "sret");
+ LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", type);
return lb_arg_type_indirect(type, attr);
}
}
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 758f8e5d1..08c9445bd 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -1,8 +1,21 @@
+#define MULTITHREAD_OBJECT_GENERATION 1
+
+#ifndef USE_SEPARTE_MODULES
+#define USE_SEPARTE_MODULES build_context.use_separate_modules
+#endif
+
+#ifndef MULTITHREAD_OBJECT_GENERATION
+#define MULTITHREAD_OBJECT_GENERATION 0
+#endif
+
+
#include "llvm_backend.hpp"
#include "llvm_abi.cpp"
#include "llvm_backend_opt.cpp"
-gb_global lbAddr lb_global_type_info_data = {};
+gb_global ThreadPool lb_thread_pool = {};
+
+gb_global Entity *lb_global_type_info_data_entity = {};
gb_global lbAddr lb_global_type_info_member_types = {};
gb_global lbAddr lb_global_type_info_member_names = {};
gb_global lbAddr lb_global_type_info_member_offsets = {};
@@ -17,6 +30,12 @@ gb_global isize lb_global_type_info_member_usings_index = 0;
gb_global isize lb_global_type_info_member_tags_index = 0;
+lbValue lb_global_type_info_data_ptr(lbModule *m) {
+ lbValue v = lb_find_value_from_entity(m, lb_global_type_info_data_entity);
+ return v;
+}
+
+
struct lbLoopData {
lbAddr idx_addr;
lbValue idx;
@@ -74,6 +93,15 @@ bool lb_is_instr_terminating(LLVMValueRef instr) {
+lbModule *lb_pkg_module(lbGenerator *gen, AstPackage *pkg) {
+ auto *found = map_get(&gen->modules, hash_pointer(pkg));
+ if (found) {
+ return *found;
+ }
+ return &gen->default_module;
+}
+
+
lbAddr lb_addr(lbValue addr) {
lbAddr v = {lbAddr_Default, addr};
if (addr.type != nullptr && is_type_relative_pointer(type_deref(addr.type))) {
@@ -184,7 +212,7 @@ void lb_emit_bounds_check(lbProcedure *p, Token token, lbValue index, lbValue le
if (build_context.no_bounds_check) {
return;
}
- if ((p->module->state_flags & StateFlag_no_bounds_check) != 0) {
+ if ((p->state_flags & StateFlag_no_bounds_check) != 0) {
return;
}
@@ -209,7 +237,7 @@ void lb_emit_slice_bounds_check(lbProcedure *p, Token token, lbValue low, lbValu
if (build_context.no_bounds_check) {
return;
}
- if ((p->module->state_flags & StateFlag_no_bounds_check) != 0) {
+ if ((p->state_flags & StateFlag_no_bounds_check) != 0) {
return;
}
@@ -372,18 +400,7 @@ void lb_addr_store(lbProcedure *p, lbAddr addr, lbValue value) {
GB_ASSERT(is_type_polymorphic(e->type));
{
- lbValue *found = nullptr;
- if (p->module != e->code_gen_module) {
- gb_mutex_lock(&p->module->mutex);
- }
- GB_ASSERT(e->code_gen_module != nullptr);
- found = map_get(&e->code_gen_module->values, hash_entity(e));
- if (p->module != e->code_gen_module) {
- gb_mutex_unlock(&p->module->mutex);
- }
- GB_ASSERT_MSG(found != nullptr, "%.*s", LIT(e->token.string));
-
- lb_emit_call(p, *found, args);
+ lb_emit_call(p, lb_find_procedure_value_from_entity(p->module, e), args);
}
return;
@@ -501,6 +518,38 @@ void lb_const_store(lbValue ptr, lbValue value) {
}
+bool lb_is_type_proc_recursive(Type *t) {
+ for (;;) {
+ if (t == nullptr) {
+ return false;
+ }
+ switch (t->kind) {
+ case Type_Named:
+ t = t->Named.base;
+ break;
+ case Type_Pointer:
+ t = t->Pointer.elem;
+ break;
+ case Type_Array:
+ t = t->Array.elem;
+ break;
+ case Type_EnumeratedArray:
+ t = t->EnumeratedArray.elem;
+ break;
+ case Type_Slice:
+ t = t->Slice.elem;
+ break;
+ case Type_DynamicArray:
+ t = t->DynamicArray.elem;
+ break;
+ case Type_Proc:
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
void lb_emit_store(lbProcedure *p, lbValue ptr, lbValue value) {
GB_ASSERT(value.value != nullptr);
Type *a = type_deref(ptr.type);
@@ -513,7 +562,7 @@ void lb_emit_store(lbProcedure *p, lbValue ptr, lbValue value) {
GB_ASSERT_MSG(are_types_identical(ca, core_type(value.type)), "%s != %s", type_to_string(a), type_to_string(value.type));
}
- if (is_type_proc(a)) {
+ if (lb_is_type_proc_recursive(a)) {
// NOTE(bill, 2020-11-11): Because of certain LLVM rules, a procedure value may be
// stored as regular pointer with no procedure information
@@ -1110,7 +1159,7 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
case Basic_uintptr: return LLVMIntTypeInContext(ctx, 8*cast(unsigned)build_context.word_size);
- case Basic_rawptr: return LLVMPointerType(LLVMInt8Type(), 0);
+ case Basic_rawptr: return LLVMPointerType(LLVMInt8TypeInContext(ctx), 0);
case Basic_string:
{
char const *name = "..string";
@@ -1126,7 +1175,7 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
LLVMStructSetBody(type, fields, 2, false);
return type;
}
- case Basic_cstring: return LLVMPointerType(LLVMInt8Type(), 0);
+ case Basic_cstring: return LLVMPointerType(LLVMInt8TypeInContext(ctx), 0);
case Basic_any:
{
char const *name = "..any";
@@ -1452,21 +1501,35 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
}
if (param_index < param_count) {
params[param_index++] = lb_type(m, t_rawptr);
- // params[param_index++] = lb_type(m, t_context_ptr);
}
GB_ASSERT(param_index == param_count);
-
lbFunctionType *ft = lb_get_abi_info(m->ctx, params, param_count, ret, ret != nullptr, type->Proc.calling_convention);
+ {
+ for_array(j, ft->args) {
+ auto arg = ft->args[j];
+ GB_ASSERT_MSG(LLVMGetTypeContext(arg.type) == ft->ctx,
+ "\n\t%s %td/%td"
+ "\n\tArgTypeCtx: %p\n\tCurrentCtx: %p\n\tGlobalCtx: %p",
+ LLVMPrintTypeToString(arg.type),
+ j, ft->args.count,
+ LLVMGetTypeContext(arg.type), ft->ctx, LLVMGetGlobalContext());
+ }
+ GB_ASSERT_MSG(LLVMGetTypeContext(ft->ret.type) == ft->ctx,
+ "\n\t%s"
+ "\n\tRetTypeCtx: %p\n\tCurrentCtx: %p\n\tGlobalCtx: %p",
+ LLVMPrintTypeToString(ft->ret.type),
+ LLVMGetTypeContext(ft->ret.type), ft->ctx, LLVMGetGlobalContext());
+ }
+
map_set(&m->function_type_map, hash_type(type), ft);
LLVMTypeRef new_abi_fn_ptr_type = lb_function_type_to_llvm_ptr(ft, type->Proc.c_vararg);
LLVMTypeRef new_abi_fn_type = LLVMGetElementType(new_abi_fn_ptr_type);
- // LLVMTypeRef new_ret = LLVMGetReturnType(new_abi_fn_type);
- // LLVMTypeRef old_ret = LLVMGetReturnType(old_abi_fn_type);
- // unsigned new_count = LLVMCountParamTypes(new_abi_fn_type);
- // unsigned old_count = LLVMCountParamTypes(old_abi_fn_type);
- // GB_ASSERT_MSG(new_count == old_count, "%u %u, %s %s", new_count, old_count, LLVMPrintTypeToString(new_abi_fn_type), LLVMPrintTypeToString(old_abi_fn_type));
+ GB_ASSERT_MSG(LLVMGetTypeContext(new_abi_fn_type) == m->ctx,
+ "\n\tFuncTypeCtx: %p\n\tCurrentCtx: %p\n\tGlobalCtx: %p",
+ LLVMGetTypeContext(new_abi_fn_type), m->ctx, LLVMGetGlobalContext());
+
return new_abi_fn_ptr_type;
}
@@ -1823,13 +1886,32 @@ LLVMMetadataRef lb_debug_type_internal(lbModule *m, Type *type) {
case Type_Pointer:
return LLVMDIBuilderCreatePointerType(m->debug_builder, lb_debug_type(m, type->Pointer.elem), word_bits, word_bits, 0, nullptr, 0);
- case Type_Array:
+ case Type_Array: {
+ LLVMMetadataRef subscripts[1] = {};
+ subscripts[0] = LLVMDIBuilderGetOrCreateSubrange(m->debug_builder,
+ 0ll,
+ type->Array.count
+ );
+
return LLVMDIBuilderCreateArrayType(m->debug_builder,
- type->Array.count, 8*cast(unsigned)type_align_of(type), lb_debug_type(m, type->Array.elem), nullptr, 0);
+ 8*cast(uint64_t)type_size_of(type),
+ 8*cast(unsigned)type_align_of(type),
+ lb_debug_type(m, type->Array.elem),
+ subscripts, gb_count_of(subscripts));
+ }
case Type_EnumeratedArray: {
+ LLVMMetadataRef subscripts[1] = {};
+ subscripts[0] = LLVMDIBuilderGetOrCreateSubrange(m->debug_builder,
+ 0ll,
+ type->EnumeratedArray.count
+ );
+
LLVMMetadataRef array_type = LLVMDIBuilderCreateArrayType(m->debug_builder,
- type->EnumeratedArray.count, 8*cast(unsigned)type_align_of(type), lb_debug_type(m, type->EnumeratedArray.elem), nullptr, 0);
+ 8*cast(uint64_t)type_size_of(type),
+ 8*cast(unsigned)type_align_of(type),
+ lb_debug_type(m, type->EnumeratedArray.elem),
+ subscripts, gb_count_of(subscripts));
gbString name = type_to_string(type, temporary_allocator());
return LLVMDIBuilderCreateTypedef(m->debug_builder, array_type, name, gb_string_length(name), nullptr, 0, nullptr, cast(u32)(8*type_align_of(type)));
}
@@ -2187,7 +2269,7 @@ void lb_debug_complete_types(lbModule *m) {
8*type_size_of(bt)-word_bits + 1*word_bits,
LLVMDIFlagZero, lb_debug_type(m, t_int)
);
- elements[3] = LLVMDIBuilderCreateMemberType(
+ elements[2] = LLVMDIBuilderCreateMemberType(
m->debug_builder, record_scope,
".allocator", 12,
file, 0,
@@ -2463,20 +2545,41 @@ lbValue lb_emit_string(lbProcedure *p, lbValue str_elem, lbValue str_len) {
}
}
-LLVMAttributeRef lb_create_enum_attribute(LLVMContextRef ctx, char const *name, u64 value) {
+LLVMAttributeRef lb_create_enum_attribute_with_type(LLVMContextRef ctx, char const *name, LLVMTypeRef type) {
String s = make_string_c(name);
// NOTE(2021-02-25, bill); All these attributes require a type associated with them
// and the current LLVM C API does not expose this functionality yet.
// It is better to ignore the attributes for the time being
if (s == "byval") {
- return nullptr;
+ // return nullptr;
} else if (s == "byref") {
return nullptr;
} else if (s == "preallocated") {
return nullptr;
} else if (s == "sret") {
- return nullptr;
+ // return nullptr;
+ }
+
+ unsigned kind = LLVMGetEnumAttributeKindForName(name, s.len);
+ GB_ASSERT_MSG(kind != 0, "unknown attribute: %s", name);
+ return LLVMCreateEnumAttribute(ctx, kind, 0);
+}
+
+LLVMAttributeRef lb_create_enum_attribute(LLVMContextRef ctx, char const *name, u64 value) {
+ String s = make_string_c(name);
+
+ // NOTE(2021-02-25, bill); All these attributes require a type associated with them
+ // and the current LLVM C API does not expose this functionality yet.
+ // It is better to ignore the attributes for the time being
+ if (s == "byval") {
+ GB_PANIC("lb_create_enum_attribute_with_type should be used for %s", name);
+ } else if (s == "byref") {
+ GB_PANIC("lb_create_enum_attribute_with_type should be used for %s", name);
+ } else if (s == "preallocated") {
+ GB_PANIC("lb_create_enum_attribute_with_type should be used for %s", name);
+ } else if (s == "sret") {
+ GB_PANIC("lb_create_enum_attribute_with_type should be used for %s", name);
}
unsigned kind = LLVMGetEnumAttributeKindForName(name, s.len);
@@ -2514,10 +2617,18 @@ void lb_ensure_abi_function_type(lbModule *m, lbProcedure *p) {
GB_ASSERT(p->abi_function_type != nullptr);
}
-lbProcedure *lb_create_procedure(lbModule *m, Entity *entity) {
+lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool ignore_body) {
GB_ASSERT(entity != nullptr);
+ GB_ASSERT(entity->kind == Entity_Procedure);
+
+ String link_name = {};
- String link_name = lb_get_entity_name(m, entity);
+ if (ignore_body) {
+ lbModule *other_module = lb_pkg_module(m->gen, entity->pkg);
+ link_name = lb_get_entity_name(other_module, entity);
+ } else {
+ link_name = lb_get_entity_name(m, entity);
+ }
{
StringHashKey key = string_hash_string(link_name);
@@ -2588,6 +2699,10 @@ lbProcedure *lb_create_procedure(lbModule *m, Entity *entity) {
lb_add_attribute_to_proc(m, p->value, "noreturn");
}
+ if (pt->Proc.calling_convention == ProcCC_Naked) {
+ lb_add_attribute_to_proc(m, p->value, "naked");
+ }
+
switch (p->inlining) {
case ProcInlining_inline:
lb_add_attribute_to_proc(m, p->value, "alwaysinline");
@@ -2681,6 +2796,11 @@ lbProcedure *lb_create_procedure(lbModule *m, Entity *entity) {
}
}
+ if (ignore_body) {
+ p->body = nullptr;
+ LLVMSetLinkage(p->value, LLVMExternalLinkage);
+ }
+
if (m->debug_builder) { // Debug Information
Type *bt = base_type(p->type);
@@ -2851,72 +2971,6 @@ Type *struct_type_from_systemv_distribute_struct_fields(Type *abi_type) {
}
-lbValue lb_add_param(lbProcedure *p, Entity *e, Ast *expr, Type *abi_type, i32 index) {
- lbParamPasskind kind = lbParamPass_Value;
- lbValue v = lb_value_param(p, e, abi_type, index, &kind);
- array_add(&p->params, v);
-
- lbValue res = {};
-
- switch (kind) {
- case lbParamPass_Value: {
- lbAddr l = lb_add_local(p, e->type, e, false, index);
- lbValue x = v;
- if (abi_type == t_llvm_bool) {
- x = lb_emit_conv(p, x, t_bool);
- }
- lb_addr_store(p, l, x);
- return x;
- }
- case lbParamPass_Pointer:
- lb_add_entity(p->module, e, v);
- return lb_emit_load(p, v);
-
- case lbParamPass_Integer: {
- lbAddr l = lb_add_local(p, e->type, e, false, index);
- lbValue iptr = lb_emit_conv(p, l.addr, alloc_type_pointer(abi_type));
- lb_emit_store(p, iptr, v);
- return lb_addr_load(p, l);
- }
-
- case lbParamPass_ConstRef:
- lb_add_entity(p->module, e, v);
- return lb_emit_load(p, v);
-
- case lbParamPass_BitCast: {
- lbAddr l = lb_add_local(p, e->type, e, false, index);
- lbValue x = lb_emit_transmute(p, v, e->type);
- lb_addr_store(p, l, x);
- return x;
- }
- case lbParamPass_Tuple: {
- lbAddr l = lb_add_local(p, e->type, e, true, index);
- Type *st = struct_type_from_systemv_distribute_struct_fields(abi_type);
- lbValue ptr = lb_emit_transmute(p, l.addr, alloc_type_pointer(st));
- if (abi_type->Tuple.variables.count > 0) {
- array_pop(&p->params);
- }
- for_array(i, abi_type->Tuple.variables) {
- Type *t = abi_type->Tuple.variables[i]->type;
- GB_ASSERT(!is_type_tuple(t));
-
- lbParamPasskind elem_kind = lbParamPass_Value;
- lbValue elem = lb_value_param(p, nullptr, t, index+cast(i32)i, &elem_kind);
- array_add(&p->params, elem);
-
- lbValue dst = lb_emit_struct_ep(p, ptr, cast(i32)i);
- lb_emit_store(p, dst, elem);
- }
- return lb_addr_load(p, l);
- }
-
- }
-
-
- GB_PANIC("Unreachable");
- return {};
-}
-
void lb_start_block(lbProcedure *p, lbBlock *b) {
GB_ASSERT(b != nullptr);
if (!b->appended) {
@@ -3113,20 +3167,7 @@ void lb_begin_procedure_body(lbProcedure *p) {
}
}
- if (p->tags != 0) {
- u64 in = p->tags;
- u64 out = p->module->state_flags;
- if (in & ProcTag_bounds_check) {
- out |= StateFlag_bounds_check;
- out &= ~StateFlag_no_bounds_check;
- } else if (in & ProcTag_no_bounds_check) {
- out |= StateFlag_no_bounds_check;
- out &= ~StateFlag_bounds_check;
- }
- p->module->state_flags = out;
- }
-
- p->builder = LLVMCreateBuilder();
+ p->builder = LLVMCreateBuilderInContext(p->module->ctx);
p->decl_block = lb_create_block(p, "decls", true);
p->entry_block = lb_create_block(p, "entry", true);
@@ -3143,11 +3184,15 @@ void lb_begin_procedure_body(lbProcedure *p) {
lbValue return_ptr_value = {};
if (ft->ret.kind == lbArg_Indirect) {
// NOTE(bill): this must be parameter 0
+
+ String name = str_lit("agg.result");
+
Type *ptr_type = alloc_type_pointer(reduce_tuple_to_single_type(p->type->Proc.results));
- Entity *e = alloc_entity_param(nullptr, make_token_ident(str_lit("agg.result")), ptr_type, false, false);
+ Entity *e = alloc_entity_param(nullptr, make_token_ident(name), ptr_type, false, false);
e->flags |= EntityFlag_Sret | EntityFlag_NoAlias;
return_ptr_value.value = LLVMGetParam(p->value, 0);
+ LLVMSetValueName2(return_ptr_value.value, cast(char const *)name.text, name.len);
return_ptr_value.type = ptr_type;
p->return_ptr = lb_addr(return_ptr_value);
@@ -3273,9 +3318,11 @@ void lb_end_procedure_body(lbProcedure *p) {
LLVMBuildBr(p->builder, p->entry_block->block);
LLVMPositionBuilderAtEnd(p->builder, p->curr_block->block);
+ LLVMValueRef instr = nullptr;
+
// Make sure there is a "ret void" at the end of a procedure with no return type
if (p->type->Proc.result_count == 0) {
- LLVMValueRef instr = LLVMGetLastInstruction(p->curr_block->block);
+ instr = LLVMGetLastInstruction(p->curr_block->block);
if (!lb_is_instr_terminating(instr)) {
lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr);
LLVMBuildRetVoid(p->builder);
@@ -3287,7 +3334,7 @@ void lb_end_procedure_body(lbProcedure *p) {
// Make sure every block terminates, and if not, make it unreachable
for (block = first_block; block != nullptr; block = LLVMGetNextBasicBlock(block)) {
- LLVMValueRef instr = LLVMGetLastInstruction(block);
+ instr = LLVMGetLastInstruction(block);
if (instr == nullptr || !lb_is_instr_terminating(instr)) {
LLVMPositionBuilderAtEnd(p->builder, block);
LLVMBuildUnreachable(p->builder);
@@ -3295,7 +3342,7 @@ void lb_end_procedure_body(lbProcedure *p) {
}
p->curr_block = nullptr;
- p->module->state_flags = 0;
+ p->state_flags = 0;
}
void lb_end_procedure(lbProcedure *p) {
LLVMDisposeBuilder(p->builder);
@@ -3362,6 +3409,20 @@ void lb_emit_if(lbProcedure *p, lbValue cond, lbBlock *true_block, lbBlock *fals
LLVMBuildCondBr(p->builder, cv, true_block->block, false_block->block);
}
+bool lb_is_expr_untyped_const(Ast *expr) {
+ auto const &tv = type_and_value_of_expr(expr);
+ if (is_type_untyped(tv.type)) {
+ return tv.value.kind != ExactValue_Invalid;
+ }
+ return false;
+}
+
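+// Materializes an untyped constant expression (e.g. an untyped integer or
+// boolean literal) directly as a constant of the requested typed type,
+// bypassing the usual lb_build_expr path.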
+lbValue lb_expr_untyped_const_to_typed(lbModule *m, Ast *expr, Type *t) {
+ GB_ASSERT(is_type_typed(t));
+ auto const &tv = type_and_value_of_expr(expr);
+ return lb_const_value(m, t, tv.value);
+}
+
lbValue lb_build_cond(lbProcedure *p, Ast *cond, lbBlock *true_block, lbBlock *false_block) {
GB_ASSERT(cond != nullptr);
GB_ASSERT(true_block != nullptr);
@@ -3393,8 +3454,13 @@ lbValue lb_build_cond(lbProcedure *p, Ast *cond, lbBlock *true_block, lbBlock *f
case_end;
}
- lbValue v = lb_build_expr(p, cond);
- // v = lb_emit_conv(p, v, t_bool);
+ lbValue v = {};
+ if (lb_is_expr_untyped_const(cond)) {
+ v = lb_expr_untyped_const_to_typed(p->module, cond, t_llvm_bool);
+ } else {
+ v = lb_build_expr(p, cond);
+ }
+
v = lb_emit_conv(p, v, t_llvm_bool);
lb_emit_if(p, v, true_block, false_block);
@@ -3419,6 +3485,21 @@ lbAddr lb_add_local(lbProcedure *p, Type *type, Entity *e, bool zero_init, i32 p
LLVMSetAlignment(ptr, alignment);
LLVMPositionBuilderAtEnd(p->builder, p->curr_block->block);
+
+
+ if (!zero_init) {
+ // If there is any padding of any kind, just zero init regardless of zero_init parameter
+ LLVMTypeKind kind = LLVMGetTypeKind(llvm_type);
+ if (kind == LLVMStructTypeKind) {
+ i64 sz = type_size_of(type);
+ if (type_size_of_struct_pretend_is_packed(type) != sz) {
+ zero_init = true;
+ }
+ } else if (kind == LLVMArrayTypeKind) {
+ zero_init = true;
+ }
+ }
+
if (zero_init) {
LLVMTypeKind kind = LLVMGetTypeKind(llvm_type);
@@ -3544,16 +3625,14 @@ void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) {
Ast *ident = vd->names[i];
GB_ASSERT(ident->kind == Ast_Ident);
Entity *e = entity_of_node(ident);
- if (e == nullptr) {
- continue;
- }
+ GB_ASSERT(e != nullptr);
if (e->kind != Entity_TypeName) {
continue;
}
bool polymorphic_struct = false;
if (e->type != nullptr && e->kind == Entity_TypeName) {
- Type *bt = base_type(e->type);
+ Type *bt = base_type(e->type);
if (bt->kind == Type_Struct) {
polymorphic_struct = bt->Struct.is_polymorphic;
}
@@ -3575,12 +3654,16 @@ void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) {
Ast *ident = vd->names[i];
GB_ASSERT(ident->kind == Ast_Ident);
Entity *e = entity_of_node(ident);
- if (e == nullptr) {
- continue;
- }
+ GB_ASSERT(e != nullptr);
if (e->kind != Entity_Procedure) {
continue;
}
+ GB_ASSERT (vd->values[i] != nullptr);
+
+ Ast *value = unparen_expr(vd->values[i]);
+ if (value->kind != Ast_ProcLit) {
+ continue; // It's an alias
+ }
CheckerInfo *info = p->module->info;
DeclInfo *decl = decl_info_of_entity(e);
@@ -3948,63 +4031,105 @@ void lb_build_range_string(lbProcedure *p, lbValue expr, Type *val_type,
}
-void lb_build_range_interval(lbProcedure *p, AstBinaryExpr *node, Type *val_type,
- lbValue *val_, lbValue *idx_, lbBlock **loop_, lbBlock **done_) {
+void lb_build_range_interval(lbProcedure *p, AstBinaryExpr *node,
+ AstRangeStmt *rs, Scope *scope) {
+ bool ADD_EXTRA_WRAPPING_CHECK = true;
+
lbModule *m = p->module;
- // TODO(bill): How should the behaviour work for lower and upper bounds checking for iteration?
- // If 'lower' is changed, should 'val' do so or is that not typical behaviour?
+ lb_open_scope(p, scope);
- lbValue lower = lb_build_expr(p, node->left);
- lbValue upper = {};
+ Type *val0_type = nullptr;
+ Type *val1_type = nullptr;
+ if (rs->vals.count > 0 && rs->vals[0] != nullptr && !is_blank_ident(rs->vals[0])) {
+ val0_type = type_of_expr(rs->vals[0]);
+ }
+ if (rs->vals.count > 1 && rs->vals[1] != nullptr && !is_blank_ident(rs->vals[1])) {
+ val1_type = type_of_expr(rs->vals[1]);
+ }
- lbValue val = {};
- lbValue idx = {};
- lbBlock *loop = nullptr;
- lbBlock *done = nullptr;
- lbBlock *body = nullptr;
+ if (val0_type != nullptr) {
+ Entity *e = entity_of_node(rs->vals[0]);
+ lb_add_local(p, e->type, e, true);
+ }
+ if (val1_type != nullptr) {
+ Entity *e = entity_of_node(rs->vals[1]);
+ lb_add_local(p, e->type, e, true);
+ }
- if (val_type == nullptr) {
- val_type = lower.type;
+ TokenKind op = Token_Lt;
+ switch (node->op.kind) {
+ case Token_Ellipsis: op = Token_LtEq; break;
+ case Token_RangeFull: op = Token_LtEq; break;
+ case Token_RangeHalf: op = Token_Lt; break;
+ default: GB_PANIC("Invalid interval operator"); break;
}
- lbAddr value = lb_add_local_generated(p, val_type, false);
+
+ lbValue lower = lb_build_expr(p, node->left);
+ lbValue upper = {}; // initialized each time in the loop
+
+ lbAddr value = lb_add_local_generated(p, val0_type ? val0_type : lower.type, false);
lb_addr_store(p, value, lower);
lbAddr index = lb_add_local_generated(p, t_int, false);
lb_addr_store(p, index, lb_const_int(m, t_int, 0));
- loop = lb_create_block(p, "for.interval.loop");
+ lbBlock *loop = lb_create_block(p, "for.interval.loop");
+ lbBlock *body = lb_create_block(p, "for.interval.body");
+ lbBlock *done = lb_create_block(p, "for.interval.done");
+
lb_emit_jump(p, loop);
lb_start_block(p, loop);
- body = lb_create_block(p, "for.interval.body");
- done = lb_create_block(p, "for.interval.done");
-
-
- TokenKind op = Token_Lt;
- switch (node->op.kind) {
- case Token_Ellipsis: op = Token_LtEq; break;
- case Token_RangeHalf: op = Token_Lt; break;
- default: GB_PANIC("Invalid interval operator"); break;
- }
-
upper = lb_build_expr(p, node->right);
-
lbValue curr_value = lb_addr_load(p, value);
lbValue cond = lb_emit_comp(p, op, curr_value, upper);
lb_emit_if(p, cond, body, done);
lb_start_block(p, body);
- val = lb_addr_load(p, value);
- idx = lb_addr_load(p, index);
+ lbValue val = lb_addr_load(p, value);
+ lbValue idx = lb_addr_load(p, index);
+ if (val0_type) lb_store_range_stmt_val(p, rs->vals[0], val);
+ if (val1_type) lb_store_range_stmt_val(p, rs->vals[1], idx);
- lb_emit_increment(p, value.addr);
- lb_emit_increment(p, index.addr);
+ {
+ // NOTE: this check block will most likely be optimized out, and is here
+ // to make this code easier to read
+ lbBlock *check = nullptr;
+ lbBlock *post = lb_create_block(p, "for.interval.post");
- if (val_) *val_ = val;
- if (idx_) *idx_ = idx;
- if (loop_) *loop_ = loop;
- if (done_) *done_ = done;
+ lbBlock *continue_block = post;
+
+ if (ADD_EXTRA_WRAPPING_CHECK &&
+ op == Token_LtEq) {
+ check = lb_create_block(p, "for.interval.check");
+ continue_block = check;
+ }
+
+ lb_push_target_list(p, rs->label, done, continue_block, nullptr);
+
+ lb_build_stmt(p, rs->body);
+
+ lb_close_scope(p, lbDeferExit_Default, nullptr);
+ lb_pop_target_list(p);
+
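+		// With a closed interval ('a .. b' / 'a ..= b'), 'value <= upper' never becomes
+		// false once 'upper' is the maximum value of its type, so the 'value != upper'
+		// check emitted below ends the loop before the increment could wrap around.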
+ if (check != nullptr) {
+ lb_emit_jump(p, check);
+ lb_start_block(p, check);
+
+ lbValue check_cond = lb_emit_comp(p, Token_NotEq, curr_value, upper);
+ lb_emit_if(p, check_cond, post, done);
+ } else {
+ lb_emit_jump(p, post);
+ }
+
+ lb_start_block(p, post);
+ lb_emit_increment(p, value.addr);
+ lb_emit_increment(p, index.addr);
+ lb_emit_jump(p, loop);
+ }
+
+ lb_start_block(p, done);
}
void lb_build_range_enum(lbProcedure *p, Type *enum_type, Type *val_type, lbValue *val_, lbValue *idx_, lbBlock **loop_, lbBlock **done_) {
@@ -4159,6 +4284,11 @@ void lb_build_range_stmt_struct_soa(lbProcedure *p, AstRangeStmt *rs, Scope *sco
void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *scope) {
Ast *expr = unparen_expr(rs->expr);
+ if (is_ast_range(expr)) {
+ lb_build_range_interval(p, &expr->BinaryExpr, rs, scope);
+ return;
+ }
+
Type *expr_type = type_of_expr(expr);
if (expr_type != nullptr) {
Type *et = base_type(type_deref(expr_type));
@@ -4195,10 +4325,7 @@ void lb_build_range_stmt(lbProcedure *p, AstRangeStmt *rs, Scope *scope) {
bool is_map = false;
TypeAndValue tav = type_and_value_of_expr(expr);
-
- if (is_ast_range(expr)) {
- lb_build_range_interval(p, &expr->BinaryExpr, val0_type, &val, &key, &loop, &done);
- } else if (tav.mode == Addressing_Type) {
+ if (tav.mode == Addressing_Type) {
lb_build_range_enum(p, type_deref(tav.type), val0_type, &val, &key, &loop, &done);
} else {
Type *expr_type = type_of_expr(expr);
@@ -4347,7 +4474,7 @@ void lb_build_inline_range_stmt(lbProcedure *p, AstUnrollRangeStmt *rs, Scope *s
ExactValue start = start_expr->tav.value;
ExactValue end = end_expr->tav.value;
- if (op == Token_Ellipsis) { // .. [start, end]
+ if (op != Token_RangeHalf) { // .. [start, end] (or ..=)
ExactValue index = exact_value_i64(0);
for (ExactValue val = start;
compare_exact_values(Token_LtEq, val, end);
@@ -4358,7 +4485,7 @@ void lb_build_inline_range_stmt(lbProcedure *p, AstUnrollRangeStmt *rs, Scope *s
lb_build_stmt(p, rs->body);
}
- } else if (op == Token_RangeHalf) { // ..< [start, end)
+ } else { // ..< [start, end)
ExactValue index = exact_value_i64(0);
for (ExactValue val = start;
compare_exact_values(Token_Lt, val, end);
@@ -4525,6 +4652,7 @@ void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope *scope) {
TokenKind op = Token_Invalid;
switch (ie->op.kind) {
case Token_Ellipsis: op = Token_LtEq; break;
+ case Token_RangeFull: op = Token_LtEq; break;
case Token_RangeHalf: op = Token_Lt; break;
default: GB_PANIC("Invalid interval operator"); break;
}
@@ -4763,6 +4891,9 @@ lbValue lb_emit_logical_binary_expr(lbProcedure *p, TokenKind op, Ast *left, Ast
if (done->preds.count == 0) {
lb_start_block(p, rhs);
+ if (lb_is_expr_untyped_const(right)) {
+ return lb_expr_untyped_const_to_typed(m, right, type);
+ }
return lb_build_expr(p, right);
}
@@ -4777,7 +4908,12 @@ lbValue lb_emit_logical_binary_expr(lbProcedure *p, TokenKind op, Ast *left, Ast
}
lb_start_block(p, rhs);
- lbValue edge = lb_build_expr(p, right);
+ lbValue edge = {};
+ if (lb_is_expr_untyped_const(right)) {
+ edge = lb_expr_untyped_const_to_typed(m, right, type);
+ } else {
+ edge = lb_build_expr(p, right);
+ }
incoming_values[done->preds.count] = edge.value;
incoming_blocks[done->preds.count] = p->curr_block->block;
@@ -4815,12 +4951,12 @@ void lb_build_stmt(lbProcedure *p, Ast *node) {
LLVMSetCurrentDebugLocation2(p->builder, prev_debug_location);
});
- u64 prev_state_flags = p->module->state_flags;
- defer (p->module->state_flags = prev_state_flags);
+ u16 prev_state_flags = p->state_flags;
+ defer (p->state_flags = prev_state_flags);
if (node->state_flags != 0) {
- u64 in = node->state_flags;
- u64 out = p->module->state_flags;
+ u16 in = node->state_flags;
+ u16 out = p->state_flags;
if (in & StateFlag_bounds_check) {
out |= StateFlag_bounds_check;
@@ -4830,7 +4966,7 @@ void lb_build_stmt(lbProcedure *p, Ast *node) {
out &= ~StateFlag_bounds_check;
}
- p->module->state_flags = out;
+ p->state_flags = out;
}
switch (node->kind) {
@@ -5412,6 +5548,7 @@ lbValue lb_emit_min(lbProcedure *p, Type *t, lbValue x, lbValue y) {
args[0] = x;
args[1] = y;
switch (sz) {
+ case 16: return lb_emit_runtime_call(p, "min_f16", args);
case 32: return lb_emit_runtime_call(p, "min_f32", args);
case 64: return lb_emit_runtime_call(p, "min_f64", args);
}
@@ -5429,6 +5566,7 @@ lbValue lb_emit_max(lbProcedure *p, Type *t, lbValue x, lbValue y) {
args[0] = x;
args[1] = y;
switch (sz) {
+ case 16: return lb_emit_runtime_call(p, "max_f16", args);
case 32: return lb_emit_runtime_call(p, "max_f32", args);
case 64: return lb_emit_runtime_call(p, "max_f64", args);
}
@@ -5462,9 +5600,10 @@ LLVMValueRef lb_find_or_add_entity_string_ptr(lbModule *m, String const &str) {
isize max_len = 7+8+1;
char *name = gb_alloc_array(permanent_allocator(), char, max_len);
- isize len = gb_snprintf(name, max_len, "csbs$%x", m->global_array_index);
+
+ u32 id = cast(u32)gb_atomic32_fetch_add(&m->gen->global_array_index, 1);
+ isize len = gb_snprintf(name, max_len, "csbs$%x", id);
len -= 1;
- m->global_array_index++;
LLVMValueRef global_data = LLVMAddGlobal(m->mod, LLVMTypeOf(data), name);
LLVMSetInitializer(global_data, data);
@@ -5504,9 +5643,9 @@ lbValue lb_find_or_add_entity_string_byte_slice(lbModule *m, String const &str)
{
isize max_len = 7+8+1;
name = gb_alloc_array(permanent_allocator(), char, max_len);
- isize len = gb_snprintf(name, max_len, "csbs$%x", m->global_array_index);
+ u32 id = cast(u32)gb_atomic32_fetch_add(&m->gen->global_array_index, 1);
+ isize len = gb_snprintf(name, max_len, "csbs$%x", id);
len -= 1;
- m->global_array_index++;
}
LLVMValueRef global_data = LLVMAddGlobal(m->mod, LLVMTypeOf(data), name);
LLVMSetInitializer(global_data, data);
@@ -5628,7 +5767,7 @@ lbValue lb_type_info(lbModule *m, Type *type) {
};
lbValue value = {};
- value.value = LLVMConstGEP(lb_global_type_info_data.addr.value, indices, gb_count_of(indices));
+ value.value = LLVMConstGEP(lb_global_type_info_data_ptr(m).value, indices, gb_count_of(indices));
value.type = t_type_info_ptr;
return value;
}
@@ -5661,6 +5800,112 @@ LLVMValueRef lb_build_constant_array_values(lbModule *m, Type *type, Type *elem_
return llvm_const_array(lb_type(m, elem_type), values, cast(unsigned int)count);
}
+lbValue lb_find_procedure_value_from_entity(lbModule *m, Entity *e) {
+ GB_ASSERT(is_type_proc(e->type));
+ e = strip_entity_wrapping(e);
+ GB_ASSERT(e != nullptr);
+ auto *found = map_get(&m->values, hash_entity(e));
+ if (found) {
+ return *found;
+ }
+
+ bool ignore_body = false;
+
+ if (USE_SEPARTE_MODULES) {
+ lbModule *other_module = lb_pkg_module(m->gen, e->pkg);
+ ignore_body = other_module != m;
+ }
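+
+	// With per-package modules, a procedure owned by another package is presumably
+	// created here as a declaration only (ignore_body); its defining module emits the body.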
+
+ lbProcedure *missing_proc = lb_create_procedure(m, e, ignore_body);
+ found = map_get(&m->values, hash_entity(e));
+ if (found) {
+ return *found;
+ }
+
+ GB_PANIC("Error in: %s, missing procedure %.*s\n", token_pos_to_string(e->token.pos), LIT(e->token.string));
+ return {};
+}
+
+lbValue lb_find_value_from_entity(lbModule *m, Entity *e) {
+ e = strip_entity_wrapping(e);
+ GB_ASSERT(e != nullptr);
+
+ GB_ASSERT(e->token.string != "_");
+
+ if (e->kind == Entity_Procedure) {
+ return lb_find_procedure_value_from_entity(m, e);
+ }
+
+ auto *found = map_get(&m->values, hash_entity(e));
+ if (found) {
+ return *found;
+ }
+
+ if (USE_SEPARTE_MODULES) {
+ lbModule *other_module = lb_pkg_module(m->gen, e->pkg);
+
+ // TODO(bill): correct this logic
+ bool is_external = other_module != m;
+ if (!is_external) {
+ if (e->code_gen_module != nullptr) {
+ other_module = e->code_gen_module;
+ } else {
+ other_module = nullptr;
+ }
+ is_external = other_module != m;
+ }
+
+ if (is_external) {
+ String name = lb_get_entity_name(other_module, e);
+
+ lbValue g = {};
+ g.value = LLVMAddGlobal(m->mod, lb_type(m, e->type), alloc_cstring(permanent_allocator(), name));
+ g.type = alloc_type_pointer(e->type);
+ lb_add_entity(m, e, g);
+ lb_add_member(m, name, g);
+
+ LLVMSetLinkage(g.value, LLVMExternalLinkage);
+
+ // if (other_module != nullptr) {
+ // lbValue *other_found = string_map_get(&other_module->members, name);
+ // if (other_found) {
+ // lbValue other_g = *other_found;
+ // }
+ // }
+
+ // LLVMSetLinkage(other_g.value, LLVMExternalLinkage);
+
+ if (e->Variable.thread_local_model != "") {
+ LLVMSetThreadLocal(g.value, true);
+
+ String m = e->Variable.thread_local_model;
+ LLVMThreadLocalMode mode = LLVMGeneralDynamicTLSModel;
+ if (m == "default") {
+ mode = LLVMGeneralDynamicTLSModel;
+ } else if (m == "localdynamic") {
+ mode = LLVMLocalDynamicTLSModel;
+ } else if (m == "initialexec") {
+ mode = LLVMInitialExecTLSModel;
+ } else if (m == "localexec") {
+ mode = LLVMLocalExecTLSModel;
+ } else {
+ GB_PANIC("Unhandled thread local mode %.*s", LIT(m));
+ }
+ LLVMSetThreadLocalMode(g.value, mode);
+ }
+
+
+ return g;
+ }
+ }
+
+ GB_PANIC("\n\tError in: %s, missing value %.*s\n", token_pos_to_string(e->token.pos), LIT(e->token.string));
+ return {};
+}
+
+
+
+
lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_local) {
LLVMContextRef ctx = m->ctx;
@@ -5687,14 +5932,7 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc
return lb_generate_anonymous_proc_lit(m, str_lit("_proclit"), expr);
}
Entity *e = entity_from_expr(expr);
- e = strip_entity_wrapping(e);
- GB_ASSERT(e != nullptr);
- auto *found = map_get(&m->values, hash_entity(e));
- if (found) {
- return *found;
- }
-
- GB_PANIC("Error in: %s, missing procedure %.*s\n", token_pos_to_string(e->token.pos), LIT(e->token.string));
+ return lb_find_procedure_value_from_entity(m, e);
}
bool is_local = allow_local && m->curr_procedure != nullptr;
@@ -5743,8 +5981,8 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc
} else {
isize max_len = 7+8+1;
char *str = gb_alloc_array(permanent_allocator(), char, max_len);
- isize len = gb_snprintf(str, max_len, "csba$%x", m->global_array_index);
- m->global_array_index++;
+ u32 id = cast(u32)gb_atomic32_fetch_add(&m->gen->global_array_index, 1);
+ isize len = gb_snprintf(str, max_len, "csba$%x", id);
String name = make_string(cast(u8 *)str, len-1);
@@ -5797,7 +6035,7 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc
res.value = llvm_const_array(et, elems, cast(unsigned)count);
return res;
}
- GB_PANIC("HERE!\n");
+ GB_PANIC("This should not have happened!\n");
LLVMValueRef data = LLVMConstStringInContext(ctx,
cast(char const *)value.value_string.text,
@@ -6799,15 +7037,29 @@ lbValue lb_build_binary_expr(lbProcedure *p, Ast *expr) {
case Token_And:
case Token_Or:
case Token_Xor:
- case Token_AndNot:
- case Token_Shl:
- case Token_Shr: {
+ case Token_AndNot: {
Type *type = default_type(tv.type);
lbValue left = lb_build_expr(p, be->left);
lbValue right = lb_build_expr(p, be->right);
return lb_emit_arith(p, be->op.kind, left, right, type);
}
+ case Token_Shl:
+ case Token_Shr: {
+ lbValue left, right;
+ Type *type = default_type(tv.type);
+ left = lb_build_expr(p, be->left);
+
+ if (lb_is_expr_untyped_const(be->right)) {
+ // NOTE(bill): RHS shift operands can still be untyped
+ // Just bypass the standard lb_build_expr
+ right = lb_expr_untyped_const_to_typed(p->module, be->right, type);
+ } else {
+ right = lb_build_expr(p, be->right);
+ }
+ return lb_emit_arith(p, be->op.kind, left, right, type);
+ }
+
case Token_CmpEq:
case Token_NotEq:
case Token_Lt:
@@ -7598,10 +7850,13 @@ lbContextData *lb_push_context_onto_stack_from_implicit_parameter(lbProcedure *p
GB_ASSERT(pt->kind == Type_Proc);
GB_ASSERT(pt->Proc.calling_convention == ProcCC_Odin);
- Entity *e = alloc_entity_param(nullptr, make_token_ident(str_lit("__.context_ptr")), t_context_ptr, false, false);
+ String name = str_lit("__.context_ptr");
+
+ Entity *e = alloc_entity_param(nullptr, make_token_ident(name), t_context_ptr, false, false);
e->flags |= EntityFlag_NoAlias;
LLVMValueRef context_ptr = LLVMGetParam(p->value, LLVMCountParams(p->value)-1);
+ LLVMSetValueName2(context_ptr, cast(char const *)name.text, name.len);
context_ptr = LLVMBuildPointerCast(p->builder, context_ptr, lb_type(p->module, e->type), "");
lbValue param = {context_ptr, e->type};
@@ -8084,6 +8339,8 @@ Array<lbValue> lb_value_to_array(lbProcedure *p, lbValue value) {
lbValue lb_emit_call_internal(lbProcedure *p, lbValue value, lbValue return_ptr, Array<lbValue> const &processed_args, Type *abi_rt, lbAddr context_ptr, ProcInlining inlining) {
+ GB_ASSERT(p->module->ctx == LLVMGetTypeContext(LLVMTypeOf(value.value)));
+
unsigned arg_count = cast(unsigned)processed_args.count;
if (return_ptr.value != nullptr) {
arg_count += 1;
@@ -8158,31 +8415,18 @@ lbValue lb_emit_call_internal(lbProcedure *p, lbValue value, lbValue return_ptr,
}
}
-lbValue lb_emit_runtime_call(lbProcedure *p, char const *c_name, Array<lbValue> const &args) {
- // LLVMMetadataRef curr_loc = LLVMGetCurrentDebugLocation2(p->builder);
- // LLVMSetCurrentDebugLocation2(p->builder, nullptr);
- // defer (if (curr_loc) {
- // LLVMSetCurrentDebugLocation2(p->builder, curr_loc);
- // });
-
- String name = make_string_c(c_name);
-
- AstPackage *pkg = p->module->info->runtime_package;
+lbValue lb_lookup_runtime_procedure(lbModule *m, String const &name) {
+ AstPackage *pkg = m->info->runtime_package;
Entity *e = scope_lookup_current(pkg->scope, name);
+ return lb_find_procedure_value_from_entity(m, e);
+}
- lbValue *found = nullptr;
- if (p->module != e->code_gen_module) {
- gb_mutex_lock(&p->module->mutex);
- }
- GB_ASSERT(e->code_gen_module != nullptr);
- found = map_get(&e->code_gen_module->values, hash_entity(e));
- if (p->module != e->code_gen_module) {
- gb_mutex_unlock(&p->module->mutex);
- }
- GB_ASSERT_MSG(found != nullptr, "%s", c_name);
- return lb_emit_call(p, *found, args);
+lbValue lb_emit_runtime_call(lbProcedure *p, char const *c_name, Array<lbValue> const &args) {
+ String name = make_string_c(c_name);
+ lbValue proc = lb_lookup_runtime_procedure(p->module, name);
+ return lb_emit_call(p, proc, args);
}
lbValue lb_emit_call(lbProcedure *p, lbValue value, Array<lbValue> const &args, ProcInlining inlining, bool use_return_ptr_hint) {
@@ -8321,9 +8565,7 @@ lbValue lb_emit_call(lbProcedure *p, lbValue value, Array<lbValue> const &args,
if (e != nullptr && entity_has_deferred_procedure(e)) {
DeferredProcedureKind kind = e->Procedure.deferred_procedure.kind;
Entity *deferred_entity = e->Procedure.deferred_procedure.entity;
- lbValue *deferred_found = map_get(&p->module->values, hash_entity(deferred_entity));
- GB_ASSERT(deferred_found != nullptr);
- lbValue deferred = *deferred_found;
+ lbValue deferred = lb_find_procedure_value_from_entity(p->module, deferred_entity);
auto in_args = args;
@@ -8645,7 +8887,7 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
switch (id) {
case BuiltinProc_DIRECTIVE: {
ast_node(bd, BasicDirective, ce->proc);
- String name = bd->name;
+ String name = bd->name.string;
GB_ASSERT(name == "location");
String procedure = p->entity->token.string;
TokenPos pos = ast_token(ce->proc).pos;
@@ -9134,9 +9376,11 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
case BuiltinProc_overflow_sub:
case BuiltinProc_overflow_mul:
{
- Type *tuple = tv.type;
- GB_ASSERT(is_type_tuple(tuple));
- Type *type = tuple->Tuple.variables[0]->type;
+ Type *main_type = tv.type;
+ Type *type = main_type;
+ if (is_type_tuple(main_type)) {
+ type = main_type->Tuple.variables[0]->type;
+ }
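+		// If only the value is used (tv.type is not a tuple), the overflow flag from
+		// the intrinsic's {value, overflow} aggregate result is dropped further below.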
lbValue x = lb_build_expr(p, ce->args[0]);
lbValue y = lb_build_expr(p, ce->args[1]);
@@ -9166,21 +9410,85 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
args[0] = x.value;
args[1] = y.value;
- Type *res_type = nullptr;
- {
+ lbValue res = {};
+ res.value = LLVMBuildCall(p->builder, ip, args, gb_count_of(args), "");
+
+ if (is_type_tuple(main_type)) {
+ Type *res_type = nullptr;
gbAllocator a = permanent_allocator();
res_type = alloc_type_tuple();
array_init(&res_type->Tuple.variables, a, 2);
res_type->Tuple.variables[0] = alloc_entity_field(nullptr, blank_token, type, false, 0);
res_type->Tuple.variables[1] = alloc_entity_field(nullptr, blank_token, t_llvm_bool, false, 1);
+
+ res.type = res_type;
+ } else {
+ res.value = LLVMBuildExtractValue(p->builder, res.value, 0, "");
+ res.type = type;
}
+ return res;
+ }
+
+ case BuiltinProc_sqrt:
+ {
+ Type *type = tv.type;
+
+ lbValue x = lb_build_expr(p, ce->args[0]);
+ x = lb_emit_conv(p, x, type);
+
+ char const *name = "llvm.sqrt";
+ LLVMTypeRef types[1] = {lb_type(p->module, type)};
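+		// llvm.sqrt is an overloaded intrinsic; the operand's float type passed in 'types'
+		// selects the concrete declaration (e.g. llvm.sqrt.f32 or llvm.sqrt.f64).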
+ unsigned id = LLVMLookupIntrinsicID(name, gb_strlen(name));
+ GB_ASSERT_MSG(id != 0, "Unable to find %s.%s", name, LLVMPrintTypeToString(types[0]));
+ LLVMValueRef ip = LLVMGetIntrinsicDeclaration(p->module->mod, id, types, gb_count_of(types));
+
+ LLVMValueRef args[1] = {};
+ args[0] = x.value;
lbValue res = {};
res.value = LLVMBuildCall(p->builder, ip, args, gb_count_of(args), "");
- res.type = res_type;
+ res.type = type;
return res;
}
+ case BuiltinProc_mem_copy:
+ case BuiltinProc_mem_copy_non_overlapping:
+ {
+
+
+ lbValue dst = lb_build_expr(p, ce->args[0]);
+ lbValue src = lb_build_expr(p, ce->args[1]);
+ lbValue len = lb_build_expr(p, ce->args[2]);
+ dst = lb_emit_conv(p, dst, t_rawptr);
+ src = lb_emit_conv(p, src, t_rawptr);
+ len = lb_emit_conv(p, len, t_int);
+
+ char const *name = nullptr;
+ switch (id) {
+ case BuiltinProc_mem_copy: name = "llvm.memmove"; break;
+ case BuiltinProc_mem_copy_non_overlapping: name = "llvm.memcpy"; break;
+ }
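+		// llvm.memmove is used for mem_copy because it permits overlapping source and
+		// destination ranges; llvm.memcpy assumes the ranges do not overlap.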
+
+ LLVMTypeRef types[3] = {
+ lb_type(p->module, t_rawptr),
+ lb_type(p->module, t_rawptr),
+ lb_type(p->module, t_int)
+ };
+ unsigned id = LLVMLookupIntrinsicID(name, gb_strlen(name));
+ GB_ASSERT_MSG(id != 0, "Unable to find %s.%s.%s.%s", name, LLVMPrintTypeToString(types[0]), LLVMPrintTypeToString(types[1]), LLVMPrintTypeToString(types[2]));
+ LLVMValueRef ip = LLVMGetIntrinsicDeclaration(p->module->mod, id, types, gb_count_of(types));
+
+ LLVMValueRef args[4] = {};
+ args[0] = dst.value;
+ args[1] = src.value;
+ args[2] = len.value;
+ args[3] = LLVMConstInt(LLVMInt1TypeInContext(p->module->ctx), 0, false); // is_volatile parameter
+
+ LLVMBuildCall(p->builder, ip, args, gb_count_of(args), "");
+
+ return {};
+ }
+
case BuiltinProc_atomic_fence:
LLVMBuildFence(p->builder, LLVMAtomicOrderingSequentiallyConsistent, false, "");
@@ -9390,16 +9698,22 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
single_threaded
);
- GB_ASSERT(tv.type->kind == Type_Tuple);
- Type *fix_typed = alloc_type_tuple();
- array_init(&fix_typed->Tuple.variables, permanent_allocator(), 2);
- fix_typed->Tuple.variables[0] = tv.type->Tuple.variables[0];
- fix_typed->Tuple.variables[1] = alloc_entity_field(nullptr, blank_token, t_llvm_bool, false, 1);
+ if (tv.type->kind == Type_Tuple) {
+ Type *fix_typed = alloc_type_tuple();
+ array_init(&fix_typed->Tuple.variables, permanent_allocator(), 2);
+ fix_typed->Tuple.variables[0] = tv.type->Tuple.variables[0];
+ fix_typed->Tuple.variables[1] = alloc_entity_field(nullptr, blank_token, t_llvm_bool, false, 1);
- lbValue res = {};
- res.value = value;
- res.type = fix_typed;
- return res;
+ lbValue res = {};
+ res.value = value;
+ res.type = fix_typed;
+ return res;
+ } else {
+ lbValue res = {};
+ res.value = LLVMBuildExtractValue(p->builder, value, 0, "");
+ res.type = tv.type;
+ return res;
+ }
}
@@ -9490,7 +9804,8 @@ lbValue lb_handle_param_value(lbProcedure *p, Type *parameter_type, ParameterVal
switch (param_value.kind) {
case ParameterValue_Constant:
if (is_type_constant_type(parameter_type)) {
- return lb_const_value(p->module, parameter_type, param_value.value);
+ auto res = lb_const_value(p->module, parameter_type, param_value.value);
+ return res;
} else {
ExactValue ev = param_value.value;
lbValue arg = {};
@@ -9900,24 +10215,6 @@ void lb_emit_increment(lbProcedure *p, lbValue addr) {
}
-LLVMValueRef lb_lookup_runtime_procedure(lbModule *m, String const &name) {
- AstPackage *pkg = m->info->runtime_package;
- Entity *e = scope_lookup_current(pkg->scope, name);
-
- lbValue *found = nullptr;
- if (m != e->code_gen_module) {
- gb_mutex_lock(&m->mutex);
- }
- GB_ASSERT(e->code_gen_module != nullptr);
- found = map_get(&e->code_gen_module->values, hash_entity(e));
- if (m != e->code_gen_module) {
- gb_mutex_unlock(&m->mutex);
- }
- GB_ASSERT(found != nullptr);
-
- return found->value;
-}
-
lbValue lb_emit_byte_swap(lbProcedure *p, lbValue value, Type *end_type) {
GB_ASSERT(type_size_of(value.type) == type_size_of(end_type));
@@ -10242,7 +10539,7 @@ lbValue lb_get_equal_proc_for_type(lbModule *m, Type *type) {
lb_start_block(p, block_diff_ptr);
- if (type->kind == Type_Struct) {
+ if (type->kind == Type_Struct) {
type_set_offsets(type);
lbBlock *block_false = lb_create_block(p, "bfalse");
@@ -10268,6 +10565,56 @@ lbValue lb_get_equal_proc_for_type(lbModule *m, Type *type) {
lb_start_block(p, block_false);
LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_bool), 0, false));
+ } else if (type->kind == Type_Union) {
+ if (is_type_union_maybe_pointer(type)) {
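+			// Single-variant "maybe pointer" union: no tag is stored, so equality reduces
+			// to comparing the pointer-shaped payloads loaded below.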
+ Type *v = type->Union.variants[0];
+ Type *pv = alloc_type_pointer(v);
+
+ lbValue left = lb_emit_load(p, lb_emit_conv(p, lhs, pv));
+ lbValue right = lb_emit_load(p, lb_emit_conv(p, rhs, pv));
+
+ lbValue ok = lb_emit_comp(p, Token_CmpEq, left, right);
+ ok = lb_emit_conv(p, ok, t_bool);
+ LLVMBuildRet(p->builder, ok.value);
+ } else {
+ lbBlock *block_false = lb_create_block(p, "bfalse");
+ lbBlock *block_switch = lb_create_block(p, "bswitch");
+
+ lbValue left_tag = lb_emit_load(p, lb_emit_union_tag_ptr(p, lhs));
+ lbValue right_tag = lb_emit_load(p, lb_emit_union_tag_ptr(p, rhs));
+
+ lbValue tag_eq = lb_emit_comp(p, Token_CmpEq, left_tag, right_tag);
+ lb_emit_if(p, tag_eq, block_switch, block_false);
+
+ lb_start_block(p, block_switch);
+ LLVMValueRef v_switch = LLVMBuildSwitch(p->builder, left_tag.value, block_false->block, cast(unsigned)type->Union.variants.count);
+
+
+ for_array(i, type->Union.variants) {
+ lbBlock *case_block = lb_create_block(p, "bcase");
+ lb_start_block(p, case_block);
+
+ Type *v = type->Union.variants[i];
+ lbValue case_tag = lb_const_union_tag(p->module, type, v);
+
+ Type *vp = alloc_type_pointer(v);
+
+ lbValue left = lb_emit_load(p, lb_emit_conv(p, lhs, vp));
+ lbValue right = lb_emit_load(p, lb_emit_conv(p, rhs, vp));
+ lbValue ok = lb_emit_comp(p, Token_CmpEq, left, right);
+ ok = lb_emit_conv(p, ok, t_bool);
+
+ LLVMBuildRet(p->builder, ok.value);
+
+
+ LLVMAddCase(v_switch, case_tag.value, case_block->block);
+ }
+
+ lb_start_block(p, block_false);
+
+ LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_bool), 0, false));
+ }
+
} else {
lbValue left = lb_emit_load(p, lhs);
lbValue right = lb_emit_load(p, rhs);
@@ -10336,6 +10683,9 @@ lbValue lb_get_hasher_proc_for_type(lbModule *m, Type *type) {
lbValue data = {x, t_rawptr};
lbValue seed = {y, t_uintptr};
+ LLVMAttributeRef nonnull_attr = lb_create_enum_attribute(m->ctx, "nonnull");
+ LLVMAddAttributeAtIndex(p->value, 1+0, nonnull_attr);
+
if (is_type_simple_compare(type)) {
lbValue res = lb_simple_compare_hash(p, type, data, seed);
LLVMBuildRet(p->builder, res.value);
@@ -10358,6 +10708,48 @@ lbValue lb_get_hasher_proc_for_type(lbModule *m, Type *type) {
seed = lb_emit_call(p, field_hasher, args);
}
LLVMBuildRet(p->builder, seed.value);
+ } else if (type->kind == Type_Union) {
+ auto args = array_make<lbValue>(permanent_allocator(), 2);
+
+ if (is_type_union_maybe_pointer(type)) {
+ Type *v = type->Union.variants[0];
+ lbValue variant_hasher = lb_get_hasher_proc_for_type(m, v);
+
+ args[0] = data;
+ args[1] = seed;
+ lbValue res = lb_emit_call(p, variant_hasher, args);
+ LLVMBuildRet(p->builder, res.value);
+ }
+
+ lbBlock *end_block = lb_create_block(p, "bend");
+ data = lb_emit_conv(p, data, pt);
+
+ lbValue tag_ptr = lb_emit_union_tag_ptr(p, data);
+ lbValue tag = lb_emit_load(p, tag_ptr);
+
+ LLVMValueRef v_switch = LLVMBuildSwitch(p->builder, tag.value, end_block->block, cast(unsigned)type->Union.variants.count);
+
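+		// Switch on the union tag and hash the active variant with that variant's hasher;
+		// an unmatched tag falls through to end_block, which returns the seed unchanged.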
+ for_array(i, type->Union.variants) {
+ lbBlock *case_block = lb_create_block(p, "bcase");
+ lb_start_block(p, case_block);
+
+ Type *v = type->Union.variants[i];
+ Type *vp = alloc_type_pointer(v);
+ lbValue case_tag = lb_const_union_tag(p->module, type, v);
+
+ lbValue variant_hasher = lb_get_hasher_proc_for_type(m, v);
+
+ args[0] = data;
+ args[1] = seed;
+ lbValue res = lb_emit_call(p, variant_hasher, args);
+ LLVMBuildRet(p->builder, res.value);
+
+ LLVMAddCase(v_switch, case_tag.value, case_block->block);
+ }
+
+ lb_start_block(p, end_block);
+ LLVMBuildRet(p->builder, seed.value);
+
} else if (type->kind == Type_Array) {
lbAddr pres = lb_add_local_generated(p, t_uintptr, false);
lb_addr_store(p, pres, seed);
@@ -10419,6 +10811,30 @@ lbValue lb_get_hasher_proc_for_type(lbModule *m, Type *type) {
return {p->value, p->type};
}
+lbValue lb_compare_records(lbProcedure *p, TokenKind op_kind, lbValue left, lbValue right, Type *type) {
+ GB_ASSERT((is_type_struct(type) || is_type_union(type)) && is_type_comparable(type));
+ lbValue left_ptr = lb_address_from_load_or_generate_local(p, left);
+ lbValue right_ptr = lb_address_from_load_or_generate_local(p, right);
+ lbValue res = {};
+ if (is_type_simple_compare(type)) {
+ // TODO(bill): Test to see if this is actually faster!!!!
+ auto args = array_make<lbValue>(permanent_allocator(), 3);
+ args[0] = lb_emit_conv(p, left_ptr, t_rawptr);
+ args[1] = lb_emit_conv(p, right_ptr, t_rawptr);
+ args[2] = lb_const_int(p->module, t_int, type_size_of(type));
+ res = lb_emit_runtime_call(p, "memory_equal", args);
+ } else {
+ lbValue value = lb_get_equal_proc_for_type(p->module, type);
+ auto args = array_make<lbValue>(permanent_allocator(), 2);
+ args[0] = lb_emit_conv(p, left_ptr, t_rawptr);
+ args[1] = lb_emit_conv(p, right_ptr, t_rawptr);
+ res = lb_emit_call(p, value, args);
+ }
+ if (op_kind == Token_NotEq) {
+ res = lb_emit_unary_arith(p, Token_Not, res, res.type);
+ }
+ return res;
+}
lbValue lb_emit_comp(lbProcedure *p, TokenKind op_kind, lbValue left, lbValue right) {
@@ -10447,42 +10863,22 @@ lbValue lb_emit_comp(lbProcedure *p, TokenKind op_kind, lbValue left, lbValue ri
Type *lt = left.type;
Type *rt = right.type;
- // if (is_type_bit_set(lt) && is_type_bit_set(rt)) {
- // Type *blt = base_type(lt);
- // Type *brt = base_type(rt);
- // i64 bits = gb_max(blt->BitSet.bits, brt->BitSet.bits);
- // i64 bytes = bits / 8;
- // switch (bytes) {
- // case 1:
- // left = lb_emit_conv(p, left, t_u8);
- // right = lb_emit_conv(p, right, t_u8);
- // break;
- // case 2:
- // left = lb_emit_conv(p, left, t_u16);
- // right = lb_emit_conv(p, right, t_u16);
- // break;
- // case 4:
- // left = lb_emit_conv(p, left, t_u32);
- // right = lb_emit_conv(p, right, t_u32);
- // break;
- // case 8:
- // left = lb_emit_conv(p, left, t_u64);
- // right = lb_emit_conv(p, right, t_u64);
- // break;
- // default: GB_PANIC("Unknown integer size"); break;
- // }
- // }
-
lt = left.type;
rt = right.type;
i64 ls = type_size_of(lt);
i64 rs = type_size_of(rt);
+
+ // NOTE(bill): Quick heuristic, larger types are usually the target type
if (ls < rs) {
left = lb_emit_conv(p, left, rt);
} else if (ls > rs) {
right = lb_emit_conv(p, right, lt);
} else {
- right = lb_emit_conv(p, right, lt);
+ if (is_type_union(rt)) {
+ left = lb_emit_conv(p, left, rt);
+ } else {
+ right = lb_emit_conv(p, right, lt);
+ }
}
}
@@ -10548,28 +10944,12 @@ lbValue lb_emit_comp(lbProcedure *p, TokenKind op_kind, lbValue left, lbValue ri
}
- if (is_type_struct(a) && is_type_comparable(a)) {
- lbValue left_ptr = lb_address_from_load_or_generate_local(p, left);
- lbValue right_ptr = lb_address_from_load_or_generate_local(p, right);
- lbValue res = {};
- if (is_type_simple_compare(a)) {
- // TODO(bill): Test to see if this is actually faster!!!!
- auto args = array_make<lbValue>(permanent_allocator(), 3);
- args[0] = lb_emit_conv(p, left_ptr, t_rawptr);
- args[1] = lb_emit_conv(p, right_ptr, t_rawptr);
- args[2] = lb_const_int(p->module, t_int, type_size_of(a));
- res = lb_emit_runtime_call(p, "memory_equal", args);
- } else {
- lbValue value = lb_get_equal_proc_for_type(p->module, a);
- auto args = array_make<lbValue>(permanent_allocator(), 2);
- args[0] = lb_emit_conv(p, left_ptr, t_rawptr);
- args[1] = lb_emit_conv(p, right_ptr, t_rawptr);
- res = lb_emit_call(p, value, args);
- }
- if (op_kind == Token_NotEq) {
- res = lb_emit_unary_arith(p, Token_Not, res, res.type);
- }
- return res;
+ if ((is_type_struct(a) || is_type_union(a)) && is_type_comparable(a)) {
+ return lb_compare_records(p, op_kind, left, right, a);
+ }
+
+ if ((is_type_struct(b) || is_type_union(b)) && is_type_comparable(b)) {
+ return lb_compare_records(p, op_kind, left, right, b);
}
if (is_type_string(a)) {
@@ -10783,12 +11163,9 @@ lbValue lb_emit_comp(lbProcedure *p, TokenKind op_kind, lbValue left, lbValue ri
lbValue lb_generate_anonymous_proc_lit(lbModule *m, String const &prefix_name, Ast *expr, lbProcedure *parent) {
- auto *found = map_get(&m->anonymous_proc_lits, hash_pointer(expr));
- if (found != nullptr) {
- lbValue value = {};
- value.value = (*found)->value;
- value.type = (*found)->type;
- return value;
+ lbProcedure **found = map_get(&m->gen->anonymous_proc_lits, hash_pointer(expr));
+ if (found) {
+ return lb_find_procedure_value_from_entity(m, (*found)->entity);
}
ast_node(pl, ProcLit, expr);
@@ -10797,7 +11174,7 @@ lbValue lb_generate_anonymous_proc_lit(lbModule *m, String const &prefix_name, A
// parent$count
isize name_len = prefix_name.len + 1 + 8 + 1;
char *name_text = gb_alloc_array(permanent_allocator(), char, name_len);
- i32 name_id = cast(i32)m->anonymous_proc_lits.entries.count;
+ i32 name_id = cast(i32)m->gen->anonymous_proc_lits.entries.count;
name_len = gb_snprintf(name_text, name_len, "%.*s$anon-%d", LIT(prefix_name), name_id);
String name = make_string((u8 *)name_text, name_len-1);
@@ -10811,6 +11188,7 @@ lbValue lb_generate_anonymous_proc_lit(lbModule *m, String const &prefix_name, A
Entity *e = alloc_entity_procedure(nullptr, token, type, pl->tags);
e->file = expr->file;
e->decl_info = pl->decl;
+ e->code_gen_module = m;
lbProcedure *p = lb_create_procedure(m, e);
lbValue value = {};
@@ -10825,11 +11203,59 @@ lbValue lb_generate_anonymous_proc_lit(lbModule *m, String const &prefix_name, A
}
map_set(&m->anonymous_proc_lits, hash_pointer(expr), p);
+ map_set(&m->gen->anonymous_proc_lits, hash_pointer(expr), p);
return value;
}
-lbValue lb_emit_union_cast(lbProcedure *p, lbValue value, Type *type, TokenPos pos, bool do_conversion_check=true) {
+lbValue lb_emit_union_cast_only_ok_check(lbProcedure *p, lbValue value, Type *type, TokenPos pos) {
+ GB_ASSERT(is_type_tuple(type));
+ lbModule *m = p->module;
+
+ Type *src_type = value.type;
+ bool is_ptr = is_type_pointer(src_type);
+
+
+ // IMPORTANT NOTE(bill): This assumes that the value is completely ignored
+	// so when it does an assignment, it completely ignores the value.
+ // Just make it two booleans and ignore the first one
+ //
+ // _, ok := x.(T);
+ //
+ Type *ok_type = type->Tuple.variables[1]->type;
+ Type *gen_tuple_types[2] = {};
+ gen_tuple_types[0] = ok_type;
+ gen_tuple_types[1] = ok_type;
+
+ Type *gen_tuple = alloc_type_tuple_from_field_types(gen_tuple_types, gb_count_of(gen_tuple_types), false, true);
+
+ lbAddr v = lb_add_local_generated(p, gen_tuple, false);
+
+ if (is_ptr) {
+ value = lb_emit_load(p, value);
+ }
+ Type *src = base_type(type_deref(src_type));
+ GB_ASSERT_MSG(is_type_union(src), "%s", type_to_string(src_type));
+ Type *dst = type->Tuple.variables[0]->type;
+
+ lbValue cond = {};
+
+ if (is_type_union_maybe_pointer(src)) {
+ lbValue data = lb_emit_transmute(p, value, dst);
+ cond = lb_emit_comp_against_nil(p, Token_NotEq, data);
+ } else {
+ lbValue tag = lb_emit_union_tag_value(p, value);
+ lbValue dst_tag = lb_const_union_tag(m, src, dst);
+ cond = lb_emit_comp(p, Token_CmpEq, tag, dst_tag);
+ }
+
+ lbValue gep1 = lb_emit_struct_ep(p, v.addr, 1);
+ lb_emit_store(p, gep1, cond);
+
+ return lb_addr_load(p, v);
+}
+
+lbValue lb_emit_union_cast(lbProcedure *p, lbValue value, Type *type, TokenPos pos) {
lbModule *m = p->module;
Type *src_type = value.type;
@@ -10893,7 +11319,7 @@ lbValue lb_emit_union_cast(lbProcedure *p, lbValue value, Type *type, TokenPos p
lb_start_block(p, end_block);
if (!is_tuple) {
- if (do_conversion_check) {
+ {
// NOTE(bill): Panic on invalid conversion
Type *dst_type = tuple->Tuple.variables[0]->type;
@@ -10981,15 +11407,66 @@ lbValue lb_emit_any_cast(lbProcedure *p, lbValue value, Type *type, TokenPos pos
}
+lbValue lb_find_ident(lbProcedure *p, lbModule *m, Entity *e, Ast *expr) {
+ auto *found = map_get(&m->values, hash_entity(e));
+ if (found) {
+ auto v = *found;
+ // NOTE(bill): This is because pointers are already pointers in LLVM
+ if (is_type_proc(v.type)) {
+ return v;
+ }
+ return lb_emit_load(p, v);
+ } else if (e != nullptr && e->kind == Entity_Variable) {
+ return lb_addr_load(p, lb_build_addr(p, expr));
+ }
+
+ if (e->kind == Entity_Procedure) {
+ return lb_find_procedure_value_from_entity(m, e);
+ }
+ if (USE_SEPARTE_MODULES) {
+ lbModule *other_module = lb_pkg_module(m->gen, e->pkg);
+ if (other_module != m) {
+
+ String name = lb_get_entity_name(other_module, e);
+
+ lbValue g = {};
+ g.value = LLVMAddGlobal(m->mod, lb_type(m, e->type), alloc_cstring(permanent_allocator(), name));
+ g.type = alloc_type_pointer(e->type);
+ LLVMSetLinkage(g.value, LLVMExternalLinkage);
+
+ lb_add_entity(m, e, g);
+ lb_add_member(m, name, g);
+ return lb_emit_load(p, g);
+ }
+ }
+
+ String pkg = {};
+ if (e->pkg) {
+ pkg = e->pkg->name;
+ }
+ gb_printf_err("Error in: %s\n", token_pos_to_string(ast_token(expr).pos));
+ GB_PANIC("nullptr value for expression from identifier: %.*s.%.*s (%p) : %s @ %p", LIT(pkg), LIT(e->token.string), e, type_to_string(e->type), expr);
+ return {};
+}
+
+bool lb_is_expr_constant_zero(Ast *expr) {
+ GB_ASSERT(expr != nullptr);
+ auto v = exact_value_to_integer(expr->tav.value);
+ if (v.kind == ExactValue_Integer) {
+ return big_int_cmp_zero(&v.value_integer) == 0;
+ }
+ return false;
+}
+
lbValue lb_build_expr(lbProcedure *p, Ast *expr) {
lbModule *m = p->module;
- u64 prev_state_flags = p->module->state_flags;
- defer (p->module->state_flags = prev_state_flags);
+ u16 prev_state_flags = p->state_flags;
+ defer (p->state_flags = prev_state_flags);
if (expr->state_flags != 0) {
- u64 in = expr->state_flags;
- u64 out = p->module->state_flags;
+ u16 in = expr->state_flags;
+ u16 out = p->state_flags;
if (in & StateFlag_bounds_check) {
out |= StateFlag_bounds_check;
@@ -10999,7 +11476,7 @@ lbValue lb_build_expr(lbProcedure *p, Ast *expr) {
out &= ~StateFlag_bounds_check;
}
- p->module->state_flags = out;
+ p->state_flags = out;
}
expr = unparen_expr(expr);
@@ -11009,6 +11486,12 @@ lbValue lb_build_expr(lbProcedure *p, Ast *expr) {
GB_ASSERT_MSG(tv.mode != Addressing_Invalid, "invalid expression '%s' (tv.mode = %d, tv.type = %s) @ %s\n Current Proc: %.*s : %s", expr_to_string(expr), tv.mode, type_to_string(tv.type), token_pos_to_string(expr_pos), LIT(p->name), type_to_string(p->type));
if (tv.value.kind != ExactValue_Invalid) {
+		// NOTE(bill): The commented out code below is for debug purposes only
+ // GB_ASSERT_MSG(!is_type_untyped(tv.type), "%s @ %s\n%s", type_to_string(tv.type), token_pos_to_string(expr_pos), expr_to_string(expr));
+ // if (is_type_untyped(tv.type)) {
+ // gb_printf_err("%s %s\n", token_pos_to_string(expr_pos), expr_to_string(expr));
+ // }
+
// NOTE(bill): Short on constant values
return lb_const_value(p->module, tv.type, tv.value);
}
@@ -11032,7 +11515,7 @@ lbValue lb_build_expr(lbProcedure *p, Ast *expr) {
case_ast_node(bd, BasicDirective, expr);
TokenPos pos = bd->token.pos;
- GB_PANIC("Non-constant basic literal %s - %.*s", token_pos_to_string(pos), LIT(bd->name));
+ GB_PANIC("Non-constant basic literal %s - %.*s", token_pos_to_string(pos), LIT(bd->name.string));
case_end;
case_ast_node(i, Implicit, expr);
@@ -11070,24 +11553,7 @@ lbValue lb_build_expr(lbProcedure *p, Ast *expr) {
}
GB_ASSERT(e->kind != Entity_ProcGroup);
- auto *found = map_get(&p->module->values, hash_entity(e));
- if (found) {
- auto v = *found;
- // NOTE(bill): This is because pointers are already pointers in LLVM
- if (is_type_proc(v.type)) {
- return v;
- }
- return lb_emit_load(p, v);
- } else if (e != nullptr && e->kind == Entity_Variable) {
- return lb_addr_load(p, lb_build_addr(p, expr));
- }
- gb_printf_err("Error in: %s\n", token_pos_to_string(i->token.pos));
- String pkg = {};
- if (e->pkg) {
- pkg = e->pkg->name;
- }
- GB_PANIC("nullptr value for expression from identifier: %.*s.%.*s (%p) : %s @ %p", LIT(pkg), LIT(e->token.string), e, type_to_string(e->type), expr);
- return {};
+ return lb_find_ident(p, m, e, expr);
case_end;
case_ast_node(de, DerefExpr, expr);
@@ -11114,47 +11580,6 @@ lbValue lb_build_expr(lbProcedure *p, Ast *expr) {
return lb_build_expr(p, se->call);
case_end;
- case_ast_node(te, TernaryExpr, expr);
- LLVMValueRef incoming_values[2] = {};
- LLVMBasicBlockRef incoming_blocks[2] = {};
-
- GB_ASSERT(te->y != nullptr);
- lbBlock *then = lb_create_block(p, "if.then");
- lbBlock *done = lb_create_block(p, "if.done"); // NOTE(bill): Append later
- lbBlock *else_ = lb_create_block(p, "if.else");
-
- lbValue cond = lb_build_cond(p, te->cond, then, else_);
- lb_start_block(p, then);
-
- Type *type = default_type(type_of_expr(expr));
-
- lb_open_scope(p, nullptr);
- incoming_values[0] = lb_emit_conv(p, lb_build_expr(p, te->x), type).value;
- lb_close_scope(p, lbDeferExit_Default, nullptr);
-
- lb_emit_jump(p, done);
- lb_start_block(p, else_);
-
- lb_open_scope(p, nullptr);
- incoming_values[1] = lb_emit_conv(p, lb_build_expr(p, te->y), type).value;
- lb_close_scope(p, lbDeferExit_Default, nullptr);
-
- lb_emit_jump(p, done);
- lb_start_block(p, done);
-
- lbValue res = {};
- res.value = LLVMBuildPhi(p->builder, lb_type(p->module, type), "");
- res.type = type;
-
- GB_ASSERT(p->curr_block->preds.count >= 2);
- incoming_blocks[0] = p->curr_block->preds[0]->block;
- incoming_blocks[1] = p->curr_block->preds[1]->block;
-
- LLVMAddIncoming(res.value, incoming_values, incoming_blocks, 2);
-
- return res;
- case_end;
-
case_ast_node(te, TernaryIfExpr, expr);
LLVMValueRef incoming_values[2] = {};
LLVMBasicBlockRef incoming_blocks[2] = {};
@@ -11213,6 +11638,10 @@ lbValue lb_build_expr(lbProcedure *p, Ast *expr) {
lbValue e = lb_build_expr(p, ta->expr);
Type *t = type_deref(e.type);
if (is_type_union(t)) {
+ if (ta->ignores[0]) {
+ // NOTE(bill): This is not needed for optimization levels other than 0
+ return lb_emit_union_cast_only_ok_check(p, e, type, pos);
+ }
return lb_emit_union_cast(p, e, type, pos);
} else if (is_type_any(t)) {
return lb_emit_any_cast(p, e, type, pos);
@@ -11358,6 +11787,13 @@ lbValue lb_build_expr(lbProcedure *p, Ast *expr) {
case_end;
case_ast_node(se, SliceExpr, expr);
+ if (is_type_slice(type_of_expr(se->expr))) {
+ // NOTE(bill): Quick optimization
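+			// Slicing an existing slice with no upper bound and a zero or absent lower
+			// bound (s[:] or s[0:]) is an identity operation, so reuse the operand as-is.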
+ if (se->high == nullptr &&
+ (se->low == nullptr || lb_is_expr_constant_zero(se->low))) {
+ return lb_build_expr(p, se->expr);
+ }
+ }
return lb_addr_load(p, lb_build_addr(p, expr));
case_end;
@@ -11436,7 +11872,13 @@ lbValue lb_get_using_variable(lbProcedure *p, Entity *e) {
GB_ASSERT(v.value != nullptr);
GB_ASSERT_MSG(parent->type == type_deref(v.type), "%s %s", type_to_string(parent->type), type_to_string(v.type));
lbValue ptr = lb_emit_deep_field_gep(p, v, sel);
- lb_add_debug_local_variable(p, ptr.value, e->type, e->token);
+ if (parent->scope) {
+ if ((parent->scope->flags & (ScopeFlag_File|ScopeFlag_Pkg)) == 0) {
+ lb_add_debug_local_variable(p, ptr.value, e->type, e->token);
+ }
+ } else {
+ lb_add_debug_local_variable(p, ptr.value, e->type, e->token);
+ }
return ptr;
}
@@ -11462,11 +11904,14 @@ lbAddr lb_build_addr_from_entity(lbProcedure *p, Entity *e, Ast *expr) {
return lb_get_soa_variable_addr(p, e);
}
+
if (v.value == nullptr) {
- error(expr, "%.*s Unknown value: %.*s, entity: %p %.*s",
- LIT(p->name),
- LIT(e->token.string), e, LIT(entity_strings[e->kind]));
- GB_PANIC("Unknown value");
+ return lb_addr(lb_find_value_from_entity(p->module, e));
+
+ // error(expr, "%.*s Unknown value: %.*s, entity: %p %.*s",
+ // LIT(p->name),
+ // LIT(e->token.string), e, LIT(entity_strings[e->kind]));
+ // GB_PANIC("Unknown value");
}
return lb_addr(v);
@@ -11813,27 +12258,6 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) {
return lb_addr(val);
}
- if (!is_type_indexable(t)) {
- AtomOpMapEntry *found = map_get(&p->module->info->atom_op_map, hash_pointer(expr));
- if (found != nullptr) {
- if (found->kind == TypeAtomOp_index_get) {
- return lb_build_addr(p, found->node);
- } else if (found->kind == TypeAtomOp_index_get_ptr) {
- return lb_addr(lb_build_expr(p, found->node));
- } else if (found->kind == TypeAtomOp_index_set) {
- lbValue ptr = lb_build_addr_ptr(p, ie->expr);
- if (deref) {
- ptr = lb_emit_load(p, ptr);
- }
-
- lbAddr addr = {lbAddr_AtomOp_index_set};
- addr.addr = ptr;
- addr.index_set.index = lb_build_expr(p, ie->index);
- addr.index_set.node = found->node;
- return addr;
- }
- }
- }
GB_ASSERT_MSG(is_type_indexable(t), "%s %s", type_to_string(t), expr_to_string(expr));
if (is_type_map(t)) {
@@ -11969,6 +12393,7 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) {
case_end;
case_ast_node(se, SliceExpr, expr);
+
lbValue low = lb_const_int(p->module, t_int, 0);
lbValue high = {};
@@ -11977,36 +12402,6 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) {
bool no_indices = se->low == nullptr && se->high == nullptr;
- {
- Type *type = base_type(type_of_expr(se->expr));
- if (type->kind == Type_Struct && !is_type_soa_struct(type)) {
- TypeAtomOpTable *atom_op_table = type->Struct.atom_op_table;
- if (atom_op_table != nullptr && atom_op_table->op[TypeAtomOp_slice]) {
- AtomOpMapEntry *found = map_get(&p->module->info->atom_op_map, hash_pointer(expr));
- if (found) {
- lbValue base = lb_build_expr(p, found->node);
-
- Type *slice_type = base.type;
- lbValue len = lb_slice_len(p, base);
- if (high.value == nullptr) high = len;
-
- if (!no_indices) {
- lb_emit_slice_bounds_check(p, se->open, low, high, len, se->low != nullptr);
- }
-
-
- lbValue elem = lb_emit_ptr_offset(p, lb_slice_elem(p, base), low);
- lbValue new_len = lb_emit_arith(p, Token_Sub, high, low, t_int);
-
- lbAddr slice = lb_add_local_generated(p, slice_type, false);
- lb_fill_slice(p, slice, elem, new_len);
- return slice;
- }
- }
- }
- }
-
-
lbAddr addr = lb_build_addr(p, se->expr);
lbValue base = lb_addr_load(p, addr);
Type *type = base_type(base.type);
@@ -12780,8 +13175,16 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) {
void lb_init_module(lbModule *m, Checker *c) {
m->info = &c->info;
- m->ctx = LLVMGetGlobalContext();
- m->mod = LLVMModuleCreateWithNameInContext("odin_module", m->ctx);
+ gbString module_name = gb_string_make(heap_allocator(), "odin_package");
+ if (m->pkg) {
+ module_name = gb_string_appendc(module_name, "-");
+ module_name = gb_string_append_length(module_name, m->pkg->name.text, m->pkg->name.len);
+ } else if (USE_SEPARTE_MODULES) {
+ module_name = gb_string_appendc(module_name, "-builtin");
+ }
+
+ m->ctx = LLVMContextCreate();
+ m->mod = LLVMModuleCreateWithNameInContext(module_name ? module_name : "odin_package", m->ctx);
// m->debug_builder = nullptr;
if (build_context.ODIN_DEBUG) {
enum {DEBUG_METADATA_VERSION = 3};
@@ -12808,10 +13211,6 @@ void lb_init_module(lbModule *m, Checker *c) {
m->debug_builder = LLVMCreateDIBuilder(m->mod);
}
- m->state_flags = 0;
- m->state_flags |= StateFlag_bounds_check;
-
- gb_mutex_init(&m->mutex);
gbAllocator a = heap_allocator();
map_init(&m->types, a);
map_init(&m->llvm_types, a);
@@ -12870,6 +13269,7 @@ bool lb_init_generator(lbGenerator *gen, Checker *c) {
}
gbAllocator ha = heap_allocator();
array_init(&gen->output_object_paths, ha);
+ array_init(&gen->output_temp_paths, ha);
gen->output_base = path_to_full_path(ha, gen->output_base);
@@ -12879,8 +13279,34 @@ bool lb_init_generator(lbGenerator *gen, Checker *c) {
gen->info = &c->info;
- lb_init_module(&gen->module, c);
+ map_init(&gen->modules, permanent_allocator(), gen->info->packages.entries.count*2);
+ map_init(&gen->modules_through_ctx, permanent_allocator(), gen->info->packages.entries.count*2);
+ map_init(&gen->anonymous_proc_lits, heap_allocator(), 1024);
+
+ gb_mutex_init(&gen->mutex);
+ if (USE_SEPARTE_MODULES) {
+ for_array(i, gen->info->packages.entries) {
+ AstPackage *pkg = gen->info->packages.entries[i].value;
+
+ auto m = gb_alloc_item(permanent_allocator(), lbModule);
+ m->pkg = pkg;
+ m->gen = gen;
+ map_set(&gen->modules, hash_pointer(pkg), m);
+ lb_init_module(m, c);
+ }
+ }
+
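+	// The nullptr key presumably routes entities without an owning package (compiler
+	// builtins) to the default module; see the "-builtin" module name above.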
+ gen->default_module.gen = gen;
+ map_set(&gen->modules, hash_pointer(nullptr), &gen->default_module);
+ lb_init_module(&gen->default_module, c);
+
+
+ for_array(i, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[i].value;
+ LLVMContextRef ctx = LLVMGetModuleContext(m->mod);
+ map_set(&gen->modules_through_ctx, hash_pointer(ctx), m);
+ }
return true;
}
@@ -12891,8 +13317,10 @@ lbAddr lb_add_global_generated(lbModule *m, Type *type, lbValue value) {
isize max_len = 7+8+1;
u8 *str = cast(u8 *)gb_alloc_array(permanent_allocator(), u8, max_len);
- isize len = gb_snprintf(cast(char *)str, max_len, "ggv$%x", m->global_generated_index);
- m->global_generated_index++;
+
+ u32 id = cast(u32)gb_atomic32_fetch_add(&m->gen->global_generated_index, 1);
+
+ isize len = gb_snprintf(cast(char *)str, max_len, "ggv$%x", id);
String name = make_string(str, len-1);
Scope *scope = nullptr;
@@ -12901,7 +13329,7 @@ lbAddr lb_add_global_generated(lbModule *m, Type *type, lbValue value) {
g.type = alloc_type_pointer(type);
g.value = LLVMAddGlobal(m->mod, lb_type(m, type), cast(char const *)str);
if (value.value != nullptr) {
- GB_ASSERT(LLVMIsConstant(value.value));
+ GB_ASSERT_MSG(LLVMIsConstant(value.value), LLVMPrintValueToString(value.value));
LLVMSetInitializer(g.value, value.value);
} else {
LLVMSetInitializer(g.value, LLVMConstNull(lb_type(m, type)));
@@ -12915,17 +13343,12 @@ lbAddr lb_add_global_generated(lbModule *m, Type *type, lbValue value) {
lbValue lb_find_runtime_value(lbModule *m, String const &name) {
AstPackage *p = m->info->runtime_package;
Entity *e = scope_lookup_current(p->scope, name);
- lbValue *found = map_get(&m->values, hash_entity(e));
- GB_ASSERT_MSG(found != nullptr, "Unable to find runtime value '%.*s'", LIT(name));
- lbValue value = *found;
- return value;
+ return lb_find_value_from_entity(m, e);
}
lbValue lb_find_package_value(lbModule *m, String const &pkg, String const &name) {
Entity *e = find_entity_in_pkg(m->info, pkg, name);
lbValue *found = map_get(&m->values, hash_entity(e));
- GB_ASSERT_MSG(found != nullptr, "Unable to find value '%.*s.%.*s'", LIT(pkg), LIT(name));
- lbValue value = *found;
- return value;
+ return lb_find_value_from_entity(m, e);
}
lbValue lb_get_type_info_ptr(lbModule *m, Type *type) {
@@ -12940,32 +13363,37 @@ lbValue lb_get_type_info_ptr(lbModule *m, Type *type) {
lbValue res = {};
res.type = t_type_info_ptr;
- res.value = LLVMConstGEP(lb_global_type_info_data.addr.value, indices, cast(unsigned)gb_count_of(indices));
+ res.value = LLVMConstGEP(lb_global_type_info_data_ptr(m).value, indices, cast(unsigned)gb_count_of(indices));
return res;
}
lbValue lb_type_info_member_types_offset(lbProcedure *p, isize count) {
+ GB_ASSERT(p->module == &p->module->gen->default_module);
lbValue offset = lb_emit_array_epi(p, lb_global_type_info_member_types.addr, lb_global_type_info_member_types_index);
lb_global_type_info_member_types_index += cast(i32)count;
return offset;
}
lbValue lb_type_info_member_names_offset(lbProcedure *p, isize count) {
+ GB_ASSERT(p->module == &p->module->gen->default_module);
lbValue offset = lb_emit_array_epi(p, lb_global_type_info_member_names.addr, lb_global_type_info_member_names_index);
lb_global_type_info_member_names_index += cast(i32)count;
return offset;
}
lbValue lb_type_info_member_offsets_offset(lbProcedure *p, isize count) {
+ GB_ASSERT(p->module == &p->module->gen->default_module);
lbValue offset = lb_emit_array_epi(p, lb_global_type_info_member_offsets.addr, lb_global_type_info_member_offsets_index);
lb_global_type_info_member_offsets_index += cast(i32)count;
return offset;
}
lbValue lb_type_info_member_usings_offset(lbProcedure *p, isize count) {
+ GB_ASSERT(p->module == &p->module->gen->default_module);
lbValue offset = lb_emit_array_epi(p, lb_global_type_info_member_usings.addr, lb_global_type_info_member_usings_index);
lb_global_type_info_member_usings_index += cast(i32)count;
return offset;
}
lbValue lb_type_info_member_tags_offset(lbProcedure *p, isize count) {
+ GB_ASSERT(p->module == &p->module->gen->default_module);
lbValue offset = lb_emit_array_epi(p, lb_global_type_info_member_tags.addr, lb_global_type_info_member_tags_index);
lb_global_type_info_member_tags_index += cast(i32)count;
return offset;
@@ -13007,12 +13435,12 @@ void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup type_info da
{
// NOTE(bill): Set the type_table slice with the global backing array
lbValue global_type_table = lb_find_runtime_value(m, str_lit("type_table"));
- Type *type = base_type(lb_addr_type(lb_global_type_info_data));
+ Type *type = base_type(lb_global_type_info_data_entity->type);
GB_ASSERT(is_type_array(type));
LLVMValueRef indices[2] = {llvm_zero(m), llvm_zero(m)};
LLVMValueRef values[2] = {
- LLVMConstInBoundsGEP(lb_global_type_info_data.addr.value, indices, gb_count_of(indices)),
+ LLVMConstInBoundsGEP(lb_global_type_info_data_ptr(m).value, indices, gb_count_of(indices)),
LLVMConstInt(lb_type(m, t_int), type->Array.count, true),
};
LLVMValueRef slice = llvm_const_named_struct(llvm_addr_type(global_type_table), values, gb_count_of(values));
@@ -13044,7 +13472,7 @@ void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup type_info da
}
lbValue tag = {};
- lbValue ti_ptr = lb_emit_array_epi(p, lb_global_type_info_data.addr, cast(i32)entry_index);
+ lbValue ti_ptr = lb_emit_array_epi(p, lb_global_type_info_data_ptr(m), cast(i32)entry_index);
lbValue variant_ptr = lb_emit_struct_ep(p, ti_ptr, 4);
lbValue type_info_flags = lb_const_int(p->module, t_type_info_flags, type_info_flags_of_type(t));
@@ -13447,7 +13875,7 @@ void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup type_info da
tag = lb_const_ptr_cast(m, variant_ptr, t_type_info_union_ptr);
{
- LLVMValueRef vals[6] = {};
+ LLVMValueRef vals[7] = {};
isize variant_count = gb_max(0, t->Union.variants.count);
lbValue memory_types = lb_type_info_member_types_offset(p, variant_count);
@@ -13476,10 +13904,19 @@ void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup type_info da
vals[2] = LLVMConstNull(lb_type(m, t_type_info_ptr));
}
- vals[3] = lb_const_bool(m, t_bool, t->Union.custom_align != 0).value;
- vals[4] = lb_const_bool(m, t_bool, t->Union.no_nil).value;
- vals[5] = lb_const_bool(m, t_bool, t->Union.maybe).value;
+ if (is_type_comparable(t) && !is_type_simple_compare(t)) {
+ vals[3] = lb_get_equal_proc_for_type(m, t).value;
+ }
+
+ vals[4] = lb_const_bool(m, t_bool, t->Union.custom_align != 0).value;
+ vals[5] = lb_const_bool(m, t_bool, t->Union.no_nil).value;
+ vals[6] = lb_const_bool(m, t_bool, t->Union.maybe).value;
+ for (isize i = 0; i < gb_count_of(vals); i++) {
+ if (vals[i] == nullptr) {
+ vals[i] = LLVMConstNull(lb_type(m, get_struct_field_type(tag.type, i)));
+ }
+ }
lbValue res = {};
res.type = type_deref(tag.type);
@@ -13691,18 +14128,445 @@ void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup type_info da
}
}
+struct lbGlobalVariable {
+ lbValue var;
+ lbValue init;
+ DeclInfo *decl;
+ bool is_initialized;
+};
+
+lbProcedure *lb_create_startup_type_info(lbModule *m) {
+ LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod);
+ lb_populate_function_pass_manager(m, default_function_pass_manager, false, build_context.optimization_level);
+ LLVMFinalizeFunctionPassManager(default_function_pass_manager);
+
+ Type *params = alloc_type_tuple();
+ Type *results = alloc_type_tuple();
+
+ Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_CDecl);
+
+ lbProcedure *p = lb_create_dummy_procedure(m, str_lit(LB_STARTUP_TYPE_INFO_PROC_NAME), proc_type);
+ p->is_startup = true;
+
+ lb_begin_procedure_body(p);
+
+ lb_setup_type_info_data(p);
+
+ lb_end_procedure_body(p);
+
+ if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
+ gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main");
+ LLVMDumpValue(p->value);
+ gb_printf_err("\n\n\n\n");
+ LLVMVerifyFunction(p->value, LLVMAbortProcessAction);
+ }
+
+ lb_run_function_pass_manager(default_function_pass_manager, p);
+
+ return p;
+}
+
+lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProcedure *startup_type_info, Array<lbGlobalVariable> &global_variables) { // Startup Runtime
+ LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(main_module->mod);
+ lb_populate_function_pass_manager(main_module, default_function_pass_manager, false, build_context.optimization_level);
+ LLVMFinalizeFunctionPassManager(default_function_pass_manager);
+
+ Type *params = alloc_type_tuple();
+ Type *results = alloc_type_tuple();
+
+ Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_CDecl);
+
+ lbProcedure *p = lb_create_dummy_procedure(main_module, str_lit(LB_STARTUP_RUNTIME_PROC_NAME), proc_type);
+ p->is_startup = true;
+
+ lb_begin_procedure_body(p);
+
+ LLVMBuildCall2(p->builder, LLVMGetElementType(lb_type(main_module, startup_type_info->type)), startup_type_info->value, nullptr, 0, "");
+
+ for_array(i, global_variables) {
+ auto *var = &global_variables[i];
+ if (var->is_initialized) {
+ continue;
+ }
+
+ lbModule *entity_module = main_module;
+
+ Entity *e = var->decl->entity;
+ GB_ASSERT(e->kind == Entity_Variable);
+ e->code_gen_module = entity_module;
+
+ if (var->decl->init_expr != nullptr) {
+ // gb_printf_err("%s\n", expr_to_string(var->decl->init_expr));
+ lbValue init = lb_build_expr(p, var->decl->init_expr);
+ LLVMValueKind value_kind = LLVMGetValueKind(init.value);
+ // gb_printf_err("%s %d\n", LLVMPrintValueToString(init.value));
+
+ if (lb_is_const_or_global(init)) {
+ if (!var->is_initialized) {
+ LLVMSetInitializer(var->var.value, init.value);
+ var->is_initialized = true;
+ continue;
+ }
+ } else {
+ var->init = init;
+ }
+ }
+
+ if (var->init.value != nullptr) {
+ GB_ASSERT(!var->is_initialized);
+ Type *t = type_deref(var->var.type);
+
+ if (is_type_any(t)) {
+ // NOTE(bill): Edge case for 'any' type
+ Type *var_type = default_type(var->init.type);
+ lbAddr g = lb_add_global_generated(main_module, var_type, var->init);
+ lb_addr_store(p, g, var->init);
+ lbValue gp = lb_addr_get_ptr(p, g);
+
+ lbValue data = lb_emit_struct_ep(p, var->var, 0);
+ lbValue ti = lb_emit_struct_ep(p, var->var, 1);
+ lb_emit_store(p, data, lb_emit_conv(p, gp, t_rawptr));
+ lb_emit_store(p, ti, lb_type_info(main_module, var_type));
+ } else {
+ LLVMTypeRef pvt = LLVMTypeOf(var->var.value);
+ LLVMTypeRef vt = LLVMGetElementType(pvt);
+ lbValue src0 = lb_emit_conv(p, var->init, t);
+ LLVMValueRef src = OdinLLVMBuildTransmute(p, src0.value, vt);
+ LLVMValueRef dst = var->var.value;
+ LLVMBuildStore(p->builder, src, dst);
+ }
+
+ var->is_initialized = true;
+ }
+ }
+
+
+ lb_end_procedure_body(p);
+
+ if (!main_module->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
+ gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main");
+ LLVMDumpValue(p->value);
+ gb_printf_err("\n\n\n\n");
+ LLVMVerifyFunction(p->value, LLVMAbortProcessAction);
+ }
+
+ lb_run_function_pass_manager(default_function_pass_manager, p);
+
+ return p;
+}
+
+
+lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *startup_runtime) {
+ LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod);
+ lb_populate_function_pass_manager(m, default_function_pass_manager, false, build_context.optimization_level);
+ LLVMFinalizeFunctionPassManager(default_function_pass_manager);
+
+ Type *params = alloc_type_tuple();
+ Type *results = alloc_type_tuple();
+
+ Type *t_ptr_cstring = alloc_type_pointer(t_cstring);
+
+ String name = str_lit("main");
+ if (build_context.metrics.os == TargetOs_windows && build_context.metrics.arch == TargetArch_386) {
+ name = str_lit("mainCRTStartup");
+ } else {
+ array_init(&params->Tuple.variables, permanent_allocator(), 2);
+ params->Tuple.variables[0] = alloc_entity_param(nullptr, make_token_ident("argc"), t_i32, false, true);
+ params->Tuple.variables[1] = alloc_entity_param(nullptr, make_token_ident("argv"), t_ptr_cstring, false, true);
+ }
+
+ array_init(&results->Tuple.variables, permanent_allocator(), 1);
+ results->Tuple.variables[0] = alloc_entity_param(nullptr, blank_token, t_i32, false, true);
+
+ Type *proc_type = alloc_type_proc(nullptr,
+ params, params->Tuple.variables.count,
+ results, results->Tuple.variables.count, false, ProcCC_CDecl);
+
+
+ lbProcedure *p = lb_create_dummy_procedure(m, name, proc_type);
+ p->is_startup = true;
+
+ lb_begin_procedure_body(p);
+
+ { // initialize `runtime.args__`
+ lbValue argc = {LLVMGetParam(p->value, 0), t_i32};
+ lbValue argv = {LLVMGetParam(p->value, 1), t_ptr_cstring};
+ LLVMSetValueName2(argc.value, "argc", 4);
+ LLVMSetValueName2(argv.value, "argv", 4);
+ argc = lb_emit_conv(p, argc, t_int);
+ lbAddr args = lb_addr(lb_find_runtime_value(p->module, str_lit("args__")));
+ lb_fill_slice(p, args, argv, argc);
+ }
+
+ LLVMBuildCall2(p->builder, LLVMGetElementType(lb_type(m, startup_runtime->type)), startup_runtime->value, nullptr, 0, "");
+
+ if (build_context.command_kind == Command_test) {
+ Type *t_Internal_Test = find_type_in_pkg(m->info, str_lit("testing"), str_lit("Internal_Test"));
+ Type *array_type = alloc_type_array(t_Internal_Test, m->info->testing_procedures.count);
+ Type *slice_type = alloc_type_slice(t_Internal_Test);
+ lbAddr all_tests_array_addr = lb_add_global_generated(p->module, array_type, {});
+ lbValue all_tests_array = lb_addr_get_ptr(p, all_tests_array_addr);
+
+ LLVMTypeRef lbt_Internal_Test = lb_type(m, t_Internal_Test);
+
+ LLVMValueRef indices[2] = {};
+ indices[0] = LLVMConstInt(lb_type(m, t_i32), 0, false);
+
+ for_array(i, m->info->testing_procedures) {
+ Entity *testing_proc = m->info->testing_procedures[i];
+ String name = testing_proc->token.string;
+
+ String pkg_name = {};
+ if (testing_proc->pkg != nullptr) {
+ pkg_name = testing_proc->pkg->name;
+ }
+ lbValue v_pkg = lb_find_or_add_entity_string(m, pkg_name);
+ lbValue v_name = lb_find_or_add_entity_string(m, name);
+ lbValue v_proc = lb_find_procedure_value_from_entity(m, testing_proc);
+
+ indices[1] = LLVMConstInt(lb_type(m, t_int), i, false);
+
+ LLVMValueRef vals[3] = {};
+ vals[0] = v_pkg.value;
+ vals[1] = v_name.value;
+ vals[2] = v_proc.value;
+ GB_ASSERT(LLVMIsConstant(vals[0]));
+ GB_ASSERT(LLVMIsConstant(vals[1]));
+ GB_ASSERT(LLVMIsConstant(vals[2]));
+
+ LLVMValueRef dst = LLVMConstInBoundsGEP(all_tests_array.value, indices, gb_count_of(indices));
+ LLVMValueRef src = llvm_const_named_struct(lbt_Internal_Test, vals, gb_count_of(vals));
+
+ LLVMBuildStore(p->builder, src, dst);
+ }
+
+ lbAddr all_tests_slice = lb_add_local_generated(p, slice_type, true);
+ lb_fill_slice(p, all_tests_slice,
+ lb_array_elem(p, all_tests_array),
+ lb_const_int(m, t_int, m->info->testing_procedures.count));
+
+
+ lbValue runner = lb_find_package_value(m, str_lit("testing"), str_lit("runner"));
+
+ auto args = array_make<lbValue>(heap_allocator(), 1);
+ args[0] = lb_addr_load(p, all_tests_slice);
+ lb_emit_call(p, runner, args);
+ } else {
+ lbValue entry_point = lb_find_procedure_value_from_entity(m, m->info->entry_point);
+ lb_emit_call(p, entry_point, {});
+ }
+
+ LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_i32), 0, false));
+
+ lb_end_procedure_body(p);
+
+ if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
+ gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main");
+ LLVMDumpValue(p->value);
+ gb_printf_err("\n\n\n\n");
+ LLVMVerifyFunction(p->value, LLVMAbortProcessAction);
+ }
+
+ lb_run_function_pass_manager(default_function_pass_manager, p);
+ return p;
+}
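
Taken together, lb_create_startup_type_info, lb_create_startup_runtime and lb_create_main_procedure emit a small bootstrap chain directly as LLVM IR: the synthesized entry point fills runtime.args__, calls the startup runtime (which first calls the type-info setup and then initializes the remaining globals), and finally calls either the user entry point or testing.runner. The sketch below is a rough C++ equivalent of that chain, included only as an illustration; the slice type and every name in it are hypothetical stand-ins, not the actual runtime symbols.

    // Illustration only: hypothetical stand-ins for the generated IR, not compiler code.
    #include <cstdio>

    struct CStringSlice { char **data; long long len; };   // ~ runtime.args__ ([]cstring)
    static CStringSlice runtime_args;                       // assumed name for the args slice

    static void startup_type_info() { /* fills the global type_info table (lb_setup_type_info_data) */ }

    static void startup_runtime() {
        startup_type_info();                                // emitted as the first call
        /* ...stores for globals whose initializers are not constant... */
    }

    static void user_entry_point() { std::puts("user main, or testing.runner under `odin test`"); }

    int main(int argc, char **argv) {                       // no argc/argv on windows/386 ("mainCRTStartup")
        runtime_args = CStringSlice{argv, argc};            // ~ lb_fill_slice on runtime.args__
        startup_runtime();                                  // LB_STARTUP_RUNTIME_PROC_NAME
        user_entry_point();
        return 0;                                           // LLVMBuildRet(... i32 0)
    }
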
+
+String lb_filepath_ll_for_module(lbModule *m) {
+ String path = m->gen->output_base;
+ if (m->pkg) {
+ path = concatenate3_strings(permanent_allocator(), path, STR_LIT("-"), m->pkg->name);
+ } else if (USE_SEPARTE_MODULES) {
+ path = concatenate_strings(permanent_allocator(), path, STR_LIT("-builtin"));
+ }
+ path = concatenate_strings(permanent_allocator(), path, STR_LIT(".ll"));
+
+ return path;
+}
+String lb_filepath_obj_for_module(lbModule *m) {
+ String path = m->gen->output_base;
+ if (m->pkg) {
+ path = concatenate3_strings(permanent_allocator(), path, STR_LIT("-"), m->pkg->name);
+ }
+
+ String ext = {};
+
+ if (build_context.build_mode == BuildMode_Assembly) {
+ ext = STR_LIT(".S");
+ } else {
+ switch (build_context.metrics.os) {
+ case TargetOs_windows:
+ ext = STR_LIT(".obj");
+ break;
+ case TargetOs_darwin:
+ case TargetOs_linux:
+ case TargetOs_essence:
+ ext = STR_LIT(".o");
+ break;
+ case TargetOs_js:
+ ext = STR_LIT(".wasm-obj");
+ break;
+ }
+ }
+
+ return concatenate_strings(permanent_allocator(), path, ext);
+}
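
As a concrete, made-up example of the naming scheme above, with output_base "demo" and a package named "fmt", these helpers would produce "demo-fmt.ll" and "demo-fmt.o" (or "demo-fmt.obj" on Windows). The standalone sketch below reproduces the common cases with hypothetical inputs; it is not compiler code and glosses over the USE_SEPARTE_MODULES and assembly-mode details.

    // Sketch with hypothetical inputs; mirrors the common cases of the two helpers above.
    #include <cstdio>
    #include <string>

    static std::string ll_path(std::string base, std::string const &pkg) {
        base += pkg.empty() ? "-builtin" : "-" + pkg;       // package-less default module (with separate modules) -> "-builtin"
        return base + ".ll";
    }

    static std::string obj_path(std::string base, std::string const &pkg, std::string const &os) {
        if (!pkg.empty()) base += "-" + pkg;
        std::string ext = (os == "windows") ? ".obj"
                        : (os == "js")      ? ".wasm-obj"
                        : ".o";                             // darwin/linux/essence; ".S" in assembly build mode
        return base + ext;
    }

    int main() {
        std::printf("%s\n", ll_path("demo", "fmt").c_str());              // demo-fmt.ll
        std::printf("%s\n", obj_path("demo", "fmt", "linux").c_str());    // demo-fmt.o
    }
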
+
+
+bool lb_is_module_empty(lbModule *m) {
+ if (LLVMGetFirstFunction(m->mod) == nullptr &&
+ LLVMGetFirstGlobal(m->mod) == nullptr) {
+ return true;
+ }
+ for (auto fn = LLVMGetFirstFunction(m->mod); fn != nullptr; fn = LLVMGetNextFunction(fn)) {
+ if (LLVMGetFirstBasicBlock(fn) != nullptr) {
+ return false;
+ }
+ }
+
+ for (auto g = LLVMGetFirstGlobal(m->mod); g != nullptr; g = LLVMGetNextGlobal(g)) {
+ if (LLVMGetLinkage(g) == LLVMExternalLinkage) {
+ continue;
+ }
+ if (!LLVMIsExternallyInitialized(g)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+struct lbLLVMEmitWorker {
+ LLVMTargetMachineRef target_machine;
+ LLVMCodeGenFileType code_gen_file_type;
+ String filepath_obj;
+ lbModule *m;
+};
+
+WORKER_TASK_PROC(lb_llvm_emit_worker_proc) {
+ GB_ASSERT(MULTITHREAD_OBJECT_GENERATION);
+
+ char *llvm_error = nullptr;
+
+ auto wd = cast(lbLLVMEmitWorker *)data;
+
+ if (LLVMTargetMachineEmitToFile(wd->target_machine, wd->m->mod, cast(char *)wd->filepath_obj.text, wd->code_gen_file_type, &llvm_error)) {
+ gb_printf_err("LLVM Error: %s\n", llvm_error);
+ gb_exit(1);
+ }
+
+ return 0;
+}
+
+WORKER_TASK_PROC(lb_llvm_function_pass_worker_proc) {
+ GB_ASSERT(MULTITHREAD_OBJECT_GENERATION);
+
+ auto m = cast(lbModule *)data;
+
+ LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod);
+ LLVMPassManagerRef function_pass_manager_minimal = LLVMCreateFunctionPassManagerForModule(m->mod);
+ LLVMPassManagerRef function_pass_manager_size = LLVMCreateFunctionPassManagerForModule(m->mod);
+ LLVMPassManagerRef function_pass_manager_speed = LLVMCreateFunctionPassManagerForModule(m->mod);
+
+ LLVMInitializeFunctionPassManager(default_function_pass_manager);
+ LLVMInitializeFunctionPassManager(function_pass_manager_minimal);
+ LLVMInitializeFunctionPassManager(function_pass_manager_size);
+ LLVMInitializeFunctionPassManager(function_pass_manager_speed);
+
+ lb_populate_function_pass_manager(m, default_function_pass_manager, false, build_context.optimization_level);
+ lb_populate_function_pass_manager_specific(m, function_pass_manager_minimal, 0);
+ lb_populate_function_pass_manager_specific(m, function_pass_manager_size, 1);
+ lb_populate_function_pass_manager_specific(m, function_pass_manager_speed, 2);
+
+ LLVMFinalizeFunctionPassManager(default_function_pass_manager);
+ LLVMFinalizeFunctionPassManager(function_pass_manager_minimal);
+ LLVMFinalizeFunctionPassManager(function_pass_manager_size);
+ LLVMFinalizeFunctionPassManager(function_pass_manager_speed);
+
+
+ LLVMPassManagerRef default_function_pass_manager_without_memcpy = LLVMCreateFunctionPassManagerForModule(m->mod);
+ LLVMInitializeFunctionPassManager(default_function_pass_manager_without_memcpy);
+ lb_populate_function_pass_manager(m, default_function_pass_manager_without_memcpy, true, build_context.optimization_level);
+ LLVMFinalizeFunctionPassManager(default_function_pass_manager_without_memcpy);
+
+
+ for_array(i, m->procedures_to_generate) {
+ lbProcedure *p = m->procedures_to_generate[i];
+ if (p->body != nullptr) { // Build Procedure
+ if (p->flags & lbProcedureFlag_WithoutMemcpyPass) {
+ lb_run_function_pass_manager(default_function_pass_manager_without_memcpy, p);
+ } else {
+ if (p->entity && p->entity->kind == Entity_Procedure) {
+ switch (p->entity->Procedure.optimization_mode) {
+ case ProcedureOptimizationMode_None:
+ case ProcedureOptimizationMode_Minimal:
+ lb_run_function_pass_manager(function_pass_manager_minimal, p);
+ break;
+ case ProcedureOptimizationMode_Size:
+ lb_run_function_pass_manager(function_pass_manager_size, p);
+ break;
+ case ProcedureOptimizationMode_Speed:
+ lb_run_function_pass_manager(function_pass_manager_speed, p);
+ break;
+ default:
+ lb_run_function_pass_manager(default_function_pass_manager, p);
+ break;
+ }
+ } else {
+ lb_run_function_pass_manager(default_function_pass_manager, p);
+ }
+ }
+ }
+ }
+
+ for_array(i, m->equal_procs.entries) {
+ lbProcedure *p = m->equal_procs.entries[i].value;
+ lb_run_function_pass_manager(default_function_pass_manager, p);
+ }
+ for_array(i, m->hasher_procs.entries) {
+ lbProcedure *p = m->hasher_procs.entries[i].value;
+ lb_run_function_pass_manager(default_function_pass_manager, p);
+ }
+
+ return 0;
+}
+
+
+struct lbLLVMModulePassWorkerData {
+ lbModule *m;
+ LLVMTargetMachineRef target_machine;
+};
+
+WORKER_TASK_PROC(lb_llvm_module_pass_worker_proc) {
+ GB_ASSERT(MULTITHREAD_OBJECT_GENERATION);
+
+ auto wd = cast(lbLLVMModulePassWorkerData *)data;
+
+ LLVMPassManagerRef module_pass_manager = LLVMCreatePassManager();
+ lb_populate_module_pass_manager(wd->target_machine, module_pass_manager, build_context.optimization_level);
+ LLVMRunPassManager(module_pass_manager, wd->m->mod);
+
+ return 0;
+}
+
+
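The worker procedures above are the units of parallelism for the new multi-module backend: lb_generate_code below queues one emit task per non-empty module onto the thread pool, pairing each module with its own target machine because target machines are not thread safe. The following is a minimal std::thread sketch of that fan-out shape only; it uses stand-in types rather than the gb thread pool and LLVM APIs used here.

    // Structural sketch only; 'Module', 'TargetMachine' and emit_object are stand-ins.
    #include <cstddef>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct Module        { const char *name; };
    struct TargetMachine { int id; };                        // one per module (not thread safe to share)

    static void emit_object(Module *m, TargetMachine *tm) {  // ~ lb_llvm_emit_worker_proc
        std::printf("emitting %s with target machine %d\n", m->name, tm->id);
    }

    int main() {
        std::vector<Module>        modules  = {{"runtime"}, {"fmt"}, {"main"}};
        std::vector<TargetMachine> machines = {{0}, {1}, {2}};

        std::vector<std::thread> workers;                    // ~ thread_pool_add_task + wait
        for (std::size_t i = 0; i < modules.size(); i++) {
            workers.emplace_back(emit_object, &modules[i], &machines[i]);
        }
        for (auto &w : workers) w.join();
    }
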
void lb_generate_code(lbGenerator *gen) {
#define TIME_SECTION(str) do { if (build_context.show_more_timings) timings_start_section(&global_timings, str_lit(str)); } while (0)
+ #define TIME_SECTION_WITH_LEN(str, len) do { if (build_context.show_more_timings) timings_start_section(&global_timings, make_string((u8 *)str, len)); } while (0)
TIME_SECTION("LLVM Initializtion");
- lbModule *m = &gen->module;
- LLVMModuleRef mod = gen->module.mod;
+ isize thread_count = gb_max(build_context.thread_count, 1);
+ isize worker_count = thread_count-1;
+
+ LLVMBool do_threading = (LLVMIsMultithreaded() && USE_SEPARTE_MODULES && MULTITHREAD_OBJECT_GENERATION && worker_count > 0);
+
+ thread_pool_init(&lb_thread_pool, heap_allocator(), worker_count, "LLVMBackend");
+ defer (thread_pool_destroy(&lb_thread_pool));
+
+ lbModule *default_module = &gen->default_module;
CheckerInfo *info = gen->info;
auto *min_dep_set = &info->minimum_dependency_set;
-
LLVMInitializeAllTargetInfos();
LLVMInitializeAllTargets();
LLVMInitializeAllTargetMCs();
@@ -13711,16 +14575,19 @@ void lb_generate_code(lbGenerator *gen) {
LLVMInitializeAllDisassemblers();
LLVMInitializeNativeTarget();
-
char const *target_triple = alloc_cstring(permanent_allocator(), build_context.metrics.target_triplet);
char const *target_data_layout = alloc_cstring(permanent_allocator(), build_context.metrics.target_data_layout);
- LLVMSetTarget(mod, target_triple);
+ for_array(i, gen->modules.entries) {
+ LLVMSetTarget(gen->modules.entries[i].value->mod, target_triple);
+ }
LLVMTargetRef target = {};
char *llvm_error = nullptr;
LLVMGetTargetFromTriple(target_triple, &target, &llvm_error);
GB_ASSERT(target != nullptr);
+
+
TIME_SECTION("LLVM Create Target Machine");
LLVMCodeModel code_mode = LLVMCodeModelDefault;
@@ -13754,78 +14621,89 @@ void lb_generate_code(lbGenerator *gen) {
}
// NOTE(bill): Target Machine Creation
- LLVMTargetMachineRef target_machine = LLVMCreateTargetMachine(
- target, target_triple, llvm_cpu,
- llvm_features,
- code_gen_level,
- LLVMRelocDefault,
- code_mode);
- defer (LLVMDisposeTargetMachine(target_machine));
-
-
- LLVMSetModuleDataLayout(mod, LLVMCreateTargetDataLayout(target_machine));
-
- if (m->debug_builder) { // Debug Info
- for_array(i, info->files.entries) {
- AstFile *f = info->files.entries[i].value;
- String fullpath = f->fullpath;
- String filename = remove_directory_from_path(fullpath);
- String directory = directory_from_path(fullpath);
- LLVMMetadataRef res = LLVMDIBuilderCreateFile(m->debug_builder,
- cast(char const *)filename.text, filename.len,
- cast(char const *)directory.text, directory.len);
- lb_set_llvm_metadata(m, f, res);
- }
-
- gbString producer = gb_string_make(heap_allocator(), "odin");
- producer = gb_string_append_fmt(producer, " version %.*s", LIT(ODIN_VERSION));
- #ifdef NIGHTLY
- producer = gb_string_appendc(producer, "-nightly");
- #endif
- #ifdef GIT_SHA
- producer = gb_string_append_fmt(producer, "-%s", GIT_SHA);
- #endif
-
- gbString split_name = gb_string_make(heap_allocator(), "");
-
- LLVMBool is_optimized = build_context.optimization_level > 0;
- AstFile *init_file = m->info->init_package->files[0];
- if (m->info->entry_point && m->info->entry_point->identifier && m->info->entry_point->identifier->file) {
- init_file = m->info->entry_point->identifier->file;
+ // NOTE(bill, 2021-05-04): Target machines must be unique to each module because they are not thread safe
+ auto target_machines = array_make<LLVMTargetMachineRef>(permanent_allocator(), gen->modules.entries.count);
+
+ for_array(i, gen->modules.entries) {
+ target_machines[i] = LLVMCreateTargetMachine(
+ target, target_triple, llvm_cpu,
+ llvm_features,
+ code_gen_level,
+ LLVMRelocDefault,
+ code_mode);
+ LLVMSetModuleDataLayout(gen->modules.entries[i].value->mod, LLVMCreateTargetDataLayout(target_machines[i]));
+ }
+
+ for_array(i, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[i].value;
+ if (m->debug_builder) { // Debug Info
+ for_array(i, info->files.entries) {
+ AstFile *f = info->files.entries[i].value;
+ String fullpath = f->fullpath;
+ String filename = remove_directory_from_path(fullpath);
+ String directory = directory_from_path(fullpath);
+ LLVMMetadataRef res = LLVMDIBuilderCreateFile(m->debug_builder,
+ cast(char const *)filename.text, filename.len,
+ cast(char const *)directory.text, directory.len);
+ lb_set_llvm_metadata(m, f, res);
+ }
+
+ gbString producer = gb_string_make(heap_allocator(), "odin");
+ // producer = gb_string_append_fmt(producer, " version %.*s", LIT(ODIN_VERSION));
+ // #ifdef NIGHTLY
+ // producer = gb_string_appendc(producer, "-nightly");
+ // #endif
+ // #ifdef GIT_SHA
+ // producer = gb_string_append_fmt(producer, "-%s", GIT_SHA);
+ // #endif
+
+ gbString split_name = gb_string_make(heap_allocator(), "");
+
+ LLVMBool is_optimized = build_context.optimization_level > 0;
+ AstFile *init_file = m->info->init_package->files[0];
+ if (m->info->entry_point && m->info->entry_point->identifier && m->info->entry_point->identifier->file) {
+ init_file = m->info->entry_point->identifier->file;
+ }
+
+ LLVMBool split_debug_inlining = false;
+ LLVMBool debug_info_for_profiling = false;
+
+ m->debug_compile_unit = LLVMDIBuilderCreateCompileUnit(m->debug_builder, LLVMDWARFSourceLanguageC99,
+ lb_get_llvm_metadata(m, init_file),
+ producer, gb_string_length(producer),
+ is_optimized, "", 0,
+ 1, split_name, gb_string_length(split_name),
+ LLVMDWARFEmissionFull,
+ 0, split_debug_inlining,
+ debug_info_for_profiling,
+ "", 0, // sys_root
+ "", 0 // SDK
+ );
+ GB_ASSERT(m->debug_compile_unit != nullptr);
}
-
- LLVMBool split_debug_inlining = false;
- LLVMBool debug_info_for_profiling = false;
-
- m->debug_compile_unit = LLVMDIBuilderCreateCompileUnit(m->debug_builder, LLVMDWARFSourceLanguageC99,
- lb_get_llvm_metadata(m, init_file),
- producer, gb_string_length(producer),
- is_optimized, "", 0,
- 1, split_name, gb_string_length(split_name),
- LLVMDWARFEmissionFull,
- 0, split_debug_inlining,
- debug_info_for_profiling,
- "", 0, // sys_root
- "", 0 // SDK
- );
- GB_ASSERT(m->debug_compile_unit != nullptr);
}
TIME_SECTION("LLVM Global Variables");
{
+ lbModule *m = default_module;
+
{ // Add type info data
isize max_type_info_count = info->minimum_dependency_type_info_set.entries.count+1;
// gb_printf_err("max_type_info_count: %td\n", max_type_info_count);
Type *t = alloc_type_array(t_type_info, max_type_info_count);
- LLVMValueRef g = LLVMAddGlobal(mod, lb_type(m, t), LB_TYPE_INFO_DATA_NAME);
+ LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), LB_TYPE_INFO_DATA_NAME);
LLVMSetInitializer(g, LLVMConstNull(lb_type(m, t)));
- LLVMSetLinkage(g, LLVMInternalLinkage);
+ if (!USE_SEPARTE_MODULES) {
+ LLVMSetLinkage(g, LLVMInternalLinkage);
+ }
lbValue value = {};
value.value = g;
value.type = alloc_type_pointer(t);
- lb_global_type_info_data = lb_addr(value);
+
+ lb_global_type_info_data_entity = alloc_entity_variable(nullptr, make_token_ident(LB_TYPE_INFO_DATA_NAME), t, EntityState_Resolved);
+ lb_add_entity(m, lb_global_type_info_data_entity, value);
}
{ // Type info member buffer
// NOTE(bill): Removes need for heap allocation by making it global memory
@@ -13856,7 +14734,7 @@ void lb_generate_code(lbGenerator *gen) {
{
char const *name = LB_TYPE_INFO_TYPES_NAME;
Type *t = alloc_type_array(t_type_info_ptr, count);
- LLVMValueRef g = LLVMAddGlobal(mod, lb_type(m, t), name);
+ LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), name);
LLVMSetInitializer(g, LLVMConstNull(lb_type(m, t)));
LLVMSetLinkage(g, LLVMInternalLinkage);
lb_global_type_info_member_types = lb_addr({g, alloc_type_pointer(t)});
@@ -13865,7 +14743,7 @@ void lb_generate_code(lbGenerator *gen) {
{
char const *name = LB_TYPE_INFO_NAMES_NAME;
Type *t = alloc_type_array(t_string, count);
- LLVMValueRef g = LLVMAddGlobal(mod, lb_type(m, t), name);
+ LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), name);
LLVMSetInitializer(g, LLVMConstNull(lb_type(m, t)));
LLVMSetLinkage(g, LLVMInternalLinkage);
lb_global_type_info_member_names = lb_addr({g, alloc_type_pointer(t)});
@@ -13873,7 +14751,7 @@ void lb_generate_code(lbGenerator *gen) {
{
char const *name = LB_TYPE_INFO_OFFSETS_NAME;
Type *t = alloc_type_array(t_uintptr, count);
- LLVMValueRef g = LLVMAddGlobal(mod, lb_type(m, t), name);
+ LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), name);
LLVMSetInitializer(g, LLVMConstNull(lb_type(m, t)));
LLVMSetLinkage(g, LLVMInternalLinkage);
lb_global_type_info_member_offsets = lb_addr({g, alloc_type_pointer(t)});
@@ -13882,7 +14760,7 @@ void lb_generate_code(lbGenerator *gen) {
{
char const *name = LB_TYPE_INFO_USINGS_NAME;
Type *t = alloc_type_array(t_bool, count);
- LLVMValueRef g = LLVMAddGlobal(mod, lb_type(m, t), name);
+ LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), name);
LLVMSetInitializer(g, LLVMConstNull(lb_type(m, t)));
LLVMSetLinkage(g, LLVMInternalLinkage);
lb_global_type_info_member_usings = lb_addr({g, alloc_type_pointer(t)});
@@ -13891,7 +14769,7 @@ void lb_generate_code(lbGenerator *gen) {
{
char const *name = LB_TYPE_INFO_TAGS_NAME;
Type *t = alloc_type_array(t_string, count);
- LLVMValueRef g = LLVMAddGlobal(mod, lb_type(m, t), name);
+ LLVMValueRef g = LLVMAddGlobal(m->mod, lb_type(m, t), name);
LLVMSetInitializer(g, LLVMConstNull(lb_type(m, t)));
LLVMSetLinkage(g, LLVMInternalLinkage);
lb_global_type_info_member_tags = lb_addr({g, alloc_type_pointer(t)});
@@ -13931,13 +14809,8 @@ void lb_generate_code(lbGenerator *gen) {
}
}
- struct GlobalVariable {
- lbValue var;
- lbValue init;
- DeclInfo *decl;
- bool is_initialized;
- };
- auto global_variables = array_make<GlobalVariable>(permanent_allocator(), 0, global_variable_max_count);
+
+ auto global_variables = array_make<lbGlobalVariable>(permanent_allocator(), 0, global_variable_max_count);
for_array(i, info->variable_init_order) {
DeclInfo *d = info->variable_init_order[i];
@@ -13960,8 +14833,9 @@ void lb_generate_code(lbGenerator *gen) {
bool is_foreign = e->Variable.is_foreign;
bool is_export = e->Variable.is_export;
- String name = lb_get_entity_name(m, e);
+ lbModule *m = &gen->default_module;
+ String name = lb_get_entity_name(m, e);
lbValue g = {};
g.value = LLVMAddGlobal(m->mod, lb_type(m, e->type), alloc_cstring(permanent_allocator(), name));
@@ -13985,6 +14859,7 @@ void lb_generate_code(lbGenerator *gen) {
LLVMSetThreadLocalMode(g.value, mode);
}
if (is_foreign) {
+ LLVMSetLinkage(g.value, LLVMExternalLinkage);
LLVMSetExternallyInitialized(g.value, true);
lb_add_foreign_library_path(m, e->Variable.foreign_library);
} else {
@@ -13993,13 +14868,15 @@ void lb_generate_code(lbGenerator *gen) {
if (is_export) {
LLVMSetLinkage(g.value, LLVMDLLExportLinkage);
LLVMSetDLLStorageClass(g.value, LLVMDLLExportStorageClass);
+ } else {
+ if (USE_SEPARTE_MODULES) {
+ LLVMSetLinkage(g.value, LLVMExternalLinkage);
+ } else {
+ LLVMSetLinkage(g.value, LLVMInternalLinkage);
+ }
}
- if (e->flags & EntityFlag_Static) {
- LLVMSetLinkage(g.value, LLVMInternalLinkage);
- }
-
- GlobalVariable var = {};
+ lbGlobalVariable var = {};
var.var = g;
var.decl = decl;
@@ -14026,13 +14903,14 @@ void lb_generate_code(lbGenerator *gen) {
lb_add_entity(m, e, g);
lb_add_member(m, name, g);
+
if (m->debug_builder) {
String global_name = e->token.string;
if (global_name.len != 0 && global_name != "_") {
LLVMMetadataRef llvm_file = lb_get_llvm_metadata(m, e->file);
LLVMMetadataRef llvm_scope = llvm_file;
- LLVMBool local_to_unit = e->flags & EntityFlag_Static;
+ LLVMBool local_to_unit = LLVMGetLinkage(g.value) == LLVMInternalLinkage;
LLVMMetadataRef llvm_expr = LLVMDIBuilderCreateExpression(m->debug_builder, nullptr, 0);
LLVMMetadataRef llvm_decl = nullptr;
@@ -14096,6 +14974,10 @@ void lb_generate_code(lbGenerator *gen) {
continue;
}
+ lbModule *m = &gen->default_module;
+ if (USE_SEPARTE_MODULES) {
+ m = lb_pkg_module(gen, e->pkg);
+ }
String mangled_name = lb_get_entity_name(m, e);
@@ -14113,450 +14995,209 @@ void lb_generate_code(lbGenerator *gen) {
}
- TIME_SECTION("LLVM Registry Initializtion");
-
- LLVMPassRegistryRef pass_registry = LLVMGetGlobalPassRegistry();
-
- LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(mod);
- LLVMPassManagerRef function_pass_manager_minimal = LLVMCreateFunctionPassManagerForModule(mod);
- LLVMPassManagerRef function_pass_manager_size = LLVMCreateFunctionPassManagerForModule(mod);
- LLVMPassManagerRef function_pass_manager_speed = LLVMCreateFunctionPassManagerForModule(mod);
- defer (LLVMDisposePassManager(default_function_pass_manager));
- defer (LLVMDisposePassManager(function_pass_manager_minimal));
- defer (LLVMDisposePassManager(function_pass_manager_size));
- defer (LLVMDisposePassManager(function_pass_manager_speed));
-
- LLVMInitializeFunctionPassManager(default_function_pass_manager);
- LLVMInitializeFunctionPassManager(function_pass_manager_minimal);
- LLVMInitializeFunctionPassManager(function_pass_manager_size);
- LLVMInitializeFunctionPassManager(function_pass_manager_speed);
-
- lb_populate_function_pass_manager(default_function_pass_manager, false, build_context.optimization_level);
- lb_populate_function_pass_manager_specific(function_pass_manager_minimal, 0);
- lb_populate_function_pass_manager_specific(function_pass_manager_size, 1);
- lb_populate_function_pass_manager_specific(function_pass_manager_speed, 2);
-
- LLVMFinalizeFunctionPassManager(default_function_pass_manager);
- LLVMFinalizeFunctionPassManager(function_pass_manager_minimal);
- LLVMFinalizeFunctionPassManager(function_pass_manager_size);
- LLVMFinalizeFunctionPassManager(function_pass_manager_speed);
-
-
- LLVMPassManagerRef default_function_pass_manager_without_memcpy = LLVMCreateFunctionPassManagerForModule(mod);
- defer (LLVMDisposePassManager(default_function_pass_manager_without_memcpy));
- LLVMInitializeFunctionPassManager(default_function_pass_manager_without_memcpy);
- lb_populate_function_pass_manager(default_function_pass_manager_without_memcpy, true, build_context.optimization_level);
- LLVMFinalizeFunctionPassManager(default_function_pass_manager_without_memcpy);
-
TIME_SECTION("LLVM Runtime Type Information Creation");
+ lbProcedure *startup_type_info = lb_create_startup_type_info(default_module);
- lbProcedure *startup_type_info = nullptr;
- lbProcedure *startup_runtime = nullptr;
- { // Startup Type Info
- Type *params = alloc_type_tuple();
- Type *results = alloc_type_tuple();
-
- Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_CDecl);
-
- lbProcedure *p = lb_create_dummy_procedure(m, str_lit(LB_STARTUP_TYPE_INFO_PROC_NAME), proc_type);
- p->is_startup = true;
- startup_type_info = p;
-
- lb_begin_procedure_body(p);
-
- lb_setup_type_info_data(p);
-
- lb_end_procedure_body(p);
-
- if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
- gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main");
- LLVMDumpValue(p->value);
- gb_printf_err("\n\n\n\n");
- LLVMVerifyFunction(p->value, LLVMAbortProcessAction);
- }
-
- LLVMRunFunctionPassManager(default_function_pass_manager, p->value);
- }
TIME_SECTION("LLVM Runtime Startup Creation (Global Variables)");
- { // Startup Runtime
- Type *params = alloc_type_tuple();
- Type *results = alloc_type_tuple();
-
- Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_CDecl);
-
- lbProcedure *p = lb_create_dummy_procedure(m, str_lit(LB_STARTUP_RUNTIME_PROC_NAME), proc_type);
- p->is_startup = true;
- startup_runtime = p;
+ lbProcedure *startup_runtime = lb_create_startup_runtime(default_module, startup_type_info, global_variables);
- lb_begin_procedure_body(p);
-
- LLVMBuildCall2(p->builder, LLVMGetElementType(lb_type(m, startup_type_info->type)), startup_type_info->value, nullptr, 0, "");
-
- for_array(i, global_variables) {
- auto *var = &global_variables[i];
- if (var->is_initialized) {
+ TIME_SECTION("LLVM Procedure Generation");
+ for_array(j, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[j].value;
+ for_array(i, m->procedures_to_generate) {
+ lbProcedure *p = m->procedures_to_generate[i];
+ if (p->is_done) {
continue;
}
-
- Entity *e = var->decl->entity;
- GB_ASSERT(e->kind == Entity_Variable);
-
- if (var->decl->init_expr != nullptr) {
- // gb_printf_err("%s\n", expr_to_string(var->decl->init_expr));
- lbValue init = lb_build_expr(p, var->decl->init_expr);
- LLVMValueKind value_kind = LLVMGetValueKind(init.value);
- // gb_printf_err("%s %d\n", LLVMPrintValueToString(init.value));
-
- if (lb_is_const_or_global(init)) {
- if (!var->is_initialized) {
- LLVMSetInitializer(var->var.value, init.value);
- var->is_initialized = true;
- continue;
- }
- } else {
- var->init = init;
- }
- }
-
- if (var->init.value != nullptr) {
- GB_ASSERT(!var->is_initialized);
- Type *t = type_deref(var->var.type);
-
- if (is_type_any(t)) {
- // NOTE(bill): Edge case for 'any' type
- Type *var_type = default_type(var->init.type);
- lbAddr g = lb_add_global_generated(m, var_type, var->init);
- lb_addr_store(p, g, var->init);
- lbValue gp = lb_addr_get_ptr(p, g);
-
- lbValue data = lb_emit_struct_ep(p, var->var, 0);
- lbValue ti = lb_emit_struct_ep(p, var->var, 1);
- lb_emit_store(p, data, lb_emit_conv(p, gp, t_rawptr));
- lb_emit_store(p, ti, lb_type_info(m, var_type));
- } else {
- LLVMTypeRef pvt = LLVMTypeOf(var->var.value);
- LLVMTypeRef vt = LLVMGetElementType(pvt);
- lbValue src0 = lb_emit_conv(p, var->init, t);
- LLVMValueRef src = OdinLLVMBuildTransmute(p, src0.value, vt);
- LLVMValueRef dst = var->var.value;
- LLVMBuildStore(p->builder, src, dst);
- }
-
- var->is_initialized = true;
+ if (p->body != nullptr) { // Build Procedure
+ m->curr_procedure = p;
+ lb_begin_procedure_body(p);
+ lb_build_stmt(p, p->body);
+ lb_end_procedure_body(p);
+ p->is_done = true;
+ m->curr_procedure = nullptr;
+ }
+ lb_end_procedure(p);
+
+ // Add Flags
+ if (p->body != nullptr) {
+ if (p->name == "memcpy" || p->name == "memmove" ||
+ p->name == "runtime.mem_copy" || p->name == "mem_copy_non_overlapping" ||
+ string_starts_with(p->name, str_lit("llvm.memcpy")) ||
+ string_starts_with(p->name, str_lit("llvm.memmove"))) {
+ p->flags |= lbProcedureFlag_WithoutMemcpyPass;
+ }
+ }
+
+ if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
+ gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %.*s\n", LIT(p->name));
+ LLVMDumpValue(p->value);
+ gb_printf_err("\n\n\n\n");
+ String filepath_ll = lb_filepath_ll_for_module(m);
+ if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) {
+ gb_printf_err("LLVM Error: %s\n", llvm_error);
+ }
+ LLVMVerifyFunction(p->value, LLVMPrintMessageAction);
+ gb_exit(1);
}
}
-
-
- lb_end_procedure_body(p);
-
- if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
- gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main");
- LLVMDumpValue(p->value);
- gb_printf_err("\n\n\n\n");
- LLVMVerifyFunction(p->value, LLVMAbortProcessAction);
- }
-
- LLVMRunFunctionPassManager(default_function_pass_manager, p->value);
-
- /*{
- LLVMValueRef last_instr = LLVMGetLastInstruction(p->decl_block->block);
- for (LLVMValueRef instr = LLVMGetFirstInstruction(p->decl_block->block);
- instr != last_instr;
- instr = LLVMGetNextInstruction(instr)) {
- if (LLVMIsAAllocaInst(instr)) {
- LLVMTypeRef type = LLVMGetAllocatedType(instr);
- LLVMValueRef sz_val = LLVMSizeOf(type);
- GB_ASSERT(LLVMIsConstant(sz_val));
- gb_printf_err(">> 0x%p\n", sz_val);
- LLVMTypeRef sz_type = LLVMTypeOf(sz_val);
- gb_printf_err(">> %s\n", LLVMPrintTypeToString(sz_type));
- unsigned long long sz = LLVMConstIntGetZExtValue(sz_val);
- // long long sz = LLVMConstIntGetSExtValue(sz_val);
- gb_printf_err(">> %ll\n", sz);
- }
- }
- }*/
}
- String filepath_ll = concatenate_strings(permanent_allocator(), gen->output_base, STR_LIT(".ll"));
- TIME_SECTION("LLVM Procedure Generation");
- for_array(i, m->procedures_to_generate) {
- lbProcedure *p = m->procedures_to_generate[i];
- if (p->is_done) {
- continue;
- }
- if (p->body != nullptr) { // Build Procedure
- m->curr_procedure = p;
- lb_begin_procedure_body(p);
- lb_build_stmt(p, p->body);
- lb_end_procedure_body(p);
- p->is_done = true;
- m->curr_procedure = nullptr;
- }
- lb_end_procedure(p);
-
- // Add Flags
- if (p->body != nullptr) {
- if (p->name == "memcpy" || p->name == "memmove" ||
- p->name == "runtime.mem_copy" || p->name == "mem_copy_non_overlapping" ||
- string_starts_with(p->name, str_lit("llvm.memcpy")) ||
- string_starts_with(p->name, str_lit("llvm.memmove"))) {
- p->flags |= lbProcedureFlag_WithoutMemcpyPass;
- }
- }
+ if (!(build_context.build_mode == BuildMode_DynamicLibrary && !has_dll_main)) {
+ TIME_SECTION("LLVM main");
+ lb_create_main_procedure(default_module, startup_runtime);
+ }
- if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
- gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %.*s\n", LIT(p->name));
- LLVMDumpValue(p->value);
- gb_printf_err("\n\n\n\n");
- if (LLVMPrintModuleToFile(mod, cast(char const *)filepath_ll.text, &llvm_error)) {
- gb_printf_err("LLVM Error: %s\n", llvm_error);
+ if (build_context.ODIN_DEBUG) {
+ TIME_SECTION("LLVM Debug Info Complete Types and Finalize");
+ for_array(j, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[j].value;
+ if (m->debug_builder != nullptr) {
+ lb_debug_complete_types(m);
+ LLVMDIBuilderFinalize(m->debug_builder);
}
- LLVMVerifyFunction(p->value, LLVMPrintMessageAction);
- gb_exit(1);
}
}
- if (!(build_context.build_mode == BuildMode_DynamicLibrary && !has_dll_main)) {
- TIME_SECTION("LLVM DLL main");
-
-
- Type *params = alloc_type_tuple();
- Type *results = alloc_type_tuple();
-
- String name = str_lit("main");
- if (build_context.metrics.os == TargetOs_windows && build_context.metrics.arch == TargetArch_386) {
- name = str_lit("mainCRTStartup");
- } else {
- array_init(&params->Tuple.variables, permanent_allocator(), 2);
- params->Tuple.variables[0] = alloc_entity_param(nullptr, make_token_ident("argc"), t_i32, false, true);
- params->Tuple.variables[1] = alloc_entity_param(nullptr, make_token_ident("argv"), alloc_type_pointer(t_cstring), false, true);
- }
-
- array_init(&results->Tuple.variables, permanent_allocator(), 1);
- results->Tuple.variables[0] = alloc_entity_param(nullptr, make_token_ident("_"), t_i32, false, true);
-
- Type *proc_type = alloc_type_proc(nullptr,
- params, params->Tuple.variables.count,
- results, results->Tuple.variables.count, false, ProcCC_CDecl);
+ TIME_SECTION("LLVM Function Pass");
+ for_array(i, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[i].value;
+ lb_llvm_function_pass_worker_proc(m);
+ }
- lbProcedure *p = lb_create_dummy_procedure(m, name, proc_type);
- p->is_startup = true;
+ TIME_SECTION("LLVM Module Pass");
- lb_begin_procedure_body(p);
+ for_array(i, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[i].value;
- LLVMBuildCall2(p->builder, LLVMGetElementType(lb_type(m, startup_runtime->type)), startup_runtime->value, nullptr, 0, "");
+ auto wd = gb_alloc_item(permanent_allocator(), lbLLVMModulePassWorkerData);
+ wd->m = m;
+ wd->target_machine = target_machines[i];
- if (build_context.command_kind == Command_test) {
- Type *t_Internal_Test = find_type_in_pkg(m->info, str_lit("testing"), str_lit("Internal_Test"));
- Type *array_type = alloc_type_array(t_Internal_Test, m->info->testing_procedures.count);
- Type *slice_type = alloc_type_slice(t_Internal_Test);
- lbAddr all_tests_array_addr = lb_add_global_generated(p->module, array_type, {});
- lbValue all_tests_array = lb_addr_get_ptr(p, all_tests_array_addr);
+ lb_llvm_module_pass_worker_proc(wd);
+ }
- LLVMTypeRef lbt_Internal_Test = lb_type(m, t_Internal_Test);
- LLVMValueRef indices[2] = {};
- indices[0] = LLVMConstInt(lb_type(m, t_i32), 0, false);
+ llvm_error = nullptr;
+ defer (LLVMDisposeMessage(llvm_error));
- for_array(i, m->info->testing_procedures) {
- Entity *testing_proc = m->info->testing_procedures[i];
- String name = testing_proc->token.string;
- lbValue *found = map_get(&m->values, hash_entity(testing_proc));
- GB_ASSERT(found != nullptr);
+ LLVMCodeGenFileType code_gen_file_type = LLVMObjectFile;
+ if (build_context.build_mode == BuildMode_Assembly) {
+ code_gen_file_type = LLVMAssemblyFile;
+ }
- String pkg_name = {};
- if (testing_proc->pkg != nullptr) {
- pkg_name = testing_proc->pkg->name;
+ for_array(j, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[j].value;
+ if (LLVMVerifyModule(m->mod, LLVMReturnStatusAction, &llvm_error)) {
+ gb_printf_err("LLVM Error:\n%s\n", llvm_error);
+ if (build_context.keep_temp_files) {
+ TIME_SECTION("LLVM Print Module to File");
+ String filepath_ll = lb_filepath_ll_for_module(m);
+ if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) {
+ gb_printf_err("LLVM Error: %s\n", llvm_error);
+ gb_exit(1);
+ return;
}
- lbValue v_pkg = lb_find_or_add_entity_string(m, pkg_name);
- lbValue v_name = lb_find_or_add_entity_string(m, name);
- lbValue v_proc = *found;
-
- indices[1] = LLVMConstInt(lb_type(m, t_int), i, false);
-
- LLVMValueRef vals[3] = {};
- vals[0] = v_pkg.value;
- vals[1] = v_name.value;
- vals[2] = v_proc.value;
- GB_ASSERT(LLVMIsConstant(vals[0]));
- GB_ASSERT(LLVMIsConstant(vals[1]));
- GB_ASSERT(LLVMIsConstant(vals[2]));
-
- LLVMValueRef dst = LLVMConstInBoundsGEP(all_tests_array.value, indices, gb_count_of(indices));
- LLVMValueRef src = llvm_const_named_struct(lbt_Internal_Test, vals, gb_count_of(vals));
-
- LLVMBuildStore(p->builder, src, dst);
}
+ gb_exit(1);
+ return;
+ }
+ }
+ llvm_error = nullptr;
+ if (build_context.keep_temp_files ||
+ build_context.build_mode == BuildMode_LLVM_IR) {
+ TIME_SECTION("LLVM Print Module to File");
- lbAddr all_tests_slice = lb_add_local_generated(p, slice_type, true);
- lb_fill_slice(p, all_tests_slice,
- lb_array_elem(p, all_tests_array),
- lb_const_int(m, t_int, m->info->testing_procedures.count));
+ for_array(j, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[j].value;
+ if (lb_is_module_empty(m)) {
+ continue;
+ }
- lbValue runner = lb_find_package_value(m, str_lit("testing"), str_lit("runner"));
+ String filepath_ll = lb_filepath_ll_for_module(m);
+ if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) {
+ gb_printf_err("LLVM Error: %s\n", llvm_error);
+ gb_exit(1);
+ return;
+ }
+ array_add(&gen->output_temp_paths, filepath_ll);
- auto args = array_make<lbValue>(heap_allocator(), 1);
- args[0] = lb_addr_load(p, all_tests_slice);
- lb_emit_call(p, runner, args);
- } else {
- lbValue *found = map_get(&m->values, hash_entity(entry_point));
- GB_ASSERT(found != nullptr);
- lb_emit_call(p, *found, {});
}
-
- LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_i32), 0, false));
-
- lb_end_procedure_body(p);
-
- if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) {
- gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main");
- LLVMDumpValue(p->value);
- gb_printf_err("\n\n\n\n");
- LLVMVerifyFunction(p->value, LLVMAbortProcessAction);
+ if (build_context.build_mode == BuildMode_LLVM_IR) {
+ gb_exit(0);
+ return;
}
-
- LLVMRunFunctionPassManager(default_function_pass_manager, p->value);
}
- if (m->debug_builder != nullptr) {
- TIME_SECTION("LLVM Debug Info Complete Types");
- lb_debug_complete_types(m);
+ TIME_SECTION("LLVM Add Foreign Library Paths");
+ for_array(j, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[j].value;
+ for_array(i, m->info->required_foreign_imports_through_force) {
+ Entity *e = m->info->required_foreign_imports_through_force[i];
+ lb_add_foreign_library_path(m, e);
+ }
- TIME_SECTION("LLVM Print Module to File");
- if (LLVMPrintModuleToFile(mod, cast(char const *)filepath_ll.text, &llvm_error)) {
- gb_printf_err("LLVM Error: %s\n", llvm_error);
- gb_exit(1);
- return;
+ if (lb_is_module_empty(m)) {
+ continue;
}
- TIME_SECTION("LLVM Debug Info Builder Finalize");
- LLVMDIBuilderFinalize(m->debug_builder);
}
+ TIME_SECTION("LLVM Object Generation");
- TIME_SECTION("LLVM Function Pass");
- {
- for_array(i, m->procedures_to_generate) {
- lbProcedure *p = m->procedures_to_generate[i];
- if (p->body != nullptr) { // Build Procedure
- if (p->flags & lbProcedureFlag_WithoutMemcpyPass) {
- LLVMRunFunctionPassManager(default_function_pass_manager_without_memcpy, p->value);
- } else {
- if (p->entity && p->entity->kind == Entity_Procedure) {
- switch (p->entity->Procedure.optimization_mode) {
- case ProcedureOptimizationMode_None:
- case ProcedureOptimizationMode_Minimal:
- LLVMRunFunctionPassManager(function_pass_manager_minimal, p->value);
- break;
- case ProcedureOptimizationMode_Size:
- LLVMRunFunctionPassManager(function_pass_manager_size, p->value);
- break;
- case ProcedureOptimizationMode_Speed:
- LLVMRunFunctionPassManager(function_pass_manager_speed, p->value);
- break;
- default:
- LLVMRunFunctionPassManager(default_function_pass_manager, p->value);
- break;
- }
- } else {
- LLVMRunFunctionPassManager(default_function_pass_manager, p->value);
- }
- }
+ if (do_threading) {
+ for_array(j, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[j].value;
+ if (lb_is_module_empty(m)) {
+ continue;
}
- }
- for_array(i, m->equal_procs.entries) {
- lbProcedure *p = m->equal_procs.entries[i].value;
- LLVMRunFunctionPassManager(default_function_pass_manager, p->value);
- }
- for_array(i, m->hasher_procs.entries) {
- lbProcedure *p = m->hasher_procs.entries[i].value;
- LLVMRunFunctionPassManager(default_function_pass_manager, p->value);
- }
- }
+ String filepath_ll = lb_filepath_ll_for_module(m);
+ String filepath_obj = lb_filepath_obj_for_module(m);
+ array_add(&gen->output_object_paths, filepath_obj);
+ array_add(&gen->output_temp_paths, filepath_ll);
- TIME_SECTION("LLVM Module Pass");
+ auto *wd = gb_alloc_item(heap_allocator(), lbLLVMEmitWorker);
+ wd->target_machine = target_machines[j];
+ wd->code_gen_file_type = code_gen_file_type;
+ wd->filepath_obj = filepath_obj;
+ wd->m = m;
+ thread_pool_add_task(&lb_thread_pool, lb_llvm_emit_worker_proc, wd);
+ }
- LLVMPassManagerRef module_pass_manager = LLVMCreatePassManager();
- defer (LLVMDisposePassManager(module_pass_manager));
- lb_populate_module_pass_manager(target_machine, module_pass_manager, build_context.optimization_level);
+ thread_pool_start(&lb_thread_pool);
+ thread_pool_wait_to_process(&lb_thread_pool);
+ } else {
+ for_array(j, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[j].value;
+ if (lb_is_module_empty(m)) {
+ continue;
+ }
- LLVMRunPassManager(module_pass_manager, mod);
+ String filepath_obj = lb_filepath_obj_for_module(m);
+ array_add(&gen->output_object_paths, filepath_obj);
- llvm_error = nullptr;
- defer (LLVMDisposeMessage(llvm_error));
+ String short_name = remove_directory_from_path(filepath_obj);
+ gbString section_name = gb_string_make(heap_allocator(), "LLVM Generate Object: ");
+ section_name = gb_string_append_length(section_name, short_name.text, short_name.len);
- String filepath_obj = {};
- LLVMCodeGenFileType code_gen_file_type = LLVMObjectFile;
+ TIME_SECTION_WITH_LEN(section_name, gb_string_length(section_name));
- if (build_context.build_mode == BuildMode_Assembly) {
- filepath_obj = concatenate_strings(permanent_allocator(), gen->output_base, STR_LIT(".S"));
- code_gen_file_type = LLVMAssemblyFile;
- } else {
- switch (build_context.metrics.os) {
- case TargetOs_windows:
- filepath_obj = concatenate_strings(permanent_allocator(), gen->output_base, STR_LIT(".obj"));
- break;
- case TargetOs_darwin:
- case TargetOs_linux:
- case TargetOs_essence:
- filepath_obj = concatenate_strings(permanent_allocator(), gen->output_base, STR_LIT(".o"));
- break;
- case TargetOs_js:
- filepath_obj = concatenate_strings(permanent_allocator(), gen->output_base, STR_LIT(".wasm-obj"));
- break;
- }
- }
-
- if (LLVMVerifyModule(mod, LLVMReturnStatusAction, &llvm_error)) {
- gb_printf_err("LLVM Error:\n%s\n", llvm_error);
- if (build_context.keep_temp_files) {
- TIME_SECTION("LLVM Print Module to File");
- if (LLVMPrintModuleToFile(mod, cast(char const *)filepath_ll.text, &llvm_error)) {
+ if (LLVMTargetMachineEmitToFile(target_machines[j], m->mod, cast(char *)filepath_obj.text, code_gen_file_type, &llvm_error)) {
gb_printf_err("LLVM Error: %s\n", llvm_error);
gb_exit(1);
return;
}
}
- gb_exit(1);
- return;
- }
- llvm_error = nullptr;
- if (build_context.keep_temp_files ||
- build_context.build_mode == BuildMode_LLVM_IR) {
- TIME_SECTION("LLVM Print Module to File");
- if (LLVMPrintModuleToFile(mod, cast(char const *)filepath_ll.text, &llvm_error)) {
- gb_printf_err("LLVM Error: %s\n", llvm_error);
- gb_exit(1);
- return;
- }
- if (build_context.build_mode == BuildMode_LLVM_IR) {
- gb_exit(0);
- return;
- }
}
- TIME_SECTION("LLVM Object Generation");
- if (LLVMTargetMachineEmitToFile(target_machine, mod, cast(char *)filepath_obj.text, code_gen_file_type, &llvm_error)) {
- gb_printf_err("LLVM Error: %s\n", llvm_error);
- gb_exit(1);
- return;
- }
-
- array_add(&gen->output_object_paths, filepath_obj);
-
- for_array(i, m->info->required_foreign_imports_through_force) {
- Entity *e = m->info->required_foreign_imports_through_force[i];
- lb_add_foreign_library_path(m, e);
- }
#undef TIME_SECTION
}
diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp
index e8811a91e..b35c042ee 100644
--- a/src/llvm_backend.hpp
+++ b/src/llvm_backend.hpp
@@ -85,11 +85,10 @@ struct lbModule {
LLVMModuleRef mod;
LLVMContextRef ctx;
- u64 state_flags;
+ struct lbGenerator *gen;
CheckerInfo *info;
-
- gbMutex mutex;
+ AstPackage *pkg; // associated
Map<LLVMTypeRef> types; // Key: Type *
Map<Type *> llvm_types; // Key: LLVMTypeRef
@@ -109,8 +108,6 @@ struct lbModule {
Map<lbProcedure *> equal_procs; // Key: Type *
Map<lbProcedure *> hasher_procs; // Key: Type *
- u32 global_array_index;
- u32 global_generated_index;
u32 nested_type_name_guid;
Array<lbProcedure *> procedures_to_generate;
@@ -126,12 +123,22 @@ struct lbModule {
};
struct lbGenerator {
- lbModule module;
CheckerInfo *info;
+ gbMutex mutex;
+
Array<String> output_object_paths;
+ Array<String> output_temp_paths;
String output_base;
String output_name;
+ Map<lbModule *> modules; // Key: AstPackage *
+ Map<lbModule *> modules_through_ctx; // Key: LLVMContextRef *
+ lbModule default_module;
+
+ Map<lbProcedure *> anonymous_proc_lits; // Key: Ast *
+
+ gbAtomic32 global_array_index;
+ gbAtomic32 global_generated_index;
};
@@ -210,6 +217,7 @@ enum lbProcedureFlag : u32 {
struct lbProcedure {
u32 flags;
+ u16 state_flags;
lbProcedure *parent;
Array<lbProcedure *> children;
@@ -268,9 +276,10 @@ String lb_mangle_name(lbModule *m, Entity *e);
String lb_get_entity_name(lbModule *m, Entity *e, String name = {});
LLVMAttributeRef lb_create_enum_attribute(LLVMContextRef ctx, char const *name, u64 value=0);
+LLVMAttributeRef lb_create_enum_attribute_with_type(LLVMContextRef ctx, char const *name, LLVMTypeRef type);
void lb_add_proc_attribute_at_index(lbProcedure *p, isize index, char const *name, u64 value);
void lb_add_proc_attribute_at_index(lbProcedure *p, isize index, char const *name);
-lbProcedure *lb_create_procedure(lbModule *module, Entity *entity);
+lbProcedure *lb_create_procedure(lbModule *module, Entity *entity, bool ignore_body=false);
void lb_end_procedure(lbProcedure *p);
@@ -381,6 +390,8 @@ lbValue lb_gen_map_header(lbProcedure *p, lbValue map_val_ptr, Type *map_type);
lbValue lb_gen_map_hash(lbProcedure *p, lbValue key, Type *key_type);
void lb_insert_dynamic_map_key_and_value(lbProcedure *p, lbAddr addr, Type *map_type, lbValue map_key, lbValue map_value, Ast *node);
+lbValue lb_find_procedure_value_from_entity(lbModule *m, Entity *e);
+lbValue lb_find_value_from_entity(lbModule *m, Entity *e);
void lb_store_type_case_implicit(lbProcedure *p, Ast *clause, lbValue value);
lbAddr lb_store_range_stmt_val(lbProcedure *p, Ast *stmt_val, lbValue value);
@@ -472,6 +483,7 @@ lbCallingConventionKind const lb_calling_convention_map[ProcCC_MAX] = {
lbCallingConvention_X86_FastCall, // ProcCC_FastCall,
lbCallingConvention_C, // ProcCC_None,
+ lbCallingConvention_C, // ProcCC_Naked,
lbCallingConvention_C, // ProcCC_InlineAsm,
};
diff --git a/src/llvm_backend_opt.cpp b/src/llvm_backend_opt.cpp
index 3b268dffa..213005f8f 100644
--- a/src/llvm_backend_opt.cpp
+++ b/src/llvm_backend_opt.cpp
@@ -1,7 +1,25 @@
-void lb_populate_function_pass_manager(LLVMPassManagerRef fpm, bool ignore_memcpy_pass, i32 optimization_level);
+void lb_populate_function_pass_manager(lbModule *m, LLVMPassManagerRef fpm, bool ignore_memcpy_pass, i32 optimization_level);
void lb_add_function_simplifcation_passes(LLVMPassManagerRef mpm, i32 optimization_level);
void lb_populate_module_pass_manager(LLVMTargetMachineRef target_machine, LLVMPassManagerRef mpm, i32 optimization_level);
-void lb_populate_function_pass_manager_specific(LLVMPassManagerRef fpm, i32 optimization_level);
+void lb_populate_function_pass_manager_specific(lbModule *m, LLVMPassManagerRef fpm, i32 optimization_level);
+
+LLVMBool lb_must_preserve_predicate_callback(LLVMValueRef value, void *user_data) {
+ lbModule *m = cast(lbModule *)user_data;
+ if (m == nullptr) {
+ return false;
+ }
+ if (value == nullptr) {
+ return false;
+ }
+ return LLVMIsAAllocaInst(value) != nullptr;
+}
+
+void lb_add_must_preserve_predicate_pass(lbModule *m, LLVMPassManagerRef fpm, i32 optimization_level) {
+ if (false && optimization_level == 0 && m->debug_builder) {
+ // LLVMAddInternalizePassWithMustPreservePredicate(fpm, m, lb_must_preserve_predicate_callback);
+ }
+}
+
void lb_basic_populate_function_pass_manager(LLVMPassManagerRef fpm) {
LLVMAddPromoteMemoryToRegisterPass(fpm);
@@ -15,11 +33,13 @@ void lb_basic_populate_function_pass_manager(LLVMPassManagerRef fpm) {
LLVMAddCFGSimplificationPass(fpm);
}
-void lb_populate_function_pass_manager(LLVMPassManagerRef fpm, bool ignore_memcpy_pass, i32 optimization_level) {
+void lb_populate_function_pass_manager(lbModule *m, LLVMPassManagerRef fpm, bool ignore_memcpy_pass, i32 optimization_level) {
// NOTE(bill): Treat -opt:3 as if it was -opt:2
// TODO(bill): Determine which opt definitions should exist in the first place
optimization_level = gb_clamp(optimization_level, 0, 2);
+ lb_add_must_preserve_predicate_pass(m, fpm, optimization_level);
+
if (ignore_memcpy_pass) {
lb_basic_populate_function_pass_manager(fpm);
return;
@@ -57,11 +77,13 @@ void lb_populate_function_pass_manager(LLVMPassManagerRef fpm, bool ignore_memcp
#endif
}
-void lb_populate_function_pass_manager_specific(LLVMPassManagerRef fpm, i32 optimization_level) {
+void lb_populate_function_pass_manager_specific(lbModule *m, LLVMPassManagerRef fpm, i32 optimization_level) {
// NOTE(bill): Treat -opt:3 as if it was -opt:2
// TODO(bill): Determine which opt definitions should exist in the first place
optimization_level = gb_clamp(optimization_level, 0, 2);
+ lb_add_must_preserve_predicate_pass(m, fpm, optimization_level);
+
if (optimization_level == 0) {
LLVMAddMemCpyOptPass(fpm);
lb_basic_populate_function_pass_manager(fpm);
@@ -226,3 +248,104 @@ void lb_populate_module_pass_manager(LLVMTargetMachineRef target_machine, LLVMPa
LLVMAddCFGSimplificationPass(mpm);
}
+
+void lb_run_remove_dead_instruction_pass(lbProcedure *p) {
+ isize removal_count = 0;
+ isize pass_count = 0;
+ isize const max_pass_count = 10;
+ isize original_instruction_count = 0;
+ // Custom remove dead instruction pass
+ for (; pass_count < max_pass_count; pass_count++) {
+ bool was_dead_instructions = false;
+
+ // NOTE(bill): Iterate backwards:
+ // later instructions use earlier ones, so erasing from the end first reduces the number of passes needed
+ for (LLVMBasicBlockRef block = LLVMGetLastBasicBlock(p->value);
+ block != nullptr;
+ block = LLVMGetPreviousBasicBlock(block)) {
+ // NOTE(bill): Iterate backwards:
+ // later instructions use earlier ones, so erasing from the end first reduces the number of passes needed
+ for (LLVMValueRef instr = LLVMGetLastInstruction(block);
+ instr != nullptr;
+ /**/) {
+ if (pass_count == 0) {
+ original_instruction_count += 1;
+ }
+
+ LLVMValueRef curr_instr = instr;
+ instr = LLVMGetPreviousInstruction(instr);
+
+ LLVMUseRef first_use = LLVMGetFirstUse(curr_instr);
+ if (first_use != nullptr) {
+ continue;
+ }
+ if (LLVMTypeOf(curr_instr) == nullptr) {
+ continue;
+ }
+
+ // NOTE(bill): Only these opcodes are listed explicitly because other instructions could have side effects
+ switch (LLVMGetInstructionOpcode(curr_instr)) {
+ case LLVMFNeg:
+ case LLVMAdd:
+ case LLVMFAdd:
+ case LLVMSub:
+ case LLVMFSub:
+ case LLVMMul:
+ case LLVMFMul:
+ case LLVMUDiv:
+ case LLVMSDiv:
+ case LLVMFDiv:
+ case LLVMURem:
+ case LLVMSRem:
+ case LLVMFRem:
+ case LLVMShl:
+ case LLVMLShr:
+ case LLVMAShr:
+ case LLVMAnd:
+ case LLVMOr:
+ case LLVMXor:
+ case LLVMAlloca:
+ case LLVMLoad:
+ case LLVMGetElementPtr:
+ case LLVMTrunc:
+ case LLVMZExt:
+ case LLVMSExt:
+ case LLVMFPToUI:
+ case LLVMFPToSI:
+ case LLVMUIToFP:
+ case LLVMSIToFP:
+ case LLVMFPTrunc:
+ case LLVMFPExt:
+ case LLVMPtrToInt:
+ case LLVMIntToPtr:
+ case LLVMBitCast:
+ case LLVMAddrSpaceCast:
+ case LLVMICmp:
+ case LLVMFCmp:
+ case LLVMSelect:
+ case LLVMExtractElement:
+ case LLVMShuffleVector:
+ case LLVMExtractValue:
+ removal_count += 1;
+ LLVMInstructionEraseFromParent(curr_instr);
+ was_dead_instructions = true;
+ break;
+ }
+ }
+ }
+
+ if (!was_dead_instructions) {
+ break;
+ }
+ }
+}
+
+
+void lb_run_function_pass_manager(LLVMPassManagerRef fpm, lbProcedure *p) {
+ LLVMRunFunctionPassManager(fpm, p->value);
+	// NOTE(bill): LLVMAddDCEPass doesn't seem to be exported in the official DLLs for LLVM,
+	// which means we cannot rely upon it.
+	// This is also useful when reading the .ll output for debugging purposes,
+	// because otherwise a lot of dead instructions are left in place
+ lb_run_remove_dead_instruction_pass(p);
+}
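
The new pass above runs to a fixed point: each sweep walks the basic blocks and their instructions backwards, erases any side-effect-free instruction whose result has no remaining uses, and repeats (up to a pass cap) until a sweep removes nothing. A minimal standalone sketch of that convergence pattern on a toy instruction list, using illustrative types rather than the compiler's data structures:

#include <cstdio>
#include <vector>

// Toy model of the fixed-point dead-instruction removal above.
struct Instr {
	const char      *name;
	bool             has_side_effects; // stores, calls, terminators, ...
	std::vector<int> operands;         // indices of the instructions this one uses
	bool             erased;
};

static bool has_uses(const std::vector<Instr> &code, int def) {
	for (const Instr &in : code) {
		if (in.erased) continue;
		for (int op : in.operands) if (op == def) return true;
	}
	return false;
}

int main() {
	// %0 = load; %1 = add %0, %0; %2 = mul %1, %1 (never used); store %1
	std::vector<Instr> code = {
		{"load",  false, {},  false},
		{"add",   false, {0}, false},
		{"mul",   false, {1}, false}, // trivially dead: result has no uses
		{"store", true,  {1}, false}, // has a side effect: must stay
	};

	for (int pass = 0; pass < 10; pass++) {
		bool removed_any = false;
		// Sweep backwards so users are visited before their operands.
		for (int i = (int)code.size() - 1; i >= 0; i--) {
			Instr &in = code[i];
			if (in.erased || in.has_side_effects) continue;
			if (!has_uses(code, i)) {
				in.erased   = true;
				removed_any = true;
			}
		}
		if (!removed_any) break; // fixed point reached
	}

	for (const Instr &in : code) {
		if (!in.erased) printf("%s\n", in.name);
	}
	return 0;
}
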
diff --git a/src/main.cpp b/src/main.cpp
index 251616b56..d16a110e3 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -218,8 +218,19 @@ i32 linker_stage(lbGenerator *gen) {
add_path(find_result.vs_library_path);
}
- for_array(i, gen->module.foreign_library_paths) {
- String lib = gen->module.foreign_library_paths[i];
+ for_array(j, gen->modules.entries) {
+ lbModule *m = gen->modules.entries[j].value;
+ for_array(i, m->foreign_library_paths) {
+ String lib = m->foreign_library_paths[i];
+ GB_ASSERT(lib.len < gb_count_of(lib_str_buf)-1);
+ isize len = gb_snprintf(lib_str_buf, gb_size_of(lib_str_buf),
+ " \"%.*s\"", LIT(lib));
+ lib_str = gb_string_appendc(lib_str, lib_str_buf);
+ }
+ }
+
+ for_array(i, gen->default_module.foreign_library_paths) {
+ String lib = gen->default_module.foreign_library_paths[i];
GB_ASSERT(lib.len < gb_count_of(lib_str_buf)-1);
isize len = gb_snprintf(lib_str_buf, gb_size_of(lib_str_buf),
" \"%.*s\"", LIT(lib));
@@ -265,22 +276,22 @@ i32 linker_stage(lbGenerator *gen) {
LIT(build_context.resource_filepath)
);
- if(result == 0) {
- result = system_exec_command_line_app("msvc-link",
- "\"%.*slink.exe\" %s \"%.*s.res\" -OUT:\"%.*s.%s\" %s "
- "/nologo /incremental:no /opt:ref /subsystem:%s "
- " %.*s "
- " %.*s "
- " %s "
- "",
- LIT(find_result.vs_exe_path), object_files, LIT(output_base), LIT(output_base), output_ext,
- link_settings,
- subsystem_str,
- LIT(build_context.link_flags),
- LIT(build_context.extra_linker_flags),
- lib_str
- );
- }
+ if (result == 0) {
+ result = system_exec_command_line_app("msvc-link",
+ "\"%.*slink.exe\" %s \"%.*s.res\" -OUT:\"%.*s.%s\" %s "
+ "/nologo /incremental:no /opt:ref /subsystem:%s "
+ " %.*s "
+ " %.*s "
+ " %s "
+ "",
+ LIT(find_result.vs_exe_path), object_files, LIT(output_base), LIT(output_base), output_ext,
+ link_settings,
+ subsystem_str,
+ LIT(build_context.link_flags),
+ LIT(build_context.extra_linker_flags),
+ lib_str
+ );
+ }
} else {
result = system_exec_command_line_app("msvc-link",
"\"%.*slink.exe\" %s -OUT:\"%.*s.%s\" %s "
@@ -327,8 +338,8 @@ i32 linker_stage(lbGenerator *gen) {
gbString lib_str = gb_string_make(heap_allocator(), "-L/");
defer (gb_string_free(lib_str));
- for_array(i, gen->module.foreign_library_paths) {
- String lib = gen->module.foreign_library_paths[i];
+ for_array(i, gen->default_module.foreign_library_paths) {
+ String lib = gen->default_module.foreign_library_paths[i];
// NOTE(zangent): Sometimes, you have to use -framework on MacOS.
// This allows you to specify '-f' in a #foreign_system_library,
@@ -409,6 +420,11 @@ i32 linker_stage(lbGenerator *gen) {
#endif
}
+ if (build_context.metrics.os == TargetOs_linux) {
+ link_settings = gb_string_appendc(link_settings, "-no-pie ");
+ }
+
+
if (build_context.out_filepath.len > 0) {
//NOTE(thebirk): We have a custom -out arguments, so we should use the extension from that
isize pos = string_extension_position(build_context.out_filepath);
@@ -580,6 +596,7 @@ enum BuildFlagKind {
BuildFlag_NoCRT,
BuildFlag_NoEntryPoint,
BuildFlag_UseLLD,
+ BuildFlag_UseSeparateModules,
BuildFlag_Vet,
BuildFlag_VetExtra,
BuildFlag_UseLLVMApi,
@@ -587,6 +604,8 @@ enum BuildFlagKind {
BuildFlag_ExtraLinkerFlags,
BuildFlag_Microarch,
+ BuildFlag_TestName,
+
BuildFlag_DisallowDo,
BuildFlag_DefaultToNilAllocator,
BuildFlag_InsertSemicolon,
@@ -602,6 +621,7 @@ enum BuildFlagKind {
BuildFlag_IgnoreWarnings,
BuildFlag_WarningsAsErrors,
+ BuildFlag_VerboseErrors,
#if defined(GB_SYSTEM_WINDOWS)
BuildFlag_IgnoreVsSearch,
@@ -698,6 +718,7 @@ bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_NoCRT, str_lit("no-crt"), BuildFlagParam_None, Command__does_build);
add_flag(&build_flags, BuildFlag_NoEntryPoint, str_lit("no-entry-point"), BuildFlagParam_None, Command__does_check &~ Command_test);
add_flag(&build_flags, BuildFlag_UseLLD, str_lit("lld"), BuildFlagParam_None, Command__does_build);
+	add_flag(&build_flags, BuildFlag_UseSeparateModules, str_lit("use-separate-modules"), BuildFlagParam_None, Command__does_build);
add_flag(&build_flags, BuildFlag_Vet, str_lit("vet"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_VetExtra, str_lit("vet-extra"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_UseLLVMApi, str_lit("llvm-api"), BuildFlagParam_None, Command__does_build);
@@ -705,6 +726,8 @@ bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_ExtraLinkerFlags, str_lit("extra-linker-flags"), BuildFlagParam_String, Command__does_build);
add_flag(&build_flags, BuildFlag_Microarch, str_lit("microarch"), BuildFlagParam_String, Command__does_build);
+ add_flag(&build_flags, BuildFlag_TestName, str_lit("test-name"), BuildFlagParam_String, Command_test);
+
add_flag(&build_flags, BuildFlag_DisallowDo, str_lit("disallow-do"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_DefaultToNilAllocator, str_lit("default-to-nil-allocator"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_InsertSemicolon, str_lit("insert-semicolon"), BuildFlagParam_None, Command__does_check);
@@ -719,6 +742,7 @@ bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_IgnoreWarnings, str_lit("ignore-warnings"), BuildFlagParam_None, Command_all);
add_flag(&build_flags, BuildFlag_WarningsAsErrors, str_lit("warnings-as-errors"), BuildFlagParam_None, Command_all);
+ add_flag(&build_flags, BuildFlag_VerboseErrors, str_lit("verbose-errors"), BuildFlagParam_None, Command_all);
#if defined(GB_SYSTEM_WINDOWS)
add_flag(&build_flags, BuildFlag_IgnoreVsSearch, str_lit("ignore-vs-search"), BuildFlagParam_None, Command__does_build);
@@ -1175,6 +1199,10 @@ bool parse_build_flags(Array<String> args) {
build_context.use_lld = true;
break;
+ case BuildFlag_UseSeparateModules:
+ build_context.use_separate_modules = true;
+ break;
+
case BuildFlag_Vet:
build_context.vet = true;
break;
@@ -1203,6 +1231,21 @@ bool parse_build_flags(Array<String> args) {
string_to_lower(&build_context.microarch);
break;
+ case BuildFlag_TestName:
+ GB_ASSERT(value.kind == ExactValue_String);
+ {
+ String name = value.value_string;
+ if (!string_is_valid_identifier(name)) {
+ gb_printf_err("Test name '%.*s' must be a valid identifier\n", LIT(name));
+ bad_flags = true;
+ break;
+ }
+ string_set_add(&build_context.test_names, name);
+
+ // NOTE(bill): Allow for multiple -test-name
+ continue;
+ }
+
case BuildFlag_DisallowDo:
build_context.disallow_do = true;
break;
@@ -1279,6 +1322,10 @@ bool parse_build_flags(Array<String> args) {
}
break;
+ case BuildFlag_VerboseErrors:
+ build_context.show_error_line = true;
+ break;
+
#if defined(GB_SYSTEM_WINDOWS)
case BuildFlag_IgnoreVsSearch:
GB_ASSERT(value.kind == ExactValue_Invalid);
@@ -1507,96 +1554,22 @@ void show_timings(Checker *c, Timings *t) {
}
}
-void remove_temp_files(String output_base) {
+void remove_temp_files(lbGenerator *gen) {
if (build_context.keep_temp_files) return;
- auto data = array_make<u8>(heap_allocator(), output_base.len + 30);
- defer (array_free(&data));
-
- isize n = output_base.len;
- gb_memmove(data.data, output_base.text, n);
-#define EXT_REMOVE(s) do { \
- gb_memmove(data.data+n, s, gb_size_of(s)); \
- gb_file_remove(cast(char const *)data.data); \
- } while (0)
- EXT_REMOVE(".ll");
- EXT_REMOVE(".bc");
- EXT_REMOVE("_memcpy_pass.bc");
- if (build_context.build_mode != BuildMode_Object && !build_context.keep_object_files) {
- #if defined(GB_SYSTEM_WINDOWS)
- EXT_REMOVE(".obj");
- EXT_REMOVE(".res");
- #else
- EXT_REMOVE(".o");
- #endif
+ for_array(i, gen->output_temp_paths) {
+ String path = gen->output_temp_paths[i];
+ gb_file_remove(cast(char const *)path.text);
}
-#undef EXT_REMOVE
-}
-
-
-
-
-i32 exec_llvm_opt(String output_base) {
-#if defined(GB_SYSTEM_WINDOWS)
- // For more passes arguments: http://llvm.org/docs/Passes.html
-
- return system_exec_command_line_app("llvm-opt",
- "\"%.*sbin/opt\" \"%.*s.ll\" -o \"%.*s_memcpy_pass.bc\" -memcpyopt"
- "",
- LIT(build_context.ODIN_ROOT),
- LIT(output_base), LIT(output_base))
-
- || system_exec_command_line_app("llvm-opt",
- "\"%.*sbin/opt\" \"%.*s_memcpy_pass.bc\" -o \"%.*s.bc\" %.*s "
- "",
- LIT(build_context.ODIN_ROOT),
- LIT(output_base), LIT(output_base),
- LIT(build_context.opt_flags));
-#else
- // NOTE(zangent): This is separate because it seems that LLVM tools are packaged
- // with the Windows version, while they will be system-provided on MacOS and GNU/Linux
-
- return system_exec_command_line_app("llvm-opt",
- "opt \"%.*s.ll\" -o \"%.*s_memcpy_pass.bc\" -memcpyopt"
- "",
- LIT(output_base), LIT(output_base))
-
- || system_exec_command_line_app("llvm-opt",
- "opt \"%.*s_memcpy_pass.bc\" -o \"%.*s.bc\" %.*s "
- "",
- LIT(output_base), LIT(output_base),
- LIT(build_context.opt_flags));
-#endif
+ if (build_context.build_mode != BuildMode_Object && !build_context.keep_object_files) {
+ for_array(i, gen->output_object_paths) {
+ String path = gen->output_object_paths[i];
+ gb_file_remove(cast(char const *)path.text);
+ }
+ }
}
-i32 exec_llvm_llc(String output_base) {
- // For more arguments: http://llvm.org/docs/CommandGuide/llc.html
-#if defined(GB_SYSTEM_WINDOWS)
- return system_exec_command_line_app("llvm-llc",
- "\"%.*sbin\\llc\" \"%.*s.bc\" -filetype=obj -O%d "
- "-o \"%.*s.obj\" "
- "%.*s"
- "",
- LIT(build_context.ODIN_ROOT),
- LIT(output_base),
- build_context.optimization_level,
- LIT(output_base),
- LIT(build_context.llc_flags));
-#else
- // NOTE(zangent): Linux / Unix is unfinished and not tested very well.
- return system_exec_command_line_app("llc",
- "llc \"%.*s.bc\" -filetype=obj -relocation-model=pic -O%d "
- "%.*s "
- "%s%.*s",
- LIT(output_base),
- build_context.optimization_level,
- LIT(build_context.llc_flags),
- build_context.cross_compiling ? "-mtriple=" : "",
- cast(int)(build_context.cross_compiling ? build_context.metrics.target_triplet.len : 0),
- build_context.metrics.target_triplet.text);
-#endif
-}
void print_show_help(String const arg0, String const &command) {
print_usage_line(0, "%.*s is a tool for managing Odin source code", LIT(arg0));
@@ -1612,7 +1585,7 @@ void print_show_help(String const arg0, String const &command) {
} else if (command == "check") {
print_usage_line(1, "check parse and type check .odin file");
} else if (command == "test") {
- print_usage_line(1, "test build ands runs 'test_*' procedures in the initial package");
+		print_usage_line(1, "test      build and run procedures with the attribute @(test) in the initial package");
} else if (command == "query") {
print_usage_line(1, "query [experimental] parse, type check, and output a .json file containing information about the program");
} else if (command == "doc") {
@@ -1627,6 +1600,7 @@ void print_show_help(String const arg0, String const &command) {
bool doc = command == "doc";
bool build = command == "build";
bool run_or_build = command == "run" || command == "build" || command == "test";
+ bool test_only = command == "test";
bool check_only = command == "check";
bool check = run_or_build || command == "check";
@@ -1720,6 +1694,11 @@ void print_show_help(String const arg0, String const &command) {
print_usage_line(3, "-build-mode:shared Build as a dynamically linked library");
print_usage_line(3, "-build-mode:obj Build as an object file");
print_usage_line(3, "-build-mode:object Build as an object file");
+		print_usage_line(3, "-build-mode:assembly    Build as an assembly file");
+ print_usage_line(3, "-build-mode:assembler Build as an assembly file");
+ print_usage_line(3, "-build-mode:asm Build as an assembly file");
+ print_usage_line(3, "-build-mode:llvm-ir Build as an LLVM IR file");
+ print_usage_line(3, "-build-mode:llvm Build as an LLVM IR file");
print_usage_line(0, "");
}
@@ -1746,9 +1725,16 @@ void print_show_help(String const arg0, String const &command) {
print_usage_line(2, "Disables automatic linking with the C Run Time");
print_usage_line(0, "");
- print_usage_line(1, "-use-lld");
+ print_usage_line(1, "-lld");
print_usage_line(2, "Use the LLD linker rather than the default");
print_usage_line(0, "");
+
+ print_usage_line(1, "-use-separate-modules");
+ print_usage_line(1, "[EXPERIMENTAL]");
+ print_usage_line(2, "The backend generates multiple build units which are then linked together");
+ print_usage_line(2, "Normally, a single build unit is generated for a standard project");
+ print_usage_line(0, "");
+
}
if (check) {
@@ -1776,6 +1762,12 @@ void print_show_help(String const arg0, String const &command) {
}
}
+ if (test_only) {
+ print_usage_line(1, "-test-name:<string>");
+		print_usage_line(2, "Run only the specified test by name");
+ print_usage_line(0, "");
+ }
+
if (run_or_build) {
print_usage_line(1, "-extra-linker-flags:<string>");
print_usage_line(2, "Adds extra linker specific flags in a string");
@@ -1943,7 +1935,7 @@ int main(int arg_count, char const **arg_ptr) {
Timings *timings = &global_timings;
- timings_init(timings, str_lit("Total Time"), 128);
+ timings_init(timings, str_lit("Total Time"), 2048);
defer (timings_destroy(timings));
arena_init(&permanent_arena, heap_allocator());
@@ -1967,7 +1959,7 @@ int main(int arg_count, char const **arg_ptr) {
map_init(&build_context.defined_values, heap_allocator());
build_context.extra_packages.allocator = heap_allocator();
-
+ string_set_init(&build_context.test_names, heap_allocator());
Array<String> args = setup_args(arg_count, arg_ptr);
@@ -2190,6 +2182,9 @@ int main(int arg_count, char const **arg_ptr) {
case BuildMode_DynamicLibrary:
i32 result = linker_stage(&gen);
if (result != 0) {
+ if (build_context.show_timings) {
+ show_timings(&checker, timings);
+ }
return 1;
}
break;
@@ -2199,7 +2194,7 @@ int main(int arg_count, char const **arg_ptr) {
show_timings(&checker, timings);
}
- remove_temp_files(gen.output_base);
+ remove_temp_files(&gen);
if (run_output) {
#if defined(GB_SYSTEM_WINDOWS)
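
The -test-name handling above validates each occurrence and adds it to a string set, so the flag may be given several times to select multiple tests. A rough standalone sketch of that repeated-flag pattern; is_valid_identifier and the prefix parsing here are simplified stand-ins, not the compiler's own helpers:

#include <cctype>
#include <cstdio>
#include <set>
#include <string>

static bool is_valid_identifier(const std::string &s) {
	if (s.empty() || (!std::isalpha((unsigned char)s[0]) && s[0] != '_')) return false;
	for (char c : s) {
		if (!std::isalnum((unsigned char)c) && c != '_') return false;
	}
	return true;
}

int main(int argc, char **argv) {
	std::set<std::string> test_names;
	const std::string prefix = "-test-name:";
	for (int i = 1; i < argc; i++) {
		std::string arg = argv[i];
		if (arg.compare(0, prefix.size(), prefix) != 0) continue;
		std::string name = arg.substr(prefix.size());
		if (!is_valid_identifier(name)) {
			fprintf(stderr, "Test name '%s' must be a valid identifier\n", name.c_str());
			return 1;
		}
		test_names.insert(name); // multiple -test-name flags accumulate
	}
	for (const std::string &n : test_names) {
		printf("selected test: %s\n", n.c_str());
	}
	return 0;
}
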
diff --git a/src/parser.cpp b/src/parser.cpp
index a7e4c9162..a5180b4dd 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -1,110 +1,4 @@
-Token ast_token(Ast *node) {
- switch (node->kind) {
- case Ast_Ident: return node->Ident.token;
- case Ast_Implicit: return node->Implicit;
- case Ast_Undef: return node->Undef;
- case Ast_BasicLit: return node->BasicLit.token;
- case Ast_BasicDirective: return node->BasicDirective.token;
- case Ast_ProcGroup: return node->ProcGroup.token;
- case Ast_ProcLit: return ast_token(node->ProcLit.type);
- case Ast_CompoundLit:
- if (node->CompoundLit.type != nullptr) {
- return ast_token(node->CompoundLit.type);
- }
- return node->CompoundLit.open;
-
- case Ast_TagExpr: return node->TagExpr.token;
- case Ast_BadExpr: return node->BadExpr.begin;
- case Ast_UnaryExpr: return node->UnaryExpr.op;
- case Ast_BinaryExpr: return ast_token(node->BinaryExpr.left);
- case Ast_ParenExpr: return node->ParenExpr.open;
- case Ast_CallExpr: return ast_token(node->CallExpr.proc);
- case Ast_SelectorExpr:
- if (node->SelectorExpr.selector != nullptr) {
- return ast_token(node->SelectorExpr.selector);
- }
- return node->SelectorExpr.token;
- case Ast_SelectorCallExpr:
- if (node->SelectorCallExpr.expr != nullptr) {
- return ast_token(node->SelectorCallExpr.expr);
- }
- return node->SelectorCallExpr.token;
- case Ast_ImplicitSelectorExpr:
- if (node->ImplicitSelectorExpr.selector != nullptr) {
- return ast_token(node->ImplicitSelectorExpr.selector);
- }
- return node->ImplicitSelectorExpr.token;
- case Ast_IndexExpr: return node->IndexExpr.open;
- case Ast_SliceExpr: return node->SliceExpr.open;
- case Ast_Ellipsis: return node->Ellipsis.token;
- case Ast_FieldValue: return node->FieldValue.eq;
- case Ast_DerefExpr: return node->DerefExpr.op;
- case Ast_TernaryExpr: return ast_token(node->TernaryExpr.cond);
- case Ast_TernaryIfExpr: return ast_token(node->TernaryIfExpr.x);
- case Ast_TernaryWhenExpr: return ast_token(node->TernaryWhenExpr.x);
- case Ast_TypeAssertion: return ast_token(node->TypeAssertion.expr);
- case Ast_TypeCast: return node->TypeCast.token;
- case Ast_AutoCast: return node->AutoCast.token;
- case Ast_InlineAsmExpr: return node->InlineAsmExpr.token;
-
- case Ast_BadStmt: return node->BadStmt.begin;
- case Ast_EmptyStmt: return node->EmptyStmt.token;
- case Ast_ExprStmt: return ast_token(node->ExprStmt.expr);
- case Ast_TagStmt: return node->TagStmt.token;
- case Ast_AssignStmt: return node->AssignStmt.op;
- case Ast_BlockStmt: return node->BlockStmt.open;
- case Ast_IfStmt: return node->IfStmt.token;
- case Ast_WhenStmt: return node->WhenStmt.token;
- case Ast_ReturnStmt: return node->ReturnStmt.token;
- case Ast_ForStmt: return node->ForStmt.token;
- case Ast_RangeStmt: return node->RangeStmt.token;
- case Ast_UnrollRangeStmt: return node->UnrollRangeStmt.unroll_token;
- case Ast_CaseClause: return node->CaseClause.token;
- case Ast_SwitchStmt: return node->SwitchStmt.token;
- case Ast_TypeSwitchStmt: return node->TypeSwitchStmt.token;
- case Ast_DeferStmt: return node->DeferStmt.token;
- case Ast_BranchStmt: return node->BranchStmt.token;
- case Ast_UsingStmt: return node->UsingStmt.token;
-
- case Ast_BadDecl: return node->BadDecl.begin;
- case Ast_Label: return node->Label.token;
-
- case Ast_ValueDecl: return ast_token(node->ValueDecl.names[0]);
- case Ast_PackageDecl: return node->PackageDecl.token;
- case Ast_ImportDecl: return node->ImportDecl.token;
- case Ast_ForeignImportDecl: return node->ForeignImportDecl.token;
-
- case Ast_ForeignBlockDecl: return node->ForeignBlockDecl.token;
-
- case Ast_Attribute:
- return node->Attribute.token;
-
- case Ast_Field:
- if (node->Field.names.count > 0) {
- return ast_token(node->Field.names[0]);
- }
- return ast_token(node->Field.type);
- case Ast_FieldList:
- return node->FieldList.token;
-
- case Ast_TypeidType: return node->TypeidType.token;
- case Ast_HelperType: return node->HelperType.token;
- case Ast_DistinctType: return node->DistinctType.token;
- case Ast_PolyType: return node->PolyType.token;
- case Ast_ProcType: return node->ProcType.token;
- case Ast_RelativeType: return ast_token(node->RelativeType.tag);
- case Ast_PointerType: return node->PointerType.token;
- case Ast_ArrayType: return node->ArrayType.token;
- case Ast_DynamicArrayType: return node->DynamicArrayType.token;
- case Ast_StructType: return node->StructType.token;
- case Ast_UnionType: return node->UnionType.token;
- case Ast_EnumType: return node->EnumType.token;
- case Ast_BitSetType: return node->BitSetType.token;
- case Ast_MapType: return node->MapType.token;
- }
-
- return empty_token;
-}
+#include "parser_pos.cpp"
Token token_end_of_line(AstFile *f, Token tok) {
u8 const *start = f->tokenizer.start + tok.pos.offset;
@@ -116,6 +10,48 @@ Token token_end_of_line(AstFile *f, Token tok) {
return tok;
}
+gbString get_file_line_as_string(TokenPos const &pos, i32 *offset_) {
+ AstFile *file = get_ast_file_from_id(pos.file_id);
+ if (file == nullptr) {
+ return nullptr;
+ }
+ isize offset = pos.offset;
+
+ u8 *start = file->tokenizer.start;
+ u8 *end = file->tokenizer.end;
+ isize len = end-start;
+ if (len < offset) {
+ return nullptr;
+ }
+
+ u8 *pos_offset = start+offset;
+
+ u8 *line_start = pos_offset;
+ u8 *line_end = pos_offset;
+	while (line_start > start) {
+		if (*line_start == '\n') {
+			line_start += 1;
+			break;
+		}
+		line_start -= 1;
+	}
+
+	while (line_end < end) {
+		if (*line_end == '\n') {
+			// NOTE: keep line_end on the newline, which is the exclusive end of the line
+			break;
+		}
+		line_end += 1;
+	}
+ String the_line = make_string(line_start, line_end-line_start);
+ the_line = string_trim_whitespace(the_line);
+
+ if (offset_) *offset_ = cast(i32)(pos_offset - the_line.text);
+
+ return gb_string_make_length(heap_allocator(), the_line.text, the_line.len);
+}
+
+
isize ast_node_size(AstKind kind) {
return align_formula_isize(gb_size_of(AstCommonStuff) + ast_variant_sizes[kind], gb_align_of(void *));
@@ -241,11 +177,6 @@ Ast *clone_ast(Ast *node) {
n->FieldValue.value = clone_ast(n->FieldValue.value);
break;
- case Ast_TernaryExpr:
- n->TernaryExpr.cond = clone_ast(n->TernaryExpr.cond);
- n->TernaryExpr.x = clone_ast(n->TernaryExpr.x);
- n->TernaryExpr.y = clone_ast(n->TernaryExpr.y);
- break;
case Ast_TernaryIfExpr:
n->TernaryIfExpr.x = clone_ast(n->TernaryIfExpr.x);
n->TernaryIfExpr.cond = clone_ast(n->TernaryIfExpr.cond);
@@ -438,12 +369,15 @@ Ast *clone_ast(Ast *node) {
void error(Ast *node, char const *fmt, ...) {
Token token = {};
+ TokenPos end_pos = {};
if (node != nullptr) {
token = ast_token(node);
+ end_pos = ast_end_pos(node);
}
+
va_list va;
va_start(va, fmt);
- error_va(token, fmt, va);
+ error_va(token.pos, end_pos, fmt, va);
va_end(va);
if (node != nullptr && node->file != nullptr) {
node->file->error_count += 1;
@@ -457,7 +391,7 @@ void error_no_newline(Ast *node, char const *fmt, ...) {
}
va_list va;
va_start(va, fmt);
- error_no_newline_va(token, fmt, va);
+ error_no_newline_va(token.pos, fmt, va);
va_end(va);
if (node != nullptr && node->file != nullptr) {
node->file->error_count += 1;
@@ -465,16 +399,28 @@ void error_no_newline(Ast *node, char const *fmt, ...) {
}
void warning(Ast *node, char const *fmt, ...) {
+ Token token = {};
+ TokenPos end_pos = {};
+ if (node != nullptr) {
+ token = ast_token(node);
+ end_pos = ast_end_pos(node);
+ }
va_list va;
va_start(va, fmt);
- warning_va(ast_token(node), fmt, va);
+ warning_va(token.pos, end_pos, fmt, va);
va_end(va);
}
void syntax_error(Ast *node, char const *fmt, ...) {
+ Token token = {};
+ TokenPos end_pos = {};
+ if (node != nullptr) {
+ token = ast_token(node);
+ end_pos = ast_end_pos(node);
+ }
va_list va;
va_start(va, fmt);
- syntax_error_va(ast_token(node), fmt, va);
+ syntax_error_va(token.pos, end_pos, fmt, va);
va_end(va);
if (node != nullptr && node->file != nullptr) {
node->file->error_count += 1;
@@ -646,7 +592,7 @@ Ast *ast_basic_lit(AstFile *f, Token basic_lit) {
return result;
}
-Ast *ast_basic_directive(AstFile *f, Token token, String name) {
+Ast *ast_basic_directive(AstFile *f, Token token, Token name) {
Ast *result = alloc_ast_node(f, Ast_BasicDirective);
result->BasicDirective.token = token;
result->BasicDirective.name = name;
@@ -698,13 +644,6 @@ Ast *ast_compound_lit(AstFile *f, Ast *type, Array<Ast *> const &elems, Token op
}
-Ast *ast_ternary_expr(AstFile *f, Ast *cond, Ast *x, Ast *y) {
- Ast *result = alloc_ast_node(f, Ast_TernaryExpr);
- result->TernaryExpr.cond = cond;
- result->TernaryExpr.x = x;
- result->TernaryExpr.y = y;
- return result;
-}
Ast *ast_ternary_if_expr(AstFile *f, Ast *x, Ast *cond, Ast *y) {
Ast *result = alloc_ast_node(f, Ast_TernaryIfExpr);
result->TernaryIfExpr.x = x;
@@ -1357,6 +1296,7 @@ Token expect_token_after(AstFile *f, TokenKind kind, char const *msg) {
bool is_token_range(TokenKind kind) {
switch (kind) {
case Token_Ellipsis:
+ case Token_RangeFull:
case Token_RangeHalf:
return true;
}
@@ -1587,6 +1527,10 @@ void expect_semicolon(AstFile *f, Ast *s) {
return;
}
+ if (f->curr_token.kind == Token_EOF) {
+ return;
+ }
+
if (s != nullptr) {
bool insert_semi = (f->tokenizer.flags & TokenizerFlag_InsertSemicolon) != 0;
if (insert_semi) {
@@ -2007,35 +1951,28 @@ Ast *parse_operand(AstFile *f, bool lhs) {
Token name = expect_token(f, Token_Ident);
if (name.string == "type") {
return ast_helper_type(f, token, parse_type(f));
- } /* else if (name.string == "no_deferred") {
- operand = parse_expr(f, false);
- if (unparen_expr(operand)->kind != Ast_CallExpr) {
- syntax_error(operand, "#no_deferred can only be applied to procedure calls");
- operand = ast_bad_expr(f, token, f->curr_token);
- }
- operand->state_flags |= StateFlag_no_deferred;
- } */ else if (name.string == "file") {
- return ast_basic_directive(f, token, name.string);
- } else if (name.string == "line") { return ast_basic_directive(f, token, name.string);
- } else if (name.string == "procedure") { return ast_basic_directive(f, token, name.string);
- } else if (name.string == "caller_location") { return ast_basic_directive(f, token, name.string);
+ } else if (name.string == "file") {
+ return ast_basic_directive(f, token, name);
+ } else if (name.string == "line") { return ast_basic_directive(f, token, name);
+ } else if (name.string == "procedure") { return ast_basic_directive(f, token, name);
+ } else if (name.string == "caller_location") { return ast_basic_directive(f, token, name);
} else if (name.string == "location") {
- Ast *tag = ast_basic_directive(f, token, name.string);
+ Ast *tag = ast_basic_directive(f, token, name);
return parse_call_expr(f, tag);
} else if (name.string == "load") {
- Ast *tag = ast_basic_directive(f, token, name.string);
+ Ast *tag = ast_basic_directive(f, token, name);
return parse_call_expr(f, tag);
} else if (name.string == "assert") {
- Ast *tag = ast_basic_directive(f, token, name.string);
+ Ast *tag = ast_basic_directive(f, token, name);
return parse_call_expr(f, tag);
} else if (name.string == "defined") {
- Ast *tag = ast_basic_directive(f, token, name.string);
+ Ast *tag = ast_basic_directive(f, token, name);
return parse_call_expr(f, tag);
} else if (name.string == "config") {
- Ast *tag = ast_basic_directive(f, token, name.string);
+ Ast *tag = ast_basic_directive(f, token, name);
return parse_call_expr(f, tag);
} else if (name.string == "soa" || name.string == "simd") {
- Ast *tag = ast_basic_directive(f, token, name.string);
+ Ast *tag = ast_basic_directive(f, token, name);
Ast *original_type = parse_type(f);
Ast *type = unparen_expr(original_type);
switch (type->kind) {
@@ -2047,7 +1984,7 @@ Ast *parse_operand(AstFile *f, bool lhs) {
}
return original_type;
} else if (name.string == "partial") {
- Ast *tag = ast_basic_directive(f, token, name.string);
+ Ast *tag = ast_basic_directive(f, token, name);
Ast *original_type = parse_type(f);
Ast *type = unparen_expr(original_type);
switch (type->kind) {
@@ -2059,6 +1996,10 @@ Ast *parse_operand(AstFile *f, bool lhs) {
return original_type;
} else if (name.string == "bounds_check") {
Ast *operand = parse_expr(f, lhs);
+ if (operand == nullptr) {
+				syntax_error(token, "Invalid expression for #%.*s", LIT(name.string));
+ return nullptr;
+ }
operand->state_flags |= StateFlag_bounds_check;
if ((operand->state_flags & StateFlag_no_bounds_check) != 0) {
syntax_error(token, "#bounds_check and #no_bounds_check cannot be applied together");
@@ -2066,13 +2007,17 @@ Ast *parse_operand(AstFile *f, bool lhs) {
return operand;
} else if (name.string == "no_bounds_check") {
Ast *operand = parse_expr(f, lhs);
+ if (operand == nullptr) {
+				syntax_error(token, "Invalid expression for #%.*s", LIT(name.string));
+ return nullptr;
+ }
operand->state_flags |= StateFlag_no_bounds_check;
if ((operand->state_flags & StateFlag_bounds_check) != 0) {
syntax_error(token, "#bounds_check and #no_bounds_check cannot be applied together");
}
return operand;
} else if (name.string == "relative") {
- Ast *tag = ast_basic_directive(f, token, name.string);
+ Ast *tag = ast_basic_directive(f, token, name);
tag = parse_call_expr(f, tag);
Ast *type = parse_type(f);
return ast_relative_type(f, tag, type);
@@ -2158,6 +2103,8 @@ Ast *parse_operand(AstFile *f, bool lhs) {
return type;
}
+ skip_possible_newline_for_literal(f);
+
if (allow_token(f, Token_Undef)) {
if (where_token.kind != Token_Invalid) {
syntax_error(where_token, "'where' clauses are not allowed on procedure literals without a defined body (replaced with ---)");
@@ -2170,6 +2117,14 @@ Ast *parse_operand(AstFile *f, bool lhs) {
body = parse_body(f);
f->curr_proc = curr_proc;
+ // Apply the tags directly to the body rather than the type
+ if (tags & ProcTag_no_bounds_check) {
+ body->state_flags |= StateFlag_no_bounds_check;
+ }
+ if (tags & ProcTag_bounds_check) {
+ body->state_flags |= StateFlag_bounds_check;
+ }
+
return ast_proc_lit(f, type, body, tags, where_token, where_clauses);
} else if (allow_token(f, Token_do)) {
Ast *curr_proc = f->curr_proc;
@@ -2317,7 +2272,7 @@ Ast *parse_operand(AstFile *f, bool lhs) {
f->expr_level = prev_level;
}
-
+ skip_possible_newline_for_literal(f);
Token open = expect_token_after(f, Token_OpenBrace, "struct");
isize name_count = 0;
@@ -2394,6 +2349,7 @@ Ast *parse_operand(AstFile *f, bool lhs) {
}
+ skip_possible_newline_for_literal(f);
Token open = expect_token_after(f, Token_OpenBrace, "union");
while (f->curr_token.kind != Token_CloseBrace &&
@@ -2418,6 +2374,8 @@ Ast *parse_operand(AstFile *f, bool lhs) {
if (f->curr_token.kind != Token_OpenBrace) {
base_type = parse_type(f);
}
+
+ skip_possible_newline_for_literal(f);
Token open = expect_token(f, Token_OpenBrace);
Array<Ast *> values = parse_element_list(f);
@@ -2509,6 +2467,7 @@ Ast *parse_operand(AstFile *f, bool lhs) {
}
}
+ skip_possible_newline_for_literal(f);
Token open = expect_token(f, Token_OpenBrace);
Ast *asm_string = parse_expr(f, false);
expect_token(f, Token_Comma);
@@ -2673,6 +2632,7 @@ Ast *parse_atom_expr(AstFile *f, Ast *operand, bool lhs) {
switch (f->curr_token.kind) {
case Token_Ellipsis:
+ case Token_RangeFull:
case Token_RangeHalf:
// NOTE(bill): Do not err yet
case Token_Colon:
@@ -2684,6 +2644,7 @@ Ast *parse_atom_expr(AstFile *f, Ast *operand, bool lhs) {
switch (f->curr_token.kind) {
case Token_Ellipsis:
+ case Token_RangeFull:
case Token_RangeHalf:
syntax_error(f->curr_token, "Expected a colon, not a range");
/* fallthrough */
@@ -2722,6 +2683,16 @@ Ast *parse_atom_expr(AstFile *f, Ast *operand, bool lhs) {
}
break;
+ case Token_Increment:
+ case Token_Decrement:
+ if (!lhs) {
+ Token token = advance_token(f);
+ syntax_error(token, "Postfix '%.*s' operator is not supported", LIT(token.string));
+ } else {
+ loop = false;
+ }
+ break;
+
default:
loop = false;
break;
@@ -2752,16 +2723,26 @@ Ast *parse_unary_expr(AstFile *f, bool lhs) {
return ast_auto_cast(f, token, expr);
}
+
case Token_Add:
case Token_Sub:
- case Token_Not:
case Token_Xor:
- case Token_And: {
+ case Token_And:
+ case Token_Not: {
Token token = advance_token(f);
Ast *expr = parse_unary_expr(f, lhs);
return ast_unary_expr(f, token, expr);
}
+ case Token_Increment:
+ case Token_Decrement: {
+ Token token = advance_token(f);
+ syntax_error(token, "Unary '%.*s' operator is not supported", LIT(token.string));
+ Ast *expr = parse_unary_expr(f, lhs);
+ return ast_unary_expr(f, token, expr);
+ }
+
+
case Token_Period: {
Token token = expect_token(f, Token_Period);
Ast *ident = parse_ident(f);
@@ -2790,6 +2771,7 @@ i32 token_precedence(AstFile *f, TokenKind t) {
case Token_when:
return 1;
case Token_Ellipsis:
+ case Token_RangeFull:
case Token_RangeHalf:
if (!f->allow_range) {
return 0;
@@ -2857,7 +2839,7 @@ Ast *parse_binary_expr(AstFile *f, bool lhs, i32 prec_in) {
Ast *x = parse_expr(f, lhs);
Token token_c = expect_token(f, Token_Colon);
Ast *y = parse_expr(f, lhs);
- expr = ast_ternary_expr(f, cond, x, y);
+ expr = ast_ternary_if_expr(f, x, cond, y);
} else if (op.kind == Token_if) {
Ast *x = expr;
// Token_if
@@ -2979,7 +2961,7 @@ Ast *parse_foreign_block(AstFile *f, Token token) {
defer (f->in_foreign_block = prev_in_foreign_block);
f->in_foreign_block = true;
-
+ skip_possible_newline_for_literal(f);
open = expect_token(f, Token_OpenBrace);
while (f->curr_token.kind != Token_CloseBrace &&
@@ -3151,6 +3133,13 @@ Ast *parse_simple_stmt(AstFile *f, u32 flags) {
return ast_bad_stmt(f, token, f->curr_token);
}
+ switch (token.kind) {
+ case Token_Increment:
+ case Token_Decrement:
+ advance_token(f);
+ syntax_error(token, "Postfix '%.*s' statement is not supported", LIT(token.string));
+ break;
+ }
#if 0
@@ -3220,6 +3209,7 @@ ProcCallingConvention string_to_calling_convention(String s) {
if (s == "fastcall") return ProcCC_FastCall;
if (s == "fast") return ProcCC_FastCall;
if (s == "none") return ProcCC_None;
+ if (s == "naked") return ProcCC_Naked;
return ProcCC_Invalid;
}
@@ -3896,12 +3886,6 @@ Ast *parse_return_stmt(AstFile *f) {
while (f->curr_token.kind != Token_Semicolon) {
Ast *arg = parse_expr(f, false);
- // if (f->curr_token.kind == Token_Eq) {
- // Token eq = expect_token(f, Token_Eq);
- // Ast *value = parse_value(f);
- // arg = ast_field_value(f, arg, value, eq);
- // }
-
array_add(&results, arg);
if (f->curr_token.kind != Token_Comma ||
f->curr_token.kind == Token_EOF) {
@@ -3966,7 +3950,7 @@ Ast *parse_for_stmt(AstFile *f) {
}
}
- if (!is_range && allow_token(f, Token_Semicolon)) {
+ if (!is_range && parse_control_statement_semicolon_separator(f)) {
init = cond;
cond = nullptr;
if (f->curr_token.kind != Token_Semicolon) {
@@ -4022,7 +4006,7 @@ Ast *parse_case_clause(AstFile *f, bool is_type) {
}
f->allow_range = prev_allow_range;
f->allow_in_expr = prev_allow_in_expr;
- expect_token(f, Token_Colon); // TODO(bill): Is this the best syntax?
+ expect_token(f, Token_Colon);
Array<Ast *> stmts = parse_stmt_list(f);
return ast_case_clause(f, token, list, stmts);
@@ -4332,6 +4316,16 @@ Ast *parse_unrolled_for_loop(AstFile *f, Token unroll_token) {
return ast_unroll_range_stmt(f, unroll_token, for_token, val0, val1, in_token, expr, body);
}
+void parse_check_directive_for_empty_statement(Ast *s, Token const &name) {
+ if (s != nullptr && s->kind == Ast_EmptyStmt) {
+ if (s->EmptyStmt.token.string == "\n") {
+ syntax_error(name, "#%.*s cannot be followed by a newline", LIT(name.string));
+ } else {
+ syntax_error(name, "#%.*s cannot be applied to an empty statement ';'", LIT(name.string));
+ }
+ }
+}
+
Ast *parse_stmt(AstFile *f) {
Ast *s = nullptr;
Token token = f->curr_token;
@@ -4438,6 +4432,7 @@ Ast *parse_stmt(AstFile *f) {
if (tag == "bounds_check") {
s = parse_stmt(f);
+ parse_check_directive_for_empty_statement(s, name);
s->state_flags |= StateFlag_bounds_check;
if ((s->state_flags & StateFlag_no_bounds_check) != 0) {
syntax_error(token, "#bounds_check and #no_bounds_check cannot be applied together");
@@ -4445,27 +4440,12 @@ Ast *parse_stmt(AstFile *f) {
return s;
} else if (tag == "no_bounds_check") {
s = parse_stmt(f);
+ parse_check_directive_for_empty_statement(s, name);
s->state_flags |= StateFlag_no_bounds_check;
if ((s->state_flags & StateFlag_bounds_check) != 0) {
syntax_error(token, "#bounds_check and #no_bounds_check cannot be applied together");
}
return s;
- } else if (tag == "complete") {
- s = parse_stmt(f);
- switch (s->kind) {
- case Ast_SwitchStmt:
- s->SwitchStmt.partial = false;
- syntax_warning(token, "#complete is now the default and has been replaced with its opposite: #partial");
- break;
- case Ast_TypeSwitchStmt:
- s->TypeSwitchStmt.partial = false;
- syntax_warning(token, "#complete is now the default and has been replaced with its opposite: #partial");
- break;
- default:
- syntax_error(token, "#complete can only be applied to a switch statement");
- break;
- }
- return s;
} else if (tag == "partial") {
s = parse_stmt(f);
switch (s->kind) {
@@ -4475,16 +4455,19 @@ Ast *parse_stmt(AstFile *f) {
case Ast_TypeSwitchStmt:
s->TypeSwitchStmt.partial = true;
break;
+ case Ast_EmptyStmt:
+ parse_check_directive_for_empty_statement(s, name);
+ break;
default:
syntax_error(token, "#partial can only be applied to a switch statement");
break;
}
return s;
} else if (tag == "assert") {
- Ast *t = ast_basic_directive(f, hash_token, tag);
+ Ast *t = ast_basic_directive(f, hash_token, name);
return ast_expr_stmt(f, parse_call_expr(f, t));
} else if (tag == "panic") {
- Ast *t = ast_basic_directive(f, hash_token, tag);
+ Ast *t = ast_basic_directive(f, hash_token, name);
return ast_expr_stmt(f, parse_call_expr(f, t));
} else if (name.string == "force_inline" ||
name.string == "force_no_inline") {
@@ -4571,6 +4554,7 @@ ParseFileError init_ast_file(AstFile *f, String fullpath, TokenPos *err_pos) {
GB_ASSERT(f != nullptr);
f->fullpath = string_trim_whitespace(fullpath); // Just in case
set_file_path_string(f->id, fullpath);
+ set_ast_file_from_id(f->id, f);
if (!string_ends_with(f->fullpath, str_lit(".odin"))) {
return ParseFile_WrongExtension;
}
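
The get_file_line_as_string routine added above recovers the source line containing an error by scanning backwards and forwards from the token's byte offset to the surrounding newlines, and reports where the error sits within that line so a caret can be placed under it. A small sketch of the same idea using std::string; line_at_offset is an illustrative helper, not the compiler's API:

#include <cstdio>
#include <string>

// Return the line containing `offset` and the column of `offset` within it.
static std::string line_at_offset(const std::string &src, size_t offset, size_t *col_out) {
	size_t line_start = src.rfind('\n', offset);
	line_start = (line_start == std::string::npos) ? 0 : line_start + 1;
	size_t line_end = src.find('\n', offset);
	if (line_end == std::string::npos) line_end = src.size();
	if (col_out) *col_out = offset - line_start;
	return src.substr(line_start, line_end - line_start);
}

int main() {
	std::string src = "package main\nmain :: proc() {\n    fmt.prin(\"hi\")\n}\n";
	size_t offset = src.find("prin"); // pretend the error points here
	size_t col = 0;
	std::string line = line_at_offset(src, offset, &col);
	printf("%s\n%*s^\n", line.c_str(), (int)col, "");
	return 0;
}
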
diff --git a/src/parser.hpp b/src/parser.hpp
index 8c2eadb46..89f714aaa 100644
--- a/src/parser.hpp
+++ b/src/parser.hpp
@@ -7,22 +7,21 @@ struct AstFile;
struct AstPackage;
enum AddressingMode {
- Addressing_Invalid, // invalid addressing mode
- Addressing_NoValue, // no value (void in C)
- Addressing_Value, // computed value (rvalue)
- Addressing_Context, // context value
- Addressing_Variable, // addressable variable (lvalue)
- Addressing_Constant, // constant
- Addressing_Type, // type
- Addressing_Builtin, // built-in procedure
- Addressing_ProcGroup, // procedure group (overloaded procedure)
- Addressing_MapIndex, // map index expression -
- // lhs: acts like a Variable
- // rhs: acts like OptionalOk
- Addressing_OptionalOk, // rhs: acts like a value with an optional boolean part (for existence check)
- Addressing_SoaVariable, // Struct-Of-Arrays indexed variable
-
- Addressing_AtomOpAssign, // Specialized for custom atom operations for assignments
+ Addressing_Invalid = 0, // invalid addressing mode
+ Addressing_NoValue = 1, // no value (void in C)
+ Addressing_Value = 2, // computed value (rvalue)
+ Addressing_Context = 3, // context value
+ Addressing_Variable = 4, // addressable variable (lvalue)
+ Addressing_Constant = 5, // constant
+ Addressing_Type = 6, // type
+ Addressing_Builtin = 7, // built-in procedure
+ Addressing_ProcGroup = 8, // procedure group (overloaded procedure)
+ Addressing_MapIndex = 9, // map index expression -
+ // lhs: acts like a Variable
+ // rhs: acts like OptionalOk
+ Addressing_OptionalOk = 10, // rhs: acts like a value with an optional boolean part (for existence check)
+ Addressing_SoaVariable = 11, // Struct-Of-Arrays indexed variable
+
};
struct TypeAndValue {
@@ -215,8 +214,9 @@ enum ProcCallingConvention {
ProcCC_FastCall = 5,
ProcCC_None = 6,
+ ProcCC_Naked = 7,
- ProcCC_InlineAsm = 7,
+ ProcCC_InlineAsm = 8,
ProcCC_MAX,
@@ -286,8 +286,8 @@ char const *inline_asm_dialect_strings[InlineAsmDialect_COUNT] = {
Token token; \
}) \
AST_KIND(BasicDirective, "basic directive", struct { \
- Token token; \
- String name; \
+ Token token; \
+ Token name; \
}) \
AST_KIND(Ellipsis, "ellipsis", struct { \
Token token; \
@@ -324,7 +324,7 @@ AST_KIND(_ExprBegin, "", bool) \
AST_KIND(ImplicitSelectorExpr, "implicit selector expression", struct { Token token; Ast *selector; }) \
AST_KIND(SelectorCallExpr, "selector call expression", struct { Token token; Ast *expr, *call; bool modified_call; }) \
AST_KIND(IndexExpr, "index expression", struct { Ast *expr, *index; Token open, close; }) \
- AST_KIND(DerefExpr, "dereference expression", struct { Token op; Ast *expr; }) \
+ AST_KIND(DerefExpr, "dereference expression", struct { Ast *expr; Token op; }) \
AST_KIND(SliceExpr, "slice expression", struct { \
Ast *expr; \
Token open, close; \
@@ -342,10 +342,15 @@ AST_KIND(_ExprBegin, "", bool) \
i32 builtin_id; \
}) \
AST_KIND(FieldValue, "field value", struct { Token eq; Ast *field, *value; }) \
- AST_KIND(TernaryExpr, "ternary expression", struct { Ast *cond, *x, *y; }) \
AST_KIND(TernaryIfExpr, "ternary if expression", struct { Ast *x, *cond, *y; }) \
AST_KIND(TernaryWhenExpr, "ternary when expression", struct { Ast *x, *cond, *y; }) \
- AST_KIND(TypeAssertion, "type assertion", struct { Ast *expr; Token dot; Ast *type; Type *type_hint; }) \
+ AST_KIND(TypeAssertion, "type assertion", struct { \
+ Ast *expr; \
+ Token dot; \
+ Ast *type; \
+ Type *type_hint; \
+ bool ignores[2]; \
+ }) \
AST_KIND(TypeCast, "type cast", struct { Token token; Ast *type, *expr; }) \
AST_KIND(AutoCast, "auto_cast", struct { Token token; Ast *expr; }) \
AST_KIND(InlineAsmExpr, "inline asm expression", struct { \
diff --git a/src/parser_pos.cpp b/src/parser_pos.cpp
new file mode 100644
index 000000000..c5ad89604
--- /dev/null
+++ b/src/parser_pos.cpp
@@ -0,0 +1,331 @@
+Token ast_token(Ast *node) {
+ switch (node->kind) {
+ case Ast_Ident: return node->Ident.token;
+ case Ast_Implicit: return node->Implicit;
+ case Ast_Undef: return node->Undef;
+ case Ast_BasicLit: return node->BasicLit.token;
+ case Ast_BasicDirective: return node->BasicDirective.token;
+ case Ast_ProcGroup: return node->ProcGroup.token;
+ case Ast_ProcLit: return ast_token(node->ProcLit.type);
+ case Ast_CompoundLit:
+ if (node->CompoundLit.type != nullptr) {
+ return ast_token(node->CompoundLit.type);
+ }
+ return node->CompoundLit.open;
+
+ case Ast_TagExpr: return node->TagExpr.token;
+ case Ast_BadExpr: return node->BadExpr.begin;
+ case Ast_UnaryExpr: return node->UnaryExpr.op;
+ case Ast_BinaryExpr: return ast_token(node->BinaryExpr.left);
+ case Ast_ParenExpr: return node->ParenExpr.open;
+ case Ast_CallExpr: return ast_token(node->CallExpr.proc);
+ case Ast_SelectorExpr:
+ if (node->SelectorExpr.selector != nullptr) {
+ return ast_token(node->SelectorExpr.selector);
+ }
+ return node->SelectorExpr.token;
+ case Ast_SelectorCallExpr:
+ if (node->SelectorCallExpr.expr != nullptr) {
+ return ast_token(node->SelectorCallExpr.expr);
+ }
+ return node->SelectorCallExpr.token;
+ case Ast_ImplicitSelectorExpr:
+ if (node->ImplicitSelectorExpr.selector != nullptr) {
+ return ast_token(node->ImplicitSelectorExpr.selector);
+ }
+ return node->ImplicitSelectorExpr.token;
+ case Ast_IndexExpr: return node->IndexExpr.open;
+ case Ast_SliceExpr: return node->SliceExpr.open;
+ case Ast_Ellipsis: return node->Ellipsis.token;
+ case Ast_FieldValue: return node->FieldValue.eq;
+ case Ast_DerefExpr: return node->DerefExpr.op;
+ case Ast_TernaryIfExpr: return ast_token(node->TernaryIfExpr.x);
+ case Ast_TernaryWhenExpr: return ast_token(node->TernaryWhenExpr.x);
+ case Ast_TypeAssertion: return ast_token(node->TypeAssertion.expr);
+ case Ast_TypeCast: return node->TypeCast.token;
+ case Ast_AutoCast: return node->AutoCast.token;
+ case Ast_InlineAsmExpr: return node->InlineAsmExpr.token;
+
+ case Ast_BadStmt: return node->BadStmt.begin;
+ case Ast_EmptyStmt: return node->EmptyStmt.token;
+ case Ast_ExprStmt: return ast_token(node->ExprStmt.expr);
+ case Ast_TagStmt: return node->TagStmt.token;
+ case Ast_AssignStmt: return node->AssignStmt.op;
+ case Ast_BlockStmt: return node->BlockStmt.open;
+ case Ast_IfStmt: return node->IfStmt.token;
+ case Ast_WhenStmt: return node->WhenStmt.token;
+ case Ast_ReturnStmt: return node->ReturnStmt.token;
+ case Ast_ForStmt: return node->ForStmt.token;
+ case Ast_RangeStmt: return node->RangeStmt.token;
+ case Ast_UnrollRangeStmt: return node->UnrollRangeStmt.unroll_token;
+ case Ast_CaseClause: return node->CaseClause.token;
+ case Ast_SwitchStmt: return node->SwitchStmt.token;
+ case Ast_TypeSwitchStmt: return node->TypeSwitchStmt.token;
+ case Ast_DeferStmt: return node->DeferStmt.token;
+ case Ast_BranchStmt: return node->BranchStmt.token;
+ case Ast_UsingStmt: return node->UsingStmt.token;
+
+ case Ast_BadDecl: return node->BadDecl.begin;
+ case Ast_Label: return node->Label.token;
+
+ case Ast_ValueDecl: return ast_token(node->ValueDecl.names[0]);
+ case Ast_PackageDecl: return node->PackageDecl.token;
+ case Ast_ImportDecl: return node->ImportDecl.token;
+ case Ast_ForeignImportDecl: return node->ForeignImportDecl.token;
+
+ case Ast_ForeignBlockDecl: return node->ForeignBlockDecl.token;
+
+ case Ast_Attribute:
+ return node->Attribute.token;
+
+ case Ast_Field:
+ if (node->Field.names.count > 0) {
+ return ast_token(node->Field.names[0]);
+ }
+ return ast_token(node->Field.type);
+ case Ast_FieldList:
+ return node->FieldList.token;
+
+ case Ast_TypeidType: return node->TypeidType.token;
+ case Ast_HelperType: return node->HelperType.token;
+ case Ast_DistinctType: return node->DistinctType.token;
+ case Ast_PolyType: return node->PolyType.token;
+ case Ast_ProcType: return node->ProcType.token;
+ case Ast_RelativeType: return ast_token(node->RelativeType.tag);
+ case Ast_PointerType: return node->PointerType.token;
+ case Ast_ArrayType: return node->ArrayType.token;
+ case Ast_DynamicArrayType: return node->DynamicArrayType.token;
+ case Ast_StructType: return node->StructType.token;
+ case Ast_UnionType: return node->UnionType.token;
+ case Ast_EnumType: return node->EnumType.token;
+ case Ast_BitSetType: return node->BitSetType.token;
+ case Ast_MapType: return node->MapType.token;
+ }
+
+ return empty_token;
+}
+
+TokenPos token_pos_end(Token const &token) {
+ TokenPos pos = token.pos;
+ pos.offset += cast(i32)token.string.len;
+ for (isize i = 0; i < token.string.len; i++) {
+ // TODO(bill): This assumes ASCII
+ char c = token.string[i];
+ if (c == '\n') {
+ pos.line += 1;
+ pos.column = 1;
+ } else {
+ pos.column += 1;
+ }
+ }
+ return pos;
+}
+
+Token ast_end_token(Ast *node) {
+ GB_ASSERT(node != nullptr);
+
+ switch (node->kind) {
+ case Ast_Ident: return node->Ident.token;
+ case Ast_Implicit: return node->Implicit;
+ case Ast_Undef: return node->Undef;
+ case Ast_BasicLit: return node->BasicLit.token;
+ case Ast_BasicDirective: return node->BasicDirective.token;
+ case Ast_ProcGroup: return node->ProcGroup.close;
+ case Ast_ProcLit:
+ if (node->ProcLit.body) {
+ return ast_end_token(node->ProcLit.body);
+ }
+ return ast_end_token(node->ProcLit.type);
+ case Ast_CompoundLit:
+ return node->CompoundLit.close;
+
+ case Ast_BadExpr: return node->BadExpr.end;
+ case Ast_TagExpr: return ast_end_token(node->TagExpr.expr);
+ case Ast_UnaryExpr: return ast_end_token(node->UnaryExpr.expr);
+ case Ast_BinaryExpr: return ast_end_token(node->BinaryExpr.right);
+ case Ast_ParenExpr: return node->ParenExpr.close;
+ case Ast_CallExpr: return node->CallExpr.close;
+ case Ast_SelectorExpr:
+ return ast_end_token(node->SelectorExpr.selector);
+ case Ast_SelectorCallExpr:
+ return ast_end_token(node->SelectorCallExpr.call);
+ case Ast_ImplicitSelectorExpr:
+		return ast_end_token(node->ImplicitSelectorExpr.selector);
+ case Ast_IndexExpr: return node->IndexExpr.close;
+ case Ast_SliceExpr: return node->SliceExpr.close;
+ case Ast_Ellipsis:
+ if (node->Ellipsis.expr) {
+ return ast_end_token(node->Ellipsis.expr);
+ }
+ return node->Ellipsis.token;
+ case Ast_FieldValue: return ast_end_token(node->FieldValue.value);
+ case Ast_DerefExpr: return node->DerefExpr.op;
+ case Ast_TernaryIfExpr: return ast_end_token(node->TernaryIfExpr.y);
+ case Ast_TernaryWhenExpr: return ast_end_token(node->TernaryWhenExpr.y);
+ case Ast_TypeAssertion: return ast_end_token(node->TypeAssertion.type);
+ case Ast_TypeCast: return ast_end_token(node->TypeCast.expr);
+ case Ast_AutoCast: return ast_end_token(node->AutoCast.expr);
+ case Ast_InlineAsmExpr: return node->InlineAsmExpr.close;
+
+ case Ast_BadStmt: return node->BadStmt.end;
+ case Ast_EmptyStmt: return node->EmptyStmt.token;
+ case Ast_ExprStmt: return ast_end_token(node->ExprStmt.expr);
+ case Ast_TagStmt: return ast_end_token(node->TagStmt.stmt);
+ case Ast_AssignStmt:
+ if (node->AssignStmt.rhs.count > 0) {
+ return ast_end_token(node->AssignStmt.rhs[node->AssignStmt.rhs.count-1]);
+ }
+ return node->AssignStmt.op;
+ case Ast_BlockStmt: return node->BlockStmt.close;
+ case Ast_IfStmt:
+ if (node->IfStmt.else_stmt) {
+ return ast_end_token(node->IfStmt.else_stmt);
+ }
+ return ast_end_token(node->IfStmt.body);
+ case Ast_WhenStmt:
+ if (node->WhenStmt.else_stmt) {
+ return ast_end_token(node->WhenStmt.else_stmt);
+ }
+ return ast_end_token(node->WhenStmt.body);
+ case Ast_ReturnStmt:
+ if (node->ReturnStmt.results.count > 0) {
+ return ast_end_token(node->ReturnStmt.results[node->ReturnStmt.results.count-1]);
+ }
+ return node->ReturnStmt.token;
+ case Ast_ForStmt: return ast_end_token(node->ForStmt.body);
+ case Ast_RangeStmt: return ast_end_token(node->RangeStmt.body);
+ case Ast_UnrollRangeStmt: return ast_end_token(node->UnrollRangeStmt.body);
+ case Ast_CaseClause:
+ if (node->CaseClause.stmts.count) {
+ return ast_end_token(node->CaseClause.stmts[node->CaseClause.stmts.count-1]);
+ } else if (node->CaseClause.list.count) {
+ return ast_end_token(node->CaseClause.list[node->CaseClause.list.count-1]);
+ }
+ return node->CaseClause.token;
+ case Ast_SwitchStmt: return ast_end_token(node->SwitchStmt.body);
+ case Ast_TypeSwitchStmt: return ast_end_token(node->TypeSwitchStmt.body);
+ case Ast_DeferStmt: return ast_end_token(node->DeferStmt.stmt);
+ case Ast_BranchStmt:
+ if (node->BranchStmt.label) {
+ return ast_end_token(node->BranchStmt.label);
+ }
+ return node->BranchStmt.token;
+ case Ast_UsingStmt:
+ if (node->UsingStmt.list.count > 0) {
+ return ast_end_token(node->UsingStmt.list[node->UsingStmt.list.count-1]);
+ }
+ return node->UsingStmt.token;
+
+ case Ast_BadDecl: return node->BadDecl.end;
+ case Ast_Label:
+ if (node->Label.name) {
+ return ast_end_token(node->Label.name);
+ }
+ return node->Label.token;
+
+ case Ast_ValueDecl:
+ if (node->ValueDecl.values.count > 0) {
+ return ast_end_token(node->ValueDecl.values[node->ValueDecl.values.count-1]);
+ }
+ if (node->ValueDecl.type) {
+ return ast_end_token(node->ValueDecl.type);
+ }
+ if (node->ValueDecl.names.count > 0) {
+ return ast_end_token(node->ValueDecl.names[node->ValueDecl.names.count-1]);
+ }
+ return {};
+
+ case Ast_PackageDecl: return node->PackageDecl.name;
+ case Ast_ImportDecl: return node->ImportDecl.relpath;
+ case Ast_ForeignImportDecl:
+ if (node->ForeignImportDecl.filepaths.count > 0) {
+ return node->ForeignImportDecl.filepaths[node->ForeignImportDecl.filepaths.count-1];
+ }
+ if (node->ForeignImportDecl.library_name.kind != Token_Invalid) {
+ return node->ForeignImportDecl.library_name;
+ }
+ return node->ForeignImportDecl.token;
+
+ case Ast_ForeignBlockDecl:
+ return ast_end_token(node->ForeignBlockDecl.body);
+
+ case Ast_Attribute:
+ if (node->Attribute.close.kind != Token_Invalid) {
+ return node->Attribute.close;
+ }
+ return ast_end_token(node->Attribute.elems[node->Attribute.elems.count-1]);
+
+ case Ast_Field:
+ if (node->Field.tag.kind != Token_Invalid) {
+ return node->Field.tag;
+ }
+ if (node->Field.default_value) {
+ return ast_end_token(node->Field.default_value);
+ }
+ if (node->Field.type) {
+ return ast_end_token(node->Field.type);
+ }
+ return ast_end_token(node->Field.names[node->Field.names.count-1]);
+ case Ast_FieldList:
+ if (node->FieldList.list.count > 0) {
+ return ast_end_token(node->FieldList.list[node->FieldList.list.count-1]);
+ }
+ return node->FieldList.token;
+
+ case Ast_TypeidType:
+ if (node->TypeidType.specialization) {
+ return ast_end_token(node->TypeidType.specialization);
+ }
+ return node->TypeidType.token;
+ case Ast_HelperType: return ast_end_token(node->HelperType.type);
+ case Ast_DistinctType: return ast_end_token(node->DistinctType.type);
+ case Ast_PolyType:
+ if (node->PolyType.specialization) {
+ return ast_end_token(node->PolyType.specialization);
+ }
+ return ast_end_token(node->PolyType.type);
+ case Ast_ProcType:
+ if (node->ProcType.results) {
+ return ast_end_token(node->ProcType.results);
+ }
+ if (node->ProcType.params) {
+ return ast_end_token(node->ProcType.params);
+ }
+ return node->ProcType.token;
+ case Ast_RelativeType:
+ return ast_end_token(node->RelativeType.type);
+ case Ast_PointerType: return ast_end_token(node->PointerType.type);
+ case Ast_ArrayType: return ast_end_token(node->ArrayType.elem);
+ case Ast_DynamicArrayType: return ast_end_token(node->DynamicArrayType.elem);
+ case Ast_StructType:
+ if (node->StructType.fields.count > 0) {
+ return ast_end_token(node->StructType.fields[node->StructType.fields.count-1]);
+ }
+ return node->StructType.token;
+ case Ast_UnionType:
+ if (node->UnionType.variants.count > 0) {
+ return ast_end_token(node->UnionType.variants[node->UnionType.variants.count-1]);
+ }
+ return node->UnionType.token;
+ case Ast_EnumType:
+ if (node->EnumType.fields.count > 0) {
+ return ast_end_token(node->EnumType.fields[node->EnumType.fields.count-1]);
+ }
+ if (node->EnumType.base_type) {
+ return ast_end_token(node->EnumType.base_type);
+ }
+ return node->EnumType.token;
+ case Ast_BitSetType:
+ if (node->BitSetType.underlying) {
+ return ast_end_token(node->BitSetType.underlying);
+ }
+ return ast_end_token(node->BitSetType.elem);
+ case Ast_MapType: return ast_end_token(node->MapType.value);
+ }
+
+ return empty_token;
+}
+
+TokenPos ast_end_pos(Ast *node) {
+ return token_pos_end(ast_end_token(node));
+}
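
The token_pos_end helper above derives a token's end position by advancing the start position over every byte of the token's text, bumping the line and resetting the column whenever a newline is crossed (ASCII only, as the TODO in the original notes). A minimal sketch of that calculation with illustrative types:

#include <cstdio>
#include <string>

struct Pos {
	int line;
	int column;
	int offset;
};

// Advance `start` over `token_text`, mirroring token_pos_end above.
static Pos pos_end(Pos start, const std::string &token_text) {
	Pos p = start;
	p.offset += (int)token_text.size();
	for (char c : token_text) {
		if (c == '\n') {
			p.line  += 1;
			p.column = 1;
		} else {
			p.column += 1;
		}
	}
	return p;
}

int main() {
	Pos start = {10, 5, 120};
	Pos end = pos_end(start, "some_ident");
	printf("end = line %d, column %d, offset %d\n", end.line, end.column, end.offset);
	return 0;
}
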
diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp
index 2467ba609..73118321b 100644
--- a/src/thread_pool.cpp
+++ b/src/thread_pool.cpp
@@ -35,8 +35,6 @@ void thread_pool_destroy(ThreadPool *pool);
void thread_pool_start(ThreadPool *pool);
void thread_pool_join(ThreadPool *pool);
void thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data);
-void thread_pool_kick(ThreadPool *pool);
-void thread_pool_kick_and_wait(ThreadPool *pool);
GB_THREAD_PROC(worker_thread_internal);
void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_prefix) {
@@ -181,4 +179,3 @@ GB_THREAD_PROC(worker_thread_internal) {
return 0;
}
-
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 081ef6443..826fccc04 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -51,8 +51,10 @@ TOKEN_KIND(Token__AssignOpBegin, ""), \
TOKEN_KIND(Token_CmpAndEq, "&&="), \
TOKEN_KIND(Token_CmpOrEq, "||="), \
TOKEN_KIND(Token__AssignOpEnd, ""), \
- TOKEN_KIND(Token_ArrowRight, "->"), \
- TOKEN_KIND(Token_Undef, "---"), \
+ TOKEN_KIND(Token_Increment, "++"), \
+ TOKEN_KIND(Token_Decrement, "--"), \
+ TOKEN_KIND(Token_ArrowRight,"->"), \
+ TOKEN_KIND(Token_Undef, "---"), \
\
TOKEN_KIND(Token__ComparisonBegin, ""), \
TOKEN_KIND(Token_CmpEq, "=="), \
@@ -74,6 +76,7 @@ TOKEN_KIND(Token__ComparisonEnd, ""), \
TOKEN_KIND(Token_Period, "."), \
TOKEN_KIND(Token_Comma, ","), \
TOKEN_KIND(Token_Ellipsis, ".."), \
+ TOKEN_KIND(Token_RangeFull, "..="), \
TOKEN_KIND(Token_RangeHalf, "..<"), \
TOKEN_KIND(Token_BackSlash, "\\"), \
TOKEN_KIND(Token__OperatorEnd, ""), \
@@ -185,9 +188,11 @@ void init_keyword_hash_table(void) {
GB_ASSERT(max_keyword_size < 16);
}
-gb_global Array<String> global_file_path_strings; // index is file id
+gb_global Array<String> global_file_path_strings; // index is file id
+gb_global Array<struct AstFile *> global_files; // index is file id
-String get_file_path_string(i32 index);
+String get_file_path_string(i32 index);
+struct AstFile *get_ast_file_from_id(i32 index);
struct TokenPos {
i32 file_id;
@@ -281,6 +286,7 @@ void init_global_error_collector(void) {
array_init(&global_error_collector.errors, heap_allocator());
array_init(&global_error_collector.error_buffer, heap_allocator());
array_init(&global_file_path_strings, heap_allocator(), 4096);
+ array_init(&global_files, heap_allocator(), 4096);
}
@@ -302,6 +308,24 @@ bool set_file_path_string(i32 index, String const &path) {
return ok;
}
+bool set_ast_file_from_id(i32 index, AstFile *file) {
+ bool ok = false;
+ GB_ASSERT(index >= 0);
+ gb_mutex_lock(&global_error_collector.string_mutex);
+
+ if (index >= global_files.count) {
+		array_resize(&global_files, index+1);
+ }
+ AstFile *prev = global_files[index];
+ if (prev == nullptr) {
+ global_files[index] = file;
+ ok = true;
+ }
+
+ gb_mutex_unlock(&global_error_collector.string_mutex);
+ return ok;
+}
+
String get_file_path_string(i32 index) {
GB_ASSERT(index >= 0);
gb_mutex_lock(&global_error_collector.string_mutex);
@@ -315,6 +339,20 @@ String get_file_path_string(i32 index) {
return path;
}
+AstFile *get_ast_file_from_id(i32 index) {
+ GB_ASSERT(index >= 0);
+ gb_mutex_lock(&global_error_collector.string_mutex);
+
+ AstFile *file = nullptr;
+ if (index < global_files.count) {
+ file = global_files[index];
+ }
+
+ gb_mutex_unlock(&global_error_collector.string_mutex);
+ return file;
+}
+
+
void begin_error_block(void) {
gb_mutex_lock(&global_error_collector.mutex);
global_error_collector.in_block = true;
@@ -374,6 +412,8 @@ ErrorOutProc *error_out_va = default_error_out_va;
// NOTE: defined in build_settings.cpp
bool global_warnings_as_errors(void);
bool global_ignore_warnings(void);
+bool show_error_line(void);
+gbString get_file_line_as_string(TokenPos const &pos, i32 *offset);
void error_out(char const *fmt, ...) {
va_list va;
@@ -383,17 +423,85 @@ void error_out(char const *fmt, ...) {
}
-void error_va(Token token, char const *fmt, va_list va) {
+bool show_error_on_line(TokenPos const &pos, TokenPos end) {
+ if (!show_error_line()) {
+ return false;
+ }
+
+ i32 offset = 0;
+ gbString the_line = get_file_line_as_string(pos, &offset);
+ defer (gb_string_free(the_line));
+
+ if (the_line != nullptr) {
+ String line = make_string(cast(u8 const *)the_line, gb_string_length(the_line));
+
+ // TODO(bill): This assumes ASCII
+
+ enum {
+ MAX_LINE_LENGTH = 76,
+ MAX_TAB_WIDTH = 8,
+ ELLIPSIS_PADDING = 8
+ };
+
+ error_out("\n\t");
+ if (line.len+MAX_TAB_WIDTH+ELLIPSIS_PADDING > MAX_LINE_LENGTH) {
+ i32 const half_width = MAX_LINE_LENGTH/2;
+ i32 left = cast(i32)(offset);
+ i32 right = cast(i32)(line.len - offset);
+ left = gb_min(left, half_width);
+ right = gb_min(right, half_width);
+
+ line.text += offset-left;
+ line.len -= offset+right-left;
+
+ line = string_trim_whitespace(line);
+
+ offset = left + ELLIPSIS_PADDING/2;
+
+ error_out("... %.*s ...", LIT(line));
+ } else {
+ error_out("%.*s", LIT(line));
+ }
+ error_out("\n\t");
+
+ for (i32 i = 0; i < offset; i++) {
+ error_out(" ");
+ }
+ error_out("^");
+ if (end.file_id == pos.file_id) {
+ if (end.line > pos.line) {
+ for (i32 i = offset; i < line.len; i++) {
+ error_out("~");
+ }
+ } else if (end.line == pos.line && end.column > pos.column) {
+ i32 length = gb_min(end.offset - pos.offset, cast(i32)(line.len-offset));
+ for (i32 i = 1; i < length-1; i++) {
+ error_out("~");
+ }
+ if (length > 1) {
+ error_out("^");
+ }
+ }
+ }
+
+ error_out("\n\n");
+ return true;
+ }
+ return false;
+}
+
+void error_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
gb_mutex_lock(&global_error_collector.mutex);
global_error_collector.count++;
// NOTE(bill): Duplicate error, skip it
- if (token.pos.line == 0) {
+ if (pos.line == 0) {
error_out("Error: %s\n", gb_bprintf_va(fmt, va));
- } else if (global_error_collector.prev != token.pos) {
- global_error_collector.prev = token.pos;
+ } else if (global_error_collector.prev != pos) {
+ global_error_collector.prev = pos;
error_out("%s %s\n",
- token_pos_to_string(token.pos),
+ token_pos_to_string(pos),
gb_bprintf_va(fmt, va));
+ show_error_on_line(pos, end);
}
gb_mutex_unlock(&global_error_collector.mutex);
if (global_error_collector.count > MAX_ERROR_COLLECTOR_COUNT) {
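
show_error_on_line above prints the offending source line, then a caret under the start column and a run of tildes up to the end of the reported range, trimming very long lines around the error with "..." markers. A reduced sketch of just the caret/tilde rendering on a plain char buffer; demo_underline and its 0-based start/end columns are illustrative, and the trimming logic is omitted:

#include <algorithm>
#include <cstdio>
#include <cstring>

// Underlines columns [start, end) of `line` with ^~~~^ style markers.
static void demo_underline(char const *line, int start, int end) {
	int len = (int)strlen(line);
	start = std::max(0, std::min(start, len));
	end   = std::max(start, std::min(end, len));

	printf("\n\t%s\n\t", line);
	for (int i = 0; i < start; i++)          printf(" ");
	printf("^");
	for (int i = start + 1; i + 1 < end; i++) printf("~");
	if (end - start > 1)                      printf("^");
	printf("\n\n");
}

int main(void) {
	demo_underline("x := foo(bar, baz)", 5, 18); // underlines "foo(bar, baz)"
	return 0;
}
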
@@ -401,22 +509,23 @@ void error_va(Token token, char const *fmt, va_list va) {
}
}
-void warning_va(Token token, char const *fmt, va_list va) {
+void warning_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
if (global_warnings_as_errors()) {
- error_va(token, fmt, va);
+ error_va(pos, end, fmt, va);
return;
}
gb_mutex_lock(&global_error_collector.mutex);
global_error_collector.warning_count++;
if (!global_ignore_warnings()) {
// NOTE(bill): Duplicate error, skip it
- if (token.pos.line == 0) {
+ if (pos.line == 0) {
error_out("Warning: %s\n", gb_bprintf_va(fmt, va));
- } else if (global_error_collector.prev != token.pos) {
- global_error_collector.prev = token.pos;
+ } else if (global_error_collector.prev != pos) {
+ global_error_collector.prev = pos;
error_out("%s Warning: %s\n",
- token_pos_to_string(token.pos),
+ token_pos_to_string(pos),
gb_bprintf_va(fmt, va));
+ show_error_on_line(pos, end);
}
}
gb_mutex_unlock(&global_error_collector.mutex);
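
warning_va keeps its earlier promotion behaviour, now with the TokenPos/end signature: when warnings are treated as errors it forwards straight to error_va, so the error count and the MAX_ERROR_COLLECTOR_COUNT limit apply to promoted warnings as well. A minimal sketch of that forwarding pattern with va_list; the demo_* names and the g_warnings_as_errors flag are stand-ins for the real globals:

#include <cstdarg>
#include <cstdio>

static bool g_warnings_as_errors = true; // placeholder for the real global flag

static void demo_error_va(char const *fmt, va_list va) {
	fprintf(stderr, "Error: ");
	vfprintf(stderr, fmt, va);
	fprintf(stderr, "\n");
}

static void demo_warning_va(char const *fmt, va_list va) {
	if (g_warnings_as_errors) {
		// Promote: reuse the error path so error counting/limits apply too.
		demo_error_va(fmt, va);
		return;
	}
	fprintf(stderr, "Warning: ");
	vfprintf(stderr, fmt, va);
	fprintf(stderr, "\n");
}

static void demo_warning(char const *fmt, ...) {
	va_list va;
	va_start(va, fmt);
	demo_warning_va(fmt, va);
	va_end(va);
}

int main(void) {
	demo_warning("unused variable '%s'", "x"); // printed as an error here
	return 0;
}
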
@@ -429,16 +538,16 @@ void error_line_va(char const *fmt, va_list va) {
gb_mutex_unlock(&global_error_collector.mutex);
}
-void error_no_newline_va(Token token, char const *fmt, va_list va) {
+void error_no_newline_va(TokenPos const &pos, char const *fmt, va_list va) {
gb_mutex_lock(&global_error_collector.mutex);
global_error_collector.count++;
// NOTE(bill): Duplicate error, skip it
- if (token.pos.line == 0) {
+ if (pos.line == 0) {
error_out("Error: %s", gb_bprintf_va(fmt, va));
- } else if (global_error_collector.prev != token.pos) {
- global_error_collector.prev = token.pos;
+ } else if (global_error_collector.prev != pos) {
+ global_error_collector.prev = pos;
error_out("%s %s",
- token_pos_to_string(token.pos),
+ token_pos_to_string(pos),
gb_bprintf_va(fmt, va));
}
gb_mutex_unlock(&global_error_collector.mutex);
@@ -448,16 +557,17 @@ void error_no_newline_va(Token token, char const *fmt, va_list va) {
}
-void syntax_error_va(Token token, char const *fmt, va_list va) {
+void syntax_error_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
gb_mutex_lock(&global_error_collector.mutex);
global_error_collector.count++;
// NOTE(bill): Duplicate error, skip it
- if (global_error_collector.prev != token.pos) {
- global_error_collector.prev = token.pos;
+ if (global_error_collector.prev != pos) {
+ global_error_collector.prev = pos;
error_out("%s Syntax Error: %s\n",
- token_pos_to_string(token.pos),
+ token_pos_to_string(pos),
gb_bprintf_va(fmt, va));
- } else if (token.pos.line == 0) {
+ show_error_on_line(pos, end);
+ } else if (pos.line == 0) {
error_out("Syntax Error: %s\n", gb_bprintf_va(fmt, va));
}
@@ -467,21 +577,22 @@ void syntax_error_va(Token token, char const *fmt, va_list va) {
}
}
-void syntax_warning_va(Token token, char const *fmt, va_list va) {
+void syntax_warning_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
if (global_warnings_as_errors()) {
- syntax_error_va(token, fmt, va);
+ syntax_error_va(pos, end, fmt, va);
return;
}
gb_mutex_lock(&global_error_collector.mutex);
global_error_collector.warning_count++;
if (!global_ignore_warnings()) {
// NOTE(bill): Duplicate error, skip it
- if (global_error_collector.prev != token.pos) {
- global_error_collector.prev = token.pos;
+ if (global_error_collector.prev != pos) {
+ global_error_collector.prev = pos;
error_out("%s Syntax Warning: %s\n",
- token_pos_to_string(token.pos),
+ token_pos_to_string(pos),
gb_bprintf_va(fmt, va));
- } else if (token.pos.line == 0) {
+ show_error_on_line(pos, end);
+ } else if (pos.line == 0) {
error_out("Warning: %s\n", gb_bprintf_va(fmt, va));
}
}
@@ -490,17 +601,17 @@ void syntax_warning_va(Token token, char const *fmt, va_list va) {
-void warning(Token token, char const *fmt, ...) {
+void warning(Token const &token, char const *fmt, ...) {
va_list va;
va_start(va, fmt);
- warning_va(token, fmt, va);
+ warning_va(token.pos, {}, fmt, va);
va_end(va);
}
-void error(Token token, char const *fmt, ...) {
+void error(Token const &token, char const *fmt, ...) {
va_list va;
va_start(va, fmt);
- error_va(token, fmt, va);
+ error_va(token.pos, {}, fmt, va);
va_end(va);
}
@@ -509,7 +620,7 @@ void error(TokenPos pos, char const *fmt, ...) {
va_start(va, fmt);
Token token = {};
token.pos = pos;
- error_va(token, fmt, va);
+ error_va(pos, {}, fmt, va);
va_end(va);
}
@@ -521,26 +632,24 @@ void error_line(char const *fmt, ...) {
}
-void syntax_error(Token token, char const *fmt, ...) {
+void syntax_error(Token const &token, char const *fmt, ...) {
va_list va;
va_start(va, fmt);
- syntax_error_va(token, fmt, va);
+ syntax_error_va(token.pos, {}, fmt, va);
va_end(va);
}
void syntax_error(TokenPos pos, char const *fmt, ...) {
va_list va;
va_start(va, fmt);
- Token token = {};
- token.pos = pos;
- syntax_error_va(token, fmt, va);
+ syntax_error_va(pos, {}, fmt, va);
va_end(va);
}
-void syntax_warning(Token token, char const *fmt, ...) {
+void syntax_warning(Token const &token, char const *fmt, ...) {
va_list va;
va_start(va, fmt);
- syntax_warning_va(token, fmt, va);
+ syntax_warning_va(token.pos, {}, fmt, va);
va_end(va);
}
@@ -652,13 +761,14 @@ void tokenizer_err(Tokenizer *t, char const *msg, ...) {
if (column < 1) {
column = 1;
}
- Token token = {};
- token.pos.file_id = t->curr_file_id;
- token.pos.line = t->line_count;
- token.pos.column = cast(i32)column;
+ TokenPos pos = {};
+ pos.file_id = t->curr_file_id;
+ pos.line = t->line_count;
+ pos.column = cast(i32)column;
+ pos.offset = cast(i32)(t->read_curr - t->start);
va_start(va, msg);
- syntax_error_va(token, msg, va);
+ syntax_error_va(pos, {}, msg, va);
va_end(va);
t->error_count++;
@@ -670,11 +780,9 @@ void tokenizer_err(Tokenizer *t, TokenPos const &pos, char const *msg, ...) {
if (column < 1) {
column = 1;
}
- Token token = {};
- token.pos = pos;
va_start(va, msg);
- syntax_error_va(token, msg, va);
+ syntax_error_va(pos, {}, msg, va);
va_end(va);
t->error_count++;
@@ -1202,6 +1310,9 @@ void tokenizer_get_token(Tokenizer *t, Token *token, int repeat=0) {
if (t->curr_rune == '<') {
advance_to_next_rune(t);
token->kind = Token_RangeHalf;
+ } else if (t->curr_rune == '=') {
+ advance_to_next_rune(t);
+ token->kind = Token_RangeFull;
}
} else if ('0' <= t->curr_rune && t->curr_rune <= '9') {
scan_number_to_token(t, token, true);
@@ -1287,6 +1398,10 @@ void tokenizer_get_token(Tokenizer *t, Token *token, int repeat=0) {
if (t->curr_rune == '=') {
advance_to_next_rune(t);
token->kind = Token_AddEq;
+ } else if (t->curr_rune == '+') {
+ advance_to_next_rune(t);
+ token->kind = Token_Increment;
+ insert_semicolon = true;
}
break;
case '-':
@@ -1298,6 +1413,10 @@ void tokenizer_get_token(Tokenizer *t, Token *token, int repeat=0) {
advance_to_next_rune(t);
advance_to_next_rune(t);
token->kind = Token_Undef;
+ } else if (t->curr_rune == '-') {
+ advance_to_next_rune(t);
+ token->kind = Token_Decrement;
+ insert_semicolon = true;
} else if (t->curr_rune == '>') {
advance_to_next_rune(t);
token->kind = Token_ArrowRight;
diff --git a/src/types.cpp b/src/types.cpp
index 56081acc8..8a78e08d1 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -128,21 +128,6 @@ enum StructSoaKind {
StructSoa_Dynamic = 3,
};
-enum TypeAtomOpKind {
- TypeAtomOp_Invalid,
-
- TypeAtomOp_index_get,
- TypeAtomOp_index_set,
- TypeAtomOp_slice,
- TypeAtomOp_index_get_ptr,
-
- TypeAtomOp_COUNT,
-};
-
-struct TypeAtomOpTable {
- Entity *op[TypeAtomOp_COUNT];
-};
-
struct TypeStruct {
Array<Entity *> fields;
Array<String> tags;
@@ -156,8 +141,6 @@ struct TypeStruct {
i64 custom_align;
Entity * names;
- TypeAtomOpTable *atom_op_table;
-
Type * soa_elem;
i64 soa_count;
StructSoaKind soa_kind;
@@ -180,8 +163,6 @@ struct TypeUnion {
Type * polymorphic_params; // Type_Tuple
Type * polymorphic_parent;
- TypeAtomOpTable *atom_op_table;
-
bool no_nil;
bool maybe;
bool is_polymorphic;
@@ -1915,6 +1896,18 @@ bool is_type_comparable(Type *t) {
}
}
return true;
+
+ case Type_Union:
+ if (type_size_of(t) == 0) {
+ return false;
+ }
+ for_array(i, t->Union.variants) {
+ Type *v = t->Union.variants[i];
+ if (!is_type_comparable(v)) {
+ return false;
+ }
+ }
+ return true;
}
return false;
}
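
The new Type_Union case above makes a union comparable exactly when it has a non-zero size and every variant is itself comparable. The rule is a straightforward recursion over the variant list, as in this toy model (DemoType and is_demo_comparable are invented for illustration; the zero-size early-out is left out):

#include <vector>

struct DemoType {
	bool                  directly_comparable; // answer for non-union leaf types
	std::vector<DemoType> variants;            // non-empty only for union-like types
};

static bool is_demo_comparable(DemoType const &t) {
	if (!t.variants.empty()) {
		for (DemoType const &v : t.variants) {
			if (!is_demo_comparable(v)) {
				return false; // one incomparable variant poisons the whole union
			}
		}
		return true;
	}
	return t.directly_comparable;
}
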
@@ -1959,7 +1952,8 @@ bool is_type_simple_compare(Type *t) {
return false;
}
}
- return true;
+ // make it dumb on purpose
+ return t->Union.variants.count == 1;
case Type_SimdVector:
return is_type_simple_compare(t->SimdVector.elem);
@@ -2766,7 +2760,36 @@ void type_path_pop(TypePath *tp) {
i64 type_size_of_internal (Type *t, TypePath *path);
i64 type_align_of_internal(Type *t, TypePath *path);
+i64 type_size_of(Type *t);
+i64 type_align_of(Type *t);
+i64 type_size_of_struct_pretend_is_packed(Type *ot) {
+ if (ot == nullptr) {
+ return 0;
+ }
+ Type *t = core_type(ot);
+ if (t->kind != Type_Struct) {
+ return type_size_of(ot);
+ }
+
+ if (t->Struct.is_packed) {
+ return type_size_of(ot);
+ }
+
+ i64 count = 0, size = 0, align = 1;
+
+ auto const &fields = t->Struct.fields;
+ count = fields.count;
+ if (count == 0) {
+ return 0;
+ }
+
+ for_array(i, fields) {
+ size += type_size_of(fields[i]->type);
+ }
+
+ return align_formula(size, align);
+}
i64 type_size_of(Type *t) {
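
type_size_of_struct_pretend_is_packed above sums the field sizes and rounds the total with align_formula, ignoring the padding a normal layout would insert between fields. A small sketch contrasting the two computations; demo_packed_size, demo_laid_out_size and the field model are illustrative, and demo_align_formula is only an assumption about how align_formula rounds up:

#include <cstdint>
#include <vector>

// Round `value` up to a multiple of `align` (assumed behaviour of align_formula).
static int64_t demo_align_formula(int64_t value, int64_t align) {
	return (value + align - 1) / align * align;
}

struct DemoField { int64_t size; int64_t align; };

// "Pretend is packed": ignore field alignment, just sum the field sizes.
static int64_t demo_packed_size(std::vector<DemoField> const &fields) {
	int64_t size = 0;
	for (DemoField const &f : fields) size += f.size;
	return demo_align_formula(size, 1);
}

// Normal layout for comparison: pad each field to its alignment.
static int64_t demo_laid_out_size(std::vector<DemoField> const &fields) {
	int64_t size = 0, max_align = 1;
	for (DemoField const &f : fields) {
		size = demo_align_formula(size, f.align) + f.size;
		if (f.align > max_align) max_align = f.align;
	}
	return demo_align_formula(size, max_align);
}
// e.g. fields {size 1, align 1} and {size 8, align 8}:
// packed size is 9, laid-out size is 16.
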
@@ -3618,6 +3641,9 @@ gbString write_type_to_string(gbString str, Type *type) {
case ProcCC_None:
str = gb_string_appendc(str, " \"none\" ");
break;
+ case ProcCC_Naked:
+ str = gb_string_appendc(str, " \"naked\" ");
+ break;
// case ProcCC_VectorCall:
// str = gb_string_appendc(str, " \"vectorcall\" ");
// break;