aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeroen van Rijn <Kelimion@users.noreply.github.com>2021-04-30 00:21:52 +0200
committerJeroen van Rijn <Kelimion@users.noreply.github.com>2021-04-30 00:21:52 +0200
commit58e023e0cf581db71b4bf3c341b781a2f09ff50a (patch)
treecf070b07edb9d8e3e0cb3dd849d26cf8228377c5
parent222bab501ce886692f300b02d15b9e7099457406 (diff)
Add `compress` and `image` to core.
-rw-r--r--core/compress/common.odin203
-rw-r--r--core/compress/gzip/example.odin70
-rw-r--r--core/compress/gzip/gzip.odin314
-rw-r--r--core/compress/zlib/example.odin42
-rw-r--r--core/compress/zlib/zlib.odin602
-rw-r--r--core/image/common.odin107
-rw-r--r--core/image/png/example.odin327
-rw-r--r--core/image/png/helpers.odin521
-rw-r--r--core/image/png/png.odin1590
9 files changed, 3776 insertions, 0 deletions
diff --git a/core/compress/common.odin b/core/compress/common.odin
new file mode 100644
index 000000000..99b054903
--- /dev/null
+++ b/core/compress/common.odin
@@ -0,0 +1,203 @@
+package compress
+
+import "core:io"
+import "core:image"
+
+// Error helper, e.g. is_kind(err, General_Error.OK);
+is_kind :: proc(u: $U, x: $V) -> bool {
+    // Returns true if the union `u` currently holds a value of variant type V
+    // that compares equal to `x`, e.g. is_kind(err, General_Error.OK).
+    v, ok := u.(V);
+    return ok && v == x;
+}
+
+// Common error union shared by the compress/image packages.
+// A nil value and General_Error.OK both occur in practice; use is_kind to test.
+Error :: union {
+    General_Error,
+    Deflate_Error,
+    ZLIB_Error,
+    GZIP_Error,
+    ZIP_Error,
+    /*
+        This is here because png.load will return this type of error union,
+        as it may involve an I/O error, a Deflate error, etc.
+    */
+    image.PNG_Error,
+}
+
+General_Error :: enum {
+ OK = 0,
+ File_Not_Found,
+ Cannot_Open_File,
+ File_Too_Short,
+ Stream_Too_Short,
+ Output_Too_Short,
+ Unknown_Compression_Method,
+ Checksum_Failed,
+ Incompatible_Options,
+ Unimplemented,
+}
+
+GZIP_Error :: enum {
+ Invalid_GZIP_Signature,
+ Reserved_Flag_Set,
+ Invalid_Extra_Data,
+ Original_Name_Too_Long,
+ Comment_Too_Long,
+ Payload_Length_Invalid,
+ Payload_CRC_Invalid,
+}
+
+ZIP_Error :: enum {
+ Invalid_ZIP_File_Signature,
+ Unexpected_Signature,
+ Insert_Next_Disk,
+ Expected_End_of_Central_Directory_Record,
+}
+
+ZLIB_Error :: enum {
+ Unsupported_Window_Size,
+ FDICT_Unsupported,
+ Unsupported_Compression_Level,
+ Code_Buffer_Malformed,
+}
+
+Deflate_Error :: enum {
+ Huffman_Bad_Sizes,
+ Huffman_Bad_Code_Lengths,
+ Inflate_Error,
+ Bad_Distance,
+ Bad_Huffman_Code,
+ Len_Nlen_Mismatch,
+ BType_3,
+}
+
+// General context for ZLIB, LZW, etc.
+Context :: struct {
+    // Bit-reader state: bits are accumulated LSB-first from `input`.
+    code_buffer: u32,
+    // Number of valid bits in code_buffer. Negative values are sentinels:
+    // -100 means the code buffer is malformed, -42 means end of stream was
+    // hit while refilling (see refill_lsb).
+    num_bits: i8,
+    /*
+        num_bits will be set to -100 if the buffer is malformed
+    */
+    eof: b8,
+
+    input: io.Stream,
+    output: io.Stream,
+    bytes_written: i64,
+    // Used to update hash as we write instead of all at once
+    rolling_hash: u32,
+
+    // Sliding window buffer. Size must be a power of two.
+    window_size: i64,
+    last: ^[dynamic]byte,
+}
+
+// Stream helpers
+/*
+ TODO: These need to be optimized.
+
+ Streams should really only check if a certain method is available once, perhaps even during setup.
+
+ Bit and byte readers may be merged so that reading bytes will grab them from the bit buffer first.
+ This simplifies end-of-stream handling where bits may be left in the bit buffer.
+*/
+
+read_data :: #force_inline proc(c: ^Context, $T: typeid) -> (res: T, err: io.Error) {
+    // Reads size_of(T) bytes from the input stream and reinterprets them as T.
+    // Returns a zero value plus an error if the stream cannot be read.
+    b := make([]u8, size_of(T), context.temp_allocator);
+    r, e1 := io.to_reader(c.input);
+    if !e1 {
+        // BUG FIX: the original combined this with the read-error check and
+        // returned e2, which is .None when only to_reader failed — silently
+        // reporting success with a zero value. Report a real error instead.
+        return T{}, .Empty;
+    }
+    _, e2 := io.read(r, b);
+    if e2 != .None {
+        return T{}, e2;
+    }
+
+    res = (^T)(raw_data(b))^;
+    return res, .None;
+}
+
+read_u8 :: #force_inline proc(z: ^Context) -> (res: u8, err: io.Error) {
+    // Convenience wrapper around read_data for a single byte.
+    res, err = read_data(z, u8);
+    return;
+}
+
+peek_data :: #force_inline proc(c: ^Context, $T: typeid) -> (res: T, err: io.Error) {
+    // Reads size_of(T) bytes at the current stream position WITHOUT advancing
+    // it, by querying the current offset and using a positional read.
+    // Requires the input stream to support seek and read_at.
+    // Get current position to read from.
+    curr, e1 := c.input->impl_seek(0, .Current);
+    if e1 != .None {
+        return T{}, e1;
+    }
+    r, e2 := io.to_reader_at(c.input);
+    if !e2 {
+        // Stream does not support positional reads.
+        return T{}, .Empty;
+    }
+    b := make([]u8, size_of(T), context.temp_allocator);
+    _, e3 := io.read_at(r, b, curr);
+    if e3 != .None {
+        // NOTE(review): the concrete read error e3 is replaced by .Empty here —
+        // confirm whether propagating e3 would be preferable.
+        return T{}, .Empty;
+    }
+
+    res = (^T)(raw_data(b))^;
+    return res, .None;
+}
+
+// Sliding window read back
+peek_back_byte :: proc(c: ^Context, offset: i64) -> (res: u8, err: io.Error) {
+    // Look back into the sliding window.
+    // NOTE(review): assumes c.last/window_size are initialized and that the
+    // byte at `offset` is still within the window; always returns .None.
+    return c.last[offset % c.window_size], .None;
+}
+
+// Generalized bit reader LSB
+refill_lsb :: proc(z: ^Context, width := i8(24)) {
+    // Tops up the bit buffer one input byte at a time, LSB-first, until it
+    // holds more than `width` valid bits or the stream ends.
+    for {
+        if z.num_bits > width {
+            break;
+        }
+        if z.code_buffer == 0 && z.num_bits == -1 {
+            // NOTE(review): resets an empty buffer with num_bits == -1 back to
+            // a valid state; which path produces -1 is not visible here — confirm.
+            z.num_bits = 0;
+        }
+        if z.code_buffer >= 1 << uint(z.num_bits) {
+            // Code buffer is malformed: it holds bits above the claimed count.
+            z.num_bits = -100;
+            return;
+        }
+        c, err := read_u8(z);
+        if err != .None {
+            // This is fine at the end of the file.
+            z.num_bits = -42;
+            z.eof = true;
+            return;
+        }
+        // Append the new byte above the currently buffered bits.
+        z.code_buffer |= (u32(c) << u8(z.num_bits));
+        z.num_bits += 8;
+    }
+}
+
+consume_bits_lsb :: #force_inline proc(z: ^Context, width: u8) {
+    // Drop the `width` lowest bits from the bit buffer.
+    z.num_bits -= i8(width);
+    z.code_buffer >>= width;
+}
+
+peek_bits_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
+    // Return the lowest `width` bits without consuming them, refilling the
+    // buffer first if it runs low.
+    if z.num_bits < i8(width) {
+        refill_lsb(z);
+    }
+    mask := (u32(1) << width) - 1;
+    return z.code_buffer & mask;
+}
+
+peek_bits_no_refill_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
+    // Like peek_bits_lsb, but the caller guarantees enough bits are buffered.
+    assert(z.num_bits >= i8(width));
+    mask := (u32(1) << width) - 1;
+    return z.code_buffer & mask;
+}
+
+read_bits_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
+    // Peek, then consume: returns the lowest `width` bits and advances.
+    res := peek_bits_lsb(z, width);
+    consume_bits_lsb(z, width);
+    return res;
+}
+
+read_bits_no_refill_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
+    // Like read_bits_lsb, but assumes the bit buffer already holds enough bits.
+    res := peek_bits_no_refill_lsb(z, width);
+    consume_bits_lsb(z, width);
+    return res;
+}
+
+discard_to_next_byte_lsb :: proc(z: ^Context) {
+    // Drops buffered bits until the bit reader is byte-aligned again;
+    // num_bits & 7 is the number of bits past the last whole byte.
+    discard := u8(z.num_bits & 7);
+    consume_bits_lsb(z, discard);
+}
diff --git a/core/compress/gzip/example.odin b/core/compress/gzip/example.odin
new file mode 100644
index 000000000..fa0cba9db
--- /dev/null
+++ b/core/compress/gzip/example.odin
@@ -0,0 +1,70 @@
+//+ignore
+package gzip
+
+import "core:compress/gzip"
+import "core:bytes"
+import "core:os"
+
+// Small GZIP file with fextra, fname and fcomment present.
+@private
+TEST: []u8 = {
+ 0x1f, 0x8b, 0x08, 0x1c, 0xcb, 0x3b, 0x3a, 0x5a,
+ 0x02, 0x03, 0x07, 0x00, 0x61, 0x62, 0x03, 0x00,
+ 0x63, 0x64, 0x65, 0x66, 0x69, 0x6c, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x00, 0x54, 0x68, 0x69, 0x73,
+ 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x2b, 0x48,
+ 0xac, 0xcc, 0xc9, 0x4f, 0x4c, 0x01, 0x00, 0x15,
+ 0x6a, 0x2c, 0x42, 0x07, 0x00, 0x00, 0x00,
+};
+
+main :: proc() {
+    // Example driver: decompresses the built-in TEST vector when no arguments
+    // are given, then gunzips each file named on the command line ("-" reads
+    // from stdin), writing the results to stdout.
+    // Set up output buffer.
+    buf: bytes.Buffer;
+    defer bytes.buffer_destroy(&buf);
+
+    stdout :: proc(s: string) {
+        os.write_string(os.stdout, s);
+    }
+    stderr :: proc(s: string) {
+        os.write_string(os.stderr, s);
+    }
+
+    args := os.args;
+
+    if len(args) < 2 {
+        stderr("No input file specified.\n");
+        err := gzip.load(&TEST, &buf);
+        if gzip.is_kind(err, gzip.E_General.OK) {
+            stdout("Displaying test vector: ");
+            stdout(bytes.buffer_to_string(&buf));
+            stdout("\n");
+        }
+    }
+
+    // The rest are all files.
+    args = args[1:];
+    err: gzip.Error;
+
+    for file in args {
+        if file == "-" {
+            // Read from stdin
+            s := os.stream_from_handle(os.stdin);
+            err = gzip.load(&s, &buf);
+        } else {
+            err = gzip.load(file, &buf);
+        }
+        if !gzip.is_kind(err, gzip.E_General.OK) {
+            if gzip.is_kind(err, gzip.E_General.File_Not_Found) {
+                stderr("File not found: ");
+                stderr(file);
+                stderr("\n");
+                os.exit(1);
+            }
+            stderr("GZIP returned an error.\n");
+            os.exit(2);
+        }
+        stdout(bytes.buffer_to_string(&buf));
+    }
+    os.exit(0);
+}
diff --git a/core/compress/gzip/gzip.odin b/core/compress/gzip/gzip.odin
new file mode 100644
index 000000000..3278d7c1d
--- /dev/null
+++ b/core/compress/gzip/gzip.odin
@@ -0,0 +1,314 @@
+package gzip
+
+import "core:compress/zlib"
+import "core:compress"
+import "core:os"
+import "core:io"
+import "core:bytes"
+import "core:hash"
+
+/*
+
+ This package implements support for the GZIP file format v4.3,
+ as specified in RFC 1952.
+
+ It is implemented in such a way that it lends itself naturally
+ to be the input to a complementary TAR implementation.
+
+*/
+
+Magic :: enum u16le {
+ GZIP = 0x8b << 8 | 0x1f,
+}
+
+Header :: struct #packed {
+ magic: Magic,
+ compression_method: Compression,
+ flags: Header_Flags,
+ modification_time: u32le,
+ xfl: Compression_Flags,
+ os: OS,
+}
+#assert(size_of(Header) == 10);
+
+Header_Flag :: enum u8 {
+ // Order is important
+ text = 0,
+ header_crc = 1,
+ extra = 2,
+ name = 3,
+ comment = 4,
+ reserved_1 = 5,
+ reserved_2 = 6,
+ reserved_3 = 7,
+}
+Header_Flags :: distinct bit_set[Header_Flag; u8];
+
+OS :: enum u8 {
+ FAT = 0,
+ Amiga = 1,
+ VMS = 2,
+ Unix = 3,
+ VM_CMS = 4,
+ Atari_TOS = 5,
+ HPFS = 6,
+ Macintosh = 7,
+ Z_System = 8,
+ CP_M = 9,
+ TOPS_20 = 10,
+ NTFS = 11,
+ QDOS = 12,
+ Acorn_RISCOS = 13,
+ _Unknown = 14,
+ Unknown = 255,
+}
+OS_Name :: #partial [OS]string{
+ .FAT = "FAT",
+ .Amiga = "Amiga",
+ .VMS = "VMS/OpenVMS",
+ .Unix = "Unix",
+ .VM_CMS = "VM/CMS",
+ .Atari_TOS = "Atari TOS",
+ .HPFS = "HPFS",
+ .Macintosh = "Macintosh",
+ .Z_System = "Z-System",
+ .CP_M = "CP/M",
+ .TOPS_20 = "TOPS-20",
+ .NTFS = "NTFS",
+ .QDOS = "QDOS",
+ .Acorn_RISCOS = "Acorn RISCOS",
+ .Unknown = "Unknown",
+};
+
+Compression :: enum u8 {
+ DEFLATE = 8,
+}
+
+Compression_Flags :: enum u8 {
+ Maximum_Compression = 2,
+ Fastest_Compression = 4,
+}
+
+Error :: compress.Error;
+E_General :: compress.General_Error;
+E_GZIP :: compress.GZIP_Error;
+E_ZLIB :: compress.ZLIB_Error;
+E_Deflate :: compress.Deflate_Error;
+is_kind :: compress.is_kind;
+
+load_from_slice :: proc(slice: ^[]u8, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
+    // Wrap the byte slice in a reader-backed stream and delegate to the
+    // stream-based loader.
+    reader: bytes.Reader;
+    bytes.reader_init(&reader, slice^);
+    s := bytes.reader_to_stream(&reader);
+    return load_from_stream(&s, buf, allocator);
+}
+
+load_from_file :: proc(filename: string, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
+    // Read the whole file into a temporary buffer, then decode from the slice.
+    data, ok := os.read_entire_file(filename, context.temp_allocator);
+    if !ok {
+        return E_General.File_Not_Found;
+    }
+    return load_from_slice(&data, buf, allocator);
+}
+
+load_from_stream :: proc(stream: ^io.Stream, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
+    /*
+        Decompresses a GZIP stream (RFC 1952) into `buf`: validates the header,
+        skips the optional extra/name/comment/header-CRC fields, inflates the
+        DEFLATE payload, then checks the trailing CRC32 and ISIZE fields
+        against the decompressed output.
+    */
+    ctx := compress.Context{
+        input = stream^,
+    };
+    buf := buf;
+    ws := bytes.buffer_to_stream(buf);
+    ctx.output = ws;
+
+    header, e := compress.read_data(&ctx, Header);
+    if e != .None {
+        return E_General.File_Too_Short;
+    }
+
+    if header.magic != .GZIP {
+        return E_GZIP.Invalid_GZIP_Signature;
+    }
+    if header.compression_method != .DEFLATE {
+        return E_General.Unknown_Compression_Method;
+    }
+
+    // Clamp unrecognized OS ids to the explicit Unknown value.
+    if header.os >= ._Unknown {
+        header.os = .Unknown;
+    }
+
+    if .reserved_1 in header.flags || .reserved_2 in header.flags || .reserved_3 in header.flags {
+        return E_GZIP.Reserved_Flag_Set;
+    }
+
+    // printf("signature: %v\n", header.magic);
+    // printf("compression: %v\n", header.compression_method);
+    // printf("flags: %v\n", header.flags);
+    // printf("modification time: %v\n", time.unix(i64(header.modification_time), 0));
+    // printf("xfl: %v (%v)\n", header.xfl, int(header.xfl));
+    // printf("os: %v\n", OS_Name[header.os]);
+
+    // Optional FEXTRA field: u16le total length, then id/length/data records.
+    if .extra in header.flags {
+        xlen, e_extra := compress.read_data(&ctx, u16le);
+        if e_extra != .None {
+            return E_General.Stream_Too_Short;
+        }
+        // printf("Extra data present (%v bytes)\n", xlen);
+        if xlen < 4 {
+            // Minimum length is 2 for ID + 2 for a field length, if set to zero.
+            return E_GZIP.Invalid_Extra_Data;
+        }
+
+        field_id: [2]u8;
+        field_length: u16le;
+        field_error: io.Error;
+
+        for xlen >= 4 {
+            // println("Parsing Extra field(s).");
+            field_id, field_error = compress.read_data(&ctx, [2]u8);
+            if field_error != .None {
+                // printf("Parsing Extra returned: %v\n", field_error);
+                return E_General.Stream_Too_Short;
+            }
+            xlen -= 2;
+
+            field_length, field_error = compress.read_data(&ctx, u16le);
+            if field_error != .None {
+                // printf("Parsing Extra returned: %v\n", field_error);
+                return E_General.Stream_Too_Short;
+            }
+            xlen -= 2;
+
+            if xlen <= 0 {
+                // We're not going to try and recover by scanning for a ZLIB header.
+                // Who knows what else is wrong with this file.
+                return E_GZIP.Invalid_Extra_Data;
+            }
+
+            // printf("  Field \"%v\" of length %v found: ", string(field_id[:]), field_length);
+            if field_length > 0 {
+                field_data := make([]u8, field_length, context.temp_allocator);
+                _, field_error = ctx.input->impl_read(field_data);
+                if field_error != .None {
+                    // printf("Parsing Extra returned: %v\n", field_error);
+                    return E_General.Stream_Too_Short;
+                }
+                xlen -= field_length;
+
+                // printf("%v\n", string(field_data));
+            }
+
+            // NOTE(review): this residual check sits INSIDE the field loop, so a
+            // second extra field can never be parsed — confirm whether EXTRA
+            // payloads with multiple subfields should be supported.
+            if xlen != 0 {
+                return E_GZIP.Invalid_Extra_Data;
+            }
+        }
+    }
+
+    // Optional FNAME: NUL-terminated original file name.
+    if .name in header.flags {
+        // Should be enough.
+        name: [1024]u8;
+        b: [1]u8;
+        i := 0;
+        name_error: io.Error;
+
+        for i < len(name) {
+            _, name_error = ctx.input->impl_read(b[:]);
+            if name_error != .None {
+                return E_General.Stream_Too_Short;
+            }
+            // NOTE(review): `b == 0` compares the one-element array against a
+            // broadcast zero — confirm this is equivalent to b[0] == 0.
+            if b == 0 {
+                break;
+            }
+            name[i] = b[0];
+            i += 1;
+            if i >= len(name) {
+                return E_GZIP.Original_Name_Too_Long;
+            }
+        }
+        // printf("Original filename: %v\n", string(name[:i]));
+    }
+
+    // Optional FCOMMENT: NUL-terminated comment string.
+    if .comment in header.flags {
+        // Should be enough.
+        comment: [1024]u8;
+        b: [1]u8;
+        i := 0;
+        comment_error: io.Error;
+
+        for i < len(comment) {
+            _, comment_error = ctx.input->impl_read(b[:]);
+            if comment_error != .None {
+                return E_General.Stream_Too_Short;
+            }
+            if b == 0 {
+                break;
+            }
+            comment[i] = b[0];
+            i += 1;
+            if i >= len(comment) {
+                return E_GZIP.Comment_Too_Long;
+            }
+        }
+        // printf("Comment: %v\n", string(comment[:i]));
+    }
+
+    // Optional FHCRC: 2-byte header checksum (read but not verified).
+    if .header_crc in header.flags {
+        crc16: [2]u8;
+        crc_error: io.Error;
+        _, crc_error = ctx.input->impl_read(crc16[:]);
+        if crc_error != .None {
+            return E_General.Stream_Too_Short;
+        }
+        /*
+            We don't actually check the CRC16 (lower 2 bytes of CRC32 of header data until the CRC field).
+            If we find a gzip file in the wild that sets this field, we can add proper support for it.
+        */
+    }
+
+    /*
+        We should have arrived at the ZLIB payload.
+    */
+
+    zlib_error := zlib.inflate_raw(&ctx);
+
+    // fmt.printf("ZLIB returned: %v\n", zlib_error);
+
+    // NOTE(review): the `|| zlib_error == nil` clause is redundant — a nil union
+    // already fails the is_kind(OK) test — confirm before simplifying.
+    if !is_kind(zlib_error, E_General.OK) || zlib_error == nil {
+        return zlib_error;
+    }
+
+    /*
+        Read CRC32 using the ctx bit reader because zlib may leave bytes in there.
+    */
+    compress.discard_to_next_byte_lsb(&ctx);
+
+    payload_crc_b: [4]u8;
+    payload_len_b: [4]u8;
+    for i in 0..3 {
+        payload_crc_b[i] = u8(compress.read_bits_lsb(&ctx, 8));
+    }
+    payload_crc := transmute(u32le)payload_crc_b;
+    for i in 0..3 {
+        payload_len_b[i] = u8(compress.read_bits_lsb(&ctx, 8));
+    }
+    payload_len := int(transmute(u32le)payload_len_b);
+
+    // Verify the decompressed payload against the trailing CRC32 and length.
+    payload := bytes.buffer_to_bytes(buf);
+    crc32 := u32le(hash.crc32(payload));
+
+    if crc32 != payload_crc {
+        return E_GZIP.Payload_CRC_Invalid;
+    }
+
+    if len(payload) != payload_len {
+        return E_GZIP.Payload_Length_Invalid;
+    }
+    return E_General.OK;
+}
+
+load :: proc{load_from_file, load_from_slice, load_from_stream}; \ No newline at end of file
diff --git a/core/compress/zlib/example.odin b/core/compress/zlib/example.odin
new file mode 100644
index 000000000..a7fa76d62
--- /dev/null
+++ b/core/compress/zlib/example.odin
@@ -0,0 +1,42 @@
+//+ignore
+package zlib
+
+import "core:compress/zlib"
+import "core:bytes"
+import "core:fmt"
+
+main :: proc() {
+
+ ODIN_DEMO: []u8 = {
+ 120, 156, 101, 144, 77, 110, 131, 48, 16, 133, 215, 204, 41, 158, 44,
+ 69, 73, 32, 148, 182, 75, 35, 14, 208, 125, 47, 96, 185, 195, 143,
+ 130, 13, 50, 38, 81, 84, 101, 213, 75, 116, 215, 43, 246, 8, 53,
+ 82, 126, 8, 181, 188, 152, 153, 111, 222, 147, 159, 123, 165, 247, 170,
+ 98, 24, 213, 88, 162, 198, 244, 157, 243, 16, 186, 115, 44, 75, 227,
+ 5, 77, 115, 72, 137, 222, 117, 122, 179, 197, 39, 69, 161, 170, 156,
+ 50, 144, 5, 68, 130, 4, 49, 126, 127, 190, 191, 144, 34, 19, 57,
+ 69, 74, 235, 209, 140, 173, 242, 157, 155, 54, 158, 115, 162, 168, 12,
+ 181, 239, 246, 108, 17, 188, 174, 242, 224, 20, 13, 199, 198, 235, 250,
+ 194, 166, 129, 86, 3, 99, 157, 172, 37, 230, 62, 73, 129, 151, 252,
+ 70, 211, 5, 77, 31, 104, 188, 160, 113, 129, 215, 59, 205, 22, 52,
+ 123, 160, 83, 142, 255, 242, 89, 123, 93, 149, 200, 50, 188, 85, 54,
+ 252, 18, 248, 192, 238, 228, 235, 198, 86, 224, 118, 224, 176, 113, 166,
+ 112, 67, 106, 227, 159, 122, 215, 88, 95, 110, 196, 123, 205, 183, 224,
+ 98, 53, 8, 104, 213, 234, 201, 147, 7, 248, 192, 14, 170, 29, 25,
+ 171, 15, 18, 59, 138, 112, 63, 23, 205, 110, 254, 136, 109, 78, 231,
+ 63, 234, 138, 133, 204,
+ };
+
+ buf: bytes.Buffer;
+
+ // We can pass ", true" to inflate a raw DEFLATE stream instead of a ZLIB wrapped one.
+ err := zlib.inflate(&ODIN_DEMO, &buf);
+ defer bytes.buffer_destroy(&buf);
+
+ if !zlib.is_kind(err, zlib.E_General.OK) {
+ fmt.printf("\nError: %v\n", err);
+ }
+ s := bytes.buffer_to_string(&buf);
+ fmt.printf("Input: %v bytes, output (%v bytes):\n%v\n", len(ODIN_DEMO), len(s), s);
+ assert(len(s) == 438);
+} \ No newline at end of file
diff --git a/core/compress/zlib/zlib.odin b/core/compress/zlib/zlib.odin
new file mode 100644
index 000000000..34a7984a7
--- /dev/null
+++ b/core/compress/zlib/zlib.odin
@@ -0,0 +1,602 @@
+package zlib
+
+import "core:compress"
+
+import "core:mem"
+import "core:io"
+import "core:bytes"
+import "core:hash"
+/*
+ zlib.inflate decompresses a ZLIB stream passed in as a []u8 or io.Stream.
+ Returns: Error. You can use zlib.is_kind or compress.is_kind to easily test for OK.
+*/
+
+Context :: compress.Context;
+
+Compression_Method :: enum u8 {
+ DEFLATE = 8,
+ Reserved = 15,
+}
+
+Compression_Level :: enum u8 {
+ Fastest = 0,
+ Fast = 1,
+ Default = 2,
+ Maximum = 3,
+}
+
+Options :: struct {
+ window_size: u16,
+ level: u8,
+}
+
+Error :: compress.Error;
+E_General :: compress.General_Error;
+E_ZLIB :: compress.ZLIB_Error;
+E_Deflate :: compress.Deflate_Error;
+is_kind :: compress.is_kind;
+
+DEFLATE_MAX_CHUNK_SIZE :: 65535;
+DEFLATE_MAX_LITERAL_SIZE :: 65535;
+DEFLATE_MAX_DISTANCE :: 32768;
+DEFLATE_MAX_LENGTH :: 258;
+
+HUFFMAN_MAX_BITS :: 16;
+HUFFMAN_FAST_BITS :: 9;
+HUFFMAN_FAST_MASK :: ((1 << HUFFMAN_FAST_BITS) - 1);
+
+Z_LENGTH_BASE := [31]u16{
+ 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,
+ 67,83,99,115,131,163,195,227,258,0,0,
+};
+
+Z_LENGTH_EXTRA := [31]u8{
+ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0,
+};
+
+Z_DIST_BASE := [32]u16{
+ 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
+ 257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0,
+};
+
+Z_DIST_EXTRA := [32]u8{
+ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13,0,0,
+};
+
+Z_LENGTH_DEZIGZAG := []u8{
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
+};
+
+Z_FIXED_LENGTH := [288]u8{
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,
+};
+
+Z_FIXED_DIST := [32]u8{
+ 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+};
+
+/*
+ Accelerate all cases in default tables.
+*/
+ZFAST_BITS :: 9;
+ZFAST_MASK :: ((1 << ZFAST_BITS) - 1);
+
+/*
+ ZLIB-style Huffman encoding.
+ JPEG packs from left, ZLIB from right. We can't share code.
+*/
+Huffman_Table :: struct {
+ fast: [1 << ZFAST_BITS]u16,
+ firstcode: [16]u16,
+ maxcode: [17]int,
+ firstsymbol: [16]u16,
+ size: [288]u8,
+ value: [288]u16,
+};
+
+// Implementation starts here
+
+z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
+    // Reverses the lowest `bits` bits of n (bits <= 16), returning them
+    // right-aligned. Implemented as a full 16-bit bit-by-bit reversal
+    // followed by a right shift to keep only the requested width.
+    assert(bits <= 16);
+
+    v := n;
+    for _ in 0..<16 {
+        r = (r << 1) | (v & 1);
+        v >>= 1;
+    }
+    r >>= (16 - bits);
+    return;
+}
+
+write_byte :: #force_inline proc(z: ^Context, c: u8) -> (err: io.Error) #no_bounds_check {
+    // Writes a single byte to the output stream, updating the rolling
+    // Adler-32 checksum and mirroring the byte into the sliding window
+    // so later back-references can read it via peek_back_byte.
+    c := c;
+    buf := transmute([]u8)mem.Raw_Slice{data=&c, len=1};
+    z.rolling_hash = hash.adler32(buf, z.rolling_hash);
+
+    _, e := z.output->impl_write(buf);
+    if e != .None {
+        return e;
+    }
+    z.last[z.bytes_written % z.window_size] = c;
+
+    z.bytes_written += 1;
+    return .None;
+}
+
+allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_Table, err: Error) {
+    // Heap-allocate a zeroed Huffman table; always reports OK.
+    return new(Huffman_Table, allocator), E_General.OK;
+}
+
+build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
+    /*
+        Builds canonical Huffman decode tables from per-symbol code lengths:
+        fills the 9-bit fast lookup table plus the firstcode/firstsymbol/
+        maxcode arrays used by the slow path.
+    */
+    sizes: [HUFFMAN_MAX_BITS+1]int;
+    next_code: [HUFFMAN_MAX_BITS]int;
+
+    // k counts symbols assigned so far, i.e. the first symbol of each size.
+    k := int(0);
+
+    mem.zero_slice(sizes[:]);
+    mem.zero_slice(z.fast[:]);
+
+    // Histogram of code lengths; v is the length of each symbol's code.
+    for v, _ in code_lengths {
+        sizes[v] += 1;
+    }
+    sizes[0] = 0;
+
+    // Inclusive range: sizes has HUFFMAN_MAX_BITS+1 (= 17) entries, so
+    // index 16 is valid.
+    for i in 1..16 {
+        if sizes[i] > (1 << uint(i)) {
+            return E_Deflate.Huffman_Bad_Sizes;
+        }
+    }
+    code := int(0);
+
+    // Assign canonical code ranges per length (RFC 1951, 3.2.2).
+    for i in 1..<16 {
+        next_code[i] = code;
+        z.firstcode[i] = u16(code);
+        z.firstsymbol[i] = u16(k);
+        code = code + sizes[i];
+        if sizes[i] != 0 {
+            if (code - 1 >= (1 << u16(i))) {
+                return E_Deflate.Huffman_Bad_Code_Lengths;
+            }
+        }
+        // Left-align for the slow path's 16-bit comparisons.
+        z.maxcode[i] = code << (16 - uint(i));
+        code <<= 1;
+        k += int(sizes[i]);
+    }
+
+    z.maxcode[16] = 0x10000; // Sentinel
+    c: int;
+
+    for v, ci in code_lengths {
+        if v != 0 {
+            c = next_code[v] - int(z.firstcode[v]) + int(z.firstsymbol[v]);
+            // Pack code size (upper bits) and symbol (lower 9 bits).
+            fastv := u16((u16(v) << 9) | u16(ci));
+            z.size[c] = u8(v);
+            z.value[c] = u16(ci);
+            if (v <= ZFAST_BITS) {
+                // Replicate the entry for every 9-bit suffix of this code.
+                j := z_bit_reverse(u16(next_code[v]), v);
+                for j < (1 << ZFAST_BITS) {
+                    z.fast[j] = fastv;
+                    j += (1 << v);
+                }
+            }
+            next_code[v] += 1;
+        }
+    }
+    return E_General.OK;
+}
+
+decode_huffman_slowpath :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
+    /*
+        Fallback for codes longer than HUFFMAN_FAST_BITS: peeks 16 bits,
+        bit-reverses them, and walks maxcode[] to determine the code length
+        before looking up the symbol.
+    */
+    r = 0;
+    err = E_General.OK;
+
+    k: int;
+    s: u8;
+
+    code := u16(compress.peek_bits_lsb(z, 16));
+
+    k = int(z_bit_reverse(code, 16));
+
+    // Find the smallest code size s whose left-aligned range contains k.
+    #no_bounds_check for s = HUFFMAN_FAST_BITS+1; ; {
+        if k < t.maxcode[s] {
+            break;
+        }
+        s += 1;
+    }
+    if (s >= 16) {
+        // Code length beyond the DEFLATE maximum: invalid code.
+        return 0, E_Deflate.Bad_Huffman_Code;
+    }
+    // code size is s, so:
+    b := (k >> (16-s)) - int(t.firstcode[s]) + int(t.firstsymbol[s]);
+    // NOTE(review): size_of(t.size) equals the element count only because
+    // t.size is [288]u8 (1-byte elements) — confirm if the type changes.
+    if b >= size_of(t.size) {
+        return 0, E_Deflate.Bad_Huffman_Code;
+    }
+    if t.size[b] != s {
+        return 0, E_Deflate.Bad_Huffman_Code;
+    }
+
+    compress.consume_bits_lsb(z, s);
+
+    r = t.value[b];
+    return r, E_General.OK;
+}
+
+decode_huffman :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
+    /*
+        Decodes one Huffman symbol: tries the 9-bit fast lookup table first,
+        falling back to decode_huffman_slowpath for longer codes.
+    */
+    if z.num_bits < 16 {
+        if z.num_bits == -100 {
+            // Sentinel set by refill_lsb for a malformed code buffer.
+            return 0, E_ZLIB.Code_Buffer_Malformed;
+        }
+        compress.refill_lsb(z);
+        if z.eof {
+            return 0, E_General.Stream_Too_Short;
+        }
+    }
+    #no_bounds_check b := t.fast[z.code_buffer & ZFAST_MASK];
+    if b != 0 {
+        // Fast path: upper bits hold the code size, lower 9 bits the symbol.
+        s := u8(b >> ZFAST_BITS);
+        compress.consume_bits_lsb(z, s);
+        return b & 511, E_General.OK;
+    }
+    return decode_huffman_slowpath(z, t);
+}
+
+parse_huffman_block :: proc(z: ^Context, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
+    /*
+        Decodes one DEFLATE compressed block using the given literal/length
+        and distance Huffman tables, writing decompressed bytes to z.output.
+        Returns E_General.OK when the end-of-block code (256) is seen.
+    */
+    #no_bounds_check for {
+        value, e := decode_huffman(z, z_repeat);
+        if !is_kind(e, E_General.OK) {
+            // BUG FIX: the original returned `err` (the named result, still
+            // nil here), discarding the actual decode error and returning a
+            // nil union to the caller. Propagate the real error instead.
+            return e;
+        }
+        if value < 256 {
+            // Literal byte.
+            e := write_byte(z, u8(value));
+            if e != .None {
+                return E_General.Output_Too_Short;
+            }
+        } else {
+            if value == 256 {
+                // End of block
+                return E_General.OK;
+            }
+
+            // Length/distance pair: value 257..285 encodes a match length.
+            value -= 257;
+            length := Z_LENGTH_BASE[value];
+            if Z_LENGTH_EXTRA[value] > 0 {
+                length += u16(compress.read_bits_lsb(z, Z_LENGTH_EXTRA[value]));
+            }
+
+            value, e = decode_huffman(z, z_offset);
+            if !is_kind(e, E_General.OK) {
+                return E_Deflate.Bad_Huffman_Code;
+            }
+
+            distance := Z_DIST_BASE[value];
+            if Z_DIST_EXTRA[value] > 0 {
+                distance += u16(compress.read_bits_lsb(z, Z_DIST_EXTRA[value]));
+            }
+
+            if z.bytes_written < i64(distance) {
+                // Distance is longer than we've decoded so far.
+                return E_Deflate.Bad_Distance;
+            }
+
+            offset := i64(z.bytes_written - i64(distance));
+            /*
+                These might be sped up with a repl_byte call that copies
+                from the already written output more directly, and that
+                updates the Adler checksum once afterwards.
+
+                That way we'd suffer less Stream vtable overhead.
+            */
+            if distance == 1 {
+                /*
+                    Replicate the last outputted byte, length times.
+                */
+                if length > 0 {
+                    b, e := compress.peek_back_byte(z, offset);
+                    if e != .None {
+                        return E_General.Output_Too_Short;
+                    }
+                    #no_bounds_check for _ in 0..<length {
+                        write_byte(z, b);
+                    }
+                }
+            } else {
+                // Copy `length` bytes from the sliding window, one at a time,
+                // so overlapping copies (distance < length) behave correctly.
+                if length > 0 {
+                    #no_bounds_check for _ in 0..<length {
+                        b, e := compress.peek_back_byte(z, offset);
+                        if e != .None {
+                            return E_General.Output_Too_Short;
+                        }
+                        write_byte(z, b);
+                        offset += 1;
+                    }
+                }
+            }
+        }
+    }
+}
+
+inflate_from_stream :: proc(using ctx: ^Context, raw := false, allocator := context.allocator) -> (err: Error) #no_bounds_check {
+    /*
+        ctx.input must be an io.Stream backed by an implementation that supports:
+        - read
+        - size
+
+        ctx.output must be an io.Stream backed by an implementation that supports:
+        - write
+
+        raw determines whether the ZLIB header is processed, or we're inflating a raw
+        DEFLATE stream.
+    */
+
+    if !raw {
+        data_size := io.size(ctx.input);
+        if data_size < 6 {
+            return E_General.Stream_Too_Short;
+        }
+
+        cmf, _ := compress.read_u8(ctx);
+
+        method := Compression_Method(cmf & 0xf);
+        if method != .DEFLATE {
+            return E_General.Unknown_Compression_Method;
+        }
+
+        cinfo := (cmf >> 4) & 0xf;
+        if cinfo > 7 {
+            return E_ZLIB.Unsupported_Window_Size;
+        }
+        ctx.window_size = 1 << (cinfo + 8);
+
+        flg, _ := compress.read_u8(ctx);
+
+        /*
+            RFC 1950, 2.2: CMF and FLG, viewed as a 16-bit big-endian value,
+            must be a multiple of 31.
+            BUG FIX: the original compared (flg & 0x1f) against
+            ((cmf << 8 | flg) & 0x1f); since cmf is a u8, cmf << 8 is always 0,
+            so both sides were always equal and the check could never fail.
+        */
+        if ((u16(cmf) << 8) | u16(flg)) % 31 != 0 {
+            return E_General.Checksum_Failed;
+        }
+
+        fdict := (flg >> 5) & 1;
+        /*
+            We don't handle built-in dictionaries for now.
+            They're application specific and PNG doesn't use them.
+        */
+        if fdict != 0 {
+            return E_ZLIB.FDICT_Unsupported;
+        }
+
+        // flevel := Compression_Level((flg >> 6) & 3);
+        /*
+            Inflate can consume bits belonging to the Adler checksum.
+            We pass the entire stream to Inflate and will unget bytes if we need to
+            at the end to compare checksums.
+        */
+
+        // Seed the Adler32 rolling checksum.
+        ctx.rolling_hash = 1;
+    }
+
+    // Parse ZLIB stream without header.
+    err = inflate_raw(ctx);
+    if !is_kind(err, E_General.OK) {
+        return err;
+    }
+
+    if !raw {
+        // Byte-align, then read the stored big-endian Adler-32 and compare it
+        // with the checksum we rolled up while writing output.
+        compress.discard_to_next_byte_lsb(ctx);
+
+        adler32 := compress.read_bits_lsb(ctx, 8) << 24 | compress.read_bits_lsb(ctx, 8) << 16 | compress.read_bits_lsb(ctx, 8) << 8 | compress.read_bits_lsb(ctx, 8);
+        if ctx.rolling_hash != u32(adler32) {
+            return E_General.Checksum_Failed;
+        }
+    }
+    return E_General.OK;
+}
+
+// @(optimization_mode="speed")
+inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) -> (err: Error) #no_bounds_check {
+    /*
+        Inflates a raw DEFLATE stream (RFC 1951): iterates blocks until the
+        final-block flag is seen, handling stored (type 0), fixed-Huffman
+        (type 1) and dynamic-Huffman (type 2) blocks.
+    */
+    final := u32(0);
+    type := u32(0);
+
+    z.num_bits = 0;
+    z.code_buffer = 0;
+
+    z_repeat: ^Huffman_Table;
+    z_offset: ^Huffman_Table;
+    codelength_ht: ^Huffman_Table;
+
+    z_repeat, err = allocate_huffman_table(allocator=context.allocator);
+    if !is_kind(err, E_General.OK) {
+        return err;
+    }
+    z_offset, err = allocate_huffman_table(allocator=context.allocator);
+    if !is_kind(err, E_General.OK) {
+        return err;
+    }
+    codelength_ht, err = allocate_huffman_table(allocator=context.allocator);
+    if !is_kind(err, E_General.OK) {
+        return err;
+    }
+    defer free(z_repeat);
+    defer free(z_offset);
+    defer free(codelength_ht);
+
+    if z.window_size == 0 {
+        z.window_size = DEFLATE_MAX_DISTANCE;
+    }
+
+    // Allocate rolling window buffer.
+    last_b := mem.make_dynamic_array_len_cap([dynamic]u8, z.window_size, z.window_size, allocator);
+    z.last = &last_b;
+    defer delete(last_b);
+
+    for {
+        final = compress.read_bits_lsb(z, 1);
+        type = compress.read_bits_lsb(z, 2);
+
+        if type == 0 {
+            // Uncompressed (stored) block.
+
+            // Discard bits until next byte boundary
+            compress.discard_to_next_byte_lsb(z);
+
+            uncompressed_len := int(compress.read_bits_lsb(z, 16));
+            length_check := int(compress.read_bits_lsb(z, 16));
+            /*
+                RFC 1951, 3.2.4: NLEN is the 16-bit one's complement of LEN.
+                BUG FIX: the original compared against `~length_check`, which
+                complements the full int width, so the comparison failed for
+                every valid stored block. Complement within 16 bits instead.
+            */
+            if uncompressed_len != int(~u16(length_check)) {
+                return E_Deflate.Len_Nlen_Mismatch;
+            }
+
+            /*
+                TODO: Maybe speed this up with a stream-to-stream copy (read_from)
+                and a single Adler32 update after.
+            */
+            #no_bounds_check for uncompressed_len > 0 {
+                compress.refill_lsb(z);
+                lit := compress.read_bits_lsb(z, 8);
+                write_byte(z, u8(lit));
+                uncompressed_len -= 1;
+            }
+        } else if type == 3 {
+            // Reserved block type: always an error.
+            return E_Deflate.BType_3;
+        } else {
+            if type == 1 {
+                // Use fixed code lengths.
+                err = build_huffman(z_repeat, Z_FIXED_LENGTH[:]);
+                if !is_kind(err, E_General.OK) {
+                    return err;
+                }
+                err = build_huffman(z_offset, Z_FIXED_DIST[:]);
+                if !is_kind(err, E_General.OK) {
+                    return err;
+                }
+            } else {
+                // Dynamic tables: first decode the code-length code sizes.
+                lencodes: [286+32+137]u8;
+                codelength_sizes: [19]u8;
+
+                n: u32;
+
+                compress.refill_lsb(z, 14);
+                hlit := compress.read_bits_no_refill_lsb(z, 5) + 257;
+                hdist := compress.read_bits_no_refill_lsb(z, 5) + 1;
+                hclen := compress.read_bits_no_refill_lsb(z, 4) + 4;
+                ntot := hlit + hdist;
+
+                #no_bounds_check for i in 0..<hclen {
+                    s := compress.read_bits_lsb(z, 3);
+                    codelength_sizes[Z_LENGTH_DEZIGZAG[i]] = u8(s);
+                }
+                err = build_huffman(codelength_ht, codelength_sizes[:]);
+                if !is_kind(err, E_General.OK) {
+                    return err;
+                }
+
+                // Decode the hlit+hdist literal/distance code lengths,
+                // expanding repeat codes 16/17/18 as we go.
+                n = 0;
+                c: u16;
+
+                for n < ntot {
+                    c, err = decode_huffman(z, codelength_ht);
+                    if !is_kind(err, E_General.OK) {
+                        return err;
+                    }
+
+                    // c is unsigned, so only the upper bound needs checking
+                    // (the original also tested c < 0, which is always false).
+                    if c >= 19 {
+                        return E_Deflate.Huffman_Bad_Code_Lengths;
+                    }
+                    if c < 16 {
+                        // Literal code length.
+                        lencodes[n] = u8(c);
+                        n += 1;
+                    } else {
+                        fill := u8(0);
+                        compress.refill_lsb(z, 7);
+                        if c == 16 {
+                            // Repeat previous length 3-6 times.
+                            c = u16(compress.read_bits_no_refill_lsb(z, 2) + 3);
+                            if n == 0 {
+                                return E_Deflate.Huffman_Bad_Code_Lengths;
+                            }
+                            fill = lencodes[n - 1];
+                        } else if c == 17 {
+                            // Repeat zero length 3-10 times.
+                            c = u16(compress.read_bits_no_refill_lsb(z, 3) + 3);
+                        } else if c == 18 {
+                            // Repeat zero length 11-138 times.
+                            c = u16(compress.read_bits_no_refill_lsb(z, 7) + 11);
+                        } else {
+                            return E_Deflate.Huffman_Bad_Code_Lengths;
+                        }
+
+                        if ntot - n < u32(c) {
+                            return E_Deflate.Huffman_Bad_Code_Lengths;
+                        }
+
+                        nc := n + u32(c);
+                        #no_bounds_check for ; n < nc; n += 1 {
+                            lencodes[n] = fill;
+                        }
+                    }
+                }
+
+                if n != ntot {
+                    return E_Deflate.Huffman_Bad_Code_Lengths;
+                }
+
+                err = build_huffman(z_repeat, lencodes[:hlit]);
+                if !is_kind(err, E_General.OK) {
+                    return err;
+                }
+
+                err = build_huffman(z_offset, lencodes[hlit:ntot]);
+                if !is_kind(err, E_General.OK) {
+                    return err;
+                }
+            }
+            err = parse_huffman_block(z, z_repeat, z_offset);
+            if !is_kind(err, E_General.OK) {
+                return err;
+            }
+        }
+        if final == 1 {
+            break;
+        }
+    }
+    return E_General.OK;
+}
+
+inflate_from_byte_array :: proc(input: ^[]u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
+    // Wrap the input slice and output buffer in streams, then delegate
+    // to the stream-based implementation.
+    reader: bytes.Reader;
+    bytes.reader_init(&reader, input^);
+
+    ctx := Context{
+        input  = bytes.reader_to_stream(&reader),
+        output = bytes.buffer_to_stream(buf),
+    };
+
+    return inflate_from_stream(&ctx, raw);
+}
+
+inflate_from_byte_array_raw :: proc(input: ^[]u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
+    // Raw-DEFLATE entry point: always inflates without a ZLIB header.
+    // NOTE(review): the `raw` parameter is ignored; `true` is always passed on.
+    return inflate_from_byte_array(input, buf, true);
+}
+
+inflate :: proc{inflate_from_stream, inflate_from_byte_array};
+inflate_raw :: proc{inflate_from_stream_raw, inflate_from_byte_array_raw}; \ No newline at end of file
diff --git a/core/image/common.odin b/core/image/common.odin
new file mode 100644
index 000000000..d05feabaa
--- /dev/null
+++ b/core/image/common.odin
@@ -0,0 +1,107 @@
+package image
+
+import "core:bytes"
+
+// In-memory decoded image, shared by all core:image loaders.
+Image :: struct {
+ width: int,
+ height: int,
+ channels: int, // Channel count of the data in `pixels`, e.g. 1 (Gray) .. 4 (RGBA).
+ depth: u8, // Bits per channel, e.g. 1, 2, 4, 8 or 16.
+ pixels: bytes.Buffer, // Decoded (and possibly converted) pixel data.
+ /*
+ Some image loaders/writers can return/take an optional background color.
+ For convenience, we return them as u16 so we don't need to switch on the type
+ in our viewer, and can just test against nil.
+ */
+ background: Maybe([3]u16),
+ sidecar: any, // Loader-specific extras, e.g. a png.Info when metadata is requested.
+}
+
+/*
+Image_Option:
+ `.info`
+ This option behaves as `return_header` and `do_not_decompress_image` and can be used
+ to gather an image's dimensions and color information.
+
+ `.return_header`
+ Fill out img.sidecar.header with the image's format-specific header struct.
+ If we only care about the image specs, we can set `return_header` +
+ `do_not_decompress_image`, or `.info`, which works as if both of these were set.
+
+ `.return_metadata`
+ Returns all chunks not needed to decode the data.
+ It also returns the header as if `.return_header` is set.
+
+ `do_not_decompress_image`
+ Skip decompressing IDAT chunk, defiltering and the rest.
+
+ `alpha_add_if_missing`
+ If the image has no alpha channel, it'll add one set to max(type).
+ Turns RGB into RGBA and Gray into Gray+Alpha
+
+ `alpha_drop_if_present`
+ If the image has an alpha channel, drop it.
+ You may want to use `alpha_premultiply` in this case.
+
+ NOTE: For PNG, this also skips handling of the tRNS chunk, if present,
+ unless you select `alpha_premultiply`.
+ In this case it'll premultiply the specified pixels in question only,
+ as the others are implicitly fully opaque.
+
+ `alpha_premultiply`
+ If the image has an alpha channel, returns image data as follows:
+ RGB *= A, Gray *= A
+
+ `blend_background`
+ If a bKGD chunk is present in a PNG, we normally just set `img.background`
+ with its value and leave it up to the application to decide how to display the image,
+ as per the PNG specification.
+
+ With `blend_background` selected, we blend the image against the background
+ color. As this negates the use for an alpha channel, we'll drop it _unless_
+ you also specify `alpha_add_if_missing`.
+
+ Options that don't apply to an image format will be ignored by their loader.
+*/
+
+Option :: enum {
+ info = 0,
+ do_not_decompress_image,
+ return_header,
+ return_metadata,
+ alpha_add_if_missing,
+ alpha_drop_if_present,
+ alpha_premultiply,
+ blend_background,
+}
+Options :: distinct bit_set[Option];
+
+PNG_Error :: enum {
+ Invalid_PNG_Signature,
+ IHDR_Not_First_Chunk,
+ IHDR_Corrupt,
+ IDAT_Missing,
+ IDAT_Must_Be_Contiguous,
+ IDAT_Corrupt,
+ PNG_Does_Not_Adhere_to_Spec,
+ PLTE_Encountered_Unexpectedly,
+ PLTE_Invalid_Length,
+ TRNS_Encountered_Unexpectedly,
+ BKGD_Invalid_Length,
+ Invalid_Image_Dimensions,
+ Unknown_Color_Type,
+ Invalid_Color_Bit_Depth_Combo,
+ Unknown_Filter_Method,
+ Unknown_Interlace_Method,
+}
+
+
+/*
+ Functions to help with image buffer calculations
+*/
+
+/*
+ Returns the byte size needed to hold an image of the given dimensions:
+ bits per row rounded up to whole bytes, plus any per-row padding bytes,
+ multiplied by the number of rows.
+*/
+compute_buffer_size :: proc(width, height, channels, depth: int, extra_row_bytes := int(0)) -> (size: int) {
+ bits_per_row := channels * width * depth;
+ bytes_per_row := (bits_per_row + 7) >> 3;
+ return (bytes_per_row + extra_row_bytes) * height;
+}
diff --git a/core/image/png/example.odin b/core/image/png/example.odin
new file mode 100644
index 000000000..8cac5a505
--- /dev/null
+++ b/core/image/png/example.odin
@@ -0,0 +1,327 @@
+//+ignore
+package png
+
+import "core:compress"
+import "core:image"
+import "core:image/png"
+import "core:bytes"
+import "core:fmt"
+
+// For PPM writer
+import "core:mem"
+import "core:os"
+
+main :: proc() {
+ file: string;
+
+ options := image.Options{};
+ err: compress.Error;
+ img: ^image.Image;
+
+ file = "../../../misc/logo-slim.png";
+
+ img, err = png.load(file, options);
+ defer png.destroy(img);
+
+ if !png.is_kind(err, png.E_General.OK) {
+ fmt.printf("Trying to read PNG file %v returned %v\n", file, err);
+ } else {
+ v: png.Info;
+ ok: bool;
+
+ fmt.printf("Image: %vx%vx%v, %v-bit.\n", img.width, img.height, img.channels, img.depth);
+
+ if v, ok = img.sidecar.(png.Info); ok {
+ // Handle ancillary chunks as you wish.
+ // We provide helper functions for a few types.
+ for c in v.chunks {
+ #partial switch (c.header.type) {
+ case .tIME:
+ t, _ := png.core_time(c);
+ fmt.printf("[tIME]: %v\n", t);
+ case .gAMA:
+ fmt.printf("[gAMA]: %v\n", png.gamma(c));
+ case .pHYs:
+ phys := png.phys(c);
+ if phys.unit == .Meter {
+ xm := f32(img.width) / f32(phys.ppu_x);
+ ym := f32(img.height) / f32(phys.ppu_y);
+ dpi_x, dpi_y := png.phys_to_dpi(phys);
+ fmt.printf("[pHYs] Image resolution is %v x %v pixels per meter.\n", phys.ppu_x, phys.ppu_y);
+ fmt.printf("[pHYs] Image resolution is %v x %v DPI.\n", dpi_x, dpi_y);
+ fmt.printf("[pHYs] Image dimensions are %v x %v meters.\n", xm, ym);
+ } else {
+ fmt.printf("[pHYs] x: %v, y: %v pixels per unknown unit.\n", phys.ppu_x, phys.ppu_y);
+ }
+ case .iTXt, .zTXt, .tEXt:
+ res, ok_text := png.text(c);
+ if ok_text {
+ if c.header.type == .iTXt {
+ fmt.printf("[iTXt] %v (%v:%v): %v\n", res.keyword, res.language, res.keyword_localized, res.text);
+ } else {
+ fmt.printf("[tEXt/zTXt] %v: %v\n", res.keyword, res.text);
+ }
+ }
+ defer png.text_destroy(res);
+ case .bKGD:
+ fmt.printf("[bKGD] %v\n", img.background);
+ case .eXIf:
+ res, ok_exif := png.exif(c);
+ if ok_exif {
+ /*
+ Other than checking the signature and byte order, we don't handle Exif data.
+ If you wish to interpret it, pass it to an Exif parser.
+ */
+ fmt.printf("[eXIf] %v\n", res);
+ }
+ case .PLTE:
+ plte, plte_ok := png.plte(c);
+ if plte_ok {
+ fmt.printf("[PLTE] %v\n", plte);
+ } else {
+ fmt.printf("[PLTE] Error\n");
+ }
+ case .hIST:
+ res, ok_hist := png.hist(c);
+ if ok_hist {
+ fmt.printf("[hIST] %v\n", res);
+ }
+ case .cHRM:
+ res, ok_chrm := png.chrm(c);
+ if ok_chrm {
+ fmt.printf("[cHRM] %v\n", res);
+ }
+ case .sPLT:
+ res, ok_splt := png.splt(c);
+ if ok_splt {
+ fmt.printf("[sPLT] %v\n", res);
+ }
+ png.splt_destroy(res);
+ case .sBIT:
+ if res, ok_sbit := png.sbit(c); ok_sbit {
+ fmt.printf("[sBIT] %v\n", res);
+ }
+ case .iCCP:
+ res, ok_iccp := png.iccp(c);
+ if ok_iccp {
+ fmt.printf("[iCCP] %v\n", res);
+ }
+ png.iccp_destroy(res);
+ case .sRGB:
+ if res, ok_srgb := png.srgb(c); ok_srgb {
+ fmt.printf("[sRGB] Rendering intent: %v\n", res);
+ }
+ case:
+ type := c.header.type;
+ name := png.chunk_type_to_name(&type);
+ fmt.printf("[%v]: %v\n", name, c.data);
+ }
+ }
+ }
+ }
+
+ if is_kind(err, E_General.OK) && .do_not_decompress_image not_in options && .info not_in options {
+ if ok := write_image_as_ppm("out.ppm", img); ok {
+ fmt.println("Saved decoded image.");
+ } else {
+ fmt.println("Error saving out.ppm.");
+ fmt.println(img);
+ }
+ }
+}
+
+// Crappy PPM writer used during testing. Don't use in production.
+write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: bool) {
+
+ _bg :: proc(bg: Maybe([3]u16), x, y: int, high := true) -> (res: [3]u16) {
+ if v, ok := bg.?; ok {
+ res = v;
+ } else {
+ if high {
+ l := u16(30 * 256 + 30);
+
+ if (x & 4 == 0) ~ (y & 4 == 0) {
+ res = [3]u16{l, 0, l};
+ } else {
+ res = [3]u16{l >> 1, 0, l >> 1};
+ }
+ } else {
+ if (x & 4 == 0) ~ (y & 4 == 0) {
+ res = [3]u16{30, 30, 30};
+ } else {
+ res = [3]u16{15, 15, 15};
+ }
+ }
+ }
+ return;
+ }
+
+ // profiler.timed_proc();
+ using image;
+ using os;
+
+ flags: int = O_WRONLY|O_CREATE|O_TRUNC;
+
+ img := image;
+
+ // PBM 16-bit images are big endian
+ when ODIN_ENDIAN == "little" {
+ if img.depth == 16 {
+ // The pixel components are in Big Endian. Let's byteswap back.
+ input := mem.slice_data_cast([]u16, img.pixels.buf[:]);
+ output := mem.slice_data_cast([]u16be, img.pixels.buf[:]);
+ #no_bounds_check for v, i in input {
+ output[i] = u16be(v);
+ }
+ }
+ }
+
+ pix := bytes.buffer_to_bytes(&img.pixels);
+
+ if len(pix) == 0 || len(pix) < image.width * image.height * int(image.channels) {
+ return false;
+ }
+
+ mode: int = 0;
+ when ODIN_OS == "linux" || ODIN_OS == "darwin" {
+ // NOTE(justasd): 644 (owner read, write; group read; others read)
+ mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+ }
+
+ fd, err := open(filename, flags, mode);
+ if err != 0 {
+ return false;
+ }
+ defer close(fd);
+
+ write_string(fd,
+ fmt.tprintf("P6\n%v %v\n%v\n", width, height, (1 << depth -1)),
+ );
+
+ if channels == 3 {
+ // We don't handle transparency here...
+ write_ptr(fd, raw_data(pix), len(pix));
+ } else {
+ bpp := depth == 16 ? 2 : 1;
+ bytes_needed := width * height * 3 * bpp;
+
+ op := bytes.Buffer{};
+ bytes.buffer_init_allocator(&op, bytes_needed, bytes_needed);
+ defer bytes.buffer_destroy(&op);
+
+ if channels == 1 {
+ if depth == 16 {
+ assert(len(pix) == width * height * 2);
+ p16 := mem.slice_data_cast([]u16, pix);
+ o16 := mem.slice_data_cast([]u16, op.buf[:]);
+ #no_bounds_check for len(p16) != 0 {
+ r := u16(p16[0]);
+ o16[0] = r;
+ o16[1] = r;
+ o16[2] = r;
+ p16 = p16[1:];
+ o16 = o16[3:];
+ }
+ } else {
+ o := 0;
+ for i := 0; i < len(pix); i += 1 {
+ r := pix[i];
+ op.buf[o ] = r;
+ op.buf[o+1] = r;
+ op.buf[o+2] = r;
+ o += 3;
+ }
+ }
+ write_ptr(fd, raw_data(op.buf), len(op.buf));
+ } else if channels == 2 {
+ if depth == 16 {
+ p16 := mem.slice_data_cast([]u16, pix);
+ o16 := mem.slice_data_cast([]u16, op.buf[:]);
+
+ bgcol := img.background;
+
+ #no_bounds_check for len(p16) != 0 {
+ r := f64(u16(p16[0]));
+ bg: f64;
+ if bgcol != nil {
+ v := bgcol.([3]u16)[0];
+ bg = f64(v);
+ }
+ a := f64(u16(p16[1])) / 65535.0;
+ l := (a * r) + (1 - a) * bg;
+
+ o16[0] = u16(l);
+ o16[1] = u16(l);
+ o16[2] = u16(l);
+
+ p16 = p16[2:];
+ o16 = o16[3:];
+ }
+ } else {
+ o := 0;
+ for i := 0; i < len(pix); i += 2 {
+ r := pix[i]; a := pix[i+1]; a1 := f32(a) / 255.0;
+ c := u8(f32(r) * a1);
+ op.buf[o ] = c;
+ op.buf[o+1] = c;
+ op.buf[o+2] = c;
+ o += 3;
+ }
+ }
+ write_ptr(fd, raw_data(op.buf), len(op.buf));
+ } else if channels == 4 {
+ if depth == 16 {
+ p16 := mem.slice_data_cast([]u16be, pix);
+ o16 := mem.slice_data_cast([]u16be, op.buf[:]);
+
+ #no_bounds_check for len(p16) != 0 {
+
+ bg := _bg(img.background, 0, 0);
+ r := f32(p16[0]);
+ g := f32(p16[1]);
+ b := f32(p16[2]);
+ a := f32(p16[3]) / 65535.0;
+
+ lr := (a * r) + (1 - a) * f32(bg[0]);
+ lg := (a * g) + (1 - a) * f32(bg[1]);
+ lb := (a * b) + (1 - a) * f32(bg[2]);
+
+ o16[0] = u16be(lr);
+ o16[1] = u16be(lg);
+ o16[2] = u16be(lb);
+
+ p16 = p16[4:];
+ o16 = o16[3:];
+ }
+ } else {
+ o := 0;
+
+ for i := 0; i < len(pix); i += 4 {
+
+ x := (i / 4) % width;
+ y := i / width / 4;
+
+ _b := _bg(img.background, x, y, false);
+ bgcol := [3]u8{u8(_b[0]), u8(_b[1]), u8(_b[2])};
+
+ r := f32(pix[i]);
+ g := f32(pix[i+1]);
+ b := f32(pix[i+2]);
+ a := f32(pix[i+3]) / 255.0;
+
+ lr := u8(f32(r) * a + (1 - a) * f32(bgcol[0]));
+ lg := u8(f32(g) * a + (1 - a) * f32(bgcol[1]));
+ lb := u8(f32(b) * a + (1 - a) * f32(bgcol[2]));
+ op.buf[o ] = lr;
+ op.buf[o+1] = lg;
+ op.buf[o+2] = lb;
+ o += 3;
+ }
+ }
+ write_ptr(fd, raw_data(op.buf), len(op.buf));
+ } else {
+ return false;
+ }
+ }
+ return true;
+} \ No newline at end of file
diff --git a/core/image/png/helpers.odin b/core/image/png/helpers.odin
new file mode 100644
index 000000000..a179cd23b
--- /dev/null
+++ b/core/image/png/helpers.odin
@@ -0,0 +1,521 @@
+package png
+
+import "core:image"
+import "core:compress/zlib"
+import coretime "core:time"
+import "core:strings"
+import "core:bytes"
+import "core:mem"
+
+/*
+ These are a few useful utility functions to work with PNG images.
+*/
+
+/*
+ Cleanup of image-specific data.
+ There are other helpers for cleanup of PNG-specific data.
+ Those are named *_destroy, where * is the name of the helper.
+*/
+
+// Frees an Image and its pixel buffer. A nil argument is a no-op,
+// so it is safe to `defer destroy(img)` even when loading failed.
+destroy :: proc(img: ^Image) {
+ if img == nil {
+ /*
+ Nothing to do.
+ Load must've returned with an error.
+ */
+ return;
+ }
+
+ bytes.buffer_destroy(&img.pixels);
+
+ /*
+ We don't need to do anything for the individual chunks.
+ They're allocated on the temp allocator, as is info.chunks
+
+ See read_chunk.
+ */
+ free(img);
+}
+
+/*
+ Chunk helpers
+*/
+
+/*
+ Returns the display gamma stored in a gAMA chunk.
+ The chunk stores gamma * 100,000 as a u32be; we scale it back to f32.
+*/
+gamma :: proc(c: Chunk) -> f32 {
+ assert(c.header.type == .gAMA);
+ res := (^gAMA)(raw_data(c.data))^;
+ when true {
+ // Returns the wrong result on old backend
+ // Fixed for -llvm-api
+ return f32(res.gamma_100k) / 100_000.0;
+ } else {
+ // Workaround: force the u32be through u32 before converting to f32.
+ return f32(u32(res.gamma_100k)) / 100_000.0;
+ }
+}
+
+INCHES_PER_METER :: 1000.0 / 25.4;
+
+// Reinterprets a pHYs chunk's payload as a `pHYs` record (physical pixel dimensions).
+phys :: proc(c: Chunk) -> pHYs {
+ assert(c.header.type == .pHYs);
+ return (^pHYs)(raw_data(c.data))^;
+}
+
+// Converts a pHYs chunk's pixels-per-meter values into dots per inch.
+phys_to_dpi :: proc(p: pHYs) -> (x_dpi, y_dpi: f32) {
+ x_dpi = f32(p.ppu_x) / INCHES_PER_METER;
+ y_dpi = f32(p.ppu_y) / INCHES_PER_METER;
+ return;
+}
+
+// Reinterprets a tIME chunk's payload as a `tIME` record (last-modification time).
+time :: proc(c: Chunk) -> tIME {
+ assert(c.header.type == .tIME);
+ return (^tIME)(raw_data(c.data))^;
+}
+
+/*
+ Converts a PNG tIME chunk into a core:time Time value.
+ `ok` comes straight from coretime.datetime_to_time's validity check.
+*/
+core_time :: proc(c: Chunk) -> (t: coretime.Time, ok: bool) {
+ png_time := time(c);
+ using png_time;
+ return coretime.datetime_to_time(
+  int(year), int(month), int(day),
+  int(hour), int(minute), int(second));
+}
+
+text :: proc(c: Chunk) -> (res: Text, ok: bool) {
+ #partial switch c.header.type {
+ case .tEXt:
+ ok = true;
+
+ fields := bytes.split(s=c.data, sep=[]u8{0}, allocator=context.temp_allocator);
+ if len(fields) == 2 {
+ res.keyword = strings.clone(string(fields[0]));
+ res.text = strings.clone(string(fields[1]));
+ } else {
+ ok = false;
+ }
+ return;
+ case .zTXt:
+ ok = true;
+
+ fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator);
+ if len(fields) != 3 || len(fields[1]) != 0 {
+ // Compression method must be 0=Deflate, which thanks to the split above turns
+ // into an empty slice
+ ok = false; return;
+ }
+
+ // Set up ZLIB context and decompress text payload.
+ buf: bytes.Buffer;
+ zlib_error := zlib.inflate_from_byte_array(&fields[2], &buf);
+ defer bytes.buffer_destroy(&buf);
+ if !is_kind(zlib_error, E_General.OK) {
+ ok = false; return;
+ }
+
+ res.keyword = strings.clone(string(fields[0]));
+ res.text = strings.clone(bytes.buffer_to_string(&buf));
+ return;
+ case .iTXt:
+ ok = true;
+
+ s := string(c.data);
+ null := strings.index_byte(s, 0);
+ if null == -1 {
+ ok = false; return;
+ }
+ if len(c.data) < null + 4 {
+ // At a minimum, including the \0 following the keyword, we require 5 more bytes.
+ ok = false; return;
+ }
+ res.keyword = strings.clone(string(c.data[:null]));
+ rest := c.data[null+1:];
+
+ compression_flag := rest[:1][0];
+ if compression_flag > 1 {
+ ok = false; return;
+ }
+ compression_method := rest[1:2][0];
+ if compression_flag == 1 && compression_method > 0 {
+ // Only Deflate is supported
+ ok = false; return;
+ }
+ rest = rest[2:];
+
+ // We now expect an optional language keyword and translated keyword, both followed by a \0
+ null = strings.index_byte(string(rest), 0);
+ if null == -1 {
+ ok = false; return;
+ }
+ res.language = strings.clone(string(rest[:null]));
+ rest = rest[null+1:];
+
+ null = strings.index_byte(string(rest), 0);
+ if null == -1 {
+ ok = false; return;
+ }
+ res.keyword_localized = strings.clone(string(rest[:null]));
+ rest = rest[null+1:];
+ if compression_flag == 0 {
+ res.text = strings.clone(string(rest));
+ } else {
+ // Set up ZLIB context and decompress text payload.
+ buf: bytes.Buffer;
+ zlib_error := zlib.inflate_from_byte_array(&rest, &buf);
+ defer bytes.buffer_destroy(&buf);
+ if !is_kind(zlib_error, E_General.OK) {
+
+ ok = false; return;
+ }
+
+ res.text = strings.clone(bytes.buffer_to_string(&buf));
+ }
+ return;
+ case:
+ // PNG text helper called with an unrecognized chunk type.
+ ok = false; return;
+
+ }
+}
+
+// Frees the four strings cloned by the `text` chunk helper.
+text_destroy :: proc(text: Text) {
+ delete(text.text);
+ delete(text.language);
+ delete(text.keyword_localized);
+ delete(text.keyword);
+}
+
+/*
+ Parses an iCCP chunk: "profile name\0compression method (0)\0compressed profile".
+ Decompresses the ZLIB-compressed ICC profile into `res.profile`.
+ Call `iccp_destroy` on the result when done.
+*/
+iccp :: proc(c: Chunk) -> (res: iCCP, ok: bool) {
+ ok = true;
+
+ fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=3, allocator=context.temp_allocator);
+ if len(fields) != 3 {
+ // Malformed chunk: fewer than two NUL separators present.
+ // Without this guard, indexing fields[1]/fields[2] below would be out of bounds.
+ ok = false; return;
+ }
+
+ if len(fields[0]) < 1 || len(fields[0]) > 79 {
+ // Invalid profile name
+ ok = false; return;
+ }
+
+ if len(fields[1]) != 0 {
+ // Compression method should be a zero, which the split turned into an empty slice.
+ ok = false; return;
+ }
+
+ // Set up ZLIB context and decompress iCCP payload
+ buf: bytes.Buffer;
+ zlib_error := zlib.inflate_from_byte_array(&fields[2], &buf);
+ if !is_kind(zlib_error, E_General.OK) {
+ bytes.buffer_destroy(&buf);
+ ok = false; return;
+ }
+
+ res.name = strings.clone(string(fields[0]));
+ res.profile = bytes.buffer_to_bytes(&buf);
+
+ return;
+}
+
+// Frees the profile name and the decompressed ICC profile allocated by `iccp`.
+iccp_destroy :: proc(i: iCCP) {
+ delete(i.profile);
+ delete(i.name);
+}
+
+// Parses an sRGB chunk's single-byte rendering intent.
+// `ok` is false for a wrong chunk type, wrong payload size, or an intent
+// outside the defined enum range.
+srgb :: proc(c: Chunk) -> (res: sRGB, ok: bool) {
+ if c.header.type != .sRGB || len(c.data) != 1 {
+ return {}, false;
+ }
+
+ res.intent = sRGB_Rendering_Intent(c.data[0]);
+ if res.intent > max(sRGB_Rendering_Intent) {
+ return res, false;
+ }
+ return res, true;
+}
+
+/*
+ Parses a PLTE chunk into a PLTE palette.
+ Per the PNG specification the payload must be a multiple of 3 bytes
+ (RGB triplets) and hold at most 256 entries.
+*/
+plte :: proc(c: Chunk) -> (res: PLTE, ok: bool) {
+ if c.header.type != .PLTE {
+ return {}, false;
+ }
+
+ length := int(c.header.length);
+ if length % 3 != 0 || length > 3 * 256 {
+ // Not a whole number of RGB triplets, or more than 256 entries:
+ // the loop below would otherwise read past `c.data` or write past `res.entries`.
+ return {}, false;
+ }
+
+ i := 0; j := 0; ok = true;
+ for j < length {
+ res.entries[i] = {c.data[j], c.data[j+1], c.data[j+2]};
+ i += 1; j += 3;
+ }
+ res.used = u16(i);
+ return;
+}
+
+/*
+ Parses an sPLT (suggested palette) chunk: "name\0depth, entries...".
+ `res.entries` is a raw view into the chunk's data; only `res.name` is
+ cloned, so call `splt_destroy` on the result when done.
+*/
+splt :: proc(c: Chunk) -> (res: sPLT, ok: bool) {
+ if c.header.type != .sPLT {
+ return {}, false;
+ }
+ ok = true;
+
+ fields := bytes.split_n(s=c.data, sep=[]u8{0}, n=2, allocator=context.temp_allocator);
+ if len(fields) != 2 {
+ return {}, false;
+ }
+ if len(fields[1]) == 0 {
+ // The payload ended right at the name separator: there is no sample
+ // depth byte, and fields[1][0] below would be out of bounds.
+ return {}, false;
+ }
+
+ res.depth = fields[1][0];
+ if res.depth != 8 && res.depth != 16 {
+ return {}, false;
+ }
+
+ data := fields[1][1:];
+ count: int;
+
+ /*
+ NOTE(review): per the PNG spec each sPLT entry also carries a 16-bit
+ frequency, i.e. 6 bytes at depth 8 and 10 bytes at depth 16 (the modulo
+ checks below agree). The slice casts, however, view the data as packed
+ 4-byte/8-byte RGBA only — confirm the intended layout of `res.entries`
+ before relying on anything past the first entry.
+ */
+ if res.depth == 8 {
+ if len(data) % 6 != 0 {
+ return {}, false;
+ }
+ count = len(data) / 6;
+ if count > 256 {
+ return {}, false;
+ }
+
+ res.entries = mem.slice_data_cast([][4]u8, data);
+ } else { // res.depth == 16
+ if len(data) % 10 != 0 {
+ return {}, false;
+ }
+ count = len(data) / 10;
+ if count > 256 {
+ return {}, false;
+ }
+
+ res.entries = mem.slice_data_cast([][4]u16, data);
+ }
+
+ res.name = strings.clone(string(fields[0]));
+ res.used = u16(count);
+
+ return;
+}
+
+// Frees the palette name cloned by `splt`.
+// `entries` is a view into the chunk's (temp-allocated) data and is not freed here.
+splt_destroy :: proc(s: sPLT) {
+ delete(s.name);
+}
+
+/*
+ Returns [4]u8 with the significant bits in each channel.
+ A channel will contain zero if not applicable to the PNG color type.
+*/
+sbit :: proc(c: Chunk) -> (res: [4]u8, ok: bool) {
+ if len(c.data) == 0 || len(c.data) > 4 {
+ return {}, false;
+ }
+
+ for v, i in c.data {
+ res[i] = v;
+ }
+ return res, true;
+}
+
+/*
+ Parses a hIST chunk: a sequence of u16be frequency values, unpacked
+ into machine-endian u16 entries. `res.used` holds the entry count.
+*/
+hist :: proc(c: Chunk) -> (res: hIST, ok: bool) {
+ if c.header.type != .hIST {
+ return {}, false;
+ }
+ if c.header.length & 1 == 1 || c.header.length > 512 {
+ // The entries are u16be, so the length must be even.
+ // At most 256 entries must be present
+ return {}, false;
+ }
+
+ ok = true;
+ data := mem.slice_data_cast([]u16be, c.data);
+ i := 0;
+ for len(data) > 0 {
+ // HIST entries are u16be, we unpack them to machine format
+ res.entries[i] = u16(data[0]);
+ i += 1; data = data[1:];
+ }
+ res.used = u16(i);
+ return;
+}
+
+/*
+ Parses a cHRM chunk into CIE 1931 chromaticity coordinates for the
+ white point and the R/G/B primaries. Stored values are u32be scaled
+ by 100,000; they are converted to f32 here.
+*/
+chrm :: proc(c: Chunk) -> (res: cHRM, ok: bool) {
+ ok = true;
+ if c.header.length != size_of(cHRM_Raw) {
+ return {}, false;
+ }
+ chrm := (^cHRM_Raw)(raw_data(c.data))^;
+
+ res.w.x = f32(chrm.w.x) / 100_000.0;
+ res.w.y = f32(chrm.w.y) / 100_000.0;
+ res.r.x = f32(chrm.r.x) / 100_000.0;
+ res.r.y = f32(chrm.r.y) / 100_000.0;
+ res.g.x = f32(chrm.g.x) / 100_000.0;
+ res.g.y = f32(chrm.g.y) / 100_000.0;
+ res.b.x = f32(chrm.b.x) / 100_000.0;
+ res.b.y = f32(chrm.b.y) / 100_000.0;
+ return;
+}
+
+/*
+ Validates an eXIf chunk's TIFF header and returns the raw payload.
+ "MM" + 0x002A marks big-endian data, "II" + 0x2A00 little-endian.
+ The Exif payload itself is not parsed here; pass `res.data` to an
+ Exif parser if you need its contents.
+*/
+exif :: proc(c: Chunk) -> (res: Exif, ok: bool) {
+
+ ok = true;
+
+ if len(c.data) < 4 {
+ ok = false; return;
+ }
+
+ if c.data[0] == 'M' && c.data[1] == 'M' {
+ res.byte_order = .big_endian;
+ if c.data[2] != 0 || c.data[3] != 42 {
+ ok = false; return;
+ }
+ } else if c.data[0] == 'I' && c.data[1] == 'I' {
+ res.byte_order = .little_endian;
+ if c.data[2] != 42 || c.data[3] != 0 {
+ ok = false; return;
+ }
+ } else {
+ ok = false; return;
+ }
+
+ res.data = c.data;
+ return;
+}
+
+/*
+ General helper functions
+*/
+
+compute_buffer_size :: image.compute_buffer_size;
+
+/*
+ PNG save helpers
+*/
+
+when false {
+
+ make_chunk :: proc(c: any, t: Chunk_Type) -> (res: Chunk) {
+
+ data: []u8;
+ if v, ok := c.([]u8); ok {
+ data = v;
+ } else {
+ data = mem.any_to_bytes(c);
+ }
+
+ res.header.length = u32be(len(data));
+ res.header.type = t;
+ res.data = data;
+
+ // CRC the type
+ crc := hash.crc32(mem.any_to_bytes(res.header.type));
+ // Extend the CRC with the data
+ res.crc = u32be(hash.crc32(data, crc));
+ return;
+ }
+
+ write_chunk :: proc(fd: os.Handle, chunk: Chunk) {
+ c := chunk;
+ // Write length + type
+ os.write_ptr(fd, &c.header, 8);
+ // Write data
+ os.write_ptr(fd, mem.raw_data(c.data), int(c.header.length));
+ // Write CRC32
+ os.write_ptr(fd, &c.crc, 4);
+ }
+
+ write_image_as_png :: proc(filename: string, image: Image) -> (err: Error) {
+ profiler.timed_proc();
+ using image;
+ using os;
+ flags: int = O_WRONLY|O_CREATE|O_TRUNC;
+
+ if len(image.pixels) == 0 || len(image.pixels) < image.width * image.height * int(image.channels) {
+ return E_PNG.Invalid_Image_Dimensions;
+ }
+
+ mode: int = 0;
+ when ODIN_OS == "linux" || ODIN_OS == "darwin" {
+ // NOTE(justasd): 644 (owner read, write; group read; others read)
+ mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+ }
+
+ fd, fderr := open(filename, flags, mode);
+ if fderr != 0 {
+ return E_General.Cannot_Open_File;
+ }
+ defer close(fd);
+
+ magic := Signature;
+
+ write_ptr(fd, &magic, 8);
+
+ ihdr := IHDR{
+ width = u32be(width),
+ height = u32be(height),
+ bit_depth = depth,
+ compression_method = 0,
+ filter_method = 0,
+ interlace_method = .None,
+ };
+
+ if channels == 1 {
+ ihdr.color_type = Color_Type{};
+ } else if channels == 2 {
+ ihdr.color_type = Color_Type{.Alpha};
+ } else if channels == 3 {
+ ihdr.color_type = Color_Type{.Color};
+ } else if channels == 4 {
+ ihdr.color_type = Color_Type{.Color, .Alpha};
+ } else {
+ // Unhandled
+ return E_PNG.Unknown_Color_Type;
+ }
+
+ h := make_chunk(ihdr, .IHDR);
+ write_chunk(fd, h);
+
+ bytes_needed := width * height * int(channels) + height;
+ filter_bytes := mem.make_dynamic_array_len_cap([dynamic]u8, bytes_needed, bytes_needed, context.allocator);
+ defer delete(filter_bytes);
+
+ i := 0; j := 0;
+ // Add a filter byte 0 per pixel row
+ for y := 0; y < height; y += 1 {
+ filter_bytes[j] = 0; j += 1;
+ for x := 0; x < width; x += 1 {
+ for z := 0; z < channels; z += 1 {
+ filter_bytes[j+z] = image.pixels[i+z];
+ }
+ i += channels; j += channels;
+ }
+ }
+ assert(j == bytes_needed);
+
+ a: []u8 = filter_bytes[:];
+
+ out_buf: ^[dynamic]u8;
+ defer free(out_buf);
+
+ ctx := zlib.ZLIB_Context{
+ in_buf = &a,
+ out_buf = out_buf,
+ };
+ err = zlib.write_zlib_stream_from_memory(&ctx);
+
+ b: []u8;
+ if is_kind(err, E_General, E_General.OK) {
+ b = ctx.out_buf[:];
+ } else {
+ return err;
+ }
+
+ idat := make_chunk(b, .IDAT);
+
+ write_chunk(fd, idat);
+
+ iend := make_chunk([]u8{}, .IEND);
+ write_chunk(fd, iend);
+
+ return E_General.OK;
+ }
+} \ No newline at end of file
diff --git a/core/image/png/png.odin b/core/image/png/png.odin
new file mode 100644
index 000000000..a25fed1ec
--- /dev/null
+++ b/core/image/png/png.odin
@@ -0,0 +1,1590 @@
+package png
+
+import "core:compress"
+import "core:compress/zlib"
+import "core:image"
+
+import "core:os"
+import "core:strings"
+import "core:hash"
+import "core:bytes"
+import "core:io"
+import "core:mem"
+import "core:intrinsics"
+
+Error :: compress.Error;
+E_General :: compress.General_Error;
+E_PNG :: image.PNG_Error;
+E_Deflate :: compress.Deflate_Error;
+is_kind :: compress.is_kind;
+
+Image :: image.Image;
+Options :: image.Options;
+
+Signature :: enum u64be {
+ // 0x89504e470d0a1a0a
+ PNG = 0x89 << 56 | 'P' << 48 | 'N' << 40 | 'G' << 32 | '\r' << 24 | '\n' << 16 | 0x1a << 8 | '\n',
+}
+
+Info :: struct {
+ header: IHDR,
+ chunks: [dynamic]Chunk,
+}
+
+Chunk_Header :: struct #packed {
+ length: u32be,
+ type: Chunk_Type,
+}
+
+Chunk :: struct #packed {
+ header: Chunk_Header,
+ data: []byte,
+ crc: u32be,
+}
+
+Chunk_Type :: enum u32be {
+ // IHDR must come first in a file
+ IHDR = 'I' << 24 | 'H' << 16 | 'D' << 8 | 'R',
+ // PLTE must precede the first IDAT chunk
+ PLTE = 'P' << 24 | 'L' << 16 | 'T' << 8 | 'E',
+ bKGD = 'b' << 24 | 'K' << 16 | 'G' << 8 | 'D',
+ tRNS = 't' << 24 | 'R' << 16 | 'N' << 8 | 'S',
+ IDAT = 'I' << 24 | 'D' << 16 | 'A' << 8 | 'T',
+
+ iTXt = 'i' << 24 | 'T' << 16 | 'X' << 8 | 't',
+ tEXt = 't' << 24 | 'E' << 16 | 'X' << 8 | 't',
+ zTXt = 'z' << 24 | 'T' << 16 | 'X' << 8 | 't',
+
+ iCCP = 'i' << 24 | 'C' << 16 | 'C' << 8 | 'P',
+ pHYs = 'p' << 24 | 'H' << 16 | 'Y' << 8 | 's',
+ gAMA = 'g' << 24 | 'A' << 16 | 'M' << 8 | 'A',
+ tIME = 't' << 24 | 'I' << 16 | 'M' << 8 | 'E',
+
+ sPLT = 's' << 24 | 'P' << 16 | 'L' << 8 | 'T',
+ sRGB = 's' << 24 | 'R' << 16 | 'G' << 8 | 'B',
+ hIST = 'h' << 24 | 'I' << 16 | 'S' << 8 | 'T',
+ cHRM = 'c' << 24 | 'H' << 16 | 'R' << 8 | 'M',
+ sBIT = 's' << 24 | 'B' << 16 | 'I' << 8 | 'T',
+
+ /*
+ eXIf tags are not part of the core spec, but have been ratified
+ in v1.5.0 of the PNG Ext register.
+
+ We will provide unprocessed chunks to the caller if `.return_metadata` is set.
+ Applications are free to implement an Exif decoder.
+ */
+ eXIf = 'e' << 24 | 'X' << 16 | 'I' << 8 | 'f',
+
+ // PNG files must end with IEND
+ IEND = 'I' << 24 | 'E' << 16 | 'N' << 8 | 'D',
+
+ /*
+ XCode sometimes produces "PNG" files that don't adhere to the PNG spec.
+ We recognize them only in order to avoid doing further work on them.
+
+ Some tools like PNG Defry may be able to repair them, but we're not
+ going to reward Apple for producing proprietary broken files purporting
+ to be PNGs by supporting them.
+
+ */
+ iDOT = 'i' << 24 | 'D' << 16 | 'O' << 8 | 'T',
+ // Fix: the value previously encoded 'H' in the third byte, spelling "CbHI" —
+ // matching neither this member's name nor any chunk found in the wild.
+ // NOTE(review): Apple's proprietary chunk is spelled "CgBI" on disk;
+ // confirm whether this member is intended to match it.
+ CbGI = 'C' << 24 | 'b' << 16 | 'G' << 8 | 'I',
+}
+
+IHDR :: struct #packed {
+ width: u32be,
+ height: u32be,
+ bit_depth: u8,
+ color_type: Color_Type,
+ compression_method: u8,
+ filter_method: u8,
+ interlace_method: Interlace_Method,
+}
+IHDR_SIZE :: size_of(IHDR);
+#assert (IHDR_SIZE == 13);
+
+Color_Value :: enum u8 {
+ Paletted = 0, // 1 << 0 = 1
+ Color = 1, // 1 << 1 = 2
+ Alpha = 2, // 1 << 2 = 4
+}
+Color_Type :: distinct bit_set[Color_Value; u8];
+
+Interlace_Method :: enum u8 {
+ None = 0,
+ Adam7 = 1,
+}
+
+Row_Filter :: enum u8 {
+ None = 0,
+ Sub = 1,
+ Up = 2,
+ Average = 3,
+ Paeth = 4,
+};
+
+PLTE_Entry :: [3]u8;
+
+PLTE :: struct #packed {
+ entries: [256]PLTE_Entry,
+ used: u16,
+}
+
+hIST :: struct #packed {
+ entries: [256]u16,
+ used: u16,
+}
+
+sPLT :: struct #packed {
+ name: string,
+ depth: u8,
+ entries: union {
+ [][4]u8,
+ [][4]u16,
+ },
+ used: u16,
+}
+
+// Other chunks
+tIME :: struct #packed {
+ year: u16be,
+ month: u8,
+ day: u8,
+ hour: u8,
+ minute: u8,
+ second: u8,
+};
+#assert(size_of(tIME) == 7);
+
+CIE_1931_Raw :: struct #packed {
+ x: u32be,
+ y: u32be,
+}
+
+CIE_1931 :: struct #packed {
+ x: f32,
+ y: f32,
+}
+
+cHRM_Raw :: struct #packed {
+ w: CIE_1931_Raw,
+ r: CIE_1931_Raw,
+ g: CIE_1931_Raw,
+ b: CIE_1931_Raw,
+}
+#assert(size_of(cHRM_Raw) == 32);
+
+cHRM :: struct #packed {
+ w: CIE_1931,
+ r: CIE_1931,
+ g: CIE_1931,
+ b: CIE_1931,
+}
+#assert(size_of(cHRM) == 32);
+
+gAMA :: struct {
+ gamma_100k: u32be, // Gamma * 100k
+};
+#assert(size_of(gAMA) == 4);
+
+pHYs :: struct #packed {
+ ppu_x: u32be,
+ ppu_y: u32be,
+ unit: pHYs_Unit,
+};
+#assert(size_of(pHYs) == 9);
+
+pHYs_Unit :: enum u8 {
+ Unknown = 0,
+ Meter = 1,
+};
+
+Text :: struct {
+ keyword: string,
+ keyword_localized: string,
+ language: string,
+ text: string,
+};
+
+Exif :: struct {
+ byte_order: enum {
+ little_endian,
+ big_endian,
+ },
+ data: []u8,
+}
+
+iCCP :: struct {
+ name: string,
+ profile: []u8,
+}
+
+sRGB_Rendering_Intent :: enum u8 {
+ Perceptual = 0,
+ Relative_colorimetric = 1,
+ Saturation = 2,
+ Absolute_colorimetric = 3,
+}
+
+sRGB :: struct #packed {
+ intent: sRGB_Rendering_Intent,
+}
+
+ADAM7_X_ORIG := []int{ 0,4,0,2,0,1,0 };
+ADAM7_Y_ORIG := []int{ 0,0,4,0,2,0,1 };
+ADAM7_X_SPACING := []int{ 8,8,4,4,2,2,1 };
+ADAM7_Y_SPACING := []int{ 8,8,8,4,4,2,2 };
+
+// Implementation starts here
+
+/*
+ Reads one PNG chunk (length+type header, payload, CRC) from the stream.
+ The payload is allocated on the temp allocator.
+ Returns E_General.Stream_Too_Short if the stream ends early, and
+ E_General.Checksum_Failed if the stored CRC32 does not match the one
+ computed over chunk type + data.
+*/
+read_chunk :: proc(ctx: ^compress.Context) -> (Chunk, Error) {
+
+ chunk := Chunk{};
+
+ ch, e := compress.read_data(ctx, Chunk_Header);
+ if e != .None {
+ return {}, E_General.Stream_Too_Short;
+ }
+ chunk.header = ch;
+
+ data := make([]u8, ch.length, context.temp_allocator);
+ _, e2 := ctx.input->impl_read(data);
+ if e2 != .None {
+ return {}, E_General.Stream_Too_Short;
+ }
+ chunk.data = data;
+
+ // Compute CRC over chunk type + data
+ type := (^[4]byte)(&ch.type)^;
+ computed_crc := hash.crc32(type[:]);
+ computed_crc = hash.crc32(data, computed_crc);
+
+ crc, e3 := compress.read_data(ctx, u32be);
+ if e3 != .None {
+ return {}, E_General.Stream_Too_Short;
+ }
+ chunk.crc = crc;
+
+ if chunk.crc != u32be(computed_crc) {
+ return {}, E_General.Checksum_Failed;
+ }
+ return chunk, E_General.OK;
+}
+
+read_header :: proc(ctx: ^compress.Context) -> (IHDR, Error) {
+
+ c, e := read_chunk(ctx);
+ if !is_kind(e, E_General.OK) {
+ return {}, e;
+ }
+
+ header := (^IHDR)(raw_data(c.data))^;
+ // Validate IHDR
+ using header;
+ if width == 0 || height == 0 {
+ return {}, E_PNG.Invalid_Image_Dimensions;
+ }
+
+ if compression_method != 0 {
+ return {}, E_General.Unknown_Compression_Method;
+ }
+
+ if filter_method != 0 {
+ return {}, E_PNG.Unknown_Filter_Method;
+ }
+
+ if interlace_method != .None && interlace_method != .Adam7 {
+ return {}, E_PNG.Unknown_Interlace_Method;
+
+ }
+
+ switch (transmute(u8)color_type) {
+ case 0:
+ /*
+ Grayscale.
+ Allowed bit depths: 1, 2, 4, 8 and 16.
+ */
+ allowed := false;
+ for i in ([]u8{1, 2, 4, 8, 16}) {
+ if bit_depth == i {
+ allowed = true;
+ break;
+ }
+ }
+ if !allowed {
+ return {}, E_PNG.Invalid_Color_Bit_Depth_Combo;
+ }
+ case 2, 4, 6:
+ /*
+ RGB, Grayscale+Alpha, RGBA.
+ Allowed bit depths: 8 and 16
+ */
+ if bit_depth != 8 && bit_depth != 16 {
+ return {}, E_PNG.Invalid_Color_Bit_Depth_Combo;
+ }
+ case 3:
+ /*
+ Paletted. PLTE chunk must appear.
+ Allowed bit depths: 1, 2, 4 and 8.
+ */
+ allowed := false;
+ for i in ([]u8{1, 2, 4, 8}) {
+ if bit_depth == i {
+ allowed = true;
+ break;
+ }
+ }
+ if !allowed {
+ return {}, E_PNG.Invalid_Color_Bit_Depth_Combo;
+ }
+
+ case:
+ return {}, E_PNG.Unknown_Color_Type;
+ }
+
+ return header, E_General.OK;
+}
+
+// Returns the 4-character ASCII name of a chunk type by viewing the
+// big-endian type value in place. The returned string aliases `type`.
+chunk_type_to_name :: proc(type: ^Chunk_Type) -> string {
+ return strings.string_from_ptr(transmute(^u8)type, 4);
+}
+
+/*
+ Decodes a PNG held in a byte slice by wrapping it in a bytes.Reader-backed
+ stream and delegating to `load_from_stream`.
+*/
+load_from_slice :: proc(slice: ^[]u8, options: Options = {}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ r := bytes.Reader{};
+ bytes.reader_init(&r, slice^);
+ stream := bytes.reader_to_stream(&r);
+
+ /*
+ TODO: Add a flag to tell the PNG loader that the stream is backed by a slice.
+ This way the stream reader could avoid the copy into the temp memory returned by it,
+ and instead return a slice into the original memory that's already owned by the caller.
+ */
+ img, err = load_from_stream(&stream, options, allocator);
+
+ return img, err;
+}
+
+/*
+ Loads a PNG from disk: the file is read into temp-allocator memory and
+ decoded via `load_from_slice`.
+ On a read failure this returns a freshly allocated empty Image together
+ with E_General.File_Not_Found, so callers can unconditionally `destroy`
+ the result.
+*/
+load_from_file :: proc(filename: string, options: Options = {}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ load_file :: proc(filename: string) -> (res: []u8, ok: bool) {
+ return os.read_entire_file(filename, context.temp_allocator);
+ }
+
+ data, ok := load_file(filename);
+ if ok {
+ img, err = load_from_slice(&data, options, allocator);
+ return;
+ } else {
+ img = new(Image);
+ return img, E_General.File_Not_Found;
+ }
+}
+
+/*
+ Loads a PNG image from an io.Stream.
+
+ Walks the chunk stream in spec order (IHDR first, contiguous IDATs, IEND last),
+ inflates the concatenated IDAT payload with zlib, defilters it, then
+ post-processes paletted / 8-bit / 16-bit data into 3- or 4-channel RGB(A).
+ Honors Options flags for metadata-only loads, alpha add/drop,
+ premultiplication and background blending.
+ Returns the image (with `info` attached as sidecar when metadata was
+ requested) and an Error union value.
+*/
+load_from_stream :: proc(stream: ^io.Stream, options: Options = {}, allocator := context.allocator) -> (img: ^Image, err: Error) {
+ options := options;
+ // `.info` is shorthand for "metadata only": expand it into its parts.
+ if .info in options {
+ options |= {.return_metadata, .do_not_decompress_image};
+ options ~= {.info};
+ }
+
+ if .alpha_drop_if_present in options && .alpha_add_if_missing in options {
+ return {}, E_General.Incompatible_Options;
+ }
+
+ if img == nil {
+ img = new(Image);
+ }
+
+ img.sidecar = nil;
+
+ ctx := compress.Context{
+ input = stream^,
+ };
+
+ // Every PNG starts with a fixed 8-byte signature.
+ signature, io_error := compress.read_data(&ctx, Signature);
+ if io_error != .None || signature != .PNG {
+ return img, E_PNG.Invalid_PNG_Signature;
+ }
+
+ idat: []u8;
+ idat_b: bytes.Buffer;
+ idat_length := u32be(0);
+ defer bytes.buffer_destroy(&idat_b);
+
+ c: Chunk;
+ ch: Chunk_Header;
+ e: io.Error;
+
+ header: IHDR;
+ info: Info;
+ info.chunks.allocator = context.temp_allocator;
+
+ // State to ensure correct chunk ordering.
+ seen_ihdr := false; first := true;
+ seen_plte := false;
+ seen_bkgd := false;
+ seen_trns := false;
+ seen_idat := false;
+ seen_iend := false;
+
+ _plte := PLTE{};
+ trns := Chunk{};
+
+ final_image_channels := 0;
+
+ read_error: io.Error;
+ // 12 bytes is the size of a chunk with a zero-length payload.
+ for (read_error == .None && !seen_iend) {
+ // Peek at next chunk's length and type.
+ // TODO: Some streams may not provide seek/read_at
+
+ ch, e = compress.peek_data(&ctx, Chunk_Header);
+ if e != .None {
+ return img, E_General.Stream_Too_Short;
+ }
+ // name := chunk_type_to_name(&ch.type); // Only used for debug prints during development.
+
+ #partial switch(ch.type) {
+ case .IHDR:
+ if seen_ihdr || !first {
+ return {}, E_PNG.IHDR_Not_First_Chunk;
+ }
+ seen_ihdr = true;
+
+ header, err = read_header(&ctx);
+ if !is_kind(err, E_General.OK) {
+ return img, err;
+ }
+
+ if .Paletted in header.color_type {
+ // Color type 3
+ img.channels = 1;
+ final_image_channels = 3;
+ img.depth = 8;
+ } else if .Color in header.color_type {
+ // Color image without a palette
+ img.channels = 3;
+ final_image_channels = 3;
+ img.depth = header.bit_depth;
+ } else {
+ // Grayscale
+ img.channels = 1;
+ final_image_channels = 1;
+ img.depth = header.bit_depth;
+ }
+
+ if .Alpha in header.color_type {
+ img.channels += 1;
+ final_image_channels += 1;
+ }
+
+ if img.channels == 0 || img.depth == 0 {
+ return {}, E_PNG.IHDR_Corrupt;
+ }
+
+ img.width = int(header.width);
+ img.height = int(header.height);
+
+ using header;
+ h := IHDR{
+ width = width,
+ height = height,
+ bit_depth = bit_depth,
+ color_type = color_type,
+ compression_method = compression_method,
+ filter_method = filter_method,
+ interlace_method = interlace_method,
+ };
+ info.header = h;
+ case .PLTE:
+ seen_plte = true;
+ // PLTE must appear before IDAT and can't appear for color types 0, 4.
+ ct := transmute(u8)info.header.color_type;
+ if seen_idat || ct == 0 || ct == 4 {
+ return img, E_PNG.PLTE_Encountered_Unexpectedly;
+ }
+
+ c, err = read_chunk(&ctx);
+ if !is_kind(err, E_General.OK) {
+ return img, err;
+ }
+
+ // A palette holds at most 256 RGB triples.
+ if c.header.length % 3 != 0 || c.header.length > 768 {
+ return img, E_PNG.PLTE_Invalid_Length;
+ }
+ plte_ok: bool;
+ _plte, plte_ok = plte(c);
+ if !plte_ok {
+ return img, E_PNG.PLTE_Invalid_Length;
+ }
+
+ if .return_metadata in options {
+ append(&info.chunks, c);
+ }
+ case .IDAT:
+ // If we only want image metadata and don't want the pixel data, we can early out.
+ if .return_metadata not_in options && .do_not_decompress_image in options {
+ img.channels = final_image_channels;
+ img.sidecar = info;
+ return img, E_General.OK;
+ }
+ // There must be at least 1 IDAT, contiguous if more.
+ if seen_idat {
+ return img, E_PNG.IDAT_Must_Be_Contiguous;
+ }
+
+ if idat_length > 0 {
+ return img, E_PNG.IDAT_Must_Be_Contiguous;
+ }
+
+ // Gather all consecutive IDAT chunks into one payload buffer.
+ next := ch.type;
+ for next == .IDAT {
+ c, err = read_chunk(&ctx);
+ if !is_kind(err, E_General.OK) {
+ return img, err;
+ }
+
+ bytes.buffer_write(&idat_b, c.data);
+ idat_length += c.header.length;
+
+ ch, e = compress.peek_data(&ctx, Chunk_Header);
+ if e != .None {
+ return img, E_General.Stream_Too_Short;
+ }
+ next = ch.type;
+ }
+ idat = bytes.buffer_to_bytes(&idat_b);
+ if int(idat_length) != len(idat) {
+ return {}, E_PNG.IDAT_Corrupt;
+ }
+ seen_idat = true;
+ case .IEND:
+ c, err = read_chunk(&ctx);
+ if !is_kind(err, E_General.OK) {
+ return img, err;
+ }
+ seen_iend = true;
+ case .bKGD:
+
+ // TODO: Make sure that 16-bit bKGD + tRNS chunks return u16 instead of u16be
+
+ c, err = read_chunk(&ctx);
+ if !is_kind(err, E_General.OK) {
+ return img, err;
+ }
+ seen_bkgd = true;
+ if .return_metadata in options {
+ append(&info.chunks, c);
+ }
+
+ // bKGD payload layout depends on the color type.
+ ct := transmute(u8)info.header.color_type;
+ switch(ct) {
+ case 3: // Indexed color
+ if c.header.length != 1 {
+ return {}, E_PNG.BKGD_Invalid_Length;
+ }
+ col := _plte.entries[c.data[0]];
+ // Widen 8-bit palette components to 16-bit by byte duplication.
+ img.background = [3]u16{
+ u16(col[0]) << 8 | u16(col[0]),
+ u16(col[1]) << 8 | u16(col[1]),
+ u16(col[2]) << 8 | u16(col[2]),
+ };
+ case 0, 4: // Grayscale, with and without Alpha
+ if c.header.length != 2 {
+ return {}, E_PNG.BKGD_Invalid_Length;
+ }
+ col := u16(mem.slice_data_cast([]u16be, c.data[:])[0]);
+ img.background = [3]u16{col, col, col};
+ case 2, 6: // Color, with and without Alpha
+ if c.header.length != 6 {
+ return {}, E_PNG.BKGD_Invalid_Length;
+ }
+ col := mem.slice_data_cast([]u16be, c.data[:]);
+ img.background = [3]u16{u16(col[0]), u16(col[1]), u16(col[2])};
+ }
+ case .tRNS:
+ c, err = read_chunk(&ctx);
+ if !is_kind(err, E_General.OK) {
+ return img, err;
+ }
+
+ // tRNS is only legal for color types without a full alpha channel.
+ if .Alpha in info.header.color_type {
+ return img, E_PNG.TRNS_Encountered_Unexpectedly;
+ }
+
+ if .return_metadata in options {
+ append(&info.chunks, c);
+ }
+
+ /*
+ This makes the image one with transparency, so bump the final channel
+ count here, even though we leave img.channels alone for the defilterer's
+ sake. If we return early because the user just cares about metadata,
+ we'll set it to 'final_image_channels'.
+ */
+
+ final_image_channels += 1;
+
+ seen_trns = true;
+ if info.header.bit_depth < 8 && .Paletted not_in info.header.color_type {
+ // Rescale tRNS data so key matches intensity
+ dsc := depth_scale_table;
+ scale := dsc[info.header.bit_depth];
+ if scale != 1 {
+ key := mem.slice_data_cast([]u16be, c.data)[0] * u16be(scale);
+ c.data = []u8{0, u8(key & 255)};
+ }
+ }
+ trns = c;
+ case .iDOT, .CbGI:
+ /*
+ iPhone PNG bastardization that doesn't adhere to spec with a broken IDAT chunk.
+ We're not going to add support for it. If you have the misfortune of coming
+ across one of these files, use a utility to defry it first.
+ */
+ return img, E_PNG.PNG_Does_Not_Adhere_to_Spec;
+ case:
+ // Unhandled type
+ c, err = read_chunk(&ctx);
+ if !is_kind(err, E_General.OK) {
+ return img, err;
+ }
+ if .return_metadata in options {
+ // NOTE: Chunk data is currently allocated on the temp allocator.
+ append(&info.chunks, c);
+ }
+
+ first = false;
+ }
+ }
+
+ if .return_header in options || .return_metadata in options {
+ img.sidecar = info;
+ }
+ if .do_not_decompress_image in options {
+ img.channels = final_image_channels;
+ return img, E_General.OK;
+ }
+
+ if !seen_idat {
+ return img, E_PNG.IDAT_Missing;
+ }
+
+ // Inflate the concatenated IDAT payload into `buf`.
+ buf: bytes.Buffer;
+ zlib_error := zlib.inflate(&idat, &buf);
+ defer bytes.buffer_destroy(&buf);
+
+ if !is_kind(zlib_error, E_General.OK) {
+ return {}, zlib_error;
+ } else {
+ /*
+ Let's calculate the expected size of the IDAT based on its dimensions,
+ and whether or not it's interlaced
+ */
+ expected_size: int;
+ buf_len := len(buf.buf);
+
+ if header.interlace_method != .Adam7 {
+ expected_size = compute_buffer_size(int(header.width), int(header.height), int(img.channels), int(header.bit_depth), 1);
+ } else {
+ /*
+ Because Adam7 divides the image up into sub-images, and each scanline must start
+ with a filter byte, Adam7 interlaced images can have a larger raw size.
+ */
+ for p := 0; p < 7; p += 1 {
+ x := (int(header.width) - ADAM7_X_ORIG[p] + ADAM7_X_SPACING[p] - 1) / ADAM7_X_SPACING[p];
+ y := (int(header.height) - ADAM7_Y_ORIG[p] + ADAM7_Y_SPACING[p] - 1) / ADAM7_Y_SPACING[p];
+ if (x > 0 && y > 0) {
+ expected_size += compute_buffer_size(int(x), int(y), int(img.channels), int(header.bit_depth), 1);
+ }
+ }
+ }
+
+ if expected_size != buf_len {
+ return {}, E_PNG.IDAT_Corrupt;
+ }
+ }
+
+ /*
+ Defilter just cares about the raw number of image channels present.
+ So, we'll save the old value of img.channels we return to the user
+ as metadata, and set it instead to the raw number of channels.
+ */
+ defilter_error := defilter(img, &buf, &header, options);
+ if !is_kind(defilter_error, E_General.OK) {
+ bytes.buffer_destroy(&img.pixels);
+ return {}, defilter_error;
+ }
+
+ /*
+ Now we'll handle the recoloring of paletted images, handling of tRNS chunks,
+ and we'll expand grayscale images to RGB(A).
+
+ For the sake of convenience we return only RGB(A) images. In the future we
+ may supply an option to return Gray/Gray+Alpha as-is, in which case RGB(A)
+ will become the default.
+ */
+
+ raw_image_channels := img.channels;
+ out_image_channels := 3;
+
+ /*
+ To give ourselves less options to test, we'll knock out
+ `.blend_background` and `seen_bkgd` if we haven't seen both.
+ */
+ if !(seen_bkgd && .blend_background in options) {
+ options ~= {.blend_background};
+ seen_bkgd = false;
+ }
+
+ if seen_trns || .Alpha in info.header.color_type || .alpha_add_if_missing in options {
+ out_image_channels = 4;
+ }
+
+ if .alpha_drop_if_present in options {
+ out_image_channels = 3;
+ }
+
+ if seen_bkgd && .blend_background in options && .alpha_add_if_missing not_in options {
+ out_image_channels = 3;
+ }
+
+ add_alpha := (seen_trns && .alpha_drop_if_present not_in options) || (.alpha_add_if_missing in options);
+ premultiply := .alpha_premultiply in options || .blend_background in options;
+
+ img.channels = out_image_channels;
+
+ if .Paletted in header.color_type {
+ // Paletted path: replace indices with their palette colors.
+ temp := img.pixels;
+ defer bytes.buffer_destroy(&temp);
+
+ // We need to create a new image buffer
+ dest_raw_size := compute_buffer_size(int(header.width), int(header.height), out_image_channels, 8);
+ t := bytes.Buffer{};
+ resize(&t.buf, dest_raw_size);
+
+ i := 0; j := 0;
+
+ // If we don't have transparency or drop it without applying it, we can do this:
+ if (!seen_trns || (seen_trns && .alpha_drop_if_present in options && .alpha_premultiply not_in options)) && .alpha_add_if_missing not_in options {
+ for h := 0; h < int(img.height); h += 1 {
+ for w := 0; w < int(img.width); w += 1 {
+ c := _plte.entries[temp.buf[i]];
+ t.buf[j ] = c.r;
+ t.buf[j+1] = c.g;
+ t.buf[j+2] = c.b;
+ i += 1; j += 3;
+ }
+ }
+ } else if add_alpha || .alpha_drop_if_present in options {
+ bg := [3]f32{0, 0, 0};
+ if premultiply && seen_bkgd {
+ c16 := img.background.([3]u16);
+ bg = [3]f32{f32(c16.r), f32(c16.g), f32(c16.b)};
+ }
+
+ no_alpha := (.alpha_drop_if_present in options || premultiply) && .alpha_add_if_missing not_in options;
+ blend_background := seen_bkgd && .blend_background in options;
+
+ for h := 0; h < int(img.height); h += 1 {
+ for w := 0; w < int(img.width); w += 1 {
+ index := temp.buf[i];
+
+ c := _plte.entries[index];
+ // Palette entries without a tRNS entry default to fully opaque.
+ a := int(index) < len(trns.data) ? trns.data[index] : 255;
+ alpha := f32(a) / 255.0;
+
+ if blend_background {
+ c.r = u8((1.0 - alpha) * bg[0] + f32(c.r) * alpha);
+ c.g = u8((1.0 - alpha) * bg[1] + f32(c.g) * alpha);
+ c.b = u8((1.0 - alpha) * bg[2] + f32(c.b) * alpha);
+ a = 255;
+ } else if premultiply {
+ c.r = u8(f32(c.r) * alpha);
+ c.g = u8(f32(c.g) * alpha);
+ c.b = u8(f32(c.b) * alpha);
+ }
+
+ t.buf[j ] = c.r;
+ t.buf[j+1] = c.g;
+ t.buf[j+2] = c.b;
+ i += 1;
+
+ if no_alpha {
+ j += 3;
+ } else {
+ t.buf[j+3] = u8(a);
+ j += 4;
+ }
+ }
+ }
+ } else {
+ // This should be impossible.
+ assert(false);
+ }
+
+ img.pixels = t;
+
+ } else if img.depth == 16 {
+ // 16-bit path: components were byteswapped to native u16 by defilter.
+ // Check if we need to do something.
+ if raw_image_channels == out_image_channels {
+ // If we have 3 in and 3 out, or 4 in and 4 out without premultiplication...
+ if raw_image_channels == 4 && .alpha_premultiply not_in options && !seen_bkgd {
+ // Then we're done.
+ return img, E_General.OK;
+ }
+ }
+
+ temp := img.pixels;
+ defer bytes.buffer_destroy(&temp);
+
+ // We need to create a new image buffer
+ dest_raw_size := compute_buffer_size(int(header.width), int(header.height), out_image_channels, 16);
+ t := bytes.Buffer{};
+ resize(&t.buf, dest_raw_size);
+
+ p16 := mem.slice_data_cast([]u16, temp.buf[:]);
+ o16 := mem.slice_data_cast([]u16, t.buf[:]);
+
+ switch (raw_image_channels) {
+ case 1:
+ // Gray without Alpha. Might have tRNS alpha.
+ key := u16(0);
+ if seen_trns {
+ key = mem.slice_data_cast([]u16, trns.data)[0];
+ }
+
+ for len(p16) > 0 {
+ r := p16[0];
+
+ alpha := u16(1); // Default to full opaque
+
+ if seen_trns {
+ if r == key {
+ if seen_bkgd {
+ c := img.background.([3]u16);
+ r = c[0];
+ } else {
+ alpha = 0; // Keyed transparency
+ }
+ }
+ }
+
+ if premultiply {
+ o16[0] = r * alpha;
+ o16[1] = r * alpha;
+ o16[2] = r * alpha;
+ } else {
+ o16[0] = r;
+ o16[1] = r;
+ o16[2] = r;
+ }
+
+ if out_image_channels == 4 {
+ o16[3] = alpha * 65535;
+ }
+
+ p16 = p16[1:];
+ o16 = o16[out_image_channels:];
+ }
+ case 2:
+ // Gray with alpha, we shouldn't have a tRNS chunk.
+ for len(p16) > 0 {
+ r := p16[0];
+ if premultiply {
+ alpha := p16[1];
+ c := u16(f32(r) * f32(alpha) / f32(65535));
+ o16[0] = c;
+ o16[1] = c;
+ o16[2] = c;
+ } else {
+ o16[0] = r;
+ o16[1] = r;
+ o16[2] = r;
+ }
+
+ if .alpha_drop_if_present not_in options {
+ o16[3] = p16[1];
+ }
+
+ p16 = p16[2:];
+ o16 = o16[out_image_channels:];
+ }
+ case 3:
+ /*
+ Color without Alpha.
+ We may still have a tRNS chunk or `.alpha_add_if_missing`.
+ */
+
+ key: []u16;
+ if seen_trns {
+ key = mem.slice_data_cast([]u16, trns.data);
+ }
+
+ for len(p16) > 0 {
+ r := p16[0];
+ g := p16[1];
+ b := p16[2];
+
+ alpha := u16(1); // Default to full opaque
+
+ if seen_trns {
+ if r == key[0] && g == key[1] && b == key[2] {
+ if seen_bkgd {
+ c := img.background.([3]u16);
+ r = c[0];
+ g = c[1];
+ b = c[2];
+ } else {
+ alpha = 0; // Keyed transparency
+ }
+ }
+ }
+
+ if premultiply {
+ o16[0] = r * alpha;
+ o16[1] = g * alpha;
+ o16[2] = b * alpha;
+ } else {
+ o16[0] = r;
+ o16[1] = g;
+ o16[2] = b;
+ }
+
+ if out_image_channels == 4 {
+ o16[3] = alpha * 65535;
+ }
+
+ p16 = p16[3:];
+ o16 = o16[out_image_channels:];
+ }
+ case 4:
+ // Color with Alpha, can't have tRNS.
+ for len(p16) > 0 {
+ r := p16[0];
+ g := p16[1];
+ b := p16[2];
+ a := p16[3];
+
+ if premultiply {
+ alpha := f32(a) / 65535.0;
+ o16[0] = u16(f32(r) * alpha);
+ o16[1] = u16(f32(g) * alpha);
+ o16[2] = u16(f32(b) * alpha);
+ } else {
+ o16[0] = r;
+ o16[1] = g;
+ o16[2] = b;
+ }
+
+ if .alpha_drop_if_present not_in options {
+ o16[3] = a;
+ }
+
+ p16 = p16[4:];
+ o16 = o16[out_image_channels:];
+ }
+ case:
+ unreachable("We should never seen # channels other than 1-4 inclusive.");
+ }
+
+ img.pixels = t;
+ img.channels = out_image_channels;
+
+ } else if img.depth == 8 {
+ // 8-bit path (sub-8-bit depths were expanded to 8 by the defilterer).
+ // Check if we need to do something.
+ if raw_image_channels == out_image_channels {
+ // If we have 3 in and 3 out, or 4 in and 4 out without premultiplication...
+ if raw_image_channels == 4 && .alpha_premultiply not_in options {
+ // Then we're done.
+ return img, E_General.OK;
+ }
+ }
+
+ temp := img.pixels;
+ defer bytes.buffer_destroy(&temp);
+
+ // We need to create a new image buffer
+ dest_raw_size := compute_buffer_size(int(header.width), int(header.height), out_image_channels, 8);
+ t := bytes.Buffer{};
+ resize(&t.buf, dest_raw_size);
+
+ p := mem.slice_data_cast([]u8, temp.buf[:]);
+ o := mem.slice_data_cast([]u8, t.buf[:]);
+
+ switch (raw_image_channels) {
+ case 1:
+ // Gray without Alpha. Might have tRNS alpha.
+ key := u8(0);
+ if seen_trns {
+ key = u8(mem.slice_data_cast([]u16be, trns.data)[0]);
+ }
+
+ for len(p) > 0 {
+ r := p[0];
+ alpha := u8(1);
+
+ /*
+ NOTE(review): unlike the 16-bit gray case above, when `seen_trns`
+ is set but `premultiply` is not, o[0..2] are left unwritten here —
+ looks like a misplaced brace; compare the 16-bit path. TODO confirm.
+ */
+ if seen_trns {
+ if r == key {
+ if seen_bkgd {
+ c := img.background.([3]u16);
+ r = u8(c[0]);
+ } else {
+ alpha = 0; // Keyed transparency
+ }
+ }
+ if premultiply {
+ o[0] = r * alpha;
+ o[1] = r * alpha;
+ o[2] = r * alpha;
+ }
+ } else {
+ o[0] = r;
+ o[1] = r;
+ o[2] = r;
+ }
+
+ if out_image_channels == 4 {
+ o[3] = alpha * 255;
+ }
+
+ p = p[1:];
+ o = o[out_image_channels:];
+ }
+ case 2:
+ // Gray with alpha, we shouldn't have a tRNS chunk.
+ for len(p) > 0 {
+ r := p[0];
+ if .alpha_premultiply in options {
+ alpha := p[1];
+ c := u8(f32(r) * f32(alpha) / f32(255));
+ o[0] = c;
+ o[1] = c;
+ o[2] = c;
+ } else {
+ o[0] = r;
+ o[1] = r;
+ o[2] = r;
+ }
+
+ if .alpha_drop_if_present not_in options {
+ o[3] = p[1];
+ }
+
+ p = p[2:];
+ o = o[out_image_channels:];
+ }
+ case 3:
+ // Color without Alpha. We may still have a tRNS chunk
+ key: []u8;
+ if seen_trns {
+ /*
+ For 8-bit images, the tRNS chunk still contains a triple in u16be.
+ We use only the low byte in this case.
+ */
+ key = []u8{trns.data[1], trns.data[3], trns.data[5]};
+ }
+ for len(p) > 0 {
+ r := p[0];
+ g := p[1];
+ b := p[2];
+
+ alpha := u8(1); // Default to full opaque
+
+ // TODO: Combine the seen_trns cases.
+ /*
+ NOTE(review): same structural oddity as the 8-bit gray case —
+ with `seen_trns` set and no premultiply/blend, o[0..2] stay
+ unwritten. Compare the 16-bit color path. TODO confirm.
+ */
+ if seen_trns {
+ if r == key[0] && g == key[1] && b == key[2] {
+ if seen_bkgd {
+ c := img.background.([3]u16);
+ r = u8(c[0]);
+ g = u8(c[1]);
+ b = u8(c[2]);
+ } else {
+ alpha = 0; // Keyed transparency
+ }
+ }
+
+ if .alpha_premultiply in options || .blend_background in options {
+ o[0] = r * alpha;
+ o[1] = g * alpha;
+ o[2] = b * alpha;
+ }
+ } else {
+ o[0] = r;
+ o[1] = g;
+ o[2] = b;
+ }
+
+ if out_image_channels == 4 {
+ o[3] = alpha * 255;
+ }
+
+ p = p[3:];
+ o = o[out_image_channels:];
+ }
+ case 4:
+ // Color with Alpha, can't have tRNS.
+ for len(p) > 0 {
+ r := p[0];
+ g := p[1];
+ b := p[2];
+ a := p[3];
+
+ if .alpha_premultiply in options {
+ alpha := f32(a) / 255.0;
+ o[0] = u8(f32(r) * alpha);
+ o[1] = u8(f32(g) * alpha);
+ o[2] = u8(f32(b) * alpha);
+ } else {
+ o[0] = r;
+ o[1] = g;
+ o[2] = b;
+ }
+
+ if .alpha_drop_if_present not_in options {
+ o[3] = a;
+ }
+
+ p = p[4:];
+ o = o[out_image_channels:];
+ }
+ case:
+ unreachable("We should never seen # channels other than 1-4 inclusive.");
+ }
+
+ img.pixels = t;
+ img.channels = out_image_channels;
+
+ } else {
+ /*
+ This may change if we ever don't expand 1, 2 and 4 bit images. But, those raw
+ returns will likely bypass this processing pipeline.
+ */
+ unreachable("We should never see bit depths other than 8, 16 and 'Paletted' here.");
+ }
+
+ return img, E_General.OK;
+}
+
+
+// Paeth predictor from the PNG specification: predicts a byte as whichever of
+// left / up / up_left is closest to (left + up - up_left), with ties broken
+// in that order. Arithmetic is widened to i16 so the estimate can't wrap.
+filter_paeth :: #force_inline proc(left, up, up_left: u8) -> u8 {
+ l, u, ul := i16(left), i16(up), i16(up_left);
+ estimate := l + u - ul;
+ dist_l := abs(estimate - l);
+ dist_u := abs(estimate - u);
+ dist_ul := abs(estimate - ul);
+ if dist_l <= dist_u && dist_l <= dist_ul {
+ return left;
+ } else if dist_u <= dist_ul {
+ return up;
+ }
+ return up_left;
+}
+
+// Parameters shared by the defilter_* procedures.
+Filter_Params :: struct #packed {
+ src : []u8, // Filtered input: each row is one filter byte + row data.
+ dest : []u8, // Output buffer; sized by the caller.
+ width : int, // Image (or Adam7 sub-image) width in pixels.
+ height : int, // Image (or Adam7 sub-image) height in rows.
+ depth : int, // Bit depth per channel (1, 2, 4, 8 or 16).
+ channels: int, // Raw channel count as stored in the file.
+ rescale : bool, // Widen sub-8-bit samples to full 0..255 range (off for paletted).
+}
+
+// Multipliers that widen a 1/2/4-bit sample to the full 8-bit range, indexed
+// by bit depth: e.g. a 2-bit value 0..3 times 0x55 gives 0x00/0x55/0xAA/0xFF.
+// Unused depth slots are 0; depth 8 maps to the identity multiplier 1.
+depth_scale_table :: []u8{0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01};
+
+// @(optimization_mode="speed")
+/*
+ Reverses PNG row filters for an 8-bit-per-channel image.
+ Consumes one filter byte plus `channels * width` data bytes per row from
+ params.src and writes the reconstructed bytes to params.dest.
+ Returns false if a row carries an unknown filter byte.
+*/
+defilter_8 :: proc(params: ^Filter_Params) -> (ok: bool) {
+
+ using params;
+ row_stride := channels * width;
+
+ // TODO: See about doing a Duff's #unroll where practicable
+
+ // Apron so we don't need to special case first rows.
+ up := make([]u8, row_stride, context.temp_allocator);
+ ok = true;
+
+ for _ in 0..<height {
+ // Number of bytes that have a left neighbor one pixel back.
+ nk := row_stride - channels;
+
+ filter := Row_Filter(src[0]); src = src[1:];
+ // fmt.printf("Row: %v | Filter: %v\n", y, filter);
+ switch(filter) {
+ case .None:
+ copy(dest, src[:row_stride]);
+ case .Sub:
+ // First pixel has no left neighbor; copy it through.
+ for i := 0; i < channels; i += 1 {
+ dest[i] = src[i];
+ }
+ for k := 0; k < nk; k += 1 {
+ dest[channels+k] = (src[channels+k] + dest[k]) & 255;
+ }
+ case .Up:
+ for k := 0; k < row_stride; k += 1 {
+ dest[k] = (src[k] + up[k]) & 255;
+ }
+ case .Average:
+ for i := 0; i < channels; i += 1 {
+ avg := up[i] >> 1;
+ dest[i] = (src[i] + avg) & 255;
+ }
+ for k := 0; k < nk; k += 1 {
+ avg := u8((u16(up[channels+k]) + u16(dest[k])) >> 1);
+ dest[channels+k] = (src[channels+k] + avg) & 255;
+ }
+ case .Paeth:
+ for i := 0; i < channels; i += 1 {
+ paeth := filter_paeth(0, up[i], 0);
+ dest[i] = (src[i] + paeth) & 255;
+ }
+ for k := 0; k < nk; k += 1 {
+ paeth := filter_paeth(dest[k], up[channels+k], up[k]);
+ dest[channels+k] = (src[channels+k] + paeth) & 255;
+ }
+ case:
+ return false;
+ }
+
+ // Advance: this row becomes the next row's 'up' reference.
+ src = src[row_stride:];
+ up = dest;
+ dest = dest[row_stride:];
+ }
+ return;
+}
+
+// @(optimization_mode="speed")
+/*
+ Reverses PNG row filters for a sub-8-bit-depth image, then widens the packed
+ 1/2/4-bit samples to one byte each (rescaling them to the full 0..255 range
+ unless `rescale` is off, i.e. for paletted images).
+ Defiltered bytes are stored right-aligned in each output row so the widening
+ pass can run in place over the same buffer.
+ Returns false if a row carries an unknown filter byte.
+*/
+defilter_less_than_8 :: proc(params: ^Filter_Params) -> (ok: bool) #no_bounds_check {
+
+ using params;
+ ok = true;
+
+ row_stride_in := ((channels * width * depth) + 7) >> 3;
+ row_stride_out := channels * width;
+
+ // Store defiltered bytes rightmost so we can widen in-place.
+ row_offset := row_stride_out - row_stride_in;
+ // Save original dest because we'll need it for the bit widening.
+ orig_dest := dest;
+
+ // TODO: See about doing a Duff's #unroll where practicable
+
+ // Apron so we don't need to special case first rows.
+ up := make([]u8, row_stride_out, context.temp_allocator);
+
+ #no_bounds_check for _ in 0..<height {
+ // Number of bytes that have a left neighbor one pixel back.
+ nk := row_stride_in - channels;
+
+ dest = dest[row_offset:];
+
+ filter := Row_Filter(src[0]); src = src[1:];
+ switch(filter) {
+ case .None:
+ copy(dest, src[:row_stride_in]);
+ case .Sub:
+ for i in 0..channels {
+ dest[i] = src[i];
+ }
+ for k in 0..nk {
+ dest[channels+k] = (src[channels+k] + dest[k]) & 255;
+ }
+ case .Up:
+ for k in 0..row_stride_in {
+ dest[k] = (src[k] + up[k]) & 255;
+ }
+ case .Average:
+ for i in 0..channels {
+ avg := up[i] >> 1;
+ dest[i] = (src[i] + avg) & 255;
+ }
+ for k in 0..nk {
+ avg := u8((u16(up[channels+k]) + u16(dest[k])) >> 1);
+ dest[channels+k] = (src[channels+k] + avg) & 255;
+ }
+ case .Paeth:
+ for i in 0..channels {
+ paeth := filter_paeth(0, up[i], 0);
+ dest[i] = (src[i] + paeth) & 255;
+ }
+ for k in 0..nk {
+ /*
+ FIX: the 'up' sample must be taken at `channels+k`, matching
+ defilter_8 and defilter_16. The previous `up[channels]` compared
+ every byte against the same neighbor, corrupting Paeth rows.
+ */
+ paeth := filter_paeth(dest[k], up[channels+k], up[k]);
+ dest[channels+k] = (src[channels+k] + paeth) & 255;
+ }
+ case:
+ return false;
+ }
+
+ // Advance: this row becomes the next row's 'up' reference.
+ src = src [row_stride_in:];
+ up = dest;
+ dest = dest[row_stride_in:];
+ }
+
+ // Let's expand the bits
+ dest = orig_dest;
+
+ // Don't rescale the bits if we're a paletted image.
+ dsc := depth_scale_table;
+ scale := rescale ? dsc[depth] : 1;
+
+ /*
+ For sBIT support we should probably set scale to 1 and mask the significant bits.
+ Separately, do we want to support packed pixels? i.e defiltering only, no expansion?
+ If so, all we have to do is call defilter_8 for that case and not set img.depth to 8.
+ */
+
+ for j := 0; j < height; j += 1 {
+ // The defiltered row sits right-aligned within the output row.
+ src = dest[row_offset:];
+
+ if depth == 4 {
+ k := row_stride_out;
+ for ; k >= 2; k -= 2 {
+ c := src[0];
+ dest[0] = scale * (c >> 4);
+ dest[1] = scale * (c & 15);
+ dest = dest[2:]; src = src[1:];
+ }
+ if k > 0 {
+ c := src[0];
+ dest[0] = scale * (c >> 4);
+ dest = dest[1:];
+ }
+ } else if depth == 2 {
+ k := row_stride_out;
+ for ; k >= 4; k -= 4 {
+ c := src[0];
+ dest[0] = scale * ((c >> 6) );
+ dest[1] = scale * ((c >> 4) & 3);
+ dest[2] = scale * ((c >> 2) & 3);
+ dest[3] = scale * ((c ) & 3);
+ dest = dest[4:]; src = src[1:];
+ }
+ if k > 0 {
+ c := src[0];
+ dest[0] = scale * ((c >> 6) );
+ if k > 1 {
+ dest[1] = scale * ((c >> 4) & 3);
+ }
+ if k > 2 {
+ dest[2] = scale * ((c >> 2) & 3);
+ }
+ dest = dest[k:];
+ }
+ } else if depth == 1 {
+ k := row_stride_out;
+ for ; k >= 8; k -= 8 {
+ c := src[0];
+ dest[0] = scale * ((c >> 7) );
+ dest[1] = scale * ((c >> 6) & 1);
+ dest[2] = scale * ((c >> 5) & 1);
+ dest[3] = scale * ((c >> 4) & 1);
+ dest[4] = scale * ((c >> 3) & 1);
+ dest[5] = scale * ((c >> 2) & 1);
+ dest[6] = scale * ((c >> 1) & 1);
+ dest[7] = scale * ((c ) & 1);
+ dest = dest[8:]; src = src[1:];
+ }
+ if k > 0 {
+ c := src[0];
+ dest[0] = scale * ((c >> 7) );
+ if k > 1 {
+ dest[1] = scale * ((c >> 6) & 1);
+ }
+ if k > 2 {
+ dest[2] = scale * ((c >> 5) & 1);
+ }
+ if k > 3 {
+ dest[3] = scale * ((c >> 4) & 1);
+ }
+ if k > 4 {
+ dest[4] = scale * ((c >> 3) & 1);
+ }
+ if k > 5 {
+ dest[5] = scale * ((c >> 2) & 1);
+ }
+ if k > 6 {
+ dest[6] = scale * ((c >> 1) & 1);
+ }
+ dest = dest[k:];
+
+ }
+ }
+ }
+
+ return;
+}
+
+// @(optimization_mode="speed")
+/*
+ Reverses PNG row filters for a 16-bit-per-channel image.
+ Per the PNG spec, filters operate on bytes regardless of bit depth, so this
+ mirrors defilter_8 except that a "pixel" is `channels * 2` bytes wide.
+ Byte order is left as-is (big endian); the caller byteswaps afterwards.
+ Returns false if a row carries an unknown filter byte.
+*/
+defilter_16 :: proc(params: ^Filter_Params) -> (ok: bool) {
+
+ using params;
+ ok = true;
+
+ stride := channels * 2;
+ row_stride := width * stride;
+
+ // TODO: See about doing a Duff's #unroll where practicable
+ // Apron so we don't need to special case first rows.
+ up := make([]u8, row_stride, context.temp_allocator);
+
+ for y := 0; y < height; y += 1 {
+ // Number of bytes that have a left neighbor one pixel back.
+ nk := row_stride - stride;
+
+ filter := Row_Filter(src[0]); src = src[1:];
+ switch(filter) {
+ case .None:
+ copy(dest, src[:row_stride]);
+ case .Sub:
+ // First pixel has no left neighbor; copy it through.
+ for i := 0; i < stride; i += 1 {
+ dest[i] = src[i];
+ }
+ for k := 0; k < nk; k += 1 {
+ dest[stride+k] = (src[stride+k] + dest[k]) & 255;
+ }
+ case .Up:
+ for k := 0; k < row_stride; k += 1 {
+ dest[k] = (src[k] + up[k]) & 255;
+ }
+ case .Average:
+ for i := 0; i < stride; i += 1 {
+ avg := up[i] >> 1;
+ dest[i] = (src[i] + avg) & 255;
+ }
+ for k := 0; k < nk; k += 1 {
+ avg := u8((u16(up[stride+k]) + u16(dest[k])) >> 1);
+ dest[stride+k] = (src[stride+k] + avg) & 255;
+ }
+ case .Paeth:
+ for i := 0; i < stride; i += 1 {
+ paeth := filter_paeth(0, up[i], 0);
+ dest[i] = (src[i] + paeth) & 255;
+ }
+ for k := 0; k < nk; k += 1 {
+ paeth := filter_paeth(dest[k], up[stride+k], up[k]);
+ dest[stride+k] = (src[stride+k] + paeth) & 255;
+ }
+ case:
+ return false;
+ }
+
+ // Advance: this row becomes the next row's 'up' reference.
+ src = src[row_stride:];
+ up = dest;
+ dest = dest[row_stride:];
+ }
+
+ return;
+}
+
+/*
+ Defilters the inflated IDAT payload in `filter_bytes` into img.pixels,
+ dispatching to defilter_8 / defilter_less_than_8 / defilter_16 by bit depth.
+ Handles Adam7 interlacing by defiltering each of the 7 sub-images into a
+ temporary buffer and scattering its pixels to their final positions.
+ Sub-8-bit images are widened to 8 bits (img.depth is set to 8); on
+ little-endian hosts, 16-bit components are byteswapped to native u16.
+ Returns E_PNG.Unknown_Filter_Method on a bad row filter byte.
+*/
+defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^IHDR, options: Options) -> (err: compress.Error) {
+ input := bytes.buffer_to_bytes(filter_bytes);
+ width := int(header.width);
+ height := int(header.height);
+ channels := int(img.channels);
+ depth := int(header.bit_depth);
+ // Only grayscale (non-color) images get their sub-8-bit samples rescaled.
+ rescale := .Color not_in header.color_type;
+
+ bytes_per_channel := depth == 16 ? 2 : 1;
+
+ // Sub-8-bit depths are widened to 8, so the output is sized at 8 or 16 bits.
+ num_bytes := compute_buffer_size(width, height, channels, depth == 16 ? 16 : 8);
+ resize(&img.pixels.buf, num_bytes);
+
+ filter_ok: bool;
+
+ if header.interlace_method != .Adam7 {
+ params := Filter_Params{
+ src = input,
+ width = width,
+ height = height,
+ channels = channels,
+ depth = depth,
+ rescale = rescale,
+ dest = img.pixels.buf[:],
+ };
+
+ if depth == 8 {
+ filter_ok = defilter_8(&params);
+ } else if depth < 8 {
+ filter_ok = defilter_less_than_8(&params);
+ img.depth = 8;
+ } else {
+ filter_ok = defilter_16(&params);
+ }
+ if !filter_ok {
+ // Caller will destroy buffer for us.
+ return E_PNG.Unknown_Filter_Method;
+ }
+ } else {
+ /*
+ For deinterlacing we need to make a temporary buffer, defilter part of the image,
+ and copy that back into the actual output buffer.
+ */
+
+ for p := 0; p < 7; p += 1 {
+ i,j,x,y: int;
+ // Dimensions of Adam7 pass `p`; passes can be empty for small images.
+ x = (width - ADAM7_X_ORIG[p] + ADAM7_X_SPACING[p] - 1) / ADAM7_X_SPACING[p];
+ y = (height - ADAM7_Y_ORIG[p] + ADAM7_Y_SPACING[p] - 1) / ADAM7_Y_SPACING[p];
+ if (x > 0 && y > 0) {
+ temp: bytes.Buffer;
+ temp_len := compute_buffer_size(x, y, channels, depth == 16 ? 16 : 8);
+ resize(&temp.buf, temp_len);
+
+ params := Filter_Params{
+ src = input,
+ width = x,
+ height = y,
+ channels = channels,
+ depth = depth,
+ rescale = rescale,
+ dest = temp.buf[:],
+ };
+
+ if depth == 8 {
+ filter_ok = defilter_8(&params);
+ } else if depth < 8 {
+ filter_ok = defilter_less_than_8(&params);
+ img.depth = 8;
+ } else {
+ filter_ok = defilter_16(&params);
+ }
+
+ if !filter_ok {
+ // Caller will destroy buffer for us.
+ return E_PNG.Unknown_Filter_Method;
+ }
+
+ // Scatter each sub-image pixel to its interleaved output position.
+ t := temp.buf[:];
+ for j = 0; j < y; j += 1 {
+ for i = 0; i < x; i += 1 {
+ out_y := j * ADAM7_Y_SPACING[p] + ADAM7_Y_ORIG[p];
+ out_x := i * ADAM7_X_SPACING[p] + ADAM7_X_ORIG[p];
+
+ out_off := out_y * width * channels * bytes_per_channel;
+ out_off += out_x * channels * bytes_per_channel;
+
+ for z := 0; z < channels * bytes_per_channel; z += 1 {
+ img.pixels.buf[out_off + z] = t[z];
+ }
+ t = t[channels * bytes_per_channel:];
+ }
+ }
+ bytes.buffer_destroy(&temp);
+ // Consume this pass's filtered bytes (at the ORIGINAL depth).
+ input_stride := compute_buffer_size(x, y, channels, depth, 1);
+ input = input[input_stride:];
+ }
+ }
+ }
+ when ODIN_ENDIAN == "little" {
+ if img.depth == 16 {
+ // The pixel components are in Big Endian. Let's byteswap.
+ input := mem.slice_data_cast([]u16be, img.pixels.buf[:]);
+ output := mem.slice_data_cast([]u16 , img.pixels.buf[:]);
+ #no_bounds_check for v, i in input {
+ output[i] = u16(v);
+ }
+ }
+ }
+
+ return E_General.OK;
+}
+
+// Overloaded entry point: load(filename), load(slice) or load(stream).
+load :: proc{load_from_file, load_from_slice, load_from_stream}; \ No newline at end of file