author    gingerBill <bill@gingerbill.org>    2022-05-02 16:09:09 +0100
committer gingerBill <bill@gingerbill.org>    2022-05-02 16:09:09 +0100
commit    97717d65efde6f83e502f0261a3fb08a34ea3f04 (patch)
tree      9f48e24cad25dea79161bed3d948bc77364662b1
parent    8023c8abc7087c763da5e843252d3899784e57be (diff)
Add `vendor:openexr`
No foreign library yet
-rw-r--r--  vendor/openexr/exr_attr.odin     397
-rw-r--r--  vendor/openexr/exr_base.odin     170
-rw-r--r--  vendor/openexr/exr_chunkio.odin  143
-rw-r--r--  vendor/openexr/exr_coding.odin   119
-rw-r--r--  vendor/openexr/exr_context.odin  485
-rw-r--r--  vendor/openexr/exr_debug.odin      8
-rw-r--r--  vendor/openexr/exr_decode.odin   288
-rw-r--r--  vendor/openexr/exr_encode.odin   319
-rw-r--r--  vendor/openexr/exr_errors.odin    62
-rw-r--r--  vendor/openexr/exr_part.odin     733
10 files changed, 2724 insertions, 0 deletions
diff --git a/vendor/openexr/exr_attr.odin b/vendor/openexr/exr_attr.odin
new file mode 100644
index 000000000..eb07142ec
--- /dev/null
+++ b/vendor/openexr/exr_attr.odin
@@ -0,0 +1,397 @@
+package vendor_openexr
+
+import "core:c"
+
+// Enum declaring allowed values for \c u8 value stored in built-in compression type.
+compression_t :: enum c.int {
+ NONE = 0,
+ RLE = 1,
+ ZIPS = 2,
+ ZIP = 3,
+ PIZ = 4,
+ PXR24 = 5,
+ B44 = 6,
+ B44A = 7,
+ DWAA = 8,
+ DWAB = 9,
+}
+
+// Enum declaring allowed values for \c u8 value stored in built-in env map type.
+envmap_t :: enum c.int {
+ LATLONG = 0,
+ CUBE = 1,
+}
+
+// Enum declaring allowed values for \c u8 value stored in \c lineOrder type.
+lineorder_t :: enum c.int {
+ INCREASING_Y = 0,
+ DECREASING_Y = 1,
+ RANDOM_Y = 2,
+}
+
+// Enum declaring allowed values for part type.
+storage_t :: enum c.int {
+ SCANLINE = 0, // Corresponds to type of \c scanlineimage.
+ TILED, // Corresponds to type of \c tiledimage.
+ DEEP_SCANLINE, // Corresponds to type of \c deepscanline.
+ DEEP_TILED, // Corresponds to type of \c deeptile.
+}
+
+// @brief Enum representing what type of tile information is contained.
+tile_level_mode_t :: enum c.int {
+ ONE_LEVEL = 0, // Single level of image data.
+ MIPMAP_LEVELS = 1, // Mipmapped image data.
+ RIPMAP_LEVELS = 2, // Ripmapped image data.
+}
+
+/** @brief Enum representing how to scale positions between levels. */
+tile_round_mode_t :: enum c.int {
+ DOWN = 0,
+ UP = 1,
+}
+
+/** @brief Enum capturing the underlying data type on a channel. */
+pixel_type_t :: enum c.int {
+ UINT = 0,
+ HALF = 1,
+ FLOAT = 2,
+}
+
+/* /////////////////////////////////////// */
+/* First set of structs are data where we can read directly with no allocation needed... */
+
+/** @brief Struct to hold color chromaticities to interpret the tristimulus color values in the image data. */
+attr_chromaticities_t :: struct #packed {
+ red_x: f32,
+ red_y: f32,
+ green_x: f32,
+ green_y: f32,
+ blue_x: f32,
+ blue_y: f32,
+ white_x: f32,
+ white_y: f32,
+}
+
+/** @brief Struct to hold keycode information. */
+attr_keycode_t :: struct #packed {
+ film_mfc_code: i32,
+ film_type: i32,
+ prefix: i32,
+ count: i32,
+ perf_offset: i32,
+ perfs_per_frame: i32,
+ perfs_per_count: i32,
+}
+
+/** @brief struct to hold a 32-bit floating-point 3x3 matrix. */
+attr_m33f_t :: struct #packed {
+ m: [9]f32,
+}
+
+/** @brief struct to hold a 64-bit floating-point 3x3 matrix. */
+attr_m33d_t :: struct #packed {
+ m: [9]f64,
+}
+
+/** @brief Struct to hold a 32-bit floating-point 4x4 matrix. */
+attr_m44f_t :: struct #packed {
+ m: [16]f32,
+}
+
+/** @brief Struct to hold a 64-bit floating-point 4x4 matrix. */
+attr_m44d_t :: struct #packed {
+ m: [16]f64,
+}
+
+/** @brief Struct to hold an integer ratio value. */
+attr_rational_t :: struct #packed {
+ num: i32,
+ denom: u32,
+}
+
+/** @brief Struct to hold timecode information. */
+attr_timecode_t :: struct #packed {
+ time_and_flags: u32,
+ user_data: u32,
+}
+
+/** @brief Struct to hold a 2-element integer vector. */
+attr_v2i_t :: distinct [2]i32
+
+/** @brief Struct to hold a 2-element 32-bit float vector. */
+attr_v2f_t :: distinct [2]f32
+
+/** @brief Struct to hold a 2-element 64-bit float vector. */
+attr_v2d_t :: distinct [2]f64
+
+/** @brief Struct to hold a 3-element integer vector. */
+attr_v3i_t :: distinct [3]i32
+
+/** @brief Struct to hold a 3-element 32-bit float vector. */
+attr_v3f_t :: distinct [3]f32
+
+/** @brief Struct to hold a 3-element 64-bit float vector. */
+attr_v3d_t :: distinct [3]f64
+
+/** @brief Struct to hold an integer box/region definition. */
+attr_box2i_t :: struct #packed {
+ min: attr_v2i_t,
+ max: attr_v2i_t,
+}
+
+/** @brief Struct to hold a floating-point box/region definition. */
+attr_box2f_t :: struct #packed {
+ min: attr_v2f_t,
+ max: attr_v2f_t,
+}
+
+/** @brief Struct holding base tiledesc attribute type defined in spec
+ *
+ * NB: This is in a tightly packed area so it can be read directly, be
+ * careful it doesn't become padded to the next \c uint32_t boundary.
+ */
+attr_tiledesc_t :: struct #packed {
+ x_size: u32,
+ y_size: u32,
+ level_and_round: u8,
+}
+
+/** @brief Helper to access the type of tiling from the packed structure. */
+GET_TILE_LEVEL_MODE :: #force_inline proc "c" (tiledesc: attr_tiledesc_t) -> tile_level_mode_t {
+ return tile_level_mode_t(tiledesc.level_and_round & 0xf)
+}
+/** @brief Helper to access the rounding mode of tiling from the packed structure. */
+GET_TILE_ROUND_MODE :: #force_inline proc "c" (tiledesc: attr_tiledesc_t) -> tile_round_mode_t {
+ return tile_round_mode_t((tiledesc.level_and_round >> 4) & 0xf)
+}
+/** @brief Helper to pack the tiling type and rounding mode into the packed structure. */
+PACK_TILE_LEVEL_ROUND :: #force_inline proc "c" (lvl: tile_level_mode_t, mode: tile_round_mode_t) -> u8 {
+ return ((u8(mode) & 0xf) << 4) | (u8(lvl) & 0xf)
+}
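+
+/* Example (a sketch added for clarity, not part of the upstream header):
+ * round-tripping the packed level/round byte with the helpers above.
+ *
+ * \code{.odin}
+ * desc: attr_tiledesc_t
+ * desc.level_and_round = PACK_TILE_LEVEL_ROUND(.MIPMAP_LEVELS, .UP)
+ * assert(GET_TILE_LEVEL_MODE(desc) == .MIPMAP_LEVELS)
+ * assert(GET_TILE_ROUND_MODE(desc) == .UP)
+ * \endcode
+ */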
+
+
+/* /////////////////////////////////////// */
+/* Now structs that involve heap allocation to store data. */
+
+/** Storage for a string. */
+attr_string_t :: struct {
+ length: i32,
+ /** If this is non-zero, the string owns the data, if 0, is a const ref to a static string. */
+ alloc_size: i32,
+
+ str: cstring,
+}
+
+/** Storage for a string vector. */
+attr_string_vector_t :: struct {
+ n_strings: i32,
+ /** If this is non-zero, the string vector owns the data, if 0, is a const ref. */
+ alloc_size: i32,
+
+ strings: [^]attr_string_t,
+}
+
+/** Float vector storage struct. */
+attr_float_vector_t :: struct {
+ length: i32,
+ /** If this is non-zero, the float vector owns the data, if 0, is a const ref. */
+ alloc_size: i32,
+
+ arr: [^]f32,
+}
+
+/** Hint for lossy compression methods about how to treat values:
+ * logarithmic or linear. A human perceives the difference between
+ * luminance-like values (R, G, B) of 0.1 and 0.2 as roughly the same
+ * as between 1.0 and 2.0 (logarithmic), whereas chroma coordinates are
+ * closer to linear (0.1 to 0.2 is about the same difference as 1.0 to 1.1).
+ */
+perceptual_treatment_t :: enum c.int {
+ LOGARITHMIC = 0,
+ LINEAR = 1,
+}
+
+/** Individual channel information. */
+attr_chlist_entry_t :: struct {
+ name: attr_string_t,
+ /** Data representation for these pixels: uint, half, float. */
+ pixel_type: pixel_type_t,
+	/** Possible values are 0 and 1, per the \c perceptual_treatment_t docs. */
+ p_linear: u8,
+ reserved: [3]u8,
+ x_sampling: i32,
+ y_sampling: i32,
+}
+
+/** List of channel information (sorted alphabetically). */
+attr_chlist_t :: struct {
+ num_channels: c.int,
+ num_alloced: c.int,
+
+ entries: [^]attr_chlist_entry_t,
+}
+
+/** @brief Struct to define attributes of an embedded preview image. */
+attr_preview_t :: struct {
+ width: u32,
+ height: u32,
+ /** If this is non-zero, the preview owns the data, if 0, is a const ref. */
+ alloc_size: c.size_t,
+
+ rgba: [^]u8,
+}
+
+/** Custom storage structure for opaque data.
+ *
+ * Handlers for opaque types can be registered, then when a
+ * non-builtin type is encountered with a registered handler, the
+ * function pointers to unpack/pack it will be set up.
+ *
+ * @sa register_attr_type_handler
+ */
+attr_opaquedata_t :: struct {
+ size: i32,
+ unpacked_size: i32,
+ /** If this is non-zero, the struct owns the data, if 0, is a const ref. */
+ packed_alloc_size: i32,
+ pad: [4]u8,
+
+ packed_data: rawptr,
+
+ /** When an application wants to have custom data, they can store
+ * an unpacked form here which will be requested to be destroyed
+ * upon destruction of the attribute.
+ */
+ unpacked_data: rawptr,
+
+ /** An application can register an attribute handler which then
+ * fills in these function pointers. This allows a user to delay
+ * the expansion of the custom type until access is desired, and
+ * similarly, to delay the packing of the data until write time.
+ */
+ unpack_func_ptr: proc "c" (
+ ctxt: context_t,
+ data: rawptr,
+ attrsize: i32,
+ outsize: ^i32,
+ outbuffer: ^rawptr) -> result_t,
+ pack_func_ptr: proc "c" (
+ ctxt: context_t,
+ data: rawptr,
+ datasize: i32,
+ outsize: ^i32,
+ outbuffer: rawptr) -> result_t,
+ destroy_unpacked_func_ptr: proc "c" (
+ ctxt: context_t, data: rawptr, attrsize: i32),
+}
+
+/* /////////////////////////////////////// */
+
+/** @brief Built-in/native attribute type enum.
+ *
+ * This will enable us to do a tagged type struct to generically store
+ * attributes.
+ */
+attribute_type_t :: enum c.int {
+ UNKNOWN = 0, // Type indicating an error or uninitialized attribute.
+ BOX2I, // Integer region definition. @see attr_box2i_t.
+ BOX2F, // Float region definition. @see attr_box2f_t.
+ CHLIST, // Definition of channels in file @see chlist_entry.
+ CHROMATICITIES, // Values to specify color space of colors in file @see attr_chromaticities_t.
+ COMPRESSION, // ``u8`` declaring compression present.
+ DOUBLE, // Double precision floating point number.
+ ENVMAP, // ``u8`` declaring environment map type.
+ FLOAT, // Normal (4 byte) precision floating point number.
+ FLOAT_VECTOR, // List of normal (4 byte) precision floating point numbers.
+ INT, // 32-bit signed integer value.
+ KEYCODE, // Struct recording keycode @see attr_keycode_t.
+ LINEORDER, // ``u8`` declaring scanline ordering.
+ M33F, // 9 32-bit floats representing a 3x3 matrix.
+ M33D, // 9 64-bit floats representing a 3x3 matrix.
+ M44F, // 16 32-bit floats representing a 4x4 matrix.
+ M44D, // 16 64-bit floats representing a 4x4 matrix.
+ PREVIEW, // 2 ``unsigned ints`` followed by 4 x w x h ``u8`` image.
+ RATIONAL, // \c int followed by ``unsigned int``
+ STRING, // ``int`` (length) followed by char string data.
+ STRING_VECTOR, // 0 or more text strings (int + string). number is based on attribute size.
+ TILEDESC, // 2 ``unsigned ints`` ``xSize``, ``ySize`` followed by mode.
+ TIMECODE, // 2 ``unsigned ints`` time and flags, user data.
+ V2I, // Pair of 32-bit integers.
+ V2F, // Pair of 32-bit floats.
+ V2D, // Pair of 64-bit floats.
+ V3I, // Set of 3 32-bit integers.
+ V3F, // Set of 3 32-bit floats.
+ V3D, // Set of 3 64-bit floats.
+ OPAQUE, // User/unknown provided type.
+}
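+
+/* Example (a sketch, not from the upstream header): reading a value out of
+ * the tagged \c attribute_t defined below by switching on its \c type.
+ * `attr` is a hypothetical \c attribute_t and `fmt` is `core:fmt`.
+ *
+ * \code{.odin}
+ * #partial switch attr.type {
+ * case .FLOAT:  fmt.println(attr.name, "=", attr.f)
+ * case .STRING: fmt.println(attr.name, "=", attr.string.str)
+ * case .V2I:    fmt.println(attr.name, "=", attr.v2i^)
+ * }
+ * \endcode
+ */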
+
+/** @brief Storage, name and type information for an attribute.
+ *
+ * Attributes (metadata) for the file cause a surprising amount of
+ * overhead. It is not uncommon for a production-grade EXR to have
+ * many attributes. As such, the attribute struct is designed in a
+ * slightly more complicated manner. It is optimized to have the
+ * storage for that attribute: the struct itself, the name, the type,
+ * and the data all allocated as one block. Further, the type and
+ * standard names may use a static string to avoid allocating space
+ * for those as necessary with the pointers pointing to static strings
+ * (not to be freed). Finally, small values are optimized for.
+ */
+attribute_t :: struct {
+ /** Name of the attribute. */
+ name: cstring,
+ /** String type name of the attribute. */
+ type_name: cstring,
+ /** Length of name string (short flag is 31 max, long allows 255). */
+ name_length: u8,
+ /** Length of type string (short flag is 31 max, long allows 255). */
+ type_name_length: u8,
+
+ pad: [2]u8,
+
+ /** Enum of the attribute type. */
+ type: attribute_type_t,
+
+ /** Union of pointers of different types that can be used to type
+ * pun to an appropriate type for builtins. Do note that while
+ * this looks like a big thing, it is only the size of a single
+ * pointer. These are all pointers into some other data block
+ * storing the value you want, with the exception of the pod types
+ * which are just put in place (i.e. small value optimization).
+ *
+ * The attribute type \c type should directly correlate to one
+ * of these entries.
+ */
+ using _: struct #raw_union {
+ // NB: not pointers for POD types
+ uc: u8,
+ d: f64,
+ f: f32,
+ i: i32,
+
+ box2i: ^attr_box2i_t,
+ box2f: ^attr_box2f_t,
+ chlist: ^attr_chlist_t,
+ chromaticities: ^attr_chromaticities_t,
+ keycode: ^attr_keycode_t,
+ floatvector: ^attr_float_vector_t,
+ m33f: ^attr_m33f_t,
+ m33d: ^attr_m33d_t,
+ m44f: ^attr_m44f_t,
+ m44d: ^attr_m44d_t,
+ preview: ^attr_preview_t,
+ rational: ^attr_rational_t,
+ string: ^attr_string_t,
+ stringvector: ^attr_string_vector_t,
+ tiledesc: ^attr_tiledesc_t,
+ timecode: ^attr_timecode_t,
+ v2i: ^attr_v2i_t,
+ v2f: ^attr_v2f_t,
+ v2d: ^attr_v2d_t,
+ v3i: ^attr_v3i_t,
+ v3f: ^attr_v3f_t,
+ v3d: ^attr_v3d_t,
+ opaque: ^attr_opaquedata_t,
+ rawptr: ^u8,
+ },
+}
\ No newline at end of file
diff --git a/vendor/openexr/exr_base.odin b/vendor/openexr/exr_base.odin
new file mode 100644
index 000000000..0db4cc7ff
--- /dev/null
+++ b/vendor/openexr/exr_base.odin
@@ -0,0 +1,170 @@
+package vendor_openexr
+
+foreign import lib "exr.lib"
+
+import "core:c"
+
+/** @brief Function pointer used to hold a malloc-like routine.
+ *
+ * Providing these to a context will override what memory is used to
+ * allocate the context itself, as well as any allocations which
+ * happen during processing of a file or stream. This can be used by
+ * systems which provide rich malloc tracking routines to override the
+ * internal allocations performed by the library.
+ *
+ * This function is expected to allocate and return a new memory
+ * handle, or `NULL` if allocation failed (which the library will then
+ * handle and return an out-of-memory error).
+ *
+ * If one is provided, both should be provided.
+ * @sa exr_memory_free_func_t
+ */
+memory_allocation_func_t :: proc "c" (bytes: c.size_t) -> rawptr
+
+/** @brief Function pointer used to hold a free-like routine.
+ *
+ * Providing these to a context will override what memory is used to
+ * allocate the context itself, as well as any allocations which
+ * happen during processing of a file or stream. This can be used by
+ * systems which provide rich malloc tracking routines to override the
+ * internal allocations performed by the library.
+ *
+ * This function is expected to return memory to the system, ala free
+ * from the C library.
+ *
+ * If providing one, you probably need to provide both routines.
+ * @sa exr_memory_allocation_func_t
+ */
+memory_free_func_t :: proc "c" (ptr: rawptr)
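+
+/* Example (a sketch, assuming `import "core:c/libc"` is available): global
+ * allocator overrides matching the two pointer types above. `my_alloc` and
+ * `my_free` are hypothetical names; they would be installed with
+ * set_default_memory_routines() declared below.
+ *
+ * \code{.odin}
+ * my_alloc :: proc "c" (bytes: c.size_t) -> rawptr {
+ * 	return libc.malloc(libc.size_t(bytes))
+ * }
+ * my_free :: proc "c" (ptr: rawptr) {
+ * 	libc.free(ptr)
+ * }
+ * set_default_memory_routines(my_alloc, my_free)
+ * \endcode
+ */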
+
+@(link_prefix="exr_", default_calling_convention="c")
+foreign lib {
+ /** @brief Retrieve the current library version. The @p extra string is for
+ * custom installs, and is a static string, do not free the returned
+ * pointer.
+ */
+ get_library_version :: proc(maj, min, patch: ^c.int, extra: ^cstring) ---
+
+ /** @brief Limit the size of image allowed to be parsed or created by
+ * the library.
+ *
+ * This is used as a safety check against corrupt files, but can also
+ * serve to avoid potential issues on machines which have very
+ * constrained RAM.
+ *
+ * These values are among the only globals in the core layer of
+ * OpenEXR. The intended use is for applications to define a global
+ * default, which will be combined with the values provided to the
+ * individual context creation routine. The values are used to check
+ * against parsed header values. This adds some level of safety from
+ * memory overruns where a corrupt file given to the system may cause
+ * a large allocation to happen, enabling buffer overruns or other
+ * potential security issue.
+ *
+ * These global values are combined with the values in
+ * \ref exr_context_initializer_t using the following rules:
+ *
+ * 1. negative values are ignored.
+ *
+ * 2. if either value has a positive (non-zero) value, and the other
+ * has 0, the positive value is preferred.
+ *
+ * 3. If both are positive (non-zero), the minimum value is used.
+ *
+ * 4. If both values are 0, this disables the constrained size checks.
+ *
+ * This function does not fail.
+ */
+ set_default_maximum_image_size :: proc(w, h: c.int) ---
+
+ /** @brief Retrieve the global default maximum image size.
+ *
+ * This function does not fail.
+ */
+ get_default_maximum_image_size :: proc(w, h: ^c.int) ---
+
+ /** @brief Limit the size of an image tile allowed to be parsed or
+ * created by the library.
+ *
+ * Similar to image size, this places constraints on the maximum tile
+ * size as a safety check against bad file data
+ *
+ * This is used as a safety check against corrupt files, but can also
+ * serve to avoid potential issues on machines which have very
+ * constrained RAM
+ *
+ * These values are among the only globals in the core layer of
+ * OpenEXR. The intended use is for applications to define a global
+ * default, which will be combined with the values provided to the
+ * individual context creation routine. The values are used to check
+ * against parsed header values. This adds some level of safety from
+ * memory overruns where a corrupt file given to the system may cause
+ * a large allocation to happen, enabling buffer overruns or other
+ * potential security issue.
+ *
+ * These global values are combined with the values in
+ * \ref exr_context_initializer_t using the following rules:
+ *
+ * 1. negative values are ignored.
+ *
+ * 2. if either value has a positive (non-zero) value, and the other
+ * has 0, the positive value is preferred.
+ *
+ * 3. If both are positive (non-zero), the minimum value is used.
+ *
+ * 4. If both values are 0, this disables the constrained size checks.
+ *
+ * This function does not fail.
+ */
+ set_default_maximum_tile_size :: proc(w, h: c.int) ---
+
+ /** @brief Retrieve the global maximum tile size.
+ *
+ * This function does not fail.
+ */
+ get_default_maximum_tile_size :: proc(w, h: ^c.int) ---
+
+ /** @} */
+
+ /**
+ * @defgroup CompressionDefaults Provides default compression settings
+ * @{
+ */
+
+ /** @brief Assigns a default zip compression level.
+ *
+ * This value may be controlled separately on each part, but this
+ * global control determines the initial value.
+ */
+ set_default_zip_compression_level :: proc(l: c.int) ---
+
+ /** @brief Retrieve the global default zip compression value
+ */
+ get_default_zip_compression_level :: proc(l: ^c.int) ---
+
+ /** @brief Assigns a default DWA compression quality level.
+ *
+ * This value may be controlled separately on each part, but this
+ * global control determines the initial value.
+ */
+ set_default_dwa_compression_quality :: proc(q: f32) ---
+
+ /** @brief Retrieve the global default dwa compression quality
+ */
+ get_default_dwa_compression_quality :: proc(q: ^f32) ---
+
+	/** @brief Allow the user to override the default allocator used for internal
+ * allocations necessary for files, attributes, and other temporary
+ * memory.
+ *
+ * These routines may be overridden when creating a specific context,
+ * however this provides global defaults such that the default can be
+ * applied.
+ *
+ * If either pointer is 0, the appropriate malloc/free routine will be
+ * substituted.
+ *
+ * This function does not fail.
+ */
+ set_default_memory_routines :: proc(alloc_func: memory_allocation_func_t, free_func: memory_free_func_t) ---
+}
\ No newline at end of file
diff --git a/vendor/openexr/exr_chunkio.odin b/vendor/openexr/exr_chunkio.odin
new file mode 100644
index 000000000..612db2cb3
--- /dev/null
+++ b/vendor/openexr/exr_chunkio.odin
@@ -0,0 +1,143 @@
+package vendor_openexr
+
+foreign import lib "exr.lib"
+
+import "core:c"
+
+/**
+ * Struct describing raw data information about a chunk.
+ *
+ * A chunk is the generic term for a pixel data block in an EXR file,
+ * as described in the OpenEXR File Layout documentation. This is
+ * common between all different forms of data that can be stored.
+ */
+chunk_info_t :: struct {
+ idx: i32,
+
+ /** For tiles, this is the tilex; for scans it is the x. */
+ start_x: i32,
+ /** For tiles, this is the tiley; for scans it is the scanline y. */
+ start_y: i32,
+ height: i32, /**< For this chunk. */
+ width: i32, /**< For this chunk. */
+
+ level_x: u8, /**< For tiled files. */
+ level_y: u8, /**< For tiled files. */
+
+ type: u8,
+ compression: u8,
+
+ data_offset: u64,
+ packed_size: u64,
+ unpacked_size: u64,
+
+ sample_count_data_offset: u64,
+ sample_count_table_size: u64,
+}
+
+@(link_prefix="exr_", default_calling_convention="c")
+foreign lib {
+ read_scanline_chunk_info :: proc(ctxt: const_context_t, part_index: c.int, y: c.int, cinfo: ^chunk_info_t) -> result_t ---
+
+ read_tile_chunk_info :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ tilex: c.int,
+ tiley: c.int,
+ levelx: c.int,
+ levely: c.int,
+ cinfo: ^chunk_info_t) -> result_t ---
+
+ /** Read the packed data block for a chunk.
+ *
+	 * This assumes that the buffer pointed to by @p packed_data is at
+	 * least \c packed_size bytes, as reported in the chunk block info.
+ */
+ read_chunk :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ cinfo: ^chunk_info_t,
+ packed_data: rawptr) -> result_t ---
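+
+	/* Example (a sketch, error handling omitted): the typical flow for one
+	 * scanline chunk, with `ctxt` an open read context and `y` a starting
+	 * scanline of the chunk.
+	 *
+	 * \code{.odin}
+	 * cinfo: chunk_info_t
+	 * read_scanline_chunk_info(ctxt, 0, y, &cinfo)
+	 * packed := make([]byte, int(cinfo.packed_size))
+	 * defer delete(packed)
+	 * read_chunk(ctxt, 0, &cinfo, raw_data(packed))
+	 * \endcode
+	 */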
+
+ /**
+ * Read chunk for deep data.
+ *
+ * This allows one to read the packed data, the sample count data, or both.
+ * \c exr_read_chunk also works to read deep data packed data,
+ * but this is a routine to get the sample count table and the packed
+ * data in one go, or if you want to pre-read the sample count data,
+ * you can get just that buffer.
+ */
+ read_deep_chunk :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ cinfo: ^chunk_info_t,
+ packed_data: rawptr,
+ sample_data: rawptr) -> result_t ---
+
+ /**************************************/
+
+ /** Initialize a \c chunk_info_t structure when encoding scanline
+ * data (similar to read but does not do anything with a chunk
+ * table).
+ */
+ write_scanline_chunk_info :: proc(ctxt: context_t, part_index: c.int, y: c.int, cinfo: ^chunk_info_t) -> result_t ---
+
+ /** Initialize a \c chunk_info_t structure when encoding tiled data
+ * (similar to read but does not do anything with a chunk table).
+ */
+ write_tile_chunk_info :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ tilex: c.int,
+ tiley: c.int,
+ levelx: c.int,
+ levely: c.int,
+ cinfo: ^chunk_info_t) -> result_t ---
+
+ /**
+	 * @p y must be the appropriate starting y for the specified chunk.
+ */
+ write_scanline_chunk :: proc(
+ ctxt: context_t,
+		part_index: c.int,
+		y: c.int,
+ packed_data: rawptr,
+ packed_size: u64) -> result_t ---
+
+ /**
+	 * @p y must be the appropriate starting y for the specified chunk.
+ */
+ write_deep_scanline_chunk :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ y: c.int,
+ packed_data: rawptr,
+ packed_size: u64,
+ unpacked_size: u64,
+ sample_data: rawptr,
+ sample_data_size: u64) -> result_t ---
+
+ write_tile_chunk :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ tilex: c.int,
+ tiley: c.int,
+ levelx: c.int,
+ levely: c.int,
+ packed_data: rawptr,
+ packed_size: u64) -> result_t ---
+
+ write_deep_tile_chunk :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ tilex: c.int,
+ tiley: c.int,
+ levelx: c.int,
+ levely: c.int,
+ packed_data: rawptr,
+ packed_size: u64,
+ unpacked_size: u64,
+ sample_data: rawptr,
+ sample_data_size: u64) -> result_t ---
+}
\ No newline at end of file
diff --git a/vendor/openexr/exr_coding.odin b/vendor/openexr/exr_coding.odin
new file mode 100644
index 000000000..337475edf
--- /dev/null
+++ b/vendor/openexr/exr_coding.odin
@@ -0,0 +1,119 @@
+package vendor_openexr
+
+import "core:c"
+/**
+ * Enum for use in a custom allocator in the encode/decode pipelines
+ * (that is, so the implementor knows whether to allocate on which
+ * device based on the buffer disposition).
+ */
+transcoding_pipeline_buffer_id_t :: enum c.int {
+ PACKED,
+ UNPACKED,
+ COMPRESSED,
+ SCRATCH1,
+ SCRATCH2,
+ PACKED_SAMPLES,
+ SAMPLES,
+}
+
+/** @brief Struct for negotiating buffers when decoding/encoding
+ * chunks of data.
+ *
+ * This is generic and meant to negotiate exr data bi-directionally,
+ * in that the same structure is used for both decoding and encoding
+ * chunks for read and write, respectively.
+ *
+ * The first half of the structure will be filled by the library, and
+ * the caller is expected to fill the second half appropriately.
+ */
+coding_channel_info_t :: struct {
+ /**************************************************
+ * Elements below are populated by the library when
+ * decoding is initialized/updated and must be left
+ * untouched when using the default decoder routines.
+ **************************************************/
+
+ /** Channel name.
+ *
+ * This is provided as a convenient reference. Do not free, this
+ * refers to the internal data structure in the context.
+ */
+ channel_name: cstring,
+
+ /** Number of lines for this channel in this chunk.
+ *
+ * May be 0 or less than overall image height based on sampling
+ * (i.e. when in 4:2:0 type sampling)
+ */
+ height: i32,
+
+ /** Width in pixel count.
+ *
+ * May be 0 or less than overall image width based on sampling
+ * (i.e. 4:2:2 will have some channels have fewer values).
+ */
+ width: i32,
+
+ /** Horizontal subsampling information. */
+ x_samples: i32,
+ /** Vertical subsampling information. */
+ y_samples: i32,
+
+ /** Linear flag from channel definition (used by b44). */
+ p_linear: u8,
+
+ /** How many bytes per pixel this channel consumes (2 for float16,
+ * 4 for float32/uint32).
+ */
+ bytes_per_element: i8,
+
+ /** Small form of exr_pixel_type_t enum (EXR_PIXEL_UINT/HALF/FLOAT). */
+ data_type: u16,
+
+ /**************************************************
+ * Elements below must be edited by the caller
+ * to control encoding/decoding.
+ **************************************************/
+
+ /** How many bytes per pixel the input is or output should be
+ * (2 for float16, 4 for float32/uint32). Defaults to same
+ * size as input.
+ */
+ user_bytes_per_element: i16,
+
+ /** Small form of exr_pixel_type_t enum
+ * (EXR_PIXEL_UINT/HALF/FLOAT). Defaults to same type as input.
+ */
+ user_data_type: u16,
+
+ /** Increment to get to next pixel.
+ *
+ * This is in bytes. Must be specified when the decode pointer is
+ * specified (and always for encode).
+ *
+ * This is useful for implementing transcoding generically of
+ * planar or interleaved data. For planar data, where the layout
+ * is RRRRRGGGGGBBBBB, you can pass in 1 * bytes per component.
+ */
+
+ user_pixel_stride: i32,
+
+ /** When \c lines > 1 for a chunk, this is the increment used to get
+ * from beginning of line to beginning of next line.
+ *
+ * This is in bytes. Must be specified when the decode pointer is
+ * specified (and always for encode).
+ */
+ user_line_stride: i32,
+
+ /** This data member has different requirements reading vs
+ * writing. When reading, if this is left as `NULL`, the channel
+ * will be skipped during read and not filled in. During a write
+ * operation, this pointer is considered const and not
+ * modified. To make this more clear, a union is used here.
+ */
+ using _: struct #raw_union {
+ decode_to_ptr: ^u8,
+ encode_from_ptr: ^u8,
+ },
+}
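+
+/* Example (a sketch, not from the upstream header): stride values for
+ * transcoding to/from an interleaved RGBA half-float buffer, where `chan`
+ * is a hypothetical ^coding_channel_info_t taken from a decode or encode
+ * pipeline. Each pixel holds 4 channels of 2 bytes.
+ *
+ * \code{.odin}
+ * chan.user_data_type         = u16(pixel_type_t.HALF)
+ * chan.user_bytes_per_element = 2
+ * chan.user_pixel_stride      = 4 * 2
+ * chan.user_line_stride       = chan.width * 4 * 2
+ * \endcode
+ */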
diff --git a/vendor/openexr/exr_context.odin b/vendor/openexr/exr_context.odin
new file mode 100644
index 000000000..958e30490
--- /dev/null
+++ b/vendor/openexr/exr_context.odin
@@ -0,0 +1,485 @@
+package vendor_openexr
+
+foreign import lib "exr.lib"
+
+import "core:c"
+
+#assert(size_of(c.int) == size_of(b32))
+
+context_t :: distinct rawptr
+const_context_t :: context_t
+
+/**
+ * @defgroup ContextFunctions OpenEXR Context Stream/File Functions
+ *
+ * @brief These are a group of function interfaces used to customize
+ * the error handling, memory allocations, or I/O behavior of an
+ * OpenEXR context.
+ *
+ * @{
+ */
+
+/** @brief Stream error notifier
+ *
+ * This function pointer is provided to the stream functions by the
+ * library such that they can provide a nice error message to the
+ * user during stream operations.
+ */
+stream_error_func_ptr_t :: proc "c" (ctxt: const_context_t, code: result_t, fmt: cstring, #c_vararg args: ..any) -> result_t
+
+/** @brief Error callback function
+ *
+ * Because a file can be read from using many threads at once, it is
+ * difficult to store an error message for later retrieval. As such,
+ * when a file is constructed, a callback function can be provided
+ * which delivers an error message for the calling application to
+ * handle. This will then be delivered on the same thread causing the
+ * error.
+ */
+error_handler_cb_t :: proc "c" (ctxt: const_context_t, code: result_t, msg: cstring)
+
+/** Destroy custom stream function pointer
+ *
+ * Generic callback to clean up user data for custom streams.
+ * This is called when the file is closed and expected not to
+ * error.
+ *
+ * @param failed Indicates the write operation failed; the
+ *               implementor may wish to clean up temporary files
+ */
+destroy_stream_func_ptr_t :: proc "c" (ctxt: const_context_t, userdata: rawptr, failed: c.int)
+
+/** Query stream size function pointer
+ *
+ * Used to query the size of the file, or amount of data representing
+ * the openexr file in the data stream.
+ *
+ * This is used to validate requests against the file. If the size is
+ * unavailable, return -1, which will disable these validation steps
+ * for this file, although appropriate memory safeguards must be in
+ * place in the calling application.
+ */
+query_size_func_ptr_t :: proc "c" (ctxt: const_context_t, userdata: rawptr) -> i64
+
+/** @brief Read custom function pointer
+ *
+ * Used to read data from a custom output. Expects similar semantics to
+ * pread or ReadFile with overlapped data under win32.
+ *
+ * It is required that this provides thread-safe concurrent access to
+ * the same file. If the stream/input layer you are providing does
+ * not have this guarantee, you are responsible for providing
+ * appropriate serialization of requests.
+ *
+ * A file should be expected to be accessed in the following pattern:
+ * - upon open, the header and part information attributes will be read
+ * - upon the first image read request, the offset tables will be read;
+ * multiple threads accessing this concurrently may actually read
+ * these values at the same time
+ * - chunks can then be read in any order as preferred by the
+ * application
+ *
+ * While this should mean that the header will be read in 'stream'
+ * order (no seeks required), no guarantee is made beyond that to
+ * retrieve image/deep data in order. So if the backing file is
+ * truly a stream, it is up to the provider to implement appropriate
+ * caching of data to give the appearance of being able to seek/read
+ * atomically.
+ */
+read_func_ptr_t :: proc "c" (
+ ctxt: const_context_t,
+ userdata: rawptr,
+ buffer: rawptr,
+ sz: u64,
+ offset: u64,
+ error_cb: stream_error_func_ptr_t) -> i64
+
+/** Write custom function pointer
+ *
+ * Used to write data to a custom output. Expects similar semantics to
+ * pwrite or WriteFile with overlapped data under win32.
+ *
+ * It is required that this provides thread-safe concurrent access to
+ * the same file. While it is unlikely that multiple threads will
+ * be used to write data for compressed forms, it is possible.
+ *
+ * A file should be expected to be accessed in the following pattern:
+ * - upon open, the header and part information attributes are constructed.
+ *
+ * - when the write_header routine is called, the header becomes immutable
+ * and is written to the file. This computes the space to store the chunk
+ * offsets, but does not yet write the values.
+ *
+ * - Image chunks are written to the file, and appear in the order
+ * they are written, not in the ordering that is required by the
+ * chunk offset table (unless written in that order). This may vary
+ * slightly if the size of the chunks is not directly known and
+ * tight packing of data is necessary.
+ *
+ * - at file close, the chunk offset tables are written to the file.
+ */
+write_func_ptr_t :: proc "c" (
+ ctxt: const_context_t,
+ userdata: rawptr,
+ buffer: rawptr,
+ sz: u64,
+ offset: u64,
+ error_cb: stream_error_func_ptr_t) -> i64
+
+/** @brief Struct used to pass function pointers into the context
+ * initialization routines.
+ *
+ * This partly exists to avoid the chicken and egg issue around
+ * creating the storage needed for the context on systems which want
+ * to override the malloc/free routines.
+ *
+ * However, it also serves to make a tidier/simpler set of functions
+ * to create and start processing exr files.
+ *
+ * The size member is required for version portability.
+ *
+ * It can be initialized using \c EXR_DEFAULT_CONTEXT_INITIALIZER.
+ *
+ * \code{.c}
+ * exr_context_initializer_t myctxtinit = DEFAULT_CONTEXT_INITIALIZER;
+ * myctxtinit.error_cb = &my_super_cool_error_callback_function;
+ * ...
+ * \endcode
+ *
+ */
+context_initializer_t :: struct {
+ /** @brief Size member to tag initializer for version stability.
+ *
+ * This should be initialized to the size of the current
+ * structure. This allows EXR to add functions or other
+ * initializers in the future, and retain version compatibility
+ */
+ size: c.size_t,
+
+ /** @brief Error callback function pointer
+ *
+ * The error callback is allowed to be `NULL`, and will use a
+ * default print which outputs to \c stderr.
+ *
+ * @sa exr_error_handler_cb_t
+ */
+ error_handler_fn: error_handler_cb_t,
+
+ /** Custom allocator, if `NULL`, will use malloc. @sa memory_allocation_func_t */
+ alloc_fn: memory_allocation_func_t,
+
+ /** Custom deallocator, if `NULL`, will use free. @sa memory_free_func_t */
+ free_fn: memory_free_func_t,
+
+ /** Blind data passed to custom read, size, write, destroy
+ * functions below. Up to user to manage this pointer.
+ */
+ user_data: rawptr,
+
+ /** @brief Custom read routine.
+ *
+ * This is only used during read or update contexts. If this is
+ * provided, it is expected that the caller has previously made
+ * the stream available, and placed whatever stream/file data
+ * into \c user_data above.
+ *
+ * If this is `NULL`, and the context requested is for reading an
+ * exr file, an internal implementation is provided for reading
+ * from normal filesystem files, and the filename provided is
+ * attempted to be opened as such.
+ *
+ * Expected to be `NULL` for a write-only operation, but is ignored
+ * if it is provided.
+ *
+ * For update contexts, both read and write functions must be
+ * provided if either is.
+ *
+ * @sa exr_read_func_ptr_t
+ */
+ read_fn: read_func_ptr_t,
+
+ /** @brief Custom size query routine.
+ *
+ * Used to provide validation when reading header values. If this
+ * is not provided, but a custom read routine is provided, this
+ * will disable some of the validation checks when parsing the
+ * image header.
+ *
+ * Expected to be `NULL` for a write-only operation, but is ignored
+ * if it is provided.
+ *
+ * @sa exr_query_size_func_ptr_t
+ */
+ size_fn: query_size_func_ptr_t,
+
+ /** @brief Custom write routine.
+ *
+ * This is only used during write or update contexts. If this is
+ * provided, it is expected that the caller has previously made
+ * the stream available, and placed whatever stream/file data
+ * into \c user_data above.
+ *
+ * If this is `NULL`, and the context requested is for writing an
+	 * exr file, an internal implementation is provided for writing
+	 * to normal filesystem files, and the filename provided is
+ * attempted to be opened as such.
+ *
+ * For update contexts, both read and write functions must be
+ * provided if either is.
+ *
+ * @sa exr_write_func_ptr_t
+ */
+ write_fn: write_func_ptr_t,
+
+ /** @brief Optional function to destroy the user data block of a custom stream.
+ *
+ * Allows one to free any user allocated data, and close any handles.
+ *
+ * @sa exr_destroy_stream_func_ptr_t
+ * */
+ destroy_fn: destroy_stream_func_ptr_t,
+
+ /** Initialize a field specifying what the maximum image width
+ * allowed by the context is. See exr_set_default_maximum_image_size() to
+ * understand how this interacts with global defaults.
+ */
+ max_image_width: c.int,
+
+ /** Initialize a field specifying what the maximum image height
+ * allowed by the context is. See exr_set_default_maximum_image_size() to
+ * understand how this interacts with global defaults.
+ */
+ max_image_height: c.int,
+
+ /** Initialize a field specifying what the maximum tile width
+ * allowed by the context is. See exr_set_default_maximum_tile_size() to
+ * understand how this interacts with global defaults.
+ */
+ max_tile_width: c.int,
+
+ /** Initialize a field specifying what the maximum tile height
+ * allowed by the context is. See exr_set_default_maximum_tile_size() to
+ * understand how this interacts with global defaults.
+ */
+ max_tile_height: c.int,
+
+ /** Initialize a field specifying what the default zip compression level should be
+	 * for this context. See exr_set_default_zip_compression_level() to
+ * set it for all contexts.
+ */
+ zip_level: c.int,
+
+ /** Initialize the default dwa compression quality. See
+ * exr_set_default_dwa_compression_quality() to set the default
+ * for all contexts.
+ */
+ dwa_quality: f32,
+
+ /** Initialize with a bitwise or of the various context flags
+ */
+ flags: c.int,
+}
+
+/** @brief context flag which will enforce strict header validation
+ * checks and may prevent reading of files which could otherwise be
+ * processed.
+ */
+CONTEXT_FLAG_STRICT_HEADER :: (1 << 0)
+
+/** @brief Disables error messages while parsing headers
+ *
+ * The return values will remain the same, but error reporting will be
+ * skipped. This is only valid for reading contexts
+ */
+CONTEXT_FLAG_SILENT_HEADER_PARSE :: (1 << 1)
+
+/** @brief Disables reconstruction logic upon corrupt / missing data chunks
+ *
+ * This will disable the reconstruction logic that searches through an
+ * incomplete file, and will instead just return errors at read
+ * time. This is only valid for reading contexts
+ */
+CONTEXT_FLAG_DISABLE_CHUNK_RECONSTRUCTION :: (1 << 2)
+
+/** @brief Constant to initialize the context initializer with default values. */
+DEFAULT_CONTEXT_INITIALIZER :: context_initializer_t{zip_level = -2, dwa_quality = -1}
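+
+/* Example (a sketch): the Odin equivalent of the C snippet above, also
+ * setting the \c size member described in the struct documentation.
+ * `my_error_cb` is a hypothetical \c error_handler_cb_t.
+ *
+ * \code{.odin}
+ * myctxtinit := DEFAULT_CONTEXT_INITIALIZER
+ * myctxtinit.size = size_of(context_initializer_t)
+ * myctxtinit.error_handler_fn = my_error_cb
+ * \endcode
+ */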
+
+/** @} */ /* context function pointer declarations */
+
+
+/** @brief Enum describing how default files are handled during write. */
+default_write_mode_t :: enum c.int {
+ WRITE_FILE_DIRECTLY = 0, /**< Overwrite filename provided directly, deleted upon error. */
+ INTERMEDIATE_TEMP_FILE = 1, /**< Create a temporary file, renaming it upon successful write, leaving original upon error */
+}
+
+
+@(link_prefix="exr_", default_calling_convention="c")
+foreign lib {
+ /** @brief Check the magic number of the file and report
+ * `EXR_ERR_SUCCESS` if the file appears to be a valid file (or at least
+ * has the correct magic number and can be read).
+ */
+ test_file_header :: proc(filename: cstring, ctxtdata: ^context_initializer_t) -> result_t ---
+
+ /** @brief Close and free any internally allocated memory,
+ * calling any provided destroy function for custom streams.
+ *
+ * If the file was opened for write, first save the chunk offsets
+ * or any other unwritten data.
+ */
+ finish :: proc(ctxt: ^context_t) -> result_t ---
+
+ /** @brief Create and initialize a read-only exr read context.
+ *
+ * If a custom read function is provided, the filename is for
+ * informational purposes only, the system assumes the user has
+ * previously opened a stream, file, or whatever and placed relevant
+ * data in userdata to access that.
+ *
+ * One notable attribute of the context is that once it has been
+ * created and returned a successful code, it has parsed all the
+ * header data. This is done as one step such that it is easier to
+ * provide a safe context for multiple threads to request data from
+ * the same context concurrently.
+ *
+ * Once finished reading data, use exr_finish() to clean up
+ * the context.
+ *
+ * If you have custom I/O requirements, see the initializer context
+ * documentation \ref exr_context_initializer_t. The @p ctxtdata parameter
+ * is optional, if `NULL`, default values will be used.
+ */
+ start_read :: proc(
+ ctxt: ^context_t,
+ filename: cstring,
+ ctxtdata: ^context_initializer_t) -> result_t ---
+
+ /** @brief Create and initialize a write-only context.
+ *
+ * If a custom write function is provided, the filename is for
+ * informational purposes only, and the @p default_mode parameter will be
+ * ignored. As such, the system assumes the user has previously opened
+ * a stream, file, or whatever and placed relevant data in userdata to
+ * access that.
+ *
+ * Multi-Threading: To avoid issues with creating multi-part EXR
+ * files, the library approaches writing as a multi-step process, so
+ * the same concurrent guarantees can not be made for writing a
+ * file. The steps are:
+ *
+ * 1. Context creation (this function)
+ *
+ * 2. Part definition (required attributes and additional metadata)
+ *
+ * 3. Transition to writing data (this "commits" the part definitions,
+ * any changes requested after will result in an error)
+ *
+ * 4. Write part data in sequential order of parts (part<sub>0</sub>
+ * -> part<sub>N-1</sub>).
+ *
+ * 5. Within each part, multiple threads can be encoding and writing
+ * data concurrently. For some EXR part definitions, this may be able
+ * to write data concurrently when it can predict the chunk sizes, or
+ * data is allowed to be padded. For others, it may need to
+ * temporarily cache chunks until the data is received to flush in
+ * order. The concurrency around this is handled by the library
+ *
+ * 6. Once finished writing data, use exr_finish() to clean
+ * up the context, which will flush any unwritten data such as the
+ * final chunk offset tables, and handle the temporary file flags.
+ *
+ * If you have custom I/O requirements, see the initializer context
+ * documentation \ref exr_context_initializer_t. The @p ctxtdata
+ * parameter is optional, if `NULL`, default values will be used.
+ */
+ start_write :: proc(
+ ctxt: ^context_t,
+ filename: cstring,
+ default_mode: default_write_mode_t,
+ ctxtdata: ^context_initializer_t) -> result_t ---
+
+ /** @brief Create a new context for updating an exr file in place.
+ *
+ * This is a custom mode that allows one to modify the value of a
+ * metadata entry, although not to change the size of the header, or
+ * any of the image data.
+ *
+ * If you have custom I/O requirements, see the initializer context
+ * documentation \ref exr_context_initializer_t. The @p ctxtdata parameter
+ * is optional, if `NULL`, default values will be used.
+ */
+ start_inplace_header_update :: proc(
+ ctxt: ^context_t,
+ filename: cstring,
+ ctxtdata: ^context_initializer_t) -> result_t ---
+
+ /** @brief Retrieve the file name the context is for as provided
+ * during the start routine.
+ *
+ * Do not free the resulting string.
+ */
+
+ get_file_name :: proc(ctxt: const_context_t, name: ^cstring) -> result_t ---
+
+ /** @brief Query the user data the context was constructed with. This
+ * is perhaps useful in the error handler callback to jump back into
+ * an object the user controls.
+ */
+
+ get_user_data :: proc(ctxt: const_context_t, userdata: ^rawptr) -> result_t ---
+
+ /** Any opaque attribute data entry of the specified type is tagged
+ * with these functions enabling downstream users to unpack (or pack)
+ * the data.
+ *
+ * The library handles the memory packed data internally, but the
+ * handler is expected to allocate and manage memory for the
+ * *unpacked* buffer (the library will call the destroy function).
+ *
+ * NB: the pack function will be called twice (unless there is a
+ * memory failure), the first with a `NULL` buffer, requesting the
+ * maximum size (or exact size if known) for the packed buffer, then
+ * the second to fill the output packed buffer, at which point the
+ * size can be re-updated to have the final, precise size to put into
+ * the file.
+ */
+ register_attr_type_handler :: proc(
+ ctxt: context_t,
+ type: cstring,
+ unpack_func_ptr: proc "c" (
+ ctxt: context_t,
+ data: rawptr,
+ attrsize: i32,
+ outsize: ^i32,
+ outbuffer: ^rawptr) -> result_t,
+ pack_func_ptr: proc "c" (
+ ctxt: context_t,
+ data: rawptr,
+ datasize: i32,
+ outsize: ^i32,
+ outbuffer: rawptr) -> result_t,
+ destroy_unpacked_func_ptr: proc "c" (
+ ctxt: context_t, data: rawptr, datasize: i32),
+ ) -> result_t ---
+
+ /** @brief Enable long name support in the output context */
+
+ set_longname_support :: proc(ctxt: context_t, onoff: b32) -> result_t ---
+
+ /** @brief Write the header data.
+ *
+ * Opening a new output file has a small initialization state problem
+ * compared to opening for read/update: we need to enable the user
+ * to specify an arbitrary set of metadata across an arbitrary number
+ * of parts. To avoid having to create the list of parts and entire
+ * metadata up front, prior to calling the above exr_start_write(),
+ * allow the data to be set, then once this is called, it switches
+ * into a mode where the library assumes the data is now valid.
+ *
+ * It will recompute the number of chunks that will be written, and
+ * reset the chunk offsets. If you modify file attributes or part
+ * information after a call to this, it will error.
+ */
+ write_header :: proc(ctxt: context_t) -> result_t ---
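+
+	/* Example (a sketch, error handling omitted) of the write ordering
+	 * described for exr_start_write(); part/attribute definition calls live
+	 * in exr_part.odin and chunk writes in exr_chunkio.odin.
+	 *
+	 * \code{.odin}
+	 * ctxt: context_t
+	 * start_write(&ctxt, "out.exr", .WRITE_FILE_DIRECTLY, nil)
+	 * // ... define parts and their required attributes ...
+	 * write_header(ctxt)
+	 * // ... write chunks for each part in sequential part order ...
+	 * finish(&ctxt)
+	 * \endcode
+	 */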
+}
\ No newline at end of file
diff --git a/vendor/openexr/exr_debug.odin b/vendor/openexr/exr_debug.odin
new file mode 100644
index 000000000..f2c8e18cb
--- /dev/null
+++ b/vendor/openexr/exr_debug.odin
@@ -0,0 +1,8 @@
+package vendor_openexr
+
+foreign import lib "exr.lib"
+
+@(link_prefix="exr_", default_calling_convention="c")
+foreign lib {
+ print_context_info :: proc(c: const_context_t, verbose: b32) -> result_t ---
+}
\ No newline at end of file
diff --git a/vendor/openexr/exr_decode.odin b/vendor/openexr/exr_decode.odin
new file mode 100644
index 000000000..7eca819f7
--- /dev/null
+++ b/vendor/openexr/exr_decode.odin
@@ -0,0 +1,288 @@
+package vendor_openexr
+
+foreign import lib "exr.lib"
+
+import "core:c"
+
+/** Can be bit-wise or'ed into the decode_flags in the decode pipeline.
+ *
+ * Indicates that the sample count table should be decoded to an
+ * individual sample count list (n, m, o, ...), with an extra int at
+ * the end containing the total samples.
+ *
+ * Without this (i.e. a value of 0 in that bit), indicates the sample
+ * count table should be decoded to a cumulative list (n, n+m, n+m+o,
+ * ...), which is the on-disk representation.
+ */
+DECODE_SAMPLE_COUNTS_AS_INDIVIDUAL :: u16(1 << 0)
+
+/** Can be bit-wise or'ed into the decode_flags in the decode pipeline.
+ *
+ * Indicates that the data in the channel pointers to decode to is not
+ * a direct pointer, but instead is a pointer-to-pointers. In this
+ * mode, the user_pixel_stride and user_line_stride are used to
+ * advance the pointer offsets for each pixel in the output, but the
+ * user_bytes_per_element and user_data_type are used to put
+ * (successive) entries into each destination pointer (if not `NULL`).
+ *
+ * So each channel pointer must then point to an array of
+ * chunk.width * chunk.height pointers.
+ *
+ * With this, you can extract only the desired pixels (although all the
+ * pixels must still be decompressed initially), to handle operations
+ * like proxying where you might want to read every other pixel.
+ *
+ * If this is NOT set (0), the default unpacking routine assumes the
+ * data will be planar and contiguous (each channel is a separate
+ * memory block), ignoring user_line_stride and user_pixel_stride.
+ */
+DECODE_NON_IMAGE_DATA_AS_POINTERS :: u16(1 << 1)
+
+/**
+ * When reading non-image data (i.e. deep), only read the sample table.
+ */
+DECODE_SAMPLE_DATA_ONLY :: u16(1 << 2)
+
+/**
+ * Struct meant to be used on a per-thread basis for reading exr data
+ *
+ * As should be obvious, this structure is NOT thread safe, but rather
+ * meant to be used by separate threads, which can all be accessing
+ * the same context concurrently.
+ */
+decode_pipeline_t :: struct {
+ /** The output channel information for this chunk.
+ *
+ * User is expected to fill the channel pointers for the desired
+ * output channels (any that are `NULL` will be skipped) if you are
+ * going to use exr_decoding_choose_default_routines(). If all that is
+ * desired is to read and decompress the data, this can be left
+ * uninitialized.
+ *
+ * Describes the channel information. This information is
+ * allocated dynamically during exr_decoding_initialize().
+ */
+ channels: [^]coding_channel_info_t,
+ channel_count: i16,
+
+ /** Decode flags to control the behavior. */
+ decode_flags: u16,
+
+ /** Copy of the parameters given to the initialize/update for
+ * convenience.
+ */
+ part_index: c.int,
+ ctx: const_context_t,
+ chunk: chunk_info_t,
+
+ /** Can be used by the user to pass custom context data through
+ * the decode pipeline.
+ */
+ decoding_user_data: rawptr,
+
+ /** The (compressed) buffer.
+ *
+ * If `NULL`, will be allocated during the run of the pipeline.
+ *
+	 * If the caller wishes to take control of the buffer, simply
+ * adopt the pointer and set it to `NULL` here. Be cognizant of any
+ * custom allocators.
+ */
+ packed_buffer: rawptr,
+
+	/** Used when re-using the same decode pipeline struct, to know
+	 * whether the current buffer is large enough if the chunk size changed.
+ */
+ packed_alloc_size: c.size_t,
+
+ /** The decompressed buffer (unpacked_size from the chunk block
+ * info), but still packed into storage order, only needed for
+ * compressed files.
+ *
+ * If `NULL`, will be allocated during the run of the pipeline when
+ * needed.
+ *
+	 * If the caller wishes to take control of the buffer, simply
+ * adopt the pointer and set it to `NULL` here. Be cognizant of any
+ * custom allocators.
+ */
+ unpacked_buffer: rawptr,
+
+	/** Used when re-using the same decode pipeline struct, to know
+	 * whether the current buffer is large enough if the chunk size changed.
+ */
+ unpacked_alloc_size: c.size_t,
+
+ /** For deep or other non-image data: packed sample table
+ * (compressed, raw on disk representation).
+ */
+ packed_sample_count_table: rawptr,
+ packed_sample_count_alloc_size: c.size_t,
+
+ /** Usable, native sample count table. Depending on the flag set
+ * above, will be decoded to either a cumulative list (n, n+m,
+ * n+m+o, ...), or an individual table (n, m, o, ...). As an
+ * optimization, if the latter individual count table is chosen,
+ * an extra int32_t will be allocated at the end of the table to
+ * contain the total count of samples, so the table will be n+1
+ * samples in size.
+ */
+ sample_count_table: [^]i32,
+ sample_count_alloc_size: c.size_t,
+
+ /** A scratch buffer of unpacked_size for intermediate results.
+ *
+ * If `NULL`, will be allocated during the run of the pipeline when
+ * needed.
+ *
+	 * If the caller wishes to take control of the buffer, simply
+ * adopt the pointer and set it to `NULL` here. Be cognizant of any
+ * custom allocators.
+ */
+ scratch_buffer_1: rawptr,
+
+	/** Used when re-using the same decode pipeline struct, to know
+	 * whether the current buffer is large enough if the chunk size changed.
+ */
+ scratch_alloc_size_1: c.size_t,
+
+ /** Some decompression routines may need a second scratch buffer (zlib).
+ *
+ * If `NULL`, will be allocated during the run of the pipeline when
+ * needed.
+ *
+	 * If the caller wishes to take control of the buffer, simply
+ * adopt the pointer and set it to `NULL` here. Be cognizant of any
+ * custom allocators.
+ */
+ scratch_buffer_2: rawptr,
+
+	/** Used when re-using the same decode pipeline struct, to know
+	 * whether the current buffer is large enough if the chunk size changed.
+ */
+ scratch_alloc_size_2: c.size_t,
+
+ /** Enable a custom allocator for the different buffers (if
+ * decoding on a GPU). If `NULL`, will use the allocator from the
+ * context.
+ */
+ alloc_fn: proc "c" (transcoding_pipeline_buffer_id_t, c.size_t) -> rawptr,
+
+	/** Enable a custom deallocator for the different buffers (if
+	 * decoding on a GPU). If `NULL`, will use the free routine from the
+	 * context.
+ */
+ free_fn: proc "c" (transcoding_pipeline_buffer_id_t, rawptr),
+
+ /** Function chosen to read chunk data from the context.
+ *
+ * Initialized to a default generic read routine, may be updated
+ * based on channel information when
+ * exr_decoding_choose_default_routines() is called. This is done such that
+ * if the file is uncompressed and the output channel data is
+ * planar and the same type, the read function can read straight
+ * into the output channels, getting closer to a zero-copy
+ * operation. Otherwise a more traditional read, decompress, then
+ * unpack pipeline will be used with a default reader.
+ *
+ * This is allowed to be overridden, but probably is not necessary
+ * in most scenarios.
+ */
+ read_fn: proc "c" (pipeline: ^decode_pipeline_t) -> result_t,
+
+ /** Function chosen based on the compression type of the part to
+ * decompress data.
+ *
+ * If the user has a custom decompression method for the
+ * compression on this part, this can be changed after
+ * initialization.
+ *
+ * If only compressed data is desired, then assign this to `NULL`
+ * after initialization.
+ */
+ decompress_fn: proc "c" (pipeline: ^decode_pipeline_t) -> result_t,
+
+ /** Function which can be provided if you have bespoke handling for
+ * non-image data and need to re-allocate the data to handle the
+ * about-to-be unpacked data.
+ *
+ * If left `NULL`, will assume the memory pointed to by the channel
+ * pointers is sufficient.
+ */
+ realloc_nonimage_data_fn: proc "c" (pipeline: ^decode_pipeline_t) -> result_t,
+
+ /** Function chosen based on the output layout of the channels of the part to
+ * decompress data.
+ *
+ * This will be `NULL` after initialization, until the user
+ * specifies a custom routine, or initializes the channel data and
+ * calls exr_decoding_choose_default_routines().
+ *
+ * If only compressed data is desired, then leave or assign this
+ * to `NULL` after initialization.
+ */
+ unpack_and_convert_fn: proc "c" (pipeline: ^decode_pipeline_t) -> result_t,
+
+ /** Small stash of channel info values. This is faster than calling
+ * malloc when the channel count in the part is small (RGBAZ),
+	 * which is super common; however, if there are a large number of
+ * channels, it will allocate space for that, so do not rely on
+ * this being used.
+ */
+ _quick_chan_store: [5]coding_channel_info_t,
+}
+
+DECODE_PIPELINE_INITIALIZER :: decode_pipeline_t{}
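+
+/* Example (a sketch, error handling omitted) of the per-chunk decode flow,
+ * with `ctxt`, `part`, and `cinfo` assumed to come from exr_context.odin and
+ * exr_chunkio.odin:
+ *
+ * \code{.odin}
+ * decode := DECODE_PIPELINE_INITIALIZER
+ * decoding_initialize(ctxt, part, &cinfo, &decode)
+ * // fill decode.channels[i].decode_to_ptr and the user_* strides here
+ * decoding_choose_default_routines(ctxt, part, &decode)
+ * decoding_run(ctxt, part, &decode)
+ * // for further chunks of the same part: decoding_update() then decoding_run()
+ * decoding_destroy(ctxt, &decode)
+ * \endcode
+ */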
+
+
+@(link_prefix="exr_", default_calling_convention="c")
+foreign lib {
+ /** Initialize the decoding pipeline structure with the channel info
+ * for the specified part, and the first block to be read.
+ *
+ * NB: The decode->unpack_and_convert_fn field will be `NULL` after this. If that
+ * stage is desired, initialize the channel output information and
+ * call exr_decoding_choose_default_routines().
+ */
+ decoding_initialize :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ cinfo: ^chunk_info_t,
+ decode: ^decode_pipeline_t) -> result_t ---
+
+ /** Given an initialized decode pipeline, find appropriate functions
+ * to read and shuffle/convert data into the defined channel outputs.
+ *
+ * Calling this is not required if custom routines will be used, or if
+ * just the raw compressed data is desired. Although in that scenario,
+ * it is probably easier to just read the chunk directly using
+ * exr_read_chunk().
+ */
+ decoding_choose_default_routines :: proc(
+ ctxt: const_context_t, part_index: c.int, decode: ^decode_pipeline_t) -> result_t ---
+
+ /** Given a decode pipeline previously initialized, update it for the
+ * new chunk to be read.
+ *
+ * In this manner, memory buffers can be re-used to avoid continual
+ * malloc/free calls. Further, it allows the previous choices for
+ * the various functions to be quickly re-used.
+ */
+ decoding_update :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ cinfo: ^chunk_info_t,
+ decode: ^decode_pipeline_t) -> result_t ---
+
+ /** Execute the decoding pipeline. */
+ decoding_run :: proc(
+ ctxt: const_context_t, part_index: c.int, decode: ^decode_pipeline_t) -> result_t ---
+
+ /** Free any intermediate memory in the decoding pipeline.
+ *
+ * This does *not* free any pointers referred to in the channel info
+ * areas, but rather only the intermediate buffers and memory needed
+ * for the structure itself.
+ */
+ decoding_destroy :: proc(ctxt: const_context_t, decode: ^decode_pipeline_t) -> result_t ---
+} \ No newline at end of file
diff --git a/vendor/openexr/exr_encode.odin b/vendor/openexr/exr_encode.odin
new file mode 100644
index 000000000..402aaba81
--- /dev/null
+++ b/vendor/openexr/exr_encode.odin
@@ -0,0 +1,319 @@
+package vendor_openexr
+
+foreign import lib "exr.lib"
+
+import "core:c"
+
+/** Can be bit-wise or'ed into the encode_flags in the encode pipeline.
+ *
+ * Indicates that the sample count table should be encoded from an
+ * individual sample count list (n, m, o, ...), meaning the library will
+ * have to compute the cumulative counts on the fly.
+ *
+ * Without this (i.e. a value of 0 in that bit), the sample count
+ * table is assumed to already be a cumulative list (n, n+m, n+m+o, ...),
+ * which is the on-disk representation.
+ */
+ENCODE_DATA_SAMPLE_COUNTS_ARE_INDIVIDUAL :: u16(1 << 0)
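+
+// For example, per-pixel deep sample counts of (2, 1, 3) are passed as the
+// individual list {2, 1, 3} when this flag is set, and as the cumulative,
+// on-disk form {2, 3, 6} when it is not.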
+
+/** Can be bit-wise or'ed into the encode_flags in the encode pipeline.
+ *
+ * Indicates that the data in the channel pointers to encode from is not
+ * a direct pointer, but instead is a pointer-to-pointers. In this
+ * mode, the user_pixel_stride and user_line_stride are used to
+ * advance the pointer offsets for each pixel in the output, but the
+ * user_bytes_per_element and user_data_type are used to put
+ * (successive) entries into each destination.
+ *
+ * So each channel pointer must then point to an array of
+ * chunk.width * chunk.height pointers. If an entry is
+ * `NULL`, 0 samples will be placed in the output.
+ *
+ * If this is NOT set (0), the default packing routine assumes the
+ * data will be planar and contiguous (each channel is a separate
+ * memory block), ignoring user_line_stride and user_pixel_stride and
+ * advancing only by the sample counts and bytes per element.
+ */
+ENCODE_NON_IMAGE_DATA_AS_POINTERS :: u16(1 << 1)
+
+/** Struct meant to be used on a per-thread basis for writing exr data.
+ *
+ * As should be obvious, this structure is NOT thread safe, but rather
+ * is meant to be used by separate threads, each of which can access
+ * the same context concurrently.
+ */
+encode_pipeline_t :: struct {
+ /** The output channel information for this chunk.
+ *
+ * User is expected to fill the channel pointers for the input
+ * channels. For writing, all channels must be initialized prior
+ * to using exr_encoding_choose_default_routines(). If a custom pack routine
+ * is written, that is up to the implementor.
+ *
+ * Describes the channel information. This information is
+ * allocated dynamically during exr_encoding_initialize().
+ */
+ channels: [^]coding_channel_info_t,
+ channel_count: i16,
+
+ /** Encode flags to control the behavior. */
+ encode_flags: u16,
+
+ /** Copy of the parameters given to the initialize/update for convenience. */
+ part_index: c.int,
+ ctx: const_context_t,
+ chunk: chunk_info_t,
+
+ /** Can be used by the user to pass custom context data through
+ * the encode pipeline.
+ */
+ encoding_user_data: rawptr,
+
+ /** The packed buffer where the individual channels have been packed.
+ *
+ * If `NULL`, will be allocated during the run of the pipeline.
+ *
+ * If the caller wishes to take control of the buffer, simply
+ * adopt the pointer and set it to `NULL` here. Be cognizant of any
+ * custom allocators.
+ */
+ packed_buffer: rawptr,
+
+ /** The number of actual bytes in the packed buffer (which may differ from the allocation size). */
+ packed_bytes: u64,
+
+ /** Allocated size of the packed buffer. Used when re-using the same
+ * encode pipeline struct, to know whether the current buffer is large
+ * enough if the chunk changes size.
+ */
+ packed_alloc_size: c.size_t,
+
+ /** For deep data. NB: the members NOT const because we need to
+ * temporarily swap it to xdr order and restore it (to avoid a
+ * duplicate buffer allocation).
+ *
+ * Depending on the flag set above, will be treated either as a
+ * cumulative list (n, n+m, n+m+o, ...), or an individual table
+ * (n, m, o, ...). */
+ sample_count_table: [^]i32,
+
+ /** Allocated table size (to avoid re-allocations). Number of
+ * samples must always be width * height for the chunk.
+ */
+ sample_count_alloc_size: c.size_t,
+
+ /** Packed sample table (compressed, raw on disk representation)
+ * for deep or other non-image data.
+ */
+ packed_sample_count_table: rawptr,
+
+ /** Number of bytes to write (actual size) for the
+ * packed_sample_count_table.
+ */
+ packed_sample_count_bytes: c.size_t,
+
+ /** Allocated size (to avoid re-allocations) for the
+ * packed_sample_count_table.
+ */
+ packed_sample_count_alloc_size: c.size_t,
+
+ /** The compressed buffer, only needed for compressed files.
+ *
+ * If `NULL`, will be allocated during the run of the pipeline when
+ * needed.
+ *
+ * If the caller wishes to take control of the buffer, simply
+ * adopt the pointer and set it to `NULL` here. Be cognizant of any
+ * custom allocators.
+ */
+ compressed_buffer: rawptr,
+
+ /** Must be filled in as the pipeline runs to inform the writing
+ * software about the compressed size of the chunk (if the part is
+ * uncompressed, or the compression would make the chunk larger, this
+ * is expected to match the packed size).
+ */
+ compressed_bytes: c.size_t,
+
+ /** Allocated size of the compressed buffer. Used when re-using the
+ * same encode pipeline struct, to know whether the current buffer is
+ * large enough if the chunk changes size.
+ */
+ compressed_alloc_size: c.size_t,
+
+ /** A scratch buffer for intermediate results.
+ *
+ * If `NULL`, will be allocated during the run of the pipeline when
+ * needed.
+ *
+ * If the caller wishes to take control of the buffer, simply
+ * adopt the pointer and set it to `NULL` here. Be cognizant of any
+ * custom allocators.
+ */
+ scratch_buffer_1: rawptr,
+
+ /** Allocated size of scratch_buffer_1. Used when re-using the same
+ * encode pipeline struct, to know whether the current buffer is large
+ * enough if the chunk changes size.
+ */
+ scratch_alloc_size_1: c.size_t,
+
+ /** Some compression routines may need a second scratch buffer.
+ *
+ * If `NULL`, will be allocated during the run of the pipeline when
+ * needed.
+ *
+ * If the caller wishes to take control of the buffer, simply
+ * adopt the pointer and set it to `NULL` here. Be cognizant of any
+ * custom allocators.
+ */
+ scratch_buffer_2: rawptr,
+
+ /** Allocated size of scratch_buffer_2. Used when re-using the same
+ * encode pipeline struct, to know whether the current buffer is large
+ * enough if the chunk changes size.
+ */
+ scratch_alloc_size_2: c.size_t,
+
+ /** Enable a custom allocator for the different buffers (if
+ * encoding on a GPU). If `NULL`, will use the allocator from the
+ * context.
+ */
+ alloc_fn: proc "c" (transcoding_pipeline_buffer_id_t, c.size_t) -> rawptr,
+
+ /** Enable a custom deallocator for the different buffers (if
+ * encoding on a GPU). If `NULL`, will use the deallocator from the
+ * context.
+ */
+ free_fn: proc "c" (transcoding_pipeline_buffer_id_t, rawptr),
+
+ /** Function chosen based on the layout of the channels of the part to
+ * pack and convert data.
+ *
+ * If the user has a custom packing method for this part, this can be
+ * changed after initialization.
+ */
+ convert_and_pack_fn: proc "c" (pipeline: ^encode_pipeline_t) -> result_t,
+
+ /** Function chosen based on the compression type of the part to
+ * compress data.
+ *
+ * If the user has a custom compression method for the compression
+ * type on this part, this can be changed after initialization.
+ */
+ compress_fn: proc "c" (pipeline: ^encode_pipeline_t) -> result_t,
+
+ /** This routine is used when waiting for other threads to finish
+ * writing previous chunks such that this thread can write this
+ * chunk. This is used for parts which have a specified chunk
+ * ordering (increasing/decreasing y) and the chunks cannot be
+ * written in an arbitrary order (as they could be for, e.g., uncompressed data).
+ *
+ * This enables the calling application to contribute thread time
+ * to other computation as needed, or just use something like
+ * pthread_yield().
+ *
+ * By default, this routine will be assigned to a function which
+ * returns an error, failing the encode immediately. In this way,
+ * it assumes that there is only one thread being used for
+ * writing.
+ *
+ * It is up to the user to provide an appropriate routine if
+ * performing multi-threaded writing.
+ */
+ yield_until_ready_fn: proc "c" (pipeline: ^encode_pipeline_t) -> result_t,
+
+ /** Function chosen to write chunk data to the context.
+ *
+ * This is allowed to be overridden, but probably is not necessary
+ * in most scenarios.
+ */
+ write_fn: proc "c" (pipeline: ^encode_pipeline_t) -> result_t,
+
+ /** Small stash of channel info values. This is faster than calling
+ * malloc when the channel count in the part is small (RGBAZ), which
+ * is the common case; if there are more channels, space will be
+ * allocated instead, so do not rely on this stash being used.
+ */
+ _quick_chan_store: [5]coding_channel_info_t,
+}
+
+ENCODE_PIPELINE_INITIALIZER :: encode_pipeline_t{}
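+
+/* A minimal usage sketch for the encode pipeline above; `ctxt`, `part`,
+ * `cinfo`, and `next_cinfo` are illustrative names, and the chunk info is
+ * assumed to come from a `write_scanline_chunk_info` binding in the chunk
+ * I/O file. Error handling is omitted.
+ *
+ *    enc := ENCODE_PIPELINE_INITIALIZER
+ *    encoding_initialize(ctxt, part, &cinfo, &enc)
+ *    // point each entry of enc.channels at the source pixel data,
+ *    // then pick the default pack/compress/write routines:
+ *    encoding_choose_default_routines(ctxt, part, &enc)
+ *    encoding_run(ctxt, part, &enc)
+ *
+ *    // for each further chunk of the part:
+ *    encoding_update(ctxt, part, &next_cinfo, &enc)
+ *    encoding_run(ctxt, part, &enc)
+ *
+ *    // once all chunks have been written:
+ *    encoding_destroy(ctxt, &enc)
+ */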
+
+
+@(link_prefix="exr_", default_calling_convention="c")
+foreign lib {
+ /** Initialize the encoding pipeline structure with the channel info
+ * for the specified part based on the chunk to be written.
+ *
+ * NB: The encode_pipe->convert_and_pack_fn field will be `NULL` after this. If that
+ * stage is desired, initialize the channel output information and
+ * call exr_encoding_choose_default_routines().
+ */
+ encoding_initialize :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ cinfo: ^chunk_info_t,
+ encode_pipe: ^encode_pipeline_t) -> result_t ---
+
+ /** Given an initialized encode pipeline, find an appropriate
+ * function to shuffle and convert data from the defined channel
+ * sources into the packed buffer.
+ *
+ * Calling this is not required if a custom routine will be used, or
+ * if already-packed data will be supplied directly.
+ */
+ encoding_choose_default_routines :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ encode_pipe: ^encode_pipeline_t) -> result_t ---
+
+ /** Given an encode pipeline previously initialized, update it for the
+ * new chunk to be written.
+ *
+ * In this manner, memory buffers can be re-used to avoid continual
+ * malloc/free calls. Further, it allows the previous choices for
+ * the various functions to be quickly re-used.
+ */
+ encoding_update :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ cinfo: ^chunk_info_t,
+ encode_pipe: ^encode_pipeline_t) -> result_t ---
+
+ /** Execute the encoding pipeline. */
+ encoding_run :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ encode_pipe: ^encode_pipeline_t) -> result_t ---
+
+ /** Free any intermediate memory in the encoding pipeline.
+ *
+ * This does NOT free any pointers referred to in the channel info
+ * areas, but rather only the intermediate buffers and memory needed
+ * for the structure itself.
+ */
+ encoding_destroy :: proc(ctxt: const_context_t, encode_pipe: ^encode_pipeline_t) -> result_t ---
+} \ No newline at end of file
diff --git a/vendor/openexr/exr_errors.odin b/vendor/openexr/exr_errors.odin
new file mode 100644
index 000000000..cf2194756
--- /dev/null
+++ b/vendor/openexr/exr_errors.odin
@@ -0,0 +1,62 @@
+package vendor_openexr
+
+foreign import lib "exr.lib"
+
+import "core:c"
+#assert(size_of(c.int) == size_of(i32))
+
+/** Error codes that may be returned by various functions. */
+error_code_t :: enum i32 {
+ SUCCESS = 0,
+ OUT_OF_MEMORY,
+ MISSING_CONTEXT_ARG,
+ INVALID_ARGUMENT,
+ ARGUMENT_OUT_OF_RANGE,
+ FILE_ACCESS,
+ FILE_BAD_HEADER,
+ NOT_OPEN_READ,
+ NOT_OPEN_WRITE,
+ HEADER_NOT_WRITTEN,
+ READ_IO,
+ WRITE_IO,
+ NAME_TOO_LONG,
+ MISSING_REQ_ATTR,
+ INVALID_ATTR,
+ NO_ATTR_BY_NAME,
+ ATTR_TYPE_MISMATCH,
+ ATTR_SIZE_MISMATCH,
+ SCAN_TILE_MIXEDAPI,
+ TILE_SCAN_MIXEDAPI,
+ MODIFY_SIZE_CHANGE,
+ ALREADY_WROTE_ATTRS,
+ BAD_CHUNK_LEADER,
+ CORRUPT_CHUNK,
+ INCORRECT_PART,
+ INCORRECT_CHUNK,
+ USE_SCAN_DEEP_WRITE,
+ USE_TILE_DEEP_WRITE,
+ USE_SCAN_NONDEEP_WRITE,
+ USE_TILE_NONDEEP_WRITE,
+ INVALID_SAMPLE_DATA,
+ FEATURE_NOT_IMPLEMENTED,
+ UNKNOWN,
+}
+
+/** Return type for all functions. */
+result_t :: error_code_t
+
+
+@(link_prefix="exr_", default_calling_convention="c")
+foreign lib {
+ /** @brief Return a static string corresponding to the specified error code.
+ *
+ * The string should not be freed (it is compiled into the binary).
+ */
+ get_default_error_message :: proc(code: result_t) -> cstring ---
+
+ /** @brief Return a static string with the name of the specified error code.
+ *
+ * The string should not be freed (it is compiled into the binary).
+ */
+ get_error_code_as_string :: proc(code: result_t) -> cstring ---
+}
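+
+/* A small sketch of the intended error-handling pattern: every call returns a
+ * result_t, which can be checked against .SUCCESS and turned into a readable
+ * message. The `fmt` import and the `must` helper are illustrative and not
+ * part of these bindings.
+ *
+ *    import "core:fmt"
+ *
+ *    must :: proc(res: result_t, loc := #caller_location) {
+ *        if res != .SUCCESS {
+ *            fmt.panicf("OpenEXR error at %v: %v", loc, get_default_error_message(res))
+ *        }
+ *    }
+ */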
diff --git a/vendor/openexr/exr_part.odin b/vendor/openexr/exr_part.odin
new file mode 100644
index 000000000..24e1eb081
--- /dev/null
+++ b/vendor/openexr/exr_part.odin
@@ -0,0 +1,733 @@
+package vendor_openexr
+
+foreign import lib "exr.lib"
+
+import "core:c"
+
+attr_list_access_mode_t :: enum c.int {
+ FILE_ORDER, /**< Order they appear in the file */
+ SORTED_ORDER, /**< Alphabetically sorted */
+}
+
+@(link_prefix="exr_", default_calling_convention="c")
+foreign lib {
+ /** @brief Query how many parts are in the file. */
+ get_count :: proc (ctxt: const_context_t, count: ^c.int) -> result_t ---
+
+ /** @brief Query the part name for the specified part.
+ *
+ * NB: If this file is a single part file and the name has not been set,
+ * `NULL` will be returned in @p out.
+ */
+ get_name :: proc(ctxt: const_context_t, part_index: c.int, out: ^cstring) -> result_t ---
+
+ /** @brief Query the storage type for the specified part. */
+ get_storage :: proc(ctxt: const_context_t, part_index: c.int, out: ^storage_t) -> result_t ---
+
+ /** @brief Define a new part in the file. */
+ add_part :: proc(
+ ctxt: context_t,
+ partname: cstring,
+ type: storage_t,
+ new_index: ^c.int) -> result_t ---
+
+ /** @brief Query how many levels are in the specified part.
+ *
+ * If the part is a tiled part, fill in how many tile levels are present.
+ *
+ * Return `ERR_SUCCESS` on success, an error otherwise (i.e. if the part
+ * is not tiled).
+ *
+ * It is valid to pass `NULL` to either of the @p levelsx or @p levelsy
+ * arguments, which enables testing if this part is a tiled part, or
+ * if you don't need both (i.e. in the case of a mip-level tiled
+ * image)
+ */
+ get_tile_levels :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ levelsx: ^i32,
+ levelsy: ^i32) -> result_t ---
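+
+ /* Per the note above, passing nil for both level counts is a cheap way to
+ * probe whether a part is tiled (names are illustrative):
+ *
+ *    is_tiled := get_tile_levels(ctxt, part, nil, nil) == .SUCCESS
+ */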
+
+ /** @brief Query the tile size for a particular level in the specified part.
+ *
+ * If the part is a tiled part, fill in the tile size for the
+ * specified part/level.
+ *
+ * Return `ERR_SUCCESS` on success, an error otherwise (i.e. if the
+ * part is not tiled).
+ *
+ * It is valid to pass `NULL` to either of the @p tilew or @p tileh
+ * arguments, which enables testing if this part is a tiled part, or
+ * if you don't need both (i.e. in the case of a mip-level tiled
+ * image)
+ */
+ get_tile_sizes :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ levelx: c.int,
+ levely: c.int,
+ tilew: ^i32,
+ tileh: ^i32) -> result_t ---
+
+ /** @brief Query the data sizes for a particular level in the specified part.
+ *
+ * If the part is a tiled part, fill in the width/height for the
+ * specified levels.
+ *
+ * Return `ERR_SUCCESS` on success, an error otherwise (i.e. if the part
+ * is not tiled).
+ *
+ * It is valid to pass `NULL` to either of the @p levw or @p levh
+ * arguments, which enables testing if this part is a tiled part, or
+ * if you don't need both for some reason.
+ */
+ get_level_sizes :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ levelx: c.int,
+ levely: c.int,
+ levw: ^i32,
+ levh: ^i32) -> result_t ---
+
+ /** Return the number of chunks contained in this part of the file.
+ *
+ * As in the technical documentation for OpenEXR, the chunk is the
+ * generic term for a pixel data block. This is the atomic unit that
+ * this library uses to negotiate data to and from a context.
+ *
+ * This should be used as a basis for splitting up how a file is
+ * processed. Depending on the compression, a different number of
+ * scanlines are encoded in each chunk, and since those need to be
+ * encoded/decoded as a block, the chunk should be the basis for I/O
+ * as well.
+ */
+ get_chunk_count :: proc(ctxt: const_context_t, part_index: c.int, out: ^i32) -> result_t ---
+
+ /** Return the number of scanlines per chunk for this file part.
+ *
+ * When iterating over a scanline file, this may be an easier metric
+ * for multi-threading or other access than only negotiating chunk
+ * counts, and so is provided as a utility.
+ */
+ get_scanlines_per_chunk :: proc(ctxt: const_context_t, part_index: c.int, out: ^i32) -> result_t ---
+
+ /** Return the maximum unpacked size of a chunk for the file part.
+ *
+ * This may be used ahead of any actual reading of data, so can be
+ * used to pre-allocate buffers for multiple threads in one block or
+ * whatever your application may require.
+ */
+ get_chunk_unpacked_size :: proc(ctxt: const_context_t, part_index: c.int, out: ^u64) -> result_t ---
+
+ /** @brief Retrieve the zip compression level used for the specified part.
+ *
+ * This only applies when the compression method involves using zip
+ * compression (zip, zips, some modes of DWAA/DWAB).
+ *
+ * This value is NOT persisted in the file, and only exists for the
+ * lifetime of the context, so will be at the default value when just
+ * reading a file.
+ */
+ get_zip_compression_level :: proc(ctxt: const_context_t, part_index: c.int, level: ^c.int) -> result_t ---
+
+ /** @brief Set the zip compression level used for the specified part.
+ *
+ * This only applies when the compression method involves using zip
+ * compression (zip, zips, some modes of DWAA/DWAB).
+ *
+ * This value is NOT persisted in the file, and only exists for the
+ * lifetime of the context, so this value will be ignored when
+ * reading a file.
+ */
+ set_zip_compression_level :: proc(ctxt: context_t, part_index: c.int, level: c.int) -> result_t ---
+
+ /** @brief Retrieve the dwa compression level used for the specified part.
+ *
+ * This only applies when the compression method is DWAA/DWAB.
+ *
+ * This value is NOT persisted in the file, and only exists for the
+ * lifetime of the context, so will be at the default value when just
+ * reading a file.
+ */
+ get_dwa_compression_level :: proc(ctxt: const_context_t, part_index: c.int, level: ^f32) -> result_t ---
+
+ /** @brief Set the dwa compression level used for the specified part.
+ *
+ * This only applies when the compression method is DWAA/DWAB.
+ *
+ * This value is NOT persisted in the file, and only exists for the
+ * lifetime of the context, so this value will be ignored when
+ * reading a file.
+ */
+ set_dwa_compression_level :: proc(ctxt: context_t, part_index: c.int, level: f32) -> result_t ---
+
+ /**************************************/
+
+ /** @defgroup PartMetadata Functions to get and set metadata for a particular part.
+ * @{
+ *
+ */
+
+ /** @brief Query the count of attributes in a part. */
+ get_attribute_count :: proc(ctxt: const_context_t, part_index: c.int, count: ^i32) -> result_t ---
+
+ /** @brief Query a particular attribute by index. */
+ get_attribute_by_index :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ mode: attr_list_access_mode_t,
+ idx: i32,
+ outattr: ^^attribute_t) -> result_t ---
+
+ /** @brief Query a particular attribute by name. */
+ get_attribute_by_name :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ outattr: ^^attribute_t) -> result_t ---
+
+ /** @brief Query the list of attributes in a part.
+ *
+ * This retrieves a list of attributes currently defined in a part.
+ *
+ * If outlist is `NULL`, this function still succeeds, filling only the
+ * count. In this manner, the user can allocate memory for the list of
+ * attributes, then re-call this function to get the full list.
+ */
+ get_attribute_list :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ mode: attr_list_access_mode_t,
+ count: ^i32,
+ outlist: ^[^]attribute_t) -> result_t ---
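+
+ /* Sketch of the two-call pattern described above (names are illustrative):
+ *
+ *    nattrs: i32
+ *    get_attribute_list(ctxt, part, .FILE_ORDER, &nattrs, nil)
+ *    // allocate storage for `nattrs` attribute pointers, then call again
+ *    // with outlist pointing at that storage to receive the full list
+ */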
+
+ /** Declare an attribute within the specified part.
+ *
+ * Only valid when a file is opened for write.
+ */
+ attr_declare_by_type :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ type: cstring,
+ newattr: ^^attribute_t) -> result_t ---
+
+ /** @brief Declare an attribute within the specified part.
+ *
+ * Only valid when a file is opened for write.
+ */
+ attr_declare :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ type: attribute_type_t,
+ newattr: ^^attribute_t) -> result_t ---
+
+ /**
+ * @defgroup RequiredAttributeHelpers Required Attribute Utilities
+ *
+ * @brief These are a group of functions for attributes that are
+ * required to be in every part of every file.
+ *
+ * @{
+ */
+
+ /** @brief Initialize all required attributes for all files.
+ *
+ * NB: other file types do require other attributes, such as the tile
+ * description for a tiled file.
+ */
+ initialize_required_attr :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ displayWindow: ^attr_box2i_t,
+ dataWindow: ^attr_box2i_t,
+ pixelaspectratio: f32,
+ screenWindowCenter: attr_v2f_t,
+ screenWindowWidth: f32,
+ lineorder: lineorder_t,
+ ctype: compression_t) -> result_t ---
+
+ /** @brief Initialize all required attributes to default values:
+ *
+ * - `displayWindow` is set to (0, 0 -> @p width - 1, @p height - 1)
+ * - `dataWindow` is set to (0, 0 -> @p width - 1, @p height - 1)
+ * - `pixelAspectRatio` is set to 1.0
+ * - `screenWindowCenter` is set to 0.f, 0.f
+ * - `screenWindowWidth` is set to 1.f
+ * - `lineorder` is set to `INCREASING_Y`
+ * - `compression` is set to @p ctype
+ */
+ initialize_required_attr_simple :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ width: i32,
+ height: i32,
+ ctype: compression_t) -> result_t ---
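+
+ /* Sketch of preparing a minimal single-part scanline file with the helper
+ * above; the dimensions are illustrative, and channels still need to be
+ * declared via add_channel() before any pixel data can be written:
+ *
+ *    initialize_required_attr_simple(ctxt, 0, 1920, 1080, .ZIP)
+ */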
+
+ /** @brief Copy the attributes from one part to another.
+ *
+ * This allows one to quickly copy unassigned attributes from one part to another.
+ *
+ * If an attribute in the source part has not yet been set in the
+ * destination part, the item will be copied over.
+ *
+ * For example, when you add a part, the storage type and name
+ * attributes are required arguments to the definition of a new part,
+ * but the channels have not yet been assigned. So by calling this with an
+ * input file as the source, you can copy the channel definitions (and
+ * any other unassigned attributes) from the source.
+ */
+ copy_unset_attributes :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ source: const_context_t,
+ src_part_index: c.int) -> result_t ---
+
+ /** @brief Retrieve the list of channels. */
+ get_channels :: proc(ctxt: const_context_t, part_index: c.int, chlist: ^^attr_chlist_t) -> result_t ---
+
+ /** @brief Define a new channel to the output file part.
+ *
+ * The @p percept parameter is used for lossy compression techniques
+ * to indicate that the value represented is closer to linear (1) or
+ * closer to logarithmic (0). For r, g, b, luminance, this is normally
+ * 0.
+ */
+ add_channel :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ ptype: pixel_type_t,
+ percept: perceptual_treatment_t,
+ xsamp: i32,
+ ysamp: i32) -> c.int ---
+
+ /** @brief Copy the channels from another source.
+ *
+ * Useful if you are manually constructing the list or simply copying
+ * from an input file.
+ */
+ set_channels :: proc(ctxt: context_t, part_index: c.int, channels: ^attr_chlist_t) -> result_t ---
+
+ /** @brief Retrieve the compression method used for the specified part. */
+ get_compression :: proc(ctxt: const_context_t, part_index: c.int, compression: ^compression_t) -> result_t ---
+ /** @brief Set the compression method used for the specified part. */
+ set_compression :: proc(ctxt: context_t, part_index: c.int, ctype: compression_t) -> result_t ---
+
+ /** @brief Retrieve the data window for the specified part. */
+ get_data_window :: proc(ctxt: const_context_t, part_index: c.int, out: ^attr_box2i_t) -> result_t ---
+ /** @brief Set the data window for the specified part. */
+ set_data_window :: proc(ctxt: context_t, part_index: c.int, dw: ^attr_box2i_t) -> c.int ---
+
+ /** @brief Retrieve the display window for the specified part. */
+ get_display_window :: proc(ctxt: const_context_t, part_index: c.int, out: ^attr_box2i_t) -> result_t ---
+ /** @brief Set the display window for the specified part. */
+ set_display_window :: proc(ctxt: context_t, part_index: c.int, dw: ^attr_box2i_t) -> c.int ---
+
+ /** @brief Retrieve the line order for storing data in the specified part (use 0 for single part images). */
+ get_lineorder :: proc(ctxt: const_context_t, part_index: c.int, out: ^lineorder_t) -> result_t ---
+ /** @brief Set the line order for storing data in the specified part (use 0 for single part images). */
+ set_lineorder :: proc(ctxt: context_t, part_index: c.int, lo: lineorder_t) -> result_t ---
+
+ /** @brief Retrieve the pixel aspect ratio for the specified part (use 0 for single part images). */
+ get_pixel_aspect_ratio :: proc(ctxt: const_context_t, part_index: c.int, par: ^f32) -> result_t ---
+ /** @brief Set the pixel aspect ratio for the specified part (use 0 for single part images). */
+ set_pixel_aspect_ratio :: proc(ctxt: context_t, part_index: c.int, par: f32) -> result_t ---
+
+ /** @brief Retrieve the screen oriented window center for the specified part (use 0 for single part images). */
+ get_screen_window_center :: proc(ctxt: const_context_t, part_index: c.int, wc: ^attr_v2f_t) -> result_t ---
+ /** @brief Set the screen oriented window center for the specified part (use 0 for single part images). */
+ set_screen_window_center :: proc(ctxt: context_t, part_index: c.int, wc: ^attr_v2f_t) -> c.int ---
+
+ /** @brief Retrieve the screen oriented window width for the specified part (use 0 for single part images). */
+ get_screen_window_width :: proc(ctxt: const_context_t, part_index: c.int, out: ^f32) -> result_t ---
+ /** @brief Set the screen oriented window width for the specified part (use 0 for single part images). */
+ set_screen_window_width :: proc(ctxt: context_t, part_index: c.int, ssw: f32) -> result_t ---
+
+ /** @brief Retrieve the tiling info for a tiled part (use 0 for single part images). */
+ get_tile_descriptor :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ xsize: ^u32,
+ ysize: ^u32,
+ level: ^tile_level_mode_t,
+ round: ^tile_round_mode_t) -> result_t ---
+
+ /** @brief Set the tiling info for a tiled part (use 0 for single part images). */
+ set_tile_descriptor :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ x_size: u32,
+ y_size: u32,
+ level_mode: tile_level_mode_t,
+ round_mode: tile_round_mode_t) -> result_t ---
+
+ /** @brief Set the name of the specified part. */
+ set_name :: proc(ctxt: context_t, part_index: c.int, val: cstring) -> result_t ---
+
+ /** @brief Retrieve the version number of the specified part. */
+ get_version :: proc(ctxt: const_context_t, part_index: c.int, out: ^i32) -> result_t ---
+
+ /** @brief Set the version number of the specified part. */
+ set_version :: proc(ctxt: context_t, part_index: c.int, val: i32) -> result_t ---
+
+ /** @brief Set the chunk count of the specified part. */
+ set_chunk_count :: proc(ctxt: context_t, part_index: c.int, val: i32) -> result_t ---
+
+ /** @} */ /* required attr group. */
+
+ /**
+ * @defgroup BuiltinAttributeHelpers Attribute utilities for builtin types
+ *
+ * @brief These are a group of functions for attributes that use the builtin types.
+ *
+ * @{
+ */
+
+ attr_get_box2i :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ outval: ^attr_box2i_t) -> result_t ---
+
+ attr_set_box2i :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ val: ^attr_box2i_t) -> result_t ---
+
+ attr_get_box2f :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ outval: ^attr_box2f_t) -> result_t ---
+
+ attr_set_box2f :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ val: ^attr_box2f_t) -> result_t ---
+
+ /** @brief Zero-copy query of channel data.
+ *
+ * Do not free or manipulate the @p chlist data, or use
+ * after the lifetime of the context.
+ */
+ attr_get_channels :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ chlist: ^^attr_chlist_t) -> result_t ---
+
+ /** @brief This allows one to quickly copy the channels from one file
+ * to another.
+ */
+ attr_set_channels :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ channels: ^attr_chlist_t) -> result_t ---
+
+ attr_get_chromaticities :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ chroma: ^attr_chromaticities_t) -> result_t ---
+
+ attr_set_chromaticities :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ chroma: ^attr_chromaticities_t) -> result_t ---
+
+ attr_get_compression :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^compression_t) -> result_t ---
+
+ attr_set_compression :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ comp: compression_t) -> result_t ---
+
+ attr_get_double :: proc(ctxt: const_context_t, part_index: c.int, name: cstring, out: ^f64) -> result_t ---
+
+ attr_set_double :: proc(ctxt: context_t, part_index: c.int, name: cstring, val: f64) -> result_t ---
+
+ attr_get_envmap :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^envmap_t) -> result_t ---
+
+ attr_set_envmap :: proc(ctxt: context_t, part_index: c.int, name: cstring, emap: envmap_t) -> result_t ---
+
+ attr_get_float :: proc(ctxt: const_context_t, part_index: c.int, name: cstring, out: ^f32) -> result_t ---
+
+ attr_set_float :: proc(ctxt: context_t, part_index: c.int, name: cstring, val: f32) -> result_t ---
+
+ /** @brief Zero-copy query of float data.
+ *
+ * Do not free or manipulate the @p out data, or use after the
+ * lifetime of the context.
+ */
+ attr_get_float_vector :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ sz: ^i32,
+ out: ^[^]f32) -> result_t ---
+
+ attr_set_float_vector :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ sz: i32,
+ vals: [^]f32) -> result_t ---
+
+ attr_get_int :: proc(ctxt: const_context_t, part_index: c.int, name: cstring, out: ^i32) -> result_t ---
+
+ attr_set_int :: proc(ctxt: context_t, part_index: c.int, name: cstring, val: i32) -> result_t ---
+
+ attr_get_keycode :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_keycode_t) -> result_t ---
+
+ attr_set_keycode :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ kc: ^attr_keycode_t) -> result_t ---
+
+ attr_get_lineorder :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^lineorder_t) -> result_t ---
+
+ attr_set_lineorder :: proc(ctxt: context_t, part_index: c.int, name: cstring, lo: lineorder_t) -> result_t ---
+
+ attr_get_m33f :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_m33f_t) -> result_t ---
+
+ attr_set_m33f :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ m: ^attr_m33f_t) -> result_t ---
+
+ attr_get_m33d :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_m33d_t) -> result_t ---
+
+ attr_set_m33d :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ m: ^attr_m33d_t) -> result_t ---
+
+ attr_get_m44f :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_m44f_t) -> result_t ---
+
+ attr_set_m44f :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ m: ^attr_m44f_t) -> result_t ---
+
+ attr_get_m44d :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_m44d_t) -> result_t ---
+
+ attr_set_m44d :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ m: ^attr_m44d_t) -> result_t ---
+
+ attr_get_preview :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_preview_t) -> result_t ---
+
+ attr_set_preview :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ p: ^attr_preview_t) -> result_t ---
+
+ attr_get_rational :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_rational_t) -> result_t ---
+
+ attr_set_rational :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ r: ^attr_rational_t) -> result_t ---
+
+ /** @brief Zero-copy query of string value.
+ *
+ * Do not modify the string pointed to by @p out, and do not use
+ * after the lifetime of the context.
+ */
+ attr_get_string :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ length: ^i32,
+ out: ^cstring) -> result_t ---
+
+ attr_set_string :: proc(ctxt: context_t, part_index: c.int, name: cstring, s: cstring) -> result_t ---
+
+ /** @brief Zero-copy query of string data.
+ *
+ * Do not free the strings pointed to by the array.
+ *
+ * Must provide @p size.
+ *
+ * @p out must point to an array of ``cstring`` values large enough to hold
+ * the string pointers for the string vector when provided.
+ */
+ attr_get_string_vector :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ size: ^i32,
+ out: ^cstring) -> result_t ---
+
+ attr_set_string_vector :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ size: i32,
+ sv: ^cstring) -> result_t ---
+
+ attr_get_tiledesc :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_tiledesc_t) -> result_t ---
+
+ attr_set_tiledesc :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ td: ^attr_tiledesc_t) -> result_t ---
+
+ attr_get_timecode :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_timecode_t) -> result_t ---
+
+ attr_set_timecode :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ tc: ^attr_timecode_t) -> result_t ---
+
+ attr_get_v2i :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_v2i_t) -> result_t ---
+
+ attr_set_v2i :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ v: ^attr_v2i_t) -> result_t ---
+
+ attr_get_v2f :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_v2f_t) -> result_t ---
+
+ attr_set_v2f :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ v: ^attr_v2f_t) -> result_t ---
+
+ attr_get_v2d :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_v2d_t) -> result_t ---
+
+ attr_set_v2d :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ v: ^attr_v2d_t) -> result_t ---
+
+ attr_get_v3i :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_v3i_t) -> result_t ---
+
+ attr_set_v3i :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ v: ^attr_v3i_t) -> result_t ---
+
+ attr_get_v3f :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_v3f_t) -> result_t ---
+
+ attr_set_v3f :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ v: ^attr_v3f_t) -> result_t ---
+
+ attr_get_v3d :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ out: ^attr_v3d_t) -> result_t ---
+
+ attr_set_v3d :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ v: ^attr_v3d_t) -> result_t ---
+
+ attr_get_user :: proc(
+ ctxt: const_context_t,
+ part_index: c.int,
+ name: cstring,
+ type: ^cstring,
+ size: ^i32,
+ out: ^rawptr) -> result_t ---
+
+ attr_set_user :: proc(
+ ctxt: context_t,
+ part_index: c.int,
+ name: cstring,
+ type: cstring,
+ size: i32,
+ out: rawptr) -> result_t ---
+
+} \ No newline at end of file