package vendor_openexr

when ODIN_OS == .Windows {
	foreign import lib "OpenEXRCore-3_1.lib"
} else {
	foreign import lib "system:OpenEXRCore-3_1"
}

import "core:c"

/** Can be bit-wise or'ed into the encode_flags in the encode pipeline.
 *
 * Indicates that the sample count table should be encoded from an
 * individual sample count list (n, m, o, ...), meaning it will have
 * to compute the cumulative counts on the fly.
 *
 * Without this (i.e. a value of 0 in that bit), the sample count
 * table is assumed to already be a cumulative list (n, n+m, n+m+o, ...),
 * which is the on-disk representation.
 */
ENCODE_DATA_SAMPLE_COUNTS_ARE_INDIVIDUAL :: u16(1 << 0)
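
// A minimal illustrative sketch, not part of the binding and using
// hypothetical data: converting an individual per-pixel sample count list
// into the cumulative on-disk form used when this flag is NOT set.
@(private)
_example_sample_count_forms :: proc() {
	individual := [3]i32{2, 1, 3} // (n, m, o), with the flag set
	cumulative: [3]i32            // (n, n+m, n+m+o), the on-disk form
	sum: i32
	for v, i in individual {
		sum += v
		cumulative[i] = sum
	}
	assert(cumulative == [3]i32{2, 3, 6})
}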

/** Can be bit-wise or'ed into the encode_flags in the encode pipeline.
 *
 * Indicates that the data in the channel pointers to encode from is not
 * a direct pointer, but instead is a pointer-to-pointers. In this
 * mode, the user_pixel_stride and user_line_stride are used to
 * advance the pointer offsets for each pixel in the output, but the
 * user_bytes_per_element and user_data_type are used to put
 * (successive) entries into each destination.
 *
 * So each channel pointer must then point to an array of
 * chunk.width * chunk.height pointers. If an entry is
 * `NULL`, 0 samples will be placed in the output.
 *
 * If this is NOT set (0), the default packing routine assumes the
 * data will be planar and contiguous (each channel is a separate
 * memory block), ignoring user_line_stride and user_pixel_stride and
 * advancing only by the sample counts and bytes per element.
 */
ENCODE_NON_IMAGE_DATA_AS_POINTERS :: u16(1 << 1)
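
// A minimal sketch (hypothetical helper, not part of the binding) of the
// pointer-to-pointers layout this flag selects: one pointer per pixel, each
// referring to that pixel's sample array. Entries left nil encode pixels
// with 0 samples, and user_pixel_stride/user_line_stride advance through
// this table of pointers rather than through the sample data itself.
@(private)
_example_deep_channel_pointers :: proc(width, height: int) -> [][^]f16 {
	// chunk.width * chunk.height entries, one per pixel
	per_pixel := make([][^]f16, width * height)
	// per_pixel[y*width + x] would be set to point at pixel (x, y)'s samples
	return per_pixel
}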

/** Struct meant to be used on a per-thread basis for writing exr data.
 *
 * As should be obvious, this structure is NOT thread safe, but rather
 * meant to be used by separate threads, which can all be accessing
 * the same context concurrently.
 */
encode_pipeline_t :: struct {
	/** The output channel information for this chunk.
	 *
	 * User is expected to fill the channel pointers for the input
	 * channels. For writing, all channels must be initialized prior
	 * to using exr_encoding_choose_default_routines(). If a custom pack routine
	 * is written, that is up to the implementor.
	 *
	 * Describes the channel information. This information is
	 * allocated dynamically during exr_encoding_initialize().
	 */
	channels: [^]coding_channel_info_t,
	channel_count: i16,

	/** Encode flags to control the behavior. */
	encode_flags: u16,

	/** Copy of the parameters given to the initialize/update for convenience. */
	part_index: c.int,
	ctx:        const_context_t,
	chunk:      chunk_info_t,

	/** Can be used by the user to pass custom context data through
	 * the encode pipeline.
	 */
	encoding_user_data: rawptr,

	/** The packed buffer into which the individual channels are placed.
	 *
	 * If `NULL`, will be allocated during the run of the pipeline.
	 *
	 * If the caller wishes to take control of the buffer, simply
	 * adopt the pointer and set it to `NULL` here. Be cognizant of any
	 * custom allocators.
	 */
	packed_buffer: rawptr,

	/** Number of actual bytes in packed_buffer, as distinct from the allocation size. */
	packed_bytes: u64,

	/** Used when re-using the same encode pipeline struct to know,
	 * when the chunk changes size, whether the current buffer is
	 * large enough.
	 *
	 * If the caller adopts packed_buffer, this should be set to zero
	 * here. Be cognizant of any custom allocators.
	 */
	packed_alloc_size: c.size_t,

	/** For deep data. NB: this member is NOT const because we need to
	 * temporarily swap it to xdr order and restore it (to avoid a
	 * duplicate buffer allocation).
	 *
	 * Depending on the flag set above, will be treated either as a
	 * cumulative list (n, n+m, n+m+o, ...), or an individual table
	 * (n, m, o, ...). */
	sample_count_table: [^]i32,

	/** Allocated table size (to avoid re-allocations). The number of
	 * entries must always be width * height for the chunk.
	 */
	sample_count_alloc_size: c.size_t,

	/** Packed sample table (compressed, raw on disk representation)
	 * for deep or other non-image data.
	 */
	packed_sample_count_table: rawptr,

	/** Number of bytes to write (actual size) for the
	 * packed_sample_count_table.
	 */
	packed_sample_count_bytes: c.size_t,

	/** Allocated size (to avoid re-allocations) for the
	 * packed_sample_count_table.
	 */
	packed_sample_count_alloc_size: c.size_t,

	/** The compressed buffer, only needed for compressed files.
	 *
	 * If `NULL`, will be allocated during the run of the pipeline when
	 * needed.
	 *
	 * If the caller wishes to take control of the buffer, simply
	 * adopt the pointer and set it to `NULL` here. Be cognizant of any
	 * custom allocators.
	 */
	compressed_buffer: rawptr,

	/** Must be filled in as the pipeline runs to inform the writing
	 * software of the compressed size of the chunk (for an
	 * uncompressed file, or when compression would make the chunk
	 * larger, the packed data is used as-is and this is expected to
	 * match packed_bytes).
	 */
	compressed_bytes: c.size_t,

	/** Used when re-using the same encode pipeline struct to know,
	 * when the chunk changes size, whether the current buffer is
	 * large enough.
	 *
	 * If the caller adopts compressed_buffer, this should be set to
	 * zero here. Be cognizant of any custom allocators.
	 */
	compressed_alloc_size: c.size_t,

	/** A scratch buffer for intermediate results.
	 *
	 * If `NULL`, will be allocated during the run of the pipeline when
	 * needed.
	 *
	 * If the caller wishes to take control of the buffer, simply
	 * adopt the pointer and set it to `NULL` here. Be cognizant of any
	 * custom allocators.
	 */
	scratch_buffer_1: rawptr,

	/** Used when re-using the same encode pipeline struct to know,
	 * when the chunk changes size, whether the current buffer is
	 * large enough.
	 *
	 * If the caller adopts scratch_buffer_1, this should be set to
	 * zero here. Be cognizant of any custom allocators.
	 */
	scratch_alloc_size_1: c.size_t,

	/** Some compression routines may need a second scratch buffer.
	 *
	 * If `NULL`, will be allocated during the run of the pipeline when
	 * needed.
	 *
	 * If the caller wishes to take control of the buffer, simply
	 * adopt the pointer and set it to `NULL` here. Be cognizant of any
	 * custom allocators.
	 */
	scratch_buffer_2: rawptr,

	/** Used when re-using the same encode pipeline struct to know,
	 * when the chunk changes size, whether the current buffer is
	 * large enough.
	 */
	scratch_alloc_size_2: c.size_t,

	/** Enable a custom allocator for the different buffers (if
	 * encoding on a GPU). If `NULL`, will use the allocator from the
	 * context.
	 */
	alloc_fn: proc "c" (transcoding_pipeline_buffer_id_t, c.size_t) -> rawptr,

	/** Enable a custom deallocator for the different buffers (if
	 * encoding on a GPU). If `NULL`, will use the deallocator from the
	 * context.
	 */
	free_fn: proc "c" (transcoding_pipeline_buffer_id_t, rawptr),

	/** Function chosen based on the output layout of the channels of
	 * the part, used to convert and pack the data.
	 *
	 * If the user has a custom packing method for this part, this can
	 * be changed after initialization.
	 */
	convert_and_pack_fn: proc "c" (pipeline: ^encode_pipeline_t) -> result_t,

	/** Function chosen based on the compression type of the part to
	 * compress data.
	 *
	 * If the user has a custom compression method for the compression
	 * type on this part, this can be changed after initialization.
	 */
	compress_fn: proc "c" (pipeline: ^encode_pipeline_t) -> result_t,

	/** This routine is used when waiting for other threads to finish
	 * writing previous chunks such that this thread can write this
	 * chunk. This is used for parts which have a specified chunk
	 * ordering (increasing/decreasing y) and the chunks cannot be
	 * written in arbitrary order (as they could be for uncompressed files).
	 *
	 * This enables the calling application to contribute thread time
	 * to other computation as needed, or just use something like
	 * pthread_yield().
	 *
	 * By default, this routine will be assigned to a function which
	 * returns an error, failing the encode immediately. In this way,
	 * it assumes that there is only one thread being used for
	 * writing.
	 *
	 * It is up to the user to provide an appropriate routine if
	 * performing multi-threaded writing.
	 */
	yield_until_ready_fn: proc "c" (pipeline: ^encode_pipeline_t) -> result_t,

	/** Function chosen to write chunk data to the context.
	 *
	 * This is allowed to be overridden, but probably is not necessary
	 * in most scenarios.
	 */
	write_fn: proc "c" (pipeline: ^encode_pipeline_t) -> result_t,

	/** Small stash of channel info values. This is faster than calling
	 * malloc when the channel count in the part is small (RGBAZ),
	 * which is very common. However, if there are a large number of
	 * channels, space will be allocated for them, so do not rely on
	 * this field being used.
	 */
	_quick_chan_store: [5]coding_channel_info_t,
}

ENCODE_PIPELINE_INITIALIZER :: encode_pipeline_t{}
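
// A minimal usage sketch, hypothetical and not part of the binding: encode
// one chunk with the default routines. It assumes `ctxt` was opened for
// writing and `cinfo` already describes the chunk; filling the per-channel
// input pointers is elided with a comment. Per the C API, a zero result
// denotes success (EXR_ERR_SUCCESS == 0). For subsequent chunks,
// encoding_update would re-use the pipeline's buffers instead of
// re-initializing.
@(private)
_example_encode_chunk :: proc(ctxt: const_context_t, part_index: c.int, cinfo: ^chunk_info_t) -> result_t {
	pipe := ENCODE_PIPELINE_INITIALIZER

	res := encoding_initialize(ctxt, part_index, cinfo, &pipe)
	if res != result_t(0) {
		return res
	}

	// ... fill pipe.channels[0..<pipe.channel_count] with the input
	// pointers and strides here, before choosing the pack routine ...

	res = encoding_choose_default_routines(ctxt, part_index, &pipe)
	if res == result_t(0) {
		res = encoding_run(ctxt, part_index, &pipe)
	}

	// Always destroy to release intermediate buffers; keep the first error.
	dres := encoding_destroy(ctxt, &pipe)
	if res == result_t(0) {
		res = dres
	}
	return res
}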


@(link_prefix="exr_", default_calling_convention="c")
foreign lib {
	/** Initialize the encoding pipeline structure with the channel info
	 * for the specified part based on the chunk to be written.
	 *
	 * NB: The encode_pipe.convert_and_pack_fn field will be `NULL` after this. If that
	 * stage is desired, initialize the channel output information and
	 * call exr_encoding_choose_default_routines().
	 */
	encoding_initialize :: proc(
		ctxt:        const_context_t,
		part_index:  c.int,
		cinfo:       ^chunk_info_t,
		encode_pipe: ^encode_pipeline_t) -> result_t ---

	/** Given an initialized encode pipeline, find an appropriate
	 * function to shuffle and convert data into the defined channel
	 * outputs.
	 *
	 * Calling this is not required if a custom routine will be used, or
	 * if the data will be supplied already packed.
	 */
	encoding_choose_default_routines :: proc(
		ctxt:        const_context_t,
		part_index:  c.int,
		encode_pipe: ^encode_pipeline_t) -> result_t ---

	/** Given an encode pipeline previously initialized, update it for the
	 * new chunk to be written.
	 *
	 * In this manner, memory buffers can be re-used to avoid continual
	 * malloc/free calls. Further, it allows the previous choices for
	 * the various functions to be quickly re-used.
	 */
	encoding_update :: proc(
		ctxt:        const_context_t,
		part_index:  c.int,
		cinfo:       ^chunk_info_t,
		encode_pipe: ^encode_pipeline_t) -> result_t ---

	/** Execute the encoding pipeline. */
	encoding_run :: proc(
		ctxt:        const_context_t,
		part_index:  c.int,
		encode_pipe: ^encode_pipeline_t) -> result_t ---

	/** Free any intermediate memory in the encoding pipeline.
	 *
	 * This does NOT free any pointers referred to in the channel info
	 * areas, but rather only the intermediate buffers and memory needed
	 * for the structure itself.
	 */
	encoding_destroy :: proc(ctxt: const_context_t, encode_pipe: ^encode_pipeline_t) -> result_t ---
}
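
// A minimal sketch of a caller-supplied yield_until_ready_fn for
// multi-threaded writing; hypothetical and not part of the binding. A real
// implementation would block until all earlier chunks of the part have
// been written (e.g. on synchronization state stashed in
// encoding_user_data). Note the required "c" calling convention, so no
// Odin context is available inside the callback.
@(private)
_example_yield_until_ready :: proc "c" (pipeline: ^encode_pipeline_t) -> result_t {
	_ = pipeline // would consult pipeline.chunk and pipeline.encoding_user_data
	return result_t(0) // 0 mirrors EXR_ERR_SUCCESS in the C API
}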