aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndre Weissflog <floooh@gmail.com>2025-11-15 21:07:22 +0100
committerAndre Weissflog <floooh@gmail.com>2025-11-15 21:07:22 +0100
commit4f9f6e9eab5fea003fa8cd0739700975c55a87bb (patch)
treeed1236431abe5bcef4064654faad96803dcaf469
parentdef26368f7cabac1f4c459b1c89d92ba2872ae89 (diff)
sokol_gfx/app.h vk: fix unused warnings in release builds
-rw-r--r--sokol_app.h7
-rw-r--r--sokol_gfx.h27
2 files changed, 17 insertions, 17 deletions
diff --git a/sokol_app.h b/sokol_app.h
index 39ee4592..95ba7453 100644
--- a/sokol_app.h
+++ b/sokol_app.h
@@ -4352,7 +4352,7 @@ _SOKOL_PRIVATE uint32_t _sapp_vk_required_device_extensions(const char** out_nam
out_names[count++] = VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME;
out_names[count++] = VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME;
out_names[count++] = VK_EXT_DESCRIPTOR_BUFFER_EXTENSION_NAME;
- SOKOL_ASSERT(count <= max_count);
+ SOKOL_ASSERT(count <= max_count); _SOKOL_UNUSED(max_count);
return count;
}
@@ -4360,7 +4360,7 @@ _SOKOL_PRIVATE bool _sapp_vk_check_device_extensions(VkPhysicalDevice pdev, cons
SOKOL_ASSERT(pdev && required_exts && num_required_exts > 0);
uint32_t ext_count = 0;
VkResult res = vkEnumerateDeviceExtensionProperties(pdev, 0, &ext_count, 0);
- SOKOL_ASSERT(res == VK_SUCCESS);
+ SOKOL_ASSERT(res == VK_SUCCESS); _SOKOL_UNUSED(res);
if (ext_count == 0) {
return false;
}
@@ -4582,7 +4582,7 @@ _SOKOL_PRIVATE VkSurfaceFormatKHR _sapp_vk_pick_surface_format(void) {
SOKOL_ASSERT(_sapp.vk.surface);
_SAPP_VK_MAX_COUNT_AND_ARRAY(64, VkSurfaceFormatKHR, fmt_count, formats);
VkResult res = vkGetPhysicalDeviceSurfaceFormatsKHR(_sapp.vk.physical_device, _sapp.vk.surface, &fmt_count, formats);
- SOKOL_ASSERT((res == VK_SUCCESS) || (res == VK_INCOMPLETE));
+ SOKOL_ASSERT((res == VK_SUCCESS) || (res == VK_INCOMPLETE)); _SOKOL_UNUSED(res);
SOKOL_ASSERT(fmt_count > 0);
// FIXME: only accept non-SRGB formats until sokol_app.h gets proper SRGB support
for (uint32_t i = 0; i < fmt_count; i++) {
@@ -4603,6 +4603,7 @@ _SOKOL_PRIVATE void _sapp_vk_create_sync_objects(void) {
_sapp_clear(&create_info, sizeof(create_info));
create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkResult res;
+ _SOKOL_UNUSED(res);
for (uint32_t i = 0; i < _sapp.vk.num_swapchain_images; i++) {
SOKOL_ASSERT(0 == _sapp.vk.sync[i].present_complete_sem);
SOKOL_ASSERT(0 == _sapp.vk.sync[i].render_finished_sem);
diff --git a/sokol_gfx.h b/sokol_gfx.h
index fb02dcc3..bc4d4182 100644
--- a/sokol_gfx.h
+++ b/sokol_gfx.h
@@ -4962,7 +4962,7 @@ typedef struct sg_desc {
int wgpu_bindgroups_cache_size; // number of slots in the WebGPU bindgroup cache (must be 2^N)
int vk_copy_staging_buffer_size; // Vulkan: size of staging buffer for immutable and dynamic resources (default: 4 MB)
int vk_stream_staging_buffer_size; // Vulkan: size of per-frame staging buffer for updating streaming resources (default: 16 MB)
- int vk_descriptor_buffer_size; // Vulkan: size of per-frame descriptor buffer for updating resource bindings (default: 4 MB)
+ int vk_descriptor_buffer_size; // Vulkan: size of per-frame descriptor buffer for updating resource bindings (default: 16 MB)
sg_allocator allocator;
sg_logger logger; // optional log function override
sg_environment environment;
@@ -6010,7 +6010,7 @@ enum {
_SG_DEFAULT_WGPU_BINDGROUP_CACHE_SIZE = 1024,
_SG_DEFAULT_VK_COPY_STAGING_SIZE = (4 * 1024 * 1024),
_SG_DEFAULT_VK_STREAM_STAGING_SIZE = (16 * 1024 * 1024),
- _SG_DEFAULT_VK_DESCRIPTOR_BUFFER_SIZE = (4 * 1024 * 1024),
+ _SG_DEFAULT_VK_DESCRIPTOR_BUFFER_SIZE = (16 * 1024 * 1024),
_SG_MAX_STORAGEBUFFER_BINDINGS_PER_STAGE = SG_MAX_VIEW_BINDSLOTS,
_SG_MAX_STORAGEIMAGE_BINDINGS_PER_STAGE = SG_MAX_VIEW_BINDSLOTS,
_SG_MAX_TEXTURE_BINDINGS_PER_STAGE = SG_MAX_VIEW_BINDSLOTS,
@@ -18524,7 +18524,7 @@ _SOKOL_PRIVATE void _sg_vk_set_object_label(VkObjectType obj_type, uint64_t obj_
SOKOL_ASSERT(obj_handle != 0);
if (label) {
// FIXME: use vkSetDebugUtilsObjectNamesEXT
- _SOKOL_UNUSED(obj_type);
+ _SOKOL_UNUSED(obj_type && obj_handle && label);
}
}
@@ -19349,13 +19349,14 @@ _SOKOL_PRIVATE VkCommandBuffer _sg_vk_staging_copy_begin(void) {
cmdbuf_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmdbuf_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
VkResult res = vkBeginCommandBuffer(cmd_buf, &cmdbuf_begin_info);
- SOKOL_ASSERT(res == VK_SUCCESS);
+ SOKOL_ASSERT(res == VK_SUCCESS); _SOKOL_UNUSED(res);
return cmd_buf;
}
_SOKOL_PRIVATE void _sg_vk_staging_copy_end(VkCommandBuffer cmd_buf, VkQueue queue) {
SOKOL_ASSERT(cmd_buf && queue);
VkResult res;
+ _SOKOL_UNUSED(res);
vkEndCommandBuffer(cmd_buf);
VkSubmitInfo submit_info;
_sg_clear(&submit_info, sizeof(submit_info));
@@ -19377,7 +19378,7 @@ _SOKOL_PRIVATE void _sg_vk_staging_map_memcpy_unmap(VkDeviceMemory mem, const vo
SOKOL_ASSERT(num_bytes > 0);
void* dst_ptr = 0;
VkResult res = vkMapMemory(_sg.vk.dev, mem, 0, VK_WHOLE_SIZE, 0, &dst_ptr);
- SOKOL_ASSERT((res == VK_SUCCESS) && dst_ptr);
+ SOKOL_ASSERT((res == VK_SUCCESS) && dst_ptr); _SOKOL_UNUSED(res);
memcpy(dst_ptr, ptr, num_bytes);
vkUnmapMemory(_sg.vk.dev, mem);
}
@@ -19394,7 +19395,7 @@ _SOKOL_PRIVATE void _sg_vk_staging_copy_buffer_data(_sg_buffer_t* buf, const sg_
// an initial wait is only needed for updating existing resources but not when populating a new resource
if (initial_wait) {
VkResult res = vkQueueWaitIdle(_sg.vk.queue);
- SOKOL_ASSERT(res == VK_SUCCESS);
+ SOKOL_ASSERT(res == VK_SUCCESS); _SOKOL_UNUSED(res);
}
VkDeviceMemory dst_mem = _sg.vk.stage.copy.mem;
@@ -19460,7 +19461,7 @@ _SOKOL_PRIVATE void _sg_vk_staging_copy_image_data(_sg_image_t* img, const sg_im
// an initial wait is only needed for updating existing resources but not when populating a new resource
if (initial_wait) {
VkResult res = vkQueueWaitIdle(_sg.vk.queue);
- SOKOL_ASSERT(res == VK_SUCCESS);
+ SOKOL_ASSERT(res == VK_SUCCESS); _SOKOL_UNUSED(res);
}
VkDeviceMemory mem = _sg.vk.stage.copy.mem;
@@ -20012,6 +20013,7 @@ _SOKOL_PRIVATE VkCullModeFlags _sg_vk_cullmode(sg_cull_mode cm) {
case SG_CULLMODE_BACK: return VK_CULL_MODE_BACK_BIT;
default:
SOKOL_UNREACHABLE;
+ return VK_CULL_MODE_NONE;
}
}
@@ -20351,7 +20353,7 @@ _SOKOL_PRIVATE void _sg_vk_create_fences(void) {
for (size_t i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
SOKOL_ASSERT(0 == _sg.vk.frame.slot[i].fence);
VkResult res = vkCreateFence(_sg.vk.dev, &create_info, 0, &_sg.vk.frame.slot[i].fence);
- SOKOL_ASSERT((res == VK_SUCCESS) && _sg.vk.frame.slot[i].fence);
+ SOKOL_ASSERT((res == VK_SUCCESS) && _sg.vk.frame.slot[i].fence); _SOKOL_UNUSED(res);
}
}
@@ -20374,7 +20376,7 @@ _SOKOL_PRIVATE void _sg_vk_create_frame_command_pool_and_buffers(void) {
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
pool_create_info.queueFamilyIndex = _sg.vk.queue_family_index;
VkResult res = vkCreateCommandPool(_sg.vk.dev, &pool_create_info, 0, &_sg.vk.frame.cmd_pool);
- SOKOL_ASSERT((res == VK_SUCCESS) && _sg.vk.frame.cmd_pool);
+ SOKOL_ASSERT((res == VK_SUCCESS) && _sg.vk.frame.cmd_pool); _SOKOL_UNUSED(res);
for (size_t i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
VkCommandBufferAllocateInfo cmdbuf_alloc_info;
@@ -20425,7 +20427,7 @@ _SOKOL_PRIVATE void _sg_vk_acquire_frame_command_buffers(void) {
return;
}
VkResult res = vkResetFences(_sg.vk.dev, 1, &_sg.vk.frame.slot[_sg.vk.frame_slot].fence);
- SOKOL_ASSERT(res == VK_SUCCESS);
+ SOKOL_ASSERT(res == VK_SUCCESS); _SOKOL_UNUSED(res);
_sg_vk_delete_queue_collect();
@@ -20456,6 +20458,7 @@ _SOKOL_PRIVATE void _sg_vk_submit_frame_command_buffers(void) {
SOKOL_ASSERT(_sg.vk.frame.cmd_buf);
SOKOL_ASSERT(_sg.vk.frame.stream_cmd_buf);
VkResult res;
+ _SOKOL_UNUSED(res);
_sg_vk_staging_stream_before_submit();
_sg_vk_bind_before_submit();
@@ -21506,10 +21509,6 @@ _SOKOL_PRIVATE void _sg_vk_apply_pipeline(_sg_pipeline_t* pip) {
vkCmdBindPipeline(_sg.vk.frame.cmd_buf, bindpoint, pip->vk.pip);
}
-_SOKOL_PRIVATE void _sg_vk_apply_vertex_buffers(_sg_bindings_ptrs_t* bnd) {
- SOKOL_ASSERT(bnd);
-}
-
_SOKOL_PRIVATE bool _sg_vk_apply_bindings(_sg_bindings_ptrs_t* bnd) {
SOKOL_ASSERT(bnd && bnd->pip);
SOKOL_ASSERT(_sg.vk.dev);