diff --git a/source/blender/gpu/vulkan/vk_batch.cc b/source/blender/gpu/vulkan/vk_batch.cc
index d5f3c23cf9b..9f94042c666 100644
--- a/source/blender/gpu/vulkan/vk_batch.cc
+++ b/source/blender/gpu/vulkan/vk_batch.cc
@@ -11,12 +11,13 @@
 #include "vk_context.hh"
 #include "vk_index_buffer.hh"
 #include "vk_state_manager.hh"
+#include "vk_storage_buffer.hh"
 #include "vk_vertex_attribute_object.hh"
 #include "vk_vertex_buffer.hh"
 
 namespace blender::gpu {
 
-void VKBatch::draw(int vertex_first, int vertex_count, int instance_first, int instance_count)
+void VKBatch::draw_setup()
 {
   /* Currently the pipeline is rebuild on each draw command. Clearing the dirty flag for
    * consistency with the internals of GPU module. */
@@ -38,26 +39,54 @@ void VKBatch::draw(int vertex_first, int vertex_count, int instance_first, int i
   if (draw_indexed) {
     index_buffer->upload_data();
     index_buffer->bind(context);
-    context.command_buffer_get().draw(index_buffer->index_len_get(),
-                                      instance_count,
-                                      index_buffer->index_start_get(),
-                                      vertex_first,
-                                      instance_first);
   }
-  else {
-    context.command_buffer_get().draw(vertex_first, vertex_count, instance_first, instance_count);
-  }
-
-  context.command_buffer_get().submit();
 }
 
-void VKBatch::draw_indirect(GPUStorageBuf * /*indirect_buf*/, intptr_t /*offset*/) {}
-
-void VKBatch::multi_draw_indirect(GPUStorageBuf * /*indirect_buf*/,
-                                  int /*count*/,
-                                  intptr_t /*offset*/,
-                                  intptr_t /*stride*/)
+void VKBatch::draw(int vertex_first, int vertex_count, int instance_first, int instance_count)
 {
+  draw_setup();
+
+  VKContext &context = *VKContext::get();
+  VKCommandBuffer &command_buffer = context.command_buffer_get();
+  VKIndexBuffer *index_buffer = index_buffer_get();
+  const bool draw_indexed = index_buffer != nullptr;
+  if (draw_indexed) {
+    command_buffer.draw_indexed(index_buffer->index_len_get(),
+                                instance_count,
+                                index_buffer->index_start_get(),
+                                vertex_first,
+                                instance_first);
+  }
+  else {
+    command_buffer.draw(vertex_first, vertex_count, instance_first, instance_count);
+  }
+
+  command_buffer.submit();
+}
+
+void VKBatch::draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset)
+{
+  multi_draw_indirect(indirect_buf, 1, offset, 0);
+}
+
+void VKBatch::multi_draw_indirect(GPUStorageBuf *indirect_buf,
+                                  int count,
+                                  intptr_t offset,
+                                  intptr_t stride)
+{
+  draw_setup();
+
+  VKStorageBuffer &indirect_buffer = *unwrap(unwrap(indirect_buf));
+  VKContext &context = *VKContext::get();
+  const bool draw_indexed = index_buffer_get() != nullptr;
+  VKCommandBuffer &command_buffer = context.command_buffer_get();
+  if (draw_indexed) {
+    command_buffer.draw_indexed_indirect(indirect_buffer, offset, count, stride);
+  }
+  else {
+    command_buffer.draw_indirect(indirect_buffer, offset, count, stride);
+  }
+  command_buffer.submit();
 }
 
 VKVertexBuffer *VKBatch::vertex_buffer_get(int index)
diff --git a/source/blender/gpu/vulkan/vk_batch.hh b/source/blender/gpu/vulkan/vk_batch.hh
index ee37988b629..e3666752aa8 100644
--- a/source/blender/gpu/vulkan/vk_batch.hh
+++ b/source/blender/gpu/vulkan/vk_batch.hh
@@ -26,6 +26,9 @@ class VKBatch : public Batch {
   VKVertexBuffer *vertex_buffer_get(int index);
   VKVertexBuffer *instance_buffer_get(int index);
   VKIndexBuffer *index_buffer_get();
+
+ private:
+  void draw_setup();
 };
 
 }  // namespace blender::gpu
diff --git a/source/blender/gpu/vulkan/vk_command_buffer.cc b/source/blender/gpu/vulkan/vk_command_buffer.cc
index 8e6c229a904..59cc5dfb8b1 100644
--- a/source/blender/gpu/vulkan/vk_command_buffer.cc
+++ b/source/blender/gpu/vulkan/vk_command_buffer.cc
@@ -259,7 +259,7 @@ void VKCommandBuffer::draw(int v_first, int v_count, int i_first, int i_count)
   state.draw_counts++;
 }
 
-void VKCommandBuffer::draw(
+void VKCommandBuffer::draw_indexed(
     int index_count, int instance_count, int first_index, int vertex_offset, int first_instance)
 {
   validate_framebuffer_exists();
@@ -269,6 +269,29 @@
   state.draw_counts++;
 }
 
+void VKCommandBuffer::draw_indirect(const VKStorageBuffer &buffer,
+                                    VkDeviceSize offset,
+                                    uint32_t draw_count,
+                                    uint32_t stride)
+{
+  validate_framebuffer_exists();
+  ensure_active_framebuffer();
+  vkCmdDrawIndirect(vk_command_buffer_, buffer.vk_handle(), offset, draw_count, stride);
+  state.draw_counts++;
+}
+
+void VKCommandBuffer::draw_indexed_indirect(const VKStorageBuffer &buffer,
+
+                                            VkDeviceSize offset,
+                                            uint32_t draw_count,
+                                            uint32_t stride)
+{
+  validate_framebuffer_exists();
+  ensure_active_framebuffer();
+  vkCmdDrawIndexedIndirect(vk_command_buffer_, buffer.vk_handle(), offset, draw_count, stride);
+  state.draw_counts++;
+}
+
 void VKCommandBuffer::pipeline_barrier(VkPipelineStageFlags source_stages,
                                        VkPipelineStageFlags destination_stages)
 {
diff --git a/source/blender/gpu/vulkan/vk_command_buffer.hh b/source/blender/gpu/vulkan/vk_command_buffer.hh
index d03dd8b427f..e7c103cc81e 100644
--- a/source/blender/gpu/vulkan/vk_command_buffer.hh
+++ b/source/blender/gpu/vulkan/vk_command_buffer.hh
@@ -190,9 +190,18 @@ class VKCommandBuffer : NonCopyable, NonMovable {
   void fill(VKBuffer &buffer, uint32_t data);
 
   void draw(int v_first, int v_count, int i_first, int i_count);
-  void draw(
+  void draw_indexed(
       int index_count, int instance_count, int first_index, int vertex_offset, int first_instance);
+  void draw_indirect(const VKStorageBuffer &buffer,
+                     VkDeviceSize offset,
+                     uint32_t draw_count,
+                     uint32_t stride);
+  void draw_indexed_indirect(const VKStorageBuffer &buffer,
+                             VkDeviceSize offset,
+                             uint32_t draw_count,
+                             uint32_t stride);
+
  /**
   * Stop recording commands, encode + send the recordings to Vulkan, wait for the until the
   * commands have been executed and start the command buffer to accept recordings again.