From 3f89ec2866acf848d433ec7c5b9f094e14602bb1 Mon Sep 17 00:00:00 2001 From: Jeroen Bakker Date: Tue, 30 May 2023 13:50:35 +0200 Subject: [PATCH] Vulkan: Copy Between Textures This PR adds support for copying between two textures. Pull Request: https://projects.blender.org/blender/blender/pulls/108413 --- .../vk_mem_alloc_impl.cc | 9 ++++ source/blender/gpu/tests/texture_test.cc | 31 +++++++++++ source/blender/gpu/vulkan/vk_buffer.cc | 4 +- .../blender/gpu/vulkan/vk_command_buffer.cc | 16 ++++++ .../blender/gpu/vulkan/vk_command_buffer.hh | 1 + source/blender/gpu/vulkan/vk_common.cc | 2 +- source/blender/gpu/vulkan/vk_common.hh | 3 +- source/blender/gpu/vulkan/vk_texture.cc | 52 +++++++++++++++++-- 8 files changed, 110 insertions(+), 8 deletions(-) diff --git a/extern/vulkan_memory_allocator/vk_mem_alloc_impl.cc b/extern/vulkan_memory_allocator/vk_mem_alloc_impl.cc index 4542d19b132..67bfafbd05d 100644 --- a/extern/vulkan_memory_allocator/vk_mem_alloc_impl.cc +++ b/extern/vulkan_memory_allocator/vk_mem_alloc_impl.cc @@ -9,4 +9,13 @@ #define VMA_IMPLEMENTATION +/* + * Disabling internal asserts of VMA. + * + * Blender can destroy the logical device before all the resources are freed. This is because static + * resources are freed as a last step during quitting. As long as Vulkan isn't feature complete + * we don't want to change this behavior. So for now we just disable the asserts. 
+ */ +#define VMA_ASSERT(test) + #include "vk_mem_alloc.h" diff --git a/source/blender/gpu/tests/texture_test.cc b/source/blender/gpu/tests/texture_test.cc index 785f37fbcac..4450005d7c4 100644 --- a/source/blender/gpu/tests/texture_test.cc +++ b/source/blender/gpu/tests/texture_test.cc @@ -61,6 +61,37 @@ static void test_texture_read() } GPU_TEST(texture_read) +static void test_texture_copy() +{ + const int SIZE = 128; + GPU_render_begin(); + + eGPUTextureUsage usage = GPU_TEXTURE_USAGE_HOST_READ; + GPUTexture *src_tx = GPU_texture_create_2d("src", SIZE, SIZE, 1, GPU_RGBA32F, usage, nullptr); + GPUTexture *dst_tx = GPU_texture_create_2d("dst", SIZE, SIZE, 1, GPU_RGBA32F, usage, nullptr); + + const float4 color(0.0, 1.0f, 2.0f, 123.0f); + const float4 clear_color(0.0f); + GPU_texture_clear(src_tx, GPU_DATA_FLOAT, color); + GPU_texture_clear(dst_tx, GPU_DATA_FLOAT, clear_color); + + GPU_texture_copy(dst_tx, src_tx); + + GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE); + + float4 *data = (float4 *)GPU_texture_read(dst_tx, GPU_DATA_FLOAT, 0); + for (int index : IndexRange(SIZE * SIZE)) { + EXPECT_EQ(color, data[index]); + } + MEM_freeN(data); + + GPU_texture_free(src_tx); + GPU_texture_free(dst_tx); + + GPU_render_end(); +} +GPU_TEST(texture_copy) + template static DataType *generate_test_data(size_t data_len) { DataType *data = static_cast(MEM_mallocN(data_len * sizeof(DataType), __func__)); diff --git a/source/blender/gpu/vulkan/vk_buffer.cc b/source/blender/gpu/vulkan/vk_buffer.cc index 2bc2fb017b8..d8b315a25e1 100644 --- a/source/blender/gpu/vulkan/vk_buffer.cc +++ b/source/blender/gpu/vulkan/vk_buffer.cc @@ -46,6 +46,8 @@ bool VKBuffer::create(int64_t size_in_bytes, VkBufferUsageFlagBits buffer_usage) { BLI_assert(!is_allocated()); + BLI_assert(vk_buffer_ == VK_NULL_HANDLE); + BLI_assert(mapped_memory_ == nullptr); size_in_bytes_ = size_in_bytes; const VKDevice &device = VKBackend::get().device_get(); @@ -88,7 +90,7 @@ void VKBuffer::update(const void *data) const 
const VKDevice &device = VKBackend::get().device_get(); VmaAllocator allocator = device.mem_allocator_get(); - vmaFlushAllocation(allocator, allocation_, 0, VK_WHOLE_SIZE); + vmaFlushAllocation(allocator, allocation_, 0, max_ii(size_in_bytes(), 1)); } void VKBuffer::clear(VKContext &context, uint32_t clear_value) diff --git a/source/blender/gpu/vulkan/vk_command_buffer.cc b/source/blender/gpu/vulkan/vk_command_buffer.cc index 4ae1f5b2d11..8e4c2e34e63 100644 --- a/source/blender/gpu/vulkan/vk_command_buffer.cc +++ b/source/blender/gpu/vulkan/vk_command_buffer.cc @@ -171,6 +171,7 @@ void VKCommandBuffer::copy(VKBuffer &dst_buffer, regions.size(), regions.data()); } + void VKCommandBuffer::copy(VKTexture &dst_texture, VKBuffer &src_buffer, Span regions) @@ -183,6 +184,21 @@ void VKCommandBuffer::copy(VKTexture &dst_texture, regions.size(), regions.data()); } + +void VKCommandBuffer::copy(VKTexture &dst_texture, + VKTexture &src_texture, + Span regions) +{ + ensure_no_active_framebuffer(); + vkCmdCopyImage(vk_command_buffer_, + src_texture.vk_image_handle(), + src_texture.current_layout_get(), + dst_texture.vk_image_handle(), + dst_texture.current_layout_get(), + regions.size(), + regions.data()); +} + void VKCommandBuffer::blit(VKTexture &dst_texture, VKTexture &src_buffer, Span regions) diff --git a/source/blender/gpu/vulkan/vk_command_buffer.hh b/source/blender/gpu/vulkan/vk_command_buffer.hh index 04d82190841..795cb651c12 100644 --- a/source/blender/gpu/vulkan/vk_command_buffer.hh +++ b/source/blender/gpu/vulkan/vk_command_buffer.hh @@ -162,6 +162,7 @@ class VKCommandBuffer : NonCopyable, NonMovable { /** Copy the contents of a texture MIP level to the dst buffer. 
*/ void copy(VKBuffer &dst_buffer, VKTexture &src_texture, Span regions); void copy(VKTexture &dst_texture, VKBuffer &src_buffer, Span regions); + void copy(VKTexture &dst_texture, VKTexture &src_texture, Span regions); void blit(VKTexture &dst_texture, VKTexture &src_texture, Span regions); void pipeline_barrier(VkPipelineStageFlags source_stages, VkPipelineStageFlags destination_stages); diff --git a/source/blender/gpu/vulkan/vk_common.cc b/source/blender/gpu/vulkan/vk_common.cc index 8e059d40402..0db3091a46d 100644 --- a/source/blender/gpu/vulkan/vk_common.cc +++ b/source/blender/gpu/vulkan/vk_common.cc @@ -654,7 +654,7 @@ template void copy_color(T dst[4], const T *src) VkClearColorValue to_vk_clear_color_value(const eGPUDataFormat format, const void *data) { - VkClearColorValue result = {0.0f}; + VkClearColorValue result = {{0.0f}}; switch (format) { case GPU_DATA_FLOAT: { const float *float_data = static_cast(data); diff --git a/source/blender/gpu/vulkan/vk_common.hh b/source/blender/gpu/vulkan/vk_common.hh index e86b00ee2e2..fa01f406ef3 100644 --- a/source/blender/gpu/vulkan/vk_common.hh +++ b/source/blender/gpu/vulkan/vk_common.hh @@ -97,6 +97,7 @@ template VkObjectType to_vk_object_type(T /*vk_obj*/) return VK_OBJECT_TYPE_UNKNOWN; } -#define NOT_YET_IMPLEMENTED printf("%s not implemented yet\n", __func__); +#define NOT_YET_IMPLEMENTED \ + printf("%s:%d `%s` not implemented yet\n", __FILE__, __LINE__, __func__); } // namespace blender::gpu diff --git a/source/blender/gpu/vulkan/vk_texture.cc b/source/blender/gpu/vulkan/vk_texture.cc index b9a90248549..473a5d90956 100644 --- a/source/blender/gpu/vulkan/vk_texture.cc +++ b/source/blender/gpu/vulkan/vk_texture.cc @@ -37,9 +37,41 @@ void VKTexture::init(VkImage vk_image, VkImageLayout layout) current_layout_ = layout; } -void VKTexture::generate_mipmap() {} +void VKTexture::generate_mipmap() +{ + NOT_YET_IMPLEMENTED +} -void VKTexture::copy_to(Texture * /*tex*/) {} +void VKTexture::copy_to(Texture *tex) +{ + 
VKTexture *dst = unwrap(tex); + VKTexture *src = this; + BLI_assert(dst); + BLI_assert(src->w_ == dst->w_ && src->h_ == dst->h_ && src->d_ == dst->d_); + BLI_assert(src->format_ == dst->format_); + UNUSED_VARS_NDEBUG(src); + + VKContext &context = *VKContext::get(); + ensure_allocated(); + layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); + dst->ensure_allocated(); + dst->layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); + + VkImageCopy region = {}; + region.srcSubresource.aspectMask = to_vk_image_aspect_flag_bits(format_); + region.srcSubresource.mipLevel = 0; + region.srcSubresource.layerCount = 1; + region.dstSubresource.aspectMask = to_vk_image_aspect_flag_bits(format_); + region.dstSubresource.mipLevel = 0; + region.dstSubresource.layerCount = 1; + region.extent.width = w_; + region.extent.height = max_ii(h_, 1); + region.extent.depth = max_ii(d_, 1); + + VKCommandBuffer &command_buffer = context.command_buffer_get(); + command_buffer.copy(*dst, *this, Span(®ion, 1)); + command_buffer.submit(); +} void VKTexture::clear(eGPUDataFormat format, const void *data) { @@ -60,9 +92,15 @@ void VKTexture::clear(eGPUDataFormat format, const void *data) vk_image_, current_layout_get(), clear_color, Span(&range, 1)); } -void VKTexture::swizzle_set(const char /*swizzle_mask*/[4]) {} +void VKTexture::swizzle_set(const char /*swizzle_mask*/[4]) +{ + NOT_YET_IMPLEMENTED; +} -void VKTexture::mip_range_set(int /*min*/, int /*max*/) {} +void VKTexture::mip_range_set(int /*min*/, int /*max*/) +{ + NOT_YET_IMPLEMENTED; +} void VKTexture::read_sub(int mip, eGPUDataFormat format, const int area[4], void *r_data) { @@ -112,7 +150,7 @@ void VKTexture::update_sub( int mip, int offset[3], int extent_[3], eGPUDataFormat format, const void *data) { if (mip != 0) { - /* TODO: not implemented yet. 
*/ + NOT_YET_IMPLEMENTED; return; } if (!is_allocated()) { @@ -165,6 +203,7 @@ void VKTexture::update_sub(int /*offset*/[3], eGPUDataFormat /*format*/, GPUPixelBuffer * /*pixbuf*/) { + NOT_YET_IMPLEMENTED; } /* TODO(fclem): Legacy. Should be removed at some point. */ @@ -185,6 +224,7 @@ bool VKTexture::init_internal() bool VKTexture::init_internal(GPUVertBuf * /*vbo*/) { + NOT_YET_IMPLEMENTED; return false; } @@ -193,6 +233,7 @@ bool VKTexture::init_internal(GPUTexture * /*src*/, int /*layer_offset*/, bool /*use_stencil*/) { + NOT_YET_IMPLEMENTED; return false; } @@ -382,6 +423,7 @@ void VKTexture::current_layout_set(const VkImageLayout new_layout) void VKTexture::layout_ensure(VKContext &context, const VkImageLayout requested_layout) { + BLI_assert(is_allocated()); const VkImageLayout current_layout = current_layout_get(); if (current_layout == requested_layout) { return;