Vulkan: Copy Between Textures

This PR adds support for copying the contents of one texture to another in the Vulkan backend.

Pull Request: https://projects.blender.org/blender/blender/pulls/108413
Jeroen Bakker 2023-05-30 13:50:35 +02:00
parent e0ad6f1fd8
commit 3f89ec2866
8 changed files with 110 additions and 8 deletions
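
At the GPU module level the new path is reached through GPU_texture_copy(). A minimal usage sketch, mirroring the texture_copy test added in this commit (the size and format below are illustrative, taken from that test):

  /* Both textures must have the same size and format; VKTexture::copy_to() asserts this. */
  GPUTexture *src = GPU_texture_create_2d(
      "src", 128, 128, 1, GPU_RGBA32F, GPU_TEXTURE_USAGE_HOST_READ, nullptr);
  GPUTexture *dst = GPU_texture_create_2d(
      "dst", 128, 128, 1, GPU_RGBA32F, GPU_TEXTURE_USAGE_HOST_READ, nullptr);
  GPU_texture_copy(dst, src);
  GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE);
  /* ... read back or render with dst ... */
  GPU_texture_free(src);
  GPU_texture_free(dst);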

View File

@@ -9,4 +9,13 @@
#define VMA_IMPLEMENTATION
/*
* Disabling internal asserts of VMA.
*
 * Blender can destroy the logical device before all the resources are freed. This is because
 * static resources are freed as a last step during quitting. As long as Vulkan isn't feature
 * complete, we don't want to change this behavior, so for now we just disable the asserts.
*/
#define VMA_ASSERT(test)
#include "vk_mem_alloc.h"

View File

@@ -61,6 +61,37 @@ static void test_texture_read()
}
GPU_TEST(texture_read)
static void test_texture_copy()
{
const int SIZE = 128;
GPU_render_begin();
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *src_tx = GPU_texture_create_2d("src", SIZE, SIZE, 1, GPU_RGBA32F, usage, nullptr);
GPUTexture *dst_tx = GPU_texture_create_2d("dst", SIZE, SIZE, 1, GPU_RGBA32F, usage, nullptr);
const float4 color(0.0f, 1.0f, 2.0f, 123.0f);
const float4 clear_color(0.0f);
GPU_texture_clear(src_tx, GPU_DATA_FLOAT, color);
GPU_texture_clear(dst_tx, GPU_DATA_FLOAT, clear_color);
GPU_texture_copy(dst_tx, src_tx);
GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE);
float4 *data = (float4 *)GPU_texture_read(dst_tx, GPU_DATA_FLOAT, 0);
for (int index : IndexRange(SIZE * SIZE)) {
EXPECT_EQ(color, data[index]);
}
MEM_freeN(data);
GPU_texture_free(src_tx);
GPU_texture_free(dst_tx);
GPU_render_end();
}
GPU_TEST(texture_copy)
template<typename DataType> static DataType *generate_test_data(size_t data_len)
{
DataType *data = static_cast<DataType *>(MEM_mallocN(data_len * sizeof(DataType), __func__));

View File

@@ -46,6 +46,8 @@ bool VKBuffer::create(int64_t size_in_bytes,
VkBufferUsageFlagBits buffer_usage)
{
BLI_assert(!is_allocated());
BLI_assert(vk_buffer_ == VK_NULL_HANDLE);
BLI_assert(mapped_memory_ == nullptr);
size_in_bytes_ = size_in_bytes;
const VKDevice &device = VKBackend::get().device_get();
@@ -88,7 +90,7 @@ void VKBuffer::update(const void *data) const
const VKDevice &device = VKBackend::get().device_get();
VmaAllocator allocator = device.mem_allocator_get();
vmaFlushAllocation(allocator, allocation_, 0, VK_WHOLE_SIZE);
vmaFlushAllocation(allocator, allocation_, 0, max_ii(size_in_bytes(), 1));
}
void VKBuffer::clear(VKContext &context, uint32_t clear_value)

View File

@@ -171,6 +171,7 @@ void VKCommandBuffer::copy(VKBuffer &dst_buffer,
regions.size(),
regions.data());
}
void VKCommandBuffer::copy(VKTexture &dst_texture,
VKBuffer &src_buffer,
Span<VkBufferImageCopy> regions)
@@ -183,6 +184,21 @@ void VKCommandBuffer::copy(VKTexture &dst_texture,
regions.size(),
regions.data());
}
void VKCommandBuffer::copy(VKTexture &dst_texture,
VKTexture &src_texture,
Span<VkImageCopy> regions)
{
ensure_no_active_framebuffer();
vkCmdCopyImage(vk_command_buffer_,
src_texture.vk_image_handle(),
src_texture.current_layout_get(),
dst_texture.vk_image_handle(),
dst_texture.current_layout_get(),
regions.size(),
regions.data());
}
void VKCommandBuffer::blit(VKTexture &dst_texture,
VKTexture &src_buffer,
Span<VkImageBlit> regions)

View File

@@ -162,6 +162,7 @@ class VKCommandBuffer : NonCopyable, NonMovable {
/** Copy the contents of a texture MIP level to the dst buffer. */
void copy(VKBuffer &dst_buffer, VKTexture &src_texture, Span<VkBufferImageCopy> regions);
void copy(VKTexture &dst_texture, VKBuffer &src_buffer, Span<VkBufferImageCopy> regions);
void copy(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageCopy> regions);
void blit(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageBlit> regions);
void pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages);

View File

@@ -654,7 +654,7 @@ template<typename T> void copy_color(T dst[4], const T *src)
VkClearColorValue to_vk_clear_color_value(const eGPUDataFormat format, const void *data)
{
VkClearColorValue result = {0.0f};
VkClearColorValue result = {{0.0f}};
switch (format) {
case GPU_DATA_FLOAT: {
const float *float_data = static_cast<const float *>(data);

View File

@@ -97,6 +97,7 @@ template<typename T> VkObjectType to_vk_object_type(T /*vk_obj*/)
return VK_OBJECT_TYPE_UNKNOWN;
}
#define NOT_YET_IMPLEMENTED printf("%s not implemented yet\n", __func__);
#define NOT_YET_IMPLEMENTED \
printf("%s:%d `%s` not implemented yet\n", __FILE__, __LINE__, __func__);
} // namespace blender::gpu

View File

@@ -37,9 +37,41 @@ void VKTexture::init(VkImage vk_image, VkImageLayout layout)
current_layout_ = layout;
}
void VKTexture::generate_mipmap() {}
void VKTexture::generate_mipmap()
{
NOT_YET_IMPLEMENTED
}
void VKTexture::copy_to(Texture * /*tex*/) {}
void VKTexture::copy_to(Texture *tex)
{
VKTexture *dst = unwrap(tex);
VKTexture *src = this;
BLI_assert(dst);
BLI_assert(src->w_ == dst->w_ && src->h_ == dst->h_ && src->d_ == dst->d_);
BLI_assert(src->format_ == dst->format_);
UNUSED_VARS_NDEBUG(src);
VKContext &context = *VKContext::get();
ensure_allocated();
layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
dst->ensure_allocated();
dst->layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VkImageCopy region = {};
region.srcSubresource.aspectMask = to_vk_image_aspect_flag_bits(format_);
region.srcSubresource.mipLevel = 0;
region.srcSubresource.layerCount = 1;
region.dstSubresource.aspectMask = to_vk_image_aspect_flag_bits(format_);
region.dstSubresource.mipLevel = 0;
region.dstSubresource.layerCount = 1;
region.extent.width = w_;
region.extent.height = max_ii(h_, 1);
region.extent.depth = max_ii(d_, 1);
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.copy(*dst, *this, Span<VkImageCopy>(&region, 1));
command_buffer.submit();
}
void VKTexture::clear(eGPUDataFormat format, const void *data)
{
@@ -60,9 +92,15 @@ void VKTexture::clear(eGPUDataFormat format, const void *data)
vk_image_, current_layout_get(), clear_color, Span<VkImageSubresourceRange>(&range, 1));
}
void VKTexture::swizzle_set(const char /*swizzle_mask*/[4]) {}
void VKTexture::swizzle_set(const char /*swizzle_mask*/[4])
{
NOT_YET_IMPLEMENTED;
}
void VKTexture::mip_range_set(int /*min*/, int /*max*/) {}
void VKTexture::mip_range_set(int /*min*/, int /*max*/)
{
NOT_YET_IMPLEMENTED;
}
void VKTexture::read_sub(int mip, eGPUDataFormat format, const int area[4], void *r_data)
{
@@ -112,7 +150,7 @@ void VKTexture::update_sub(
int mip, int offset[3], int extent_[3], eGPUDataFormat format, const void *data)
{
if (mip != 0) {
/* TODO: not implemented yet. */
NOT_YET_IMPLEMENTED;
return;
}
if (!is_allocated()) {
@@ -165,6 +203,7 @@ void VKTexture::update_sub(int /*offset*/[3],
eGPUDataFormat /*format*/,
GPUPixelBuffer * /*pixbuf*/)
{
NOT_YET_IMPLEMENTED;
}
/* TODO(fclem): Legacy. Should be removed at some point. */
@@ -185,6 +224,7 @@ bool VKTexture::init_internal()
bool VKTexture::init_internal(GPUVertBuf * /*vbo*/)
{
NOT_YET_IMPLEMENTED;
return false;
}
@@ -193,6 +233,7 @@ bool VKTexture::init_internal(GPUTexture * /*src*/,
int /*layer_offset*/,
bool /*use_stencil*/)
{
NOT_YET_IMPLEMENTED;
return false;
}
@@ -382,6 +423,7 @@ void VKTexture::current_layout_set(const VkImageLayout new_layout)
void VKTexture::layout_ensure(VKContext &context, const VkImageLayout requested_layout)
{
BLI_assert(is_allocated());
const VkImageLayout current_layout = current_layout_get();
if (current_layout == requested_layout) {
return;