Fix: High-resolution textures in Metal failing due to integer overflow

Resolve an issue where a high-resolution texture (16k x 8k)
did not update in Metal due to integer overflow of the size parameter.

This patch contains several changes to ensure size correctness
across multiple use cases within the Metal backend.

Authored by Apple: Michael Parkin-White

Pull Request: https://projects.blender.org/blender/blender/pulls/108238
This commit is contained in:
Jason Fielder 2023-05-25 08:50:14 +02:00 committed by Jeroen Bakker
parent 92b4e74985
commit 03e4325fa9
21 changed files with 121 additions and 117 deletions

View File

@ -1081,7 +1081,7 @@ typedef struct GPUPixelBuffer GPUPixelBuffer;
/**
* Creates a #GPUPixelBuffer object with \a byte_size worth of storage.
*/
GPUPixelBuffer *GPU_pixel_buffer_create(uint byte_size);
GPUPixelBuffer *GPU_pixel_buffer_create(size_t byte_size);
/**
* Free a #GPUPixelBuffer object.
@ -1106,7 +1106,7 @@ void GPU_pixel_buffer_unmap(GPUPixelBuffer *pixel_buf);
/**
* Return size in bytes of the \a pix_buf .
*/
uint GPU_pixel_buffer_size(GPUPixelBuffer *pixel_buf);
size_t GPU_pixel_buffer_size(GPUPixelBuffer *pixel_buf);
/**
* Return the native handle of the \a pix_buf to use for graphic interoperability registration.

View File

@ -956,7 +956,7 @@ void GPU_texture_get_mipmap_size(GPUTexture *tex, int lvl, int *r_size)
* Pixel buffer utility functions.
* \{ */
GPUPixelBuffer *GPU_pixel_buffer_create(uint size)
GPUPixelBuffer *GPU_pixel_buffer_create(size_t size)
{
/* Ensure buffer satisfies the alignment of 256 bytes for copying
* data between buffers and textures. As specified in:
@ -985,7 +985,7 @@ void GPU_pixel_buffer_unmap(GPUPixelBuffer *pix_buf)
reinterpret_cast<PixelBuffer *>(pix_buf)->unmap();
}
uint GPU_pixel_buffer_size(GPUPixelBuffer *pix_buf)
size_t GPU_pixel_buffer_size(GPUPixelBuffer *pix_buf)
{
return reinterpret_cast<PixelBuffer *>(pix_buf)->get_size();
}

View File

@ -333,16 +333,16 @@ static inline const Texture *unwrap(const GPUTexture *vert)
/* GPU pixel Buffer. */
class PixelBuffer {
protected:
uint size_ = 0;
size_t size_ = 0;
public:
PixelBuffer(uint size) : size_(size){};
PixelBuffer(size_t size) : size_(size){};
virtual ~PixelBuffer(){};
virtual void *map() = 0;
virtual void unmap() = 0;
virtual int64_t get_native_handle() = 0;
virtual uint get_size() = 0;
virtual size_t get_size() = 0;
};
/* Syntactic sugar. */

View File

@ -828,7 +828,9 @@ void MTLComputeState::bind_compute_sampler(MTLSamplerBinding &sampler_binding,
}
}
void MTLRenderPassState::bind_vertex_buffer(id<MTLBuffer> buffer, uint buffer_offset, uint index)
void MTLRenderPassState::bind_vertex_buffer(id<MTLBuffer> buffer,
uint64_t buffer_offset,
uint index)
{
BLI_assert(index >= 0 && index < MTL_MAX_BUFFER_BINDINGS);
BLI_assert(buffer_offset >= 0);
@ -858,7 +860,9 @@ void MTLRenderPassState::bind_vertex_buffer(id<MTLBuffer> buffer, uint buffer_of
}
}
void MTLRenderPassState::bind_fragment_buffer(id<MTLBuffer> buffer, uint buffer_offset, uint index)
void MTLRenderPassState::bind_fragment_buffer(id<MTLBuffer> buffer,
uint64_t buffer_offset,
uint index)
{
BLI_assert(index >= 0 && index < MTL_MAX_BUFFER_BINDINGS);
BLI_assert(buffer_offset >= 0);
@ -889,7 +893,7 @@ void MTLRenderPassState::bind_fragment_buffer(id<MTLBuffer> buffer, uint buffer_
}
void MTLComputeState::bind_compute_buffer(id<MTLBuffer> buffer,
uint buffer_offset,
uint64_t buffer_offset,
uint index,
bool writeable)
{
@ -924,7 +928,7 @@ void MTLComputeState::bind_compute_buffer(id<MTLBuffer> buffer,
}
}
void MTLRenderPassState::bind_vertex_bytes(void *bytes, uint length, uint index)
void MTLRenderPassState::bind_vertex_bytes(void *bytes, uint64_t length, uint index)
{
/* Bytes always updated as source data may have changed. */
BLI_assert(index >= 0 && index < MTL_MAX_BUFFER_BINDINGS);
@ -949,7 +953,7 @@ void MTLRenderPassState::bind_vertex_bytes(void *bytes, uint length, uint index)
this->cached_vertex_buffer_bindings[index].offset = -1;
}
void MTLRenderPassState::bind_fragment_bytes(void *bytes, uint length, uint index)
void MTLRenderPassState::bind_fragment_bytes(void *bytes, uint64_t length, uint index)
{
/* Bytes always updated as source data may have changed. */
BLI_assert(index >= 0 && index < MTL_MAX_BUFFER_BINDINGS);
@ -974,7 +978,7 @@ void MTLRenderPassState::bind_fragment_bytes(void *bytes, uint length, uint inde
this->cached_fragment_buffer_bindings[index].offset = -1;
}
void MTLComputeState::bind_compute_bytes(void *bytes, uint length, uint index)
void MTLComputeState::bind_compute_bytes(void *bytes, uint64_t length, uint index)
{
/* Bytes always updated as source data may have changed. */
BLI_assert(index >= 0 && index < MTL_MAX_BUFFER_BINDINGS);

View File

@ -86,7 +86,7 @@ struct BufferBindingCached {
* or an MTLBuffer. */
bool is_bytes;
id<MTLBuffer> metal_buffer;
int offset;
uint64_t offset;
};
/* Caching of CommandEncoder textures bindings. */
@ -144,10 +144,10 @@ class MTLRenderPassState {
uint slot);
/* Buffer binding (RenderCommandEncoder). */
void bind_vertex_buffer(id<MTLBuffer> buffer, uint buffer_offset, uint index);
void bind_fragment_buffer(id<MTLBuffer> buffer, uint buffer_offset, uint index);
void bind_vertex_bytes(void *bytes, uint length, uint index);
void bind_fragment_bytes(void *bytes, uint length, uint index);
void bind_vertex_buffer(id<MTLBuffer> buffer, uint64_t buffer_offset, uint index);
void bind_fragment_buffer(id<MTLBuffer> buffer, uint64_t buffer_offset, uint index);
void bind_vertex_bytes(void *bytes, uint64_t length, uint index);
void bind_fragment_bytes(void *bytes, uint64_t length, uint index);
};
/* Metal Context Compute Pass State -- Used to track active ComputeCommandEncoder state. */
@ -182,10 +182,10 @@ class MTLComputeState {
uint slot);
/* Buffer binding (ComputeCommandEncoder). */
void bind_compute_buffer(id<MTLBuffer> buffer,
uint buffer_offset,
uint64_t buffer_offset,
uint index,
bool writeable = false);
void bind_compute_bytes(void *bytes, uint length, uint index);
void bind_compute_bytes(void *bytes, uint64_t length, uint index);
};
/* Depth Stencil State */

View File

@ -1148,7 +1148,7 @@ bool MTLContext::ensure_buffer_bindings(
/* buffer(N) index of where to bind the UBO. */
const uint32_t buffer_index = ubo.buffer_index;
id<MTLBuffer> ubo_buffer = nil;
int ubo_size = 0;
size_t ubo_size = 0;
bool bind_dummy_buffer = false;
if (this->pipeline_state.ubo_bindings[ubo_location].bound) {
@ -1196,7 +1196,7 @@ bool MTLContext::ensure_buffer_bindings(
if (ubo_size < expected_size) {
MTL_LOG_UBO_ERROR(
"[Error][UBO] UBO (UBO Name: %s) bound at location: %d (buffer[[%d]]) with size "
"%d (Expected size "
"%lu (Expected size "
"%d) (Shader Name: %s) is too small -- binding NULL buffer. This is likely an "
"over-binding, which is not used, but we need this to avoid validation "
"issues\n",
@ -1270,7 +1270,7 @@ bool MTLContext::ensure_buffer_bindings(
/* buffer(N) index of where to bind the SSBO. */
const uint32_t buffer_index = ssbo.buffer_index;
id<MTLBuffer> ssbo_buffer = nil;
int ssbo_size = 0;
size_t ssbo_size = 0;
UNUSED_VARS_NDEBUG(ssbo_size);
if (this->pipeline_state.ssbo_bindings[ssbo_location].bound) {
@ -1378,7 +1378,7 @@ bool MTLContext::ensure_buffer_bindings(
/* buffer(N) index of where to bind the UBO. */
const uint32_t buffer_index = ubo.buffer_index;
id<MTLBuffer> ubo_buffer = nil;
int ubo_size = 0;
size_t ubo_size = 0;
bool bind_dummy_buffer = false;
if (this->pipeline_state.ubo_bindings[ubo_location].bound) {
@ -1700,12 +1700,12 @@ void MTLContext::ensure_texture_bindings(
}
else {
/* Populate argument buffer with current global sampler bindings. */
int size = [argument_encoder encodedLength];
int alignment = max_uu([argument_encoder alignment], 256);
int size_align_delta = (size % alignment);
int aligned_alloc_size = ((alignment > 1) && (size_align_delta > 0)) ?
size + (alignment - (size % alignment)) :
size;
size_t size = [argument_encoder encodedLength];
size_t alignment = max_uu([argument_encoder alignment], 256);
size_t size_align_delta = (size % alignment);
size_t aligned_alloc_size = ((alignment > 1) && (size_align_delta > 0)) ?
size + (alignment - (size % alignment)) :
size;
/* Allocate buffer to store encoded sampler arguments. */
encoder_buffer = MTLContext::get_global_memory_manager()->allocate(aligned_alloc_size,
@ -1921,12 +1921,12 @@ void MTLContext::ensure_texture_bindings(
}
else {
/* Populate argument buffer with current global sampler bindings. */
int size = [argument_encoder encodedLength];
int alignment = max_uu([argument_encoder alignment], 256);
int size_align_delta = (size % alignment);
int aligned_alloc_size = ((alignment > 1) && (size_align_delta > 0)) ?
size + (alignment - (size % alignment)) :
size;
size_t size = [argument_encoder encodedLength];
size_t alignment = max_uu([argument_encoder alignment], 256);
size_t size_align_delta = (size % alignment);
size_t aligned_alloc_size = ((alignment > 1) && (size_align_delta > 0)) ?
size + (alignment - (size % alignment)) :
size;
/* Allocate buffer to store encoded sampler arguments. */
encoder_buffer = MTLContext::get_global_memory_manager()->allocate(aligned_alloc_size,

View File

@ -494,7 +494,7 @@ void MTLFrameBuffer::read(eGPUFrameBufferBits planes,
if (tex) {
size_t sample_len = area[2] * area[3];
size_t sample_size = to_bytesize(tex->format_, format);
int debug_data_size = sample_len * sample_size;
size_t debug_data_size = sample_len * sample_size;
tex->read_internal(0,
area[0],
area[1],
@ -523,7 +523,7 @@ void MTLFrameBuffer::read(eGPUFrameBufferBits planes,
if (tex) {
size_t sample_len = area[2] * area[3];
size_t sample_size = to_bytesize(tex->format_, format);
int debug_data_size = sample_len * sample_size * channel_len;
size_t debug_data_size = sample_len * sample_size * channel_len;
tex->read_internal(0,
area[0],
area[1],

View File

@ -322,7 +322,7 @@ void MTLImmediate::end()
@autoreleasepool {
id<MTLBuffer> index_buffer_mtl = nil;
uint32_t index_buffer_offset = 0;
uint64_t index_buffer_offset = 0;
/* Region of scratch buffer used for topology emulation element data.
* NOTE(Metal): We do not need to manually flush as the entire scratch

View File

@ -777,7 +777,7 @@ MTLTemporaryBuffer MTLCircularBuffer::allocate_range_aligned(uint64_t alloc_size
/* Ensure alignment of an allocation is aligned to compatible offset boundaries. */
BLI_assert(alignment > 0);
alignment = max_ulul(alignment, 256);
alignment = max_uu(alignment, 256);
/* Align current offset and allocation size to desired alignment */
uint64_t aligned_current_offset = ceil_to_multiple_ul(current_offset_, alignment);

View File

@ -71,7 +71,7 @@ class MTLStorageBuf : public StorageBuf {
void init();
id<MTLBuffer> get_metal_buffer();
int get_size();
size_t get_size();
const char *get_name()
{
return name_;

View File

@ -296,7 +296,7 @@ id<MTLBuffer> MTLStorageBuf::get_metal_buffer()
return source_buffer->get_metal_buffer();
}
int MTLStorageBuf::get_size()
size_t MTLStorageBuf::get_size()
{
BLI_assert(this);
return size_in_bytes_;

View File

@ -184,7 +184,7 @@ class MTLTexture : public Texture {
/* Texture Storage. */
id<MTLBuffer> texture_buffer_ = nil;
uint aligned_w_ = 0;
size_t aligned_w_ = 0;
/* Blit Frame-buffer. */
GPUFrameBuffer *blit_fb_ = nullptr;
@ -314,7 +314,7 @@ class MTLTexture : public Texture {
int depth,
eGPUDataFormat desired_output_format,
int num_output_components,
int debug_data_size,
size_t debug_data_size,
void *r_data);
void bake_mip_swizzle_view();
@ -453,7 +453,7 @@ class MTLPixelBuffer : public PixelBuffer {
void *map() override;
void unmap() override;
int64_t get_native_handle() override;
uint get_size() override;
size_t get_size() override;
id<MTLBuffer> get_metal_buffer();
@ -462,7 +462,7 @@ class MTLPixelBuffer : public PixelBuffer {
/* Utility */
MTLPixelFormat gpu_texture_format_to_metal(eGPUTextureFormat tex_format);
int get_mtl_format_bytesize(MTLPixelFormat tex_format);
size_t get_mtl_format_bytesize(MTLPixelFormat tex_format);
int get_mtl_format_num_components(MTLPixelFormat tex_format);
bool mtl_format_supports_blending(MTLPixelFormat format);

View File

@ -81,7 +81,7 @@ gpu::MTLTexture::MTLTexture(const char *name,
BLI_assert(metal_texture != nil);
BLI_assert(type == GPU_TEXTURE_2D);
type_ = type;
init_2D(metal_texture.width, metal_texture.height, 0, 1, format);
init_2D((int)metal_texture.width, (int)metal_texture.height, 0, 1, format);
/* Assign MTLTexture. */
texture_ = metal_texture;
@ -160,7 +160,7 @@ void gpu::MTLTexture::bake_mip_swizzle_view()
}
int range_len = min_ii((mip_texture_max_level_ - mip_texture_base_level_) + 1,
texture_.mipmapLevelCount - mip_texture_base_level_);
(int)texture_.mipmapLevelCount - mip_texture_base_level_);
BLI_assert(range_len > 0);
BLI_assert(mip_texture_base_level_ < texture_.mipmapLevelCount);
BLI_assert(mip_texture_base_layer_ < num_slices);
@ -173,7 +173,7 @@ void gpu::MTLTexture::bake_mip_swizzle_view()
MTL_LOG_INFO(
"Updating texture view - MIP TEXTURE BASE LEVEL: %d, MAX LEVEL: %d (Range len: %d)\n",
mip_texture_base_level_,
min_ii(mip_texture_max_level_, texture_.mipmapLevelCount),
min_ii(mip_texture_max_level_, (int)texture_.mipmapLevelCount),
range_len);
mip_swizzle_view_.label = [texture_ label];
texture_view_dirty_flags_ = TEXTURE_VIEW_NOT_DIRTY;
@ -472,25 +472,26 @@ void gpu::MTLTexture::update_sub(
@autoreleasepool {
/* Determine totalsize of INPUT Data. */
int num_channels = to_component_len(format_);
int input_bytes_per_pixel = to_bytesize(format_, type);
int totalsize = 0;
size_t input_bytes_per_pixel = to_bytesize(format_, type);
size_t totalsize = 0;
/* If unpack row length is used, size of input data uses the unpack row length, rather than the
* image length. */
int expected_update_w = ((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
size_t expected_update_w = ((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
/* Ensure calculated total size isn't larger than remaining image data size */
switch (this->dimensions_count()) {
case 1:
totalsize = input_bytes_per_pixel * max_ii(expected_update_w, 1);
totalsize = input_bytes_per_pixel * max_ulul(expected_update_w, 1);
break;
case 2:
totalsize = input_bytes_per_pixel * max_ii(expected_update_w, 1) * extent[1];
totalsize = input_bytes_per_pixel * max_ulul(expected_update_w, 1) * (size_t)extent[1];
break;
case 3:
totalsize = input_bytes_per_pixel * max_ii(expected_update_w, 1) * extent[1] * extent[2];
totalsize = input_bytes_per_pixel * max_ulul(expected_update_w, 1) * (size_t)extent[1] *
(size_t)extent[2];
break;
default:
BLI_assert(false);
@ -522,7 +523,7 @@ void gpu::MTLTexture::update_sub(
/* Determine expected destination data size. */
MTLPixelFormat destination_format = gpu_texture_format_to_metal(format_);
int expected_dst_bytes_per_pixel = get_mtl_format_bytesize(destination_format);
size_t expected_dst_bytes_per_pixel = get_mtl_format_bytesize(destination_format);
int destination_num_channels = get_mtl_format_num_components(destination_format);
/* Prepare specialization struct (For texture update routine). */
@ -567,8 +568,8 @@ void gpu::MTLTexture::update_sub(
/* Debug and verification. */
if (!can_use_direct_blit) {
MTL_LOG_WARNING(
"gpu::MTLTexture::update_sub supplied bpp is %d bytes (%d components per "
"pixel), but backing texture bpp is %d bytes (%d components per pixel) "
"gpu::MTLTexture::update_sub supplied bpp is %lu bytes (%d components per "
"pixel), but backing texture bpp is %lu bytes (%d components per pixel) "
"(TODO(Metal): Channel Conversion needed) (w: %d, h: %d, d: %d)\n",
input_bytes_per_pixel,
num_channels,
@ -687,15 +688,15 @@ void gpu::MTLTexture::update_sub(
case GPU_TEXTURE_1D_ARRAY: {
if (can_use_direct_blit) {
/* Use Blit based update. */
int bytes_per_row = expected_dst_bytes_per_pixel *
((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
int bytes_per_image = bytes_per_row;
size_t bytes_per_row = expected_dst_bytes_per_pixel *
((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
size_t bytes_per_image = bytes_per_row;
int max_array_index = ((type_ == GPU_TEXTURE_1D_ARRAY) ? extent[1] : 1);
for (int array_index = 0; array_index < max_array_index; array_index++) {
int buffer_array_offset = (bytes_per_image * array_index);
size_t buffer_array_offset = (bytes_per_image * (size_t)array_index);
[blit_encoder
copyFromBuffer:staging_buffer
sourceOffset:buffer_array_offset
@ -759,13 +760,13 @@ void gpu::MTLTexture::update_sub(
case GPU_TEXTURE_2D_ARRAY: {
if (can_use_direct_blit) {
/* Use Blit encoder update. */
int bytes_per_row = expected_dst_bytes_per_pixel *
((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
int bytes_per_image = bytes_per_row * extent[1];
size_t bytes_per_row = expected_dst_bytes_per_pixel *
((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
size_t bytes_per_image = bytes_per_row * extent[1];
int texture_array_relative_offset = 0;
size_t texture_array_relative_offset = 0;
int base_slice = (type_ == GPU_TEXTURE_2D_ARRAY) ? offset[2] : 0;
int final_slice = base_slice + ((type_ == GPU_TEXTURE_2D_ARRAY) ? extent[2] : 1);
@ -840,11 +841,11 @@ void gpu::MTLTexture::update_sub(
/* 3D */
case GPU_TEXTURE_3D: {
if (can_use_direct_blit) {
int bytes_per_row = expected_dst_bytes_per_pixel *
((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
int bytes_per_image = bytes_per_row * extent[1];
size_t bytes_per_row = expected_dst_bytes_per_pixel *
((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
size_t bytes_per_image = bytes_per_row * extent[1];
[blit_encoder copyFromBuffer:staging_buffer
sourceOffset:0
sourceBytesPerRow:bytes_per_row
@ -881,13 +882,12 @@ void gpu::MTLTexture::update_sub(
/* CUBE */
case GPU_TEXTURE_CUBE: {
if (can_use_direct_blit) {
int bytes_per_row = expected_dst_bytes_per_pixel *
((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
int bytes_per_image = bytes_per_row * extent[1];
int texture_array_relative_offset = 0;
size_t bytes_per_row = expected_dst_bytes_per_pixel *
((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
size_t bytes_per_image = bytes_per_row * extent[1];
size_t texture_array_relative_offset = 0;
/* Iterate over all cube faces in range (offset[2], offset[2] + extent[2]). */
for (int i = 0; i < extent[2]; i++) {
@ -917,14 +917,14 @@ void gpu::MTLTexture::update_sub(
case GPU_TEXTURE_CUBE_ARRAY: {
if (can_use_direct_blit) {
int bytes_per_row = expected_dst_bytes_per_pixel *
((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
int bytes_per_image = bytes_per_row * extent[1];
size_t bytes_per_row = expected_dst_bytes_per_pixel *
((ctx->pipeline_state.unpack_row_length == 0) ?
extent[0] :
ctx->pipeline_state.unpack_row_length);
size_t bytes_per_image = bytes_per_row * extent[1];
/* Upload to all faces between offset[2] (which is zero in most cases) AND extent[2]. */
int texture_array_relative_offset = 0;
size_t texture_array_relative_offset = 0;
for (int i = 0; i < extent[2]; i++) {
int face_index = offset[2] + i;
[blit_encoder copyFromBuffer:staging_buffer
@ -1091,9 +1091,9 @@ void MTLTexture::update_sub(int offset[3],
/* Calculate dimensions. */
int num_image_channels = to_component_len(format_);
uint bits_per_pixel = num_image_channels * to_bytesize(format);
uint bytes_per_row = bits_per_pixel * extent[0];
uint bytes_per_image = bytes_per_row * extent[1];
size_t bits_per_pixel = num_image_channels * to_bytesize(format);
size_t bytes_per_row = bits_per_pixel * extent[0];
size_t bytes_per_image = bytes_per_row * extent[1];
/* Currently only required for 2D textures. */
if (type_ == GPU_TEXTURE_2D) {
@ -1393,7 +1393,7 @@ void gpu::MTLTexture::read_internal(int mip,
int depth,
eGPUDataFormat desired_output_format,
int num_output_components,
int debug_data_size,
size_t debug_data_size,
void *r_data)
{
/* Verify textures are baked. */
@ -1408,10 +1408,10 @@ void gpu::MTLTexture::read_internal(int mip,
/* Calculate Desired output size. */
int num_channels = to_component_len(format_);
BLI_assert(num_output_components <= num_channels);
uint desired_output_bpp = num_output_components * to_bytesize(desired_output_format);
size_t desired_output_bpp = num_output_components * to_bytesize(desired_output_format);
/* Calculate Metal data output for trivial copy. */
uint image_bpp = get_mtl_format_bytesize(texture_.pixelFormat);
size_t image_bpp = get_mtl_format_bytesize(texture_.pixelFormat);
uint image_components = get_mtl_format_num_components(texture_.pixelFormat);
bool is_depth_format = (format_flag_ & GPU_FORMAT_DEPTH);
@ -1449,9 +1449,9 @@ void gpu::MTLTexture::read_internal(int mip,
}
/* Determine size of output data. */
uint bytes_per_row = desired_output_bpp * width;
uint bytes_per_image = bytes_per_row * height;
uint total_bytes = bytes_per_image * depth;
size_t bytes_per_row = desired_output_bpp * width;
size_t bytes_per_image = bytes_per_row * height;
size_t total_bytes = bytes_per_image * depth;
if (can_use_simple_read) {
/* DEBUG check that if direct copy is being used, then both the expected output size matches
@ -1568,7 +1568,7 @@ void gpu::MTLTexture::read_internal(int mip,
}
int base_slice = z_off;
int final_slice = base_slice + depth;
int texture_array_relative_offset = 0;
size_t texture_array_relative_offset = 0;
for (int array_slice = base_slice; array_slice < final_slice; array_slice++) {
[enc copyFromTexture:read_texture
@ -1618,7 +1618,7 @@ void gpu::MTLTexture::read_internal(int mip,
}
int base_slice = z_off;
int final_slice = base_slice + depth;
int texture_array_relative_offset = 0;
size_t texture_array_relative_offset = 0;
for (int array_slice = base_slice; array_slice < final_slice; array_slice++) {
[enc copyFromTexture:read_texture
@ -1665,12 +1665,12 @@ void gpu::MTLTexture::read_internal(int mip,
/* Copy data from Shared Memory into ptr. */
memcpy(r_data, destination_buffer_host_ptr, total_bytes);
MTL_LOG_INFO("gpu::MTLTexture::read_internal success! %d bytes read\n", total_bytes);
MTL_LOG_INFO("gpu::MTLTexture::read_internal success! %lu bytes read\n", total_bytes);
}
else {
MTL_LOG_WARNING(
"[Warning] gpu::MTLTexture::read_internal not yet supported for this config -- data "
"format different (src %d bytes, dst %d bytes) (src format: %d, dst format: %d), or "
"format different (src %lu bytes, dst %lu bytes) (src format: %d, dst format: %d), or "
"varying component counts (src %d, dst %d)\n",
image_bpp,
desired_output_bpp,
@ -1731,8 +1731,8 @@ bool gpu::MTLTexture::init_internal(GPUVertBuf *vbo)
/* Verify Texture and vertex buffer alignment. */
const GPUVertFormat *format = GPU_vertbuf_get_format(vbo);
int bytes_per_pixel = get_mtl_format_bytesize(mtl_format);
int bytes_per_row = bytes_per_pixel * w_;
size_t bytes_per_pixel = get_mtl_format_bytesize(mtl_format);
size_t bytes_per_row = bytes_per_pixel * w_;
MTLContext *mtl_ctx = MTLContext::get();
uint32_t align_requirement = static_cast<uint32_t>(
@ -1794,7 +1794,7 @@ bool gpu::MTLTexture::init_internal(GPUVertBuf *vbo)
texture_ = [source_buffer
newTextureWithDescriptor:texture_descriptor_
offset:0
bytesPerRow:ceil_to_multiple_u(bytes_per_row, align_requirement)];
bytesPerRow:ceil_to_multiple_ul(bytes_per_row, align_requirement)];
aligned_w_ = bytes_per_row / bytes_per_pixel;
BLI_assert(texture_);
@ -2159,7 +2159,7 @@ int64_t MTLPixelBuffer::get_native_handle()
return reinterpret_cast<int64_t>(buffer_);
}
uint MTLPixelBuffer::get_size()
size_t MTLPixelBuffer::get_size()
{
return size_;
}

View File

@ -185,7 +185,7 @@ MTLPixelFormat gpu_texture_format_to_metal(eGPUTextureFormat tex_format)
return MTLPixelFormatRGBA8Unorm;
}
int get_mtl_format_bytesize(MTLPixelFormat tex_format)
size_t get_mtl_format_bytesize(MTLPixelFormat tex_format)
{
switch (tex_format) {
case MTLPixelFormatRGBA8Uint:

View File

@ -47,7 +47,7 @@ class MTLUniformBuf : public UniformBuf {
void clear_to_zero() override;
id<MTLBuffer> get_metal_buffer();
int get_size();
size_t get_size();
const char *get_name()
{
return name_;

View File

@ -190,7 +190,7 @@ id<MTLBuffer> MTLUniformBuf::get_metal_buffer()
return nil;
}
int MTLUniformBuf::get_size()
size_t MTLUniformBuf::get_size()
{
BLI_assert(this);
return size_in_bytes_;

View File

@ -75,7 +75,7 @@ void MTLVertBuf::duplicate_data(VertBuf *dst_)
BLI_assert(dst->vbo_ == nullptr);
/* Allocate VBO for destination vertbuf. */
uint length = src->vbo_->get_size();
uint64_t length = src->vbo_->get_size();
dst->vbo_ = MTLContext::get_global_memory_manager()->allocate(
length, (dst->get_usage_type() != GPU_USAGE_DEVICE_ONLY));
dst->alloc_size_ = length;
@ -225,7 +225,7 @@ void MTLVertBuf::bind()
sourceOffset:0
toBuffer:copy_new_buffer
destinationOffset:0
size:min_ii([copy_new_buffer length], [copy_prev_buffer length])];
size:min_ulul([copy_new_buffer length], [copy_prev_buffer length])];
/* Flush newly copied data back to host-side buffer, if one exists.
* Ensures data and cache coherency for managed MTLBuffers. */
@ -274,7 +274,7 @@ void MTLVertBuf::update_sub(uint start, uint len, const void *data)
[scratch_allocation.metal_buffer
didModifyRange:NSMakeRange(scratch_allocation.buffer_offset, len)];
id<MTLBuffer> data_buffer = scratch_allocation.metal_buffer;
uint data_buffer_offset = scratch_allocation.buffer_offset;
uint64_t data_buffer_offset = scratch_allocation.buffer_offset;
BLI_assert(vbo_ != nullptr && data != nullptr);
BLI_assert((start + len) <= vbo_->get_size());

View File

@ -886,7 +886,7 @@ int64_t GLPixelBuffer::get_native_handle()
return int64_t(gl_id_);
}
uint GLPixelBuffer::get_size()
size_t GLPixelBuffer::get_size()
{
return size_;
}

View File

@ -139,7 +139,7 @@ class GLPixelBuffer : public PixelBuffer {
void *map() override;
void unmap() override;
int64_t get_native_handle() override;
uint get_size() override;
size_t get_size() override;
MEM_CXX_CLASS_ALLOC_FUNCS("GLPixelBuffer")
};

View File

@ -36,7 +36,7 @@ int64_t VKPixelBuffer::get_native_handle()
return int64_t(buffer_.vk_handle());
}
uint VKPixelBuffer::get_size()
size_t VKPixelBuffer::get_size()
{
return size_;
}

View File

@ -21,7 +21,7 @@ class VKPixelBuffer : public PixelBuffer {
void *map() override;
void unmap() override;
int64_t get_native_handle() override;
uint get_size() override;
size_t get_size() override;
};
} // namespace blender::gpu