diff --git a/source/blender/draw/intern/draw_instance_data.c b/source/blender/draw/intern/draw_instance_data.c
index 9695643d707..dca112c85c6 100644
--- a/source/blender/draw/intern/draw_instance_data.c
+++ b/source/blender/draw/intern/draw_instance_data.c
@@ -102,6 +102,13 @@ static ListBase g_idatalists = {NULL, NULL};
 
 static void instance_batch_free(GPUBatch *batch, void *UNUSED(user_data))
 {
+	if (batch->verts[0] == NULL) {
+		/* XXX This is a false positive case.
+		 * The batch has been requested but is not initialized yet,
+		 * and there is a chance that it will still be initialized.
+		 */
+		return;
+	}
 	/* Free all batches that have the same key before they are reused. */
 	/* TODO: Make it thread safe! Batch freeing can happen from another thread. */
 	/* XXX we need to iterate over all idatalists unless we make some smart
@@ -194,12 +201,11 @@ void DRW_instancing_buffer_request(
 	/* Create the batch. */
 	ibuf = chunk->ibufs + new_id;
 	ibuf->vert = *r_vert = GPU_vertbuf_create_with_format_ex(format, GPU_USAGE_DYNAMIC);
-	ibuf->batch = *r_batch = GPU_batch_duplicate(instance);
+	ibuf->batch = *r_batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
 	ibuf->format = format;
 	ibuf->shgroup = shgroup;
 	ibuf->instance = instance;
 	GPU_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
-	GPU_batch_instbuf_set(ibuf->batch, ibuf->vert, false);
 	/* Make sure to free this ibuf if the instance batch gets freed. */
 	GPU_batch_callback_free_set(instance, &instance_batch_free, NULL);
 }
@@ -253,6 +259,9 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
 			GPU_vertbuf_data_resize(ibuf->vert, size);
 		}
 		GPU_vertbuf_use(ibuf->vert); /* Send data. */
+		/* Set up the batch now that we are sure ibuf->instance is set up. */
+		GPU_batch_copy(ibuf->batch, ibuf->instance);
+		GPU_batch_instbuf_set(ibuf->batch, ibuf->vert, false);
 		ibuf->shgroup = NULL; /* Set as unused for the next round. */
 	}
 	else {
diff --git a/source/blender/gpu/GPU_batch.h b/source/blender/gpu/GPU_batch.h
index c77f0707ee5..f4911d15e56 100644
--- a/source/blender/gpu/GPU_batch.h
+++ b/source/blender/gpu/GPU_batch.h
@@ -100,7 +100,7 @@ enum {
 
 GPUBatch *GPU_batch_create_ex(GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
 void GPU_batch_init_ex(GPUBatch *, GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
-GPUBatch *GPU_batch_duplicate(GPUBatch *batch_src);
+void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);
 
 #define GPU_batch_create(prim, verts, elem) \
 	GPU_batch_create_ex(prim, verts, elem, 0)
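Note on the two-phase setup above: DRW_instancing_buffer_request now only MEM_callocN's a zeroed GPUBatch, and the real initialization (GPU_batch_copy plus GPU_batch_instbuf_set) is deferred to DRW_instance_buffer_finish, once ibuf->instance is guaranteed to be set up. The zeroed allocation is what makes the batch->verts[0] == NULL check in instance_batch_free a reliable "requested but not initialized" sentinel. Below is a minimal standalone sketch of that lifecycle; Batch, batch_request, batch_finish and batch_free_guard are hypothetical stand-ins, not the Blender API.

#include <stdio.h>
#include <stdlib.h>

/* Requested-but-not-initialized sentinel: a zeroed struct whose NULL
 * field plays the role of batch->verts[0] above. Hypothetical names. */
typedef struct Batch {
  float *verts;   /* NULL until the deferred init runs. */
  int prim_type;
} Batch;

/* Request phase: only hand out zeroed memory (mirrors MEM_callocN). */
static Batch *batch_request(void)
{
  return calloc(1, sizeof(Batch));
}

/* Finish phase: the deferred initialization. */
static void batch_finish(Batch *batch, float *verts, int prim_type)
{
  batch->verts = verts;
  batch->prim_type = prim_type;
}

/* Free guard: an uninitialized batch is the false positive case. */
static void batch_free_guard(Batch *batch)
{
  if (batch->verts == NULL) {
    return; /* Requested but never initialized: nothing to release. */
  }
  printf("releasing batch with prim_type %d\n", batch->prim_type);
}

int main(void)
{
  Batch *batch = batch_request();
  batch_free_guard(batch); /* Too early: the guard skips it. */

  float verts[3] = {0.0f, 1.0f, 2.0f};
  batch_finish(batch, verts, 4);
  batch_free_guard(batch); /* Fully initialized: releases. */
  free(batch);
  return 0;
}

Because the request phase hands out zeroed memory, the free path can distinguish "never initialized" from "live" without any extra bookkeeping flag.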
diff --git a/source/blender/gpu/intern/gpu_batch.c b/source/blender/gpu/intern/gpu_batch.c
index d70752e9b2f..a2184df229f 100644
--- a/source/blender/gpu/intern/gpu_batch.c
+++ b/source/blender/gpu/intern/gpu_batch.c
@@ -114,19 +114,22 @@ void GPU_batch_init_ex(
 }
 
 /* This will share the VBOs with the new batch. */
-GPUBatch *GPU_batch_duplicate(GPUBatch *batch_src)
+void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
 {
-	GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);
+	GPU_batch_init_ex(batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);
 
-	batch->gl_prim_type = batch_src->gl_prim_type;
+	batch_dst->gl_prim_type = batch_src->gl_prim_type;
 	for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
-		batch->verts[v] = batch_src->verts[v];
+		batch_dst->verts[v] = batch_src->verts[v];
 	}
-	return batch;
 }
 
 void GPU_batch_clear(GPUBatch *batch)
 {
+	if (batch->free_callback) {
+		batch->free_callback(batch, batch->callback_data);
+	}
+
 	if (batch->owns_flag & GPU_BATCH_OWNS_INDEX) {
 		GPU_indexbuf_discard(batch->elem);
 	}
@@ -144,10 +147,6 @@ void GPU_batch_clear(GPUBatch *batch)
 		}
 	}
 	GPU_batch_vao_cache_clear(batch);
-
-	if (batch->free_callback) {
-		batch->free_callback(batch, batch->callback_data);
-	}
 }
 
 void GPU_batch_discard(GPUBatch *batch)
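The other half of the fix is the ordering change in GPU_batch_clear: the free callback now runs before the batch's owned buffers are discarded, so a callback such as instance_batch_free can still inspect the batch it is handed. A minimal sketch of that ordering, again with hypothetical names (Res, res_clear, on_free) rather than the real GPU module API:

#include <stdio.h>
#include <stdlib.h>

/* Owned resource plus an owner-installed free callback, mirroring
 * GPUBatch's free_callback/callback_data pair. Hypothetical names. */
typedef struct Res {
  int *data;  /* Owned resource. */
  void (*free_callback)(struct Res *res, void *user_data);
  void *callback_data;
} Res;

static void res_clear(Res *res)
{
  /* Run the owner's callback first, while `data` is still valid. */
  if (res->free_callback) {
    res->free_callback(res, res->callback_data);
  }
  /* Only then discard owned resources. */
  free(res->data);
  res->data = NULL;
}

/* Like instance_batch_free, the callback inspects the object it is given. */
static void on_free(struct Res *res, void *user_data)
{
  (void)user_data;
  if (res->data == NULL) {
    return; /* This is what the callback would see under the old ordering. */
  }
  printf("callback sees data[0] = %d\n", res->data[0]);
}

int main(void)
{
  Res res = {malloc(sizeof(int)), on_free, NULL};
  res.data[0] = 7;
  res_clear(&res); /* Callback runs before teardown and sees live data. */
  return 0;
}

With the old ordering (callback after teardown) the callback would observe already-released state; running it first keeps its view of the object consistent.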