DRW: Instance: Makes it possible to have instances of a non-init batch

Clément Foucault 2018-12-18 13:08:08 +01:00
parent 77d9ddc5ea
commit c92cffb56d
3 changed files with 20 additions and 12 deletions
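
The change defers batch initialization: at request time the instancing GPUBatch is only zero-allocated, so its verts[0] stays NULL, and the real setup happens at finish time, once the source instance batch is guaranteed to be initialized. A minimal sketch of that ordering, using only calls that appear in the hunks below (batch, instance and vert are placeholder names, not variables from the patch):

    /* Request time (DRW_instancing_buffer_request): allocate only, no init yet. */
    GPUBatch *batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch"); /* batch->verts[0] == NULL */

    /* Finish time (DRW_instance_buffer_finish): the instance batch is now valid,
     * so share its VBOs/IBO and attach the instancing vertex buffer. */
    GPU_batch_copy(batch, instance);
    GPU_batch_instbuf_set(batch, vert, false);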

View File

@@ -102,6 +102,13 @@ static ListBase g_idatalists = {NULL, NULL};
 static void instance_batch_free(GPUBatch *batch, void *UNUSED(user_data))
 {
+    if (batch->verts[0] == NULL) {
+        /** XXX This is a false positive case.
+         * The batch has been requested but not init yet
+         * and there is a chance that it might become init.
+         **/
+        return;
+    }
     /* Free all batches that have the same key before they are reused. */
     /* TODO: Make it thread safe! Batch freeing can happen from another thread. */
     /* XXX we need to iterate over all idatalists unless we make some smart
@@ -194,12 +201,11 @@ void DRW_instancing_buffer_request(
     /* Create the batch. */
     ibuf = chunk->ibufs + new_id;
     ibuf->vert = *r_vert = GPU_vertbuf_create_with_format_ex(format, GPU_USAGE_DYNAMIC);
-    ibuf->batch = *r_batch = GPU_batch_duplicate(instance);
+    ibuf->batch = *r_batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
     ibuf->format = format;
     ibuf->shgroup = shgroup;
     ibuf->instance = instance;
     GPU_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
-    GPU_batch_instbuf_set(ibuf->batch, ibuf->vert, false);
     /* Make sure to free this ibuf if the instance batch gets free. */
     GPU_batch_callback_free_set(instance, &instance_batch_free, NULL);
 }
@@ -253,6 +259,9 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
         GPU_vertbuf_data_resize(ibuf->vert, size);
     }
     GPU_vertbuf_use(ibuf->vert); /* Send data. */
+    /* Setup batch now that we are sure ibuf->instance is setup. */
+    GPU_batch_copy(ibuf->batch, ibuf->instance);
+    GPU_batch_instbuf_set(ibuf->batch, ibuf->vert, false);
     ibuf->shgroup = NULL; /* Set as non used for the next round. */
 }
 else {

View File

@@ -100,7 +100,7 @@ enum {
 GPUBatch *GPU_batch_create_ex(GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
 void GPU_batch_init_ex(GPUBatch *, GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
-GPUBatch *GPU_batch_duplicate(GPUBatch *batch_src);
+void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);
 #define GPU_batch_create(prim, verts, elem) \
     GPU_batch_create_ex(prim, verts, elem, 0)
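
For callers, the API change is mechanical: instead of receiving a newly allocated batch from GPU_batch_duplicate(), the caller now owns the destination and fills it in place. A rough before/after sketch (the MEM_callocN allocation mirrors the draw_instance_data.c hunk above; dst and src are placeholder names):

    /* Before: the function allocated and returned a new batch. */
    GPUBatch *dst = GPU_batch_duplicate(src);

    /* After: the caller provides the destination, which may be a zeroed,
     * not-yet-initialized batch. */
    GPUBatch *dst = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
    GPU_batch_copy(dst, src);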

View File

@@ -114,19 +114,22 @@ void GPU_batch_init_ex(
 }
 /* This will share the VBOs with the new batch. */
-GPUBatch *GPU_batch_duplicate(GPUBatch *batch_src)
+void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
 {
-    GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);
-    batch->gl_prim_type = batch_src->gl_prim_type;
+    GPU_batch_init_ex(batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);
+    batch_dst->gl_prim_type = batch_src->gl_prim_type;
     for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
-        batch->verts[v] = batch_src->verts[v];
+        batch_dst->verts[v] = batch_src->verts[v];
     }
-    return batch;
 }
 void GPU_batch_clear(GPUBatch *batch)
 {
+    if (batch->free_callback) {
+        batch->free_callback(batch, batch->callback_data);
+    }
     if (batch->owns_flag & GPU_BATCH_OWNS_INDEX) {
         GPU_indexbuf_discard(batch->elem);
     }
@@ -144,10 +147,6 @@ void GPU_batch_clear(GPUBatch *batch)
         }
     }
     GPU_batch_vao_cache_clear(batch);
-    if (batch->free_callback) {
-        batch->free_callback(batch, batch->callback_data);
-    }
 }
 void GPU_batch_discard(GPUBatch *batch)
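
A note on the relocated callback: GPU_batch_clear() now invokes free_callback before discarding the owned buffers and clearing the VAO cache, and instance_batch_free() skips batches that were requested but never initialized (verts[0] == NULL). A sketch of how the two pieces interact, assuming GPU_batch_discard() goes through GPU_batch_clear() (instance is a placeholder name):

    /* Registered in DRW_instancing_buffer_request(), as shown above. */
    GPU_batch_callback_free_set(instance, &instance_batch_free, NULL);

    /* Later, when the instance batch is destroyed: the callback runs first,
     * ignores any duplicated batch whose verts[0] is still NULL, and only then
     * are the owned index/vertex buffers discarded and the VAO cache cleared. */
    GPU_batch_discard(instance);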