/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 *
 * Contributor(s): Blender Foundation
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file blender/gpu/GPU_batch.h
 *  \ingroup gpu
 *
 * GPU geometry batch
 * Contains VAOs + VBOs + Shader representing a drawable entity.
 */
|
|
|
|
|
2018-07-18 00:12:21 +02:00
|
|
|
#ifndef __GPU_BATCH_H__
|
|
|
|
#define __GPU_BATCH_H__
|
2016-09-13 08:41:43 +02:00
|
|
|
|
2018-07-17 21:11:23 +02:00
|
|
|
#include "GPU_vertex_buffer.h"
|
|
|
|
#include "GPU_element.h"
|
|
|
|
#include "GPU_shader_interface.h"
|
|
|
|
#include "GPU_shader.h"
|
2017-03-17 04:32:35 +01:00
|
|
|
|
2018-07-17 21:11:23 +02:00
|
|
|
/**
 * Lifecycle state of a batch. Advances from format specification,
 * through build, to a drawable state.
 */
typedef enum {
	GPU_BATCH_READY_TO_FORMAT,
	GPU_BATCH_READY_TO_BUILD,
	GPU_BATCH_BUILDING,
	GPU_BATCH_READY_TO_DRAW
} GPUBatchPhase;
|
2017-03-17 04:32:35 +01:00
|
|
|
|
2018-07-18 00:12:21 +02:00
|
|
|
/* Maximum number of vertex buffers a single batch can reference. */
#define GPU_BATCH_VBO_MAX_LEN 3
/* Number of VAO slots in the static cache before switching to a dynamic cache. */
#define GPU_BATCH_VAO_STATIC_LEN 3
/* Presumably the growth granularity of the dynamic VAO cache — confirm in gpu_batch.c. */
#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16
|
2018-01-15 14:06:39 +01:00
|
|
|
|
2018-07-18 00:12:21 +02:00
|
|
|
/**
 * A drawable entity: vertex buffer(s) + optional index buffer + optional
 * instance buffer, plus a cache of VAOs keyed by shader interface.
 */
typedef struct GPUBatch {
	/* geometry */
	GPUVertBuf *verts[GPU_BATCH_VBO_MAX_LEN]; /* verts[0] is required, others can be NULL */
	GPUVertBuf *inst; /* instance attribs */
	GPUIndexBuf *elem; /* NULL if element list not needed */
	uint32_t gl_prim_type; /* NOTE(review): presumably a raw GL primitive enum (e.g. GL_TRIANGLES) — confirm. */

	/* cached values (avoid dereferencing later) */
	uint32_t vao_id;
	uint32_t program;
	const struct GPUShaderInterface *interface;

	/* book-keeping */
	uint owns_flag; /* bitmask of GPU_BATCH_OWNS_* flags (see enum below) */
	struct GPUContext *context; /* used to free all vaos. this implies all vaos were created under the same context. */
	GPUBatchPhase phase;
	bool program_in_use;

	/* Vao management: remembers all geometry state (vertex attrib bindings & element buffer)
	 * for each shader interface. Start with a static number of vaos and fallback to dynamic count
	 * if necessary. Once a batch goes dynamic it does not go back. */
	bool is_dynamic_vao_count;
	union {
		/* Static handle count */
		struct {
			const struct GPUShaderInterface *interfaces[GPU_BATCH_VAO_STATIC_LEN];
			uint32_t vao_ids[GPU_BATCH_VAO_STATIC_LEN];
		} static_vaos;
		/* Dynamic handle count */
		struct {
			uint count;
			const struct GPUShaderInterface **interfaces;
			uint32_t *vao_ids;
		} dynamic_vaos;
	};

	/* XXX This is the only solution if we want to have some data structure using
	 * batches as key to identify nodes. We must destroy these nodes with this callback. */
	void (*free_callback)(struct GPUBatch *, void *);
	void *callback_data;
} GPUBatch;
|
2018-07-17 21:11:23 +02:00
|
|
|
|
|
|
|
/* Ownership bits for GPUBatch.owns_flag — presumably consulted when the batch
 * is discarded to decide which buffers it frees; confirm in gpu_batch.c. */
enum {
	GPU_BATCH_OWNS_VBO = (1 << 0),
	/* each vbo index gets bit-shifted */
	GPU_BATCH_OWNS_INSTANCES = (1 << 30),
	GPU_BATCH_OWNS_INDEX = (1u << 31u),
	/* NOTE(review): (1u << 31u) is outside the range of 'int'; strictly this
	 * enumerator relies on a compiler extension in C — confirm all target
	 * compilers accept it. */
};
|
|
|
|
|
2018-07-18 15:09:31 +02:00
|
|
|
/* Create (heap) or initialize (in place) a batch from a primitive type, a
 * vertex buffer, and an optional index buffer. 'owns_flag' is a bitmask of
 * GPU_BATCH_OWNS_* values transferring buffer ownership to the batch. */
GPUBatch *GPU_batch_create_ex(GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
void GPU_batch_init_ex(GPUBatch *, GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
/* Copy 'batch_src' into 'batch_dst' — presumably shallow w.r.t. buffers; confirm in gpu_batch.c. */
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);
|
2018-07-17 21:11:23 +02:00
|
|
|
|
2018-07-18 00:12:21 +02:00
|
|
|
/* Convenience wrappers: create/init with no buffer ownership transferred. */
#define GPU_batch_create(prim, verts, elem) \
	GPU_batch_create_ex(prim, verts, elem, 0)
#define GPU_batch_init(batch, prim, verts, elem) \
	GPU_batch_init_ex(batch, prim, verts, elem, 0)
|
2018-07-17 21:11:23 +02:00
|
|
|
|
2018-12-14 00:07:59 +01:00
|
|
|
void GPU_batch_clear(GPUBatch *); /* Same as discard but does not free. */
void GPU_batch_discard(GPUBatch *); /* verts & elem are not discarded */

/* Free all cached VAOs for this batch. */
void GPU_batch_vao_cache_clear(GPUBatch *);

/* Register a callback (plus user data) invoked when the batch is freed. */
void GPU_batch_callback_free_set(GPUBatch *, void (*callback)(GPUBatch *, void *), void *);

void GPU_batch_instbuf_set(GPUBatch *, GPUVertBuf *, bool own_vbo); /* Instancing */

/* Add an extra vertex buffer; returns its index in GPUBatch.verts —
 * presumably, confirm against gpu_batch.c. */
int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);

#define GPU_batch_vertbuf_add(batch, verts) \
	GPU_batch_vertbuf_add_ex(batch, verts, false)
|
2016-09-15 18:41:28 +02:00
|
|
|
|
2018-07-18 15:09:31 +02:00
|
|
|
/* Entire batch draws with one shader program, but can be redrawn later with another program. */
/* Vertex shader's inputs must be compatible with the batch's vertex format. */
void GPU_batch_program_set_no_use(GPUBatch *, uint32_t program, const GPUShaderInterface *);
void GPU_batch_program_set(GPUBatch *, uint32_t program, const GPUShaderInterface *);
void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id);

void GPU_batch_program_use_begin(GPUBatch *); /* call before Batch_Uniform (temp hack?) */
void GPU_batch_program_use_end(GPUBatch *);

/* Set a named uniform on the batch's currently-set program. */
void GPU_batch_uniform_1ui(GPUBatch *, const char *name, int value); /* NOTE(review): '1ui' suffix but parameter is plain int — confirm intended signedness. */
void GPU_batch_uniform_1i(GPUBatch *, const char *name, int value);
void GPU_batch_uniform_1b(GPUBatch *, const char *name, bool value);
void GPU_batch_uniform_1f(GPUBatch *, const char *name, float value);
void GPU_batch_uniform_2f(GPUBatch *, const char *name, float x, float y);
void GPU_batch_uniform_3f(GPUBatch *, const char *name, float x, float y, float z);
void GPU_batch_uniform_4f(GPUBatch *, const char *name, float x, float y, float z, float w);
void GPU_batch_uniform_2fv(GPUBatch *, const char *name, const float data[2]);
void GPU_batch_uniform_3fv(GPUBatch *, const char *name, const float data[3]);
void GPU_batch_uniform_4fv(GPUBatch *, const char *name, const float data[4]);
void GPU_batch_uniform_2fv_array(GPUBatch *, const char *name, int len, const float *data);
void GPU_batch_uniform_4fv_array(GPUBatch *, const char *name, int len, const float *data);
void GPU_batch_uniform_mat4(GPUBatch *, const char *name, const float data[4][4]);

void GPU_batch_draw(GPUBatch *);
|
2018-07-17 21:11:23 +02:00
|
|
|
|
|
|
|
/* This does not bind/unbind shader and does not call GPU_matrix_bind() */
void GPU_batch_draw_range_ex(GPUBatch *, int v_first, int v_count, bool force_instance);

/* Does not even need batch */
void GPU_draw_primitive(GPUPrimType, int v_count);
|
2018-07-17 21:11:23 +02:00
|
|
|
|
|
|
|
#if 0 /* future plans */
|
|
|
|
|
2018-07-18 00:12:21 +02:00
|
|
|
/* Can multiple batches share a GPUVertBuf? Use ref count? */
|
2018-07-17 21:11:23 +02:00
|
|
|
|
|
|
|
|
|
|
|
/* We often need a batch with its own data, to be created and discarded together. */
|
|
|
|
/* WithOwn variants reduce number of system allocations. */
|
|
|
|
|
|
|
|
typedef struct BatchWithOwnVertexBuffer {
|
2018-07-18 00:12:21 +02:00
|
|
|
GPUBatch batch;
|
|
|
|
GPUVertBuf verts; /* link batch.verts to this */
|
2018-07-17 21:11:23 +02:00
|
|
|
} BatchWithOwnVertexBuffer;
|
|
|
|
|
|
|
|
typedef struct BatchWithOwnElementList {
|
2018-07-18 00:12:21 +02:00
|
|
|
GPUBatch batch;
|
|
|
|
GPUIndexBuf elem; /* link batch.elem to this */
|
2018-07-17 21:11:23 +02:00
|
|
|
} BatchWithOwnElementList;
|
|
|
|
|
|
|
|
typedef struct BatchWithOwnVertexBufferAndElementList {
|
2018-07-18 00:12:21 +02:00
|
|
|
GPUBatch batch;
|
|
|
|
GPUIndexBuf elem; /* link batch.elem to this */
|
|
|
|
GPUVertBuf verts; /* link batch.verts to this */
|
2018-07-17 21:11:23 +02:00
|
|
|
} BatchWithOwnVertexBufferAndElementList;
|
|
|
|
|
2018-07-18 15:09:31 +02:00
|
|
|
GPUBatch *create_BatchWithOwnVertexBuffer(GPUPrimType, GPUVertFormat *, uint v_len, GPUIndexBuf *);
|
|
|
|
GPUBatch *create_BatchWithOwnElementList(GPUPrimType, GPUVertBuf *, uint prim_len);
|
|
|
|
GPUBatch *create_BatchWithOwnVertexBufferAndElementList(GPUPrimType, GPUVertFormat *, uint v_len, uint prim_len);
|
2018-07-17 21:11:23 +02:00
|
|
|
/* verts: shared, own */
|
|
|
|
/* elem: none, shared, own */
|
2018-07-18 15:09:31 +02:00
|
|
|
GPUBatch *create_BatchInGeneral(GPUPrimType, VertexBufferStuff, ElementListStuff);
|
2018-07-17 21:11:23 +02:00
|
|
|
|
|
|
|
#endif /* future plans */
|
2017-02-08 00:38:07 +01:00
|
|
|
|
2018-01-15 06:21:23 +01:00
|
|
|
/* Module-level setup/teardown (lowercase prefix = internal, not public GPU_* API).
 * NOTE(review): presumably called once from GPU subsystem init/exit — confirm at call sites. */
void gpu_batch_init(void);
void gpu_batch_exit(void);
|
|
|
|
|
2018-07-17 21:11:23 +02:00
|
|
|
/* Macros */
|
|
|
|
|
2018-07-18 00:12:21 +02:00
|
|
|
/* Discard 'batch' and reset the pointer to NULL; no-op when already NULL.
 * The parameter is parenthesized so expression arguments (e.g. array
 * elements) expand safely. NOTE: 'batch' is evaluated more than once —
 * do not pass an argument with side effects. */
#define GPU_BATCH_DISCARD_SAFE(batch) do { \
	if ((batch) != NULL) { \
		GPU_batch_discard(batch); \
		(batch) = NULL; \
	} \
} while (0)
|
|
|
|
|
2018-12-17 11:37:27 +01:00
|
|
|
/* Clear 'batch' (without freeing it) and zero the struct; no-op when NULL.
 * The parameter is parenthesized so expression arguments expand safely.
 * NOTE: 'batch' is evaluated more than once — no side-effect arguments.
 * Requires <string.h> (memset) in the including translation unit. */
#define GPU_BATCH_CLEAR_SAFE(batch) do { \
	if ((batch) != NULL) { \
		GPU_batch_clear(batch); \
		memset((batch), 0, sizeof(*(batch))); \
	} \
} while (0)
|
|
|
|
|
2018-08-30 22:47:36 +02:00
|
|
|
/* Discard every batch pointer in '_batch_array' (length '_len'), then free the
 * array itself; no-op when the array pointer is NULL. Parameters are
 * parenthesized so expression arguments expand safely. NOTE: both parameters
 * are evaluated more than once — no side-effect arguments. */
#define GPU_BATCH_DISCARD_ARRAY_SAFE(_batch_array, _len) do { \
	if ((_batch_array) != NULL) { \
		BLI_assert((_len) > 0); \
		for (int _i = 0; _i < (_len); _i++) { \
			GPU_BATCH_DISCARD_SAFE((_batch_array)[_i]); \
		} \
		MEM_freeN(_batch_array); \
	} \
} while (0)
|
|
|
|
|
2018-07-18 00:12:21 +02:00
|
|
|
#endif /* __GPU_BATCH_H__ */
|