2023-08-15 16:20:26 +02:00
|
|
|
/* SPDX-FileCopyrightText: 2021 Blender Authors
|
2023-05-31 16:19:06 +02:00
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: GPL-2.0-or-later */
|
2021-10-05 09:36:11 +02:00
|
|
|
|
|
|
|
/** \file
|
|
|
|
* \ingroup draw
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "BKE_global.h"
|
|
|
|
|
Cleanup: fewer iostreams related includes from BLI/BKE headers
Including <iostream> or similar headers is quite expensive, since it
also pulls in things like <locale> and so on. In many BLI headers,
iostreams are only used to implement some sort of "debug print",
or an operator<< for ostream.
Change some of the commonly used places to instead include <iosfwd>,
which is the standard way of forward-declaring iostreams related
classes, and move the actual debug-print / operator<< implementations
into .cc files.
This is not done for templated classes though (it would be possible
to provide explicit operator<< instantiations somewhere in the
source file, but that would lead to hard-to-figure-out linker error
whenever someone would add a different template type). There, where
possible, I changed from full <iostream> include to only the needed
<ostream> part.
For Span<T>, I just removed print_as_lines since it's not used by
anything. It could be moved into a .cc file using a similar approach
as above if needed.
Doing full blender build changes include counts this way:
- <iostream> 1986 -> 978
- <sstream> 2880 -> 925
It does not affect the total build time much though, mostly because
towards the end of it there's just several CPU cores finishing
compiling OpenVDB related source files.
Pull Request: https://projects.blender.org/blender/blender/pulls/111046
2023-08-11 11:27:56 +02:00
|
|
|
#include "BLI_string.h"
|
2021-10-05 09:36:11 +02:00
|
|
|
#include "BLI_vector.hh"
|
|
|
|
|
|
|
|
#include "draw_texture_pool.h"
|
|
|
|
|
|
|
|
using namespace blender;
|
|
|
|
|
2021-10-05 20:15:31 +02:00
|
|
|
/** One persistent pooled texture and the set of users currently sharing it. */
struct DRWTexturePoolHandle {
  /* Bitmask over DRWTexturePool::users indices: bit N set means user N holds this texture
   * for the current cycle. Capped at 64 users (see BLI_assert in DRW_texture_pool_query). */
  uint64_t users_bits;
  /* The pooled GPU texture. Set to nullptr by DRW_texture_pool_reset once freed, so the
   * handle can be compacted away afterwards. */
  GPUTexture *texture;
  /* Consecutive DRW_texture_pool_reset calls with no user; the texture is freed once this
   * reaches the deferral threshold (see DRW_texture_pool_reset). */
  int orphan_cycles;
};
|
|
|
|
|
|
|
|
struct ReleasedTexture {
|
|
|
|
GPUTexture *texture;
|
|
|
|
int orphan_cycles;
|
|
|
|
|
|
|
|
bool operator==(const ReleasedTexture &other)
|
|
|
|
{
|
|
|
|
return texture == other.texture;
|
|
|
|
}
|
2021-10-05 20:15:31 +02:00
|
|
|
};
|
2021-10-05 09:36:11 +02:00
|
|
|
|
|
|
|
/** Pool of render-target textures shared between draw engines to reduce GPU memory usage. */
struct DRWTexturePool {
  /* Opaque user tokens; a user's index here is its bit position in users_bits.
   * Inline capacity of 16 avoids allocation for typical engine counts. */
  Vector<void *, 16> users;
  /* Persistent textures handed out by DRW_texture_pool_query. */
  Vector<DRWTexturePoolHandle> handles;
  /* Cache last result to avoid linear search each time. */
  int last_user_id = -1;

  /* Temporary textures currently checked out via DRW_texture_pool_texture_acquire;
   * must all be released (or ownership taken) before DRW_texture_pool_reset. */
  Vector<GPUTexture *> tmp_tex_acquired;
  /* Temporary textures returned to the pool, kept for a few cycles for reuse. */
  Vector<ReleasedTexture> tmp_tex_released;
};
|
|
|
|
|
2021-12-08 06:31:20 +01:00
|
|
|
/** Allocate an empty pool; textures are created lazily on first query/acquire.
 * The caller owns the result and must free it with DRW_texture_pool_free(). */
DRWTexturePool *DRW_texture_pool_create()
{
  return new DRWTexturePool();
}
|
|
|
|
|
|
|
|
/** Destroy the pool and every GPU texture it still references (persistent handles,
 * still-acquired temporaries, and released-but-cached temporaries). */
void DRW_texture_pool_free(DRWTexturePool *pool)
{
  /* Persistent handles: texture may already be nullptr after a reset, so use the
   * null-safe free macro. */
  for (DRWTexturePoolHandle &handle : pool->handles) {
    GPU_TEXTURE_FREE_SAFE(handle.texture);
  }
  /* Cached temporaries waiting for reuse. */
  for (ReleasedTexture &released : pool->tmp_tex_released) {
    GPU_texture_free(released.texture);
  }
  /* Temporaries that were never released back; freed here as a last resort. */
  for (GPUTexture *acquired : pool->tmp_tex_acquired) {
    GPU_texture_free(acquired);
  }
  delete pool;
}
|
|
|
|
|
2022-12-08 23:30:57 +01:00
|
|
|
/**
 * Return a persistent texture of the given size/format/usage for \a user, reusing an
 * existing pool texture when one matches and is not already held by this user.
 * The returned texture stays valid until the pool frees it as an orphan (see
 * DRW_texture_pool_reset) or the pool itself is freed.
 */
GPUTexture *DRW_texture_pool_query(DRWTexturePool *pool,
                                   int width,
                                   int height,
                                   eGPUTextureFormat format,
                                   eGPUTextureUsage usage,
                                   void *user)
{
  /* Texture pools have an implicit usage as a texture attachment. */
  BLI_assert_msg(usage & GPU_TEXTURE_USAGE_ATTACHMENT,
                 "Pool textures must be of usage type attachment.");
  usage = usage | GPU_TEXTURE_USAGE_ATTACHMENT;

  int user_id = pool->last_user_id;
  /* Try cached value. */
  if (user_id != -1) {
    if (pool->users[user_id] != user) {
      user_id = -1;
    }
  }
  /* Try to find inside previous users. */
  if (user_id == -1) {
    user_id = pool->users.first_index_of_try(user);
  }
  /* No chance, needs to add it to the user list. */
  if (user_id == -1) {
    user_id = pool->users.size();
    pool->users.append(user);
    /* If there is more than 63 users, better refactor this system. */
    BLI_assert(user_id < 64);
  }
  pool->last_user_id = user_id;

  uint64_t user_bit = 1llu << user_id;
  for (DRWTexturePoolHandle &handle : pool->handles) {
    /* Skip if the user is already using this texture. */
    if (user_bit & handle.users_bits) {
      continue;
    }
    /* If everything matches reuse the texture. */
    if ((GPU_texture_format(handle.texture) == format) &&
        (GPU_texture_width(handle.texture) == width) &&
        (GPU_texture_height(handle.texture) == height) &&
        (GPU_texture_usage(handle.texture) == usage))
    {
      handle.users_bits |= user_bit;
      return handle.texture;
    }
  }

  /* 32 bytes so "DRW_tex_pool_" (13 chars) plus any int fits without truncation;
   * 16 bytes only left room for a 2-digit id. SNPRINTF truncates safely either way,
   * but truncated debug names are useless. */
  char name[32] = "DRW_tex_pool";
  if (G.debug & G_DEBUG_GPU) {
    int texture_id = pool->handles.size();
    SNPRINTF(name, "DRW_tex_pool_%d", texture_id);
  }

  /* No reusable candidate: create a fresh texture owned by the pool. */
  DRWTexturePoolHandle handle;
  handle.users_bits = user_bit;
  handle.orphan_cycles = 0;
  handle.texture = GPU_texture_create_2d(name, width, height, 1, format, usage, nullptr);
  pool->handles.append(handle);
  /* Doing filtering for depth does not make sense when not doing shadow mapping,
   * and enabling texture filtering on integer texture make them unreadable. */
  bool do_filter = !GPU_texture_has_depth_format(handle.texture) &&
                   !GPU_texture_has_integer_format(handle.texture);
  GPU_texture_filter_mode(handle.texture, do_filter);

  return handle.texture;
}
|
|
|
|
|
2022-12-08 23:30:57 +01:00
|
|
|
/**
 * Check out a temporary texture of the given size/format/usage, reusing a recently
 * released one when possible. Must be paired with DRW_texture_pool_texture_release()
 * (or DRW_texture_pool_take_texture_ownership()) before the end of the draw cycle.
 */
GPUTexture *DRW_texture_pool_texture_acquire(
    DRWTexturePool *pool, int width, int height, eGPUTextureFormat format, eGPUTextureUsage usage)
{
  GPUTexture *tmp_tex = nullptr;
  int64_t found_index = 0;

  auto texture_match = [&](GPUTexture *tex) -> bool {
    /* TODO(@fclem): We could reuse texture using texture views if the formats are compatible. */
    return (GPU_texture_format(tex) == format) && (GPU_texture_width(tex) == width) &&
           (GPU_texture_height(tex) == height) && (GPU_texture_usage(tex) == usage);
  };

  /* Search released texture first. */
  for (auto i : pool->tmp_tex_released.index_range()) {
    if (texture_match(pool->tmp_tex_released[i].texture)) {
      tmp_tex = pool->tmp_tex_released[i].texture;
      found_index = i;
      break;
    }
  }

  if (tmp_tex) {
    pool->tmp_tex_released.remove_and_reorder(found_index);
  }
  else {
    /* Create a new texture in last resort. */
    /* 32 bytes so "DRW_tex_pool_" (13 chars) plus any int fits without truncation;
     * 16 bytes only left room for a 2-digit id. */
    char name[32] = "DRW_tex_pool";
    if (G.debug & G_DEBUG_GPU) {
      /* NOTE(review): the id comes from `handles` size even though this is a temporary
       * texture, so names can collide with pool handles — debug-only, so harmless. */
      int texture_id = pool->handles.size();
      SNPRINTF(name, "DRW_tex_pool_%d", texture_id);
    }
    tmp_tex = GPU_texture_create_2d(name, width, height, 1, format, usage, nullptr);
  }

  pool->tmp_tex_acquired.append(tmp_tex);

  return tmp_tex;
}
|
|
|
|
|
|
|
|
/** Return a temporary texture obtained from DRW_texture_pool_texture_acquire() to the
 * pool. The texture is cached (orphan counter reset to 0) so it can be reused by a
 * later acquire instead of being freed immediately. */
void DRW_texture_pool_texture_release(DRWTexturePool *pool, GPUTexture *tmp_tex)
{
  /* The texture must currently be checked out; drop it from the acquired list. */
  pool->tmp_tex_acquired.remove_first_occurrence_and_reorder(tmp_tex);

  ReleasedTexture released;
  released.texture = tmp_tex;
  released.orphan_cycles = 0;
  pool->tmp_tex_released.append(released);
}
|
|
|
|
|
2022-08-04 12:36:10 +02:00
|
|
|
/** Detach an acquired temporary texture from the pool: the caller becomes responsible
 * for freeing \a tex, and the pool will no longer track or assert on it. */
void DRW_texture_pool_take_texture_ownership(DRWTexturePool *pool, GPUTexture *tex)
{
  pool->tmp_tex_acquired.remove_first_occurrence_and_reorder(tex);
}
|
|
|
|
|
|
|
|
/** Hand an externally created texture over to the pool. It is tracked as "acquired",
 * so it must still be released (or ownership re-taken) before the end of the cycle. */
void DRW_texture_pool_give_texture_ownership(DRWTexturePool *pool, GPUTexture *tex)
{
  pool->tmp_tex_acquired.append(tex);
}
|
|
|
|
|
2021-10-05 09:36:11 +02:00
|
|
|
/** End-of-cycle bookkeeping: clear per-cycle user bits, age unused textures, and free
 * those that have been orphaned for too many cycles. Must be called with no temporary
 * textures still acquired. */
void DRW_texture_pool_reset(DRWTexturePool *pool)
{
  /** Defer deallocation enough cycles to avoid interleaved calls to different DRW_draw/DRW_render
   * functions causing constant allocation/deallocation (See #113024). */
  const int max_orphan_cycles = 8;

  /* Invalidate the cached user lookup for the next cycle. */
  pool->last_user_id = -1;

  /* Age unused handles and free the long-orphaned ones (texture set to nullptr so the
   * handle can be compacted below); used handles get their per-cycle state cleared. */
  for (auto it = pool->handles.rbegin(); it != pool->handles.rend(); ++it) {
    DRWTexturePoolHandle &handle = *it;
    if (handle.users_bits == 0) {
      handle.orphan_cycles++;
      if (handle.texture && handle.orphan_cycles >= max_orphan_cycles) {
        GPU_texture_free(handle.texture);
        handle.texture = nullptr;
      }
    }
    else {
      handle.users_bits = 0;
      handle.orphan_cycles = 0;
    }
  }

  /* Reverse iteration to make sure we only reorder with known good handles. */
  for (int i = pool->handles.size() - 1; i >= 0; i--) {
    if (!pool->handles[i].texture) {
      pool->handles.remove_and_reorder(i);
    }
  }

  BLI_assert_msg(pool->tmp_tex_acquired.is_empty(),
                 "Missing a TextureFromPool.release() before end of draw.");

  /* Same deferred-free policy for released temporaries; reverse iteration so
   * remove_and_reorder only swaps in elements already visited. */
  for (int i = pool->tmp_tex_released.size() - 1; i >= 0; i--) {
    ReleasedTexture &tex = pool->tmp_tex_released[i];
    if (tex.orphan_cycles >= max_orphan_cycles) {
      GPU_texture_free(tex.texture);
      pool->tmp_tex_released.remove_and_reorder(i);
    }
    else {
      tex.orphan_cycles++;
    }
  }
}
|