tornavis/intern/memutil/MEM_CacheLimiter.h

/* SPDX-FileCopyrightText: 2006-2022 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup intern_memutil
 */

#ifndef __MEM_CACHELIMITER_H__
#define __MEM_CACHELIMITER_H__

/**
 * \section MEM_CacheLimiter
 * This class defines a generic memory cache management system
 * to limit memory usage to a fixed global maximum.
 *
 * \note Please use the C-API in MEM_CacheLimiterC-Api.h for code written in C.
 *
 * Usage example:
 *
 * \code{.cpp}
 * class BigFatImage {
 *  public:
 *   void *get_data() { return this; }
 *   ~BigFatImage() { tell_everyone_we_are_gone(this); }
 * };
 *
 * void doit()
 * {
 *   MEM_CacheLimiter<BigFatImage> BigFatImages(NULL);
 *
 *   MEM_CacheLimiterHandle<BigFatImage> *h = BigFatImages.insert(new BigFatImage);
 *
 *   BigFatImages.enforce_limits();
 *   h->ref();
 *
 *   // work with image...
 *
 *   h->unref();
 *
 *   // leave image in cache.
 * }
 * \endcode
 */

#include "MEM_Allocator.h"
#include <list>
#include <queue>
#include <vector>
template<class T> class MEM_CacheLimiter;
#ifndef __MEM_CACHELIMITERC_API_H__
extern "C" {
void MEM_CacheLimiter_set_maximum(size_t m);
size_t MEM_CacheLimiter_get_maximum(void);
void MEM_CacheLimiter_set_disabled(bool disabled);
bool MEM_CacheLimiter_is_disabled(void);
};
#endif
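
/* Illustrative sketch: these functions control a single process-wide limit that
 * is shared by every cache limiter, so a host application would typically set
 * it once at startup. The helper name and the 512 MiB figure are invented for
 * this example, and the limit is assumed to be expressed in bytes.
 *
 * \code{.cpp}
 * static void init_cache_limit(void)
 * {
 *   MEM_CacheLimiter_set_maximum(size_t(512) * 1024 * 1024);
 *   MEM_CacheLimiter_set_disabled(false);
 * }
 * \endcode
 */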

template<class T> class MEM_CacheLimiterHandle {
 public:
  explicit MEM_CacheLimiterHandle(T *data_, MEM_CacheLimiter<T> *parent_)
      : data(data_), refcount(0), parent(parent_)
  {
  }

  void ref()
  {
    refcount++;
  }

  void unref()
  {
    refcount--;
  }

  T *get()
  {
    return data;
  }

  const T *get() const
  {
    return data;
  }

  int get_refcount() const
  {
    return refcount;
  }

  bool can_destroy() const
  {
    return !data || !refcount;
  }

  bool destroy_if_possible()
  {
    if (can_destroy()) {
      delete data;
      data = NULL;
      unmanage();
      return true;
    }
    return false;
  }

  void unmanage()
  {
    parent->unmanage(this);
  }

  void touch()
  {
    parent->touch(this);
  }

 private:
  friend class MEM_CacheLimiter<T>;

  T *data;
  int refcount;
  int pos;
  MEM_CacheLimiter<T> *parent;
};

template<class T> class MEM_CacheLimiter {
 public:
  typedef size_t (*MEM_CacheLimiter_DataSize_Func)(void *data);
  typedef int (*MEM_CacheLimiter_ItemPriority_Func)(void *item, int default_priority);
  typedef bool (*MEM_CacheLimiter_ItemDestroyable_Func)(void *item);

  /* The optional callbacks start out NULL so touch() and enforce_limits()
   * behave predictably before they are registered. */
  MEM_CacheLimiter(MEM_CacheLimiter_DataSize_Func data_size_func)
      : data_size_func(data_size_func), item_priority_func(NULL), item_destroyable_func(NULL)
  {
  }

  ~MEM_CacheLimiter()
  {
    int i;
    for (i = 0; i < queue.size(); i++) {
      delete queue[i];
    }
  }

  MEM_CacheLimiterHandle<T> *insert(T *elem)
  {
    queue.push_back(new MEM_CacheLimiterHandle<T>(elem, this));
    queue.back()->pos = queue.size() - 1;
    return queue.back();
  }

  void unmanage(MEM_CacheLimiterHandle<T> *handle)
  {
    int pos = handle->pos;
    queue[pos] = queue.back();
    queue[pos]->pos = pos;
    queue.pop_back();
    delete handle;
  }

  size_t get_memory_in_use()
  {
    size_t size = 0;

    if (data_size_func) {
      int i;
      for (i = 0; i < queue.size(); i++) {
        size += data_size_func(queue[i]->get()->get_data());
      }
    }
    else {
      size = MEM_get_memory_in_use();
    }

    return size;
  }

  void enforce_limits()
  {
    size_t max = MEM_CacheLimiter_get_maximum();
    bool is_disabled = MEM_CacheLimiter_is_disabled();
    size_t mem_in_use, cur_size;

    if (is_disabled) {
      return;
    }

    if (max == 0) {
      return;
    }

    mem_in_use = get_memory_in_use();

    if (mem_in_use <= max) {
      return;
    }

    while (!queue.empty() && mem_in_use > max) {
      MEM_CacheElementPtr elem = get_least_priority_destroyable_element();

      if (!elem) {
        break;
      }

      if (data_size_func) {
        cur_size = data_size_func(elem->get()->get_data());
      }
      else {
        cur_size = mem_in_use;
      }

      if (elem->destroy_if_possible()) {
        if (data_size_func) {
          mem_in_use -= cur_size;
        }
        else {
          mem_in_use -= cur_size - MEM_get_memory_in_use();
        }
      }
    }
  }

  void touch(MEM_CacheLimiterHandle<T> *handle)
  {
    /* If we're using a custom priority callback, re-arranging the queue
     * doesn't make much sense because we'll iterate it all to get the
     * least-priority element anyway. */
    if (item_priority_func == NULL) {
      queue[handle->pos] = queue.back();
      queue[handle->pos]->pos = handle->pos;
      queue.pop_back();
      queue.push_back(handle);
      handle->pos = queue.size() - 1;
    }
  }

  void set_item_priority_func(MEM_CacheLimiter_ItemPriority_Func item_priority_func)
  {
    this->item_priority_func = item_priority_func;
  }

  void set_item_destroyable_func(MEM_CacheLimiter_ItemDestroyable_Func item_destroyable_func)
  {
    this->item_destroyable_func = item_destroyable_func;
  }
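
  /* Illustrative sketch of the callbacks above. The limiter obtains the `void *`
   * it hands to them from `T::get_data()` (as get_memory_in_use() and
   * enforce_limits() do), so the cached type must provide that method. `Frame`
   * and all of its members are invented for this example and do not come from
   * this header. Lower priority values are evicted first; the default priority
   * passed to the callback is 0 for the element at the back of the queue.
   *
   * \code{.cpp}
   * struct Frame {
   *   size_t size_in_bytes;
   *   bool is_pinned;
   *   int distance_from_playhead;
   *
   *   void *get_data()
   *   {
   *     return this;  // Hand the frame itself to the callbacks.
   *   }
   * };
   *
   * static size_t frame_size(void *data)
   * {
   *   return static_cast<Frame *>(data)->size_in_bytes;
   * }
   *
   * static int frame_priority(void *item, int default_priority)
   * {
   *   // Frames far from the playhead become eviction candidates sooner.
   *   return default_priority - static_cast<Frame *>(item)->distance_from_playhead;
   * }
   *
   * static bool frame_destroyable(void *item)
   * {
   *   return !static_cast<Frame *>(item)->is_pinned;
   * }
   *
   * static MEM_CacheLimiter<Frame> *create_frame_cache(void)
   * {
   *   MEM_CacheLimiter<Frame> *limiter = new MEM_CacheLimiter<Frame>(frame_size);
   *   limiter->set_item_priority_func(frame_priority);
   *   limiter->set_item_destroyable_func(frame_destroyable);
   *   return limiter;
   * }
   * \endcode
   */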

 private:
  typedef MEM_CacheLimiterHandle<T> *MEM_CacheElementPtr;
  typedef std::vector<MEM_CacheElementPtr, MEM_Allocator<MEM_CacheElementPtr>> MEM_CacheQueue;
  typedef typename MEM_CacheQueue::iterator iterator;

  /* Check whether element can be destroyed when enforcing cache limits. */
  bool can_destroy_element(MEM_CacheElementPtr &elem)
  {
    if (!elem->can_destroy()) {
      /* Element is referenced. */
      return false;
    }

    if (item_destroyable_func) {
      if (!item_destroyable_func(elem->get()->get_data())) {
        return false;
      }
    }

    return true;
  }

  MEM_CacheElementPtr get_least_priority_destroyable_element(void)
  {
    if (queue.empty()) {
      return NULL;
    }

    MEM_CacheElementPtr best_match_elem = NULL;

    if (!item_priority_func) {
      for (iterator it = queue.begin(); it != queue.end(); it++) {
        MEM_CacheElementPtr elem = *it;

        if (!can_destroy_element(elem)) {
          continue;
        }

        best_match_elem = elem;
        break;
      }
    }
    else {
      int best_match_priority = 0;
      int i;

      for (i = 0; i < queue.size(); i++) {
        MEM_CacheElementPtr elem = queue[i];

        if (!can_destroy_element(elem)) {
          continue;
        }

        /* By default 0 means highest priority element. */
        /* Casting a size type to int is questionable,
         * but unlikely to cause problems. */
        int priority = -((int)(queue.size()) - i - 1);
        priority = item_priority_func(elem->get()->get_data(), priority);

        if (priority < best_match_priority || best_match_elem == NULL) {
          best_match_priority = priority;
          best_match_elem = elem;
        }
      }
    }

    return best_match_elem;
  }

  MEM_CacheQueue queue;

  MEM_CacheLimiter_DataSize_Func data_size_func;
  MEM_CacheLimiter_ItemPriority_Func item_priority_func;
  MEM_CacheLimiter_ItemDestroyable_Func item_destroyable_func;
};

#endif // __MEM_CACHELIMITER_H__