Cleanup: remove unused ID-map undo API
Removing this since it was added for TexFace support which has since been removed.
This commit is contained in:
parent
ae19f68d45
commit
233f78c017
|
@ -195,24 +195,6 @@ void BKE_undosys_foreach_ID_ref(UndoStack *ustack,
|
|||
void *user_data);
|
||||
#endif
|
||||
|
||||
/* Use when the undo step stores many arbitrary pointers. */
|
||||
struct UndoIDPtrMap;
|
||||
struct UndoIDPtrMap *BKE_undosys_ID_map_create(void);
|
||||
void BKE_undosys_ID_map_destroy(struct UndoIDPtrMap *map);
|
||||
void BKE_undosys_ID_map_add(struct UndoIDPtrMap *map, ID *id);
|
||||
struct ID *BKE_undosys_ID_map_lookup(const struct UndoIDPtrMap *map, const struct ID *id_src);
|
||||
|
||||
void BKE_undosys_ID_map_add_with_prev(struct UndoIDPtrMap *map,
|
||||
struct ID *id,
|
||||
struct ID **id_prev);
|
||||
struct ID *BKE_undosys_ID_map_lookup_with_prev(const struct UndoIDPtrMap *map,
|
||||
struct ID *id_src,
|
||||
struct ID *id_prev_match[2]);
|
||||
|
||||
void BKE_undosys_ID_map_foreach_ID_ref(struct UndoIDPtrMap *map,
|
||||
UndoTypeForEachIDRefFn foreach_ID_ref_fn,
|
||||
void *user_data);
|
||||
|
||||
void BKE_undosys_print(UndoStack *ustack);
|
||||
|
||||
#endif /* __BKE_UNDO_SYSTEM_H__ */
|
||||
|
|
|
@ -848,10 +848,6 @@ void BKE_undosys_type_free_all(void)
|
|||
* Unfortunately we need this for a handful of places.
|
||||
*/
|
||||
|
||||
/* Disable for now since it accesses freed memory.
|
||||
* The pointer can only be a key, we can't read its contents. */
|
||||
#define USE_LIB_SKIP
|
||||
|
||||
static void UNUSED_FUNCTION(BKE_undosys_foreach_ID_ref(UndoStack *ustack,
|
||||
UndoTypeForEachIDRefFn foreach_ID_ref_fn,
|
||||
void *user_data))
|
||||
|
@ -864,176 +860,6 @@ static void UNUSED_FUNCTION(BKE_undosys_foreach_ID_ref(UndoStack *ustack,
|
|||
}
|
||||
}
|
||||
|
||||
/**
 * One entry of the sorted pointer map: maps an old ID pointer (as stored in
 * undo data) to a slot in #UndoIDPtrMap.refs where the new pointer lives.
 */
typedef struct UndoIDPtrMapItem {
  /** Never changes (matches undo data). Use as sort key for binary search. */
  const void *ptr;
  /** Index into the sibling 'refs' array; write the new pointers there. */
  uint index;
} UndoIDPtrMapItem;
|
||||
|
||||
/**
 * Map of old ID pointers to their re-loaded counterparts, used when decoding
 * undo steps. 'refs' and 'pmap' are parallel arrays of the same length.
 */
typedef struct UndoIDPtrMap {
  /** ID references; 'dst' members are filled in by the foreach callback. */
  UndoRefID *refs;
  /**
   * Pointer map, update 'dst' members before use.
   * Always kept sorted by 'ptr' (adds some overhead when adding;
   * acceptable in practice since lookups dominate).
   */
  UndoIDPtrMapItem *pmap;

  /** Length for both 'refs' & 'pmap' */
  uint len;
  /** Allocated capacity for both arrays (doubled on growth). */
  uint len_alloc;
} UndoIDPtrMap;
|
||||
|
||||
/* Initial capacity for the ID pointer map arrays.
 * NOTE(review): the tiny debug value presumably exists to exercise the
 * realloc/grow path in BKE_undosys_ID_map_add — confirm. */
#ifdef DEBUG
#  define PMAP_DEFAULT_ALLOC 1
#else
#  define PMAP_DEFAULT_ALLOC 32
#endif
|
||||
|
||||
void BKE_undosys_ID_map_foreach_ID_ref(UndoIDPtrMap *map,
|
||||
UndoTypeForEachIDRefFn foreach_ID_ref_fn,
|
||||
void *user_data)
|
||||
{
|
||||
for (uint i = 0; i < map->len; i++) {
|
||||
foreach_ID_ref_fn(user_data, &map->refs[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true when found, otherwise index is set to the index we should insert.
|
||||
*/
|
||||
static bool undosys_ID_map_lookup_index(const UndoIDPtrMap *map, const void *key, uint *r_index)
|
||||
{
|
||||
const UndoIDPtrMapItem *pmap = map->pmap;
|
||||
const uint len = map->len;
|
||||
if (len == 0) {
|
||||
if (r_index) {
|
||||
*r_index = 0;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
int min = 0, max = len - 1;
|
||||
while (min <= max) {
|
||||
const uint mid = (min + max) / 2;
|
||||
if (pmap[mid].ptr < key) {
|
||||
min = mid + 1;
|
||||
}
|
||||
else if (pmap[mid].ptr == key) {
|
||||
if (r_index) {
|
||||
*r_index = mid;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
else if (pmap[mid].ptr > key) {
|
||||
max = mid - 1;
|
||||
}
|
||||
}
|
||||
if (r_index) {
|
||||
*r_index = min;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* A set of ID's use for efficient decoding, so we can map pointers back to the newly loaded data
|
||||
* without performing full look ups each time.
|
||||
*
|
||||
* This can be used as an old_pointer -> new_pointer lookup.
|
||||
*/
|
||||
UndoIDPtrMap *BKE_undosys_ID_map_create(void)
|
||||
{
|
||||
UndoIDPtrMap *map = MEM_mallocN(sizeof(*map), __func__);
|
||||
map->len_alloc = PMAP_DEFAULT_ALLOC;
|
||||
map->refs = MEM_mallocN(sizeof(*map->refs) * map->len_alloc, __func__);
|
||||
map->pmap = MEM_mallocN(sizeof(*map->pmap) * map->len_alloc, __func__);
|
||||
map->len = 0;
|
||||
return map;
|
||||
}
|
||||
void BKE_undosys_ID_map_destroy(UndoIDPtrMap *idpmap)
|
||||
{
|
||||
MEM_SAFE_FREE(idpmap->refs);
|
||||
MEM_SAFE_FREE(idpmap->pmap);
|
||||
MEM_freeN(idpmap);
|
||||
}
|
||||
|
||||
/**
 * Register \a id in \a map, keeping 'pmap' sorted by pointer value.
 * No-op when the ID is already present (or, with USE_LIB_SKIP, linked from a library).
 */
void BKE_undosys_ID_map_add(UndoIDPtrMap *map, ID *id)
{
  uint index;
#ifdef USE_LIB_SKIP
  /* Library-linked IDs are never remapped; skip them entirely. */
  if (id->lib != NULL) {
    return;
  }
#endif

  /* On failure, 'index' is the sorted insertion position. */
  if (undosys_ID_map_lookup_index(map, id, &index)) {
    return; /* exists. */
  }

  const uint len_src = map->len;
  const uint len_dst = map->len + 1;
  /* Grow both parallel arrays by doubling when capacity is exhausted. */
  if (len_dst > map->len_alloc) {
    map->len_alloc *= 2;
    BLI_assert(map->len_alloc >= len_dst);
    map->pmap = MEM_reallocN(map->pmap, sizeof(*map->pmap) * map->len_alloc);
    map->refs = MEM_reallocN(map->refs, sizeof(*map->refs) * map->len_alloc);
  }

#if 0 /* Will be done automatically in callback. */
  BLI_strncpy(map->refs[len_src].name, id->name, sizeof(id->name));
#else
  /* Name is filled in later by the foreach-ID-ref callback. */
  map->refs[len_src].name[0] = '\0';
#endif
  map->refs[len_src].ptr = id;

  /* Shift the tail of 'pmap' up one slot to make room at 'index'
   * (skipped when appending at the end). */
  if (len_src != 0 && index != len_src) {
    memmove(&map->pmap[index + 1], &map->pmap[index], sizeof(*map->pmap) * (len_src - index));
  }
  map->pmap[index].ptr = id;
  /* The new ref was appended, so it lives at the old length. */
  map->pmap[index].index = len_src;

  map->len = len_dst;
}
|
||||
|
||||
ID *BKE_undosys_ID_map_lookup(const UndoIDPtrMap *map, const ID *id_src)
|
||||
{
|
||||
/* We should only ever lookup indices which exist! */
|
||||
uint index;
|
||||
if (!undosys_ID_map_lookup_index(map, id_src, &index)) {
|
||||
BLI_assert(0);
|
||||
}
|
||||
index = map->pmap[index].index;
|
||||
ID *id_dst = map->refs[index].ptr;
|
||||
BLI_assert(id_dst != NULL);
|
||||
BLI_assert(STREQ(id_dst->name, map->refs[index].name));
|
||||
return id_dst;
|
||||
}
|
||||
|
||||
/**
 * Add \a id to \a map, using \a id_prev as a one-entry cache so repeated
 * additions of the same ID skip the map lookup entirely.
 */
void BKE_undosys_ID_map_add_with_prev(UndoIDPtrMap *map, ID *id, ID **id_prev)
{
  /* Only do the real work when the ID differs from the last one seen. */
  if (id != *id_prev) {
    *id_prev = id;
    BKE_undosys_ID_map_add(map, id);
  }
}
|
||||
|
||||
/**
 * Look up \a id_src in \a map, with a one-entry cache:
 * \a id_prev_match holds the previous {source, result} pair and is updated
 * on every cache miss.
 */
ID *BKE_undosys_ID_map_lookup_with_prev(const UndoIDPtrMap *map, ID *id_src, ID *id_prev_match[2])
{
  /* Fast path: repeated lookups of the same ID hit the cache. */
  if (id_src == id_prev_match[0]) {
    return id_prev_match[1];
  }

#ifdef USE_LIB_SKIP
  ID *id_dst = BKE_undosys_ID_map_lookup(map, id_src);
#else
  /* Library-linked IDs are not in the map; they map to themselves. */
  ID *id_dst = (id_src->lib == NULL) ? BKE_undosys_ID_map_lookup(map, id_src) : id_src;
#endif
  id_prev_match[0] = id_src;
  id_prev_match[1] = id_dst;
  return id_dst;
}
|
||||
|
||||
/** \} */
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
|
|
@ -699,7 +699,6 @@ typedef struct MeshUndoStep_Elem {
|
|||
|
||||
/** One mesh-edit undo step: the common UndoStep header plus per-object data. */
typedef struct MeshUndoStep {
  /** Must come first (treated as the base "class" by the undo system). */
  UndoStep step;
  /** Optional shared ID pointer map; may be NULL (freed in step_free). */
  struct UndoIDPtrMap *id_map;
  /** Array of per-edit-object undo data, 'elems_len' items. */
  MeshUndoStep_Elem *elems;
  uint elems_len;
} MeshUndoStep;
|
||||
|
@ -788,10 +787,6 @@ static void mesh_undosys_step_free(UndoStep *us_p)
|
|||
undomesh_free_data(&elem->data);
|
||||
}
|
||||
MEM_freeN(us->elems);
|
||||
|
||||
if (us->id_map != NULL) {
|
||||
BKE_undosys_ID_map_destroy(us->id_map);
|
||||
}
|
||||
}
|
||||
|
||||
static void mesh_undosys_foreach_ID_ref(UndoStep *us_p,
|
||||
|
@ -804,10 +799,6 @@ static void mesh_undosys_foreach_ID_ref(UndoStep *us_p,
|
|||
MeshUndoStep_Elem *elem = &us->elems[i];
|
||||
foreach_ID_ref_fn(user_data, ((UndoRefID *)&elem->obedit_ref));
|
||||
}
|
||||
|
||||
if (us->id_map != NULL) {
|
||||
BKE_undosys_ID_map_foreach_ID_ref(us->id_map, foreach_ID_ref_fn, user_data);
|
||||
}
|
||||
}
|
||||
|
||||
/* Export for ED_undo_sys. */
|
||||
|
|
Loading…
Reference in New Issue