tornavis/source/blender/blenkernel/intern/multires_unsubdivide.cc

/* SPDX-FileCopyrightText: 2020 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup bke
*
* This implements the un-subdivide algorithm, which generates a lower resolution base mesh and
* its corresponding grids to match a given original mesh.
*/
#include "MEM_guardedalloc.h"
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_modifier_types.h"
#include "DNA_scene_types.h"
#include "BLI_gsqueue.h"
#include "BLI_math_vector.h"
#include "BKE_customdata.hh"
#include "BKE_lib_id.h"
#include "BKE_mesh.hh"
#include "BKE_mesh_mapping.hh"
#include "BKE_mesh_runtime.hh"
#include "BKE_modifier.hh"
#include "BKE_multires.hh"
#include "BKE_subdiv.hh"
#include "BKE_subsurf.hh"
#include "bmesh.h"
#include "DEG_depsgraph_query.hh"
#include "multires_reshape.hh"
#include "multires_unsubdivide.hh"
/* This is done in the following steps:
*
* - If there are already grids in the original mesh,
* convert them from tangent displacement to object space coordinates.
* - Assign data-layers to the original mesh to map vertices to a new base mesh.
* These data-layers store the indices of the elements in the original mesh.
* This way the original indices are
* preserved when doing mesh modifications (removing and dissolving vertices)
* when building the new base mesh.
* - Try to find a lower resolution base mesh. This is done by flood fill operation that tags the
* center vertices of the lower level grid.
* If the algorithm can tag all vertices correctly,
* the lower level base mesh is generated by dissolving the tagged vertices.
* - Use the data-layers to map vertices from the base mesh to the original mesh and original to
* base mesh.
* - Find two adjacent vertices on the base mesh to a given vertex to map that loop from base mesh
* to original mesh
* - Extract the grid from the original mesh from that loop. If there are no grids in the original
* mesh, build the new grid directly from the vertex coordinates by iterating in a grid pattern
* over them. If there are grids in the original mesh, iterate in a grid pattern over the faces,
* reorder all the coordinates of the grid in that face and copy those coordinates to the new
* base mesh grid.
* - Copy the new grid data over to a new allocated MDISP layer with the appropriate size to store
* the new levels.
* - Convert the grid data from object space to tangent displacement.
*/
/**
* Used to check if a vertex is in a disconnected element ID.
*/
static bool is_vertex_in_id(BMVert *v, const int *elem_id, int elem)
{
const int v_index = BM_elem_index_get(v);
return elem_id[v_index] == elem;
}
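/**
* Returns true if the vertex is an interior pole of exactly 3 edges.
*/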
static bool is_vertex_pole_three(BMVert *v)
{
return !BM_vert_is_boundary(v) && (BM_vert_edge_count(v) == 3);
}
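/**
* Returns true if the vertex is an interior pole (a non-boundary vertex with 3 edges or 5 or
* more edges).
*/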
static bool is_vertex_pole(BMVert *v)
{
return !BM_vert_is_boundary(v) && (BM_vert_edge_count(v) == 3 || BM_vert_edge_count(v) >= 5);
}
/**
* Returns the first pole that is found in an element ID.
*
* Tries to give priority to 3 vert poles as they generally generate better results in cases where
* the un-subdivide solution is ambiguous.
*/
static BMVert *unsubdivide_find_any_pole(BMesh *bm, int *elem_id, int elem)
{
BMIter iter;
BMVert *v;
BMVert *pole = nullptr;
BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) {
if (is_vertex_in_id(v, elem_id, elem) && is_vertex_pole_three(v)) {
return v;
}
if (is_vertex_in_id(v, elem_id, elem) && is_vertex_pole(v)) {
pole = v;
}
}
return pole;
}
/**
* Checks if the mesh is all quads.
*
* TODO(pablodp606): This can perform additional checks if they are faster than trying to search
* for an un-subdivide solution. This way it is possible to cancel the operation faster.
*/
static bool unsubdivide_is_all_quads(BMesh *bm)
{
BMIter iter;
BMIter iter_a;
BMFace *f;
BMVert *v;
int count = 0;
if (bm->totface < 3) {
return false;
}
BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
count = 0;
BM_ITER_ELEM (v, &iter_a, f, BM_VERTS_OF_FACE) {
count++;
}
if (count != 4) {
return false;
}
}
BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) {
if (BM_vert_is_wire(v)) {
return false;
}
if (BM_vert_edge_count(v) == 0) {
return false;
}
}
return true;
}
/**
* Returns true if from_v and to_v, which should be part of the same quad face, are diagonals.
*/
static bool is_vertex_diagonal(BMVert *from_v, BMVert *to_v)
{
return !BM_edge_exists(from_v, to_v);
}
/**
* Generates a possible solution for un-subdivision by tagging the (0,0)
* vertices of the possible grids.
*
* This works using a flood fill operation using the quads diagonals to jump to the next vertex.
*
* If initial_vertex is part of the base mesh solution, the flood fill should tag only the (0,0)
* vertices of the grids that need to be dissolved, and nothing else.
*/
static void unsubdivide_face_center_vertex_tag(BMesh *bm, BMVert *initial_vertex)
{
bool *visited_verts = static_cast<bool *>(
MEM_calloc_arrayN(bm->totvert, sizeof(bool), "visited vertices"));
GSQueue *queue;
queue = BLI_gsqueue_new(sizeof(BMVert *));
/* Add and tag the vertices connected by a diagonal to initial_vertex to the flood fill queue. If
* initial_vertex is a pole and there is a valid solution, those vertices should be the (0,0) of
* the grids for the loops of initial_vertex. */
BMIter iter;
BMIter iter_a;
BMFace *f;
BMVert *neighbor_v;
BM_ITER_ELEM (f, &iter, initial_vertex, BM_FACES_OF_VERT) {
BM_ITER_ELEM (neighbor_v, &iter_a, f, BM_VERTS_OF_FACE) {
int neighbor_vertex_index = BM_elem_index_get(neighbor_v);
if (neighbor_v != initial_vertex && is_vertex_diagonal(neighbor_v, initial_vertex)) {
BLI_gsqueue_push(queue, &neighbor_v);
visited_verts[neighbor_vertex_index] = true;
BM_elem_flag_set(neighbor_v, BM_ELEM_TAG, true);
}
}
}
/* Repeat a similar operation for all vertices in the queue. */
/* In this case, add to the queue the vertices connected by 2 steps using the diagonals in any
* direction. If a solution exists and `initial_vertex` was a pole, this is guaranteed to tag all
* the (0,0) vertices of the grids, and nothing else. */
/* If it was not a pole, it may or may not find a solution, even if the solution exists. */
while (!BLI_gsqueue_is_empty(queue)) {
BMVert *from_v;
BLI_gsqueue_pop(queue, &from_v);
/* Get the diagonals (first connected step) */
GSQueue *diagonals;
diagonals = BLI_gsqueue_new(sizeof(BMVert *));
BM_ITER_ELEM (f, &iter, from_v, BM_FACES_OF_VERT) {
BM_ITER_ELEM (neighbor_v, &iter_a, f, BM_VERTS_OF_FACE) {
if (neighbor_v != from_v && is_vertex_diagonal(neighbor_v, from_v)) {
BLI_gsqueue_push(diagonals, &neighbor_v);
}
}
}
/* Do the second connected step. These vertices are the ones that are added to the flood fill
* queue. */
while (!BLI_gsqueue_is_empty(diagonals)) {
BMVert *diagonal_v;
BLI_gsqueue_pop(diagonals, &diagonal_v);
BM_ITER_ELEM (f, &iter, diagonal_v, BM_FACES_OF_VERT) {
BM_ITER_ELEM (neighbor_v, &iter_a, f, BM_VERTS_OF_FACE) {
int neighbor_vertex_index = BM_elem_index_get(neighbor_v);
if (!visited_verts[neighbor_vertex_index] && neighbor_v != diagonal_v &&
is_vertex_diagonal(neighbor_v, diagonal_v))
{
BLI_gsqueue_push(queue, &neighbor_v);
visited_verts[neighbor_vertex_index] = true;
BM_elem_flag_set(neighbor_v, BM_ELEM_TAG, true);
}
}
}
}
BLI_gsqueue_free(diagonals);
}
BLI_gsqueue_free(queue);
MEM_freeN(visited_verts);
}
/**
* This function checks if the current status of the #BMVert tags
* corresponds to a valid un-subdivide solution.
*
* This means that all vertices corresponding to the (0,0) grid coordinate should be tagged.
*
* On a valid solution, the following things should happen:
* - No boundary vertices should be tagged
* - No vertices connected by an edge or a quad diagonal to a tagged vertex should be tagged
* - All boundary vertices should have one vertex connected by an edge or a diagonal tagged
*/
static bool unsubdivide_is_center_vertex_tag_valid(BMesh *bm, int *elem_id, int elem)
{
BMVert *v, *neighbor_v;
BMIter iter, iter_a, iter_b;
BMFace *f;
BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) {
if (is_vertex_in_id(v, elem_id, elem)) {
if (BM_elem_flag_test(v, BM_ELEM_TAG)) {
/* Tagged vertex in boundary */
if (BM_vert_is_boundary(v)) {
return false;
}
/* Tagged vertex with connected tagged vertex. */
BM_ITER_ELEM (f, &iter_a, v, BM_FACES_OF_VERT) {
BM_ITER_ELEM (neighbor_v, &iter_b, f, BM_VERTS_OF_FACE) {
if (neighbor_v != v && BM_elem_flag_test(neighbor_v, BM_ELEM_TAG)) {
return false;
}
}
}
}
if (BM_vert_is_boundary(v)) {
/* Un-tagged vertex in boundary without connected tagged vertices. */
bool any_tagged = false;
BM_ITER_ELEM (f, &iter_a, v, BM_FACES_OF_VERT) {
BM_ITER_ELEM (neighbor_v, &iter_b, f, BM_VERTS_OF_FACE) {
if (neighbor_v != v && BM_elem_flag_test(neighbor_v, BM_ELEM_TAG)) {
any_tagged = true;
}
}
}
if (!any_tagged) {
return false;
}
}
}
}
return true;
}
/**
* Search and validates an un-subdivide solution for a given element ID.
*/
static bool unsubdivide_tag_disconnected_mesh_element(BMesh *bm, int *elem_id, int elem)
{
/* First, get vertex candidates to try to generate possible un-subdivide solution. */
/* Find a vertex pole. If there is a solution on an all quad base mesh, this vertex should be
* part of the base mesh. If it isn't, then there is no solution. */
GSQueue *initial_vertex = BLI_gsqueue_new(sizeof(BMVert *));
BMVert *initial_vertex_pole = unsubdivide_find_any_pole(bm, elem_id, elem);
if (initial_vertex_pole != nullptr) {
BLI_gsqueue_push(initial_vertex, &initial_vertex_pole);
}
/* Also try from the 4 different vertices of a quad in the current
* disconnected element ID. If a solution exists the search should return a valid solution from
* one of these vertices. */
BMFace *f, *init_face = nullptr;
BMVert *v;
BMIter iter_a, iter_b;
BM_ITER_MESH (f, &iter_a, bm, BM_FACES_OF_MESH) {
BM_ITER_ELEM (v, &iter_b, f, BM_VERTS_OF_FACE) {
if (is_vertex_in_id(v, elem_id, elem)) {
init_face = f;
break;
}
}
if (init_face != nullptr) {
break;
}
}
BM_ITER_ELEM (v, &iter_a, init_face, BM_VERTS_OF_FACE) {
BLI_gsqueue_push(initial_vertex, &v);
}
bool valid_tag_found = false;
/* Check all vertex candidates to a solution. */
while (!BLI_gsqueue_is_empty(initial_vertex)) {
BMVert *iv;
BLI_gsqueue_pop(initial_vertex, &iv);
/* Generate a possible solution. */
unsubdivide_face_center_vertex_tag(bm, iv);
/* Check if the solution is valid. If it is, stop searching. */
if (unsubdivide_is_center_vertex_tag_valid(bm, elem_id, elem)) {
valid_tag_found = true;
break;
}
/* If the solution is not valid, reset the state of all tags in this disconnected element ID
* and try again. */
BMVert *v_reset;
BMIter iter;
BM_ITER_MESH (v_reset, &iter, bm, BM_VERTS_OF_MESH) {
if (is_vertex_in_id(v_reset, elem_id, elem)) {
BM_elem_flag_set(v_reset, BM_ELEM_TAG, false);
}
}
}
BLI_gsqueue_free(initial_vertex);
return valid_tag_found;
}
/**
* Uses a flood fill operation to generate a different ID for each disconnected mesh element.
*/
static int unsubdivide_init_elem_ids(BMesh *bm, int *elem_id)
{
bool *visited_verts = static_cast<bool *>(
MEM_calloc_arrayN(bm->totvert, sizeof(bool), "visited vertices"));
int current_id = 0;
for (int i = 0; i < bm->totvert; i++) {
if (!visited_verts[i]) {
GSQueue *queue;
queue = BLI_gsqueue_new(sizeof(BMVert *));
visited_verts[i] = true;
elem_id[i] = current_id;
BMVert *iv = BM_vert_at_index(bm, i);
BLI_gsqueue_push(queue, &iv);
while (!BLI_gsqueue_is_empty(queue)) {
BMIter iter;
BMVert *current_v, *neighbor_v;
BMEdge *ed;
BLI_gsqueue_pop(queue, &current_v);
BM_ITER_ELEM (ed, &iter, current_v, BM_EDGES_OF_VERT) {
neighbor_v = BM_edge_other_vert(ed, current_v);
const int neighbor_index = BM_elem_index_get(neighbor_v);
if (!visited_verts[neighbor_index]) {
visited_verts[neighbor_index] = true;
elem_id[neighbor_index] = current_id;
BLI_gsqueue_push(queue, &neighbor_v);
}
}
}
current_id++;
BLI_gsqueue_free(queue);
}
}
MEM_freeN(visited_verts);
return current_id;
}
/**
* Builds a base mesh one subdivision level down from the current original mesh if the original
* mesh has a valid solution stored in the #BMVert tags.
*/
static void unsubdivide_build_base_mesh_from_tags(BMesh *bm)
{
BMVert *v;
BMIter iter;
/* Stores the vertices which correspond to (1, 0) and (0, 1) of the grids in the select flag. */
BM_mesh_elem_hflag_enable_all(bm, BM_VERT | BM_EDGE | BM_FACE, BM_ELEM_SELECT, false);
BMVert *v_neighbor;
BMIter iter_a;
BMEdge *ed;
BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) {
BM_ITER_ELEM (ed, &iter_a, v, BM_EDGES_OF_VERT) {
v_neighbor = BM_edge_other_vert(ed, v);
if (BM_elem_flag_test(v_neighbor, BM_ELEM_TAG)) {
BM_elem_flag_set(v, BM_ELEM_SELECT, false);
}
}
}
/* Dissolves the (0,0) vertices of the grids. */
BMO_op_callf(bm,
(BMO_FLAG_DEFAULTS & ~BMO_FLAG_RESPECT_HIDE),
"dissolve_verts verts=%hv use_face_split=%b use_boundary_tear=%b",
BM_ELEM_TAG,
false,
true);
BM_mesh_elem_hflag_disable_all(bm, BM_VERT | BM_EDGE | BM_FACE, BM_ELEM_TAG, false);
/* Copy the select flag to the tag flag. */
BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) {
if (!BM_elem_flag_test(v, BM_ELEM_SELECT)) {
BM_elem_flag_set(v, BM_ELEM_TAG, true);
}
}
/* Dissolves the (1,0) and (0,1) vertices of the grids. */
BMO_op_callf(bm,
(BMO_FLAG_DEFAULTS & ~BMO_FLAG_RESPECT_HIDE),
"dissolve_verts verts=%hv use_face_split=%b use_boundary_tear=%b",
BM_ELEM_TAG,
false,
true);
}
/**
* Main function to get a base mesh one level down from the current original mesh if it exists.
*
* This searches for different un-subdivide solutions and stores them as a combination of #BMVert
* flags for each disconnected mesh element.
*
* If the solutions for all elements are valid, it builds a new base mesh based on those tags by
* dissolving and merging vertices.
*/
static bool multires_unsubdivide_single_level(BMesh *bm)
{
/* Do a first check to make sure that it makes sense to search for un-subdivision in this mesh.
*/
if (!unsubdivide_is_all_quads(bm)) {
return false;
}
/* Initialize the vertex table. */
BM_mesh_elem_table_init(bm, BM_VERT);
BM_mesh_elem_table_ensure(bm, BM_VERT);
/* Build disconnected elements IDs. Each disconnected mesh element is evaluated separately. */
int *elem_id = static_cast<int *>(MEM_calloc_arrayN(bm->totvert, sizeof(int), " ELEM ID"));
const int tot_ids = unsubdivide_init_elem_ids(bm, elem_id);
bool valid_tag_found = true;
/* Reset the #BMesh flags as they are used to store data during the un-subdivide process. */
BM_mesh_elem_hflag_disable_all(bm, BM_VERT | BM_EDGE | BM_FACE, BM_ELEM_TAG, false);
BM_mesh_elem_hflag_disable_all(bm, BM_VERT | BM_EDGE | BM_FACE, BM_ELEM_SELECT, false);
/* For each disconnected mesh element ID, search if an un-subdivide solution is possible. The
* whole un-subdivide process fails if a single disconnected mesh element fails. */
for (int id = 0; id < tot_ids; id++) {
/* Try to find the #BMesh vertex flag tags corresponding to an un-subdivide solution. */
if (!unsubdivide_tag_disconnected_mesh_element(bm, elem_id, id)) {
valid_tag_found = false;
break;
}
}
/* If a solution was found for all element IDs, build the new base mesh using the solution
* stored in the BMVert tags. */
if (valid_tag_found) {
unsubdivide_build_base_mesh_from_tags(bm);
}
MEM_freeN(elem_id);
return valid_tag_found;
}
/**
* Returns the next edge and vertex in the direction of a given edge.
*/
static BMEdge *edge_step(BMVert *v, BMEdge *edge, BMVert **r_next_vertex)
{
BMIter iter;
BMEdge *test_edge;
if (edge == nullptr) {
(*r_next_vertex) = v;
return edge;
}
(*r_next_vertex) = BM_edge_other_vert(edge, v);
BM_ITER_ELEM (test_edge, &iter, (*r_next_vertex), BM_EDGES_OF_VERT) {
if (!BM_edge_share_quad_check(test_edge, edge)) {
return test_edge;
}
}
return nullptr;
}
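/**
* Returns a face of the given edge that shares an edge with f, which is the next face when
* walking the mesh in a grid pattern. Returns f itself when no such face exists.
*/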
static BMFace *face_step(BMEdge *edge, BMFace *f)
{
BMIter iter;
BMFace *face_iter;
BM_ITER_ELEM (face_iter, &iter, edge, BM_FACES_OF_EDGE) {
if (BM_face_share_edge_check(face_iter, f)) {
return face_iter;
}
}
return f;
}
/**
* Returns the other edge which belongs to the face f which is different from edge_x and shares
* initial_vertex.
*/
static BMEdge *get_initial_edge_y(BMFace *f, BMEdge *edge_x, BMVert *initial_vertex)
{
BMIter iter;
BMEdge *test_edge;
BM_ITER_ELEM (test_edge, &iter, f, BM_EDGES_OF_FACE) {
if (edge_x != test_edge) {
if (test_edge->v1 != initial_vertex && test_edge->v2 == initial_vertex) {
return test_edge;
}
if (test_edge->v2 != initial_vertex && test_edge->v1 == initial_vertex) {
return test_edge;
}
}
}
return nullptr;
}
/**
* Writes the current mdisp data into the corresponding area of the quad face, given its corner's loop.
*/
static void write_loop_in_face_grid(
float (*face_grid)[3], MDisps *mdisp, int face_grid_size, int orig_grid_size, int loop)
{
int origin[2];
int step_x[2];
int step_y[2];
const int grid_offset = orig_grid_size - 1;
origin[0] = grid_offset;
origin[1] = grid_offset;
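/* Each case selects the X and Y step directions for this corner's loop so its grid data is
* written into the face grid with a consistent orientation. */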
switch (loop) {
case 0:
step_x[0] = -1;
step_x[1] = 0;
step_y[0] = 0;
step_y[1] = -1;
break;
case 1:
step_x[0] = 0;
step_x[1] = 1;
step_y[0] = -1;
step_y[1] = 0;
break;
case 2:
step_x[0] = 1;
step_x[1] = 0;
step_y[0] = 0;
step_y[1] = 1;
break;
case 3:
step_x[0] = 0;
step_x[1] = -1;
step_y[0] = 1;
step_y[1] = 0;
break;
default:
BLI_assert_msg(0, "Should never happen");
break;
}
for (int y = 0; y < orig_grid_size; y++) {
for (int x = 0; x < orig_grid_size; x++) {
const int remap_x = origin[1] + (step_x[1] * x) + (step_y[1] * y);
const int remap_y = origin[0] + (step_x[0] * x) + (step_y[0] * y);
const int final_index = remap_x + remap_y * face_grid_size;
copy_v3_v3(face_grid[final_index], mdisp->disps[x + y * orig_grid_size]);
}
}
}
/**
* Writes a buffer containing the 4 grids in the correct orientation of the 4 loops of a face into
* the main #MultiresUnsubdivideGrid that is being extracted.
*/
static void write_face_grid_in_unsubdivide_grid(MultiresUnsubdivideGrid *grid,
float (*face_grid)[3],
int face_grid_size,
int gunsub_x,
int gunsub_y)
{
const int grid_it = face_grid_size - 1;
for (int y = 0; y < face_grid_size; y++) {
for (int x = 0; x < face_grid_size; x++) {
const int remap_x = (grid_it * gunsub_x) + x;
const int remap_y = (grid_it * gunsub_y) + y;
const int remap_index_y = grid->grid_size - remap_x - 1;
const int remap_index_x = grid->grid_size - remap_y - 1;
const int grid_index = remap_index_x + (remap_index_y * grid->grid_size);
copy_v3_v3(grid->grid_co[grid_index], face_grid[x + y * face_grid_size]);
}
}
}
/**
* Stores the data from the mdisps grids of the loops of the face f
* into the new grid for the new base mesh.
*
* Used when there are already grids in the original mesh.
*/
static void store_grid_data(MultiresUnsubdivideContext *context,
MultiresUnsubdivideGrid *grid,
BMVert *v,
BMFace *f,
int grid_x,
int grid_y)
{
Mesh *original_mesh = context->original_mesh;
const blender::OffsetIndices faces = original_mesh->faces();
const blender::Span<int> corner_verts = original_mesh->corner_verts();
const blender::IndexRange face = faces[BM_elem_index_get(f)];
const int corner_vertex_index = BM_elem_index_get(v);
/* Calculates an offset to write the grids correctly oriented in the main
* #MultiresUnsubdivideGrid. */
int loop_offset = 0;
for (int i = 0; i < face.size(); i++) {
const int loop_index = face[i];
if (corner_verts[loop_index] == corner_vertex_index) {
loop_offset = i;
break;
}
}
/* Write the 4 grids of the current quad with the right orientation into the face_grid buffer. */
const int grid_size = BKE_ccg_gridsize(context->num_original_levels);
const int face_grid_size = BKE_ccg_gridsize(context->num_original_levels + 1);
const int face_grid_area = face_grid_size * face_grid_size;
float(*face_grid)[3] = static_cast<float(*)[3]>(
MEM_calloc_arrayN(face_grid_area, sizeof(float[3]), "face_grid"));
for (int i = 0; i < face.size(); i++) {
const int loop_index = face[i];
MDisps *mdisp = &context->original_mdisp[loop_index];
int quad_loop = i - loop_offset;
if (quad_loop < 0) {
quad_loop += 4;
}
if (quad_loop >= 4) {
quad_loop -= 4;
}
write_loop_in_face_grid(face_grid, mdisp, face_grid_size, grid_size, quad_loop);
}
/* Write the face_grid buffer in the correct position in the #MultiresUnsubdivideGrid that is
* being extracted. */
write_face_grid_in_unsubdivide_grid(grid, face_grid, face_grid_size, grid_x, grid_y);
MEM_freeN(face_grid);
}
/**
* Stores the data into the new grid from a #BMVert.
* Used when there are no grids in the original mesh.
*/
static void store_vertex_data(MultiresUnsubdivideGrid *grid, BMVert *v, int grid_x, int grid_y)
{
const int remap_index_y = grid->grid_size - 1 - grid_x;
const int remap_index_x = grid->grid_size - 1 - grid_y;
const int grid_index = remap_index_x + (remap_index_y * grid->grid_size);
copy_v3_v3(grid->grid_co[grid_index], v->co);
}
/**
* Main function to extract data from the original bmesh and MDISPS as grids for the new base mesh.
*/
static void multires_unsubdivide_extract_single_grid_from_face_edge(
MultiresUnsubdivideContext *context,
BMFace *f1,
BMEdge *e1,
bool flip_grid,
MultiresUnsubdivideGrid *grid)
{
BMVert *initial_vertex;
BMEdge *initial_edge_x;
BMEdge *initial_edge_y;
const int grid_size = BKE_ccg_gridsize(context->num_new_levels);
const int unsubdiv_grid_size = grid->grid_size = BKE_ccg_gridsize(context->num_total_levels);
grid->grid_size = unsubdiv_grid_size;
grid->grid_co = static_cast<float(*)[3]>(MEM_calloc_arrayN(
unsubdiv_grid_size * unsubdiv_grid_size, sizeof(float[3]), "grids coordinates"));
/* Get the vertex on the corner of the grid. This vertex was tagged previously as it also exists
* on the base mesh. */
initial_edge_x = e1;
if (BM_elem_flag_test(initial_edge_x->v1, BM_ELEM_TAG)) {
initial_vertex = initial_edge_x->v1;
}
else {
initial_vertex = initial_edge_x->v2;
}
/* From that vertex, get the edge that defines the grid Y axis for extraction. */
initial_edge_y = get_initial_edge_y(f1, initial_edge_x, initial_vertex);
if (flip_grid) {
BMEdge *edge_temp;
edge_temp = initial_edge_x;
initial_edge_x = initial_edge_y;
initial_edge_y = edge_temp;
}
int grid_x = 0;
int grid_y = 0;
BMVert *current_vertex_x = initial_vertex;
BMEdge *edge_x = initial_edge_x;
BMVert *current_vertex_y = initial_vertex;
BMEdge *edge_y = initial_edge_y;
BMEdge *prev_edge_y = initial_edge_y;
BMFace *current_face = f1;
BMFace *grid_face = f1;
/* If the data is going to be extracted from the already existing grids, there is no need to go
* to the last vertex of the iteration as that coordinate is also included in the grids
* corresponding to the loop of the face of the previous iteration. */
int grid_iteration_max_steps = grid_size;
if (context->num_original_levels > 0) {
grid_iteration_max_steps = grid_size - 1;
}
/* Iterate over the mesh vertices in a grid pattern using the axis defined by the two initial
* edges. */
while (grid_y < grid_iteration_max_steps) {
grid_face = current_face;
while (grid_x < grid_iteration_max_steps) {
if (context->num_original_levels == 0) {
/* If there were no grids on the original mesh, extract the data directly from the
* vertices. */
store_vertex_data(grid, current_vertex_x, grid_x, grid_y);
edge_x = edge_step(current_vertex_x, edge_x, &current_vertex_x);
}
else {
/* If there were grids in the original mesh, extract the data from the grids and iterate
* over the faces. */
store_grid_data(context, grid, current_vertex_x, grid_face, grid_x, grid_y);
edge_x = edge_step(current_vertex_x, edge_x, &current_vertex_x);
grid_face = face_step(edge_x, grid_face);
}
grid_x++;
}
grid_x = 0;
edge_y = edge_step(current_vertex_y, edge_y, &current_vertex_y);
current_vertex_x = current_vertex_y;
/* Get the next edge_x to extract the next row of the grid. This needs to be done because there
* may be two edges connected to current_vertex_x that belong to two different grids. */
BMIter iter;
BMEdge *ed;
BMFace *f;
BM_ITER_ELEM (ed, &iter, current_vertex_x, BM_EDGES_OF_VERT) {
if (ed != prev_edge_y && BM_edge_in_face(ed, current_face)) {
edge_x = ed;
break;
}
}
BM_ITER_ELEM (f, &iter, edge_x, BM_FACES_OF_EDGE) {
if (f != current_face) {
current_face = f;
break;
}
}
prev_edge_y = edge_y;
grid_y++;
}
}
/**
* Returns the l+1 and l-1 vertices of the base mesh face where the grid from the face f1 and edge
* e1 is going to be extracted.
*
* These vertices should always have a corresponding existing vertex on the base mesh.
*/
static void multires_unsubdivide_get_grid_corners_on_base_mesh(BMFace *f1,
BMEdge *e1,
BMVert **r_corner_x,
BMVert **r_corner_y)
{
BMVert *initial_vertex;
BMEdge *initial_edge_x;
BMEdge *initial_edge_y;
initial_edge_x = e1;
if (BM_elem_flag_test(initial_edge_x->v1, BM_ELEM_TAG)) {
initial_vertex = initial_edge_x->v1;
}
else {
initial_vertex = initial_edge_x->v2;
}
/* From that vertex, get the edge that defines the grid Y axis for extraction. */
initial_edge_y = get_initial_edge_y(f1, initial_edge_x, initial_vertex);
BMVert *current_vertex_x = initial_vertex;
BMEdge *edge_x = initial_edge_x;
BMVert *current_vertex_y = initial_vertex;
BMEdge *edge_y = initial_edge_y;
/* Do an edge step until it finds a tagged vertex, which is part of the base mesh. */
/* x axis */
edge_x = edge_step(current_vertex_x, edge_x, &current_vertex_x);
while (!BM_elem_flag_test(current_vertex_x, BM_ELEM_TAG)) {
edge_x = edge_step(current_vertex_x, edge_x, &current_vertex_x);
}
(*r_corner_x) = current_vertex_x;
/* Same for y axis */
edge_y = edge_step(current_vertex_y, edge_y, &current_vertex_y);
while (!BM_elem_flag_test(current_vertex_y, BM_ELEM_TAG)) {
edge_y = edge_step(current_vertex_y, edge_y, &current_vertex_y);
}
(*r_corner_y) = current_vertex_y;
}
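/**
* Creates a #BMesh copy of the given mesh with tool-flags enabled so #BMesh operators can be
* used on it.
*/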
static BMesh *get_bmesh_from_mesh(Mesh *mesh)
{
const BMAllocTemplate allocsize = BMALLOC_TEMPLATE_FROM_ME(mesh);
BMeshCreateParams bm_create_params{};
bm_create_params.use_toolflags = true;
BMesh *bm = BM_mesh_create(&allocsize, &bm_create_params);
BMeshFromMeshParams bm_from_me_params{};
bm_from_me_params.calc_face_normal = true;
bm_from_me_params.calc_vert_normal = true;
BM_mesh_bm_from_me(bm, mesh, &bm_from_me_params);
return bm;
}
/* Data-layer names to store the original indices of the elements before modifying the mesh. */
static const char lname[] = "l_remap_index";
static const char vname[] = "v_remap_index";
static void multires_unsubdivide_free_original_datalayers(Mesh *mesh)
{
const int l_layer_index = CustomData_get_named_layer_index(
&mesh->loop_data, CD_PROP_INT32, lname);
if (l_layer_index != -1) {
CustomData_free_layer(&mesh->loop_data, CD_PROP_INT32, mesh->totloop, l_layer_index);
}
const int v_layer_index = CustomData_get_named_layer_index(
&mesh->vert_data, CD_PROP_INT32, vname);
if (v_layer_index != -1) {
CustomData_free_layer(&mesh->vert_data, CD_PROP_INT32, mesh->totvert, v_layer_index);
}
}
/**
* Generates two data-layers to map loops and vertices from base mesh to original mesh after
* dissolving the vertices.
*/
static void multires_unsubdivide_add_original_index_datalayers(Mesh *mesh)
{
multires_unsubdivide_free_original_datalayers(mesh);
int *l_index = static_cast<int *>(CustomData_add_layer_named(
&mesh->loop_data, CD_PROP_INT32, CD_SET_DEFAULT, mesh->totloop, lname));
int *v_index = static_cast<int *>(CustomData_add_layer_named(
&mesh->vert_data, CD_PROP_INT32, CD_SET_DEFAULT, mesh->totvert, vname));
/* Initialize these data-layers with the indices in the current mesh. */
for (int i = 0; i < mesh->totloop; i++) {
l_index[i] = i;
}
for (int i = 0; i < mesh->totvert; i++) {
v_index[i] = i;
}
}
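/**
* Converts the original mesh to #BMesh and prepares it for grid extraction: initializes the
* element tables, clears all flags, tags the vertices that still exist in the new base mesh
* (using the mapping data-layer) and caches the loop to face map.
*/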
static void multires_unsubdivide_prepare_original_bmesh_for_extract(
MultiresUnsubdivideContext *context)
{
Mesh *original_mesh = context->original_mesh;
Mesh *base_mesh = context->base_mesh;
BMesh *bm_original_mesh = context->bm_original_mesh = get_bmesh_from_mesh(original_mesh);
/* Initialize the elem tables. */
BM_mesh_elem_table_ensure(bm_original_mesh, BM_EDGE);
BM_mesh_elem_table_ensure(bm_original_mesh, BM_FACE);
BM_mesh_elem_table_ensure(bm_original_mesh, BM_VERT);
/* Disable all flags. */
BM_mesh_elem_hflag_disable_all(
bm_original_mesh, BM_VERT | BM_EDGE | BM_FACE, BM_ELEM_TAG, false);
BM_mesh_elem_hflag_disable_all(
bm_original_mesh, BM_VERT | BM_EDGE | BM_FACE, BM_ELEM_SELECT, false);
/* Get the mapping data-layer. */
context->base_to_orig_vmap = static_cast<const int *>(
CustomData_get_layer_named(&base_mesh->vert_data, CD_PROP_INT32, vname));
/* Tag the base mesh vertices in the original mesh. */
for (int i = 0; i < base_mesh->totvert; i++) {
int vert_basemesh_index = context->base_to_orig_vmap[i];
BMVert *v = BM_vert_at_index(bm_original_mesh, vert_basemesh_index);
BM_elem_flag_set(v, BM_ELEM_TAG, true);
}
context->loop_to_face_map = original_mesh->corner_to_face_map();
}
/**
* Checks the orientation of the loops to flip the x and y axis when extracting the grid if
* necessary.
*/
static bool multires_unsubdivide_flip_grid_x_axis(const blender::OffsetIndices<int> faces,
const blender::Span<int> corner_verts,
int face_index,
int loop,
int v_x)
{
const blender::IndexRange face = faces[face_index];
const int v_first = corner_verts[face.start()];
if ((loop == (face.start() + (face.size() - 1))) && v_first == v_x) {
return true;
}
int next_l_index = loop + 1;
if (next_l_index < face.start() + face.size()) {
const int v_next = corner_verts[next_l_index];
if (v_next == v_x) {
return true;
}
}
return false;
}
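/**
* Extracts one grid per loop of the new base mesh from the original mesh.
*
* For every base mesh vertex, this finds the corresponding vertex in the original mesh, walks
* each of its loops to locate the face and edge where a grid starts, matches that grid to the
* right loop in the base mesh and extracts its data.
*/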
static void multires_unsubdivide_extract_grids(MultiresUnsubdivideContext *context)
{
Mesh *original_mesh = context->original_mesh;
Mesh *base_mesh = context->base_mesh;
BMesh *bm_original_mesh = context->bm_original_mesh;
context->num_grids = base_mesh->totloop;
context->base_mesh_grids = static_cast<MultiresUnsubdivideGrid *>(
MEM_calloc_arrayN(base_mesh->totloop, sizeof(MultiresUnsubdivideGrid), "grids"));
/* Based on the existing indices in the data-layers, generate two vertex indices maps. */
/* From vertex index in original to vertex index in base and from vertex index in base to vertex
* index in original. */
int *orig_to_base_vmap = static_cast<int *>(
MEM_calloc_arrayN(bm_original_mesh->totvert, sizeof(int), "orig vmap"));
int *base_to_orig_vmap = static_cast<int *>(
MEM_calloc_arrayN(base_mesh->totvert, sizeof(int), "base vmap"));
context->base_to_orig_vmap = static_cast<const int *>(
CustomData_get_layer_named(&base_mesh->vert_data, CD_PROP_INT32, vname));
for (int i = 0; i < base_mesh->totvert; i++) {
base_to_orig_vmap[i] = context->base_to_orig_vmap[i];
}
/* If an index in the original mesh does not exist in the base mesh (it was dissolved when
* creating the new base mesh), map it to -1. */
for (int i = 0; i < original_mesh->totvert; i++) {
orig_to_base_vmap[i] = -1;
}
for (int i = 0; i < base_mesh->totvert; i++) {
const int orig_vertex_index = context->base_to_orig_vmap[i];
orig_to_base_vmap[orig_vertex_index] = i;
}
/* Add the original data-layers to the base mesh to have the loop indices stored in a data-layer,
* so they can be used from #BMesh. */
multires_unsubdivide_add_original_index_datalayers(base_mesh);
BMesh *bm_base_mesh = get_bmesh_from_mesh(base_mesh);
BMIter iter, iter_a, iter_b;
BMVert *v;
BMLoop *l, *lb;
BM_mesh_elem_table_ensure(bm_base_mesh, BM_VERT);
BM_mesh_elem_table_ensure(bm_base_mesh, BM_FACE);
/* Get the data-layer that contains the loops indices. */
const int base_l_offset = CustomData_get_offset_named(
&bm_base_mesh->ldata, CD_PROP_INT32, lname);
const blender::OffsetIndices faces = base_mesh->faces();
const blender::Span<int> corner_verts = base_mesh->corner_verts();
/* Main loop for extracting the grids. Iterates over the base mesh vertices. */
BM_ITER_MESH (v, &iter, bm_base_mesh, BM_VERTS_OF_MESH) {
/* For each base mesh vertex, get the corresponding #BMVert of the original mesh using the
* vertex map. */
const int orig_vertex_index = base_to_orig_vmap[BM_elem_index_get(v)];
BMVert *vert_original = BM_vert_at_index(bm_original_mesh, orig_vertex_index);
/* Iterate over the loops of that vertex in the original mesh. */
BM_ITER_ELEM (l, &iter_a, vert_original, BM_LOOPS_OF_VERT) {
/* For each loop, get the two vertices that should map to the l+1 and l-1 vertices in the
* base mesh of the face of the grid that is going to be extracted. */
BMVert *corner_x, *corner_y;
multires_unsubdivide_get_grid_corners_on_base_mesh(l->f, l->e, &corner_x, &corner_y);
/* Map the two obtained vertices to the base mesh. */
const int corner_x_index = orig_to_base_vmap[BM_elem_index_get(corner_x)];
const int corner_y_index = orig_to_base_vmap[BM_elem_index_get(corner_y)];
/* Iterate over the loops of the same vertex in the base mesh. With the previously obtained
* vertices and the current vertex it is possible to get the index of the loop in the base
* mesh the grid that is going to be extracted belongs to. */
BM_ITER_ELEM (lb, &iter_b, v, BM_LOOPS_OF_VERT) {
BMFace *base_face = lb->f;
BMVert *base_corner_x = BM_vert_at_index(bm_base_mesh, corner_x_index);
BMVert *base_corner_y = BM_vert_at_index(bm_base_mesh, corner_y_index);
/* If this is the correct loop in the base mesh, the original vertex and the two corners
* should be in the loop's face. */
if (BM_vert_in_face(base_corner_x, base_face) && BM_vert_in_face(base_corner_y, base_face))
{
/* Get the index of the loop. */
const int base_mesh_loop_index = BM_ELEM_CD_GET_INT(lb, base_l_offset);
const int base_mesh_face_index = BM_elem_index_get(base_face);
/* Check the orientation of the loops in case that is needed to flip the x and y axis
* when extracting the grid. */
const bool flip_grid = multires_unsubdivide_flip_grid_x_axis(
faces, corner_verts, base_mesh_face_index, base_mesh_loop_index, corner_x_index);
/* Extract the grid for that loop. */
context->base_mesh_grids[base_mesh_loop_index].grid_index = base_mesh_loop_index;
multires_unsubdivide_extract_single_grid_from_face_edge(
context, l->f, l->e, !flip_grid, &context->base_mesh_grids[base_mesh_loop_index]);
break;
}
}
}
}
MEM_freeN(orig_to_base_vmap);
MEM_freeN(base_to_orig_vmap);
BM_mesh_free(bm_base_mesh);
multires_unsubdivide_free_original_datalayers(base_mesh);
}
static void multires_unsubdivide_private_extract_data_free(MultiresUnsubdivideContext *context)
{
if (context->bm_original_mesh != nullptr) {
BM_mesh_free(context->bm_original_mesh);
}
}
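/**
* Initializes the un-subdivide context with the original mesh and the current number of levels
* of its multires modifier.
*/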
void multires_unsubdivide_context_init(MultiresUnsubdivideContext *context,
Mesh *original_mesh,
MultiresModifierData *mmd)
{
context->original_mesh = original_mesh;
context->num_new_levels = 0;
context->num_total_levels = 0;
context->num_original_levels = mmd->totlvl;
}
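/**
* Un-subdivides the original mesh as many times as possible (up to max_new_levels), stores the
* resulting base mesh in the context and extracts the grids for it. Returns false when no
* un-subdivide level could be found.
*/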
bool multires_unsubdivide_to_basemesh(MultiresUnsubdivideContext *context)
{
Mesh *original_mesh = context->original_mesh;
/* Prepare the data-layers to map base to original. */
multires_unsubdivide_add_original_index_datalayers(original_mesh);
BMesh *bm_base_mesh = get_bmesh_from_mesh(original_mesh);
/* Un-subdivide as many iterations as possible. */
context->num_new_levels = 0;
int num_levels_left = context->max_new_levels;
while (num_levels_left > 0 && multires_unsubdivide_single_level(bm_base_mesh)) {
context->num_new_levels++;
num_levels_left--;
}
/* If no un-subdivide steps were possible, free the bmesh, the map data-layers and stop. */
if (context->num_new_levels == 0) {
multires_unsubdivide_free_original_datalayers(original_mesh);
BM_mesh_free(bm_base_mesh);
return false;
}
/* Calculate the final levels for the new grids over base mesh. */
context->num_total_levels = context->num_new_levels + context->num_original_levels;
/* Store the new base-mesh as a mesh in context, free bmesh. */
context->base_mesh = BKE_mesh_new_nomain(0, 0, 0, 0);
BMeshToMeshParams bm_to_me_params{};
bm_to_me_params.calc_object_remap = true;
BM_mesh_bm_to_me(nullptr, bm_base_mesh, context->base_mesh, &bm_to_me_params);
BM_mesh_free(bm_base_mesh);
/* Initialize bmesh and maps for the original mesh and extract the grids. */
multires_unsubdivide_prepare_original_bmesh_for_extract(context);
multires_unsubdivide_extract_grids(context);
return true;
}
void multires_unsubdivide_context_free(MultiresUnsubdivideContext *context)
{
multires_unsubdivide_private_extract_data_free(context);
for (int i = 0; i < context->num_grids; i++) {
if (context->base_mesh_grids[i].grid_size > 0) {
MEM_SAFE_FREE(context->base_mesh_grids[i].grid_co);
}
}
MEM_SAFE_FREE(context->base_mesh_grids);
}
/**
* This function allocates new mdisps with the right size to fit the new extracted grids from the
* base mesh and copies the data to them.
*/
static void multires_create_grids_in_unsubdivided_base_mesh(MultiresUnsubdivideContext *context,
Mesh *base_mesh)
{
/* Free the current MDISPS and create new ones. */
if (CustomData_has_layer(&base_mesh->loop_data, CD_MDISPS)) {
CustomData_free_layers(&base_mesh->loop_data, CD_MDISPS, base_mesh->totloop);
}
MDisps *mdisps = static_cast<MDisps *>(
CustomData_add_layer(&base_mesh->loop_data, CD_MDISPS, CD_SET_DEFAULT, base_mesh->totloop));
const int totdisp = pow_i(BKE_ccg_gridsize(context->num_total_levels), 2);
const int totloop = base_mesh->totloop;
BLI_assert(base_mesh->totloop == context->num_grids);
/* Allocate the MDISPS grids and copy the extracted data from context. */
for (int i = 0; i < totloop; i++) {
float(*disps)[3] = static_cast<float(*)[3]>(
MEM_calloc_arrayN(totdisp, sizeof(float[3]), __func__));
if (mdisps[i].disps) {
MEM_freeN(mdisps[i].disps);
}
for (int j = 0; j < totdisp; j++) {
if (context->base_mesh_grids[i].grid_co) {
copy_v3_v3(disps[j], context->base_mesh_grids[i].grid_co[j]);
}
}
mdisps[i].disps = disps;
mdisps[i].totdisp = totdisp;
mdisps[i].level = context->num_total_levels;
}
}
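/**
* Rebuilds multires subdivision levels by un-subdividing the mesh into a lower resolution base
* mesh with matching grids. Returns the number of levels that were rebuilt, or 0 on failure.
*/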
int multiresModifier_rebuild_subdiv(Depsgraph *depsgraph,
Object *object,
MultiresModifierData *mmd,
int rebuild_limit,
bool switch_view_to_lower_level)
{
Mesh *mesh = static_cast<Mesh *>(object->data);
multires_force_sculpt_rebuild(object);
MultiresUnsubdivideContext unsubdiv_context{};
MultiresReshapeContext reshape_context{};
multires_unsubdivide_context_init(&unsubdiv_context, mesh, mmd);
/* Convert and store the existing grids in object space if available. */
if (mmd->totlvl != 0) {
if (!multires_reshape_context_create_from_object(&reshape_context, depsgraph, object, mmd)) {
return 0;
}
multires_reshape_store_original_grids(&reshape_context);
multires_reshape_assign_final_coords_from_mdisps(&reshape_context);
unsubdiv_context.original_mdisp = reshape_context.mdisps;
}
/* Set the limit for the levels that should be rebuilt. */
unsubdiv_context.max_new_levels = rebuild_limit;
/* Un-subdivide and create the data for the new grids. */
if (multires_unsubdivide_to_basemesh(&unsubdiv_context) == 0) {
/* If it was not possible to rebuild any level, free the data and return. */
if (mmd->totlvl != 0) {
multires_reshape_object_grids_to_tangent_displacement(&reshape_context);
multires_unsubdivide_context_free(&unsubdiv_context);
}
multires_reshape_context_free(&reshape_context);
return 0;
}
/* Free the reshape context used to convert the data from the original grids to object space. */
if (mmd->totlvl != 0) {
multires_reshape_context_free(&reshape_context);
}
/* Copy the new base mesh to the original mesh. */
Mesh *base_mesh = static_cast<Mesh *>(object->data);
BKE_mesh_nomain_to_mesh(unsubdiv_context.base_mesh, base_mesh, object);
multires_create_grids_in_unsubdivided_base_mesh(&unsubdiv_context, base_mesh);
/* Update the levels in the modifier. Force to always display at level 0 as it contains the
* newly created level. */
mmd->totlvl = char(unsubdiv_context.num_total_levels);
if (switch_view_to_lower_level) {
mmd->sculptlvl = 0;
mmd->lvl = 0;
}
else {
mmd->sculptlvl = char(mmd->sculptlvl + unsubdiv_context.num_new_levels);
mmd->lvl = char(mmd->lvl + unsubdiv_context.num_new_levels);
}
mmd->renderlvl = char(mmd->renderlvl + unsubdiv_context.num_new_levels);
/* Create a reshape context to convert the MDISPS data to tangent displacement. It can be the
* same as the previous one as a new Subdivision needs to be created for the new base mesh. */
if (!multires_reshape_context_create_from_base_mesh(&reshape_context, depsgraph, object, mmd)) {
return 0;
}
multires_reshape_object_grids_to_tangent_displacement(&reshape_context);
multires_reshape_context_free(&reshape_context);
/* Free the un-subdivide context and return the total number of levels that were rebuilt. */
const int rebuilt_subdivs = unsubdiv_context.num_new_levels;
multires_unsubdivide_context_free(&unsubdiv_context);
return rebuilt_subdivs;
}