tornavis/source/blender/blenkernel/intern/subdiv_ccg.cc


/* SPDX-FileCopyrightText: 2018 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup bke
*/
#include "BKE_subdiv_ccg.hh"
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
#include "MEM_guardedalloc.h"
#include "BLI_ghash.h"
#include "BLI_math_bits.h"
#include "BLI_math_geom.h"
#include "BLI_math_vector.h"
#include "BLI_task.h"
#include "BKE_DerivedMesh.hh"
#include "BKE_ccg.h"
#include "BKE_global.h"
#include "BKE_mesh.hh"
#include "BKE_subdiv.hh"
#include "BKE_subdiv_eval.hh"
#include "opensubdiv_topology_refiner_capi.h"
using blender::Array;
using blender::Vector;
/* -------------------------------------------------------------------- */
/** \name Various forward declarations
* \{ */
static void subdiv_ccg_average_all_boundaries_and_corners(SubdivCCG *subdiv_ccg, CCGKey *key);
static void subdiv_ccg_average_inner_face_grids(SubdivCCG *subdiv_ccg,
CCGKey *key,
SubdivCCGFace *face);
void subdiv_ccg_average_faces_boundaries_and_corners(SubdivCCG *subdiv_ccg,
CCGKey *key,
CCGFace **effected_faces,
int num_effected_faces);
/** \} */
/* -------------------------------------------------------------------- */
/** \name Generally useful internal helpers
* \{ */
/* Number of floats in per-vertex elements. */
static int num_element_float_get(const SubdivCCG *subdiv_ccg)
{
/* We always have 3 floats for the coordinate. */
int num_floats = 3;
if (subdiv_ccg->has_normal) {
num_floats += 3;
}
if (subdiv_ccg->has_mask) {
num_floats += 1;
}
return num_floats;
}
/* Per-vertex element size in bytes. */
static int element_size_bytes_get(const SubdivCCG *subdiv_ccg)
{
return sizeof(float) * num_element_float_get(subdiv_ccg);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Internal helpers for CCG creation
* \{ */
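/* Per-element memory layout, as set up by subdiv_ccg_init_layers() below
 * (a reference sketch):
 *
 *   [ co.x co.y co.z | mask? | no.x no.y no.z? ]
 *
 * Coordinates always come first; the mask and normal layers are optional,
 * and their byte offsets are stored in `mask_offset` / `normal_offset` for
 * use together with a CCGKey. */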
static void subdiv_ccg_init_layers(SubdivCCG *subdiv_ccg, const SubdivToCCGSettings *settings)
{
/* CCG always contains coordinates. The rest of the layers come after them. */
int layer_offset = sizeof(float[3]);
/* Mask. */
if (settings->need_mask) {
subdiv_ccg->has_mask = true;
subdiv_ccg->mask_offset = layer_offset;
layer_offset += sizeof(float);
}
else {
subdiv_ccg->has_mask = false;
subdiv_ccg->mask_offset = -1;
}
/* Normals.
*
* NOTE: Keep them at the end, matching old CCGDM. Doesn't really matter
* here, but some other area might in theory depend on the memory layout. */
if (settings->need_normal) {
subdiv_ccg->has_normal = true;
subdiv_ccg->normal_offset = layer_offset;
layer_offset += sizeof(float[3]);
}
else {
subdiv_ccg->has_normal = false;
subdiv_ccg->normal_offset = -1;
}
}
/* TODO(sergey): Make this a more accessible function. */
static int topology_refiner_count_face_corners(OpenSubdiv_TopologyRefiner *topology_refiner)
{
const int num_faces = topology_refiner->getNumFaces(topology_refiner);
int num_corners = 0;
for (int face_index = 0; face_index < num_faces; face_index++) {
num_corners += topology_refiner->getNumFaceVertices(topology_refiner, face_index);
}
return num_corners;
}
/* NOTE: Grid size and layer flags are to be filled in before calling this
* function. */
static void subdiv_ccg_alloc_elements(SubdivCCG *subdiv_ccg, Subdiv *subdiv)
{
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
const int element_size = element_size_bytes_get(subdiv_ccg);
/* Allocate memory for surface grids. */
const int num_faces = topology_refiner->getNumFaces(topology_refiner);
const int num_grids = topology_refiner_count_face_corners(topology_refiner);
const int grid_size = BKE_subdiv_grid_size_from_level(subdiv_ccg->level);
const int grid_area = grid_size * grid_size;
subdiv_ccg->grid_element_size = element_size;
subdiv_ccg->num_grids = num_grids;
subdiv_ccg->grids = static_cast<CCGElem **>(
MEM_calloc_arrayN(num_grids, sizeof(CCGElem *), "subdiv ccg grids"));
subdiv_ccg->grids_storage = static_cast<uchar *>(
MEM_calloc_arrayN(num_grids, size_t(grid_area) * element_size, "subdiv ccg grids storage"));
const size_t grid_size_in_bytes = size_t(grid_area) * element_size;
for (int grid_index = 0; grid_index < num_grids; grid_index++) {
const size_t grid_offset = grid_size_in_bytes * grid_index;
subdiv_ccg->grids[grid_index] = (CCGElem *)&subdiv_ccg->grids_storage[grid_offset];
}
/* Grid material flags. */
subdiv_ccg->grid_flag_mats = static_cast<DMFlagMat *>(
MEM_calloc_arrayN(num_grids, sizeof(DMFlagMat), "ccg grid material flags"));
/* Grid hidden flags. */
subdiv_ccg->grid_hidden = static_cast<BLI_bitmap **>(
MEM_calloc_arrayN(num_grids, sizeof(BLI_bitmap *), "ccg grid hidden flags"));
for (int grid_index = 0; grid_index < num_grids; grid_index++) {
subdiv_ccg->grid_hidden[grid_index] = BLI_BITMAP_NEW(grid_area, "ccg grid hidden");
}
/* TODO(sergey): Allocate memory for loose elements. */
/* Allocate memory for faces. */
subdiv_ccg->num_faces = num_faces;
if (num_faces) {
subdiv_ccg->faces = static_cast<SubdivCCGFace *>(
MEM_calloc_arrayN(num_faces, sizeof(SubdivCCGFace), "Subdiv CCG faces"));
subdiv_ccg->grid_to_face_map.reinitialize(num_grids);
}
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Grids evaluation
* \{ */
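/* Overview: grids are filled by sampling the limit surface (optionally with
 * displacement and a mask evaluator) at every grid element, parallelized over
 * coarse faces; see subdiv_ccg_evaluate_grids() below. */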
struct CCGEvalGridsData {
SubdivCCG *subdiv_ccg;
Subdiv *subdiv;
int *face_ptex_offset;
SubdivCCGMaskEvaluator *mask_evaluator;
SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator;
};
static void subdiv_ccg_eval_grid_element_limit(CCGEvalGridsData *data,
const int ptex_face_index,
const float u,
const float v,
uchar *element)
{
Subdiv *subdiv = data->subdiv;
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
if (subdiv->displacement_evaluator != nullptr) {
BKE_subdiv_eval_final_point(subdiv, ptex_face_index, u, v, (float *)element);
}
else if (subdiv_ccg->has_normal) {
BKE_subdiv_eval_limit_point_and_normal(subdiv,
ptex_face_index,
u,
v,
(float *)element,
(float *)(element + subdiv_ccg->normal_offset));
}
else {
BKE_subdiv_eval_limit_point(subdiv, ptex_face_index, u, v, (float *)element);
}
}
static void subdiv_ccg_eval_grid_element_mask(CCGEvalGridsData *data,
const int ptex_face_index,
const float u,
const float v,
uchar *element)
{
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
if (!subdiv_ccg->has_mask) {
return;
}
float *mask_value_ptr = (float *)(element + subdiv_ccg->mask_offset);
if (data->mask_evaluator != nullptr) {
*mask_value_ptr = data->mask_evaluator->eval_mask(data->mask_evaluator, ptex_face_index, u, v);
}
else {
*mask_value_ptr = 0.0f;
}
}
static void subdiv_ccg_eval_grid_element(CCGEvalGridsData *data,
const int ptex_face_index,
const float u,
const float v,
uchar *element)
{
subdiv_ccg_eval_grid_element_limit(data, ptex_face_index, u, v, element);
subdiv_ccg_eval_grid_element_mask(data, ptex_face_index, u, v, element);
}
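/* A quad face corresponds to a single ptex face, so all four corner grids
 * sample that one ptex face, with the grid (u, v) rotated per corner by
 * BKE_subdiv_rotate_grid_to_quad(). */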
static void subdiv_ccg_eval_regular_grid(CCGEvalGridsData *data, const int face_index)
{
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
const int ptex_face_index = data->face_ptex_offset[face_index];
const int grid_size = subdiv_ccg->grid_size;
const float grid_size_1_inv = 1.0f / (grid_size - 1);
const int element_size = element_size_bytes_get(subdiv_ccg);
SubdivCCGFace *faces = subdiv_ccg->faces;
blender::MutableSpan<int> grid_to_face_map = subdiv_ccg->grid_to_face_map;
const SubdivCCGFace *face = &faces[face_index];
for (int corner = 0; corner < face->num_grids; corner++) {
const int grid_index = face->start_grid_index + corner;
uchar *grid = (uchar *)subdiv_ccg->grids[grid_index];
for (int y = 0; y < grid_size; y++) {
const float grid_v = y * grid_size_1_inv;
for (int x = 0; x < grid_size; x++) {
const float grid_u = x * grid_size_1_inv;
float u, v;
BKE_subdiv_rotate_grid_to_quad(corner, grid_u, grid_v, &u, &v);
const size_t grid_element_index = size_t(y) * grid_size + x;
const size_t grid_element_offset = grid_element_index * element_size;
subdiv_ccg_eval_grid_element(data, ptex_face_index, u, v, &grid[grid_element_offset]);
}
}
/* Assign grid's face. */
grid_to_face_map[grid_index] = face_index;
/* Assign material flags. */
subdiv_ccg->grid_flag_mats[grid_index] = data->material_flags_evaluator->eval_material_flags(
data->material_flags_evaluator, face_index);
}
}
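/* A non-quad face has one ptex face per corner, each covering exactly one
 * grid; the (u, v) flip below puts the grid origin (0, 0) at the face center
 * and (grid_size - 1, grid_size - 1) at the corner's coarse vertex. */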
static void subdiv_ccg_eval_special_grid(CCGEvalGridsData *data, const int face_index)
{
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
const int grid_size = subdiv_ccg->grid_size;
const float grid_size_1_inv = 1.0f / (grid_size - 1);
const int element_size = element_size_bytes_get(subdiv_ccg);
SubdivCCGFace *faces = subdiv_ccg->faces;
blender::MutableSpan<int> grid_to_face_map = subdiv_ccg->grid_to_face_map;
const SubdivCCGFace *face = &faces[face_index];
for (int corner = 0; corner < face->num_grids; corner++) {
const int grid_index = face->start_grid_index + corner;
const int ptex_face_index = data->face_ptex_offset[face_index] + corner;
uchar *grid = (uchar *)subdiv_ccg->grids[grid_index];
for (int y = 0; y < grid_size; y++) {
const float u = 1.0f - (y * grid_size_1_inv);
for (int x = 0; x < grid_size; x++) {
const float v = 1.0f - (x * grid_size_1_inv);
const size_t grid_element_index = size_t(y) * grid_size + x;
const size_t grid_element_offset = grid_element_index * element_size;
subdiv_ccg_eval_grid_element(data, ptex_face_index, u, v, &grid[grid_element_offset]);
}
}
/* Assign grid's face. */
grid_to_face_map[grid_index] = face_index;
/* Assign material flags. */
subdiv_ccg->grid_flag_mats[grid_index] = data->material_flags_evaluator->eval_material_flags(
data->material_flags_evaluator, face_index);
}
}
static void subdiv_ccg_eval_grids_task(void *__restrict userdata_v,
const int face_index,
const TaskParallelTLS *__restrict /*tls*/)
{
CCGEvalGridsData *data = static_cast<CCGEvalGridsData *>(userdata_v);
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
SubdivCCGFace *face = &subdiv_ccg->faces[face_index];
if (face->num_grids == 4) {
subdiv_ccg_eval_regular_grid(data, face_index);
}
else {
subdiv_ccg_eval_special_grid(data, face_index);
}
}
static bool subdiv_ccg_evaluate_grids(SubdivCCG *subdiv_ccg,
Subdiv *subdiv,
SubdivCCGMaskEvaluator *mask_evaluator,
SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator)
{
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
const int num_faces = topology_refiner->getNumFaces(topology_refiner);
/* Initialize data passed to all the tasks. */
CCGEvalGridsData data;
data.subdiv_ccg = subdiv_ccg;
data.subdiv = subdiv;
data.face_ptex_offset = BKE_subdiv_face_ptex_offset_get(subdiv);
data.mask_evaluator = mask_evaluator;
data.material_flags_evaluator = material_flags_evaluator;
/* Threaded grids evaluation. */
TaskParallelSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
BLI_task_parallel_range(
0, num_faces, &data, subdiv_ccg_eval_grids_task, &parallel_range_settings);
/* If displacement is used, need to calculate normals after all final
* coordinates are known. */
if (subdiv->displacement_evaluator != nullptr) {
BKE_subdiv_ccg_recalc_normals(subdiv_ccg);
}
return true;
}
/* Initialize face descriptors, assuming memory for them was already
* allocated. */
static void subdiv_ccg_init_faces(SubdivCCG *subdiv_ccg)
{
Subdiv *subdiv = subdiv_ccg->subdiv;
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
const int num_faces = subdiv_ccg->num_faces;
int corner_index = 0;
for (int face_index = 0; face_index < num_faces; face_index++) {
const int num_corners = topology_refiner->getNumFaceVertices(topology_refiner, face_index);
subdiv_ccg->faces[face_index].num_grids = num_corners;
subdiv_ccg->faces[face_index].start_grid_index = corner_index;
corner_index += num_corners;
}
}
static void subdiv_ccg_allocate_adjacent_edges(SubdivCCG *subdiv_ccg, const int num_edges)
{
subdiv_ccg->num_adjacent_edges = num_edges;
subdiv_ccg->adjacent_edges = static_cast<SubdivCCGAdjacentEdge *>(MEM_calloc_arrayN(
subdiv_ccg->num_adjacent_edges, sizeof(*subdiv_ccg->adjacent_edges), "ccg adjacent edges"));
}
static SubdivCCGCoord subdiv_ccg_coord(int grid_index, int x, int y)
{
SubdivCCGCoord coord{};
coord.grid_index = grid_index;
coord.x = x;
coord.y = y;
return coord;
}
static CCGElem *subdiv_ccg_coord_to_elem(const CCGKey *key,
const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord)
{
return CCG_grid_elem(key, subdiv_ccg->grids[coord->grid_index], coord->x, coord->y);
}
/* Returns storage where boundary elements are to be stored. */
static SubdivCCGCoord *subdiv_ccg_adjacent_edge_add_face(SubdivCCG *subdiv_ccg,
SubdivCCGAdjacentEdge *adjacent_edge)
{
const int grid_size = subdiv_ccg->grid_size * 2;
const int adjacent_face_index = adjacent_edge->num_adjacent_faces;
++adjacent_edge->num_adjacent_faces;
/* Allocate memory for the boundary elements. */
adjacent_edge->boundary_coords = static_cast<SubdivCCGCoord **>(
MEM_reallocN(adjacent_edge->boundary_coords,
adjacent_edge->num_adjacent_faces * sizeof(*adjacent_edge->boundary_coords)));
adjacent_edge->boundary_coords[adjacent_face_index] = static_cast<SubdivCCGCoord *>(
MEM_malloc_arrayN(grid_size * 2, sizeof(SubdivCCGCoord), "ccg adjacent boundary"));
return adjacent_edge->boundary_coords[adjacent_face_index];
}
static void subdiv_ccg_init_faces_edge_neighborhood(SubdivCCG *subdiv_ccg)
{
Subdiv *subdiv = subdiv_ccg->subdiv;
SubdivCCGFace *faces = subdiv_ccg->faces;
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
const int num_edges = topology_refiner->getNumEdges(topology_refiner);
const int grid_size = subdiv_ccg->grid_size;
if (num_edges == 0) {
/* Early output, nothing to do in this case. */
return;
}
subdiv_ccg_allocate_adjacent_edges(subdiv_ccg, num_edges);
Vector<int, 64> face_vertices;
Vector<int, 64> face_edges;
/* Store adjacency for all faces. */
const int num_faces = subdiv_ccg->num_faces;
for (int face_index = 0; face_index < num_faces; face_index++) {
SubdivCCGFace *face = &faces[face_index];
const int num_face_grids = face->num_grids;
face_vertices.reinitialize(num_face_grids);
topology_refiner->getFaceVertices(topology_refiner, face_index, face_vertices.data());
/* Note that the order of edges is the same as the order of MLoops, which also
* means it's the same as the order of grids. */
face_edges.reinitialize(num_face_grids);
topology_refiner->getFaceEdges(topology_refiner, face_index, face_edges.data());
/* Store grids adjacency for this edge. */
for (int corner = 0; corner < num_face_grids; corner++) {
const int vertex_index = face_vertices[corner];
const int edge_index = face_edges[corner];
int edge_vertices[2];
topology_refiner->getEdgeVertices(topology_refiner, edge_index, edge_vertices);
const bool is_edge_flipped = (edge_vertices[0] != vertex_index);
/* Grid which is adjacent to the current corner. */
const int current_grid_index = face->start_grid_index + corner;
/* Grid which is adjacent to the next corner. */
const int next_grid_index = face->start_grid_index + (corner + 1) % num_face_grids;
/* Add new face to the adjacent edge. */
SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[edge_index];
SubdivCCGCoord *boundary_coords = subdiv_ccg_adjacent_edge_add_face(subdiv_ccg,
adjacent_edge);
/* Fill CCG elements along the edge. The boundary spans two grids
* (2 * grid_size points) and is stored following the direction of the coarse
* edge, from its first vertex to its second. */
int boundary_element_index = 0;
if (is_edge_flipped) {
for (int i = 0; i < grid_size; i++) {
boundary_coords[boundary_element_index++] = subdiv_ccg_coord(
next_grid_index, grid_size - i - 1, grid_size - 1);
}
for (int i = 0; i < grid_size; i++) {
boundary_coords[boundary_element_index++] = subdiv_ccg_coord(
current_grid_index, grid_size - 1, i);
}
}
else {
for (int i = 0; i < grid_size; i++) {
boundary_coords[boundary_element_index++] = subdiv_ccg_coord(
current_grid_index, grid_size - 1, grid_size - i - 1);
}
for (int i = 0; i < grid_size; i++) {
boundary_coords[boundary_element_index++] = subdiv_ccg_coord(
next_grid_index, i, grid_size - 1);
}
}
}
}
}
static void subdiv_ccg_allocate_adjacent_vertices(SubdivCCG *subdiv_ccg, const int num_vertices)
{
subdiv_ccg->num_adjacent_vertices = num_vertices;
subdiv_ccg->adjacent_vertices = static_cast<SubdivCCGAdjacentVertex *>(
MEM_calloc_arrayN(subdiv_ccg->num_adjacent_vertices,
sizeof(*subdiv_ccg->adjacent_vertices),
"ccg adjacent vertices"));
}
/* Returns storage where corner elements are to be stored. This is a pointer
* to the actual storage. */
static SubdivCCGCoord *subdiv_ccg_adjacent_vertex_add_face(
SubdivCCGAdjacentVertex *adjacent_vertex)
{
const int adjacent_face_index = adjacent_vertex->num_adjacent_faces;
++adjacent_vertex->num_adjacent_faces;
/* Allocate memory for the boundary elements. */
adjacent_vertex->corner_coords = static_cast<SubdivCCGCoord *>(
MEM_reallocN(adjacent_vertex->corner_coords,
adjacent_vertex->num_adjacent_faces * sizeof(*adjacent_vertex->corner_coords)));
return &adjacent_vertex->corner_coords[adjacent_face_index];
}
static void subdiv_ccg_init_faces_vertex_neighborhood(SubdivCCG *subdiv_ccg)
{
Subdiv *subdiv = subdiv_ccg->subdiv;
SubdivCCGFace *faces = subdiv_ccg->faces;
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
const int num_vertices = topology_refiner->getNumVertices(topology_refiner);
const int grid_size = subdiv_ccg->grid_size;
if (num_vertices == 0) {
/* Early output, nothing to do in this case. */
return;
}
subdiv_ccg_allocate_adjacent_vertices(subdiv_ccg, num_vertices);
Vector<int, 64> face_vertices;
/* Key to access elements. */
CCGKey key;
BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
/* Store adjacency for all faces. */
const int num_faces = subdiv_ccg->num_faces;
for (int face_index = 0; face_index < num_faces; face_index++) {
SubdivCCGFace *face = &faces[face_index];
const int num_face_grids = face->num_grids;
face_vertices.reinitialize(num_face_grids);
topology_refiner->getFaceVertices(topology_refiner, face_index, face_vertices.data());
for (int corner = 0; corner < num_face_grids; corner++) {
const int vertex_index = face_vertices[corner];
/* Grid which is adjacent to the current corner. */
const int grid_index = face->start_grid_index + corner;
/* Add new face to the adjacent vertex. */
SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[vertex_index];
SubdivCCGCoord *corner_coord = subdiv_ccg_adjacent_vertex_add_face(adjacent_vertex);
*corner_coord = subdiv_ccg_coord(grid_index, grid_size - 1, grid_size - 1);
}
}
}
static void subdiv_ccg_init_faces_neighborhood(SubdivCCG *subdiv_ccg)
{
subdiv_ccg_init_faces_edge_neighborhood(subdiv_ccg);
subdiv_ccg_init_faces_vertex_neighborhood(subdiv_ccg);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Creation / evaluation
* \{ */
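/* Construction goes through the following steps: derive the grid level from
 * the requested resolution, set up the element layout, allocate grids,
 * initialize face descriptors and their edge/vertex adjacency, and finally
 * evaluate all grid elements from the limit surface. */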
SubdivCCG *BKE_subdiv_to_ccg(Subdiv *subdiv,
const SubdivToCCGSettings *settings,
SubdivCCGMaskEvaluator *mask_evaluator,
SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator)
{
BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
SubdivCCG *subdiv_ccg = MEM_new<SubdivCCG>(__func__);
subdiv_ccg->subdiv = subdiv;
subdiv_ccg->level = bitscan_forward_i(settings->resolution - 1);
subdiv_ccg->grid_size = BKE_subdiv_grid_size_from_level(subdiv_ccg->level);
subdiv_ccg_init_layers(subdiv_ccg, settings);
subdiv_ccg_alloc_elements(subdiv_ccg, subdiv);
subdiv_ccg_init_faces(subdiv_ccg);
subdiv_ccg_init_faces_neighborhood(subdiv_ccg);
if (!subdiv_ccg_evaluate_grids(subdiv_ccg, subdiv, mask_evaluator, material_flags_evaluator)) {
BKE_subdiv_ccg_destroy(subdiv_ccg);
BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
return nullptr;
}
BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
return subdiv_ccg;
}
Mesh *BKE_subdiv_to_ccg_mesh(Subdiv *subdiv,
const SubdivToCCGSettings *settings,
const Mesh *coarse_mesh)
{
/* Make sure evaluator is ready. */
BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
if (!BKE_subdiv_eval_begin_from_mesh(
subdiv, coarse_mesh, nullptr, SUBDIV_EVALUATOR_TYPE_CPU, nullptr))
{
if (coarse_mesh->faces_num) {
return nullptr;
}
}
BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
SubdivCCGMaskEvaluator mask_evaluator;
bool has_mask = BKE_subdiv_ccg_mask_init_from_paint(&mask_evaluator, coarse_mesh);
SubdivCCGMaterialFlagsEvaluator material_flags_evaluator;
BKE_subdiv_ccg_material_flags_init_from_mesh(&material_flags_evaluator, coarse_mesh);
SubdivCCG *subdiv_ccg = BKE_subdiv_to_ccg(
subdiv, settings, has_mask ? &mask_evaluator : nullptr, &material_flags_evaluator);
if (has_mask) {
mask_evaluator.free(&mask_evaluator);
}
material_flags_evaluator.free(&material_flags_evaluator);
if (subdiv_ccg == nullptr) {
return nullptr;
}
Mesh *result = BKE_mesh_new_nomain_from_template(coarse_mesh, 0, 0, 0, 0);
result->runtime->subdiv_ccg = subdiv_ccg;
return result;
}
void BKE_subdiv_ccg_destroy(SubdivCCG *subdiv_ccg)
{
const int num_grids = subdiv_ccg->num_grids;
MEM_SAFE_FREE(subdiv_ccg->grids);
MEM_SAFE_FREE(subdiv_ccg->grids_storage);
MEM_SAFE_FREE(subdiv_ccg->edges);
MEM_SAFE_FREE(subdiv_ccg->vertices);
MEM_SAFE_FREE(subdiv_ccg->grid_flag_mats);
if (subdiv_ccg->grid_hidden != nullptr) {
for (int grid_index = 0; grid_index < num_grids; grid_index++) {
MEM_SAFE_FREE(subdiv_ccg->grid_hidden[grid_index]);
}
MEM_SAFE_FREE(subdiv_ccg->grid_hidden);
}
if (subdiv_ccg->subdiv != nullptr) {
BKE_subdiv_free(subdiv_ccg->subdiv);
}
MEM_SAFE_FREE(subdiv_ccg->faces);
/* Free map of adjacent edges. */
for (int i = 0; i < subdiv_ccg->num_adjacent_edges; i++) {
SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[i];
for (int face_index = 0; face_index < adjacent_edge->num_adjacent_faces; face_index++) {
MEM_SAFE_FREE(adjacent_edge->boundary_coords[face_index]);
}
MEM_SAFE_FREE(adjacent_edge->boundary_coords);
}
MEM_SAFE_FREE(subdiv_ccg->adjacent_edges);
/* Free map of adjacent vertices. */
for (int i = 0; i < subdiv_ccg->num_adjacent_vertices; i++) {
SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[i];
MEM_SAFE_FREE(adjacent_vertex->corner_coords);
}
MEM_SAFE_FREE(subdiv_ccg->adjacent_vertices);
MEM_SAFE_FREE(subdiv_ccg->cache_.start_face_grid_index);
MEM_delete(subdiv_ccg);
}
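/* Minimal usage sketch, for illustration (assumes `subdiv_ccg` is a fully
 * built SubdivCCG and `x`, `y` are valid grid coordinates): build a key for
 * the top level and use it to address element layers:
 *
 *   CCGKey key;
 *   BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
 *   CCGElem *elem = CCG_grid_elem(&key, subdiv_ccg->grids[0], x, y);
 *   float *co = CCG_elem_co(&key, elem);
 */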
void BKE_subdiv_ccg_key(CCGKey *key, const SubdivCCG *subdiv_ccg, int level)
{
key->level = level;
key->elem_size = element_size_bytes_get(subdiv_ccg);
key->grid_size = BKE_subdiv_grid_size_from_level(level);
key->grid_area = key->grid_size * key->grid_size;
key->grid_bytes = key->elem_size * key->grid_area;
key->normal_offset = subdiv_ccg->normal_offset;
key->mask_offset = subdiv_ccg->mask_offset;
key->has_normals = subdiv_ccg->has_normal;
key->has_mask = subdiv_ccg->has_mask;
}
void BKE_subdiv_ccg_key_top_level(CCGKey *key, const SubdivCCG *subdiv_ccg)
{
BKE_subdiv_ccg_key(key, subdiv_ccg, subdiv_ccg->level);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Normals
* \{ */
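/* Normals are recalculated in two passes: each grid computes its inner face
 * normals and averages them per element in parallel, then boundaries and
 * corners are averaged across neighboring grids so the result is continuous
 * across grid borders. */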
struct RecalcInnerNormalsData {
SubdivCCG *subdiv_ccg;
CCGKey *key;
};
struct RecalcInnerNormalsTLSData {
float (*face_normals)[3];
};
/* Evaluate high-res face normals, for faces which correspond to grid elements
*
* {(x, y), (x + 1, y), (x + 1, y + 1), (x, y + 1)}
*
* The result is stored in normals storage from TLS. */
static void subdiv_ccg_recalc_inner_face_normals(SubdivCCG *subdiv_ccg,
CCGKey *key,
RecalcInnerNormalsTLSData *tls,
const int grid_index)
{
const int grid_size = subdiv_ccg->grid_size;
const int grid_size_1 = grid_size - 1;
CCGElem *grid = subdiv_ccg->grids[grid_index];
if (tls->face_normals == nullptr) {
tls->face_normals = static_cast<float(*)[3]>(
MEM_malloc_arrayN(grid_size_1 * grid_size_1, sizeof(float[3]), "CCG TLS normals"));
}
for (int y = 0; y < grid_size - 1; y++) {
for (int x = 0; x < grid_size - 1; x++) {
CCGElem *grid_elements[4] = {
CCG_grid_elem(key, grid, x, y + 1),
CCG_grid_elem(key, grid, x + 1, y + 1),
CCG_grid_elem(key, grid, x + 1, y),
CCG_grid_elem(key, grid, x, y),
};
float *co[4] = {
CCG_elem_co(key, grid_elements[0]),
CCG_elem_co(key, grid_elements[1]),
CCG_elem_co(key, grid_elements[2]),
CCG_elem_co(key, grid_elements[3]),
};
const int face_index = y * grid_size_1 + x;
float *face_normal = tls->face_normals[face_index];
normal_quad_v3(face_normal, co[0], co[1], co[2], co[3]);
}
}
}
/* Average normals at every grid element, using adjacent faces normals. */
static void subdiv_ccg_average_inner_face_normals(SubdivCCG *subdiv_ccg,
CCGKey *key,
RecalcInnerNormalsTLSData *tls,
const int grid_index)
{
const int grid_size = subdiv_ccg->grid_size;
const int grid_size_1 = grid_size - 1;
CCGElem *grid = subdiv_ccg->grids[grid_index];
const float(*face_normals)[3] = tls->face_normals;
for (int y = 0; y < grid_size; y++) {
for (int x = 0; x < grid_size; x++) {
float normal_acc[3] = {0.0f, 0.0f, 0.0f};
int counter = 0;
/* Accumulate normals of all adjacent faces. */
if (x < grid_size_1 && y < grid_size_1) {
add_v3_v3(normal_acc, face_normals[y * grid_size_1 + x]);
counter++;
}
if (x >= 1) {
if (y < grid_size_1) {
add_v3_v3(normal_acc, face_normals[y * grid_size_1 + (x - 1)]);
counter++;
}
if (y >= 1) {
add_v3_v3(normal_acc, face_normals[(y - 1) * grid_size_1 + (x - 1)]);
counter++;
}
}
if (y >= 1 && x < grid_size_1) {
add_v3_v3(normal_acc, face_normals[(y - 1) * grid_size_1 + x]);
counter++;
}
/* Normalize and store. */
mul_v3_v3fl(CCG_grid_elem_no(key, grid, x, y), normal_acc, 1.0f / counter);
}
}
}
static void subdiv_ccg_recalc_inner_normal_task(void *__restrict userdata_v,
const int grid_index,
const TaskParallelTLS *__restrict tls_v)
{
RecalcInnerNormalsData *data = static_cast<RecalcInnerNormalsData *>(userdata_v);
RecalcInnerNormalsTLSData *tls = static_cast<RecalcInnerNormalsTLSData *>(tls_v->userdata_chunk);
subdiv_ccg_recalc_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
subdiv_ccg_average_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
}
static void subdiv_ccg_recalc_inner_normal_free(const void *__restrict /*userdata*/,
void *__restrict tls_v)
{
RecalcInnerNormalsTLSData *tls = static_cast<RecalcInnerNormalsTLSData *>(tls_v);
MEM_SAFE_FREE(tls->face_normals);
}
/* Recalculate normals which correspond to non-boundary elements of grids. */
static void subdiv_ccg_recalc_inner_grid_normals(SubdivCCG *subdiv_ccg)
{
CCGKey key;
BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
RecalcInnerNormalsData data{};
data.subdiv_ccg = subdiv_ccg;
data.key = &key;
RecalcInnerNormalsTLSData tls_data = {nullptr};
TaskParallelSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
parallel_range_settings.userdata_chunk = &tls_data;
parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
parallel_range_settings.func_free = subdiv_ccg_recalc_inner_normal_free;
BLI_task_parallel_range(0,
subdiv_ccg->num_grids,
&data,
subdiv_ccg_recalc_inner_normal_task,
&parallel_range_settings);
}
void BKE_subdiv_ccg_recalc_normals(SubdivCCG *subdiv_ccg)
{
if (!subdiv_ccg->has_normal) {
/* Grids don't have normals, can do early output. */
return;
}
subdiv_ccg_recalc_inner_grid_normals(subdiv_ccg);
BKE_subdiv_ccg_average_grids(subdiv_ccg);
}
struct RecalcModifiedInnerNormalsData {
SubdivCCG *subdiv_ccg;
CCGKey *key;
SubdivCCGFace **effected_ccg_faces;
};
static void subdiv_ccg_recalc_modified_inner_normal_task(void *__restrict userdata_v,
const int face_index,
const TaskParallelTLS *__restrict tls_v)
{
RecalcModifiedInnerNormalsData *data = static_cast<RecalcModifiedInnerNormalsData *>(userdata_v);
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
CCGKey *key = data->key;
RecalcInnerNormalsTLSData *tls = static_cast<RecalcInnerNormalsTLSData *>(tls_v->userdata_chunk);
SubdivCCGFace **faces = data->effected_ccg_faces;
SubdivCCGFace *face = faces[face_index];
const int num_face_grids = face->num_grids;
for (int i = 0; i < num_face_grids; i++) {
const int grid_index = face->start_grid_index + i;
subdiv_ccg_recalc_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
subdiv_ccg_average_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
}
subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
}
static void subdiv_ccg_recalc_modified_inner_normal_free(const void *__restrict /*userdata*/,
void *__restrict tls_v)
{
RecalcInnerNormalsTLSData *tls = static_cast<RecalcInnerNormalsTLSData *>(tls_v);
MEM_SAFE_FREE(tls->face_normals);
}
static void subdiv_ccg_recalc_modified_inner_grid_normals(SubdivCCG *subdiv_ccg,
CCGFace **effected_faces,
int num_effected_faces)
{
CCGKey key;
BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
RecalcModifiedInnerNormalsData data{};
data.subdiv_ccg = subdiv_ccg;
data.key = &key;
data.effected_ccg_faces = (SubdivCCGFace **)effected_faces;
RecalcInnerNormalsTLSData tls_data = {nullptr};
TaskParallelSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
parallel_range_settings.userdata_chunk = &tls_data;
parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
parallel_range_settings.func_free = subdiv_ccg_recalc_modified_inner_normal_free;
BLI_task_parallel_range(0,
num_effected_faces,
&data,
subdiv_ccg_recalc_modified_inner_normal_task,
&parallel_range_settings);
}
void BKE_subdiv_ccg_update_normals(SubdivCCG *subdiv_ccg,
CCGFace **effected_faces,
int num_effected_faces)
{
if (!subdiv_ccg->has_normal) {
/* Grids don't have normals, can do early output. */
return;
}
if (num_effected_faces == 0) {
/* No faces changed, so nothing to do here. */
return;
}
subdiv_ccg_recalc_modified_inner_grid_normals(subdiv_ccg, effected_faces, num_effected_faces);
CCGKey key;
BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
subdiv_ccg_average_faces_boundaries_and_corners(
subdiv_ccg, &key, effected_faces, num_effected_faces);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Boundary averaging/stitching
* \{ */
struct AverageInnerGridsData {
SubdivCCG *subdiv_ccg;
CCGKey *key;
};
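/* Average two values in place: both `a` and `b` end up holding (a + b) / 2. */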
static void average_grid_element_value_v3(float a[3], float b[3])
{
add_v3_v3(a, b);
mul_v3_fl(a, 0.5f);
copy_v3_v3(b, a);
}
static void average_grid_element(SubdivCCG *subdiv_ccg,
CCGKey *key,
CCGElem *grid_element_a,
CCGElem *grid_element_b)
{
average_grid_element_value_v3(CCG_elem_co(key, grid_element_a),
CCG_elem_co(key, grid_element_b));
if (subdiv_ccg->has_normal) {
average_grid_element_value_v3(CCG_elem_no(key, grid_element_a),
CCG_elem_no(key, grid_element_b));
}
if (subdiv_ccg->has_mask) {
float mask = (*CCG_elem_mask(key, grid_element_a) + *CCG_elem_mask(key, grid_element_b)) *
0.5f;
*CCG_elem_mask(key, grid_element_a) = mask;
*CCG_elem_mask(key, grid_element_b) = mask;
}
}
/* Accumulator to hold data during averaging. */
struct GridElementAccumulator {
float co[3];
float no[3];
float mask;
};
static void element_accumulator_init(GridElementAccumulator *accumulator)
{
zero_v3(accumulator->co);
zero_v3(accumulator->no);
accumulator->mask = 0.0f;
}
static void element_accumulator_add(GridElementAccumulator *accumulator,
const SubdivCCG *subdiv_ccg,
CCGKey *key,
/*const*/ CCGElem *grid_element)
{
add_v3_v3(accumulator->co, CCG_elem_co(key, grid_element));
if (subdiv_ccg->has_normal) {
add_v3_v3(accumulator->no, CCG_elem_no(key, grid_element));
}
if (subdiv_ccg->has_mask) {
accumulator->mask += *CCG_elem_mask(key, grid_element);
}
}
static void element_accumulator_mul_fl(GridElementAccumulator *accumulator, const float f)
{
mul_v3_fl(accumulator->co, f);
mul_v3_fl(accumulator->no, f);
accumulator->mask *= f;
}
static void element_accumulator_copy(SubdivCCG *subdiv_ccg,
CCGKey *key,
CCGElem *destination,
const GridElementAccumulator *accumulator)
{
copy_v3_v3(CCG_elem_co(key, destination), accumulator->co);
if (subdiv_ccg->has_normal) {
copy_v3_v3(CCG_elem_no(key, destination), accumulator->no);
}
if (subdiv_ccg->has_mask) {
*CCG_elem_mask(key, destination) = accumulator->mask;
}
}
static void subdiv_ccg_average_inner_face_grids(SubdivCCG *subdiv_ccg,
CCGKey *key,
SubdivCCGFace *face)
{
CCGElem **grids = subdiv_ccg->grids;
const int num_face_grids = face->num_grids;
const int grid_size = subdiv_ccg->grid_size;
CCGElem *prev_grid = grids[face->start_grid_index + num_face_grids - 1];
/* Average boundary between neighbor grids. */
for (int corner = 0; corner < num_face_grids; corner++) {
CCGElem *grid = grids[face->start_grid_index + corner];
for (int i = 1; i < grid_size; i++) {
CCGElem *prev_grid_element = CCG_grid_elem(key, prev_grid, i, 0);
CCGElem *grid_element = CCG_grid_elem(key, grid, 0, i);
average_grid_element(subdiv_ccg, key, prev_grid_element, grid_element);
}
prev_grid = grid;
}
/* Average all grids centers into a single accumulator, and share it.
* Guarantees correct and smooth averaging in the center. */
GridElementAccumulator center_accumulator;
element_accumulator_init(&center_accumulator);
for (int corner = 0; corner < num_face_grids; corner++) {
CCGElem *grid = grids[face->start_grid_index + corner];
CCGElem *grid_center_element = CCG_grid_elem(key, grid, 0, 0);
element_accumulator_add(&center_accumulator, subdiv_ccg, key, grid_center_element);
}
element_accumulator_mul_fl(&center_accumulator, 1.0f / num_face_grids);
for (int corner = 0; corner < num_face_grids; corner++) {
CCGElem *grid = grids[face->start_grid_index + corner];
CCGElem *grid_center_element = CCG_grid_elem(key, grid, 0, 0);
element_accumulator_copy(subdiv_ccg, key, grid_center_element, &center_accumulator);
}
}
static void subdiv_ccg_average_inner_grids_task(void *__restrict userdata_v,
const int face_index,
const TaskParallelTLS *__restrict /*tls_v*/)
{
AverageInnerGridsData *data = static_cast<AverageInnerGridsData *>(userdata_v);
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
CCGKey *key = data->key;
SubdivCCGFace *faces = subdiv_ccg->faces;
SubdivCCGFace *face = &faces[face_index];
subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
}
struct AverageGridsBoundariesData {
SubdivCCG *subdiv_ccg;
CCGKey *key;
/* Optional lookup table. Maps task index to index in `subdiv_ccg->adjacent_edges`. */
const int *adjacent_edge_index_map;
};
struct AverageGridsBoundariesTLSData {
GridElementAccumulator *accumulators;
};
static void subdiv_ccg_average_grids_boundary(SubdivCCG *subdiv_ccg,
CCGKey *key,
SubdivCCGAdjacentEdge *adjacent_edge,
AverageGridsBoundariesTLSData *tls)
{
const int num_adjacent_faces = adjacent_edge->num_adjacent_faces;
const int grid_size2 = subdiv_ccg->grid_size * 2;
if (num_adjacent_faces == 1) {
/* Nothing to average with. */
return;
}
if (tls->accumulators == nullptr) {
tls->accumulators = static_cast<GridElementAccumulator *>(
MEM_calloc_arrayN(grid_size2, sizeof(GridElementAccumulator), "average accumulators"));
}
else {
for (int i = 1; i < grid_size2 - 1; i++) {
element_accumulator_init(&tls->accumulators[i]);
}
}
for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
for (int i = 1; i < grid_size2 - 1; i++) {
CCGElem *grid_element = subdiv_ccg_coord_to_elem(
key, subdiv_ccg, &adjacent_edge->boundary_coords[face_index][i]);
element_accumulator_add(&tls->accumulators[i], subdiv_ccg, key, grid_element);
}
}
for (int i = 1; i < grid_size2 - 1; i++) {
element_accumulator_mul_fl(&tls->accumulators[i], 1.0f / num_adjacent_faces);
}
/* Copy averaged value to all the other faces. */
for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
for (int i = 1; i < grid_size2 - 1; i++) {
CCGElem *grid_element = subdiv_ccg_coord_to_elem(
key, subdiv_ccg, &adjacent_edge->boundary_coords[face_index][i]);
element_accumulator_copy(subdiv_ccg, key, grid_element, &tls->accumulators[i]);
}
}
}
static void subdiv_ccg_average_grids_boundaries_task(void *__restrict userdata_v,
const int n,
const TaskParallelTLS *__restrict tls_v)
{
AverageGridsBoundariesData *data = static_cast<AverageGridsBoundariesData *>(userdata_v);
const int adjacent_edge_index = data->adjacent_edge_index_map ?
data->adjacent_edge_index_map[n] :
n;
AverageGridsBoundariesTLSData *tls = static_cast<AverageGridsBoundariesTLSData *>(
tls_v->userdata_chunk);
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
CCGKey *key = data->key;
SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[adjacent_edge_index];
subdiv_ccg_average_grids_boundary(subdiv_ccg, key, adjacent_edge, tls);
}
static void subdiv_ccg_average_grids_boundaries_free(const void *__restrict /*userdata*/,
void *__restrict tls_v)
{
AverageGridsBoundariesTLSData *tls = static_cast<AverageGridsBoundariesTLSData *>(tls_v);
MEM_SAFE_FREE(tls->accumulators);
}
struct AverageGridsCornerData {
SubdivCCG *subdiv_ccg;
CCGKey *key;
/* Optional lookup table. Maps task range index to index in `subdiv_ccg->adjacent_vertices`. */
const int *adjacent_vert_index_map;
};
static void subdiv_ccg_average_grids_corners(SubdivCCG *subdiv_ccg,
CCGKey *key,
SubdivCCGAdjacentVertex *adjacent_vertex)
{
const int num_adjacent_faces = adjacent_vertex->num_adjacent_faces;
if (num_adjacent_faces == 1) {
/* Nothing to average with. */
return;
}
GridElementAccumulator accumulator;
element_accumulator_init(&accumulator);
for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
CCGElem *grid_element = subdiv_ccg_coord_to_elem(
key, subdiv_ccg, &adjacent_vertex->corner_coords[face_index]);
element_accumulator_add(&accumulator, subdiv_ccg, key, grid_element);
}
element_accumulator_mul_fl(&accumulator, 1.0f / num_adjacent_faces);
/* Copy averaged value to all the other faces. */
for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
CCGElem *grid_element = subdiv_ccg_coord_to_elem(
key, subdiv_ccg, &adjacent_vertex->corner_coords[face_index]);
element_accumulator_copy(subdiv_ccg, key, grid_element, &accumulator);
}
}
static void subdiv_ccg_average_grids_corners_task(void *__restrict userdata_v,
const int n,
const TaskParallelTLS *__restrict /*tls_v*/)
{
AverageGridsCornerData *data = static_cast<AverageGridsCornerData *>(userdata_v);
const int adjacent_vertex_index = data->adjacent_vert_index_map ?
data->adjacent_vert_index_map[n] :
n;
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
CCGKey *key = data->key;
SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[adjacent_vertex_index];
subdiv_ccg_average_grids_corners(subdiv_ccg, key, adjacent_vertex);
}
static void subdiv_ccg_average_boundaries(SubdivCCG *subdiv_ccg,
CCGKey *key,
const int *adjacent_edge_index_map,
int num_adjacent_edges)
{
TaskParallelSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
AverageGridsBoundariesData boundaries_data{};
boundaries_data.subdiv_ccg = subdiv_ccg;
boundaries_data.key = key;
boundaries_data.adjacent_edge_index_map = adjacent_edge_index_map;
AverageGridsBoundariesTLSData tls_data = {nullptr};
parallel_range_settings.userdata_chunk = &tls_data;
parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
parallel_range_settings.func_free = subdiv_ccg_average_grids_boundaries_free;
BLI_task_parallel_range(0,
num_adjacent_edges,
&boundaries_data,
subdiv_ccg_average_grids_boundaries_task,
&parallel_range_settings);
}
static void subdiv_ccg_average_all_boundaries(SubdivCCG *subdiv_ccg, CCGKey *key)
{
subdiv_ccg_average_boundaries(subdiv_ccg, key, nullptr, subdiv_ccg->num_adjacent_edges);
}
static void subdiv_ccg_average_corners(SubdivCCG *subdiv_ccg,
CCGKey *key,
const int *adjacent_vert_index_map,
int num_adjacent_vertices)
{
TaskParallelSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
AverageGridsCornerData corner_data{};
corner_data.subdiv_ccg = subdiv_ccg;
corner_data.key = key;
corner_data.adjacent_vert_index_map = adjacent_vert_index_map;
BLI_task_parallel_range(0,
num_adjacent_vertices,
&corner_data,
subdiv_ccg_average_grids_corners_task,
&parallel_range_settings);
}
static void subdiv_ccg_average_all_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
{
subdiv_ccg_average_corners(subdiv_ccg, key, nullptr, subdiv_ccg->num_adjacent_vertices);
}
static void subdiv_ccg_average_all_boundaries_and_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
{
subdiv_ccg_average_all_boundaries(subdiv_ccg, key);
subdiv_ccg_average_all_corners(subdiv_ccg, key);
}
void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg)
{
CCGKey key;
BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
TaskParallelSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
/* Average inner boundaries of grids (within one face), across grids
* from different face-corners. */
AverageInnerGridsData inner_data{};
inner_data.subdiv_ccg = subdiv_ccg;
inner_data.key = &key;
BLI_task_parallel_range(0,
subdiv_ccg->num_faces,
&inner_data,
subdiv_ccg_average_inner_grids_task,
&parallel_range_settings);
subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
}
static void subdiv_ccg_affected_face_adjacency(SubdivCCG *subdiv_ccg,
CCGFace **effected_faces,
int num_effected_faces,
GSet *r_adjacent_vertices,
GSet *r_adjacent_edges)
{
Subdiv *subdiv = subdiv_ccg->subdiv;
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
Vector<int, 64> face_vertices;
Vector<int, 64> face_edges;
for (int i = 0; i < num_effected_faces; i++) {
SubdivCCGFace *face = (SubdivCCGFace *)effected_faces[i];
int face_index = face - subdiv_ccg->faces;
const int num_face_grids = face->num_grids;
face_vertices.reinitialize(num_face_grids);
topology_refiner->getFaceVertices(topology_refiner, face_index, face_vertices.data());
/* Note that the order of edges is the same as the order of MLoops, which also
* means it's the same as the order of grids. */
face_edges.reinitialize(num_face_grids);
topology_refiner->getFaceEdges(topology_refiner, face_index, face_edges.data());
for (int corner = 0; corner < num_face_grids; corner++) {
const int vertex_index = face_vertices[corner];
const int edge_index = face_edges[corner];
int edge_vertices[2];
topology_refiner->getEdgeVertices(topology_refiner, edge_index, edge_vertices);
SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[edge_index];
BLI_gset_add(r_adjacent_edges, adjacent_edge);
SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[vertex_index];
BLI_gset_add(r_adjacent_vertices, adjacent_vertex);
}
}
}
void subdiv_ccg_average_faces_boundaries_and_corners(SubdivCCG *subdiv_ccg,
CCGKey *key,
CCGFace **effected_faces,
int num_effected_faces)
{
GSet *adjacent_vertices = BLI_gset_ptr_new(__func__);
GSet *adjacent_edges = BLI_gset_ptr_new(__func__);
GSetIterator gi;
subdiv_ccg_affected_face_adjacency(
subdiv_ccg, effected_faces, num_effected_faces, adjacent_vertices, adjacent_edges);
int i = 0;
/* Average boundaries. */
Array<int, 64> adjacent_edge_index_map(BLI_gset_len(adjacent_edges));
GSET_ITER_INDEX (gi, adjacent_edges, i) {
SubdivCCGAdjacentEdge *adjacent_edge = static_cast<SubdivCCGAdjacentEdge *>(
BLI_gsetIterator_getKey(&gi));
adjacent_edge_index_map[i] = adjacent_edge - subdiv_ccg->adjacent_edges;
}
subdiv_ccg_average_boundaries(
subdiv_ccg, key, adjacent_edge_index_map.data(), BLI_gset_len(adjacent_edges));
/* Average corners. */
Array<int, 64> adjacent_vertex_index_map(BLI_gset_len(adjacent_vertices));
GSET_ITER_INDEX (gi, adjacent_vertices, i) {
SubdivCCGAdjacentVertex *adjacent_vertex = static_cast<SubdivCCGAdjacentVertex *>(
BLI_gsetIterator_getKey(&gi));
adjacent_vertex_index_map[i] = adjacent_vertex - subdiv_ccg->adjacent_vertices;
}
subdiv_ccg_average_corners(
subdiv_ccg, key, adjacent_vertex_index_map.data(), BLI_gset_len(adjacent_vertices));
BLI_gset_free(adjacent_vertices, nullptr);
BLI_gset_free(adjacent_edges, nullptr);
}
struct StitchFacesInnerGridsData {
SubdivCCG *subdiv_ccg;
CCGKey *key;
CCGFace **effected_ccg_faces;
};
static void subdiv_ccg_stitch_face_inner_grids_task(void *__restrict userdata_v,
const int face_index,
const TaskParallelTLS *__restrict /*tls_v*/)
{
StitchFacesInnerGridsData *data = static_cast<StitchFacesInnerGridsData *>(userdata_v);
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
CCGKey *key = data->key;
CCGFace **effected_ccg_faces = data->effected_ccg_faces;
CCGFace *effected_ccg_face = effected_ccg_faces[face_index];
SubdivCCGFace *face = (SubdivCCGFace *)effected_ccg_face;
subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
}
void BKE_subdiv_ccg_average_stitch_faces(SubdivCCG *subdiv_ccg,
CCGFace **effected_faces,
int num_effected_faces)
{
CCGKey key;
BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
StitchFacesInnerGridsData data{};
data.subdiv_ccg = subdiv_ccg;
data.key = &key;
data.effected_ccg_faces = effected_faces;
TaskParallelSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
BLI_task_parallel_range(0,
num_effected_faces,
&data,
subdiv_ccg_stitch_face_inner_grids_task,
&parallel_range_settings);
/* TODO(sergey): Only average elements which are adjacent to modified
* faces. */
subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
}
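/* For example, at level 2 (grid_size = 3) a quad face yields 4 grids, hence
 * 4 * 9 = 36 vertices, 4 * (2 * 3 * 2) = 48 edges, 4 * 4 = 16 faces and
 * 64 loops. */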
void BKE_subdiv_ccg_topology_counters(const SubdivCCG *subdiv_ccg,
int *r_num_vertices,
int *r_num_edges,
int *r_num_faces,
int *r_num_loops)
{
const int num_grids = subdiv_ccg->num_grids;
const int grid_size = subdiv_ccg->grid_size;
const int grid_area = grid_size * grid_size;
const int num_edges_per_grid = 2 * (grid_size * (grid_size - 1));
*r_num_vertices = num_grids * grid_area;
*r_num_edges = num_grids * num_edges_per_grid;
*r_num_faces = num_grids * (grid_size - 1) * (grid_size - 1);
*r_num_loops = *r_num_faces * 4;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Neighbors
* \{ */
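/* Grid coordinates follow the CCG convention: (0, 0) is the face center,
 * (grid_size - 1, grid_size - 1) is the grid corner's coarse vertex, the
 * x == 0 and y == 0 boundaries are shared with the neighbor grids of the
 * same face, and the two remaining boundaries lie on coarse edges. */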
void BKE_subdiv_ccg_print_coord(const char *message, const SubdivCCGCoord *coord)
{
printf("%s: grid index: %d, coord: (%d, %d)\n", message, coord->grid_index, coord->x, coord->y);
}
bool BKE_subdiv_ccg_check_coord_valid(const SubdivCCG *subdiv_ccg, const SubdivCCGCoord *coord)
{
if (coord->grid_index < 0 || coord->grid_index >= subdiv_ccg->num_grids) {
return false;
}
const int grid_size = subdiv_ccg->grid_size;
if (coord->x < 0 || coord->x >= grid_size) {
return false;
}
if (coord->y < 0 || coord->y >= grid_size) {
return false;
}
return true;
}
BLI_INLINE void subdiv_ccg_neighbors_init(SubdivCCGNeighbors *neighbors,
const int num_unique,
const int num_duplicates)
{
const int size = num_unique + num_duplicates;
neighbors->size = size;
neighbors->num_duplicates = num_duplicates;
if (size < ARRAY_SIZE(neighbors->coords_fixed)) {
neighbors->coords = neighbors->coords_fixed;
}
else {
neighbors->coords = static_cast<SubdivCCGCoord *>(
MEM_mallocN(sizeof(*neighbors->coords) * size, "SubdivCCGNeighbors.coords"));
}
}
/* Check whether given coordinate belongs to a grid corner. */
BLI_INLINE bool is_corner_grid_coord(const SubdivCCG *subdiv_ccg, const SubdivCCGCoord *coord)
{
const int grid_size_1 = subdiv_ccg->grid_size - 1;
return (coord->x == 0 && coord->y == 0) || (coord->x == 0 && coord->y == grid_size_1) ||
(coord->x == grid_size_1 && coord->y == grid_size_1) ||
(coord->x == grid_size_1 && coord->y == 0);
}
/* Check whether given coordinate belongs to a grid boundary. */
BLI_INLINE bool is_boundary_grid_coord(const SubdivCCG *subdiv_ccg, const SubdivCCGCoord *coord)
{
const int grid_size_1 = subdiv_ccg->grid_size - 1;
return coord->x == 0 || coord->y == 0 || coord->x == grid_size_1 || coord->y == grid_size_1;
}
/* Check whether coordinate is at the boundary between two grids of the same face. */
BLI_INLINE bool is_inner_edge_grid_coordinate(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord)
{
const int grid_size_1 = subdiv_ccg->grid_size - 1;
if (coord->x == 0) {
return coord->y > 0 && coord->y < grid_size_1;
}
if (coord->y == 0) {
return coord->x > 0 && coord->x < grid_size_1;
}
return false;
}
BLI_INLINE SubdivCCGCoord coord_at_prev_row(const SubdivCCG * /*subdiv_ccg*/,
const SubdivCCGCoord *coord)
{
BLI_assert(coord->y > 0);
SubdivCCGCoord result = *coord;
result.y -= 1;
return result;
}
BLI_INLINE SubdivCCGCoord coord_at_next_row(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord)
{
UNUSED_VARS_NDEBUG(subdiv_ccg);
BLI_assert(coord->y < subdiv_ccg->grid_size - 1);
SubdivCCGCoord result = *coord;
result.y += 1;
return result;
}
BLI_INLINE SubdivCCGCoord coord_at_prev_col(const SubdivCCG * /*subdiv_ccg*/,
const SubdivCCGCoord *coord)
{
BLI_assert(coord->x > 0);
SubdivCCGCoord result = *coord;
result.x -= 1;
return result;
}
BLI_INLINE SubdivCCGCoord coord_at_next_col(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord)
{
UNUSED_VARS_NDEBUG(subdiv_ccg);
BLI_assert(coord->x < subdiv_ccg->grid_size - 1);
SubdivCCGCoord result = *coord;
result.x += 1;
return result;
}
/* For the input coordinate which is at the boundary of the grid do one step inside. */
static SubdivCCGCoord coord_step_inside_from_boundary(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord)
{
SubdivCCGCoord result = *coord;
const int grid_size_1 = subdiv_ccg->grid_size - 1;
if (result.x == grid_size_1) {
--result.x;
}
else if (result.y == grid_size_1) {
--result.y;
}
else if (result.x == 0) {
++result.x;
}
else if (result.y == 0) {
++result.y;
}
else {
BLI_assert_msg(0, "non-boundary element given");
}
return result;
}
BLI_INLINE int next_grid_index_from_coord(const SubdivCCG *subdiv_ccg, const SubdivCCGCoord *coord)
{
const SubdivCCGFace *face = &subdiv_ccg->faces[subdiv_ccg->grid_to_face_map[coord->grid_index]];
const int face_grid_index = coord->grid_index;
int next_face_grid_index = face_grid_index + 1 - face->start_grid_index;
if (next_face_grid_index == face->num_grids) {
next_face_grid_index = 0;
}
return face->start_grid_index + next_face_grid_index;
}
BLI_INLINE int prev_grid_index_from_coord(const SubdivCCG *subdiv_ccg, const SubdivCCGCoord *coord)
{
const SubdivCCGFace *face = &subdiv_ccg->faces[subdiv_ccg->grid_to_face_map[coord->grid_index]];
const int face_grid_index = coord->grid_index;
int prev_face_grid_index = face_grid_index - 1 - face->start_grid_index;
if (prev_face_grid_index < 0) {
prev_face_grid_index = face->num_grids - 1;
}
return face->start_grid_index + prev_face_grid_index;
}
/* Simple case of getting neighbors of a corner coordinate: the corner is a face center, so we
* only need to iterate over the grids of a single face, without looking into adjacency. */
static void neighbor_coords_corner_center_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const bool include_duplicates,
SubdivCCGNeighbors *r_neighbors)
{
const SubdivCCGFace *face = &subdiv_ccg->faces[subdiv_ccg->grid_to_face_map[coord->grid_index]];
const int num_adjacent_grids = face->num_grids;
subdiv_ccg_neighbors_init(
r_neighbors, num_adjacent_grids, (include_duplicates) ? num_adjacent_grids - 1 : 0);
int duplicate_face_grid_index = num_adjacent_grids;
for (int face_grid_index = 0; face_grid_index < num_adjacent_grids; ++face_grid_index) {
SubdivCCGCoord neighbor_coord;
neighbor_coord.grid_index = face->start_grid_index + face_grid_index;
neighbor_coord.x = 1;
neighbor_coord.y = 0;
r_neighbors->coords[face_grid_index] = neighbor_coord;
if (include_duplicates && neighbor_coord.grid_index != coord->grid_index) {
neighbor_coord.x = 0;
r_neighbors->coords[duplicate_face_grid_index++] = neighbor_coord;
}
}
}
/* Get index within adjacent_vertices array for the given CCG coordinate. */
static int adjacent_vertex_index_from_coord(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord)
{
Subdiv *subdiv = subdiv_ccg->subdiv;
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
const SubdivCCGFace *face = &subdiv_ccg->faces[subdiv_ccg->grid_to_face_map[coord->grid_index]];
const int face_index = face - subdiv_ccg->faces;
const int face_grid_index = coord->grid_index - face->start_grid_index;
const int num_face_grids = face->num_grids;
Array<int, 64> face_vertices(num_face_grids);
topology_refiner->getFaceVertices(topology_refiner, face_index, face_vertices.data());
const int adjacent_vertex_index = face_vertices[face_grid_index];
return adjacent_vertex_index;
}
/* The corner is adjacent to a coarse vertex. */
static void neighbor_coords_corner_vertex_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const bool include_duplicates,
SubdivCCGNeighbors *r_neighbors)
{
Subdiv *subdiv = subdiv_ccg->subdiv;
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
const int adjacent_vertex_index = adjacent_vertex_index_from_coord(subdiv_ccg, coord);
BLI_assert(adjacent_vertex_index >= 0);
BLI_assert(adjacent_vertex_index < subdiv_ccg->num_adjacent_vertices);
const int num_vertex_edges = topology_refiner->getNumVertexEdges(topology_refiner,
adjacent_vertex_index);
SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[adjacent_vertex_index];
const int num_adjacent_faces = adjacent_vertex->num_adjacent_faces;
subdiv_ccg_neighbors_init(
r_neighbors, num_vertex_edges, (include_duplicates) ? num_adjacent_faces - 1 : 0);
Array<int, 64> vertex_edges(num_vertex_edges);
topology_refiner->getVertexEdges(topology_refiner, adjacent_vertex_index, vertex_edges.data());
for (int i = 0; i < num_vertex_edges; ++i) {
const int edge_index = vertex_edges[i];
/* Use very first grid of every edge. */
const int edge_face_index = 0;
/* Depending on edge orientation we use the first (zero-based) or previous-to-last point. */
int edge_vertices_indices[2];
topology_refiner->getEdgeVertices(topology_refiner, edge_index, edge_vertices_indices);
int edge_point_index, duplicate_edge_point_index;
if (edge_vertices_indices[0] == adjacent_vertex_index) {
duplicate_edge_point_index = 0;
edge_point_index = duplicate_edge_point_index + 1;
}
else {
/* Edge "consists" of 2 grids, which makes it 2 * grid_size elements per edge.
* The index of the last edge element is 2 * grid_size - 1 (due to zero-based indices),
* and we are interested in the previous-to-last element. */
duplicate_edge_point_index = subdiv_ccg->grid_size * 2 - 1;
edge_point_index = duplicate_edge_point_index - 1;
}
SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[edge_index];
r_neighbors->coords[i] = adjacent_edge->boundary_coords[edge_face_index][edge_point_index];
}
if (include_duplicates) {
/* Add duplicates of the current grid vertex in adjacent faces if requested. */
for (int i = 0, duplicate_i = num_vertex_edges; i < num_adjacent_faces; i++) {
SubdivCCGCoord neighbor_coord = adjacent_vertex->corner_coords[i];
if (neighbor_coord.grid_index != coord->grid_index) {
r_neighbors->coords[duplicate_i++] = neighbor_coord;
}
}
}
}
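/* Find the index of the coarse edge adjacent to the given boundary coordinate.
 *
 * The coordinate is expected to lie on an outer grid boundary (x == grid_size - 1 or
 * y == grid_size - 1): the maximum-X boundary maps to the face edge at the grid's own corner,
 * the maximum-Y boundary to the previous face edge. */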
static int adjacent_edge_index_from_coord(const SubdivCCG *subdiv_ccg, const SubdivCCGCoord *coord)
{
Subdiv *subdiv = subdiv_ccg->subdiv;
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
const SubdivCCGFace *face = &subdiv_ccg->faces[subdiv_ccg->grid_to_face_map[coord->grid_index]];
const int face_grid_index = coord->grid_index - face->start_grid_index;
const int face_index = face - subdiv_ccg->faces;
const int num_face_edges = topology_refiner->getNumFaceEdges(topology_refiner, face_index);
Array<int, 64> face_edges(num_face_edges);
topology_refiner->getFaceEdges(topology_refiner, face_index, face_edges.data());
const int grid_size_1 = subdiv_ccg->grid_size - 1;
int adjacent_edge_index = -1;
if (coord->x == grid_size_1) {
adjacent_edge_index = face_edges[face_grid_index];
}
else {
BLI_assert(coord->y == grid_size_1);
adjacent_edge_index =
face_edges[face_grid_index == 0 ? face->num_grids - 1 : face_grid_index - 1];
}
return adjacent_edge_index;
}
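/* Map the given boundary coordinate to a point index along the adjacent edge.
 *
 * Points along an adjacent edge are stored as a single row of 2 * grid_size elements, spanning
 * the boundaries of both grids which touch the edge. The index is flipped when the edge points
 * in the opposite direction, so that it is consistent for all adjacent faces. */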
static int adjacent_edge_point_index_from_coord(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const int adjacent_edge_index)
{
Subdiv *subdiv = subdiv_ccg->subdiv;
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
const int adjacent_vertex_index = adjacent_vertex_index_from_coord(subdiv_ccg, coord);
int edge_vertices_indices[2];
topology_refiner->getEdgeVertices(topology_refiner, adjacent_edge_index, edge_vertices_indices);
/* Vertex index of an edge which is used to see whether the edge points in the right direction.
 *
 * The tricky part here is that depending on whether the input coordinate is at the maximum X or
 * the maximum Y coordinate of the grid, a different edge direction needs to be used.
 * Basically, the edge adjacent to a previous loop needs to point in the opposite direction. */
int directional_edge_vertex_index = -1;
const int grid_size_1 = subdiv_ccg->grid_size - 1;
int adjacent_edge_point_index = -1;
if (coord->x == grid_size_1) {
adjacent_edge_point_index = subdiv_ccg->grid_size - coord->y - 1;
directional_edge_vertex_index = edge_vertices_indices[0];
}
else {
BLI_assert(coord->y == grid_size_1);
adjacent_edge_point_index = subdiv_ccg->grid_size + coord->x;
directional_edge_vertex_index = edge_vertices_indices[1];
}
/* Flip the index if the edge points in the opposite direction. */
if (adjacent_vertex_index != directional_edge_vertex_index) {
const int num_edge_points = subdiv_ccg->grid_size * 2;
adjacent_edge_point_index = num_edge_points - adjacent_edge_point_index - 1;
}
return adjacent_edge_point_index;
}
/* The adjacent edge has two points in the middle which correspond to grid corners, but which
 * are the same point in the final geometry.
 * So an extra step is needed when calculating next/previous points, to avoid going from a
 * corner of one grid to a corner of the adjacent grid. */
static int next_adjacent_edge_point_index(const SubdivCCG *subdiv_ccg, const int point_index)
{
if (point_index == subdiv_ccg->grid_size - 1) {
return point_index + 2;
}
return point_index + 1;
}
static int prev_adjacent_edge_point_index(const SubdivCCG *subdiv_ccg, const int point_index)
{
if (point_index == subdiv_ccg->grid_size) {
return point_index - 2;
}
return point_index - 1;
}
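/* Illustrative example (not part of the implementation): with grid_size = 4 an adjacent edge
 * stores 2 * 4 = 8 points, indexed 0..7. Points 3 and 4 both correspond to the shared grid
 * corner at the middle of the edge, so stepping forward from 3 yields 5 and stepping back
 * from 4 yields 2, skipping the duplicate. */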
/* When the point index corresponds to a grid corner, returns the point index which corresponds to
* the corner of the adjacent grid, as the adjacent edge has two separate points for each grid
* corner at the middle of the edge. */
static int adjacent_grid_corner_point_index_on_edge(const SubdivCCG *subdiv_ccg,
const int point_index)
{
if (point_index == subdiv_ccg->grid_size) {
return point_index - 1;
}
return point_index + 1;
}
/* Common implementation of neighbor calculation when the input coordinate is on the edge
 * between two coarse faces, but not at a coarse vertex. */
static void neighbor_coords_edge_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const bool include_duplicates,
SubdivCCGNeighbors *r_neighbors)
{
const bool is_corner = is_corner_grid_coord(subdiv_ccg, coord);
const int adjacent_edge_index = adjacent_edge_index_from_coord(subdiv_ccg, coord);
BLI_assert(adjacent_edge_index >= 0);
BLI_assert(adjacent_edge_index < subdiv_ccg->num_adjacent_edges);
const SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[adjacent_edge_index];
/* 2 neighbor points along the edge, plus one inner point for every adjacent grid. */
const int num_adjacent_faces = adjacent_edge->num_adjacent_faces;
int num_duplicates = 0;
if (include_duplicates) {
num_duplicates += num_adjacent_faces - 1;
if (is_corner) {
/* When the coord is a grid corner, add an extra duplicate per adjacent grid in all faces
 * adjacent to the edge. */
num_duplicates += num_adjacent_faces;
}
}
subdiv_ccg_neighbors_init(r_neighbors, num_adjacent_faces + 2, num_duplicates);
const int point_index = adjacent_edge_point_index_from_coord(
subdiv_ccg, coord, adjacent_edge_index);
const int point_index_duplicate = adjacent_grid_corner_point_index_on_edge(subdiv_ccg,
point_index);
const int next_point_index = next_adjacent_edge_point_index(subdiv_ccg, point_index);
const int prev_point_index = prev_adjacent_edge_point_index(subdiv_ccg, point_index);
int duplicate_i = num_adjacent_faces;
for (int i = 0; i < num_adjacent_faces; ++i) {
SubdivCCGCoord *boundary_coords = adjacent_edge->boundary_coords[i];
/* One step into the grid from the edge for each adjacent face. */
SubdivCCGCoord grid_coord = boundary_coords[point_index];
r_neighbors->coords[i + 2] = coord_step_inside_from_boundary(subdiv_ccg, &grid_coord);
if (grid_coord.grid_index == coord->grid_index) {
/* Previous and next along the edge for the current grid. */
r_neighbors->coords[0] = boundary_coords[prev_point_index];
r_neighbors->coords[1] = boundary_coords[next_point_index];
}
else if (include_duplicates) {
/* Same coordinate on neighboring grids if requested. */
r_neighbors->coords[duplicate_i + 2] = grid_coord;
duplicate_i++;
}
/* When it is a corner, add the duplicate of the adjacent grid in the same face. */
if (include_duplicates && is_corner) {
SubdivCCGCoord duplicate_corner_grid_coord = boundary_coords[point_index_duplicate];
r_neighbors->coords[duplicate_i + 2] = duplicate_corner_grid_coord;
duplicate_i++;
}
}
BLI_assert(duplicate_i - num_adjacent_faces == num_duplicates);
}
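/* For reference, the layout of the result filled in above: coords[0] and coords[1] are the
 * previous and next points along the edge within the current grid, the following
 * num_adjacent_faces entries are the one-step-inside points of every adjacent grid, and any
 * requested duplicates are appended after them. */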
/* The corner is at the middle of an edge between faces. */
static void neighbor_coords_corner_edge_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const bool include_duplicates,
SubdivCCGNeighbors *r_neighbors)
{
neighbor_coords_edge_get(subdiv_ccg, coord, include_duplicates, r_neighbors);
}
/* Input coordinate is at one of the 4 corners of its grid. */
static void neighbor_coords_corner_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const bool include_duplicates,
SubdivCCGNeighbors *r_neighbors)
{
if (coord->x == 0 && coord->y == 0) {
neighbor_coords_corner_center_get(subdiv_ccg, coord, include_duplicates, r_neighbors);
}
else {
const int grid_size_1 = subdiv_ccg->grid_size - 1;
if (coord->x == grid_size_1 && coord->y == grid_size_1) {
neighbor_coords_corner_vertex_get(subdiv_ccg, coord, include_duplicates, r_neighbors);
}
else {
neighbor_coords_corner_edge_get(subdiv_ccg, coord, include_duplicates, r_neighbors);
}
}
}
/* Simple case of getting neighbors of a boundary coordinate: the input coordinate is at the
* boundary between two grids of the same face and there is no need to check adjacency with
* other faces. */
static void neighbor_coords_boundary_inner_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const bool include_duplicates,
SubdivCCGNeighbors *r_neighbors)
{
subdiv_ccg_neighbors_init(r_neighbors, 4, (include_duplicates) ? 1 : 0);
if (coord->x == 0) {
r_neighbors->coords[0] = coord_at_prev_row(subdiv_ccg, coord);
r_neighbors->coords[1] = coord_at_next_row(subdiv_ccg, coord);
r_neighbors->coords[2] = coord_at_next_col(subdiv_ccg, coord);
r_neighbors->coords[3].grid_index = prev_grid_index_from_coord(subdiv_ccg, coord);
r_neighbors->coords[3].x = coord->y;
r_neighbors->coords[3].y = 1;
if (include_duplicates) {
r_neighbors->coords[4] = r_neighbors->coords[3];
r_neighbors->coords[4].y = 0;
}
}
else if (coord->y == 0) {
r_neighbors->coords[0] = coord_at_prev_col(subdiv_ccg, coord);
r_neighbors->coords[1] = coord_at_next_col(subdiv_ccg, coord);
r_neighbors->coords[2] = coord_at_next_row(subdiv_ccg, coord);
r_neighbors->coords[3].grid_index = next_grid_index_from_coord(subdiv_ccg, coord);
r_neighbors->coords[3].x = 1;
r_neighbors->coords[3].y = coord->x;
if (include_duplicates) {
r_neighbors->coords[4] = r_neighbors->coords[3];
r_neighbors->coords[4].x = 0;
}
}
}
/* Input coordinate is on an edge between two faces. Need to check adjacency. */
static void neighbor_coords_boundary_outer_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const bool include_duplicates,
SubdivCCGNeighbors *r_neighbors)
{
neighbor_coords_edge_get(subdiv_ccg, coord, include_duplicates, r_neighbors);
}
/* Input coordinate is at one of 4 boundaries of its grid.
 * It can either be an inner boundary (which connects the face center to the face edge) or be a
 * part of a coarse face edge. */
static void neighbor_coords_boundary_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const bool include_duplicates,
SubdivCCGNeighbors *r_neighbors)
{
if (is_inner_edge_grid_coordinate(subdiv_ccg, coord)) {
neighbor_coords_boundary_inner_get(subdiv_ccg, coord, include_duplicates, r_neighbors);
}
else {
neighbor_coords_boundary_outer_get(subdiv_ccg, coord, include_duplicates, r_neighbors);
}
}
/* Input coordinate is inside its grid; all the neighbors belong to the same grid. */
static void neighbor_coords_inner_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
SubdivCCGNeighbors *r_neighbors)
{
subdiv_ccg_neighbors_init(r_neighbors, 4, 0);
r_neighbors->coords[0] = coord_at_prev_row(subdiv_ccg, coord);
r_neighbors->coords[1] = coord_at_next_row(subdiv_ccg, coord);
r_neighbors->coords[2] = coord_at_prev_col(subdiv_ccg, coord);
r_neighbors->coords[3] = coord_at_next_col(subdiv_ccg, coord);
}
void BKE_subdiv_ccg_neighbor_coords_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const bool include_duplicates,
SubdivCCGNeighbors *r_neighbors)
{
BLI_assert(coord->grid_index >= 0);
BLI_assert(coord->grid_index < subdiv_ccg->num_grids);
BLI_assert(coord->x >= 0);
BLI_assert(coord->x < subdiv_ccg->grid_size);
BLI_assert(coord->y >= 0);
BLI_assert(coord->y < subdiv_ccg->grid_size);
if (is_corner_grid_coord(subdiv_ccg, coord)) {
neighbor_coords_corner_get(subdiv_ccg, coord, include_duplicates, r_neighbors);
}
else if (is_boundary_grid_coord(subdiv_ccg, coord)) {
neighbor_coords_boundary_get(subdiv_ccg, coord, include_duplicates, r_neighbors);
}
else {
neighbor_coords_inner_get(subdiv_ccg, coord, r_neighbors);
}
#ifndef NDEBUG
for (int i = 0; i < r_neighbors->size; i++) {
BLI_assert(BKE_subdiv_ccg_check_coord_valid(subdiv_ccg, &r_neighbors->coords[i]));
}
#endif
}
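/* Illustrative usage sketch, assuming a valid `subdiv_ccg` (the chosen element is just for the
 * example):
 *
 *   SubdivCCGCoord coord{};
 *   coord.grid_index = 0;
 *   coord.x = 1;
 *   coord.y = 1;
 *   SubdivCCGNeighbors neighbors;
 *   BKE_subdiv_ccg_neighbor_coords_get(subdiv_ccg, &coord, false, &neighbors);
 *   for (int i = 0; i < neighbors.size; i++) {
 *     // Each neighbors.coords[i] is a valid coordinate within subdiv_ccg.
 *   }
 */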
int BKE_subdiv_ccg_grid_to_face_index(const SubdivCCG *subdiv_ccg, const int grid_index)
{
return subdiv_ccg->grid_to_face_map[grid_index];
}
const int *BKE_subdiv_ccg_start_face_grid_index_ensure(SubdivCCG *subdiv_ccg)
{
if (subdiv_ccg->cache_.start_face_grid_index == nullptr) {
const Subdiv *subdiv = subdiv_ccg->subdiv;
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
if (topology_refiner == nullptr) {
return nullptr;
}
const int num_coarse_faces = topology_refiner->getNumFaces(topology_refiner);
subdiv_ccg->cache_.start_face_grid_index = static_cast<int *>(
MEM_malloc_arrayN(num_coarse_faces, sizeof(int), "start_face_grid_index"));
int start_grid_index = 0;
for (int face_index = 0; face_index < num_coarse_faces; face_index++) {
const int num_face_grids = topology_refiner->getNumFaceVertices(topology_refiner,
face_index);
subdiv_ccg->cache_.start_face_grid_index[face_index] = start_grid_index;
start_grid_index += num_face_grids;
}
}
return subdiv_ccg->cache_.start_face_grid_index;
}
const int *BKE_subdiv_ccg_start_face_grid_index_get(const SubdivCCG *subdiv_ccg)
{
return subdiv_ccg->cache_.start_face_grid_index;
}
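/* Find the two coarse mesh vertices of the edge the given boundary coordinate is adjacent to.
 *
 * `r_v1` is always set to the coarse vertex which corresponds to the coordinate's grid; `r_v2`
 * is set to the other vertex of the adjacent edge (the next face corner for the maximum-X
 * boundary, the previous one for the maximum-Y boundary). */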
static void adjacent_vertices_index_from_adjacent_edge(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const blender::Span<int> corner_verts,
const blender::OffsetIndices<int> faces,
int *r_v1,
int *r_v2)
{
const int grid_size_1 = subdiv_ccg->grid_size - 1;
const int face_index = BKE_subdiv_ccg_grid_to_face_index(subdiv_ccg, coord->grid_index);
const blender::IndexRange face = faces[face_index];
*r_v1 = corner_verts[coord->grid_index];
const int corner = blender::bke::mesh::face_find_corner_from_vert(face, corner_verts, *r_v1);
if (coord->x == grid_size_1) {
const int next = blender::bke::mesh::face_corner_next(face, corner);
*r_v2 = corner_verts[next];
}
if (coord->y == grid_size_1) {
const int prev = blender::bke::mesh::face_corner_prev(face, corner);
*r_v2 = corner_verts[prev];
}
}
SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(
const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const blender::Span<int> corner_verts,
const blender::OffsetIndices<int> faces,
int *r_v1,
int *r_v2)
{
const int grid_size_1 = subdiv_ccg->grid_size - 1;
if (is_corner_grid_coord(subdiv_ccg, coord)) {
if (coord->x == 0 && coord->y == 0) {
/* Grid corner in the center of a face. */
return SUBDIV_CCG_ADJACENT_NONE;
}
if (coord->x == grid_size_1 && coord->y == grid_size_1) {
/* Grid corner adjacent to a coarse mesh vertex. */
*r_v1 = *r_v2 = corner_verts[coord->grid_index];
return SUBDIV_CCG_ADJACENT_VERTEX;
}
/* Grid corner adjacent to the middle of a coarse mesh edge. */
adjacent_vertices_index_from_adjacent_edge(subdiv_ccg, coord, corner_verts, faces, r_v1, r_v2);
return SUBDIV_CCG_ADJACENT_EDGE;
}
if (is_boundary_grid_coord(subdiv_ccg, coord)) {
if (!is_inner_edge_grid_coordinate(subdiv_ccg, coord)) {
/* Grid boundary adjacent to a coarse mesh edge. */
adjacent_vertices_index_from_adjacent_edge(
subdiv_ccg, coord, corner_verts, faces, r_v1, r_v2);
return SUBDIV_CCG_ADJACENT_EDGE;
}
}
return SUBDIV_CCG_ADJACENT_NONE;
}
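/* Illustrative caller-side sketch, assuming `coord`, `corner_verts` and `faces` describe the
 * same coarse mesh (the handling in each branch is hypothetical):
 *
 *   int v1, v2;
 *   switch (BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(
 *       subdiv_ccg, &coord, corner_verts, faces, &v1, &v2))
 *   {
 *     case SUBDIV_CCG_ADJACENT_VERTEX:
 *       // v1 == v2: the coordinate sits on coarse vertex v1.
 *       break;
 *     case SUBDIV_CCG_ADJACENT_EDGE:
 *       // The coordinate sits on the coarse edge between v1 and v2.
 *       break;
 *     case SUBDIV_CCG_ADJACENT_NONE:
 *       // Interior of a face: v1 and v2 are not set.
 *       break;
 *   }
 */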
void BKE_subdiv_ccg_grid_hidden_ensure(SubdivCCG *subdiv_ccg, int grid_index)
{
if (subdiv_ccg->grid_hidden[grid_index] != nullptr) {
return;
}
CCGKey key;
BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
subdiv_ccg->grid_hidden[grid_index] = BLI_BITMAP_NEW(key.grid_area, __func__);
}
void BKE_subdiv_ccg_grid_hidden_free(SubdivCCG *subdiv_ccg, int grid_index)
{
MEM_SAFE_FREE(subdiv_ccg->grid_hidden[grid_index]);
}
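/* Convert a CCG grid coordinate into a ptex face index with (u, v) coordinates within that face.
 *
 * For quads the 4 grids map onto a single ptex face, each rotated into its own quadrant; for
 * non-quad faces every corner has its own ptex face, with the grid's (u, v) flipped into the
 * ptex orientation. */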
static void subdiv_ccg_coord_to_ptex_coord(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
int *r_ptex_face_index,
float *r_u,
float *r_v)
{
Subdiv *subdiv = subdiv_ccg->subdiv;
const float grid_size = subdiv_ccg->grid_size;
const float grid_size_1_inv = 1.0f / (grid_size - 1);
const float grid_u = coord->x * grid_size_1_inv;
const float grid_v = coord->y * grid_size_1_inv;
const int face_index = BKE_subdiv_ccg_grid_to_face_index(subdiv_ccg, coord->grid_index);
const SubdivCCGFace *faces = subdiv_ccg->faces;
const SubdivCCGFace *face = &faces[face_index];
const int *face_ptex_offset = BKE_subdiv_face_ptex_offset_get(subdiv);
*r_ptex_face_index = face_ptex_offset[face_index];
const float corner = coord->grid_index - face->start_grid_index;
if (face->num_grids == 4) {
BKE_subdiv_rotate_grid_to_quad(corner, grid_u, grid_v, r_u, r_v);
}
else {
*r_ptex_face_index += corner;
*r_u = 1.0f - grid_v;
*r_v = 1.0f - grid_u;
}
}
void BKE_subdiv_ccg_eval_limit_point(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
float r_point[3])
{
Subdiv *subdiv = subdiv_ccg->subdiv;
int ptex_face_index;
float u, v;
subdiv_ccg_coord_to_ptex_coord(subdiv_ccg, coord, &ptex_face_index, &u, &v);
BKE_subdiv_eval_limit_point(subdiv, ptex_face_index, u, v, r_point);
}
/** \} */