2018-09-04 16:16:01 +02:00
|
|
|
/*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version 2
|
|
|
|
* of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software Foundation,
|
|
|
|
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
|
|
*
|
|
|
|
* The Original Code is Copyright (C) 2018 by Blender Foundation.
|
|
|
|
* All rights reserved.
|
|
|
|
*/
|
|
|
|
|
2019-02-17 22:08:12 +01:00
|
|
|
/** \file
|
|
|
|
* \ingroup bke
|
2018-09-04 16:16:01 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include "BKE_subdiv_ccg.h"
|
|
|
|
|
|
|
|
#include "DNA_mesh_types.h"
|
|
|
|
#include "DNA_meshdata_types.h"
|
|
|
|
|
|
|
|
#include "MEM_guardedalloc.h"
|
|
|
|
|
2018-09-06 17:06:17 +02:00
|
|
|
#include "BLI_math_bits.h"
|
2018-09-18 14:23:35 +02:00
|
|
|
#include "BLI_math_vector.h"
|
2018-09-06 17:06:17 +02:00
|
|
|
#include "BLI_task.h"
|
|
|
|
|
|
|
|
#include "BKE_DerivedMesh.h"
|
2018-09-04 16:16:01 +02:00
|
|
|
#include "BKE_ccg.h"
|
2018-09-06 17:06:17 +02:00
|
|
|
#include "BKE_mesh.h"
|
2018-09-04 16:16:01 +02:00
|
|
|
#include "BKE_subdiv.h"
|
2018-09-06 17:06:17 +02:00
|
|
|
#include "BKE_subdiv_eval.h"
|
|
|
|
|
2018-09-18 17:09:08 +02:00
|
|
|
#include "opensubdiv_topology_refiner_capi.h"
|
|
|
|
|
2019-02-22 16:56:54 +01:00
|
|
|
/* =============================================================================
|
|
|
|
* Various forward declarations.
|
|
|
|
*/
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_average_all_boundaries_and_corners(SubdivCCG *subdiv_ccg, CCGKey *key);
|
2019-02-22 16:56:54 +01:00
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_average_inner_face_grids(SubdivCCG *subdiv_ccg,
|
|
|
|
CCGKey *key,
|
|
|
|
SubdivCCGFace *face);
|
2019-02-22 16:56:54 +01:00
|
|
|
|
2018-09-06 17:06:17 +02:00
|
|
|
/* =============================================================================
|
|
|
|
* Generally useful internal helpers.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Number of floats in per-vertex elements. */
|
|
|
|
static int num_element_float_get(const SubdivCCG *subdiv_ccg)
|
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
/* We always have 3 floats for coordinate. */
|
|
|
|
int num_floats = 3;
|
|
|
|
if (subdiv_ccg->has_normal) {
|
|
|
|
num_floats += 3;
|
|
|
|
}
|
|
|
|
if (subdiv_ccg->has_mask) {
|
|
|
|
num_floats += 1;
|
|
|
|
}
|
|
|
|
return num_floats;
|
2018-09-06 17:06:17 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Per-vertex element size in bytes. */
|
|
|
|
static int element_size_bytes_get(const SubdivCCG *subdiv_ccg)
|
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
return sizeof(float) * num_element_float_get(subdiv_ccg);
|
2018-09-06 17:06:17 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* =============================================================================
|
|
|
|
* Internal helpers for CCG creation.
|
|
|
|
*/
|
2018-09-04 16:16:01 +02:00
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Set up layer availability flags and byte offsets inside a single CCG
 * element, based on the requested settings. Offsets of absent layers are
 * set to -1. */
static void subdiv_ccg_init_layers(SubdivCCG *subdiv_ccg, const SubdivToCCGSettings *settings)
{
  /* CCG always contains coordinates, which occupy the first 3 floats of the
   * element; all optional layers are placed after them. */
  int layer_offset = sizeof(float) * 3;
  /* Mask layer. */
  subdiv_ccg->has_mask = settings->need_mask;
  subdiv_ccg->mask_offset = -1;
  if (settings->need_mask) {
    subdiv_ccg->mask_offset = layer_offset;
    layer_offset += sizeof(float);
  }
  /* Normals layer.
   *
   * NOTE: Keep them at the end, matching old CCGDM. Doesn't really matter
   * here, but some other area might in theory depend memory layout. */
  subdiv_ccg->has_normal = settings->need_normal;
  subdiv_ccg->normal_offset = -1;
  if (settings->need_normal) {
    subdiv_ccg->normal_offset = layer_offset;
    layer_offset += sizeof(float) * 3;
  }
}
|
|
|
|
|
2018-09-20 10:16:50 +02:00
|
|
|
/* TODO(sergey): Make it more accessible function. */
|
2019-04-17 06:17:24 +02:00
|
|
|
static int topology_refiner_count_face_corners(OpenSubdiv_TopologyRefiner *topology_refiner)
|
2018-09-20 10:16:50 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
const int num_faces = topology_refiner->getNumFaces(topology_refiner);
|
|
|
|
int num_corners = 0;
|
|
|
|
for (int face_index = 0; face_index < num_faces; face_index++) {
|
|
|
|
num_corners += topology_refiner->getNumFaceVertices(topology_refiner, face_index);
|
|
|
|
}
|
|
|
|
return num_corners;
|
2018-09-20 10:16:50 +02:00
|
|
|
}
|
|
|
|
|
2018-09-06 17:06:17 +02:00
|
|
|
/* NOTE: Grid size and layer flags are to be filled in before calling this
|
2019-01-18 11:28:38 +01:00
|
|
|
* function. */
|
2018-09-20 10:16:50 +02:00
|
|
|
static void subdiv_ccg_alloc_elements(SubdivCCG *subdiv_ccg, Subdiv *subdiv)
|
2018-09-06 17:06:17 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
|
|
|
|
const int element_size = element_size_bytes_get(subdiv_ccg);
|
|
|
|
/* Allocate memory for surface grids. */
|
|
|
|
const int num_faces = topology_refiner->getNumFaces(topology_refiner);
|
|
|
|
const int num_grids = topology_refiner_count_face_corners(topology_refiner);
|
|
|
|
const int grid_size = BKE_subdiv_grid_size_from_level(subdiv_ccg->level);
|
|
|
|
const int grid_area = grid_size * grid_size;
|
|
|
|
subdiv_ccg->num_grids = num_grids;
|
|
|
|
subdiv_ccg->grids = MEM_calloc_arrayN(num_grids, sizeof(CCGElem *), "subdiv ccg grids");
|
|
|
|
subdiv_ccg->grids_storage = MEM_calloc_arrayN(
|
|
|
|
num_grids, ((size_t)grid_area) * element_size, "subdiv ccg grids storage");
|
|
|
|
const size_t grid_size_in_bytes = (size_t)grid_area * element_size;
|
|
|
|
for (int grid_index = 0; grid_index < num_grids; grid_index++) {
|
|
|
|
const size_t grid_offset = grid_size_in_bytes * grid_index;
|
|
|
|
subdiv_ccg->grids[grid_index] = (CCGElem *)&subdiv_ccg->grids_storage[grid_offset];
|
|
|
|
}
|
|
|
|
/* Grid material flags. */
|
|
|
|
subdiv_ccg->grid_flag_mats = MEM_calloc_arrayN(
|
|
|
|
num_grids, sizeof(DMFlagMat), "ccg grid material flags");
|
|
|
|
/* Grid hidden flags. */
|
|
|
|
subdiv_ccg->grid_hidden = MEM_calloc_arrayN(
|
|
|
|
num_grids, sizeof(BLI_bitmap *), "ccg grid material flags");
|
|
|
|
for (int grid_index = 0; grid_index < num_grids; grid_index++) {
|
|
|
|
subdiv_ccg->grid_hidden[grid_index] = BLI_BITMAP_NEW(grid_area, "ccg grid hidden");
|
|
|
|
}
|
|
|
|
/* TODO(sergey): Allocate memory for loose elements. */
|
|
|
|
/* Allocate memory for faces. */
|
|
|
|
subdiv_ccg->num_faces = num_faces;
|
|
|
|
if (num_faces) {
|
|
|
|
subdiv_ccg->faces = MEM_calloc_arrayN(num_faces, sizeof(SubdivCCGFace), "Subdiv CCG faces");
|
|
|
|
subdiv_ccg->grid_faces = MEM_calloc_arrayN(
|
|
|
|
num_grids, sizeof(SubdivCCGFace *), "Subdiv CCG grid faces");
|
|
|
|
}
|
2018-09-06 17:06:17 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* =============================================================================
|
|
|
|
* Grids evaluation.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Data shared by all parallel grid-evaluation tasks. */
typedef struct CCGEvalGridsData {
  /* CCG whose grids are being filled in. */
  SubdivCCG *subdiv_ccg;
  /* Subdivision surface the grids are evaluated from. */
  Subdiv *subdiv;
  /* Per-coarse-face offset into ptex face indices
   * (from BKE_subdiv_face_ptex_offset_get). */
  int *face_ptex_offset;
  /* Optional evaluator for the per-vertex mask layer; may be NULL, in which
   * case the mask defaults to zero. */
  SubdivCCGMaskEvaluator *mask_evaluator;
  /* Evaluator of per-face material flags. */
  SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator;
} CCGEvalGridsData;
|
|
|
|
|
2019-09-18 14:33:53 +02:00
|
|
|
/* Evaluate surface coordinate (and optional normal) of a single grid element
 * at the given ptex coordinate, writing into the element's layers. */
static void subdiv_ccg_eval_grid_element_limit(CCGEvalGridsData *data,
                                               const int ptex_face_index,
                                               const float u,
                                               const float v,
                                               unsigned char *element)
{
  Subdiv *subdiv = data->subdiv;
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  float *coord = (float *)element;
  /* With a displacement evaluator the final (displaced) point is computed. */
  if (subdiv->displacement_evaluator != NULL) {
    BKE_subdiv_eval_final_point(subdiv, ptex_face_index, u, v, coord);
    return;
  }
  /* Otherwise evaluate the limit surface, with normal if a normal layer is
   * present. */
  if (subdiv_ccg->has_normal) {
    float *normal = (float *)(element + subdiv_ccg->normal_offset);
    BKE_subdiv_eval_limit_point_and_normal(subdiv, ptex_face_index, u, v, coord, normal);
    return;
  }
  BKE_subdiv_eval_limit_point(subdiv, ptex_face_index, u, v, coord);
}
|
|
|
|
|
|
|
|
/* Evaluate mask layer of a single grid element. No-op when the CCG carries
 * no mask; without a mask evaluator the value defaults to zero. */
static void subdiv_ccg_eval_grid_element_mask(CCGEvalGridsData *data,
                                              const int ptex_face_index,
                                              const float u,
                                              const float v,
                                              unsigned char *element)
{
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  if (!subdiv_ccg->has_mask) {
    return;
  }
  float *mask_value_ptr = (float *)(element + subdiv_ccg->mask_offset);
  SubdivCCGMaskEvaluator *mask_evaluator = data->mask_evaluator;
  if (mask_evaluator != NULL) {
    *mask_value_ptr = mask_evaluator->eval_mask(mask_evaluator, ptex_face_index, u, v);
  }
  else {
    *mask_value_ptr = 0.0f;
  }
}
|
|
|
|
|
|
|
|
/* Evaluate all layers of a single grid element at the given ptex coordinate:
 * limit surface (coordinate + optional normal) first, then the mask layer. */
static void subdiv_ccg_eval_grid_element(CCGEvalGridsData *data,
                                         const int ptex_face_index,
                                         const float u,
                                         const float v,
                                         unsigned char *element)
{
  subdiv_ccg_eval_grid_element_limit(data, ptex_face_index, u, v, element);
  subdiv_ccg_eval_grid_element_mask(data, ptex_face_index, u, v, element);
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Evaluate all grids of a regular (quad) face: the whole face corresponds to
 * a single ptex face, and each corner grid covers a rotated quarter of it. */
static void subdiv_ccg_eval_regular_grid(CCGEvalGridsData *data, const int face_index)
{
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  const int ptex_face_index = data->face_ptex_offset[face_index];
  const int grid_size = subdiv_ccg->grid_size;
  /* Step between consecutive grid samples in [0, 1]. */
  const float grid_size_1_inv = 1.0f / (float)(grid_size - 1);
  const int element_size = element_size_bytes_get(subdiv_ccg);
  SubdivCCGFace *faces = subdiv_ccg->faces;
  SubdivCCGFace **grid_faces = subdiv_ccg->grid_faces;
  const SubdivCCGFace *face = &faces[face_index];
  for (int corner = 0; corner < face->num_grids; corner++) {
    const int grid_index = face->start_grid_index + corner;
    /* Raw byte view of the grid, advanced per-element below. */
    unsigned char *grid = (unsigned char *)subdiv_ccg->grids[grid_index];
    for (int y = 0; y < grid_size; y++) {
      const float grid_v = (float)y * grid_size_1_inv;
      for (int x = 0; x < grid_size; x++) {
        const float grid_u = (float)x * grid_size_1_inv;
        float u, v;
        /* Map grid-local (u, v) into the quad's ptex space for this corner. */
        BKE_subdiv_rotate_grid_to_quad(corner, grid_u, grid_v, &u, &v);
        const size_t grid_element_index = (size_t)y * grid_size + x;
        const size_t grid_element_offset = grid_element_index * element_size;
        subdiv_ccg_eval_grid_element(data, ptex_face_index, u, v, &grid[grid_element_offset]);
      }
    }
    /* Assign grid's face. */
    grid_faces[grid_index] = &faces[face_index];
    /* Assign material flags. */
    subdiv_ccg->grid_flag_mats[grid_index] = data->material_flags_evaluator->eval_material_flags(
        data->material_flags_evaluator, face_index);
  }
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Evaluate all grids of a non-quad face: every corner has its own ptex face,
 * sampled with mirrored (u, v) coordinates. */
static void subdiv_ccg_eval_special_grid(CCGEvalGridsData *data, const int face_index)
{
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  const int grid_size = subdiv_ccg->grid_size;
  /* Step between consecutive grid samples in [0, 1]. */
  const float grid_size_1_inv = 1.0f / (float)(grid_size - 1);
  const int element_size = element_size_bytes_get(subdiv_ccg);
  SubdivCCGFace *faces = subdiv_ccg->faces;
  SubdivCCGFace **grid_faces = subdiv_ccg->grid_faces;
  const SubdivCCGFace *face = &faces[face_index];
  for (int corner = 0; corner < face->num_grids; corner++) {
    const int grid_index = face->start_grid_index + corner;
    /* Unlike the regular case, each corner maps to its own ptex face. */
    const int ptex_face_index = data->face_ptex_offset[face_index] + corner;
    unsigned char *grid = (unsigned char *)subdiv_ccg->grids[grid_index];
    for (int y = 0; y < grid_size; y++) {
      /* Grid axes are mirrored relative to the ptex parameterization. */
      const float u = 1.0f - ((float)y * grid_size_1_inv);
      for (int x = 0; x < grid_size; x++) {
        const float v = 1.0f - ((float)x * grid_size_1_inv);
        const size_t grid_element_index = (size_t)y * grid_size + x;
        const size_t grid_element_offset = grid_element_index * element_size;
        subdiv_ccg_eval_grid_element(data, ptex_face_index, u, v, &grid[grid_element_offset]);
      }
    }
    /* Assign grid's face. */
    grid_faces[grid_index] = &faces[face_index];
    /* Assign material flags. */
    subdiv_ccg->grid_flag_mats[grid_index] = data->material_flags_evaluator->eval_material_flags(
        data->material_flags_evaluator, face_index);
  }
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Parallel-range task body: evaluate all grids of one coarse face. */
static void subdiv_ccg_eval_grids_task(void *__restrict userdata_v,
                                       const int face_index,
                                       const TaskParallelTLS *__restrict UNUSED(tls))
{
  CCGEvalGridsData *data = userdata_v;
  SubdivCCG *subdiv_ccg = data->subdiv_ccg;
  SubdivCCGFace *face = &subdiv_ccg->faces[face_index];
  /* Quads (4 grids) share one ptex face; all other faces use one ptex face
   * per corner. */
  if (face->num_grids == 4) {
    subdiv_ccg_eval_regular_grid(data, face_index);
  }
  else {
    subdiv_ccg_eval_special_grid(data, face_index);
  }
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static bool subdiv_ccg_evaluate_grids(SubdivCCG *subdiv_ccg,
|
|
|
|
Subdiv *subdiv,
|
|
|
|
SubdivCCGMaskEvaluator *mask_evaluator,
|
|
|
|
SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator)
|
2018-09-06 17:06:17 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
|
|
|
|
const int num_faces = topology_refiner->getNumFaces(topology_refiner);
|
|
|
|
/* Initialize data passed to all the tasks. */
|
|
|
|
CCGEvalGridsData data;
|
|
|
|
data.subdiv_ccg = subdiv_ccg;
|
|
|
|
data.subdiv = subdiv;
|
|
|
|
data.face_ptex_offset = BKE_subdiv_face_ptex_offset_get(subdiv);
|
|
|
|
data.mask_evaluator = mask_evaluator;
|
|
|
|
data.material_flags_evaluator = material_flags_evaluator;
|
|
|
|
/* Threaded grids evaluation. */
|
2019-07-30 14:56:47 +02:00
|
|
|
TaskParallelSettings parallel_range_settings;
|
2019-04-17 06:17:24 +02:00
|
|
|
BLI_parallel_range_settings_defaults(¶llel_range_settings);
|
|
|
|
BLI_task_parallel_range(
|
|
|
|
0, num_faces, &data, subdiv_ccg_eval_grids_task, ¶llel_range_settings);
|
|
|
|
/* If displacement is used, need to calculate normals after all final
|
|
|
|
* coordinates are known. */
|
|
|
|
if (subdiv->displacement_evaluator != NULL) {
|
|
|
|
BKE_subdiv_ccg_recalc_normals(subdiv_ccg);
|
|
|
|
}
|
|
|
|
return true;
|
2018-09-04 16:16:01 +02:00
|
|
|
}
|
|
|
|
|
2018-09-20 10:16:50 +02:00
|
|
|
/* Initialize face descriptors, assuming memory for them was already
|
2019-01-18 11:28:38 +01:00
|
|
|
* allocated. */
|
2018-09-20 10:16:50 +02:00
|
|
|
static void subdiv_ccg_init_faces(SubdivCCG *subdiv_ccg)
|
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
Subdiv *subdiv = subdiv_ccg->subdiv;
|
|
|
|
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
|
|
|
|
const int num_faces = subdiv_ccg->num_faces;
|
|
|
|
int corner_index = 0;
|
|
|
|
for (int face_index = 0; face_index < num_faces; face_index++) {
|
|
|
|
const int num_corners = topology_refiner->getNumFaceVertices(topology_refiner, face_index);
|
|
|
|
subdiv_ccg->faces[face_index].num_grids = num_corners;
|
|
|
|
subdiv_ccg->faces[face_index].start_grid_index = corner_index;
|
|
|
|
corner_index += num_corners;
|
|
|
|
}
|
2018-09-20 10:16:50 +02:00
|
|
|
}
|
|
|
|
|
2018-09-20 12:37:24 +02:00
|
|
|
/* TODO(sergey): Consider making it generic enough to be fit into BLI. */
/* Integer scratch buffer which serves small requests from an embedded stack
 * array and falls back to a heap allocation for larger ones. */
typedef struct StaticOrHeapIntStorage {
  /* Fixed stack-allocated storage used for small requests. */
  int static_storage[64];
  /* Number of elements in static_storage. */
  int static_storage_size;
  /* Lazily allocated heap buffer; NULL until a request outgrows the static
   * storage. */
  int *heap_storage;
  /* Current number of elements allocated in heap_storage. */
  int heap_storage_size;
} StaticOrHeapIntStorage;
|
|
|
|
|
|
|
|
/* Initialize storage to its empty state: static capacity recorded, no heap
 * buffer allocated yet. */
static void static_or_heap_storage_init(StaticOrHeapIntStorage *storage)
{
  const int static_capacity = sizeof(storage->static_storage) /
                              sizeof(*storage->static_storage);
  storage->static_storage_size = static_capacity;
  /* Heap buffer is only allocated on demand. */
  storage->heap_storage = NULL;
  storage->heap_storage_size = 0;
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Return a buffer of at least `size` integers: the embedded static storage
 * when it is big enough, otherwise a (re-used and grown as needed) heap
 * allocation. Previously returned heap pointers are invalidated on growth. */
static int *static_or_heap_storage_get(StaticOrHeapIntStorage *storage, int size)
{
  /* Requested size small enough to be fit into stack allocated memory. */
  if (size <= storage->static_storage_size) {
    return storage->static_storage;
  }
  /* Make sure heap is big enough. */
  if (size > storage->heap_storage_size) {
    MEM_SAFE_FREE(storage->heap_storage);
    storage->heap_storage = MEM_malloc_arrayN(size, sizeof(int), "int storage");
    storage->heap_storage_size = size;
  }
  return storage->heap_storage;
}
|
|
|
|
|
|
|
|
/* Release the heap buffer, if any. Safe to call on storage which never
 * outgrew its static part. */
static void static_or_heap_storage_free(StaticOrHeapIntStorage *storage)
{
  MEM_SAFE_FREE(storage->heap_storage);
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Allocate the per-edge adjacency array, zero-initialized so each edge's
 * adjacent-face counter starts at 0. */
static void subdiv_ccg_allocate_adjacent_edges(SubdivCCG *subdiv_ccg, const int num_edges)
{
  SubdivCCGAdjacentEdge *adjacent_edges = MEM_calloc_arrayN(
      num_edges, sizeof(*subdiv_ccg->adjacent_edges), "ccg adjacent edges");
  subdiv_ccg->adjacent_edges = adjacent_edges;
  subdiv_ccg->num_adjacent_edges = num_edges;
}
|
|
|
|
|
|
|
|
/* Returns storage where boundary elements are to be stored. */
static CCGElem **subdiv_ccg_adjacent_edge_add_face(SubdivCCG *subdiv_ccg,
                                                   SubdivCCGAdjacentEdge *adjacent_edge)
{
  /* NOTE(review): `grid_size` here is already 2 * subdiv_ccg->grid_size, and
   * the allocation below multiplies by 2 again, giving 4 * grid_size slots,
   * while the caller (subdiv_ccg_init_faces_edge_neighborhood) fills only
   * 2 * grid_size entries — confirm whether the 2x over-allocation is
   * intentional. */
  const int grid_size = subdiv_ccg->grid_size * 2;
  const int adjacent_face_index = adjacent_edge->num_adjacent_faces;
  ++adjacent_edge->num_adjacent_faces;
  /* Allocate memory for the boundary elements. */
  adjacent_edge->boundary_elements = MEM_reallocN(adjacent_edge->boundary_elements,
                                                  adjacent_edge->num_adjacent_faces *
                                                      sizeof(*adjacent_edge->boundary_elements));
  adjacent_edge->boundary_elements[adjacent_face_index] = MEM_malloc_arrayN(
      grid_size * 2, sizeof(CCGElem *), "ccg adjacent boundary");
  return adjacent_edge->boundary_elements[adjacent_face_index];
}
|
|
|
|
|
2018-09-20 14:01:35 +02:00
|
|
|
/* Build the edge -> boundary grid elements adjacency map: for every coarse
 * edge, record (per adjacent face) the row of CCG elements running along that
 * edge, ordered consistently with the edge's vertex order. */
static void subdiv_ccg_init_faces_edge_neighborhood(SubdivCCG *subdiv_ccg)
{
  Subdiv *subdiv = subdiv_ccg->subdiv;
  SubdivCCGFace *faces = subdiv_ccg->faces;
  OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
  const int num_edges = topology_refiner->getNumEdges(topology_refiner);
  const int grid_size = subdiv_ccg->grid_size;
  if (num_edges == 0) {
    /* Early output, nothing to do in this case. */
    return;
  }
  subdiv_ccg_allocate_adjacent_edges(subdiv_ccg, num_edges);
  /* Initialize storage. */
  StaticOrHeapIntStorage face_vertices_storage;
  StaticOrHeapIntStorage face_edges_storage;
  static_or_heap_storage_init(&face_vertices_storage);
  static_or_heap_storage_init(&face_edges_storage);
  /* Key to access elements. */
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  /* Store adjacency for all faces. */
  const int num_faces = subdiv_ccg->num_faces;
  for (int face_index = 0; face_index < num_faces; face_index++) {
    SubdivCCGFace *face = &faces[face_index];
    const int num_face_grids = face->num_grids;
    const int num_face_edges = num_face_grids;
    int *face_vertices = static_or_heap_storage_get(&face_vertices_storage, num_face_edges);
    topology_refiner->getFaceVertices(topology_refiner, face_index, face_vertices);
    /* Note that order of edges is same as order of MLoops, which also
     * means it's the same as order of grids. */
    int *face_edges = static_or_heap_storage_get(&face_edges_storage, num_face_edges);
    topology_refiner->getFaceEdges(topology_refiner, face_index, face_edges);
    /* Store grids adjacency for this edge. */
    for (int corner = 0; corner < num_face_edges; corner++) {
      const int vertex_index = face_vertices[corner];
      const int edge_index = face_edges[corner];
      int edge_vertices[2];
      topology_refiner->getEdgeVertices(topology_refiner, edge_index, edge_vertices);
      /* Flipped when the face walks the edge opposite to its vertex order. */
      const bool is_edge_flipped = (edge_vertices[0] != vertex_index);
      /* Grid which is adjacent to the current corner. */
      const int current_grid_index = face->start_grid_index + corner;
      CCGElem *current_grid = subdiv_ccg->grids[current_grid_index];
      /* Grid which is adjacent to the next corner. */
      const int next_grid_index = face->start_grid_index + (corner + 1) % num_face_grids;
      CCGElem *next_grid = subdiv_ccg->grids[next_grid_index];
      /* Add new face to the adjacent edge. */
      SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[edge_index];
      CCGElem **boundary_elements = subdiv_ccg_adjacent_edge_add_face(subdiv_ccg, adjacent_edge);
      /* Fill CCG elements along the edge: half the boundary comes from the
       * current corner's grid, the other half from the next corner's grid,
       * walked in the direction matching the edge orientation. */
      int boundary_element_index = 0;
      if (is_edge_flipped) {
        for (int i = 0; i < grid_size; i++) {
          boundary_elements[boundary_element_index++] = CCG_grid_elem(
              &key, next_grid, grid_size - i - 1, grid_size - 1);
        }
        for (int i = 0; i < grid_size; i++) {
          boundary_elements[boundary_element_index++] = CCG_grid_elem(
              &key, current_grid, grid_size - 1, i);
        }
      }
      else {
        for (int i = 0; i < grid_size; i++) {
          boundary_elements[boundary_element_index++] = CCG_grid_elem(
              &key, current_grid, grid_size - 1, grid_size - i - 1);
        }
        for (int i = 0; i < grid_size; i++) {
          boundary_elements[boundary_element_index++] = CCG_grid_elem(
              &key, next_grid, i, grid_size - 1);
        }
      }
    }
  }
  /* Free possibly heap-allocated storage. */
  static_or_heap_storage_free(&face_vertices_storage);
  static_or_heap_storage_free(&face_edges_storage);
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Allocate the per-vertex adjacency array, zero-initialized so each vertex's
 * adjacent-face counter starts at 0. */
static void subdiv_ccg_allocate_adjacent_vertices(SubdivCCG *subdiv_ccg, const int num_vertices)
{
  SubdivCCGAdjacentVertex *adjacent_vertices = MEM_calloc_arrayN(
      num_vertices, sizeof(*subdiv_ccg->adjacent_vertices), "ccg adjacent vertices");
  subdiv_ccg->adjacent_vertices = adjacent_vertices;
  subdiv_ccg->num_adjacent_vertices = num_vertices;
}
|
|
|
|
|
|
|
|
/* Returns storage where corner elements are to be stored. This is a pointer
|
2019-01-18 11:28:38 +01:00
|
|
|
* to the actual storage. */
|
2019-10-02 12:49:18 +02:00
|
|
|
static CCGElem **subdiv_ccg_adjacent_vertex_add_face(SubdivCCGAdjacentVertex *adjacent_vertex)
|
2018-09-20 14:01:35 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
const int adjacent_face_index = adjacent_vertex->num_adjacent_faces;
|
|
|
|
++adjacent_vertex->num_adjacent_faces;
|
|
|
|
/* Allocate memory for the boundary elements. */
|
|
|
|
adjacent_vertex->corner_elements = MEM_reallocN(adjacent_vertex->corner_elements,
|
|
|
|
adjacent_vertex->num_adjacent_faces *
|
|
|
|
sizeof(*adjacent_vertex->corner_elements));
|
|
|
|
return &adjacent_vertex->corner_elements[adjacent_face_index];
|
2018-09-20 14:01:35 +02:00
|
|
|
}
|
2018-09-20 12:37:24 +02:00
|
|
|
|
2018-09-20 14:01:35 +02:00
|
|
|
/* Build the vertex -> corner grid elements adjacency map: for every coarse
 * vertex, record (per adjacent face) the CCG element sitting at that vertex. */
static void subdiv_ccg_init_faces_vertex_neighborhood(SubdivCCG *subdiv_ccg)
{
  Subdiv *subdiv = subdiv_ccg->subdiv;
  SubdivCCGFace *faces = subdiv_ccg->faces;
  OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
  const int num_vertices = topology_refiner->getNumVertices(topology_refiner);
  const int grid_size = subdiv_ccg->grid_size;
  if (num_vertices == 0) {
    /* Early output, nothing to do in this case. */
    return;
  }
  subdiv_ccg_allocate_adjacent_vertices(subdiv_ccg, num_vertices);
  /* Initialize storage. */
  StaticOrHeapIntStorage face_vertices_storage;
  static_or_heap_storage_init(&face_vertices_storage);
  /* Key to access elements. */
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  /* Store adjacency for all faces. */
  const int num_faces = subdiv_ccg->num_faces;
  for (int face_index = 0; face_index < num_faces; face_index++) {
    SubdivCCGFace *face = &faces[face_index];
    const int num_face_grids = face->num_grids;
    const int num_face_edges = num_face_grids;
    int *face_vertices = static_or_heap_storage_get(&face_vertices_storage, num_face_edges);
    topology_refiner->getFaceVertices(topology_refiner, face_index, face_vertices);
    for (int corner = 0; corner < num_face_edges; corner++) {
      const int vertex_index = face_vertices[corner];
      /* Grid which is adjacent to the current corner. */
      const int grid_index = face->start_grid_index + corner;
      CCGElem *grid = subdiv_ccg->grids[grid_index];
      /* Add new face to the adjacent edge. */
      SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[vertex_index];
      CCGElem **corner_element = subdiv_ccg_adjacent_vertex_add_face(adjacent_vertex);
      /* The element at grid position (grid_size - 1, grid_size - 1) is the
       * one stored for this coarse vertex. */
      *corner_element = CCG_grid_elem(&key, grid, grid_size - 1, grid_size - 1);
    }
  }
  /* Free possibly heap-allocated storage. */
  static_or_heap_storage_free(&face_vertices_storage);
}
|
|
|
|
|
|
|
|
/* Build both the edge and the vertex adjacency maps for all faces. */
static void subdiv_ccg_init_faces_neighborhood(SubdivCCG *subdiv_ccg)
{
  subdiv_ccg_init_faces_edge_neighborhood(subdiv_ccg);
  subdiv_ccg_init_faces_vertex_neighborhood(subdiv_ccg);
}
|
|
|
|
|
2018-09-06 17:06:17 +02:00
|
|
|
/* =============================================================================
|
2018-09-18 14:23:35 +02:00
|
|
|
* Creation / evaluation.
|
2018-09-06 17:06:17 +02:00
|
|
|
*/
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Create a SubdivCCG from an evaluated Subdiv: set up layers, allocate and
 * fill grids, and build face adjacency. The returned CCG takes ownership of
 * `subdiv` (freed by BKE_subdiv_ccg_destroy). Returns NULL when grids
 * evaluation fails. */
SubdivCCG *BKE_subdiv_to_ccg(Subdiv *subdiv,
                             const SubdivToCCGSettings *settings,
                             SubdivCCGMaskEvaluator *mask_evaluator,
                             SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator)
{
  BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
  SubdivCCG *subdiv_ccg = MEM_callocN(sizeof(SubdivCCG), "subdiv ccg");
  subdiv_ccg->subdiv = subdiv;
  /* NOTE(review): assumes settings->resolution is (power of two) + 1, so
   * resolution - 1 has its lowest set bit at the subdivision level — confirm
   * with callers. */
  subdiv_ccg->level = bitscan_forward_i(settings->resolution - 1);
  subdiv_ccg->grid_size = BKE_subdiv_grid_size_from_level(subdiv_ccg->level);
  subdiv_ccg_init_layers(subdiv_ccg, settings);
  subdiv_ccg_alloc_elements(subdiv_ccg, subdiv);
  subdiv_ccg_init_faces(subdiv_ccg);
  subdiv_ccg_init_faces_neighborhood(subdiv_ccg);
  if (!subdiv_ccg_evaluate_grids(subdiv_ccg, subdiv, mask_evaluator, material_flags_evaluator)) {
    BKE_subdiv_ccg_destroy(subdiv_ccg);
    BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
    return NULL;
  }
  BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
  return subdiv_ccg;
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
Mesh *BKE_subdiv_to_ccg_mesh(Subdiv *subdiv,
|
|
|
|
const SubdivToCCGSettings *settings,
|
|
|
|
const Mesh *coarse_mesh)
|
2018-09-06 17:06:17 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Make sure evaluator is ready. */
|
|
|
|
BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
|
2019-09-26 12:01:52 +02:00
|
|
|
if (!BKE_subdiv_eval_update_from_mesh(subdiv, coarse_mesh, NULL)) {
|
2019-04-17 06:17:24 +02:00
|
|
|
if (coarse_mesh->totpoly) {
|
2019-10-02 23:31:24 +02:00
|
|
|
return NULL;
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
|
|
|
|
SubdivCCGMaskEvaluator mask_evaluator;
|
|
|
|
bool has_mask = BKE_subdiv_ccg_mask_init_from_paint(&mask_evaluator, coarse_mesh);
|
|
|
|
SubdivCCGMaterialFlagsEvaluator material_flags_evaluator;
|
|
|
|
BKE_subdiv_ccg_material_flags_init_from_mesh(&material_flags_evaluator, coarse_mesh);
|
|
|
|
SubdivCCG *subdiv_ccg = BKE_subdiv_to_ccg(
|
|
|
|
subdiv, settings, has_mask ? &mask_evaluator : NULL, &material_flags_evaluator);
|
|
|
|
if (has_mask) {
|
|
|
|
mask_evaluator.free(&mask_evaluator);
|
|
|
|
}
|
|
|
|
material_flags_evaluator.free(&material_flags_evaluator);
|
|
|
|
if (subdiv_ccg == NULL) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
Mesh *result = BKE_mesh_new_nomain_from_template(coarse_mesh, 0, 0, 0, 0, 0);
|
|
|
|
result->runtime.subdiv_ccg = subdiv_ccg;
|
|
|
|
return result;
|
2018-09-04 16:16:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Free the CCG and everything it owns: grids and their storage, hidden
 * bitmaps, face descriptors, adjacency maps, and the attached Subdiv. */
void BKE_subdiv_ccg_destroy(SubdivCCG *subdiv_ccg)
{
  const int num_grids = subdiv_ccg->num_grids;
  MEM_SAFE_FREE(subdiv_ccg->grids);
  MEM_SAFE_FREE(subdiv_ccg->grids_storage);
  MEM_SAFE_FREE(subdiv_ccg->edges);
  MEM_SAFE_FREE(subdiv_ccg->vertices);
  MEM_SAFE_FREE(subdiv_ccg->grid_flag_mats);
  /* Per-grid hidden bitmaps. */
  if (subdiv_ccg->grid_hidden != NULL) {
    for (int grid_index = 0; grid_index < num_grids; grid_index++) {
      MEM_freeN(subdiv_ccg->grid_hidden[grid_index]);
    }
    MEM_freeN(subdiv_ccg->grid_hidden);
  }
  /* The attached Subdiv descriptor is freed together with the CCG. */
  if (subdiv_ccg->subdiv != NULL) {
    BKE_subdiv_free(subdiv_ccg->subdiv);
  }
  MEM_SAFE_FREE(subdiv_ccg->faces);
  MEM_SAFE_FREE(subdiv_ccg->grid_faces);
  /* Free map of adjacent edges. */
  for (int i = 0; i < subdiv_ccg->num_adjacent_edges; i++) {
    SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[i];
    for (int face_index = 0; face_index < adjacent_edge->num_adjacent_faces; face_index++) {
      MEM_SAFE_FREE(adjacent_edge->boundary_elements[face_index]);
    }
    MEM_SAFE_FREE(adjacent_edge->boundary_elements);
  }
  MEM_SAFE_FREE(subdiv_ccg->adjacent_edges);
  /* Free map of adjacent vertices. */
  for (int i = 0; i < subdiv_ccg->num_adjacent_vertices; i++) {
    SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[i];
    MEM_SAFE_FREE(adjacent_vertex->corner_elements);
  }
  MEM_SAFE_FREE(subdiv_ccg->adjacent_vertices);
  MEM_freeN(subdiv_ccg);
}
|
|
|
|
|
|
|
|
/* Fill in an access key for the given multires level of the CCG. */
void BKE_subdiv_ccg_key(CCGKey *key, const SubdivCCG *subdiv_ccg, int level)
{
  const int grid_size = BKE_subdiv_grid_size_from_level(level);
  const int elem_size = element_size_bytes_get(subdiv_ccg);
  key->level = level;
  key->elem_size = elem_size;
  key->grid_size = grid_size;
  key->grid_area = grid_size * grid_size;
  key->grid_bytes = elem_size * grid_size * grid_size;
  /* Byte offsets of the optional layers within a single grid element. */
  key->normal_offset = subdiv_ccg->normal_offset;
  key->mask_offset = subdiv_ccg->mask_offset;
  /* Which optional layers this CCG actually stores. */
  key->has_normals = subdiv_ccg->has_normal;
  key->has_mask = subdiv_ccg->has_mask;
}
|
|
|
|
|
|
|
|
/* Convenience wrapper: access key for the finest (stored) level of the CCG. */
void BKE_subdiv_ccg_key_top_level(CCGKey *key, const SubdivCCG *subdiv_ccg)
{
  const int top_level = subdiv_ccg->level;
  BKE_subdiv_ccg_key(key, subdiv_ccg, top_level);
}
|
2018-09-18 14:23:35 +02:00
|
|
|
|
|
|
|
/* =============================================================================
|
|
|
|
* Normals.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Shared (read-only) user data for the inner-normals recalculation tasks. */
typedef struct RecalcInnerNormalsData {
  /* CCG whose normals are being recalculated. */
  SubdivCCG *subdiv_ccg;
  /* Access key for the top level of the CCG. */
  CCGKey *key;
} RecalcInnerNormalsData;
|
|
|
|
|
|
|
|
/* Per-thread state for the inner-normals recalculation tasks. */
typedef struct RecalcInnerNormalsTLSData {
  /* Scratch buffer of per-quad normals, lazily allocated on first use and
   * freed by the task finalize callback. Holds (grid_size - 1)^2 normals. */
  float (*face_normals)[3];
} RecalcInnerNormalsTLSData;
|
|
|
|
|
|
|
|
/* Evaluate high-res face normals, for faces which corresponds to grid elements
|
|
|
|
*
|
|
|
|
* {(x, y), {x + 1, y}, {x + 1, y + 1}, {x, y + 1}}
|
|
|
|
*
|
2019-01-18 11:28:38 +01:00
|
|
|
* The result is stored in normals storage from TLS. */
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_recalc_inner_face_normals(SubdivCCG *subdiv_ccg,
|
|
|
|
CCGKey *key,
|
|
|
|
RecalcInnerNormalsTLSData *tls,
|
|
|
|
const int grid_index)
|
2018-09-18 14:23:35 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
const int grid_size = subdiv_ccg->grid_size;
|
|
|
|
const int grid_size_1 = grid_size - 1;
|
|
|
|
CCGElem *grid = subdiv_ccg->grids[grid_index];
|
|
|
|
if (tls->face_normals == NULL) {
|
|
|
|
tls->face_normals = MEM_malloc_arrayN(
|
|
|
|
grid_size_1 * grid_size_1, 3 * sizeof(float), "CCG TLS normals");
|
|
|
|
}
|
|
|
|
for (int y = 0; y < grid_size - 1; y++) {
|
|
|
|
for (int x = 0; x < grid_size - 1; x++) {
|
|
|
|
CCGElem *grid_elements[4] = {
|
|
|
|
CCG_grid_elem(key, grid, x, y + 1),
|
|
|
|
CCG_grid_elem(key, grid, x + 1, y + 1),
|
|
|
|
CCG_grid_elem(key, grid, x + 1, y),
|
|
|
|
CCG_grid_elem(key, grid, x, y),
|
|
|
|
};
|
|
|
|
float *co[4] = {
|
|
|
|
CCG_elem_co(key, grid_elements[0]),
|
|
|
|
CCG_elem_co(key, grid_elements[1]),
|
|
|
|
CCG_elem_co(key, grid_elements[2]),
|
|
|
|
CCG_elem_co(key, grid_elements[3]),
|
|
|
|
};
|
|
|
|
const int face_index = y * grid_size_1 + x;
|
|
|
|
float *face_normal = tls->face_normals[face_index];
|
|
|
|
normal_quad_v3(face_normal, co[0], co[1], co[2], co[3]);
|
|
|
|
}
|
|
|
|
}
|
2018-09-18 14:23:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Average normals at every grid element, using adjacent faces normals. */
static void subdiv_ccg_average_inner_face_normals(SubdivCCG *subdiv_ccg,
                                                  CCGKey *key,
                                                  RecalcInnerNormalsTLSData *tls,
                                                  const int grid_index)
{
  const int grid_size = subdiv_ccg->grid_size;
  const int grid_size_1 = grid_size - 1;
  CCGElem *grid = subdiv_ccg->grids[grid_index];
  /* Per-quad normals previously computed by
   * subdiv_ccg_recalc_inner_face_normals(), stored row-major as a
   * grid_size_1 x grid_size_1 array. */
  const float(*face_normals)[3] = tls->face_normals;
  for (int y = 0; y < grid_size; y++) {
    for (int x = 0; x < grid_size; x++) {
      float normal_acc[3] = {0.0f, 0.0f, 0.0f};
      int counter = 0;
      /* Accumulate normals of all adjacent faces. */
      /* Quad whose lower-left corner is this element. */
      if (x < grid_size_1 && y < grid_size_1) {
        add_v3_v3(normal_acc, face_normals[y * grid_size_1 + x]);
        counter++;
      }
      if (x >= 1) {
        /* Quad to the left of this element. */
        if (y < grid_size_1) {
          add_v3_v3(normal_acc, face_normals[y * grid_size_1 + (x - 1)]);
          counter++;
        }
        /* Quad below-left of this element. */
        if (y >= 1) {
          add_v3_v3(normal_acc, face_normals[(y - 1) * grid_size_1 + (x - 1)]);
          counter++;
        }
      }
      /* Quad below this element. */
      if (y >= 1 && x < grid_size_1) {
        add_v3_v3(normal_acc, face_normals[(y - 1) * grid_size_1 + x]);
        counter++;
      }
      /* Average by the number of contributing quads and store. Every grid
       * element touches at least one quad, so counter is never zero here. */
      mul_v3_v3fl(CCG_grid_elem_no(key, grid, x, y), normal_acc, 1.0f / (float)counter);
    }
  }
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_recalc_inner_normal_task(void *__restrict userdata_v,
|
|
|
|
const int grid_index,
|
2019-07-30 14:56:47 +02:00
|
|
|
const TaskParallelTLS *__restrict tls_v)
|
2018-09-18 14:23:35 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
RecalcInnerNormalsData *data = userdata_v;
|
|
|
|
RecalcInnerNormalsTLSData *tls = tls_v->userdata_chunk;
|
|
|
|
subdiv_ccg_recalc_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
|
|
|
|
subdiv_ccg_average_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
|
2018-09-18 14:23:35 +02:00
|
|
|
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_recalc_inner_normal_finalize(void *__restrict UNUSED(userdata),
|
|
|
|
void *__restrict tls_v)
|
2018-09-18 14:23:35 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
RecalcInnerNormalsTLSData *tls = tls_v;
|
|
|
|
MEM_SAFE_FREE(tls->face_normals);
|
2018-09-18 14:23:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Recalculate normals which corresponds to non-boundaries elements of grids. */
|
|
|
|
static void subdiv_ccg_recalc_inner_grid_normals(SubdivCCG *subdiv_ccg)
|
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
CCGKey key;
|
|
|
|
BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
|
|
|
|
RecalcInnerNormalsData data = {
|
|
|
|
.subdiv_ccg = subdiv_ccg,
|
|
|
|
.key = &key,
|
|
|
|
};
|
|
|
|
RecalcInnerNormalsTLSData tls_data = {NULL};
|
2019-07-30 14:56:47 +02:00
|
|
|
TaskParallelSettings parallel_range_settings;
|
2019-04-17 06:17:24 +02:00
|
|
|
BLI_parallel_range_settings_defaults(¶llel_range_settings);
|
|
|
|
parallel_range_settings.userdata_chunk = &tls_data;
|
|
|
|
parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
|
|
|
|
parallel_range_settings.func_finalize = subdiv_ccg_recalc_inner_normal_finalize;
|
|
|
|
BLI_task_parallel_range(0,
|
|
|
|
subdiv_ccg->num_grids,
|
|
|
|
&data,
|
|
|
|
subdiv_ccg_recalc_inner_normal_task,
|
|
|
|
¶llel_range_settings);
|
2018-09-18 14:23:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Recalculate all normals stored in the grids: inner elements first, then
 * average boundaries/corners so neighboring grids agree. */
void BKE_subdiv_ccg_recalc_normals(SubdivCCG *subdiv_ccg)
{
  if (!subdiv_ccg->has_normal) {
    /* Grids don't have normals, can do early output. */
    return;
  }
  subdiv_ccg_recalc_inner_grid_normals(subdiv_ccg);
  BKE_subdiv_ccg_average_grids(subdiv_ccg);
}
|
|
|
|
|
2019-02-22 16:56:54 +01:00
|
|
|
/* Shared user data for recalculating normals of modified faces only. */
typedef struct RecalcModifiedInnerNormalsData {
  /* CCG whose normals are being recalculated. */
  SubdivCCG *subdiv_ccg;
  /* Access key for the top level of the CCG. */
  CCGKey *key;
  /* Faces whose grids need their normals recomputed. */
  SubdivCCGFace **effected_ccg_faces;
} RecalcModifiedInnerNormalsData;
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_recalc_modified_inner_normal_task(void *__restrict userdata_v,
|
|
|
|
const int face_index,
|
2019-07-30 14:56:47 +02:00
|
|
|
const TaskParallelTLS *__restrict tls_v)
|
2019-02-22 16:56:54 +01:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
RecalcModifiedInnerNormalsData *data = userdata_v;
|
|
|
|
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
|
|
|
|
CCGKey *key = data->key;
|
|
|
|
RecalcInnerNormalsTLSData *tls = tls_v->userdata_chunk;
|
|
|
|
SubdivCCGFace **faces = data->effected_ccg_faces;
|
|
|
|
SubdivCCGFace *face = faces[face_index];
|
|
|
|
const int num_face_grids = face->num_grids;
|
|
|
|
for (int i = 0; i < num_face_grids; i++) {
|
|
|
|
const int grid_index = face->start_grid_index + i;
|
|
|
|
subdiv_ccg_recalc_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
|
|
|
|
subdiv_ccg_average_inner_face_normals(data->subdiv_ccg, data->key, tls, grid_index);
|
|
|
|
}
|
|
|
|
subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
|
2019-02-22 16:56:54 +01:00
|
|
|
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_recalc_modified_inner_normal_finalize(void *__restrict UNUSED(userdata),
|
|
|
|
void *__restrict tls_v)
|
2019-02-22 16:56:54 +01:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
RecalcInnerNormalsTLSData *tls = tls_v;
|
|
|
|
MEM_SAFE_FREE(tls->face_normals);
|
2019-02-22 16:56:54 +01:00
|
|
|
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Recompute inner normals of all grids belonging to the given faces. */
static void subdiv_ccg_recalc_modified_inner_grid_normals(SubdivCCG *subdiv_ccg,
                                                          struct CCGFace **effected_faces,
                                                          int num_effected_faces)
{
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  RecalcModifiedInnerNormalsData data = {
      .subdiv_ccg = subdiv_ccg,
      .key = &key,
      .effected_ccg_faces = (SubdivCCGFace **)effected_faces,
  };
  RecalcInnerNormalsTLSData tls_data = {NULL};
  /* One task per modified face; each thread keeps a reusable quad-normals
   * buffer which is freed by the finalize callback. */
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.userdata_chunk = &tls_data;
  settings.userdata_chunk_size = sizeof(tls_data);
  settings.func_finalize = subdiv_ccg_recalc_modified_inner_normal_finalize;
  BLI_task_parallel_range(
      0, num_effected_faces, &data, subdiv_ccg_recalc_modified_inner_normal_task, &settings);
}
|
|
|
|
|
|
|
|
/* Update normals of the given modified faces only, then re-average all
 * shared boundaries and corners so neighboring grids agree. */
void BKE_subdiv_ccg_update_normals(SubdivCCG *subdiv_ccg,
                                   struct CCGFace **effected_faces,
                                   int num_effected_faces)
{
  if (num_effected_faces == 0) {
    /* No faces changed, so nothing to do here. */
    return;
  }
  if (!subdiv_ccg->has_normal) {
    /* Grids don't have normals, can do early output. */
    return;
  }
  subdiv_ccg_recalc_modified_inner_grid_normals(subdiv_ccg, effected_faces, num_effected_faces);
  /* TODO(sergey): Only average elements which are adjacent to modified
   * faces. */
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
}
|
|
|
|
|
2018-09-18 17:09:08 +02:00
|
|
|
/* =============================================================================
|
|
|
|
* Boundary averaging/stitching.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Shared user data for per-face inner-grid averaging tasks. */
typedef struct AverageInnerGridsData {
  /* CCG whose grids are being stitched. */
  SubdivCCG *subdiv_ccg;
  /* Access key for the top level of the CCG. */
  CCGKey *key;
} AverageInnerGridsData;
|
|
|
|
|
|
|
|
/* Replace both vectors with their average (a + b) * 0.5. */
static void average_grid_element_value_v3(float a[3], float b[3])
{
  for (int i = 0; i < 3; i++) {
    const float average = (a[i] + b[i]) * 0.5f;
    a[i] = average;
    b[i] = average;
  }
}
|
|
|
|
|
|
|
|
static void average_grid_element(SubdivCCG *subdiv_ccg,
|
|
|
|
CCGKey *key,
|
|
|
|
CCGElem *grid_element_a,
|
|
|
|
CCGElem *grid_element_b)
|
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
average_grid_element_value_v3(CCG_elem_co(key, grid_element_a),
|
|
|
|
CCG_elem_co(key, grid_element_b));
|
|
|
|
if (subdiv_ccg->has_normal) {
|
|
|
|
average_grid_element_value_v3(CCG_elem_no(key, grid_element_a),
|
|
|
|
CCG_elem_no(key, grid_element_b));
|
|
|
|
}
|
|
|
|
if (subdiv_ccg->has_mask) {
|
|
|
|
float mask = (*CCG_elem_mask(key, grid_element_a) + *CCG_elem_mask(key, grid_element_b)) *
|
|
|
|
0.5f;
|
|
|
|
*CCG_elem_mask(key, grid_element_a) = mask;
|
|
|
|
*CCG_elem_mask(key, grid_element_b) = mask;
|
|
|
|
}
|
2018-09-18 17:09:08 +02:00
|
|
|
}
|
|
|
|
|
2019-03-08 17:00:11 +01:00
|
|
|
/* Accumulator to hold data during averaging. */
typedef struct GridElementAccumulator {
  /* Accumulated coordinate. */
  float co[3];
  /* Accumulated normal; only meaningful when the CCG stores normals. */
  float no[3];
  /* Accumulated mask value; only meaningful when the CCG stores masks. */
  float mask;
} GridElementAccumulator;
|
|
|
|
|
|
|
|
/* Reset the accumulator to an empty (all-zero) state. */
static void element_accumulator_init(GridElementAccumulator *accumulator)
{
  accumulator->mask = 0.0f;
  zero_v3(accumulator->co);
  zero_v3(accumulator->no);
}
|
|
|
|
|
|
|
|
static void element_accumulator_add(GridElementAccumulator *accumulator,
|
|
|
|
const SubdivCCG *subdiv_ccg,
|
|
|
|
CCGKey *key,
|
|
|
|
/*const*/ CCGElem *grid_element)
|
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
add_v3_v3(accumulator->co, CCG_elem_co(key, grid_element));
|
|
|
|
if (subdiv_ccg->has_normal) {
|
|
|
|
add_v3_v3(accumulator->no, CCG_elem_no(key, grid_element));
|
|
|
|
}
|
|
|
|
if (subdiv_ccg->has_mask) {
|
|
|
|
accumulator->mask += *CCG_elem_mask(key, grid_element);
|
|
|
|
}
|
2019-03-08 17:00:11 +01:00
|
|
|
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Uniformly scale all accumulated channels, e.g. to finish an average. */
static void element_accumulator_mul_fl(GridElementAccumulator *accumulator, const float f)
{
  accumulator->mask *= f;
  mul_v3_fl(accumulator->co, f);
  mul_v3_fl(accumulator->no, f);
}
|
|
|
|
|
|
|
|
static void element_accumulator_copy(SubdivCCG *subdiv_ccg,
|
|
|
|
CCGKey *key,
|
|
|
|
CCGElem *destination,
|
|
|
|
const GridElementAccumulator *accumulator)
|
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
copy_v3_v3(CCG_elem_co(key, destination), accumulator->co);
|
|
|
|
if (subdiv_ccg->has_normal) {
|
|
|
|
copy_v3_v3(CCG_elem_no(key, destination), accumulator->no);
|
|
|
|
}
|
|
|
|
if (subdiv_ccg->has_mask) {
|
|
|
|
*CCG_elem_mask(key, destination) = accumulator->mask;
|
|
|
|
}
|
2018-09-20 14:30:16 +02:00
|
|
|
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_average_inner_face_grids(SubdivCCG *subdiv_ccg,
|
|
|
|
CCGKey *key,
|
|
|
|
SubdivCCGFace *face)
|
2018-09-18 17:09:08 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
CCGElem **grids = subdiv_ccg->grids;
|
|
|
|
const int num_face_grids = face->num_grids;
|
|
|
|
const int grid_size = subdiv_ccg->grid_size;
|
|
|
|
CCGElem *prev_grid = grids[face->start_grid_index + num_face_grids - 1];
|
2019-07-15 11:28:37 +02:00
|
|
|
/* Average boundary between neighbor grid. */
|
2019-04-17 06:17:24 +02:00
|
|
|
for (int corner = 0; corner < num_face_grids; corner++) {
|
|
|
|
CCGElem *grid = grids[face->start_grid_index + corner];
|
2019-07-15 11:28:37 +02:00
|
|
|
for (int i = 1; i < grid_size; i++) {
|
2019-04-17 06:17:24 +02:00
|
|
|
CCGElem *prev_grid_element = CCG_grid_elem(key, prev_grid, i, 0);
|
|
|
|
CCGElem *grid_element = CCG_grid_elem(key, grid, 0, i);
|
|
|
|
average_grid_element(subdiv_ccg, key, prev_grid_element, grid_element);
|
|
|
|
}
|
|
|
|
prev_grid = grid;
|
|
|
|
}
|
2019-07-15 11:28:37 +02:00
|
|
|
/* Average all grids centers into a single accumulator, and share it.
|
2019-08-01 05:53:25 +02:00
|
|
|
* Guarantees correct and smooth averaging in the center. */
|
2019-07-15 11:28:37 +02:00
|
|
|
GridElementAccumulator center_accumulator;
|
|
|
|
element_accumulator_init(¢er_accumulator);
|
|
|
|
for (int corner = 0; corner < num_face_grids; corner++) {
|
|
|
|
CCGElem *grid = grids[face->start_grid_index + corner];
|
|
|
|
CCGElem *grid_center_element = CCG_grid_elem(key, grid, 0, 0);
|
|
|
|
element_accumulator_add(¢er_accumulator, subdiv_ccg, key, grid_center_element);
|
|
|
|
}
|
|
|
|
element_accumulator_mul_fl(¢er_accumulator, 1.0f / (float)num_face_grids);
|
|
|
|
for (int corner = 0; corner < num_face_grids; corner++) {
|
|
|
|
CCGElem *grid = grids[face->start_grid_index + corner];
|
|
|
|
CCGElem *grid_center_element = CCG_grid_elem(key, grid, 0, 0);
|
|
|
|
element_accumulator_copy(subdiv_ccg, key, grid_center_element, ¢er_accumulator);
|
|
|
|
}
|
2018-09-18 17:35:59 +02:00
|
|
|
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_average_inner_grids_task(void *__restrict userdata_v,
|
|
|
|
const int face_index,
|
2019-07-30 14:56:47 +02:00
|
|
|
const TaskParallelTLS *__restrict UNUSED(tls_v))
|
2018-09-18 17:35:59 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
AverageInnerGridsData *data = userdata_v;
|
|
|
|
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
|
|
|
|
CCGKey *key = data->key;
|
|
|
|
SubdivCCGFace *faces = subdiv_ccg->faces;
|
|
|
|
SubdivCCGFace *face = &faces[face_index];
|
|
|
|
subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
|
2018-09-18 17:09:08 +02:00
|
|
|
}
|
|
|
|
|
2018-09-20 14:30:16 +02:00
|
|
|
/* Shared user data for boundary-averaging tasks. */
typedef struct AverageGridsBoundariesData {
  /* CCG whose boundaries are being averaged. */
  SubdivCCG *subdiv_ccg;
  /* Access key for the top level of the CCG. */
  CCGKey *key;
} AverageGridsBoundariesData;
|
|
|
|
|
2019-03-08 17:00:11 +01:00
|
|
|
/* Per-thread state for boundary-averaging tasks. */
typedef struct AverageGridsBoundariesTLSData {
  /* Scratch array of grid_size * 2 accumulators, lazily allocated in
   * subdiv_ccg_average_grids_boundary() and freed by the finalize callback. */
  GridElementAccumulator *accumulators;
} AverageGridsBoundariesTLSData;
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_average_grids_boundary(SubdivCCG *subdiv_ccg,
|
|
|
|
CCGKey *key,
|
|
|
|
SubdivCCGAdjacentEdge *adjacent_edge,
|
|
|
|
AverageGridsBoundariesTLSData *tls)
|
2018-09-20 14:30:16 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
const int num_adjacent_faces = adjacent_edge->num_adjacent_faces;
|
|
|
|
const int grid_size2 = subdiv_ccg->grid_size * 2;
|
|
|
|
if (num_adjacent_faces == 1) {
|
|
|
|
/* Nothing to average with. */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (tls->accumulators == NULL) {
|
|
|
|
tls->accumulators = MEM_calloc_arrayN(
|
|
|
|
sizeof(GridElementAccumulator), grid_size2, "average accumulators");
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
for (int i = 1; i < grid_size2 - 1; i++) {
|
|
|
|
element_accumulator_init(&tls->accumulators[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
|
|
|
|
for (int i = 1; i < grid_size2 - 1; i++) {
|
|
|
|
CCGElem *grid_element = adjacent_edge->boundary_elements[face_index][i];
|
|
|
|
element_accumulator_add(&tls->accumulators[i], subdiv_ccg, key, grid_element);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (int i = 1; i < grid_size2 - 1; i++) {
|
|
|
|
element_accumulator_mul_fl(&tls->accumulators[i], 1.0f / (float)num_adjacent_faces);
|
|
|
|
}
|
|
|
|
/* Copy averaged value to all the other faces. */
|
|
|
|
for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
|
|
|
|
for (int i = 1; i < grid_size2 - 1; i++) {
|
|
|
|
CCGElem *grid_element = adjacent_edge->boundary_elements[face_index][i];
|
|
|
|
element_accumulator_copy(subdiv_ccg, key, grid_element, &tls->accumulators[i]);
|
|
|
|
}
|
|
|
|
}
|
2018-09-20 14:30:16 +02:00
|
|
|
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_average_grids_boundaries_task(void *__restrict userdata_v,
|
|
|
|
const int adjacent_edge_index,
|
2019-07-30 14:56:47 +02:00
|
|
|
const TaskParallelTLS *__restrict tls_v)
|
2018-09-20 14:30:16 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
AverageGridsBoundariesData *data = userdata_v;
|
|
|
|
AverageGridsBoundariesTLSData *tls = tls_v->userdata_chunk;
|
|
|
|
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
|
|
|
|
CCGKey *key = data->key;
|
|
|
|
SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[adjacent_edge_index];
|
|
|
|
subdiv_ccg_average_grids_boundary(subdiv_ccg, key, adjacent_edge, tls);
|
2019-03-08 17:00:11 +01:00
|
|
|
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_average_grids_boundaries_finalize(void *__restrict UNUSED(userdata),
|
|
|
|
void *__restrict tls_v)
|
2019-03-08 17:00:11 +01:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
AverageGridsBoundariesTLSData *tls = tls_v;
|
|
|
|
MEM_SAFE_FREE(tls->accumulators);
|
2018-09-20 14:30:16 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Shared user data for corner-averaging tasks. */
typedef struct AverageGridsCornerData {
  /* CCG whose corners are being averaged. */
  SubdivCCG *subdiv_ccg;
  /* Access key for the top level of the CCG. */
  CCGKey *key;
} AverageGridsCornerData;
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_average_grids_corners(SubdivCCG *subdiv_ccg,
|
|
|
|
CCGKey *key,
|
|
|
|
SubdivCCGAdjacentVertex *adjacent_vertex)
|
2018-09-20 14:30:16 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
const int num_adjacent_faces = adjacent_vertex->num_adjacent_faces;
|
|
|
|
if (num_adjacent_faces == 1) {
|
|
|
|
/* Nothing to average with. */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
GridElementAccumulator accumulator;
|
|
|
|
element_accumulator_init(&accumulator);
|
|
|
|
for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
|
|
|
|
CCGElem *grid_element = adjacent_vertex->corner_elements[face_index];
|
|
|
|
element_accumulator_add(&accumulator, subdiv_ccg, key, grid_element);
|
|
|
|
}
|
|
|
|
element_accumulator_mul_fl(&accumulator, 1.0f / (float)num_adjacent_faces);
|
|
|
|
/* Copy averaged value to all the other faces. */
|
|
|
|
for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
|
|
|
|
CCGElem *grid_element = adjacent_vertex->corner_elements[face_index];
|
|
|
|
element_accumulator_copy(subdiv_ccg, key, grid_element, &accumulator);
|
|
|
|
}
|
2018-09-20 14:30:16 +02:00
|
|
|
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
static void subdiv_ccg_average_grids_corners_task(void *__restrict userdata_v,
|
|
|
|
const int adjacent_vertex_index,
|
2019-07-30 14:56:47 +02:00
|
|
|
const TaskParallelTLS *__restrict UNUSED(tls_v))
|
2018-09-20 14:30:16 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
AverageGridsCornerData *data = userdata_v;
|
|
|
|
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
|
|
|
|
CCGKey *key = data->key;
|
|
|
|
SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[adjacent_vertex_index];
|
|
|
|
subdiv_ccg_average_grids_corners(subdiv_ccg, key, adjacent_vertex);
|
2018-09-20 14:30:16 +02:00
|
|
|
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Average all elements shared along coarse edges between grids of different
 * faces. Parallelized over adjacent edges with a per-thread accumulator
 * buffer which is freed by the finalize callback. */
static void subdiv_ccg_average_all_boundaries(SubdivCCG *subdiv_ccg, CCGKey *key)
{
  AverageGridsBoundariesData boundaries_data = {
      .subdiv_ccg = subdiv_ccg,
      .key = key,
  };
  AverageGridsBoundariesTLSData tls_data = {NULL};
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.userdata_chunk = &tls_data;
  settings.userdata_chunk_size = sizeof(tls_data);
  settings.func_finalize = subdiv_ccg_average_grids_boundaries_finalize;
  BLI_task_parallel_range(0,
                          subdiv_ccg->num_adjacent_edges,
                          &boundaries_data,
                          subdiv_ccg_average_grids_boundaries_task,
                          &settings);
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Average all grid corner elements shared at coarse vertices, one task per
 * adjacent vertex. */
static void subdiv_ccg_average_all_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
{
  AverageGridsCornerData corner_data = {
      .subdiv_ccg = subdiv_ccg,
      .key = key,
  };
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  BLI_task_parallel_range(0,
                          subdiv_ccg->num_adjacent_vertices,
                          &corner_data,
                          subdiv_ccg_average_grids_corners_task,
                          &settings);
}
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Stitch boundaries first, then the corners where boundaries meet. */
static void subdiv_ccg_average_all_boundaries_and_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
{
  subdiv_ccg_average_all_boundaries(subdiv_ccg, key);
  subdiv_ccg_average_all_corners(subdiv_ccg, key);
}
|
|
|
|
|
2018-09-18 17:09:08 +02:00
|
|
|
/* Stitch all grids of the CCG: inner (per-face) boundaries first, then the
 * boundaries and corners which are shared across faces. */
void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg)
{
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  /* Average inner boundaries of grids (within one face), across faces
   * from different face-corners. */
  AverageInnerGridsData inner_data = {
      .subdiv_ccg = subdiv_ccg,
      .key = &key,
  };
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  BLI_task_parallel_range(
      0, subdiv_ccg->num_faces, &inner_data, subdiv_ccg_average_inner_grids_task, &settings);
  subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
}
|
2018-09-18 17:46:00 +02:00
|
|
|
|
|
|
|
/* Shared user data for stitching inner grids of specific faces. */
typedef struct StitchFacesInnerGridsData {
  /* CCG whose grids are being stitched. */
  SubdivCCG *subdiv_ccg;
  /* Access key for the top level of the CCG. */
  CCGKey *key;
  /* Faces whose inner grid boundaries are to be stitched. */
  struct CCGFace **effected_ccg_faces;
} StitchFacesInnerGridsData;
|
|
|
|
|
|
|
|
static void subdiv_ccg_stitch_face_inner_grids_task(
|
2019-04-17 06:17:24 +02:00
|
|
|
void *__restrict userdata_v,
|
|
|
|
const int face_index,
|
2019-07-30 14:56:47 +02:00
|
|
|
const TaskParallelTLS *__restrict UNUSED(tls_v))
|
2018-09-18 17:46:00 +02:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
StitchFacesInnerGridsData *data = userdata_v;
|
|
|
|
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
|
|
|
|
CCGKey *key = data->key;
|
|
|
|
struct CCGFace **effected_ccg_faces = data->effected_ccg_faces;
|
|
|
|
struct CCGFace *effected_ccg_face = effected_ccg_faces[face_index];
|
|
|
|
SubdivCCGFace *face = (SubdivCCGFace *)effected_ccg_face;
|
|
|
|
subdiv_ccg_average_inner_face_grids(subdiv_ccg, key, face);
|
2018-09-18 17:46:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Stitch the given faces' inner grid boundaries, then re-average all shared
 * boundaries and corners across faces. */
void BKE_subdiv_ccg_average_stitch_faces(SubdivCCG *subdiv_ccg,
                                         struct CCGFace **effected_faces,
                                         int num_effected_faces)
{
  CCGKey key;
  BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
  StitchFacesInnerGridsData data = {
      .subdiv_ccg = subdiv_ccg,
      .key = &key,
      .effected_ccg_faces = effected_faces,
  };
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  BLI_task_parallel_range(
      0, num_effected_faces, &data, subdiv_ccg_stitch_face_inner_grids_task, &settings);
  /* TODO(sergey): Only average elements which are adjacent to modified
   * faces. */
  subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
}
|
2018-12-18 14:19:55 +01:00
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
void BKE_subdiv_ccg_topology_counters(const SubdivCCG *subdiv_ccg,
|
|
|
|
int *r_num_vertices,
|
|
|
|
int *r_num_edges,
|
|
|
|
int *r_num_faces,
|
|
|
|
int *r_num_loops)
|
2018-12-18 14:19:55 +01:00
|
|
|
{
|
2019-04-17 06:17:24 +02:00
|
|
|
const int num_grids = subdiv_ccg->num_grids;
|
|
|
|
const int grid_size = subdiv_ccg->grid_size;
|
|
|
|
const int grid_area = grid_size * grid_size;
|
|
|
|
const int num_edges_per_grid = 2 * (grid_size * (grid_size - 1));
|
|
|
|
*r_num_vertices = num_grids * grid_area;
|
|
|
|
*r_num_edges = num_grids * num_edges_per_grid;
|
|
|
|
*r_num_faces = num_grids * (grid_size - 1) * (grid_size - 1);
|
|
|
|
*r_num_loops = *r_num_faces * 4;
|
2018-12-18 14:19:55 +01:00
|
|
|
}
|