2022-02-10 23:07:11 +01:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later
|
|
|
|
* Copyright 2007 by Nicholas Bishop. All rights reserved. */
|
2007-12-29 18:07:55 +01:00
|
|
|
|
2019-02-17 22:08:12 +01:00
|
|
|
/** \file
|
|
|
|
* \ingroup bke
|
2011-02-27 21:40:57 +01:00
|
|
|
*/
|
|
|
|
|
2007-12-29 18:07:55 +01:00
|
|
|
#include "MEM_guardedalloc.h"
|
|
|
|
|
2011-12-04 07:05:48 +01:00
|
|
|
/* for reading old multires */
|
|
|
|
#define DNA_DEPRECATED_ALLOW
|
|
|
|
|
2007-12-29 18:07:55 +01:00
|
|
|
#include "DNA_mesh_types.h"
|
|
|
|
#include "DNA_meshdata_types.h"
|
|
|
|
#include "DNA_object_types.h"
|
2009-01-06 19:59:03 +01:00
|
|
|
#include "DNA_scene_types.h"
|
2007-12-29 18:07:55 +01:00
|
|
|
|
2012-03-14 07:32:03 +01:00
|
|
|
#include "BLI_bitmap.h"
|
2007-12-29 18:07:55 +01:00
|
|
|
#include "BLI_blenlib.h"
|
2010-03-22 12:59:36 +01:00
|
|
|
#include "BLI_math.h"
|
2018-01-11 17:56:18 +01:00
|
|
|
#include "BLI_task.h"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "BLI_utildefines.h"
|
2007-12-29 18:07:55 +01:00
|
|
|
|
2012-05-10 22:33:09 +02:00
|
|
|
#include "BKE_ccg.h"
|
2009-01-06 19:59:03 +01:00
|
|
|
#include "BKE_cdderivedmesh.h"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "BKE_editmesh.h"
|
2009-01-04 15:14:06 +01:00
|
|
|
#include "BKE_mesh.h"
|
2013-12-12 06:26:11 +01:00
|
|
|
#include "BKE_mesh_mapping.h"
|
2018-06-05 16:58:08 +02:00
|
|
|
#include "BKE_mesh_runtime.h"
|
2009-01-06 19:59:03 +01:00
|
|
|
#include "BKE_modifier.h"
|
2007-12-29 18:07:55 +01:00
|
|
|
#include "BKE_multires.h"
|
2010-03-22 12:59:36 +01:00
|
|
|
#include "BKE_paint.h"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "BKE_pbvh.h"
|
2010-01-25 12:39:56 +01:00
|
|
|
#include "BKE_scene.h"
|
2018-09-14 10:56:54 +02:00
|
|
|
#include "BKE_subdiv_ccg.h"
|
2009-01-06 19:59:03 +01:00
|
|
|
#include "BKE_subsurf.h"
|
2011-01-07 20:18:31 +01:00
|
|
|
|
2010-10-25 10:03:05 +02:00
|
|
|
#include "BKE_object.h"
|
2009-11-25 15:07:12 +01:00
|
|
|
|
|
|
|
#include "CCGSubSurf.h"
|
2007-12-29 18:07:55 +01:00
|
|
|
|
2018-08-16 17:00:24 +02:00
|
|
|
#include "DEG_depsgraph_query.h"
|
|
|
|
|
2020-03-03 12:35:51 +01:00
|
|
|
#include "multires_reshape.h"
|
|
|
|
|
2007-12-29 18:07:55 +01:00
|
|
|
#include <math.h>
|
2009-01-06 19:59:03 +01:00
|
|
|
#include <string.h>
|
2007-12-29 18:07:55 +01:00
|
|
|
|
2009-01-06 19:59:03 +01:00
|
|
|
/* MULTIRES MODIFIER */

/* Total number of grid elements per subdivision level (index is the level);
 * matches ((1 << (level - 1)) + 1)^2, i.e. the square of the side length. */
static const int multires_grid_tot[] = {
    0, 4, 9, 25, 81, 289, 1089, 4225, 16641, 66049, 263169, 1050625, 4198401, 16785409};
/* Grid side length per subdivision level (index is the level);
 * matches (1 << (level - 1)) + 1. */
static const int multires_side_tot[] = {
    0, 2, 3, 5, 9, 17, 33, 65, 129, 257, 513, 1025, 2049, 4097};

/* See multiresModifier_disp_run for description of each operation */
typedef enum {
  APPLY_DISPLACEMENTS,
  CALC_DISPLACEMENTS,
  ADD_DISPLACEMENTS,
} DispOp;

static void multiresModifier_disp_run(
    DerivedMesh *dm, Mesh *me, DerivedMesh *dm2, DispOp op, CCGElem **oldGridData, int totlvl);
|
2009-01-06 19:59:03 +01:00
|
|
|
|
2019-03-19 05:17:46 +01:00
|
|
|
/** Customdata */
|
2012-03-19 06:13:41 +01:00
|
|
|
|
|
|
|
/* Remove multires grid custom-data (displacements and grid paint masks)
 * from a mesh, handling both edit-mode (BMesh loop layers) and object-mode
 * (Mesh loop layers) storage. */
void multires_customdata_delete(Mesh *me)
{
  BMEditMesh *em = me->edit_mesh;

  if (em != NULL) {
    /* CustomData_external_remove is used here only to mark layer
     * as non-external for further free-ing, so zero element count
     * looks safer than em->totface */
    CustomData_external_remove(&em->bm->ldata, &me->id, CD_MDISPS, 0);

    if (CustomData_has_layer(&em->bm->ldata, CD_MDISPS)) {
      BM_data_layer_free(em->bm, &em->bm->ldata, CD_MDISPS);
    }
    if (CustomData_has_layer(&em->bm->ldata, CD_GRID_PAINT_MASK)) {
      BM_data_layer_free(em->bm, &em->bm->ldata, CD_GRID_PAINT_MASK);
    }
  }
  else {
    CustomData_external_remove(&me->ldata, &me->id, CD_MDISPS, me->totloop);
    CustomData_free_layer_active(&me->ldata, CD_MDISPS, me->totloop);
    CustomData_free_layer_active(&me->ldata, CD_GRID_PAINT_MASK, me->totloop);
  }
}
|
|
|
|
|
2019-03-19 05:17:46 +01:00
|
|
|
/** Grid hiding */
|
2016-01-18 10:10:18 +01:00
|
|
|
/* Up-sample a per-grid-element "hidden" bitmap from lo_level to hi_level.
 *
 * Returns a newly allocated bitmap at hi_level resolution (caller frees).
 * Each low-res element's value is splatted onto the surrounding block of
 * high-res elements; when `prev_hidden` (an existing hi_level bitmap) is
 * provided, its values take precedence where they agree with the low-res
 * value. */
static BLI_bitmap *multires_mdisps_upsample_hidden(BLI_bitmap *lo_hidden,
                                                   int lo_level,
                                                   int hi_level,

                                                   /* assumed to be at hi_level (or null) */
                                                   const BLI_bitmap *prev_hidden)
{
  BLI_bitmap *subd;
  int hi_gridsize = BKE_ccg_gridsize(hi_level);
  int lo_gridsize = BKE_ccg_gridsize(lo_level);
  int yh, xh, xl, yl, xo, yo, hi_ndx;
  int offset, factor;

  BLI_assert(lo_level <= hi_level);

  /* fast case: same resolution, just duplicate the bitmap */
  if (lo_level == hi_level) {
    return MEM_dupallocN(lo_hidden);
  }

  subd = BLI_BITMAP_NEW(square_i(hi_gridsize), "MDisps.hidden upsample");

  factor = BKE_ccg_factor(lo_level, hi_level);
  offset = 1 << (hi_level - lo_level - 1);

  /* low-res blocks */
  for (yl = 0; yl < lo_gridsize; yl++) {
    for (xl = 0; xl < lo_gridsize; xl++) {
      int lo_val = BLI_BITMAP_TEST(lo_hidden, yl * lo_gridsize + xl);

      /* high-res blocks: the +/-offset neighborhood around the low-res
       * sample's position in the high-res grid, clamped to grid bounds */
      for (yo = -offset; yo <= offset; yo++) {
        yh = yl * factor + yo;
        if (yh < 0 || yh >= hi_gridsize) {
          continue;
        }

        for (xo = -offset; xo <= offset; xo++) {
          xh = xl * factor + xo;
          if (xh < 0 || xh >= hi_gridsize) {
            continue;
          }

          hi_ndx = yh * hi_gridsize + xh;

          if (prev_hidden) {
            /* If prev_hidden is available, copy it to
             * subd, except when the equivalent element in
             * lo_hidden is different */
            /* NOTE(review): `prev_hidden[hi_ndx]` indexes the bitmap's
             * underlying storage words directly, while every other access
             * in this function goes through BLI_BITMAP_TEST (bit index).
             * This looks like a word-vs-bit index mixup -- confirm against
             * the BLI_bitmap API before relying on this branch. */
            if (lo_val != prev_hidden[hi_ndx]) {
              BLI_BITMAP_SET(subd, hi_ndx, lo_val);
            }
            else {
              BLI_BITMAP_SET(subd, hi_ndx, prev_hidden[hi_ndx]);
            }
          }
          else {
            BLI_BITMAP_SET(subd, hi_ndx, lo_val);
          }
        }
      }
    }
  }

  return subd;
}
|
|
|
|
|
2020-07-13 11:27:09 +02:00
|
|
|
/* Down-sample a per-grid-element "hidden" bitmap from old_level to
 * new_level by point-sampling every factor-th element of the old grid.
 * Returns a newly allocated bitmap (caller frees). */
static BLI_bitmap *multires_mdisps_downsample_hidden(const BLI_bitmap *old_hidden,
                                                     int old_level,
                                                     int new_level)
{
  BLI_assert(new_level <= old_level);

  const int old_gridsize = BKE_ccg_gridsize(old_level);
  const int new_gridsize = BKE_ccg_gridsize(new_level);
  const int factor = BKE_ccg_factor(new_level, old_level);

  BLI_bitmap *new_hidden = BLI_BITMAP_NEW(square_i(new_gridsize), "downsample hidden");

  for (int row = 0; row < new_gridsize; row++) {
    for (int col = 0; col < new_gridsize; col++) {
      const int sampled = BLI_BITMAP_TEST(old_hidden,
                                          factor * row * old_gridsize + col * factor);
      BLI_BITMAP_SET(new_hidden, row * new_gridsize + col, sampled);
    }
  }

  return new_hidden;
}
|
|
|
|
|
|
|
|
/* Copy per-grid hidden flags from the mesh's CD_MDISPS layer into the CCG
 * derived mesh, down-sampling each grid's bitmap to the requested level. */
static void multires_output_hidden_to_ccgdm(CCGDerivedMesh *ccgdm, Mesh *me, int level)
{
  const MPoly *polys = BKE_mesh_polys(me);
  const MDisps *mdisps = CustomData_get_layer(&me->ldata, CD_MDISPS);
  BLI_bitmap **grid_hidden = ccgdm->gridHidden;
  int *gridOffset = ccgdm->dm.getGridOffset(&ccgdm->dm);

  for (int poly_index = 0; poly_index < me->totpoly; poly_index++) {
    for (int corner = 0; corner < polys[poly_index].totloop; corner++) {
      /* One grid per face corner; gridOffset maps polys to grid indices. */
      const int grid_index = gridOffset[poly_index] + corner;
      const MDisps *md = &mdisps[grid_index];

      if (md->hidden != NULL) {
        grid_hidden[grid_index] = multires_mdisps_downsample_hidden(
            md->hidden, md->level, level);
      }
    }
  }
}
|
|
|
|
|
|
|
|
/* subdivide mdisps.hidden if needed (assumes that md.level reflects
|
2012-04-22 13:54:53 +02:00
|
|
|
* the current level of md.hidden) */
|
2012-03-14 07:32:03 +01:00
|
|
|
static void multires_mdisps_subdivide_hidden(MDisps *md, int new_level)
|
|
|
|
{
|
2013-07-23 01:20:48 +02:00
|
|
|
BLI_bitmap *subd;
|
2018-06-17 17:05:51 +02:00
|
|
|
|
2012-03-14 07:32:03 +01:00
|
|
|
BLI_assert(md->hidden);
|
|
|
|
|
|
|
|
/* nothing to do if already subdivided enough */
|
2019-04-22 01:39:35 +02:00
|
|
|
if (md->level >= new_level) {
|
2012-03-14 07:32:03 +01:00
|
|
|
return;
|
2019-04-22 01:39:35 +02:00
|
|
|
}
|
2012-03-14 07:32:03 +01:00
|
|
|
|
|
|
|
subd = multires_mdisps_upsample_hidden(md->hidden, md->level, new_level, NULL);
|
2018-06-17 17:05:51 +02:00
|
|
|
|
2012-03-14 07:32:03 +01:00
|
|
|
/* swap in the subdivided data */
|
|
|
|
MEM_freeN(md->hidden);
|
|
|
|
md->hidden = subd;
|
|
|
|
}
|
|
|
|
|
2020-03-17 14:20:14 +01:00
|
|
|
Mesh *BKE_multires_create_mesh(struct Depsgraph *depsgraph,
|
|
|
|
Object *object,
|
|
|
|
MultiresModifierData *mmd)
|
2018-08-16 13:00:24 +02:00
|
|
|
{
|
2020-03-17 14:20:14 +01:00
|
|
|
Object *object_eval = DEG_get_evaluated_object(depsgraph, object);
|
2020-03-17 14:18:27 +01:00
|
|
|
Scene *scene_eval = DEG_get_evaluated_scene(depsgraph);
|
2020-03-17 14:20:14 +01:00
|
|
|
Mesh *deformed_mesh = mesh_get_eval_deform(
|
|
|
|
depsgraph, scene_eval, object_eval, &CD_MASK_BAREMESH);
|
2018-08-16 13:00:24 +02:00
|
|
|
ModifierEvalContext modifier_ctx = {
|
2019-01-06 14:06:58 +01:00
|
|
|
.depsgraph = depsgraph,
|
2020-03-17 14:20:14 +01:00
|
|
|
.object = object_eval,
|
2019-01-06 14:06:58 +01:00
|
|
|
.flag = MOD_APPLY_USECACHE | MOD_APPLY_IGNORE_SIMPLIFY,
|
|
|
|
};
|
2018-09-20 12:04:17 +02:00
|
|
|
|
2020-05-08 10:14:02 +02:00
|
|
|
const ModifierTypeInfo *mti = BKE_modifier_get_info(mmd->modifier.type);
|
2020-04-21 13:09:41 +02:00
|
|
|
Mesh *result = mti->modifyMesh(&mmd->modifier, &modifier_ctx, deformed_mesh);
|
2018-09-20 12:04:17 +02:00
|
|
|
|
2018-08-16 13:00:24 +02:00
|
|
|
if (result == deformed_mesh) {
|
2018-09-20 12:33:45 +02:00
|
|
|
result = BKE_mesh_copy_for_eval(deformed_mesh, true);
|
2018-08-16 13:00:24 +02:00
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2020-03-17 17:34:04 +01:00
|
|
|
/* Evaluate base-mesh vertex coordinates with every enabled deform-only
 * modifier preceding the given multires modifier applied.
 *
 * Returns a newly allocated coordinate array (caller frees). The vertex
 * count is optionally written to r_num_deformed_verts. */
float (*BKE_multires_create_deformed_base_mesh_vert_coords(struct Depsgraph *depsgraph,
                                                           struct Object *object,
                                                           struct MultiresModifierData *mmd,
                                                           int *r_num_deformed_verts))[3]
{
  Scene *scene_eval = DEG_get_evaluated_scene(depsgraph);
  Object *object_eval = DEG_get_evaluated_object(depsgraph, object);

  /* Shallow copy of the evaluated object, re-pointed at the original mesh
   * data and stripped of the sculpt session, for modifier evaluation. */
  Object object_for_eval = *object_eval;
  object_for_eval.data = object->data;
  object_for_eval.sculpt = NULL;

  const bool use_render = (DEG_get_mode(depsgraph) == DAG_EVAL_RENDER);
  ModifierEvalContext mesh_eval_context = {depsgraph, &object_for_eval, 0};
  if (use_render) {
    mesh_eval_context.flag |= MOD_APPLY_RENDER;
  }
  const int required_mode = use_render ? eModifierMode_Render : eModifierMode_Realtime;

  VirtualModifierData virtual_modifier_data;
  ModifierData *first_md = BKE_modifiers_get_virtual_modifierlist(&object_for_eval,
                                                                  &virtual_modifier_data);

  Mesh *base_mesh = object->data;

  int num_deformed_verts;
  float(*deformed_verts)[3] = BKE_mesh_vert_coords_alloc(base_mesh, &num_deformed_verts);

  /* Walk the modifier stack up to (but not including) the multires modifier,
   * applying enabled deform-only modifiers in stack order; any other modifier
   * type ends the walk. */
  for (ModifierData *md = first_md; md != NULL; md = md->next) {
    const ModifierTypeInfo *mti = BKE_modifier_get_info(md->type);

    if (md == &mmd->modifier) {
      break;
    }

    if (!BKE_modifier_is_enabled(scene_eval, md, required_mode)) {
      continue;
    }

    if (mti->type != eModifierTypeType_OnlyDeform) {
      break;
    }

    BKE_modifier_deform_verts(
        md, &mesh_eval_context, base_mesh, deformed_verts, num_deformed_verts);
  }

  if (r_num_deformed_verts != NULL) {
    *r_num_deformed_verts = num_deformed_verts;
  }
  return deformed_verts;
}
|
|
|
|
|
2010-07-05 14:20:49 +02:00
|
|
|
/* Walk backwards from lastmd and return the first real-time enabled multires
 * modifier encountered, or NULL when there is none. */
MultiresModifierData *find_multires_modifier_before(Scene *scene, ModifierData *lastmd)
{
  for (ModifierData *md = lastmd; md != NULL; md = md->prev) {
    if (md->type != eModifierType_Multires) {
      continue;
    }
    if (BKE_modifier_is_enabled(scene, md, eModifierMode_Realtime)) {
      return (MultiresModifierData *)md;
    }
  }

  return NULL;
}
|
2009-05-23 09:12:55 +02:00
|
|
|
|
2013-03-20 00:17:44 +01:00
|
|
|
/* Return the first real-time enabled multires modifier on the object.
 * When none is enabled and use_first is set, fall back to the first
 * multires modifier of any state; otherwise return NULL. */
MultiresModifierData *get_multires_modifier(Scene *scene, Object *ob, bool use_first)
{
  MultiresModifierData *active = NULL;
  MultiresModifierData *fallback = NULL;

  for (ModifierData *md = ob->modifiers.first; md; md = md->next) {
    if (md->type != eModifierType_Multires) {
      continue;
    }

    if (fallback == NULL) {
      fallback = (MultiresModifierData *)md;
    }

    if (BKE_modifier_is_enabled(scene, md, eModifierMode_Realtime)) {
      active = (MultiresModifierData *)md;
      break;
    }
  }

  if (active == NULL && use_first) {
    /* No enabled multires found -- use the first one, if any. */
    return fallback;
  }

  return active;
}
|
|
|
|
|
2018-08-21 15:40:23 +02:00
|
|
|
int multires_get_level(const Scene *scene,
|
|
|
|
const Object *ob,
|
|
|
|
const MultiresModifierData *mmd,
|
2018-08-14 12:17:10 +02:00
|
|
|
bool render,
|
|
|
|
bool ignore_simplify)
|
2009-11-25 15:07:12 +01:00
|
|
|
{
|
2019-04-22 01:39:35 +02:00
|
|
|
if (render) {
|
2018-06-18 11:21:33 +02:00
|
|
|
return (scene != NULL) ? get_render_subsurf_level(&scene->r, mmd->renderlvl, true) :
|
|
|
|
mmd->renderlvl;
|
2019-04-22 01:39:35 +02:00
|
|
|
}
|
2020-08-07 12:30:43 +02:00
|
|
|
if (ob->mode == OB_MODE_SCULPT) {
|
2020-04-30 15:41:45 +02:00
|
|
|
return mmd->sculptlvl;
|
2019-04-22 01:39:35 +02:00
|
|
|
}
|
2020-08-07 12:30:43 +02:00
|
|
|
if (ignore_simplify) {
|
2014-06-12 09:49:46 +02:00
|
|
|
return mmd->lvl;
|
2019-04-22 01:39:35 +02:00
|
|
|
}
|
2020-08-07 12:30:43 +02:00
|
|
|
|
|
|
|
return (scene != NULL) ? get_render_subsurf_level(&scene->r, mmd->lvl, false) : mmd->lvl;
|
2009-05-23 09:12:55 +02:00
|
|
|
}
|
|
|
|
|
2018-04-05 18:20:27 +02:00
|
|
|
/* Set the modifier's total subdivision level, raising (and clamping) the
 * per-mode view levels so they stay within [0, totlvl]. */
void multires_set_tot_level(Object *ob, MultiresModifierData *mmd, int lvl)
{
  mmd->totlvl = lvl;

  /* In sculpt mode the viewport level is left alone. */
  if (ob->mode != OB_MODE_SCULPT) {
    mmd->lvl = CLAMPIS(MAX2(mmd->lvl, lvl), 0, mmd->totlvl);
  }

  mmd->sculptlvl = CLAMPIS(MAX2(mmd->sculptlvl, lvl), 0, mmd->totlvl);
  mmd->renderlvl = CLAMPIS(MAX2(mmd->renderlvl, lvl), 0, mmd->totlvl);
}
|
|
|
|
|
2018-09-14 10:56:54 +02:00
|
|
|
/* Translate multires modification flags into SubdivCCG dirty tags.
 * Flags that are not set leave the corresponding tag untouched. */
static void multires_ccg_mark_as_modified(SubdivCCG *subdiv_ccg, MultiresModifiedFlags flags)
{
  if ((flags & MULTIRES_COORDS_MODIFIED) != 0) {
    subdiv_ccg->dirty.coords = true;
  }
  if ((flags & MULTIRES_HIDDEN_MODIFIED) != 0) {
    subdiv_ccg->dirty.hidden = true;
  }
}
|
|
|
|
|
2019-07-09 12:12:18 +02:00
|
|
|
/* Tag the evaluated object's CCG as modified so the next object
 * re-evaluation flushes the changes. */
void multires_mark_as_modified(Depsgraph *depsgraph, Object *object, MultiresModifiedFlags flags)
{
  if (object == NULL) {
    return;
  }

  /* NOTE: CCG live inside of evaluated object.
   *
   * While this is a bit weird to tag the only one, this is how other areas were built
   * historically: they are tagging multires for update and then rely on object re-evaluation to
   * do an actual update.
   *
   * In a longer term maybe special dependency graph tag can help sanitizing this a bit. */
  Object *object_eval = DEG_get_evaluated_object(depsgraph, object);
  Mesh *mesh_eval = object_eval->data;
  SubdivCCG *subdiv_ccg = mesh_eval->runtime.subdiv_ccg;

  if (subdiv_ccg == NULL) {
    return;
  }

  multires_ccg_mark_as_modified(subdiv_ccg, flags);
}
|
|
|
|
|
2020-02-28 12:05:48 +01:00
|
|
|
/* Flush pending sculpt-session CCG edits (coordinates / hidden flags) back
 * into the mesh's multires displacement data, then clear the dirty tags.
 * A no-op unless the object is being grid-sculpted with an active multires
 * modifier and has something dirty to flush. */
void multires_flush_sculpt_updates(Object *object)
{
  if (object == NULL) {
    return;
  }

  SculptSession *sculpt_session = object->sculpt;
  if (sculpt_session == NULL || sculpt_session->pbvh == NULL) {
    return;
  }

  if (BKE_pbvh_type(sculpt_session->pbvh) != PBVH_GRIDS) {
    return;
  }
  if (!sculpt_session->multires.active || sculpt_session->multires.modifier == NULL) {
    return;
  }

  SubdivCCG *subdiv_ccg = sculpt_session->subdiv_ccg;
  if (subdiv_ccg == NULL) {
    return;
  }

  /* Nothing dirty: nothing to write back. */
  if (!subdiv_ccg->dirty.coords && !subdiv_ccg->dirty.hidden) {
    return;
  }

  Mesh *mesh = object->data;
  multiresModifier_reshapeFromCCG(
      sculpt_session->multires.modifier->totlvl, mesh, sculpt_session->subdiv_ccg);

  subdiv_ccg->dirty.coords = false;
  subdiv_ccg->dirty.hidden = false;
}
|
|
|
|
|
2020-02-28 12:05:48 +01:00
|
|
|
/* Flush any pending sculpt updates, then drop the PBVH and vertex-to-poly
 * maps so the sculpt acceleration structures get rebuilt from scratch. */
void multires_force_sculpt_rebuild(Object *object)
{
  multires_flush_sculpt_updates(object);

  if (object == NULL) {
    return;
  }
  SculptSession *ss = object->sculpt;
  if (ss == NULL) {
    return;
  }

  if (ss->pbvh != NULL) {
    BKE_pbvh_free(ss->pbvh);
    ss->pbvh = NULL;
  }

  MEM_SAFE_FREE(ss->pmap);
  MEM_SAFE_FREE(ss->pmap_mem);
}
|
|
|
|
|
2020-02-28 12:05:48 +01:00
|
|
|
/* Re-read externally stored displacement layers from disk and force the
 * sculpt acceleration structures to rebuild against the fresh data. */
void multires_force_external_reload(Object *object)
{
  Mesh *mesh = BKE_mesh_from_object(object);

  CustomData_external_reload(&mesh->ldata, &mesh->id, CD_MASK_MDISPS, mesh->totloop);
  multires_force_sculpt_rebuild(object);
}
|
|
|
|
|
2010-09-09 02:14:51 +02:00
|
|
|
/* reset the multires levels to match the number of mdisps */
|
2010-11-04 17:00:28 +01:00
|
|
|
static int get_levels_from_disps(Object *ob)
|
2010-09-09 02:14:51 +02:00
|
|
|
{
|
|
|
|
Mesh *me = ob->data;
|
2022-09-07 07:06:31 +02:00
|
|
|
const MPoly *polys = BKE_mesh_polys(me);
|
2011-02-27 07:19:40 +01:00
|
|
|
MDisps *mdisp, *md;
|
2012-05-06 19:22:54 +02:00
|
|
|
int i, j, totlvl = 0;
|
2010-09-09 02:14:51 +02:00
|
|
|
|
2011-02-27 07:19:40 +01:00
|
|
|
mdisp = CustomData_get_layer(&me->ldata, CD_MDISPS);
|
2012-03-10 13:26:32 +01:00
|
|
|
|
2019-09-07 16:12:26 +02:00
|
|
|
for (i = 0; i < me->totpoly; i++) {
|
Mesh: Remove redundant custom data pointers
For copy-on-write, we want to share attribute arrays between meshes
where possible. Mutable pointers like `Mesh.mvert` make that difficult
by making ownership vague. They also make code more complex by adding
redundancy.
The simplest solution is just removing them and retrieving layers from
`CustomData` as needed. Similar changes have already been applied to
curves and point clouds (e9f82d3dc7ee, 410a6efb747f). Removing use of
the pointers generally makes code more obvious and more reusable.
Mesh data is now accessed with a C++ API (`Mesh::edges()` or
`Mesh::edges_for_write()`), and a C API (`BKE_mesh_edges(mesh)`).
The CoW changes this commit makes possible are described in T95845
and T95842, and started in D14139 and D14140. The change also simplifies
the ongoing mesh struct-of-array refactors from T95965.
**RNA/Python Access Performance**
Theoretically, accessing mesh elements with the RNA API may become
slower, since the layer needs to be found on every random access.
However, overhead is already high enough that this doesn't make a
noticible differenc, and performance is actually improved in some
cases. Random access can be up to 10% faster, but other situations
might be a bit slower. Generally using `foreach_get/set` are the best
way to improve performance. See the differential revision for more
discussion about Python performance.
Cycles has been updated to use raw pointers and the internal Blender
mesh types, mostly because there is no sense in having this overhead
when it's already compiled with Blender. In my tests this roughly
halves the Cycles mesh creation time (0.19s to 0.10s for a 1 million
face grid).
Differential Revision: https://developer.blender.org/D15488
2022-09-05 18:56:34 +02:00
|
|
|
md = mdisp + polys[i].loopstart;
|
2018-06-17 17:05:51 +02:00
|
|
|
|
Mesh: Remove redundant custom data pointers
For copy-on-write, we want to share attribute arrays between meshes
where possible. Mutable pointers like `Mesh.mvert` make that difficult
by making ownership vague. They also make code more complex by adding
redundancy.
The simplest solution is just removing them and retrieving layers from
`CustomData` as needed. Similar changes have already been applied to
curves and point clouds (e9f82d3dc7ee, 410a6efb747f). Removing use of
the pointers generally makes code more obvious and more reusable.
Mesh data is now accessed with a C++ API (`Mesh::edges()` or
`Mesh::edges_for_write()`), and a C API (`BKE_mesh_edges(mesh)`).
The CoW changes this commit makes possible are described in T95845
and T95842, and started in D14139 and D14140. The change also simplifies
the ongoing mesh struct-of-array refactors from T95965.
**RNA/Python Access Performance**
Theoretically, accessing mesh elements with the RNA API may become
slower, since the layer needs to be found on every random access.
However, overhead is already high enough that this doesn't make a
noticible differenc, and performance is actually improved in some
cases. Random access can be up to 10% faster, but other situations
might be a bit slower. Generally using `foreach_get/set` are the best
way to improve performance. See the differential revision for more
discussion about Python performance.
Cycles has been updated to use raw pointers and the internal Blender
mesh types, mostly because there is no sense in having this overhead
when it's already compiled with Blender. In my tests this roughly
halves the Cycles mesh creation time (0.19s to 0.10s for a 1 million
face grid).
Differential Revision: https://developer.blender.org/D15488
2022-09-05 18:56:34 +02:00
|
|
|
for (j = 0; j < polys[i].totloop; j++, md++) {
|
2019-04-22 01:39:35 +02:00
|
|
|
if (md->totdisp == 0) {
|
2012-05-09 11:24:15 +02:00
|
|
|
continue;
|
2019-04-22 01:39:35 +02:00
|
|
|
}
|
2018-06-17 17:05:51 +02:00
|
|
|
|
2012-02-23 03:17:50 +01:00
|
|
|
while (1) {
|
2012-05-06 19:22:54 +02:00
|
|
|
int side = (1 << (totlvl - 1)) + 1;
|
|
|
|
int lvl_totdisp = side * side;
|
2019-04-22 01:39:35 +02:00
|
|
|
if (md->totdisp == lvl_totdisp) {
|
2019-04-17 06:17:24 +02:00
|
|
|
break;
|
2019-04-22 01:39:35 +02:00
|
|
|
}
|
2020-08-07 12:30:43 +02:00
|
|
|
if (md->totdisp < lvl_totdisp) {
|
2019-04-17 06:17:24 +02:00
|
|
|
totlvl--;
|
2019-04-22 01:39:35 +02:00
|
|
|
}
|
|
|
|
else {
|
2012-05-09 11:24:15 +02:00
|
|
|
totlvl++;
|
2019-04-22 01:39:35 +02:00
|
|
|
}
|
2011-02-27 07:19:40 +01:00
|
|
|
}
|
2018-06-17 17:05:51 +02:00
|
|
|
|
2011-02-27 07:19:40 +01:00
|
|
|
break;
|
2010-09-09 02:14:51 +02:00
|
|
|
}
|
2010-11-04 17:00:28 +01:00
|
|
|
}
|
2010-09-09 02:14:51 +02:00
|
|
|
|
2010-11-04 17:00:28 +01:00
|
|
|
return totlvl;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Sync the modifier's level settings with the displacement data actually
 * stored on the mesh, clamping the view/sculpt/render levels to the total
 * level derived from the stored grids. */
void multiresModifier_set_levels_from_disps(MultiresModifierData *mmd, Object *ob)
{
  Mesh *me = ob->data;
  MDisps *mdisp;

  /* In edit-mode the displacement layer lives on the BMesh loop data,
   * otherwise on the mesh's own loop custom-data. */
  mdisp = me->edit_mesh ? CustomData_get_layer(&me->edit_mesh->bm->ldata, CD_MDISPS) :
                          CustomData_get_layer(&me->ldata, CD_MDISPS);

  if (mdisp == NULL) {
    return;
  }

  mmd->totlvl = get_levels_from_disps(ob);
  /* NOTE(review): `lvl` is clamped against `sculptlvl` rather than its own
   * previous value — looks like a possible copy-paste slip; confirm intent. */
  mmd->lvl = MIN2(mmd->sculptlvl, mmd->totlvl);
  mmd->sculptlvl = MIN2(mmd->sculptlvl, mmd->totlvl);
  mmd->renderlvl = MIN2(mmd->renderlvl, mmd->totlvl);
}
|
|
|
|
|
2010-05-10 17:02:37 +02:00
|
|
|
/* Stamp every loop's MDisps with the displacement count and level that
 * correspond to `lvl`. No-op when the mesh has no displacement layer. */
static void multires_set_tot_mdisps(Mesh *me, int lvl)
{
  MDisps *md = CustomData_get_layer(&me->ldata, CD_MDISPS);

  if (md == NULL) {
    return;
  }

  for (int i = 0; i < me->totloop; i++) {
    md[i].totdisp = multires_grid_tot[lvl];
    md[i].level = lvl;
  }
}
|
|
|
|
|
2011-03-29 07:48:18 +02:00
|
|
|
/* Replace each loop's displacement grid with a zero-initialized grid sized
 * for `lvl`, to be filled in afterwards. Existing hidden-flag bitmaps are
 * subdivided to match the new level. */
static void multires_reallocate_mdisps(int totloop, MDisps *mdisps, int lvl)
{
  /* Same for every grid: displacement count at the target level. */
  const int totdisp = multires_grid_tot[lvl];

  for (MDisps *md = mdisps; md != mdisps + totloop; md++) {
    float(*disps)[3] = MEM_calloc_arrayN(totdisp, sizeof(float[3]), "multires disps");

    if (md->disps) {
      MEM_freeN(md->disps);
    }

    if (md->level && md->hidden) {
      multires_mdisps_subdivide_hidden(md, lvl);
    }

    md->disps = disps;
    md->totdisp = totdisp;
    md->level = lvl;
  }
}
|
2010-04-13 08:06:49 +02:00
|
|
|
|
2009-11-25 15:07:12 +01:00
|
|
|
/* Copy coordinates between two square grids of (possibly) different
 * resolutions. The larger grid is accessed with a uniform stride so that
 * corner and edge points of both grids stay aligned. */
static void multires_copy_grid(float (*gridA)[3], float (*gridB)[3], int sizeA, int sizeB)
{
  if (sizeA > sizeB) {
    /* Scatter the small grid B into strided positions of the large grid A. */
    const int step = (sizeA - 1) / (sizeB - 1);
    int index = 0;

    for (int y = 0; y < sizeB; y++) {
      for (int x = 0; x < sizeB; x++) {
        copy_v3_v3(gridA[y * step * sizeA + x * step], gridB[index++]);
      }
    }
  }
  else {
    /* Gather a strided sampling of the large grid B into the small grid A. */
    const int step = (sizeB - 1) / (sizeA - 1);
    int index = 0;

    for (int y = 0; y < sizeA; y++) {
      for (int x = 0; x < sizeA; x++) {
        copy_v3_v3(gridA[index++], gridB[y * step * sizeB + x * step]);
      }
    }
  }
}
|
|
|
|
|
2012-05-10 22:33:09 +02:00
|
|
|
/* Copy CCG elements between two grids of (possibly) different resolutions;
 * same strided-sampling scheme as multires_copy_grid() but operating on raw
 * CCG element storage via memcpy of keyA->elem_size bytes per point. */
static void multires_copy_dm_grid(CCGElem *gridA, CCGElem *gridB, CCGKey *keyA, CCGKey *keyB)
{
  if (keyA->grid_size > keyB->grid_size) {
    /* Scatter the small grid B into strided positions of the large grid A. */
    const int step = (keyA->grid_size - 1) / (keyB->grid_size - 1);
    int index = 0;

    for (int y = 0; y < keyB->grid_size; y++) {
      for (int x = 0; x < keyB->grid_size; x++) {
        memcpy(CCG_elem_offset_co(keyA, gridA, y * step * keyA->grid_size + x * step),
               CCG_elem_offset_co(keyB, gridB, index),
               keyA->elem_size);
        index++;
      }
    }
  }
  else {
    /* Gather a strided sampling of the large grid B into the small grid A. */
    const int step = (keyB->grid_size - 1) / (keyA->grid_size - 1);
    int index = 0;

    for (int y = 0; y < keyA->grid_size; y++) {
      for (int x = 0; x < keyA->grid_size; x++) {
        memcpy(CCG_elem_offset_co(keyA, gridA, index),
               CCG_elem_offset_co(keyB, gridB, y * step * keyB->grid_size + x * step),
               keyA->elem_size);
        index++;
      }
    }
  }
}
|
|
|
|
|
Add mask support to CCGSubSurf and multires.
* Add new CCG function ccgSubSurf_setAllocMask(). Similar to
ccgSubSurf_setCalcVertexNormals(), it sets whether the CCG elements
have a mask layer and what that layer's offset is. Unlike normals
however, it doesn't change any behavior during CCG calculation; it's
there only to give CCGKey information on the mask.
* Add a new flag to _getSubSurf(), CCG_ALLOC_MASK. If set, space for
an extra layer is allocated, but the number of CCG layers is not set
to include it. This is done because GridPaintMasks are absolute,
rather than being relative to the subdivided output (as MDisp
displacements are), so we skip subdividing paint masks here.
* Add a new flag to subsurf_make_derived_from_derived(),
SUBSURF_ALLOC_PAINT_MASK. This controls whether CCG_ALLOC_MASK is
set for _getSubSurf(). Related, masks are never loaded in during
ss_sync_from_derivedmesh(). After subdivision is finished, if the
alloc mask flag is set, the number of CCG layers is increase to 4
with ccgSubSurf_setNumLayers().
* Add a new flag to multires_make_from_derived(),
MULTIRES_ALLOC_PAINT_MASK. Not all multires functions need paint
mask data (e.g. multiresModifier_base_apply.) This flag is always
set in MOD_multires.c so that subdividing a mesh with a mask updates
properly even when not in sculpt mode.
* Update multiresModifier_disp_run() to apply, calculate, and add mask
elements. It's almost the same as the existing operations with xyz
coordinates, but treats masks as absolute rather than displacements
relative to subdivided values.
* Update multires_customdata_delete to free CD_GRID_PAINT_MASK in
addition to CD_MDISPS.
* Update multires_del_higher() to call the new function
multires_grid_paint_mask_downsample(), which allocates a
lower-resolution paint mask grid and copies values over from the
high-resolution grid.
2012-05-10 22:34:08 +02:00
|
|
|
/* Reallocate gpm->data at a lower resolution and copy values over
|
2012-05-17 01:37:23 +02:00
|
|
|
* from the original high-resolution data */
|
Add mask support to CCGSubSurf and multires.
* Add new CCG function ccgSubSurf_setAllocMask(). Similar to to
ccgSubSurf_setCalcVertexNormals(), it sets whether the CCG elements
have a mask layer and what that layer's offset is. Unlike normals
however, it doesn't change any behavior during CCG calculation; it's
there only to give CCGKey information on the mask.
* Add a new flag to _getSubSurf(), CCG_ALLOC_MASK. If set, space for
an extra layer is allocated, but the number of CCG layers is not set
to include it. This is done because GridPaintMasks are absolute,
rather than being relative to the subdivided output (as MDisp
displacements are), so we skip subdividing paint masks here.
* Add a new flag to subsurf_make_derived_from_derived(),
SUBSURF_ALLOC_PAINT_MASK. This controls whether CCG_ALLOC_MASK is
set for _getSubSurf(). Related, masks are never loaded in during
ss_sync_from_derivedmesh(). After subdivision is finished, if the
alloc mask flag is set, the number of CCG layers is increase to 4
with ccgSubSurf_setNumLayers().
* Add a new flag to multires_make_from_derived(),
MULTIRES_ALLOC_PAINT_MASK. Not all multires functions need paint
mask data (e.g. multiresModifier_base_apply.) This flag is always
set in MOD_multires.c so that subdividing a mesh with a mask updates
properly even when not in sculpt mode.
* Update multiresModifier_disp_run() to apply, calculate, and add mask
elements. It's almost the same as the existing operations with xyz
coordinates, but treats masks as absolute rather than displacements
relative to subdivided values.
* Update multires_customdata_delete to free CD_GRID_PAINT_MASK in
addition to CD_MDISPS.
* Update multires_del_higher() to call the new function
multires_grid_paint_mask_downsample(), which allocates a
lower-resolution paint mask grid and copies values over from the
high-resolution grid.
2012-05-10 22:34:08 +02:00
|
|
|
static void multires_grid_paint_mask_downsample(GridPaintMask *gpm, int level)
|
|
|
|
{
|
|
|
|
if (level < gpm->level) {
|
2013-10-01 11:17:35 +02:00
|
|
|
int gridsize = BKE_ccg_gridsize(level);
|
2018-01-14 22:14:20 +01:00
|
|
|
float *data = MEM_calloc_arrayN(
|
2020-03-06 17:18:10 +01:00
|
|
|
square_i(gridsize), sizeof(float), "multires_grid_paint_mask_downsample");
|
Add mask support to CCGSubSurf and multires.
* Add new CCG function ccgSubSurf_setAllocMask(). Similar to to
ccgSubSurf_setCalcVertexNormals(), it sets whether the CCG elements
have a mask layer and what that layer's offset is. Unlike normals
however, it doesn't change any behavior during CCG calculation; it's
there only to give CCGKey information on the mask.
* Add a new flag to _getSubSurf(), CCG_ALLOC_MASK. If set, space for
an extra layer is allocated, but the number of CCG layers is not set
to include it. This is done because GridPaintMasks are absolute,
rather than being relative to the subdivided output (as MDisp
displacements are), so we skip subdividing paint masks here.
* Add a new flag to subsurf_make_derived_from_derived(),
SUBSURF_ALLOC_PAINT_MASK. This controls whether CCG_ALLOC_MASK is
set for _getSubSurf(). Related, masks are never loaded in during
ss_sync_from_derivedmesh(). After subdivision is finished, if the
alloc mask flag is set, the number of CCG layers is increase to 4
with ccgSubSurf_setNumLayers().
* Add a new flag to multires_make_from_derived(),
MULTIRES_ALLOC_PAINT_MASK. Not all multires functions need paint
mask data (e.g. multiresModifier_base_apply.) This flag is always
set in MOD_multires.c so that subdividing a mesh with a mask updates
properly even when not in sculpt mode.
* Update multiresModifier_disp_run() to apply, calculate, and add mask
elements. It's almost the same as the existing operations with xyz
coordinates, but treats masks as absolute rather than displacements
relative to subdivided values.
* Update multires_customdata_delete to free CD_GRID_PAINT_MASK in
addition to CD_MDISPS.
* Update multires_del_higher() to call the new function
multires_grid_paint_mask_downsample(), which allocates a
lower-resolution paint mask grid and copies values over from the
high-resolution grid.
2012-05-10 22:34:08 +02:00
|
|
|
int x, y;
|
|
|
|
|
|
|
|
for (y = 0; y < gridsize; y++) {
|
|
|
|
for (x = 0; x < gridsize; x++) {
|
2012-05-11 10:05:47 +02:00
|
|
|
data[y * gridsize + x] = paint_grid_paint_mask(gpm, level, x, y);
|
Add mask support to CCGSubSurf and multires.
* Add new CCG function ccgSubSurf_setAllocMask(). Similar to to
ccgSubSurf_setCalcVertexNormals(), it sets whether the CCG elements
have a mask layer and what that layer's offset is. Unlike normals
however, it doesn't change any behavior during CCG calculation; it's
there only to give CCGKey information on the mask.
* Add a new flag to _getSubSurf(), CCG_ALLOC_MASK. If set, space for
an extra layer is allocated, but the number of CCG layers is not set
to include it. This is done because GridPaintMasks are absolute,
rather than being relative to the subdivided output (as MDisp
displacements are), so we skip subdividing paint masks here.
* Add a new flag to subsurf_make_derived_from_derived(),
SUBSURF_ALLOC_PAINT_MASK. This controls whether CCG_ALLOC_MASK is
set for _getSubSurf(). Related, masks are never loaded in during
ss_sync_from_derivedmesh(). After subdivision is finished, if the
alloc mask flag is set, the number of CCG layers is increase to 4
with ccgSubSurf_setNumLayers().
* Add a new flag to multires_make_from_derived(),
MULTIRES_ALLOC_PAINT_MASK. Not all multires functions need paint
mask data (e.g. multiresModifier_base_apply.) This flag is always
set in MOD_multires.c so that subdividing a mesh with a mask updates
properly even when not in sculpt mode.
* Update multiresModifier_disp_run() to apply, calculate, and add mask
elements. It's almost the same as the existing operations with xyz
coordinates, but treats masks as absolute rather than displacements
relative to subdivided values.
* Update multires_customdata_delete to free CD_GRID_PAINT_MASK in
addition to CD_MDISPS.
* Update multires_del_higher() to call the new function
multires_grid_paint_mask_downsample(), which allocates a
lower-resolution paint mask grid and copies values over from the
high-resolution grid.
2012-05-10 22:34:08 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
MEM_freeN(gpm->data);
|
|
|
|
gpm->data = data;
|
|
|
|
gpm->level = level;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-05 18:20:27 +02:00
|
|
|
/* Delete displacement levels above `lvl`: each loop's displacement grid is
 * downsampled to the lower resolution (along with its hidden-flag bitmap and
 * sculpt paint-mask grid, when present). Deleting down to level 0 removes
 * the multires custom-data layers entirely. */
static void multires_del_higher(MultiresModifierData *mmd, Object *ob, int lvl)
{
  Mesh *me = (Mesh *)ob->data;
  const MPoly *polys = BKE_mesh_polys(me);
  /* Number of levels being removed. */
  int levels = mmd->totlvl - lvl;
  MDisps *mdisps;
  GridPaintMask *gpm;

  /* Sync per-loop totdisp/level to the modifier's total level and make sure
   * externally-stored displacement data has been read in before editing. */
  multires_set_tot_mdisps(me, mmd->totlvl);
  multiresModifier_ensure_external_read(me, mmd);
  mdisps = CustomData_get_layer(&me->ldata, CD_MDISPS);
  gpm = CustomData_get_layer(&me->ldata, CD_GRID_PAINT_MASK);

  multires_force_sculpt_rebuild(ob);

  if (mdisps && levels > 0) {
    if (lvl > 0) {
      /* MLoop *ml = me->mloop; */ /*UNUSED*/
      /* Grid side lengths at the new (n) and current highest (h) levels. */
      int nsize = multires_side_tot[lvl];
      int hsize = multires_side_tot[mmd->totlvl];
      int i, j;

      for (i = 0; i < me->totpoly; i++) {
        for (j = 0; j < polys[i].totloop; j++) {
          /* One displacement grid per loop; `g` is its layer index. */
          int g = polys[i].loopstart + j;
          MDisps *mdisp = &mdisps[g];
          float(*disps)[3], (*ndisps)[3], (*hdisps)[3];
          int totdisp = multires_grid_tot[lvl];

          /* Fresh zeroed grid at the lower resolution. */
          disps = MEM_calloc_arrayN(totdisp, sizeof(float[3]), "multires disps");

          if (mdisp->disps != NULL) {
            ndisps = disps;
            hdisps = mdisp->disps;

            /* Strided copy of the high-res grid into the low-res one. */
            multires_copy_grid(ndisps, hdisps, nsize, hsize);
            if (mdisp->hidden) {
              /* Downsample the hidden-flag bitmap to the new level too. */
              BLI_bitmap *gh = multires_mdisps_downsample_hidden(mdisp->hidden, mdisp->level, lvl);
              MEM_freeN(mdisp->hidden);
              mdisp->hidden = gh;
            }

            MEM_freeN(mdisp->disps);
          }

          mdisp->disps = disps;
          mdisp->totdisp = totdisp;
          mdisp->level = lvl;

          if (gpm) {
            /* Sculpt paint-mask grids are stored per-loop as well;
             * downsample them to match. */
            multires_grid_paint_mask_downsample(&gpm[g], lvl);
          }
        }
      }
    }
    else {
      /* Deleting down to level 0: drop the multires layers entirely. */
      multires_customdata_delete(me);
    }
  }

  multires_set_tot_level(ob, mmd, lvl);
}
|
|
|
|
|
2018-06-18 11:21:33 +02:00
|
|
|
void multiresModifier_del_levels(MultiresModifierData *mmd,
|
|
|
|
Scene *scene,
|
|
|
|
Object *ob,
|
|
|
|
int direction)
|
2010-10-25 10:03:05 +02:00
|
|
|
{
|
2012-05-05 18:03:57 +02:00
|
|
|
Mesh *me = BKE_mesh_from_object(ob);
|
2018-06-18 11:21:33 +02:00
|
|
|
int lvl = multires_get_level(scene, ob, mmd, false, true);
|
2010-10-25 10:03:05 +02:00
|
|
|
int levels = mmd->totlvl - lvl;
|
|
|
|
MDisps *mdisps;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2010-10-25 10:03:05 +02:00
|
|
|
multires_set_tot_mdisps(me, mmd->totlvl);
|
2020-03-13 16:13:32 +01:00
|
|
|
multiresModifier_ensure_external_read(me, mmd);
|
2012-05-06 19:22:54 +02:00
|
|
|
mdisps = CustomData_get_layer(&me->ldata, CD_MDISPS);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-09-17 17:24:44 +02:00
|
|
|
multires_force_sculpt_rebuild(ob);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2012-02-23 03:17:50 +01:00
|
|
|
if (mdisps && levels > 0 && direction == 1) {
|
2018-04-05 18:20:27 +02:00
|
|
|
multires_del_higher(mmd, ob, lvl);
|
2010-10-25 10:03:05 +02:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-04-05 18:20:27 +02:00
|
|
|
multires_set_tot_level(ob, mmd, lvl);
|
2010-10-25 10:03:05 +02:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-01-22 11:48:28 +01:00
|
|
|
static DerivedMesh *multires_dm_create_local(Scene *scene,
|
|
|
|
Object *ob,
|
|
|
|
DerivedMesh *dm,
|
|
|
|
int lvl,
|
|
|
|
int totlvl,
|
|
|
|
bool alloc_paint_mask,
|
|
|
|
int flags)
|
2009-01-06 19:59:03 +01:00
|
|
|
{
|
2012-05-06 19:22:54 +02:00
|
|
|
MultiresModifierData mmd = {{NULL}};
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2009-11-25 15:07:12 +01:00
|
|
|
mmd.lvl = lvl;
|
|
|
|
mmd.sculptlvl = lvl;
|
|
|
|
mmd.renderlvl = lvl;
|
|
|
|
mmd.totlvl = totlvl;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-01-22 11:48:28 +01:00
|
|
|
flags |= MULTIRES_USE_LOCAL_MMD;
|
2019-04-22 01:39:35 +02:00
|
|
|
if (alloc_paint_mask) {
|
Add mask support to CCGSubSurf and multires.
* Add new CCG function ccgSubSurf_setAllocMask(). Similar to to
ccgSubSurf_setCalcVertexNormals(), it sets whether the CCG elements
have a mask layer and what that layer's offset is. Unlike normals
however, it doesn't change any behavior during CCG calculation; it's
there only to give CCGKey information on the mask.
* Add a new flag to _getSubSurf(), CCG_ALLOC_MASK. If set, space for
an extra layer is allocated, but the number of CCG layers is not set
to include it. This is done because GridPaintMasks are absolute,
rather than being relative to the subdivided output (as MDisp
displacements are), so we skip subdividing paint masks here.
* Add a new flag to subsurf_make_derived_from_derived(),
SUBSURF_ALLOC_PAINT_MASK. This controls whether CCG_ALLOC_MASK is
set for _getSubSurf(). Related, masks are never loaded in during
ss_sync_from_derivedmesh(). After subdivision is finished, if the
alloc mask flag is set, the number of CCG layers is increase to 4
with ccgSubSurf_setNumLayers().
* Add a new flag to multires_make_from_derived(),
MULTIRES_ALLOC_PAINT_MASK. Not all multires functions need paint
mask data (e.g. multiresModifier_base_apply.) This flag is always
set in MOD_multires.c so that subdividing a mesh with a mask updates
properly even when not in sculpt mode.
* Update multiresModifier_disp_run() to apply, calculate, and add mask
elements. It's almost the same as the existing operations with xyz
coordinates, but treats masks as absolute rather than displacements
relative to subdivided values.
* Update multires_customdata_delete to free CD_GRID_PAINT_MASK in
addition to CD_MDISPS.
* Update multires_del_higher() to call the new function
multires_grid_paint_mask_downsample(), which allocates a
lower-resolution paint mask grid and copies values over from the
high-resolution grid.
2012-05-10 22:34:08 +02:00
|
|
|
flags |= MULTIRES_ALLOC_PAINT_MASK;
|
2019-04-22 01:39:35 +02:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-06-18 11:21:33 +02:00
|
|
|
return multires_make_derived_from_derived(dm, &mmd, scene, ob, flags);
|
2009-11-25 15:07:12 +01:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-06-18 11:51:02 +02:00
|
|
|
static DerivedMesh *subsurf_dm_create_local(Scene *scene,
|
|
|
|
Object *ob,
|
|
|
|
DerivedMesh *dm,
|
2018-06-27 16:34:26 +02:00
|
|
|
int lvl,
|
2018-06-27 22:29:59 +02:00
|
|
|
bool is_simple,
|
|
|
|
bool is_optimal,
|
|
|
|
bool is_plain_uv,
|
|
|
|
bool alloc_paint_mask,
|
2019-01-22 11:48:28 +01:00
|
|
|
bool for_render,
|
|
|
|
SubsurfFlags flags)
|
2009-11-25 15:07:12 +01:00
|
|
|
{
|
2012-05-06 19:22:54 +02:00
|
|
|
SubsurfModifierData smd = {{NULL}};
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2009-11-25 15:07:12 +01:00
|
|
|
smd.levels = smd.renderLevels = lvl;
|
2018-08-13 15:39:01 +02:00
|
|
|
smd.quality = 3;
|
2018-06-27 16:34:26 +02:00
|
|
|
if (!is_plain_uv) {
|
2021-01-13 16:58:36 +01:00
|
|
|
smd.uv_smooth = SUBSURF_UV_SMOOTH_PRESERVE_BOUNDARIES;
|
2018-08-02 13:36:22 +02:00
|
|
|
}
|
|
|
|
else {
|
|
|
|
smd.uv_smooth = SUBSURF_UV_SMOOTH_NONE;
|
2018-06-27 16:34:26 +02:00
|
|
|
}
|
|
|
|
if (is_simple) {
|
2009-11-25 15:07:12 +01:00
|
|
|
smd.subdivType = ME_SIMPLE_SUBSURF;
|
2018-06-27 16:34:26 +02:00
|
|
|
}
|
|
|
|
if (is_optimal) {
|
2009-12-09 14:37:19 +01:00
|
|
|
smd.flags |= eSubsurfModifierFlag_ControlEdges;
|
2018-06-27 16:34:26 +02:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-06-27 16:34:26 +02:00
|
|
|
if (ob->mode & OB_MODE_EDIT) {
|
2012-05-10 22:31:55 +02:00
|
|
|
flags |= SUBSURF_IN_EDIT_MODE;
|
2018-06-27 16:34:26 +02:00
|
|
|
}
|
|
|
|
if (alloc_paint_mask) {
|
Add mask support to CCGSubSurf and multires.
* Add new CCG function ccgSubSurf_setAllocMask(). Similar to to
ccgSubSurf_setCalcVertexNormals(), it sets whether the CCG elements
have a mask layer and what that layer's offset is. Unlike normals
however, it doesn't change any behavior during CCG calculation; it's
there only to give CCGKey information on the mask.
* Add a new flag to _getSubSurf(), CCG_ALLOC_MASK. If set, space for
an extra layer is allocated, but the number of CCG layers is not set
to include it. This is done because GridPaintMasks are absolute,
rather than being relative to the subdivided output (as MDisp
displacements are), so we skip subdividing paint masks here.
* Add a new flag to subsurf_make_derived_from_derived(),
SUBSURF_ALLOC_PAINT_MASK. This controls whether CCG_ALLOC_MASK is
set for _getSubSurf(). Related, masks are never loaded in during
ss_sync_from_derivedmesh(). After subdivision is finished, if the
alloc mask flag is set, the number of CCG layers is increase to 4
with ccgSubSurf_setNumLayers().
* Add a new flag to multires_make_from_derived(),
MULTIRES_ALLOC_PAINT_MASK. Not all multires functions need paint
mask data (e.g. multiresModifier_base_apply.) This flag is always
set in MOD_multires.c so that subdividing a mesh with a mask updates
properly even when not in sculpt mode.
* Update multiresModifier_disp_run() to apply, calculate, and add mask
elements. It's almost the same as the existing operations with xyz
coordinates, but treats masks as absolute rather than displacements
relative to subdivided values.
* Update multires_customdata_delete to free CD_GRID_PAINT_MASK in
addition to CD_MDISPS.
* Update multires_del_higher() to call the new function
multires_grid_paint_mask_downsample(), which allocates a
lower-resolution paint mask grid and copies values over from the
high-resolution grid.
2012-05-10 22:34:08 +02:00
|
|
|
flags |= SUBSURF_ALLOC_PAINT_MASK;
|
2018-06-27 16:34:26 +02:00
|
|
|
}
|
|
|
|
if (for_render) {
|
|
|
|
flags |= SUBSURF_USE_RENDER_PARAMS;
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-06-18 11:51:02 +02:00
|
|
|
return subsurf_make_derived_from_derived(dm, &smd, scene, NULL, flags);
|
2007-12-29 18:07:55 +01:00
|
|
|
}
|
|
|
|
|
2012-06-11 10:37:35 +02:00
|
|
|
/* Compute an (unnormalized) tangent of the subdivision grid at (x, y) along
 * the given axis (0 = grid x direction, 1 = grid y direction) by finite
 * differences. At the far edge a backward difference is used, and at the
 * far corner the samples are shifted one row/column inward so that both
 * endpoints remain inside the grid. Axes other than 0/1 leave `t` untouched. */
static void grid_tangent(const CCGKey *key, int x, int y, int axis, CCGElem *grid, float t[3])
{
  /* Endpoints of the finite difference: t = co(hi) - co(lo). */
  int x_lo, y_lo, x_hi, y_hi;
  const int last = key->grid_size - 1;

  if (axis == 0) {
    if (x == last) {
      /* Backward difference at the edge; at the shared corner step down one
       * row so both samples stay inside the grid. */
      const int row = (y == last) ? y - 1 : y;
      x_lo = x - 1;
      x_hi = x;
      y_lo = y_hi = row;
    }
    else {
      x_lo = x;
      x_hi = x + 1;
      y_lo = y_hi = y;
    }
  }
  else if (axis == 1) {
    if (y == last) {
      /* Mirror of the axis-0 edge handling, stepping one column inward at
       * the shared corner. */
      const int col = (x == last) ? x - 1 : x;
      y_lo = y - 1;
      y_hi = y;
      x_lo = x_hi = col;
    }
    else {
      y_lo = y;
      y_hi = y + 1;
      x_lo = x_hi = x;
    }
  }
  else {
    /* Unknown axis: leave `t` unmodified, matching the original behavior. */
    return;
  }

  sub_v3_v3v3(t, CCG_grid_elem_co(key, grid, x_hi, y_hi), CCG_grid_elem_co(key, grid, x_lo, y_lo));
}
|
|
|
|
|
2012-06-11 10:37:35 +02:00
|
|
|
/* Construct 3x3 tangent-space matrix in 'mat' */
|
|
|
|
static void grid_tangent_matrix(float mat[3][3], const CCGKey *key, int x, int y, CCGElem *grid)
|
|
|
|
{
|
|
|
|
grid_tangent(key, x, y, 0, grid, mat[0]);
|
|
|
|
normalize_v3(mat[0]);
|
|
|
|
|
|
|
|
grid_tangent(key, x, y, 1, grid, mat[1]);
|
|
|
|
normalize_v3(mat[1]);
|
|
|
|
|
|
|
|
copy_v3_v3(mat[2], CCG_grid_elem_no(key, grid, x, y));
|
|
|
|
}
|
|
|
|
|
2018-01-11 20:14:16 +01:00
|
|
|
/* Per-task payload for the threaded displacement run
 * (see multires_disp_run_cb / multiresModifier_disp_run). */
typedef struct MultiresThreadedData {
  /* Which displacement operation to perform (apply/calc/add). */
  DispOp op;
  /* Current grid data and the backup/subsurf grids it is compared against. */
  CCGElem **gridData, **subGridData;
  /* Key describing the element layout of gridData. */
  CCGKey *key;
  /* NOTE(review): not referenced by multires_disp_run_cb in this file —
   * presumably used by another caller; verify before removing. */
  CCGKey *sub_key;
  /* Polygons of the mesh being processed (indexed by the task's poly index). */
  const MPoly *mpoly;
  /* Per-loop multires displacements. */
  MDisps *mdisps;
  /* Optional per-loop sculpt paint masks (may be NULL). */
  GridPaintMask *grid_paint_mask;
  /* Offset of each polygon's first grid in gridData. */
  int *gridOffset;
  /* Runtime grid size, stored displacement grid size, and the sampling
   * stride between them. */
  int gridSize, dGridSize, dSkip;
  /* NOTE(review): not referenced by multires_disp_run_cb in this file —
   * presumably used by another caller; verify before removing. */
  float (*smat)[3];
} MultiresThreadedData;
|
2018-01-11 19:39:24 +01:00
|
|
|
|
|
|
|
/* Task callback run per polygon by BLI_task_parallel_range() from
 * multiresModifier_disp_run(): converts between object-space grid
 * coordinates and tangent-space multires displacements for every grid
 * point of the polygon, and keeps paint masks in sync where present. */
static void multires_disp_run_cb(void *__restrict userdata,
                                 const int pidx,
                                 const TaskParallelTLS *__restrict UNUSED(tls))
{
  MultiresThreadedData *tdata = userdata;

  DispOp op = tdata->op;
  CCGElem **gridData = tdata->gridData;
  CCGElem **subGridData = tdata->subGridData;
  CCGKey *key = tdata->key;
  const MPoly *mpoly = tdata->mpoly;
  MDisps *mdisps = tdata->mdisps;
  GridPaintMask *grid_paint_mask = tdata->grid_paint_mask;
  int *gridOffset = tdata->gridOffset;
  int gridSize = tdata->gridSize;
  int dGridSize = tdata->dGridSize;
  int dSkip = tdata->dSkip;

  /* One grid per polygon corner; gIndex walks this polygon's grids. */
  const int numVerts = mpoly[pidx].totloop;
  int S, x, y, gIndex = gridOffset[pidx];

  for (S = 0; S < numVerts; S++, gIndex++) {
    GridPaintMask *gpm = grid_paint_mask ? &grid_paint_mask[gIndex] : NULL;
    MDisps *mdisp = &mdisps[mpoly[pidx].loopstart + S];
    CCGElem *grid = gridData[gIndex];
    CCGElem *subgrid = subGridData[gIndex];
    float(*dispgrid)[3] = NULL;

    dispgrid = mdisp->disps;

    /* if needed, reallocate multires paint mask */
    if (gpm && gpm->level < key->level) {
      gpm->level = key->level;
      if (gpm->data) {
        MEM_freeN(gpm->data);
      }
      gpm->data = MEM_calloc_arrayN(key->grid_area, sizeof(float), "gpm.data");
    }

    for (y = 0; y < gridSize; y++) {
      for (x = 0; x < gridSize; x++) {
        float *co = CCG_grid_elem_co(key, grid, x, y);
        float *sco = CCG_grid_elem_co(key, subgrid, x, y);
        /* Displacement grid is sampled at a coarser stride (dSkip) when the
         * runtime grid is at a lower level than the stored one. */
        float *data = dispgrid[dGridSize * y * dSkip + x * dSkip];
        float mat[3][3], disp[3], d[3], mask;

        /* construct tangent space matrix */
        grid_tangent_matrix(mat, key, x, y, subgrid);

        switch (op) {
          case APPLY_DISPLACEMENTS:
            /* Convert displacement to object space
             * and add to grid points */
            mul_v3_m3v3(disp, mat, data);
            add_v3_v3v3(co, sco, disp);
            break;
          case CALC_DISPLACEMENTS:
            /* Calculate displacement between new and old
             * grid points and convert to tangent space */
            sub_v3_v3v3(disp, co, sco);
            invert_m3(mat);
            mul_v3_m3v3(data, mat, disp);
            break;
          case ADD_DISPLACEMENTS:
            /* Convert subdivided displacements to tangent
             * space and add to the original displacements */
            invert_m3(mat);
            mul_v3_m3v3(d, mat, co);
            add_v3_v3(data, d);
            break;
        }

        /* Masks are stored as absolute values (not tangent-space
         * displacements), so they are copied/accumulated directly. */
        if (gpm) {
          switch (op) {
            case APPLY_DISPLACEMENTS:
              /* Copy mask from gpm to DM */
              *CCG_grid_elem_mask(key, grid, x, y) = paint_grid_paint_mask(gpm, key->level, x, y);
              break;
            case CALC_DISPLACEMENTS:
              /* Copy mask from DM to gpm */
              mask = *CCG_grid_elem_mask(key, grid, x, y);
              gpm->data[y * gridSize + x] = CLAMPIS(mask, 0, 1);
              break;
            case ADD_DISPLACEMENTS:
              /* Add mask displacement to gpm */
              gpm->data[y * gridSize + x] += *CCG_grid_elem_mask(key, grid, x, y);
              break;
          }
        }
      }
    }
  }
}
|
|
|
|
|
2012-07-13 22:50:32 +02:00
|
|
|
/* XXX WARNING: subsurf elements from dm and oldGridData *must* be of the same format (size),
 * because this code uses CCGKey's info from dm to access oldGridData's normals
 * (through the call to grid_tangent_matrix())! */
/* Run a displacement operation (apply/calc/add) over all grids of `dm`,
 * reading/writing the mesh's CD_MDISPS (and CD_GRID_PAINT_MASK) layers.
 * `dm2`, when given with its own MDISPS layer, overrides the source of
 * polys/displacements. `oldGridData`, when non-NULL, supplies the
 * unmodified subsurf grids to compare against; otherwise the current
 * grids are used as both source and reference. */
static void multiresModifier_disp_run(
    DerivedMesh *dm, Mesh *me, DerivedMesh *dm2, DispOp op, CCGElem **oldGridData, int totlvl)
{
  CCGDerivedMesh *ccgdm = (CCGDerivedMesh *)dm;
  CCGElem **gridData, **subGridData;
  CCGKey key;
  const MPoly *mpoly = BKE_mesh_polys(me);
  MDisps *mdisps = CustomData_get_layer(&me->ldata, CD_MDISPS);
  GridPaintMask *grid_paint_mask = NULL;
  int *gridOffset;
  int i, gridSize, dGridSize, dSkip;
  int totloop, totpoly;

  /* this happens in the dm made by bmesh_mdisps_space_set */
  if (dm2 && CustomData_has_layer(&dm2->loopData, CD_MDISPS)) {
    mpoly = CustomData_get_layer(&dm2->polyData, CD_MPOLY);
    mdisps = CustomData_get_layer(&dm2->loopData, CD_MDISPS);
    totloop = dm2->numLoopData;
    totpoly = dm2->numPolyData;
  }
  else {
    totloop = me->totloop;
    totpoly = me->totpoly;
  }

  if (!mdisps) {
    /* Only CALC_DISPLACEMENTS can start from an empty layer; the other ops
     * need existing displacements to read. */
    if (op == CALC_DISPLACEMENTS) {
      mdisps = CustomData_add_layer(&me->ldata, CD_MDISPS, CD_SET_DEFAULT, NULL, me->totloop);
    }
    else {
      return;
    }
  }

  // numGrids = dm->getNumGrids(dm); /* UNUSED */
  gridSize = dm->getGridSize(dm);
  gridData = dm->getGridData(dm);
  gridOffset = dm->getGridOffset(dm);
  dm->getGridKey(dm, &key);
  subGridData = (oldGridData) ? oldGridData : gridData;

  /* Stored displacement grids are at `totlvl`; dSkip is the sampling stride
   * from the runtime grid resolution into them. */
  dGridSize = multires_side_tot[totlvl];
  dSkip = (dGridSize - 1) / (gridSize - 1);

  /* multires paint masks */
  if (key.has_mask) {
    grid_paint_mask = CustomData_get_layer(&me->ldata, CD_GRID_PAINT_MASK);
  }

  /* when adding new faces in edit mode, need to allocate disps */
  for (i = 0; i < totloop; i++) {
    if (mdisps[i].disps == NULL) {
      multires_reallocate_mdisps(totloop, mdisps, totlvl);
      break;
    }
  }

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.min_iter_per_thread = CCG_TASK_LIMIT;

  MultiresThreadedData data = {
      .op = op,
      .gridData = gridData,
      .subGridData = subGridData,
      .key = &key,
      .mpoly = mpoly,
      .mdisps = mdisps,
      .grid_paint_mask = grid_paint_mask,
      .gridOffset = gridOffset,
      .gridSize = gridSize,
      .dGridSize = dGridSize,
      .dSkip = dSkip,
  };

  /* One task iteration per polygon; see multires_disp_run_cb. */
  BLI_task_parallel_range(0, totpoly, &data, multires_disp_run_cb, &settings);

  /* Applying moved the grid coordinates, so shared edges/normals must be
   * re-stitched and recomputed. */
  if (op == APPLY_DISPLACEMENTS) {
    ccgSubSurf_stitchFaces(ccgdm->ss, 0, NULL, 0);
    ccgSubSurf_updateNormals(ccgdm->ss, NULL, 0);
  }
}
|
|
|
|
|
2018-06-18 11:21:33 +02:00
|
|
|
/* Propagate edits made on a multires CCG derived mesh back into the mesh's
 * CD_MDISPS displacement layer. When sculpting at a level below the top
 * (`lvl < totlvl`), the low-level deltas are re-subdivided and ADDed to the
 * stored displacements; at the top level the displacements are simply
 * recalculated against a fresh subsurf of the base mesh. */
void multires_modifier_update_mdisps(struct DerivedMesh *dm, Scene *scene)
{
  CCGDerivedMesh *ccgdm = (CCGDerivedMesh *)dm;
  Object *ob;
  Mesh *me;
  const MDisps *mdisps;
  MultiresModifierData *mmd;

  ob = ccgdm->multires.ob;
  me = ccgdm->multires.ob->data;
  mmd = ccgdm->multires.mmd;
  multires_set_tot_mdisps(me, mmd->totlvl);
  /* Displacements may live in an external file; make sure they are loaded. */
  multiresModifier_ensure_external_read(me, mmd);
  mdisps = CustomData_get_layer(&me->ldata, CD_MDISPS);

  if (mdisps) {
    int lvl = ccgdm->multires.lvl;
    int totlvl = ccgdm->multires.totlvl;

    if (lvl < totlvl) {
      /* Editing below the top level: compute the low-level delta, subsurf it
       * up to totlvl, and add the result to the stored displacements. */
      DerivedMesh *lowdm, *cddm, *highdm;
      CCGElem **highGridData, **lowGridData, **subGridData, **gridData, *diffGrid;
      CCGKey highGridKey, lowGridKey;
      CCGSubSurf *ss;
      int i, j, numGrids, highGridSize, lowGridSize;
      const bool has_mask = CustomData_has_layer(&me->ldata, CD_GRID_PAINT_MASK);

      /* Create subsurf DM from original mesh at high level. */
      /* TODO: use mesh_deform_eval when sculpting on deformed mesh. */
      cddm = CDDM_from_mesh(me);
      DM_set_only_copy(cddm, &CD_MASK_BAREMESH);

      highdm = subsurf_dm_create_local(scene,
                                       ob,
                                       cddm,
                                       totlvl,
                                       false,
                                       0,
                                       mmd->uv_smooth == SUBSURF_UV_SMOOTH_NONE,
                                       has_mask,
                                       false,
                                       SUBSURF_IGNORE_SIMPLIFY);
      ss = ((CCGDerivedMesh *)highdm)->ss;

      /* create multires DM from original mesh and displacements */
      lowdm = multires_dm_create_local(
          scene, ob, cddm, lvl, totlvl, has_mask, MULTIRES_IGNORE_SIMPLIFY);
      cddm->release(cddm);

      /* gather grid data */
      numGrids = highdm->getNumGrids(highdm);
      highGridSize = highdm->getGridSize(highdm);
      highGridData = highdm->getGridData(highdm);
      highdm->getGridKey(highdm, &highGridKey);
      lowGridSize = lowdm->getGridSize(lowdm);
      lowGridData = lowdm->getGridData(lowdm);
      lowdm->getGridKey(lowdm, &lowGridKey);
      gridData = dm->getGridData(dm);

      /* Mixed-key access below (low key into high grids) is only valid when
       * both layouts have the same element size. */
      BLI_assert(highGridKey.elem_size == lowGridKey.elem_size);

      subGridData = MEM_calloc_arrayN(numGrids, sizeof(CCGElem *), "subGridData*");
      diffGrid = MEM_calloc_arrayN(lowGridKey.elem_size, lowGridSize * lowGridSize, "diff");

      for (i = 0; i < numGrids; i++) {
        /* backup subsurf grids */
        subGridData[i] = MEM_calloc_arrayN(
            highGridKey.elem_size, highGridSize * highGridSize, "subGridData");
        memcpy(
            subGridData[i], highGridData[i], highGridKey.elem_size * highGridSize * highGridSize);

        /* write difference of subsurf and displaced low level into high subsurf */
        for (j = 0; j < lowGridSize * lowGridSize; j++) {
          sub_v4_v4v4(CCG_elem_offset_co(&lowGridKey, diffGrid, j),
                      CCG_elem_offset_co(&lowGridKey, gridData[i], j),
                      CCG_elem_offset_co(&lowGridKey, lowGridData[i], j));
        }

        multires_copy_dm_grid(highGridData[i], diffGrid, &highGridKey, &lowGridKey);
      }

      /* lower level dm no longer needed at this point */
      MEM_freeN(diffGrid);
      lowdm->release(lowdm);

      /* subsurf higher levels again with difference of coordinates */
      ccgSubSurf_updateFromFaces(ss, lvl, NULL, 0);
      ccgSubSurf_updateLevels(ss, lvl, NULL, 0);

      /* add to displacements */
      multiresModifier_disp_run(highdm, me, NULL, ADD_DISPLACEMENTS, subGridData, mmd->totlvl);

      /* free */
      highdm->release(highdm);
      for (i = 0; i < numGrids; i++) {
        MEM_freeN(subGridData[i]);
      }
      MEM_freeN(subGridData);
    }
    else {
      /* Editing at the top level: recalculate displacements directly against
       * a fresh subsurf of the base mesh. */
      DerivedMesh *cddm, *subdm;
      const bool has_mask = CustomData_has_layer(&me->ldata, CD_GRID_PAINT_MASK);

      /* TODO: use mesh_deform_eval when sculpting on deformed mesh. */
      cddm = CDDM_from_mesh(me);
      DM_set_only_copy(cddm, &CD_MASK_BAREMESH);

      subdm = subsurf_dm_create_local(scene,
                                      ob,
                                      cddm,
                                      mmd->totlvl,
                                      false,
                                      0,
                                      mmd->uv_smooth == SUBSURF_UV_SMOOTH_NONE,
                                      has_mask,
                                      false,
                                      SUBSURF_IGNORE_SIMPLIFY);
      cddm->release(cddm);

      multiresModifier_disp_run(
          dm, me, NULL, CALC_DISPLACEMENTS, subdm->getGridData(subdm), mmd->totlvl);

      subdm->release(subdm);
    }
  }
}
|
|
|
|
|
2012-03-14 07:32:03 +01:00
|
|
|
/* Synchronize the mesh's per-loop hidden flags (MDisps.hidden bitmaps) with
 * the grid-hidden state stored on the CCG derived mesh.
 *
 * For each loop: if the CCG has no hidden bitmap, any stored bitmap on the
 * MDisps is freed; otherwise the CCG bitmap (at the display level 'lvl') is
 * up-sampled to the top level 'totlvl' and replaces the stored one.
 *
 * NOTE(review): assumes 'dm' is actually a CCGDerivedMesh produced by the
 * multires modifier — the cast below is unchecked. */
void multires_modifier_update_hidden(DerivedMesh *dm)
{
  CCGDerivedMesh *ccgdm = (CCGDerivedMesh *)dm;
  BLI_bitmap **grid_hidden = ccgdm->gridHidden;
  Mesh *me = ccgdm->multires.ob->data;
  MDisps *mdisps = CustomData_get_layer(&me->ldata, CD_MDISPS);
  int totlvl = ccgdm->multires.totlvl;
  int lvl = ccgdm->multires.lvl;

  if (mdisps) {
    int i;

    for (i = 0; i < me->totloop; i++) {
      MDisps *md = &mdisps[i];
      BLI_bitmap *gh = grid_hidden[i];

      if (!gh && md->hidden) {
        /* No grid-hidden data on the CCG side: drop the stale stored bitmap. */
        MEM_freeN(md->hidden);
        md->hidden = NULL;
      }
      else if (gh) {
        /* Up-sample from the current display level to the top level; the
         * existing 'md->hidden' is passed in so previously hidden top-level
         * elements can be taken into account, then the old bitmap is freed
         * and ownership of the new one transfers to the MDisps. */
        gh = multires_mdisps_upsample_hidden(gh, lvl, totlvl, md->hidden);
        if (md->hidden) {
          MEM_freeN(md->hidden);
        }

        md->hidden = gh;
      }
    }
  }
}
|
2011-03-29 07:48:18 +02:00
|
|
|
|
2009-12-09 14:37:19 +01:00
|
|
|
/* Re-average displacement along the boundaries of grids that were modified
 * (e.g. by sculpting), so neighboring grids remain stitched together.
 * Silently does nothing when the object has no sculpt session, no PBVH or
 * no CCG subdivision data. */
void multires_stitch_grids(Object *ob)
{
  if (ob == NULL || ob->sculpt == NULL) {
    return;
  }

  SculptSession *ss = ob->sculpt;
  PBVH *pbvh = ss->pbvh;
  SubdivCCG *subdiv_ccg = ss->subdiv_ccg;

  if (pbvh == NULL || subdiv_ccg == NULL) {
    return;
  }

  BLI_assert(BKE_pbvh_type(pbvh) == PBVH_GRIDS);

  /* NOTE: Currently CCG does not keep track of faces, making it impossible
   * to use BKE_pbvh_get_grid_updates(). */
  CCGFace **updated_faces;
  int updated_faces_num;
  BKE_pbvh_get_grid_updates(pbvh, false, (void ***)&updated_faces, &updated_faces_num);

  if (updated_faces_num != 0) {
    BKE_subdiv_ccg_average_stitch_faces(subdiv_ccg, updated_faces, updated_faces_num);
    MEM_freeN(updated_faces);
  }
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-04-05 18:20:27 +02:00
|
|
|
/* Build the multires result mesh: subdivide the input 'dm' to the effective
 * level and apply the stored MDisps displacements on top of it.
 *
 * Returns 'dm' itself (no new reference) when the effective level is 0;
 * otherwise returns a new CCG derived mesh that the caller owns.
 *
 * 'flags' (MultiresFlags):
 *  - MULTIRES_USE_RENDER_PARAMS: pick the render level instead of viewport.
 *  - MULTIRES_IGNORE_SIMPLIFY: bypass scene simplification settings.
 *  - MULTIRES_ALLOC_PAINT_MASK: allocate grid paint-mask layers.
 *  - MULTIRES_USE_LOCAL_MMD: 'mmd' is a temporary copy, so do NOT link it
 *    into the result's multires back-pointers. */
DerivedMesh *multires_make_derived_from_derived(
    DerivedMesh *dm, MultiresModifierData *mmd, Scene *scene, Object *ob, MultiresFlags flags)
{
  Mesh *me = ob->data;
  DerivedMesh *result;
  CCGDerivedMesh *ccgdm = NULL;
  CCGElem **gridData, **subGridData;
  CCGKey key;
  const bool render = (flags & MULTIRES_USE_RENDER_PARAMS) != 0;
  const bool ignore_simplify = (flags & MULTIRES_IGNORE_SIMPLIFY) != 0;
  int lvl = multires_get_level(scene, ob, mmd, render, ignore_simplify);
  int i, gridSize, numGrids;

  /* Level 0 means multires is effectively disabled: pass the input through. */
  if (lvl == 0) {
    return dm;
  }

  const int subsurf_flags = ignore_simplify ? SUBSURF_IGNORE_SIMPLIFY : 0;

  result = subsurf_dm_create_local(scene,
                                   ob,
                                   dm,
                                   lvl,
                                   false,
                                   mmd->flags & eMultiresModifierFlag_ControlEdges,
                                   mmd->uv_smooth == SUBSURF_UV_SMOOTH_NONE,
                                   flags & MULTIRES_ALLOC_PAINT_MASK,
                                   render,
                                   subsurf_flags);

  if (!(flags & MULTIRES_USE_LOCAL_MMD)) {
    /* Link the result back to the object/modifier so later updates (e.g.
     * applying sculpt changes) know where the data came from. */
    ccgdm = (CCGDerivedMesh *)result;

    ccgdm->multires.ob = ob;
    ccgdm->multires.mmd = mmd;
    ccgdm->multires.local_mmd = 0;
    ccgdm->multires.lvl = lvl;
    ccgdm->multires.totlvl = mmd->totlvl;
    ccgdm->multires.modified_flags = 0;
  }

  numGrids = result->getNumGrids(result);
  gridSize = result->getGridSize(result);
  gridData = result->getGridData(result);
  result->getGridKey(result, &key);

  /* Snapshot the smooth (un-displaced) subdivision grids; the displacement
   * run below needs them as the base while it writes into 'result'. */
  subGridData = MEM_malloc_arrayN(numGrids, sizeof(CCGElem *), "subGridData*");

  for (i = 0; i < numGrids; i++) {
    subGridData[i] = MEM_malloc_arrayN(key.elem_size, gridSize * gridSize, "subGridData");
    memcpy(subGridData[i], gridData[i], key.elem_size * gridSize * gridSize);
  }

  /* Make sure the MDisps layer matches the modifier's top level and that
   * externally-stored displacements have been read in. */
  multires_set_tot_mdisps(me, mmd->totlvl);
  multiresModifier_ensure_external_read(me, mmd);

  /* Run displacement. */
  multiresModifier_disp_run(result, ob->data, dm, APPLY_DISPLACEMENTS, subGridData, mmd->totlvl);

  /* copy hidden elements for this level */
  if (ccgdm) {
    multires_output_hidden_to_ccgdm(ccgdm, me, lvl);
  }

  for (i = 0; i < numGrids; i++) {
    MEM_freeN(subGridData[i]);
  }
  MEM_freeN(subGridData);

  return result;
}
|
2009-01-06 19:59:03 +01:00
|
|
|
|
2011-02-12 11:18:21 +01:00
|
|
|
/* Bilinearly sample an old-style (pre-2.5) displacement grid.
 *
 * 'disps' is an st*st row-major grid of 3D vectors; (u, v) is the sample
 * position in grid units. The interpolated vector is written to 'out'.
 * 'out' is left untouched when 'disps' is NULL or a coordinate is NaN. */
void old_mdisps_bilinear(float out[3], float (*disps)[3], const int st, float u, float v)
{
  const int st_max = st - 1;

  if (!disps || isnan(u) || isnan(v)) {
    return;
  }

  /* Clamp the sample position into the grid. */
  if (u < 0) {
    u = 0;
  }
  else if (u >= st) {
    u = st_max;
  }
  if (v < 0) {
    v = 0;
  }
  else if (v >= st) {
    v = st_max;
  }

  /* Cell corner indices; the far corner is clamped to the last row/column. */
  const int x = floor(u);
  const int y = floor(v);
  const int x2 = (x + 1 >= st) ? st_max : x + 1;
  const int y2 = (y + 1 >= st) ? st_max : y + 1;

  const float urat = u - x;
  const float vrat = v - y;
  const float uopp = 1 - urat;

  /* Weight the four corners along U, then blend the two rows along V. */
  float corner[4][3], row[2][3];

  mul_v3_v3fl(corner[0], disps[y * st + x], uopp);
  mul_v3_v3fl(corner[1], disps[y * st + x2], urat);
  mul_v3_v3fl(corner[2], disps[y2 * st + x], uopp);
  mul_v3_v3fl(corner[3], disps[y2 * st + x2], urat);

  add_v3_v3v3(row[0], corner[0], corner[1]);
  add_v3_v3v3(row[1], corner[2], corner[3]);
  mul_v3_fl(row[0], 1 - vrat);
  mul_v3_fl(row[1], vrat);

  add_v3_v3v3(out, row[0], row[1]);
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2020-03-03 12:35:51 +01:00
|
|
|
void multiresModifier_sync_levels_ex(Object *ob_dst,
|
2018-06-18 11:21:33 +02:00
|
|
|
MultiresModifierData *mmd_src,
|
|
|
|
MultiresModifierData *mmd_dst)
|
2010-10-25 10:03:05 +02:00
|
|
|
{
|
2015-05-07 15:16:10 +02:00
|
|
|
if (mmd_src->totlvl == mmd_dst->totlvl) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mmd_src->totlvl > mmd_dst->totlvl) {
|
2020-10-26 12:32:22 +01:00
|
|
|
multiresModifier_subdivide_to_level(
|
|
|
|
ob_dst, mmd_dst, mmd_src->totlvl, MULTIRES_SUBDIVIDE_CATMULL_CLARK);
|
2015-05-07 15:16:10 +02:00
|
|
|
}
|
|
|
|
else {
|
2018-04-05 18:20:27 +02:00
|
|
|
multires_del_higher(mmd_dst, ob_dst, mmd_src->totlvl);
|
2015-05-07 15:16:10 +02:00
|
|
|
}
|
|
|
|
}
|
2010-10-25 10:03:05 +02:00
|
|
|
|
2018-04-05 18:20:27 +02:00
|
|
|
/* Synchronize multires levels between two objects prior to joining them.
 * If the source object has no multires modifier at all, its MDisps data is
 * removed instead (it cannot be up-sampled without modifier data). */
static void multires_sync_levels(Scene *scene, Object *ob_src, Object *ob_dst)
{
  MultiresModifierData *mmd_src = get_multires_modifier(scene, ob_src, true);
  MultiresModifierData *mmd_dst = get_multires_modifier(scene, ob_dst, true);

  if (mmd_src == NULL) {
    /* Object could have MDISP even when there is no multires modifier.
     * This could lead to troubles since there is no known way mdisp could be
     * up-sampled correctly without modifier data: just remove mdisps if no
     * multires is present (nazgul). */
    multires_customdata_delete(ob_src->data);
    return;
  }

  if (mmd_dst != NULL) {
    multiresModifier_sync_levels_ex(ob_dst, mmd_src, mmd_dst);
  }
}
|
|
|
|
|
2018-11-02 15:23:06 +01:00
|
|
|
/* Multiply every stored displacement vector of the object's mesh by a single
 * uniform scale factor. Assumes the CD_MDISPS layer exists (callers check). */
static void multires_apply_uniform_scale(Object *object, const float scale)
{
  Mesh *mesh = (Mesh *)object->data;
  MDisps *mdisps = CustomData_get_layer(&mesh->ldata, CD_MDISPS);

  for (int loop_index = 0; loop_index < mesh->totloop; loop_index++) {
    MDisps *grid = &mdisps[loop_index];
    float(*disps)[3] = grid->disps;

    for (int disp_index = 0; disp_index < grid->totdisp; disp_index++) {
      mul_v3_fl(disps[disp_index], scale);
    }
  }
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-11-02 15:23:06 +01:00
|
|
|
static void multires_apply_smat(struct Depsgraph *UNUSED(depsgraph),
|
|
|
|
Scene *scene,
|
|
|
|
Object *object,
|
2019-09-14 00:10:50 +02:00
|
|
|
const float smat[3][3])
|
2018-11-02 15:23:06 +01:00
|
|
|
{
|
|
|
|
const MultiresModifierData *mmd = get_multires_modifier(scene, object, true);
|
|
|
|
if (mmd == NULL || mmd->totlvl == 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
/* Make sure layer present. */
|
|
|
|
Mesh *mesh = (Mesh *)object->data;
|
2020-03-13 16:13:32 +01:00
|
|
|
multiresModifier_ensure_external_read(mesh, mmd);
|
2018-11-02 15:23:06 +01:00
|
|
|
if (!CustomData_get_layer(&mesh->ldata, CD_MDISPS)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (is_uniform_scaled_m3(smat)) {
|
|
|
|
const float scale = mat3_to_scale(smat);
|
|
|
|
multires_apply_uniform_scale(object, scale);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
/* TODO(sergey): This branch of code actually requires more work to
|
|
|
|
* preserve all the details.
|
|
|
|
*/
|
|
|
|
const float scale = mat3_to_scale(smat);
|
|
|
|
multires_apply_uniform_scale(object, scale);
|
|
|
|
}
|
2010-10-25 10:03:05 +02:00
|
|
|
}
|
|
|
|
|
2022-05-14 18:57:52 +02:00
|
|
|
int multires_mdisp_corners(const MDisps *s)
|
2010-11-04 17:00:28 +01:00
|
|
|
{
|
2012-05-06 19:22:54 +02:00
|
|
|
int lvl = 13;
|
2010-11-04 17:00:28 +01:00
|
|
|
|
2012-02-23 03:17:50 +01:00
|
|
|
while (lvl > 0) {
|
2012-05-06 19:22:54 +02:00
|
|
|
int side = (1 << (lvl - 1)) + 1;
|
2019-04-22 01:39:35 +02:00
|
|
|
if ((s->totdisp % (side * side)) == 0) {
|
2012-05-06 19:22:54 +02:00
|
|
|
return s->totdisp / (side * side);
|
2019-04-22 01:39:35 +02:00
|
|
|
}
|
2010-11-04 17:00:28 +01:00
|
|
|
lvl--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-04-06 12:07:27 +02:00
|
|
|
/* Scale the object's multires displacements by the object's own scale,
 * e.g. so displacement detail is preserved when object scale is applied. */
void multiresModifier_scale_disp(struct Depsgraph *depsgraph, Scene *scene, Object *ob)
{
  /* Object's scale matrix. */
  float scale_mat[3][3];
  BKE_object_scale_to_mat3(ob, scale_mat);

  multires_apply_smat(depsgraph, scene, ob, scale_mat);
}
|
|
|
|
|
2018-04-06 12:07:27 +02:00
|
|
|
void multiresModifier_prepare_join(struct Depsgraph *depsgraph,
|
|
|
|
Scene *scene,
|
|
|
|
Object *ob,
|
|
|
|
Object *to_ob)
|
2010-10-25 10:03:05 +02:00
|
|
|
{
|
|
|
|
float smat[3][3], tmat[3][3], mat[3][3];
|
2018-04-05 18:20:27 +02:00
|
|
|
multires_sync_levels(scene, to_ob, ob);
|
2010-10-25 10:03:05 +02:00
|
|
|
|
|
|
|
/* construct scale matrix for displacement */
|
2012-05-05 16:03:12 +02:00
|
|
|
BKE_object_scale_to_mat3(to_ob, tmat);
|
2010-10-25 10:03:05 +02:00
|
|
|
invert_m3(tmat);
|
2012-05-05 16:03:12 +02:00
|
|
|
BKE_object_scale_to_mat3(ob, smat);
|
2010-10-25 10:03:05 +02:00
|
|
|
mul_m3_m3m3(mat, smat, tmat);
|
|
|
|
|
2018-04-06 12:07:27 +02:00
|
|
|
multires_apply_smat(depsgraph, scene, ob, mat);
|
2010-10-25 10:03:05 +02:00
|
|
|
}
|
2010-11-04 17:00:28 +01:00
|
|
|
|
2012-04-16 13:03:42 +02:00
|
|
|
/* After a topology change, make sure every loop has an allocated MDisps
 * block: CustomData would otherwise erase the whole displacement layer when
 * it encounters loops with missing data.
 *
 * The per-corner grid size is taken from the first loop that already has
 * displacements; new blocks are zero-filled (no displacement). */
void multires_topology_changed(Mesh *me)
{
  MDisps *mdisp = NULL, *cur = NULL;
  int i, grid = 0;

  CustomData_external_read(&me->ldata, &me->id, CD_MASK_MDISPS, me->totloop);
  mdisp = CustomData_get_layer(&me->ldata, CD_MDISPS);

  if (!mdisp) {
    return;
  }

  /* Find the displacement count from the first loop that has any.
   * Fix: previously this read `mdisp->totdisp` (always loop 0), so `grid`
   * stayed 0 whenever loop 0 had no displacements even though a later loop
   * did — leaving new loops unallocated. Read from `cur` instead. */
  cur = mdisp;
  for (i = 0; i < me->totloop; i++, cur++) {
    if (cur->totdisp) {
      grid = cur->totdisp;
      break;
    }
  }

  for (i = 0; i < me->totloop; i++, mdisp++) {
    /* Allocate memory for mdisp, the whole disp layer would be erased otherwise. */
    if ((!mdisp->totdisp || !mdisp->disps) && grid) {
      mdisp->totdisp = grid;
      mdisp->disps = MEM_calloc_arrayN(mdisp->totdisp, sizeof(float[3]), "mdisp topology");
    }
  }
}
|
2010-12-13 22:22:30 +01:00
|
|
|
|
2020-03-13 16:13:32 +01:00
|
|
|
/* Ensure displacements stored in an external file are loaded into the mesh's
 * CD_MDISPS layer at the given top level.
 *
 * Does nothing when the layer is not marked as externally stored. Otherwise
 * the layer is (re)allocated if missing, every loop's block is reset to the
 * expected size/level for 'top_level' (freeing blocks stored at a different
 * level), and the external read fills in the actual vectors. */
void multires_ensure_external_read(struct Mesh *mesh, int top_level)
{
  if (!CustomData_external_test(&mesh->ldata, CD_MDISPS)) {
    return;
  }

  MDisps *mdisps = CustomData_get_layer(&mesh->ldata, CD_MDISPS);
  if (mdisps == NULL) {
    mdisps = CustomData_add_layer(&mesh->ldata, CD_MDISPS, CD_SET_DEFAULT, NULL, mesh->totloop);
  }

  const int totloop = mesh->totloop;

  for (int i = 0; i < totloop; ++i) {
    if (mdisps[i].level != top_level) {
      /* Stored at a different level: drop the vectors so they get
       * re-allocated at the right size by the external read below. */
      MEM_SAFE_FREE(mdisps[i].disps);
    }

    /* NOTE: CustomData_external_read will take care of allocation of displacement vectors if
     * they are missing. */

    const int totdisp = multires_grid_tot[top_level];
    mdisps[i].totdisp = totdisp;
    mdisps[i].level = top_level;
  }

  CustomData_external_read(&mesh->ldata, &mesh->id, CD_MASK_MDISPS, mesh->totloop);
}
|
|
|
|
/* Convenience wrapper: load external displacements at the modifier's own
 * total level. See multires_ensure_external_read(). */
void multiresModifier_ensure_external_read(struct Mesh *mesh, const MultiresModifierData *mmd)
{
  multires_ensure_external_read(mesh, mmd->totlvl);
}
|
|
|
|
|
2011-01-02 17:43:28 +01:00
|
|
|
/***************** Multires interpolation stuff *****************/
|
|
|
|
|
2015-07-30 14:43:58 +02:00
|
|
|
/* Map a face-local (u, v) sample position (in a face_side * face_side grid)
 * to the corner index S of the face and that corner's local grid coordinates
 * (*x, *y).
 *
 * Quads are split into four quadrants around the face center ('offset');
 * triangles use barycentric-style weights. For ngons the mapping is not
 * implemented (see the disabled code below): corner 0 and (0, 0) are
 * returned as a placeholder.
 *
 * Returns the corner index S; writes the corner-local coordinates to *x/*y
 * (left unwritten only in code paths that cannot be reached, since every
 *  branch below assigns them). */
int mdisp_rot_face_to_crn(struct MVert *UNUSED(mvert),
                          struct MPoly *mpoly,
                          struct MLoop *UNUSED(mloop),
                          const struct MLoopTri *UNUSED(lt),
                          const int face_side,
                          const float u,
                          const float v,
                          float *x,
                          float *y)
{
  /* Center of the face grid; quadrant boundaries for the quad case. */
  const float offset = face_side * 0.5f - 0.5f;
  int S = 0;

  if (mpoly->totloop == 4) {
    /* Pick the quadrant. NOTE(review): the last test uses `v >= offset`
     * rather than `v > offset`; the v == offset case is already taken by the
     * first branch, so the conditions still partition the domain. */
    if (u <= offset && v <= offset) {
      S = 0;
    }
    else if (u > offset && v <= offset) {
      S = 1;
    }
    else if (u > offset && v > offset) {
      S = 2;
    }
    else if (u <= offset && v >= offset) {
      S = 3;
    }

    /* Rotate (u, v) into the chosen corner's local frame; note the axis swap
     * for corners 0 and 2. */
    if (S == 0) {
      *y = offset - u;
      *x = offset - v;
    }
    else if (S == 1) {
      *x = u - offset;
      *y = offset - v;
    }
    else if (S == 2) {
      *y = u - offset;
      *x = v - offset;
    }
    else if (S == 3) {
      *x = offset - u;
      *y = v - offset;
    }
  }
  else if (mpoly->totloop == 3) {
    /* Triangle: derive the third barycentric weight and pick the corner with
     * the dominant weight, then remap the two remaining weights to grid
     * coordinates. */
    int grid_size = offset;
    float w = (face_side - 1) - u - v;
    float W1, W2;

    if (u >= v && u >= w) {
      S = 0;
      W1 = w;
      W2 = v;
    }
    else if (v >= u && v >= w) {
      S = 1;
      W1 = u;
      W2 = w;
    }
    else {
      S = 2;
      W1 = v;
      W2 = u;
    }

    /* Normalize the weights to [0, 1]. */
    W1 /= (face_side - 1);
    W2 /= (face_side - 1);

    *x = (1 - (2 * W1) / (1 - W2)) * grid_size;
    *y = (1 - (2 * W2) / (1 - W1)) * grid_size;
  }
  else {
    /* the complicated ngon case: find the actual coordinate from
     * the barycentric coordinates and finally find the closest vertex
     * should work reliably for convex cases only but better than nothing */

#if 0
    int minS, i;
    float mindist = FLT_MAX;

    for (i = 0; i < mpoly->totloop; i++) {
      float len = len_v3v3(NULL, mvert[mloop[mpoly->loopstart + i].v].co);
      if (len < mindist) {
        mindist = len;
        minS = i;
      }
    }
    S = minS;
#endif
    /* temp not implemented yet and also not working properly in current master.
     * (was worked around by subdividing once) */
    S = 0;
    *x = 0;
    *y = 0;
  }

  return S;
}
|