Cleanup: spelling in comments

Campbell Barton 2023-04-12 11:24:10 +10:00
parent 5df8e35da7
commit ccea39b538
13 changed files with 70 additions and 66 deletions

View File

@@ -244,7 +244,7 @@ static void export_pointcloud_motion(PointCloud *pointcloud,
const int num_points = pointcloud->num_points();
/* Point cloud attributes are stored as float4 with the radius in the w element.
* This is explict now as float3 is no longer interchangeable with float4 as it
* This is explicit now as float3 is no longer interchangeable with float4 as it
* is packed now. */
float4 *mP = attr_mP->data_float4() + motion_step * num_points;
bool have_motion = false;
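For context on why the comment stresses the packed layout: once float3 is tightly packed, a float4 attribute can no longer be reinterpreted as a float3 plus padding, so position and radius must be unpacked explicitly. A minimal self-contained sketch of that unpacking, using simplified stand-in structs rather than Cycles' actual float3/float4 types:

```cpp
#include <cstdio>

/* Stand-in types for illustration only; Cycles defines its own
 * packed float3 and float4 types. */
struct float3 { float x, y, z; };
struct float4 { float x, y, z, w; };

int main() {
  /* One motion-step sample per point: xyz = position, w = radius. */
  const float4 motion_samples[2] = {{0.0f, 1.0f, 2.0f, 0.05f},
                                    {3.0f, 4.0f, 5.0f, 0.10f}};

  for (const float4 &s : motion_samples) {
    /* Copy the position field by field; reinterpreting the float4 as a
     * float3 is no longer valid once float3 is tightly packed. */
    const float3 position = {s.x, s.y, s.z};
    const float radius = s.w;
    std::printf("pos (%g, %g, %g) radius %g\n",
                position.x, position.y, position.z, radius);
  }
  return 0;
}
```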

View File

@@ -1167,8 +1167,8 @@ BVHNode *BVHBuild::create_leaf_node(const BVHRange &range, const vector<BVHRefer
void BVHBuild::rotate(BVHNode *node, int max_depth, int iterations)
{
/* in tested scenes, this resulted in slightly slower raytracing, so disabled
* it for now. could be implementation bug, or depend on the scene */
/* In tested scenes, this resulted in slightly slower ray-tracing, so disabled
* it for now. could be implementation bug, or depend on the scene. */
if (node)
for (int i = 0; i < iterations; i++)
rotate(node, max_depth);

View File

@@ -181,7 +181,7 @@ string OptiXDevice::compile_kernel_get_common_cflags(const uint kernel_features)
/* Add OptiX SDK include directory to include paths. */
common_cflags += string_printf(" -I\"%s\"", get_optix_include_dir().c_str());
/* Specialization for shader raytracing. */
/* Specialization for shader ray-tracing. */
if (kernel_features & KERNEL_FEATURE_NODE_RAYTRACE) {
common_cflags += " --keep-device-functions";
}
@@ -483,7 +483,7 @@ bool OptiXDevice::load_kernels(const uint kernel_features)
group_descs[PG_HITL].hitgroup.entryFunctionNameAH = "__anyhit__kernel_optix_local_hit";
}
/* Shader raytracing replaces some functions with direct callables. */
/* Shader ray-tracing replaces some functions with direct callables. */
if (kernel_features & KERNEL_FEATURE_NODE_RAYTRACE) {
group_descs[PG_RGEN_SHADE_SURFACE_RAYTRACE].kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
group_descs[PG_RGEN_SHADE_SURFACE_RAYTRACE].raygen.module = optix_module;
@@ -584,7 +584,7 @@ bool OptiXDevice::load_kernels(const uint kernel_features)
load_osl_kernels();
}
else if (kernel_features & (KERNEL_FEATURE_NODE_RAYTRACE | KERNEL_FEATURE_MNEE)) {
/* Create shader raytracing and MNEE pipeline. */
/* Create shader ray-tracing and MNEE pipeline. */
vector<OptixProgramGroup> pipeline_groups;
pipeline_groups.reserve(NUM_PROGRAM_GROUPS);
if (kernel_features & KERNEL_FEATURE_NODE_RAYTRACE) {

View File

@@ -834,7 +834,7 @@ enum ShaderDataFlag {
SD_NEED_VOLUME_ATTRIBUTES = (1 << 28),
/* Shader has emission */
SD_HAS_EMISSION = (1 << 29),
/* Shader has raytracing */
/* Shader has ray-tracing. */
SD_HAS_RAYTRACE = (1 << 30),
/* Use back side for direct light sampling. */
SD_MIS_BACK = (1 << 31),

View File

@@ -2157,7 +2157,7 @@ char *GHOST_SystemX11::getClipboard(bool selection) const
Atom target = m_atom.UTF8_STRING;
Window owner;
/* from xclip.c doOut() v0.11 */
/* From `xclip.c` `doOut()` v0.11. */
char *sel_buf;
ulong sel_len = 0;
XEvent evt;
@@ -2614,13 +2614,13 @@ static bool match_token(const char *haystack, const char *needle)
/* Determining if an X device is a Tablet style device is an imperfect science.
* We rely on common conventions around device names as well as the type reported
* by Wacom tablets. This code will likely need to be expanded for alternate tablet types
* by WACOM tablets. This code will likely need to be expanded for alternate tablet types
*
* Wintab refers to any device that interacts with the tablet as a cursor,
* WINTAB refers to any device that interacts with the tablet as a cursor,
* (stylus, eraser, tablet mouse, airbrush, etc)
* this is not to be confused with wacom x11 configuration "cursor" device.
* Wacoms x11 config "cursor" refers to its device slot (which we mirror with
* our gSysCursors) for puck like devices (tablet mice essentially).
* this is not to be confused with WACOM X11 configuration "cursor" device.
* WACOM tablets X11 configuration "cursor" refers to its device slot (which we mirror with
* our `gSysCursors`) for puck like devices (tablet mice essentially).
*/
static GHOST_TTabletMode tablet_mode_from_name(const char *name, const char *type)
{

View File

@@ -524,7 +524,8 @@ typedef struct SculptAttribute {
/* Sculpt usage */
SculptAttributeParams params;
/* Used to keep track of which preallocated SculptAttribute instances
/**
* Used to keep track of which pre-allocated SculptAttribute instances
* inside of SculptSession.temp_attribute are used.
*/
bool used;
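The `used` flag above implements a simple pre-allocated slot pool. A self-contained sketch of that pattern, with hypothetical names and a fixed pool size chosen only for illustration (not Blender's actual SculptSession code):

```cpp
#include <array>

/* Hypothetical, simplified stand-in for the pattern the comment describes:
 * a fixed pool of pre-allocated attribute slots, where `used` marks which
 * entries are currently handed out. */
struct TempAttribute {
  bool used = false;
  /* ... attribute payload would live here ... */
};

struct TempAttributePool {
  std::array<TempAttribute, 32> slots;

  TempAttribute *acquire() {
    for (TempAttribute &slot : slots) {
      if (!slot.used) {
        slot.used = true;
        return &slot;
      }
    }
    return nullptr; /* Pool exhausted. */
  }

  void release(TempAttribute *attr) {
    attr->used = false;
  }
};

int main() {
  TempAttributePool pool;
  TempAttribute *a = pool.acquire(); /* First free slot is handed out. */
  TempAttribute *b = pool.acquire(); /* Next free slot. */
  pool.release(a);                   /* Slot becomes reusable. */
  (void)b;
  return 0;
}
```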

View File

@@ -119,14 +119,14 @@ typedef struct PTCacheID {
unsigned int default_step;
unsigned int max_step;
/* flags defined in DNA_object_force_types.h */
/** flags defined in `DNA_object_force_types.h`. */
unsigned int data_types, info_types;
/* Copies point data to cache data. */
/** Copies point data to cache data. */
int (*write_point)(int index, void *calldata, void **data, int cfra);
/* Copies cache data to point data. */
/** Copies cache data to point data. */
void (*read_point)(int index, void *calldata, void **data, float cfra, const float *old_data);
/* Interpolated between previously read point data and cache data. */
/** Interpolated between previously read point data and cache data. */
void (*interpolate_point)(int index,
void *calldata,
void **data,
@@ -135,32 +135,34 @@ typedef struct PTCacheID {
float cfra2,
const float *old_data);
/* copies point data to cache data */
/** Copies point data to cache data. */
int (*write_stream)(PTCacheFile *pf, void *calldata);
/* copies cache cata to point data */
/** Copies cache data to point data. */
int (*read_stream)(PTCacheFile *pf, void *calldata);
/* copies custom extradata to cache data */
/** Copies custom #PTCacheMem::extradata to cache data. */
void (*write_extra_data)(void *calldata, struct PTCacheMem *pm, int cfra);
/* copies custom extradata to cache data */
/** Copies custom #PTCacheMem::extradata to cache data. */
void (*read_extra_data)(void *calldata, struct PTCacheMem *pm, float cfra);
/* copies custom extradata to cache data */
/** Copies custom #PTCacheMem::extradata to cache data */
void (*interpolate_extra_data)(
void *calldata, struct PTCacheMem *pm, float cfra, float cfra1, float cfra2);
/* Total number of simulated points
* (the cfra parameter is just for using same function pointer with totwrite). */
/**
* Total number of simulated points
* (the `cfra` parameter is just for using same function pointer with `totwrite`).
*/
int (*totpoint)(void *calldata, int cfra);
/* report error if number of points does not match */
/** Report error if number of points does not match */
void (*error)(const struct ID *owner_id, void *calldata, const char *message);
/* number of points written for current cache frame */
/** Number of points written for current cache frame. */
int (*totwrite)(void *calldata, int cfra);
int (*write_header)(PTCacheFile *pf);
int (*read_header)(PTCacheFile *pf);
struct PointCache *cache;
/* used for setting the current cache from ptcaches list */
/** Used for setting the current cache from `ptcaches` list. */
struct PointCache **cache_ptr;
struct ListBase *ptcaches;
} PTCacheID;
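PTCacheID is essentially a table of callbacks that a simulation system fills in so the generic point-cache code can read and write its data per point. A reduced, self-contained sketch of that callback-table pattern, with stand-in types and only a few of the callbacks (illustrative only, not Blender's actual cache API):

```cpp
#include <cstdio>

/* Simplified stand-in for the callback table above; the real PTCacheID
 * carries many more fields and Blender-specific types. */
struct CacheCallbacks {
  /* Copies point data to cache data. */
  int (*write_point)(int index, void *calldata, float *data, int cfra);
  /* Copies cache data to point data. */
  void (*read_point)(int index, void *calldata, const float *data, float cfra);
  /* Total number of simulated points. */
  int (*totpoint)(void *calldata, int cfra);
};

struct Particles {
  float positions[4][3];
};

static int particles_totpoint(void * /*calldata*/, int /*cfra*/) {
  return 4;
}

static int particles_write_point(int index, void *calldata, float *data, int /*cfra*/) {
  const Particles *psys = static_cast<Particles *>(calldata);
  for (int i = 0; i < 3; i++) {
    data[i] = psys->positions[index][i];
  }
  return 1; /* Non-zero: the point was written. */
}

int main() {
  Particles psys = {{{0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {0, 0, 1}}};
  /* The read path is omitted here, so read_point stays null. */
  CacheCallbacks cb = {particles_write_point, nullptr, particles_totpoint};

  float buffer[3];
  const int cfra = 1;
  for (int i = 0; i < cb.totpoint(&psys, cfra); i++) {
    cb.write_point(i, &psys, buffer, cfra);
    std::printf("cached point %d: (%g, %g, %g)\n", i, buffer[0], buffer[1], buffer[2]);
  }
  return 0;
}
```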

View File

@@ -139,7 +139,7 @@ static void mask_blend_read_data(BlendDataReader *reader, ID *id)
BLO_read_list(reader, &mask->masklayers);
LISTBASE_FOREACH (MaskLayer *, masklay, &mask->masklayers) {
/* can't use newdataadr since it's a pointer within an array */
/* Can't use #newdataadr since it's a pointer within an array. */
MaskSplinePoint *act_point_search = NULL;
BLO_read_list(reader, &masklay->splines);

View File

@@ -37,13 +37,13 @@ struct MultiresDisplacementData {
const MultiresModifierData *mmd;
blender::OffsetIndices<int> polys;
const MDisps *mdisps;
/* Indexed by ptex face index, contains polygon/corner which corresponds
/* Indexed by PTEX face index, contains polygon/corner which corresponds
* to it.
*
* NOTE: For quad polygon this is an index of first corner only, since
* there we only have one ptex. */
* there we only have one PTEX. */
PolyCornerIndex *ptex_poly_corner;
/* Indexed by coarse face index, returns first ptex face index corresponding
/* Indexed by coarse face index, returns first PTEX face index corresponding
* to that coarse face. */
int *face_ptex_offset;
/* Sanity check, is used in debug builds.
@@ -52,7 +52,7 @@
};
/* Denotes which grid to use to average value of the displacement read from the
* grid which corresponds to the ptex face. */
* grid which corresponds to the PTEX face. */
typedef enum eAverageWith {
AVERAGE_WITH_NONE,
AVERAGE_WITH_ALL,
@@ -175,12 +175,12 @@ static void average_read_displacement_object(MultiresDisplacementData *data,
{
const PolyCornerIndex *poly_corner = &data->ptex_poly_corner[ptex_face_index];
const int num_corners = data->polys[poly_corner->poly_index].size();
/* Get (u, v) coordinate within the other ptex face which corresponds to
/* Get (u, v) coordinate within the other PTEX face which corresponds to
* the grid coordinates. */
float u, v;
average_convert_grid_coord_to_ptex(num_corners, corner_index, grid_u, grid_v, &u, &v);
/* Construct tangent matrix which corresponds to partial derivatives
* calculated for the other ptex face. */
* calculated for the other PTEX face. */
float tangent_matrix[3][3];
average_construct_tangent_matrix(
data->subdiv, num_corners, ptex_face_index, corner_index, u, v, tangent_matrix);
@@ -208,7 +208,7 @@ static void average_get_other_ptex_and_corner(MultiresDisplacementData *data,
start_ptex_face_index + *r_other_corner_index;
}
/* NOTE: Grid coordinates are relatiev to the other grid already. */
/* NOTE: Grid coordinates are relative to the other grid already. */
static void average_with_other(SubdivDisplacement *displacement,
const int ptex_face_index,
const int corner,
@@ -380,7 +380,7 @@ static void displacement_data_init_mapping(SubdivDisplacement *displacement, con
const int num_ptex_faces = count_num_ptex_faces(mesh);
/* Allocate memory. */
data->ptex_poly_corner = static_cast<PolyCornerIndex *>(
MEM_malloc_arrayN(num_ptex_faces, sizeof(*data->ptex_poly_corner), "ptex poly corner"));
MEM_malloc_arrayN(num_ptex_faces, sizeof(*data->ptex_poly_corner), "PTEX poly corner"));
/* Fill in offsets. */
int ptex_face_index = 0;
PolyCornerIndex *ptex_poly_corner = data->ptex_poly_corner;
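The NOTE above implies a simple counting rule: a quad maps to a single PTEX face, while any other n-gon gets one PTEX face per corner. A self-contained sketch of how a per-face offset table could be built from that rule (illustrative names, not Blender's actual implementation):

```cpp
#include <cstdio>
#include <vector>

/* Illustrative only: one PTEX face per quad, one per corner otherwise,
 * mirroring the NOTE in the comment above. */
static int ptex_faces_for_polygon(int corner_count) {
  return corner_count == 4 ? 1 : corner_count;
}

int main() {
  /* Corner counts of some coarse polygons: a quad, a triangle, a pentagon. */
  const std::vector<int> corner_counts = {4, 3, 5};

  /* face_ptex_offset[i] = index of the first PTEX face of coarse face i. */
  std::vector<int> face_ptex_offset;
  int num_ptex_faces = 0;
  for (const int corners : corner_counts) {
    face_ptex_offset.push_back(num_ptex_faces);
    num_ptex_faces += ptex_faces_for_polygon(corners);
  }

  std::printf("total PTEX faces: %d\n", num_ptex_faces); /* 1 + 3 + 5 = 9 */
  for (size_t i = 0; i < face_ptex_offset.size(); i++) {
    std::printf("coarse face %zu starts at PTEX face %d\n", i, face_ptex_offset[i]);
  }
  return 0;
}
```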

View File

@@ -172,7 +172,7 @@ class Operation {
void release_inputs();
/* Release the results that were allocated in the execute method but are not actually needed.
* This can be the case if the execute method allocated a dummy texture for an unndeeded result,
* This can be the case if the execute method allocated a dummy texture for an unneeded result,
* see the description of Result::allocate_texture() for more information. This is called after
* the evaluation of the operation. */
void release_unneeded_results();

View File

@@ -41,7 +41,7 @@ class ShadingView {
/** Matrix to apply to the viewmat. */
const float4x4 &face_matrix_;
/** Raytracing persistent buffers. Only opaque and refraction can have surface tracing. */
/** Ray-tracing persistent buffers. Only opaque and refraction can have surface tracing. */
// RaytraceBuffer rt_buffer_opaque_;
// RaytraceBuffer rt_buffer_refract_;
DepthOfFieldBuffer dof_buffer_;

View File

@@ -1714,7 +1714,7 @@ static int gpencil_strokes_paste_exec(bContext *C, wmOperator *op)
* doesn't exist already depending on REC button status.
*/
/* Multiframe paste. */
/* Multi-frame paste. */
if (is_multiedit) {
for (bGPDframe *gpf = init_gpf; gpf; gpf = gpf->next) {
/* Active frame is copied later, so don't need duplicate the stroke here. */

View File

@@ -690,34 +690,34 @@ class GlareOperation : public NodeOperation {
* Fog Glow Glare.
* --------------- */
/* Fog glow is computed by first progressively half-downsampling the highlights down to a certain
* size, then progressively double-upsampling the last downsampled result up to the original size
* of the highlights, adding the downsampled result of the same size in each upsampling step.
* This can be illustrated as follows:
/* Fog glow is computed by first progressively half-down-sampling the highlights down to a
* certain size, then progressively double-up-sampling the last down-sampled result up to the
* original size of the highlights, adding the down-sampled result of the same size in each
* up-sampling step. This can be illustrated as follows:
*
* Highlights ---+---> Fog Glare
* Highlights ---+---> Fog Glare
* | |
* Downsampled ---+---> Upsampled
* Down-sampled ---+---> Up-sampled
* | |
* Downsampled ---+---> Upsampled
* Down-sampled ---+---> Up-sampled
* | |
* Downsampled ---+---> Upsampled
* Down-sampled ---+---> Up-sampled
* | ^
* ... |
* Downsampled ------------'
* Down-sampled ------------'
*
* The smooth downsampling followed by smooth upsampling can be thought of as a cheap way to
* approximate a large radius blur, and adding the corresponding downsampled result while
* upsampling is done to counter the attenuation that happens during downsampling.
* The smooth down-sampling followed by smooth up-sampling can be thought of as a cheap way to
* approximate a large radius blur, and adding the corresponding down-sampled result while
* up-sampling is done to counter the attenuation that happens during down-sampling.
*
* Smaller downsampled results contribute to larger glare size, so controlling the size can be
* done by stopping downsampling down to a certain size, where the maximum possible size is
* achieved when downsampling happens down to the smallest size of 2. */
* Smaller down-sampled results contribute to larger glare size, so controlling the size can be
* done by stopping down-sampling down to a certain size, where the maximum possible size is
* achieved when down-sampling happens down to the smallest size of 2. */
Result execute_fog_glow(Result &highlights_result)
{
/* The maximum possible glare size is achieved when we downsampled down to the smallest size of
* 2, which would result in a downsampling chain length of the binary logarithm of the smaller
* dimension of the size of the highlights.
/* The maximum possible glare size is achieved when we down-sampled down to the smallest size
* of 2, which would result in a down-sampling chain length of the binary logarithm of the
* smaller dimension of the size of the highlights.
*
* However, as users might want a smaller glare size, we reduce the chain length by the halving
* count supplied by the user. */
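A self-contained sketch of the chain-length reasoning described above: the maximum chain length is the binary logarithm of the smaller dimension of the highlights, reduced by the user's halving count, and a chain of length n needs n - 1 up-sampling passes. The function names and the clamp to a minimum of 1 are illustrative assumptions, not the node's exact code:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

/* Sketch of the chain-length logic described in the comment above. */
static int compute_chain_length(int width, int height, int user_halving_count) {
  /* Down-sampling to the smallest size of 2 gives a chain length equal to
   * the binary logarithm of the smaller dimension. */
  const int smaller_dimension = std::min(width, height);
  const int max_chain_length = int(std::log2(smaller_dimension));
  /* A smaller glare size stops the chain earlier. */
  return std::max(1, max_chain_length - user_halving_count);
}

int main() {
  const int chain_length = compute_chain_length(1920, 1080, 3);
  std::printf("chain length: %d\n", chain_length);

  /* Down-sample pass i produces level i + 1 from level i ... */
  for (int i = 0; i < chain_length - 1; i++) {
    std::printf("down-sample: level %d -> level %d\n", i, i + 1);
  }
  /* ... then each up-sample pass adds the result into the next larger
   * level, which is why a chain of length n needs n - 1 up-sample passes. */
  for (int i = chain_length - 1; i > 0; i--) {
    std::printf("up-sample: level %d -> level %d (added to it)\n", i, i - 1);
  }
  return 0;
}
```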
@@ -729,7 +729,7 @@ class GlareOperation : public NodeOperation {
Array<Result> downsample_chain = compute_fog_glow_downsample_chain(highlights_result,
chain_length);
/* Notice that for a chain length of n, we need (n - 1) upsampling passes. */
/* Notice that for a chain length of n, we need (n - 1) up-sampling passes. */
const IndexRange upsample_passes_range(chain_length - 1);
GPUShader *shader = shader_manager().get("compositor_glare_fog_glow_upsample");
GPU_shader_bind(shader);
@@ -754,11 +754,12 @@ class GlareOperation : public NodeOperation {
return downsample_chain[0];
}
/* Progressively downsample the given result into a result with half the size for the given chain
* length, returning an array containing the chain of downsampled results. The first result of
* the chain is the given result itself for easier handling. The chain length is expected not
* to exceed the binary logarithm of the smaller dimension of the given result, because that
* would result in downsampling passes that produce useless textures with just one pixel. */
/* Progressively down-sample the given result into a result with half the size for the given
* chain length, returning an array containing the chain of down-sampled results. The first
* result of the chain is the given result itself for easier handling. The chain length is
* expected not to exceed the binary logarithm of the smaller dimension of the given result,
* because that would result in down-sampling passes that produce useless textures with just
* one pixel. */
Array<Result> compute_fog_glow_downsample_chain(Result &highlights_result, int chain_length)
{
const Result downsampled_result = Result::Temporary(ResultType::Color, texture_pool());
@@ -772,9 +772,9 @@ class GlareOperation : public NodeOperation {
GPUShader *shader;
for (const int i : downsample_passes_range) {
/* For the first downsample pass, we use a special "Karis" downsample pass that applies a
/* For the first down-sample pass, we use a special "Karis" down-sample pass that applies a
* form of local tone mapping to reduce the contributions of fireflies, see the shader for
* more information. Later passes use a simple average downsampling filter because fireflies
* more information. Later passes use a simple average down-sampling filter because fireflies
* doesn't service the first pass. */
if (i == downsample_passes_range.first()) {
shader = shader_manager().get("compositor_glare_fog_glow_downsample_karis_average");
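The "Karis" down-sample mentioned above is commonly formulated as a weighted average where each sample is weighted by 1 / (1 + luminance), so a single firefly cannot dominate the result. The sketch below shows that common formulation as an illustration only; it is an assumption, not the actual compositor_glare_fog_glow_downsample_karis_average shader:

```cpp
#include <cstdio>

/* Illustrative CPU-side sketch of a "Karis average" over a 2x2 block. */
struct Color {
  float r, g, b;
};

static float luminance(const Color &c) {
  /* Rec. 709 luma coefficients. */
  return 0.2126f * c.r + 0.7152f * c.g + 0.0722f * c.b;
}

static Color karis_average(const Color samples[4]) {
  Color sum = {0.0f, 0.0f, 0.0f};
  float total_weight = 0.0f;
  for (int i = 0; i < 4; i++) {
    /* Bright samples get proportionally smaller weights. */
    const float weight = 1.0f / (1.0f + luminance(samples[i]));
    sum.r += samples[i].r * weight;
    sum.g += samples[i].g * weight;
    sum.b += samples[i].b * weight;
    total_weight += weight;
  }
  return {sum.r / total_weight, sum.g / total_weight, sum.b / total_weight};
}

int main() {
  /* Three dim pixels and one firefly: the weighted average stays close to
   * the dim values instead of being dominated by the outlier. */
  const Color samples[4] = {{0.1f, 0.1f, 0.1f},
                            {0.2f, 0.2f, 0.2f},
                            {0.1f, 0.1f, 0.1f},
                            {100.0f, 100.0f, 100.0f}};
  const Color averaged = karis_average(samples);
  std::printf("karis average: (%g, %g, %g)\n", averaged.r, averaged.g, averaged.b);
  return 0;
}
```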