Cleanup: spelling in comments

Campbell Barton 2024-03-21 10:01:43 +11:00
parent 116264c310
commit 57dd9c21d3
21 changed files with 38 additions and 36 deletions

View File

@@ -119,7 +119,7 @@ BVHMetal::BVHMetal(const BVHParams &params_,
BVHMetal::~BVHMetal()
{
- /* Clear point used by enqueuing. */
+ /* Clear point used by enqueueing. */
device->release_bvh(this);
if (@available(macos 12.0, *)) {

View File

@@ -2678,7 +2678,7 @@ class VIEW3D_MT_image_add(Menu):
def draw(self, _context):
layout = self.layout
- # Expliclitly set background mode on/off as operator will try to
+ # Explicitly set background mode on/off as operator will try to
# auto detect which mode to use otherwise.
layout.operator("object.empty_image_add", text="Reference", icon='IMAGE_REFERENCE').background = False
layout.operator("object.empty_image_add", text="Background", icon='IMAGE_BACKGROUND').background = True

View File

@@ -298,7 +298,7 @@ void BKE_lattice_resize(Lattice *lt, int uNew, int vNew, int wNew, Object *ltOb)
calc_lat_fudu(lt->flag, wNew, &fw, &dw);
/* If old size is different than resolution changed in interface,
- * try to do clever reinit of points. Pretty simply idea, we just
+ * try to do clever reinitialize of points. Pretty simply idea, we just
* deform new verts by old lattice, but scaling them to match old
* size first.
*/

View File

@@ -250,7 +250,7 @@ template<typename Function> inline void isolate_task(const Function &function)
/**
* Should surround parallel code that is highly bandwidth intensive, e.g. it just fills a buffer
- * with no or just few additional operations. If the buffers are large, it's benefitial to limit
+ * with no or just few additional operations. If the buffers are large, it's beneficial to limit
* the number of threads doing the work because that just creates more overhead on the hardware
* level and doesn't provide a notable performance benefit beyond a certain point.
*/
@@ -261,7 +261,7 @@ inline void memory_bandwidth_bound_task(const int64_t approximate_bytes_touched,
/* Don't limit threading when all touched memory can stay in the CPU cache, because there a much
* higher memory bandwidth is available compared to accessing RAM. This value is supposed to be
* on the order of the L3 cache size. Accessing that value is not quite straight forward and even
- * if it was, it's not clear if using the exact cache size would be benefitial because there is
+ * if it was, it's not clear if using the exact cache size would be beneficial because there is
* often more stuff going on on the CPU at the same time. */
if (approximate_bytes_touched <= 8 * 1024 * 1024) {
function();

View File

@@ -467,10 +467,11 @@ static void evaluate_coarse_difference(const Span<DifferenceCourseBoundary> boun
* #CoarseSegment::Type::Unknown. Those segments can be evaluated in more detail afterwards.
*
* \param root_expression: Expression to be evaluated.
- * \param eval_order: Pre-computed evaluation order. All children of a term must come before
- *   the term itself.
- * \param eval_bounds: If given, the evaluation is restriced to those bounds. Otherwise, the full
- *   referenced masks are used.
+ * \param eval_order: Pre-computed evaluation order.
+ *   All children of a term must come before the term itself.
+ * \param eval_bounds: If given, the evaluation is restricted to those bounds.
+ *   Otherwise, the full
+ *   referenced masks are used.
*/
static CoarseResult evaluate_coarse(const Expr &root_expression,
const Span<const Expr *> eval_order,
@@ -1148,7 +1149,7 @@ static void evaluate_short_unknown_segments_exactly(
}
case ExactEvalMode::Indices: {
/* #evaluate_exact_with_indices requires that all index masks have a single segment in the
- * provided bounds. So split up the range into subranges first if necessary. */
+ * provided bounds. So split up the range into sub-ranges first if necessary. */
Vector<int64_t, 16> split_indices;
/* Always adding the beginning and end of the bounds simplifies the code below. */
split_indices.extend({bounds.first(), bounds.one_after_last()});
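
The eval_order contract documented above (all children of a term come before the term itself) is exactly what a post-order traversal produces. A minimal C++ sketch, assuming a hypothetical Expr stand-in rather than the actual index-mask expression type:

#include <vector>

struct Expr {
  std::vector<const Expr *> children; /* Hypothetical stand-in type. */
};

/* Post-order traversal: children are always appended before their parent,
 * which satisfies the "children of a term come before the term" contract.
 * For shared sub-expressions (a DAG rather than a tree), a visited set
 * would additionally be needed to avoid appending a term twice. */
static void append_post_order(const Expr &expr, std::vector<const Expr *> &r_order)
{
  for (const Expr *child : expr.children) {
    append_post_order(*child, r_order);
  }
  r_order.push_back(&expr);
}

std::vector<const Expr *> compute_eval_order(const Expr &root_expression)
{
  std::vector<const Expr *> order;
  append_post_order(root_expression, order);
  return order;
}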

View File

@@ -4800,8 +4800,9 @@ void lookat_m4(
i_multmatrix(mat1, mat);
- mat1[1][1] = mat1[2][2] = 1.0f; /* be careful here to reinit */
- mat1[1][2] = mat1[2][1] = 0.0f; /* those modified by the last */
+ /* Be careful here to reinitialize those modified by the last. */
+ mat1[1][1] = mat1[2][2] = 1.0f;
+ mat1[1][2] = mat1[2][1] = 0.0f;
/* paragraph */
if (hyp != 0.0f) { /* rotate Y */

View File

@@ -228,7 +228,7 @@ void memory_bandwidth_bound_task_impl(const FunctionRef<void()> function)
#ifdef WITH_TBB
/* This is the maximum number of threads that may perform these memory bandwidth bound tasks at
* the same time. Often fewer threads are already enough to use up the full bandwidth capacity.
- * Additional threads usually have a negilible benefit and can even make performance worse.
+ * Additional threads usually have a negligible benefit and can even make performance worse.
*
* It's better to use fewer threads here so that the CPU cores can do other tasks at the same
* time which may be more compute intensive. */
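
Together with the BLI_task header comment earlier in this diff, the pattern is: run small, cache-resident workloads directly on the calling thread, and cap the worker count for large bandwidth-bound ones. A rough stand-alone C++ sketch of that idea (not Blender's implementation; the 8 MiB threshold and the cap of 8 workers are illustrative values taken from the comments):

#include <algorithm>
#include <cstdint>
#include <functional>
#include <thread>
#include <vector>

void memory_bandwidth_bound_for_each(const int64_t approximate_bytes_touched,
                                     const int64_t items_num,
                                     const std::function<void(int64_t)> &fn)
{
  if (approximate_bytes_touched <= 8 * 1024 * 1024) {
    /* Likely fits in the L3 cache: threading overhead would outweigh any gain. */
    for (int64_t i = 0; i < items_num; i++) {
      fn(i);
    }
    return;
  }
  /* Cap the worker count: beyond a few threads the memory bandwidth is saturated. */
  const int64_t threads_num = std::min<int64_t>(
      8, std::max<int64_t>(1, std::thread::hardware_concurrency()));
  std::vector<std::thread> workers;
  for (int64_t t = 0; t < threads_num; t++) {
    workers.emplace_back([&fn, t, threads_num, items_num]() {
      /* Static chunking: worker t handles every threads_num-th item. */
      for (int64_t i = t; i < items_num; i += threads_num) {
        fn(i);
      }
    });
  }
  for (std::thread &worker : workers) {
    worker.join();
  }
}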

View File

@@ -314,7 +314,7 @@ static void area_add_window_regions(ScrArea *area, SpaceLink *sl, ListBase *lb)
case SPACE_ACTION: {
SpaceAction *saction = (SpaceAction *)sl;
- /* We totally reinit the view for the Action Editor,
+ /* We totally reinitialize the view for the Action Editor,
* as some old instances had some weird cruft set. */
region->v2d.tot.xmin = -20.0f;
region->v2d.tot.ymin = float(-area->winy) / 3.0f;

View File

@@ -28,7 +28,7 @@ float octahedral_texel_solid_angle(ivec2 local_texel,
/* Do not weight these border pixels that are redundant. */
return 0.0;
}
- /* Since we are puting texel centers on the edges of the octahedron, the shape of a texel can be
+ /* Since we are putting texel centers on the edges of the octahedron, the shape of a texel can be
* anything from a simple quad (at the Z=0 poles), to a 4 pointed start (at the Z=+-1 poles)
* passing by arrow tail shapes (at the X=0 and Y=0 edges). So while it would be more correct to
* account for all these shapes (using 8 triangles), it proves to be quite involved with all the
@@ -54,7 +54,7 @@ float octahedral_texel_solid_angle(ivec2 local_texel,
v02 = normalize(v02);
v12 = normalize(v12);
v22 = normalize(v22);
- #if 0 /* Has artifacts, is marginaly more correct. */
+ #if 0 /* Has artifacts, is marginally more correct. */
/* For some reason quad_solid_angle(v10, v20, v11, v21) gives some strange artifacts at Z=0. */
return 0.25 * (quad_solid_angle(v00, v10, v01, v11) + quad_solid_angle(v10, v20, v11, v21) +
quad_solid_angle(v01, v11, v02, v12) + quad_solid_angle(v11, v21, v12, v22));
@@ -132,7 +132,7 @@ void main()
/* TODO(fclem): Cleanup: Should spherical_harmonics_encode_signal_sample return a new sh
* instead of adding to it? */
spherical_harmonics_encode_signal_sample(L, local_radiance[0], sh);
- /* Outputs one SH for each threadgroup. */
+ /* Outputs one SH for each thread-group. */
uint work_group_index = gl_NumWorkGroups.x * gl_WorkGroupID.y + gl_WorkGroupID.x;
out_sh[work_group_index].L0_M0 = sh.L0.M0;
out_sh[work_group_index].L1_Mn1 = sh.L1.Mn1;
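
For the solid-angle hunk above: quad_solid_angle itself is not shown in this diff, but the quantity it computes can be assembled from spherical triangles. The solid angle of the triangle spanned by three unit vectors follows the Van Oosterom-Strackee formula, and a quad is just two such triangles. A minimal C++ sketch, independent of the shader code:

#include <array>
#include <cmath>

using float3 = std::array<float, 3>;

static float dot(const float3 &a, const float3 &b)
{
  return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}
static float3 cross(const float3 &a, const float3 &b)
{
  return {a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0]};
}

/* Van Oosterom-Strackee: solid angle of the spherical triangle spanned by
 * unit vectors a, b and c. For unit vectors the denominator simplifies to
 * 1 + a.b + a.c + b.c. */
float triangle_solid_angle(const float3 &a, const float3 &b, const float3 &c)
{
  const float numerator = std::fabs(dot(a, cross(b, c)));
  const float denominator = 1.0f + dot(a, b) + dot(a, c) + dot(b, c);
  return 2.0f * std::atan2(numerator, denominator);
}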

View File

@@ -74,7 +74,7 @@ SphericalHarmonicL1 volume_phase_function_as_sh_L1(vec3 V, float g)
/* Compute rotated zonal harmonic.
* From Bartlomiej Wronsky
* "Volumetric Fog: Unified compute shader based solution to atmospheric scattering" page 55
- * Siggraph 2014
+ * SIGGRAPH 2014
* https://bartwronski.files.wordpress.com/2014/08/bwronski_volumetric_fog_siggraph2014.pdf
*/
SphericalHarmonicL1 sh;

View File

@@ -214,7 +214,7 @@ class AbstractTreeViewItem : public AbstractViewItem, public TreeViewItemContain
*/
virtual bool set_collapsed(bool collapsed);
/**
- * Make this item be uncollapsed on first draw (may later be overriden by
+ * Make this item be uncollapsed on first draw (may later be overridden by
* #should_be_collapsed()). Must only be done during tree building.
*
* \note this does not call #on_collapse_change() or #set_collapsed() overrides.

View File

@@ -178,8 +178,8 @@ class LayerViewItem : public AbstractTreeViewItem {
bool supports_collapsing() const override
{
- /* This is a bit redundant since `LayerViewItem` can't have children. But being expplicit might
- * catch errors. */
+ /* This is a bit redundant since `LayerViewItem` can't have children.
+ * But being explicit might catch errors. */
return false;
}

View File

@@ -342,7 +342,7 @@ static void update_affected_nodes_by_clip_planes(GestureData &gesture_data)
case SelectionType::Outside:
/* Certain degenerate cases of a lasso shape can cause the resulting
* frustum planes to enclose a node's AABB, therefore we must submit it
- * to be more throughly evaluated. */
+ * to be more thoroughly evaluated. */
if (gesture_data.shape_type == ShapeType::Lasso) {
return true;
}

View File

@@ -1478,7 +1478,7 @@ bool calculateTransformCenter(bContext *C, int centerMode, float cent3d[3], floa
}
}
- /* Aftertrans does insert keyframes, and clears base flags; doesn't read transdata. */
+ /* Does insert keyframes, and clears base flags; doesn't read `transdata`. */
special_aftertrans_update(C, t);
postTrans(C, t);
@@ -2221,7 +2221,7 @@ int transformEnd(bContext *C, TransInfo *t)
exit_code = OPERATOR_FINISHED;
}
- /* Aftertrans does insert keyframes, and clears base flags; doesn't read transdata. */
+ /* Does insert keyframes, and clears base flags; doesn't read `transdata`. */
special_aftertrans_update(C, t);
/* Free data, also handles overlap [in freeTransCustomData()]. */

View File

@@ -15,7 +15,7 @@ namespace blender::geometry::boolean {
/** Specifies which solver to use. */
enum class Solver {
/**
- * The exact solver based on the Mesh Arrangments for Solid Geometry paper,
+ * The exact solver based on the Mesh Arrangements for Solid Geometry paper,
* by Zhou, Grinspun, Zorin, and Jacobson.
*/
MeshArr = 0,
@@ -54,9 +54,9 @@ struct BooleanOpParameters {
* If there are more than two meshes, the first mesh is operand 0 and the rest of the
* meshes are operand 1 (i.e., as if all of operands 1, ... are joined into one mesh.
* The exact solvers assume that the meshes are PWN (piecewise winding number,
- * which approximately means that the meshes are enclosed watertight voluems,
+ * which approximately means that the meshes are enclosed watertight volumes,
* and all edges are manifold, though there are allowable exceptions to that last condition).
- * If the meshes don't sastisfy those conditions, all solvers will try to use ray-shooting
+ * If the meshes don't satisfy those conditions, all solvers will try to use ray-shooting
* to determine whether particular faces survive or not. This may or may not work
* in the way the user hopes.
*
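
The ray-shooting fallback mentioned above is, in essence, the classic parity test: shoot a ray from a query point and count how many triangles it crosses; an odd count means the point is inside. A rough, self-contained C++ sketch of that idea (Moller-Trumbore intersection; illustrative only, not Blender's solver code, and without the robustness handling a real implementation needs):

#include <array>
#include <cmath>
#include <vector>

using float3 = std::array<float, 3>;

struct Triangle {
  float3 v0, v1, v2;
};

static float dot(const float3 &a, const float3 &b)
{
  return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}
static float3 cross(const float3 &a, const float3 &b)
{
  return {a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0]};
}
static float3 sub(const float3 &a, const float3 &b)
{
  return {a[0] - b[0], a[1] - b[1], a[2] - b[2]};
}

/* Moller-Trumbore ray/triangle test, counting only hits in front of the origin. */
static bool ray_hits_triangle(const float3 &origin, const float3 &dir, const Triangle &tri)
{
  const float epsilon = 1e-7f;
  const float3 e1 = sub(tri.v1, tri.v0);
  const float3 e2 = sub(tri.v2, tri.v0);
  const float3 p = cross(dir, e2);
  const float det = dot(e1, p);
  if (std::fabs(det) < epsilon) {
    return false; /* Ray is parallel to the triangle plane. */
  }
  const float inv_det = 1.0f / det;
  const float3 s = sub(origin, tri.v0);
  const float u = inv_det * dot(s, p);
  if (u < 0.0f || u > 1.0f) {
    return false;
  }
  const float3 q = cross(s, e1);
  const float v = inv_det * dot(dir, q);
  if (v < 0.0f || u + v > 1.0f) {
    return false;
  }
  return inv_det * dot(e2, q) > epsilon;
}

/* Parity test: an odd crossing count means `point` is inside the (assumed
 * watertight) mesh. Hits on edges/vertices are not handled here; robust
 * implementations perturb the ray or fall back to exact arithmetic. */
bool point_inside_mesh(const float3 &point, const std::vector<Triangle> &tris)
{
  const float3 dir = {0.577350269f, 0.577350269f, 0.577350269f}; /* Arbitrary unit direction. */
  int crossings = 0;
  for (const Triangle &tri : tris) {
    if (ray_hits_triangle(point, dir, tri)) {
      crossings++;
    }
  }
  return (crossings & 1) == 1;
}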

View File

@@ -1000,7 +1000,7 @@ static BMesh *mesh_bm_concat(Span<const Mesh *> meshes,
*r_looptris = looptris;
*r_looptris_tot = looptris_tot;
- /* Tranform the vertices that into the desired target_transform space. */
+ /* Transform the vertices that into the desired target_transform space. */
BMIter iter;
BMVert *eve;
int i = 0;
@@ -1131,7 +1131,7 @@ static Mesh *mesh_boolean_float(Span<const Mesh *> meshes,
BM_mesh_free(bm);
if (prev_result_mesh != nullptr) {
/* Except in the first iteration, two_meshes[0] holds the intermediate
- * mesh result from the previous iteraiton. */
+ * mesh result from the previous iteration. */
BKE_mesh_eval_delete(prev_result_mesh);
}
if (i < meshes.size() - 2) {

View File

@@ -1691,7 +1691,7 @@ void gpu::MTLTexture::read_internal(int mip,
/** Determine source read texture handle. */
id<MTLTexture> read_texture = texture_;
- /* Use textureview handle if reading from a GPU texture view. */
+ /* Use texture-view handle if reading from a GPU texture view. */
if (resource_mode_ == MTL_TEXTURE_MODE_TEXTURE_VIEW) {
read_texture = this->get_metal_handle();
}

View File

@@ -174,7 +174,7 @@ void GLVaoCache::clear()
if (context_) {
context_->vao_cache_unregister(this);
}
- /* Reinit. */
+ /* Reinitialize. */
this->init();
}

View File

@@ -673,7 +673,7 @@ void colormanagement_init()
}
}
- /* Then try bunded config file. */
+ /* Then try bundled configuration file. */
if (config == nullptr) {
const std::optional<std::string> configdir = BKE_appdir_folder_id(BLENDER_DATAFILES,
"colormanagement");

View File

@@ -1610,7 +1610,7 @@ static StructRNA *rna_Operator_register(Main *bmain,
}
/* XXX, this doubles up with the operator name #29666.
- * for now just remove from dir(bpy.types) */
+ * for now just remove from `dir(bpy.types)`. */
/* create a new operator type */
dummy_ot.rna_ext.srna = RNA_def_struct_ptr(&BLENDER_RNA, dummy_ot.idname, &RNA_Operator);
@@ -1783,7 +1783,7 @@ static StructRNA *rna_MacroOperator_register(Main *bmain,
}
/* XXX, this doubles up with the operator name #29666.
- * for now just remove from dir(bpy.types) */
+ * for now just remove from `dir(bpy.types)`. */
/* create a new operator type */
dummy_ot.rna_ext.srna = RNA_def_struct_ptr(&BLENDER_RNA, dummy_ot.idname, &RNA_Operator);

View File

@@ -146,7 +146,7 @@ static void sig_handle_crash(int signum)
fclose(fp);
}
- /* Delete content of temp dir! */
+ /* Delete content of temp directory. */
BKE_tempdir_session_purge();
/* Really crash. */
@@ -186,7 +186,7 @@ extern LONG WINAPI windows_exception_handler(EXCEPTION_POINTERS *ExceptionInfo)
static void sig_handle_abort(int /*signum*/)
{
- /* Delete content of temp dir! */
+ /* Delete content of temp directory. */
BKE_tempdir_session_purge();
}