Cleanup: spelling in comments

Campbell Barton 2023-10-06 12:50:27 +11:00
parent 9a5f5ca6a9
commit 71b1712d79
9 changed files with 18 additions and 19 deletions

@@ -4387,8 +4387,8 @@ static ImBuf *image_get_render_result(Image *ima, ImageUser *iuser, void **r_loc
 /* Put an empty image buffer to the cache. This allows to achieve the following:
 *
-* 1. It makes it so the generic logic in the #BKE_image_has_loaded_ibuf proeprly detects that
-* an Image used to dusplay render result has loaded image buffer.
+* 1. It makes it so the generic logic in the #BKE_image_has_loaded_ibuf properly detects that
+* an Image used to display render result has loaded image buffer.
 *
 * Surely there are all the design questions about scene-dependent Render Result image
 * data-block, and the behavior of the flag dependent on whether the Render Result image was ever

@@ -201,11 +201,11 @@ void ShadowPipeline::sync()
 * But that requires that the destination pages in the atlas would have been already cleared
 * using compute. Experiments showed that it is faster to just copy the whole tiles back.
 *
-* For relative perf, raster-based clear within tile update adds around 0.1ms vs 0.25ms for
-* compute based clear for a simple test case. */
+* For relative performance, raster-based clear within tile update adds around 0.1ms vs 0.25ms
+* for compute based clear for a simple test case. */
 pass.state_set(DRW_STATE_DEPTH_ALWAYS);
 /* Metal have implicit sync with Raster Order Groups. Other backend need to have manual
-* sub-pass transition to allow reading the framebuffer. This is a no-op on Metal. */
+* sub-pass transition to allow reading the frame-buffer. This is a no-op on Metal. */
 pass.subpass_transition(GPU_ATTACHEMENT_WRITE, {GPU_ATTACHEMENT_READ});
 pass.bind_image(SHADOW_ATLAS_IMG_SLOT, inst_.shadows.atlas_tx_);
 pass.bind_ssbo("dst_coord_buf", inst_.shadows.dst_coord_buf_);

@@ -109,7 +109,7 @@ class ShadowPipeline {
 /* Shadow update pass. */
 PassMain render_ps_ = {"Shadow.Surface"};
-/* Shadow surface render subpass. */
+/* Shadow surface render sub-pass. */
 PassMain::Sub *surface_ps_ = nullptr;
 public:

@@ -5,7 +5,7 @@
 * On TBDR, we can use a three-pass method to perform virtual shadow map updates, leveraging
 * efficient use of tile-based GPUs. Shadow updates rasterize geometry for each view in much the
 * same way as a conventional shadow map render, but for the standard path, there is an additional
-* cost of an atomic-min abd store to allow for indirection into the atlas. This setup can lead to
+* cost of an atomic-min and store to allow for indirection into the atlas. This setup can lead to
 * excessive overdraw, rasterization and increased complexity in the material depth fragment
 * shader, reducing rendering performance.
 *
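Note: the atomic-min mentioned in the hunk above relies on a property of IEEE-754: for non-negative floats, the raw bit pattern read as an unsigned integer orders the same way as the float value, so a plain unsigned atomic min can select the nearest depth. A minimal, standalone C++ sketch of that fact (illustrative only, not Blender code):

#include <cassert>
#include <cstdint>
#include <cstring>

/* CPU-side analogue of GLSL floatBitsToUint(): reinterpret the bits. */
static uint32_t float_bits_to_uint(float f)
{
  uint32_t u;
  std::memcpy(&u, &f, sizeof(u));
  return u;
}

int main()
{
  /* Non-negative IEEE-754 floats and their bit patterns sort identically,
   * so an unsigned atomic min over the bits picks the smallest depth. */
  const float depths[] = {0.0f, 0.125f, 0.5f, 0.999f, 1.0f};
  for (int i = 1; i < 5; i++) {
    assert(float_bits_to_uint(depths[i - 1]) < float_bits_to_uint(depths[i]));
  }
  return 0;
}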
@@ -49,10 +49,10 @@ void main()
 {
 /* For storing pass, we store the result from depth in tile memory. */
 uint u_depth = floatBitsToUint(in_tile_depth);
-/* Quantization bias. Equivalent to nextafter in C without all the safety. 1 is not enough. */
+/* Quantization bias. Equivalent to `nextafter` in C without all the safety. 1 is not enough. */
 u_depth += 2;
-/* Write result to altas. */
+/* Write result to atlas. */
 # ifdef GPU_METAL
 /* NOTE: Use the fastest possible write function without any parameter wrapping or conversion.*/
 shadow_atlas_img.texture->write(u_depth, ushort2(out_texel_xy), out_page_z);
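Note: the "Quantization bias" comment above calls the bit increment `nextafter` without the safety. For a positive, finite float, bumping the raw bits by one does produce the next representable value; the shader bumps by two because, per the comment, one step is not enough. A small standalone C++ check of that equivalence (illustrative only, not Blender code):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main()
{
  float x = 0.73f;
  /* Reinterpret, step the bit pattern forward by one, reinterpret back. */
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits += 1;
  float bumped;
  std::memcpy(&bumped, &bits, sizeof(bumped));
  /* Matches nextafterf() for positive finite inputs; the library call also
   * handles NaN, infinity and the sign boundary ("all the safety"). */
  assert(bumped == std::nextafterf(x, INFINITY));
  return 0;
}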

@@ -33,6 +33,6 @@ void main()
 * Non-updated tile depth will remain at 0.0 to ensure fragments are discarded. */
 gl_Position = vec4(ndc_pos.x, ndc_pos.y, 1.0, 1.0);
 gpu_Layer = int(src_page_co.z);
-/* Assumes last viewport will always cover the whole framebuffer. */
+/* Assumes last viewport will always cover the whole frame-buffer. */
 gpu_ViewportIndex = 15;
 }

@@ -164,7 +164,7 @@ void main()
 uint page_index = atomicAdd(clear_dispatch_buf.num_groups_z, 1u);
 /* Add page to tile processing. */
 atomicAdd(tile_draw_buf.vertex_len, 6u);
-/* Add page mapping for indexing the page position in atlas and in the framebuffer. */
+/* Add page mapping for indexing the page position in atlas and in the frame-buffer. */
 dst_coord_buf[page_index] = page_packed;
 src_coord_buf[page_index] = packUvec4x8(
 uvec4(relative_tile_co.x, relative_tile_co.y, view_index, 0));
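Note: packUvec4x8() in the hunk above packs four small values (the relative tile coordinate, the view index, and a spare byte) into one 32-bit word, and the `6u` added to `tile_draw_buf.vertex_len` presumably corresponds to one page quad drawn as two triangles. A hypothetical CPU-side equivalent of the packing, assuming x sits in the low byte (the actual byte order of Blender's GLSL helper may differ):

#include <cassert>
#include <cstdint>

/* Hypothetical packUvec4x8()-style helper: four 8-bit fields in one word. */
static uint32_t pack_uvec4_x8(uint32_t x, uint32_t y, uint32_t z, uint32_t w)
{
  return (x & 0xFFu) | ((y & 0xFFu) << 8) | ((z & 0xFFu) << 16) | ((w & 0xFFu) << 24);
}

int main()
{
  /* E.g. a relative tile coordinate (3, 5) rendered by view 2. */
  uint32_t packed = pack_uvec4_x8(3u, 5u, 2u, 0u);
  assert(packed == 0x00020503u);
  return 0;
}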

@@ -46,8 +46,8 @@ void main()
 #ifdef MAT_CLIP_PLANE
 /* Do not use hardware clip planes as they modify the rasterization (some GPUs add vertices).
-* This would in turn create a discrepency between the prepass depth and the gbuffer depth which
-* exhibits missing pixels data. */
+* This would in turn create a discrepancy between the pre-pass depth and the G-buffer depth
+* which exhibits missing pixels data. */
 if (clip_interp.clip_distance > 0.0) {
 discard;
 return;

@@ -220,7 +220,7 @@ GPU_SHADER_CREATE_INFO(eevee_shadow_page_tile_clear)
 .fragment_out(0, Type::FLOAT, "out_tile_depth", DualBlend::NONE, SHADOW_ROG_ID);
 #ifdef APPLE
-/* Metal supports USHORT which saves a bit of perf here. */
+/* Metal supports USHORT which saves a bit of performance here. */
 # define PAGE_Z_TYPE Type::USHORT
 #else
 # define PAGE_Z_TYPE Type::UINT

@@ -306,13 +306,12 @@ void GLFrameBuffer::attachment_set_loadstore_op(GPUAttachmentType type, GPULoadS
 /* TODO(fclem): Add support for other ops. */
 if (ls.load_action == eGPULoadOp::GPU_LOADACTION_CLEAR) {
 if (tmp_detached_[type].tex != nullptr) {
-/* GPULoadStore is used to define the framebuffer before it is used for rendering.
+/* #GPULoadStore is used to define the frame-buffer before it is used for rendering.
 * Binding back unattached attachment makes its state undefined. This is described by the
-* documentation and the userland code should specify a sub-pass at the start of the drawing
-* to explicitly set attachment state.
-*/
+* documentation and the user-land code should specify a sub-pass at the start of the drawing
+* to explicitly set attachment state. */
 if (GLContext::framebuffer_fetch_support) {
-/* Noop. */
+/* NOOP. */
 }
 else if (GLContext::texture_barrier_support) {
 /* Reset default attachment state. */