Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCampbell Barton <campbell@blender.org>2022-09-06 09:25:20 +0300
committerCampbell Barton <campbell@blender.org>2022-09-06 09:25:20 +0300
commit6c6a53fad357ad63d8128c33da7a84f172ef0b63 (patch)
tree0ab3290bbc010af86719bec5a7bd37de75997d37 /source/blender/gpu
parent077ba5ac386f3cc75a67e01cdd75239b76c34de5 (diff)
Cleanup: spelling in comments, formatting, move comments into headers
Diffstat (limited to 'source/blender/gpu')
-rw-r--r--source/blender/gpu/GPU_index_buffer.h4
-rw-r--r--source/blender/gpu/GPU_shader_shared_utils.h2
-rw-r--r--source/blender/gpu/intern/gpu_codegen.cc2
-rw-r--r--source/blender/gpu/intern/gpu_index_buffer.cc4
-rw-r--r--source/blender/gpu/intern/gpu_shader_create_info.hh18
-rw-r--r--source/blender/gpu/metal/mtl_capabilities.hh2
-rw-r--r--source/blender/gpu/metal/mtl_command_buffer.mm8
-rw-r--r--source/blender/gpu/metal/mtl_context.hh18
-rw-r--r--source/blender/gpu/metal/mtl_context.mm6
-rw-r--r--source/blender/gpu/metal/mtl_framebuffer.hh36
-rw-r--r--source/blender/gpu/metal/mtl_framebuffer.mm20
-rw-r--r--source/blender/gpu/metal/mtl_index_buffer.hh16
-rw-r--r--source/blender/gpu/metal/mtl_index_buffer.mm47
-rw-r--r--source/blender/gpu/metal/mtl_memory.hh6
-rw-r--r--source/blender/gpu/metal/mtl_pso_descriptor_state.hh12
-rw-r--r--source/blender/gpu/metal/mtl_shader.hh39
-rw-r--r--source/blender/gpu/metal/mtl_shader.mm85
-rw-r--r--source/blender/gpu/metal/mtl_shader_generator.hh111
-rw-r--r--source/blender/gpu/metal/mtl_shader_generator.mm160
-rw-r--r--source/blender/gpu/metal/mtl_shader_interface.hh32
-rw-r--r--source/blender/gpu/metal/mtl_shader_interface.mm32
-rw-r--r--source/blender/gpu/metal/mtl_shader_interface_type.hh2
-rw-r--r--source/blender/gpu/metal/mtl_state.mm2
-rw-r--r--source/blender/gpu/metal/mtl_texture_util.mm8
-rw-r--r--source/blender/gpu/shaders/compositor/compositor_morphological_distance_feather.glsl2
-rw-r--r--source/blender/gpu/shaders/compositor/compositor_morphological_distance_threshold.glsl2
-rw-r--r--source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl2
-rw-r--r--source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl2
-rw-r--r--source/blender/gpu/shaders/gpu_shader_codegen_lib.glsl2
29 files changed, 350 insertions, 332 deletions
diff --git a/source/blender/gpu/GPU_index_buffer.h b/source/blender/gpu/GPU_index_buffer.h
index e6345b1e43b..e5fefda527d 100644
--- a/source/blender/gpu/GPU_index_buffer.h
+++ b/source/blender/gpu/GPU_index_buffer.h
@@ -33,10 +33,10 @@ typedef struct GPUIndexBufBuilder {
uint32_t *data;
} GPUIndexBufBuilder;
-/* supports all primitive types. */
+/** Supports all primitive types. */
void GPU_indexbuf_init_ex(GPUIndexBufBuilder *, GPUPrimType, uint index_len, uint vertex_len);
-/* supports only GPU_PRIM_POINTS, GPU_PRIM_LINES and GPU_PRIM_TRIS. */
+/** Supports only #GPU_PRIM_POINTS, #GPU_PRIM_LINES and #GPU_PRIM_TRIS. */
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len);
GPUIndexBuf *GPU_indexbuf_build_on_device(uint index_len);
diff --git a/source/blender/gpu/GPU_shader_shared_utils.h b/source/blender/gpu/GPU_shader_shared_utils.h
index 1cfc4f8af31..96feed9e7d9 100644
--- a/source/blender/gpu/GPU_shader_shared_utils.h
+++ b/source/blender/gpu/GPU_shader_shared_utils.h
@@ -44,7 +44,7 @@
# define expf exp
# define bool1 bool
-/* Type name collision with Metal shading language - These typenames are already defined. */
+/* Type name collision with Metal shading language - These type-names are already defined. */
# ifndef GPU_METAL
# define float2 vec2
# define float3 vec3
diff --git a/source/blender/gpu/intern/gpu_codegen.cc b/source/blender/gpu/intern/gpu_codegen.cc
index b81345683b4..0102b8db5b2 100644
--- a/source/blender/gpu/intern/gpu_codegen.cc
+++ b/source/blender/gpu/intern/gpu_codegen.cc
@@ -355,7 +355,7 @@ void GPUCodegen::generate_resources()
GPUCodegenCreateInfo &info = *create_info;
/* Ref. T98190: Defines are optimizations for old compilers.
- * Might become unecessary with EEVEE-Next. */
+ * Might become unnecessary with EEVEE-Next. */
if (GPU_material_flag_get(&mat, GPU_MATFLAG_PRINCIPLED_CLEARCOAT)) {
info.define("PRINCIPLED_CLEARCOAT");
}
diff --git a/source/blender/gpu/intern/gpu_index_buffer.cc b/source/blender/gpu/intern/gpu_index_buffer.cc
index 08c31d0d589..3a66f547403 100644
--- a/source/blender/gpu/intern/gpu_index_buffer.cc
+++ b/source/blender/gpu/intern/gpu_index_buffer.cc
@@ -49,7 +49,7 @@ void GPU_indexbuf_init_ex(GPUIndexBufBuilder *builder,
* degenerative primitives when skipping primitives is required and will
* incur no additional performance cost for rendering. */
if (GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_MAC, GPU_DRIVER_ANY, GPU_BACKEND_METAL)) {
- /* We will still use restart-indices for point primtives and then
+ /* We will still use restart-indices for point primitives and then
* patch these during IndexBuf::init, as we cannot benefit from degenerative
* primitives to eliminate these. */
builder->restart_index_value = (is_restart_compatible(prim_type) ||
@@ -379,7 +379,7 @@ void IndexBuf::squeeze_indices_short(uint min_idx,
* clamp index to the maximum within the index range.
*
* `clamp_max_idx` represents the maximum possible index to clamp against. If primitive is
- * restart-compatible, we can just clamp against the primtive-restart value, otherwise, we
+ * restart-compatible, we can just clamp against the primitive-restart value, otherwise, we
* must assign to a valid index within the range.
*
* NOTE: For OpenGL we skip this by disabling clamping, as we still need to use
diff --git a/source/blender/gpu/intern/gpu_shader_create_info.hh b/source/blender/gpu/intern/gpu_shader_create_info.hh
index 3884c067c83..25a79dd26ac 100644
--- a/source/blender/gpu/intern/gpu_shader_create_info.hh
+++ b/source/blender/gpu/intern/gpu_shader_create_info.hh
@@ -32,7 +32,7 @@ namespace blender::gpu::shader {
#endif
enum class Type {
- /* Types supported natively across all GPU backends. */
+ /* Types supported natively across all GPU back-ends. */
FLOAT = 0,
VEC2,
VEC3,
@@ -48,12 +48,12 @@ enum class Type {
IVEC3,
IVEC4,
BOOL,
- /* Additionally supported types to enable data optimisation and native
- * support in some GPUBackends.
- * NOTE: These types must be representable in all APIs. E.g. VEC3_101010I2 is aliased as vec3 in
- * the GL backend, as implicit type conversions from packed normal attribute data to vec3 is
+ /* Additionally supported types to enable data optimization and native
+ * support in some GPU back-ends.
+ * NOTE: These types must be representable in all APIs. E.g. `VEC3_101010I2` is aliased as vec3
+ * in the GL back-end, as implicit type conversions from packed normal attribute data to vec3 is
* supported. UCHAR/CHAR types are natively supported in Metal and can be used to avoid
- * additional data conversions for GPU_COMP_U8 vertex attributes. */
+ * additional data conversions for `GPU_COMP_U8` vertex attributes. */
VEC3_101010I2,
UCHAR,
UCHAR2,
@@ -324,10 +324,10 @@ struct StageInterfaceInfo {
/**
* \brief Describe inputs & outputs, stage interfaces, resources and sources of a shader.
* If all data is correctly provided, this is all that is needed to create and compile
- * a GPUShader.
+ * a #GPUShader.
*
* IMPORTANT: All strings are references only. Make sure all the strings used by a
- * ShaderCreateInfo are not freed until it is consumed or deleted.
+ * #ShaderCreateInfo are not freed until it is consumed or deleted.
*/
struct ShaderCreateInfo {
/** Shader name for debugging. */
@@ -346,7 +346,7 @@ struct ShaderCreateInfo {
DepthWrite depth_write_ = DepthWrite::ANY;
/**
* Maximum length of all the resource names including each null terminator.
- * Only for names used by gpu::ShaderInterface.
+ * Only for names used by #gpu::ShaderInterface.
*/
size_t interface_names_size_ = 0;
/** Manually set builtins. */
diff --git a/source/blender/gpu/metal/mtl_capabilities.hh b/source/blender/gpu/metal/mtl_capabilities.hh
index 5e34d5352f1..36536438bf5 100644
--- a/source/blender/gpu/metal/mtl_capabilities.hh
+++ b/source/blender/gpu/metal/mtl_capabilities.hh
@@ -14,7 +14,7 @@ namespace gpu {
#define MTL_MAX_TEXTURE_SLOTS 128
#define MTL_MAX_SAMPLER_SLOTS MTL_MAX_TEXTURE_SLOTS
-/* Max limit without using bindless for samplers. */
+/* Max limit without using bind-less for samplers. */
#define MTL_MAX_DEFAULT_SAMPLERS 16
#define MTL_MAX_UNIFORM_BUFFER_BINDINGS 31
#define MTL_MAX_VERTEX_INPUT_ATTRIBUTES 31
diff --git a/source/blender/gpu/metal/mtl_command_buffer.mm b/source/blender/gpu/metal/mtl_command_buffer.mm
index 9a9a2d55103..0e13e8d4690 100644
--- a/source/blender/gpu/metal/mtl_command_buffer.mm
+++ b/source/blender/gpu/metal/mtl_command_buffer.mm
@@ -242,7 +242,7 @@ bool MTLCommandBufferManager::end_active_command_encoder()
active_render_command_encoder_ = nil;
active_command_encoder_type_ = MTL_NO_COMMAND_ENCODER;
- /* Reset associated framebuffer flag. */
+ /* Reset associated frame-buffer flag. */
active_frame_buffer_ = nullptr;
active_pass_descriptor_ = nullptr;
return true;
@@ -286,7 +286,7 @@ bool MTLCommandBufferManager::end_active_command_encoder()
id<MTLRenderCommandEncoder> MTLCommandBufferManager::ensure_begin_render_command_encoder(
MTLFrameBuffer *ctx_framebuffer, bool force_begin, bool *new_pass)
{
- /* Ensure valid framebuffer. */
+ /* Ensure valid frame-buffer. */
BLI_assert(ctx_framebuffer != nullptr);
/* Ensure active command buffer. */
@@ -299,10 +299,10 @@ id<MTLRenderCommandEncoder> MTLCommandBufferManager::ensure_begin_render_command
active_frame_buffer_ != ctx_framebuffer || force_begin) {
this->end_active_command_encoder();
- /* Determine if this is a re-bind of the same framebuffer. */
+ /* Determine if this is a re-bind of the same frame-buffer. */
bool is_rebind = (active_frame_buffer_ == ctx_framebuffer);
- /* Generate RenderPassDescriptor from bound framebuffer. */
+ /* Generate RenderPassDescriptor from bound frame-buffer. */
BLI_assert(ctx_framebuffer);
active_frame_buffer_ = ctx_framebuffer;
active_pass_descriptor_ = active_frame_buffer_->bake_render_pass_descriptor(
diff --git a/source/blender/gpu/metal/mtl_context.hh b/source/blender/gpu/metal/mtl_context.hh
index ccc648eab2a..e996193e722 100644
--- a/source/blender/gpu/metal/mtl_context.hh
+++ b/source/blender/gpu/metal/mtl_context.hh
@@ -175,9 +175,9 @@ struct MTLContextDepthStencilState {
bool has_depth_target;
bool has_stencil_target;
- /* TODO(Metal): Consider optimizing this function using memcmp.
+ /* TODO(Metal): Consider optimizing this function using `memcmp`.
* Un-used, but differing, stencil state leads to over-generation
- * of state objects when doing trivial compare. */
+ * of state objects when doing trivial compare. */
bool operator==(const MTLContextDepthStencilState &other) const
{
bool depth_state_equality = (has_depth_target == other.has_depth_target &&
@@ -358,7 +358,7 @@ typedef enum MTLPipelineStateDirtyFlag {
MTL_PIPELINE_STATE_NULL_FLAG = 0,
/* Whether we need to call setViewport. */
MTL_PIPELINE_STATE_VIEWPORT_FLAG = (1 << 0),
- /* Whether we need to call setScissor.*/
+ /* Whether we need to call setScissor. */
MTL_PIPELINE_STATE_SCISSOR_FLAG = (1 << 1),
/* Whether we need to update/rebind active depth stencil state. */
MTL_PIPELINE_STATE_DEPTHSTENCIL_FLAG = (1 << 2),
@@ -565,15 +565,15 @@ class MTLCommandBufferManager {
};
/** MTLContext -- Core render loop and state management. **/
-/* NOTE(Metal): Partial MTLContext stub to provide wrapper functionality
- * for work-in-progress MTL* classes. */
+/* NOTE(Metal): Partial #MTLContext stub to provide wrapper functionality
+ * for work-in-progress `MTL*` classes. */
class MTLContext : public Context {
friend class MTLBackend;
private:
- /* Null buffers for empty/unintialized bindings.
- * Null attribute buffer follows default attribute format of OpenGL Backend. */
+ /* Null buffers for empty/uninitialized bindings.
+ * Null attribute buffer follows default attribute format of OpenGL Back-end. */
id<MTLBuffer> null_buffer_; /* All zero's. */
id<MTLBuffer> null_attribute_buffer_; /* Value float4(0.0,0.0,0.0,1.0). */
@@ -581,7 +581,7 @@ class MTLContext : public Context {
MTLContextTextureUtils texture_utils_;
/* Texture Samplers. */
- /* Cache of generated MTLSamplerState objects based on permutations of `eGPUSamplerState`. */
+ /* Cache of generated #MTLSamplerState objects based on permutations of `eGPUSamplerState`. */
id<MTLSamplerState> sampler_state_cache_[GPU_SAMPLER_MAX];
id<MTLSamplerState> default_sampler_state_ = nil;
@@ -684,7 +684,7 @@ class MTLContext : public Context {
/* Flag whether the visibility buffer for query results
* has changed. This requires a new RenderPass in order
- * to update.*/
+ * to update. */
bool is_visibility_dirty() const;
/* Reset dirty flag state for visibility buffer. */
diff --git a/source/blender/gpu/metal/mtl_context.mm b/source/blender/gpu/metal/mtl_context.mm
index f14236bcb58..a66645e5fb5 100644
--- a/source/blender/gpu/metal/mtl_context.mm
+++ b/source/blender/gpu/metal/mtl_context.mm
@@ -32,7 +32,7 @@ MTLContext::MTLContext(void *ghost_window) : memory_manager(*this), main_command
debug::mtl_debug_init();
/* Device creation.
- * TODO(Metal): This is a temporary initialisation path to enable testing of features
+ * TODO(Metal): This is a temporary initialization path to enable testing of features
* and shader compilation tests. Future functionality should fetch the existing device
* from GHOST_ContextCGL.mm. Plumbing to be updated in future. */
this->device = MTLCreateSystemDefaultDevice();
@@ -40,7 +40,7 @@ MTLContext::MTLContext(void *ghost_window) : memory_manager(*this), main_command
/* Initialize command buffer state. */
this->main_command_buffer.prepare();
- /* Initialise imm and pipeline state */
+ /* Initialize IMM and pipeline state */
this->pipeline_state.initialised = false;
/* Frame management. */
@@ -199,7 +199,7 @@ id<MTLRenderCommandEncoder> MTLContext::ensure_begin_render_pass()
}
/* Ensure command buffer workload submissions are optimal --
- * Though do not split a batch mid-IMM recording */
+ * Though do not split a batch mid-IMM recording. */
/* TODO(Metal): Add IMM Check once MTLImmediate has been implemented. */
if (this->main_command_buffer.do_break_submission()/*&&
!((MTLImmediate *)(this->imm))->imm_is_recording()*/) {
diff --git a/source/blender/gpu/metal/mtl_framebuffer.hh b/source/blender/gpu/metal/mtl_framebuffer.hh
index d6e9fa76b70..434d1a15b43 100644
--- a/source/blender/gpu/metal/mtl_framebuffer.hh
+++ b/source/blender/gpu/metal/mtl_framebuffer.hh
@@ -40,7 +40,7 @@ struct MTLAttachment {
/**
* Implementation of FrameBuffer object using Metal.
- **/
+ */
class MTLFrameBuffer : public FrameBuffer {
private:
/* Context Handle. */
@@ -54,24 +54,32 @@ class MTLFrameBuffer : public FrameBuffer {
bool use_multilayered_rendering_ = false;
/* State. */
- /* Whether global framebuffer properties have changed and require
- * re-generation of MTLRenderPassDescriptor/RenderCommandEncoders. */
+
+ /**
+ * Whether global frame-buffer properties have changed and require
+ * re-generation of #MTLRenderPassDescriptor / #RenderCommandEncoders.
+ */
bool is_dirty_;
- /* Whether loadstore properties have changed (only affects certain cached configs). */
+ /** Whether `loadstore` properties have changed (only affects certain cached configurations). */
bool is_loadstore_dirty_;
- /* Context that the latest modified state was last applied to.
- * If this does not match current ctx, re-apply state. */
+ /**
+ * Context that the latest modified state was last applied to.
+ * If this does not match current ctx, re-apply state.
+ */
MTLContext *dirty_state_ctx_;
- /* Whether a clear is pending -- Used to toggle between clear and load FB configurations
+ /**
+ * Whether a clear is pending -- Used to toggle between clear and load FB configurations
* (without dirtying the state) - Frame-buffer load config is used if no `GPU_clear_*` command
- * was issued after binding the FrameBuffer. */
+ * was issued after binding the #FrameBuffer.
+ */
bool has_pending_clear_;
- /* Render Pass Descriptors:
- * There are 3 MTLRenderPassDescriptors for different ways in which a frame-buffer
+ /**
+ * Render Pass Descriptors:
+ * There are 3 #MTLRenderPassDescriptors for different ways in which a frame-buffer
* can be configured:
* [0] = CLEAR CONFIG -- Used when a GPU_framebuffer_clear_* command has been issued.
* [1] = LOAD CONFIG -- Used if bound, but no clear is required.
@@ -89,17 +97,17 @@ class MTLFrameBuffer : public FrameBuffer {
MTLRenderPassDescriptor *framebuffer_descriptor_[MTL_FB_CONFIG_MAX];
MTLRenderPassColorAttachmentDescriptor
*colour_attachment_descriptors_[GPU_FB_MAX_COLOR_ATTACHMENT];
- /* Whether MTLRenderPassDescriptor[N] requires updating with latest state. */
+ /** Whether `MTLRenderPassDescriptor[N]` requires updating with latest state. */
bool descriptor_dirty_[MTL_FB_CONFIG_MAX];
- /* Whether SRGB is enabled for this framebuffer configuration. */
+ /** Whether SRGB is enabled for this frame-buffer configuration. */
bool srgb_enabled_;
- /* Whether the primary Frame-buffer attachment is an SRGB target or not. */
+ /** Whether the primary Frame-buffer attachment is an SRGB target or not. */
bool is_srgb_;
public:
/**
* Create a conventional framebuffer to attach texture to.
- **/
+ */
MTLFrameBuffer(MTLContext *ctx, const char *name);
~MTLFrameBuffer();
diff --git a/source/blender/gpu/metal/mtl_framebuffer.mm b/source/blender/gpu/metal/mtl_framebuffer.mm
index 515dd70e5de..975e78fc466 100644
--- a/source/blender/gpu/metal/mtl_framebuffer.mm
+++ b/source/blender/gpu/metal/mtl_framebuffer.mm
@@ -885,12 +885,12 @@ bool MTLFrameBuffer::add_color_attachment(gpu::MTLTexture *texture,
mtl_color_attachments_[slot].depth_plane = 0;
break;
default:
- MTL_LOG_ERROR("MTLFrameBuffer::add_color_attachment Unrecognised texture type %u\n",
+ MTL_LOG_ERROR("MTLFrameBuffer::add_color_attachment Unrecognized texture type %u\n",
texture->type_);
break;
}
- /* Update Framebuffer Resolution. */
+ /* Update Frame-buffer Resolution. */
int width_of_miplayer, height_of_miplayer;
if (miplevel <= 0) {
width_of_miplayer = texture->width_get();
@@ -1007,11 +1007,11 @@ bool MTLFrameBuffer::add_depth_attachment(gpu::MTLTexture *texture, int miplevel
mtl_depth_attachment_.depth_plane = 0;
break;
default:
- BLI_assert(false && "Unrecognised texture type");
+ BLI_assert(false && "Unrecognized texture type");
break;
}
- /* Update Framebuffer Resolution. */
+ /* Update Frame-buffer Resolution. */
int width_of_miplayer, height_of_miplayer;
if (miplevel <= 0) {
width_of_miplayer = texture->width_get();
@@ -1022,7 +1022,7 @@ bool MTLFrameBuffer::add_depth_attachment(gpu::MTLTexture *texture, int miplevel
height_of_miplayer = max_ii(texture->height_get() >> miplevel, 1);
}
- /* Update Framebuffer Resolution. */
+ /* Update Frame-buffer Resolution. */
if (width_ == 0 || height_ == 0) {
this->size_set(width_of_miplayer, height_of_miplayer);
this->scissor_reset();
@@ -1129,11 +1129,11 @@ bool MTLFrameBuffer::add_stencil_attachment(gpu::MTLTexture *texture, int miplev
mtl_stencil_attachment_.depth_plane = 0;
break;
default:
- BLI_assert(false && "Unrecognised texture type");
+ BLI_assert(false && "Unrecognized texture type");
break;
}
- /* Update Framebuffer Resolution. */
+ /* Update Frame-buffer Resolution. */
int width_of_miplayer, height_of_miplayer;
if (miplevel <= 0) {
width_of_miplayer = texture->width_get();
@@ -1144,7 +1144,7 @@ bool MTLFrameBuffer::add_stencil_attachment(gpu::MTLTexture *texture, int miplev
height_of_miplayer = max_ii(texture->height_get() >> miplevel, 1);
}
- /* Update Framebuffer Resolution. */
+ /* Update Frame-buffer Resolution. */
if (width_ == 0 || height_ == 0) {
this->size_set(width_of_miplayer, height_of_miplayer);
this->scissor_reset();
@@ -1376,7 +1376,7 @@ bool MTLFrameBuffer::reset_clear_state()
/** \} */
/* -------------------------------------------------------------------- */
-/** \ Fetch values and Framebuffer status
+/** \ Fetch values and Frame-buffer status
* \{ */
bool MTLFrameBuffer::has_attachment_at_slot(uint slot)
@@ -1506,7 +1506,7 @@ MTLRenderPassDescriptor *MTLFrameBuffer::bake_render_pass_descriptor(bool load_c
BLI_assert(metal_ctx && metal_ctx->get_inside_frame());
UNUSED_VARS_NDEBUG(metal_ctx);
- /* If Framebuffer has been modified, regenerate descriptor. */
+ /* If Frame-buffer has been modified, regenerate descriptor. */
if (is_dirty_) {
/* Clear all configs. */
for (int config = 0; config < 3; config++) {
diff --git a/source/blender/gpu/metal/mtl_index_buffer.hh b/source/blender/gpu/metal/mtl_index_buffer.hh
index 5182eeab5e3..fde26b16927 100644
--- a/source/blender/gpu/metal/mtl_index_buffer.hh
+++ b/source/blender/gpu/metal/mtl_index_buffer.hh
@@ -25,13 +25,13 @@ class MTLIndexBuf : public IndexBuf {
#ifndef NDEBUG
/* Flags whether point index buffer has been compacted
- * to remove false retart indices. */
+ * to remove false restart indices. */
bool point_restarts_stripped_ = false;
#endif
- /* Optimised index buffers.
+ /* Optimized index buffers.
* NOTE(Metal): This optimization encodes a new index buffer following
- * TriangleList topology. Parsing of Index buffers is more optimal
+ * #TriangleList topology. Parsing of Index buffers is more optimal
* when not using restart-compatible primitive topology types. */
GPUPrimType optimized_primitive_type_;
gpu::MTLBuffer *optimized_ibo_ = nullptr;
@@ -52,13 +52,13 @@ class MTLIndexBuf : public IndexBuf {
void upload_data() override;
void update_sub(uint32_t start, uint32_t len, const void *data) override;
- /* get_index_buffer can conditionally return an optimized index buffer of a
+ /* #get_index_buffer can conditionally return an optimized index buffer of a
* differing format, if it is concluded that optimization is preferred
* for the given inputs.
- * Index buffer optimization is used to replace restart-compatbiele
- * primitive types with non-restart-compatible ones such as TriangleList and
- * LineList. This improves GPU execution for these types significantly, while
- * only incuring a small performance penalty.
+ * Index buffer optimization is used to replace restart-compatible
+ * primitive types with non-restart-compatible ones such as #TriangleList and
+ * #LineList. This improves GPU execution for these types significantly, while
+ * only incurring a small performance penalty.
*
* This is also used to emulate unsupported topology types
* such as triangle fan. */
diff --git a/source/blender/gpu/metal/mtl_index_buffer.mm b/source/blender/gpu/metal/mtl_index_buffer.mm
index 4a7875aaeb0..99795d7bbd9 100644
--- a/source/blender/gpu/metal/mtl_index_buffer.mm
+++ b/source/blender/gpu/metal/mtl_index_buffer.mm
@@ -40,7 +40,7 @@ void MTLIndexBuf::bind_as_ssbo(uint32_t binding)
/* Ensure we have a valid IBO. */
BLI_assert(this->ibo_);
- /* TODO(Metal): Support index buffer SSBOs. Dependent on compute impl. */
+ /* TODO(Metal): Support index buffer SSBO's. Dependent on compute implementation. */
MTL_LOG_WARNING("MTLIndexBuf::bind_as_ssbo not yet implemented!\n");
}
@@ -58,17 +58,17 @@ const uint32_t *MTLIndexBuf::read() const
void MTLIndexBuf::upload_data()
{
- /* Handle subrange upload. */
+ /* Handle sub-range upload. */
if (is_subrange_) {
MTLIndexBuf *mtlsrc = static_cast<MTLIndexBuf *>(src_);
mtlsrc->upload_data();
#ifndef NDEBUG
BLI_assert_msg(!mtlsrc->point_restarts_stripped_,
- "Cannot use subrange on stripped point buffer.");
+ "Cannot use sub-range on stripped point buffer.");
#endif
- /* If parent subrange allocation has changed,
+ /* If parent sub-range allocation has changed,
* update our index buffer. */
if (alloc_size_ != mtlsrc->alloc_size_ || ibo_ != mtlsrc->ibo_) {
@@ -154,7 +154,7 @@ void MTLIndexBuf::update_sub(uint32_t start, uint32_t len, const void *data)
destinationOffset:start
size:len];
- /* Synchronise changes back to host to ensure CPU-side data is up-to-date for non
+ /* Synchronize changes back to host to ensure CPU-side data is up-to-date for non
* Shared buffers. */
if (dest_buffer.storageMode == MTLStorageModeManaged) {
[enc synchronizeResource:dest_buffer];
@@ -177,8 +177,9 @@ void MTLIndexBuf::flag_can_optimize(bool can_optimize)
/** \} */
-/** \name Index buffer optimization and topology emulation.
- * Index buffer optimization and emulation. Optimise index buffers by
+/** \name Index buffer optimization and topology emulation
+ *
+ * Index buffer optimization and emulation. Optimize index buffers by
* eliminating restart-indices.
* Emulate unsupported index types e.g. Triangle Fan and Line Loop.
* \{ */
@@ -189,7 +190,7 @@ static uint32_t populate_optimized_tri_strip_buf(Span<T> original_data,
MutableSpan<T> output_data,
uint32_t input_index_len)
{
- /* Generate TriangleList from TriangleStrip. */
+ /* Generate #TriangleList from #TriangleStrip. */
uint32_t current_vert_len = 0;
uint32_t current_output_ind = 0;
T indices[3];
@@ -202,13 +203,12 @@ static uint32_t populate_optimized_tri_strip_buf(Span<T> original_data,
}
else {
if (current_vert_len < 3) {
- /* prepare first triangle.
- * Cache indices before genrating a triangle,
- * in case we have bad primitive-restarts. */
+ /* Prepare first triangle.
+ * Cache indices before generating a triangle, in case we have bad primitive-restarts. */
indices[current_vert_len] = current_index;
}
- /* emit triangle once we reach 3 input verts in current strip. */
+ /* Emit triangle once we reach 3 input verts in current strip. */
if (current_vert_len == 3) {
/* First triangle in strip. */
output_data[current_output_ind++] = indices[0];
@@ -247,7 +247,7 @@ static uint32_t populate_emulated_tri_fan_buf(Span<T> original_data,
MutableSpan<T> output_data,
uint32_t input_index_len)
{
- /* Generate TriangleList from TriangleFan. */
+ /* Generate #TriangleList from #TriangleFan. */
T base_prim_ind_val = 0;
uint32_t current_vert_len = 0;
uint32_t current_output_ind = 0;
@@ -261,9 +261,8 @@ static uint32_t populate_emulated_tri_fan_buf(Span<T> original_data,
}
else {
if (current_vert_len < 3) {
- /* prepare first triangle.
- * Cache indices before genrating a triangle,
- * in case we have bad primitive-restarts. */
+ /* Prepare first triangle.
+ * Cache indices before generating a triangle, in case we have bad primitive-restarts. */
indices[current_vert_len] = current_index;
}
@@ -298,7 +297,7 @@ id<MTLBuffer> MTLIndexBuf::get_index_buffer(GPUPrimType &in_out_primitive_type,
uint32_t &in_out_v_count)
{
/* Determine whether to return the original index buffer, or whether we
- * should emulate an unsupported primitive type, or optimisze a restart-
+ * should emulate an unsupported primitive type, or optimize a restart-
* compatible type for faster performance. */
bool should_optimize_or_emulate = (in_out_primitive_type == GPU_PRIM_TRI_FAN) ||
(in_out_primitive_type == GPU_PRIM_TRI_STRIP);
@@ -411,16 +410,16 @@ id<MTLBuffer> MTLIndexBuf::get_index_buffer(GPUPrimType &in_out_primitive_type,
} break;
case GPU_PRIM_LINE_STRIP: {
- /* TOOD(Metal): Line strip topology types would benefit from optimization to remove
+ /* TODO(Metal): Line strip topology types would benefit from optimization to remove
* primitive restarts, however, these do not occur frequently, nor with
* significant geometry counts. */
- MTL_LOG_INFO("TODO: Primitive topology: Optimise line strip topology types\n");
+ MTL_LOG_INFO("TODO: Primitive topology: Optimize line strip topology types\n");
} break;
case GPU_PRIM_LINE_LOOP: {
- /* TOOD(Metal): Line Loop primitive type requires use of optimized index buffer for
- * emulation, if used with indexed rendering. This path is currently not hit as LineLoop
- * does not currently appear to be used alongisde an index buffer. */
+ /* TODO(Metal): Line Loop primitive type requires use of optimized index buffer for
+ * emulation, if used with indexed rendering. This path is currently not hit as #LineLoop
+ * does not currently appear to be used alongside an index buffer. */
MTL_LOG_WARNING(
"TODO: Primitive topology: Line Loop Index buffer optimization required for "
"emulation.\n");
@@ -465,9 +464,9 @@ void MTLIndexBuf::strip_restart_indices()
* length. Primitive restarts are invalid in Metal for non-restart-compatible
* primitive types. We also cannot just use zero unlike for Lines and Triangles,
* as we cannot create de-generative point primitives to hide geometry, as each
- * point is indepednent.
+ * point is independent.
* Instead, we must remove these hidden indices from the index buffer.
- * Note: This happens prior to index squeezing so operate on 32-bit indices. */
+ * NOTE: This happens prior to index squeezing so operate on 32-bit indices. */
MutableSpan<uint32_t> uint_idx(static_cast<uint32_t *>(data_), index_len_);
for (uint i = 0; i < index_len_; i++) {
if (uint_idx[i] == 0xFFFFFFFFu) {
diff --git a/source/blender/gpu/metal/mtl_memory.hh b/source/blender/gpu/metal/mtl_memory.hh
index dc5417dc11a..df80df6543f 100644
--- a/source/blender/gpu/metal/mtl_memory.hh
+++ b/source/blender/gpu/metal/mtl_memory.hh
@@ -41,7 +41,7 @@
* Each frame, the next scratch buffer is reset, then later flushed upon
* command buffer submission.
*
- * Note: This is allocated per-context due to allocations being tied
+ * NOTE: This is allocated per-context due to allocations being tied
* to workload submissions and context-specific submissions.
*
* Examples of scratch buffer usage are:
@@ -73,7 +73,7 @@
* to ensure they are not prematurely re-used before they have finished being
* used by the GPU.
*
- * Note: The MTLBufferPool is a global construct which can be fetched from anywhere.
+ * NOTE: The MTLBufferPool is a global construct which can be fetched from anywhere.
*
* Usage:
* MTLContext::get_global_memory_manager(); - static routine to fetch global memory manager.
@@ -273,7 +273,7 @@ struct CompareMTLBuffer {
* when the next MTLSafeFreeList is created, to allow the existing pool to be released once
* the reference count hits zero after submitted command buffers complete.
*
- * Note: the Metal API independently tracks resources used by command buffers for the purpose of
+ * NOTE: the Metal API independently tracks resources used by command buffers for the purpose of
* keeping resources alive while in-use by the driver and CPU, however, this differs from the
* MTLSafeFreeList mechanism in the Metal backend, which exists for the purpose of allowing
* previously allocated MTLBuffer resources to be re-used. This allows us to save on the expensive
diff --git a/source/blender/gpu/metal/mtl_pso_descriptor_state.hh b/source/blender/gpu/metal/mtl_pso_descriptor_state.hh
index 010349eddbf..1906350679a 100644
--- a/source/blender/gpu/metal/mtl_pso_descriptor_state.hh
+++ b/source/blender/gpu/metal/mtl_pso_descriptor_state.hh
@@ -147,7 +147,7 @@ struct MTLRenderPipelineStateDescriptor {
* new PSO for the current shader.
*
* Unlike the 'MTLContextGlobalShaderPipelineState', this struct contains a subset of
- * parameters used to distinguish between unique PSOs. This struct is hashable and only contains
+ * parameters used to distinguish between unique PSOs. This struct is hash-able and only contains
* those parameters which are required by PSO generation. Non-unique state such as bound
* resources is not tracked here, as it does not require a unique PSO permutation if changed. */
@@ -155,7 +155,7 @@ struct MTLRenderPipelineStateDescriptor {
MTLVertexDescriptor vertex_descriptor;
/* Render Target attachment state.
- * Assign to MTLPixelFormatInvalid if not used. */
+ * Assign to #MTLPixelFormatInvalid if not used. */
int num_color_attachments;
MTLPixelFormat color_attachment_format[GPU_FB_MAX_COLOR_ATTACHMENT];
MTLPixelFormat depth_attachment_format;
@@ -170,7 +170,7 @@ struct MTLRenderPipelineStateDescriptor {
MTLBlendFactor src_alpha_blend_factor;
MTLBlendFactor src_rgb_blend_factor;
- /* Global colour write mask as this cannot be specified per attachment. */
+ /* Global color write mask as this cannot be specified per attachment. */
MTLColorWriteMask color_write_mask;
/* Point size required by point primitives. */
@@ -210,7 +210,7 @@ struct MTLRenderPipelineStateDescriptor {
uint64_t hash() const
{
- /* NOTE(Metal): Current setup aims to minimise overlap of parameters
+ /* NOTE(Metal): Current setup aims to minimize overlap of parameters
* which are more likely to be different, to ensure earlier hash
* differences without having to fallback to comparisons.
* Though this could likely be further improved to remove
@@ -226,7 +226,7 @@ struct MTLRenderPipelineStateDescriptor {
/* Only include elements in Hash if they are needed - avoids variable null assignments
* influencing hash. */
if (this->num_color_attachments > 0) {
- hash ^= (uint64_t)this->color_write_mask << 22; /* 4 bit bitmask. */
+ hash ^= (uint64_t)this->color_write_mask << 22; /* 4 bit bit-mask. */
hash ^= (uint64_t)this->alpha_blend_op << 26; /* Up to 4 (3 bits). */
hash ^= (uint64_t)this->rgb_blend_op << 29; /* Up to 4 (3 bits). */
hash ^= (uint64_t)this->dest_alpha_blend_factor << 32; /* Up to 18 (5 bits). */
@@ -247,4 +247,4 @@ struct MTLRenderPipelineStateDescriptor {
}
};
-} // namespace blender::gpu \ No newline at end of file
+} // namespace blender::gpu
diff --git a/source/blender/gpu/metal/mtl_shader.hh b/source/blender/gpu/metal/mtl_shader.hh
index cdbcd7c68f6..64d9d1cf849 100644
--- a/source/blender/gpu/metal/mtl_shader.hh
+++ b/source/blender/gpu/metal/mtl_shader.hh
@@ -56,7 +56,7 @@ struct MTLBufferArgumentData {
/* Metal Render Pipeline State Instance. */
struct MTLRenderPipelineStateInstance {
- /* Function instances with specialisation.
+ /* Function instances with specialization.
* Required for argument encoder construction. */
id<MTLFunction> vert;
id<MTLFunction> frag;
@@ -78,7 +78,7 @@ struct MTLRenderPipelineStateInstance {
/** Reflection Data.
* Currently used to verify whether uniform buffers of incorrect sizes being bound, due to left
* over bindings being used for slots that did not need updating for a particular draw. Metal
- * Backend over-generates bindings due to detecting their presence, though in many cases, the
+ * Back-end over-generates bindings due to detecting their presence, though in many cases, the
* bindings in the source are not all used for a given shader.
* This information can also be used to eliminate redundant/unused bindings. */
bool reflection_data_available;
@@ -86,7 +86,7 @@ struct MTLRenderPipelineStateInstance {
blender::Vector<MTLBufferArgumentData> buffer_bindings_reflection_data_frag;
};
-/* MTLShaderBuilder source wrapper used during initial compilation. */
+/* #MTLShaderBuilder source wrapper used during initial compilation. */
struct MTLShaderBuilder {
NSString *msl_source_vert_ = @"";
NSString *msl_source_frag_ = @"";
@@ -100,17 +100,17 @@ struct MTLShaderBuilder {
};
/**
- * MTLShader implements shader compilation, Pipeline State Object (PSO)
+ * #MTLShader implements shader compilation, Pipeline State Object (PSO)
* creation for rendering and uniform data binding.
* Shaders can either be created from native MSL, or generated
- * from a GLSL source shader using GPUShaderCreateInfo.
+ * from a GLSL source shader using #GPUShaderCreateInfo.
*
* Shader creation process:
- * - Create MTLShader:
- * - Convert GLSL to MSL source if required.
- * - set MSL source.
- * - set Vertex/Fragment function names.
- * - Create and populate MTLShaderInterface.
+ * - Create #MTLShader:
+ * - Convert GLSL to MSL source if required.
+ * - set MSL source.
+ * - set Vertex/Fragment function names.
+ * - Create and populate #MTLShaderInterface.
**/
class MTLShader : public Shader {
friend shader::ShaderCreateInfo;
@@ -164,7 +164,7 @@ class MTLShader : public Shader {
* and perform vertex assembly manually, rather than using Stage-in.
* This is used to give a vertex shader full access to all of the
* vertex data.
- * This is primarily used for optimisation techniques and
+ * This is primarily used for optimization techniques and
* alternative solutions for Geometry-shaders which are unsupported
* by Metal. */
bool use_ssbo_vertex_fetch_mode_ = false;
@@ -315,7 +315,7 @@ class MTLShader : public Shader {
* and the type specified in the shader source.
*
* e.g. vec3 to vec4 expansion, or vec4 to vec2 truncation.
- * Note: Vector expansion will replace empty elements with the values
+ * NOTE: Vector expansion will replace empty elements with the values
* (0,0,0,1).
*
* If implicit format resize is not possible, this function
@@ -591,18 +591,19 @@ inline bool mtl_vertex_format_resize(MTLVertexFormat mtl_format,
return out_vert_format != MTLVertexFormatInvalid;
}
-/* Returns whether the METAL API can internally convert between the input type of data in the
+/**
+ * Returns whether the METAL API can internally convert between the input type of data in the
* incoming vertex buffer and the format used by the vertex attribute inside the shader.
*
* - Returns TRUE if the type can be converted internally, along with returning the appropriate
- * type to be passed into the MTLVertexAttributeDescriptorPSO.
+ * type to be passed into the #MTLVertexAttributeDescriptorPSO.
*
* - Returns FALSE if the type cannot be converted internally e.g. casting Int4 to Float4.
*
* If implicit conversion is not possible, then we can fallback to performing manual attribute
- * conversion using the special attribute read function specialisations in the shader.
+ * conversion using the special attribute read function specializations in the shader.
* These functions selectively convert between types based on the specified vertex
- * attribute 'GPUVertFetchMode fetch_mode' e.g. GPU_FETCH_INT.
+ * attribute `GPUVertFetchMode fetch_mode` e.g. `GPU_FETCH_INT`.
*/
inline bool mtl_convert_vertex_format(MTLVertexFormat shader_attrib_format,
GPUVertCompType component_type,
@@ -1026,7 +1027,7 @@ inline uint comp_count_from_vert_format(MTLVertexFormat vert_format)
case MTLVertexFormatInt1010102Normalized:
default:
- BLI_assert_msg(false, "Unrecognised attribute type. Add types to switch as needed.");
+ BLI_assert_msg(false, "Unrecognized attribute type. Add types to switch as needed.");
return 0;
}
}
@@ -1086,7 +1087,7 @@ inline GPUVertFetchMode fetchmode_from_vert_format(MTLVertexFormat vert_format)
return GPU_FETCH_INT_TO_FLOAT_UNIT;
default:
- BLI_assert_msg(false, "Unrecognised attribute type. Add types to switch as needed.");
+ BLI_assert_msg(false, "Unrecognized attribute type. Add types to switch as needed.");
return GPU_FETCH_FLOAT;
}
}
@@ -1156,7 +1157,7 @@ inline GPUVertCompType comp_type_from_vert_format(MTLVertexFormat vert_format)
return GPU_COMP_I10;
default:
- BLI_assert_msg(false, "Unrecognised attribute type. Add types to switch as needed.");
+ BLI_assert_msg(false, "Unrecognized attribute type. Add types to switch as needed.");
return GPU_COMP_F32;
}
}
diff --git a/source/blender/gpu/metal/mtl_shader.mm b/source/blender/gpu/metal/mtl_shader.mm
index 1824057c9a2..23097f312f0 100644
--- a/source/blender/gpu/metal/mtl_shader.mm
+++ b/source/blender/gpu/metal/mtl_shader.mm
@@ -51,7 +51,7 @@ MTLShader::MTLShader(MTLContext *ctx, const char *name) : Shader(name)
shd_builder_ = new MTLShaderBuilder();
#ifndef NDEBUG
- /* Remove invalid symbols from shader name to ensure debug entrypoint function name is valid. */
+ /* Remove invalid symbols from shader name to ensure debug entry-point function name is valid. */
for (uint i : IndexRange(strlen(this->name))) {
char c = this->name[i];
if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
@@ -123,7 +123,7 @@ MTLShader::~MTLShader()
}
pso_cache_.clear();
- /* NOTE(Metal): ShaderInterface deletion is handled in the super destructor ~Shader(). */
+ /* NOTE(Metal): #ShaderInterface deletion is handled in the super destructor `~Shader()`. */
}
valid_ = false;
@@ -247,7 +247,7 @@ bool MTLShader::finalize(const shader::ShaderCreateInfo *info)
break;
}
- /* Concatenate common src. */
+ /* Concatenate common source. */
NSString *str = [NSString stringWithUTF8String:datatoc_mtl_shader_common_msl];
NSString *source_with_header_a = [str stringByAppendingString:source_to_compile];
@@ -343,9 +343,9 @@ bool MTLShader::transform_feedback_enable(GPUVertBuf *buf)
BLI_assert(buf);
transform_feedback_active_ = true;
transform_feedback_vertbuf_ = buf;
- /* TODO(Metal): Enable this assertion once MTLVertBuf lands. */
- /*BLI_assert(static_cast<MTLVertBuf *>(unwrap(transform_feedback_vertbuf_))->get_usage_type() ==
- GPU_USAGE_DEVICE_ONLY);*/
+ /* TODO(Metal): Enable this assertion once #MTLVertBuf lands. */
+ // BLI_assert(static_cast<MTLVertBuf *>(unwrap(transform_feedback_vertbuf_))->get_usage_type() ==
+ // GPU_USAGE_DEVICE_ONLY);
return true;
}
@@ -560,7 +560,7 @@ void MTLShader::vertformat_from_shader(GPUVertFormat *format) const
/** \} */
/* -------------------------------------------------------------------- */
-/** \name METAL Custom behaviour
+/** \name METAL Custom Behavior
* \{ */
void MTLShader::set_vertex_function_name(NSString *vert_function_name)
@@ -584,7 +584,7 @@ void MTLShader::shader_source_from_msl(NSString *input_vertex_source,
void MTLShader::set_interface(MTLShaderInterface *interface)
{
- /* Assign gpu::Shader superclass interface. */
+ /* Assign gpu::Shader super-class interface. */
Shader::interface = interface;
}
@@ -593,22 +593,24 @@ void MTLShader::set_interface(MTLShaderInterface *interface)
/* -------------------------------------------------------------------- */
/** \name Bake Pipeline State Objects
* \{ */
-/* Bakes or fetches a pipeline state using the current
- * MTLRenderPipelineStateDescriptor state.
+
+/**
+ * Bakes or fetches a pipeline state using the current
+ * #MTLRenderPipelineStateDescriptor state.
*
* This state contains information on shader inputs/outputs, such
* as the vertex descriptor, used to control vertex assembly for
* current vertex data, and active render target information,
- * decsribing the output attachment pixel formats.
+ * describing the output attachment pixel formats.
*
- * Other rendering parameters such as global pointsize, blend state, color mask
- * etc; are also used. See mtl_shader.h for full MLRenderPipelineStateDescriptor.
+ * Other rendering parameters such as global point-size, blend state, color mask
+ * etc; are also used. See mtl_shader.h for full #MLRenderPipelineStateDescriptor.
*/
MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
MTLContext *ctx, MTLPrimitiveTopologyClass prim_type)
{
/* NOTE(Metal): PSO cache can be accessed from multiple threads, though these operations should
- * be thread-safe due to organisation of high-level renderer. If there are any issues, then
+ * be thread-safe due to organization of high-level renderer. If there are any issues, then
* access can be guarded as appropriate. */
BLI_assert(this);
MTLShaderInterface *mtl_interface = this->get_interface();
@@ -616,9 +618,9 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
BLI_assert(this->is_valid());
/* NOTE(Metal): Vertex input assembly description will have been populated externally
- * via MTLBatch or MTLImmediate during binding or draw. */
+ * via #MTLBatch or #MTLImmediate during binding or draw. */
- /* Resolve Context Framebuffer state. */
+ /* Resolve Context Frame-buffer state. */
MTLFrameBuffer *framebuffer = ctx->get_current_framebuffer();
/* Update global pipeline descriptor. */
@@ -631,7 +633,7 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
MTLAttachment color_attachment = framebuffer->get_color_attachment(attachment);
if (color_attachment.used) {
- /* If SRGB is disabled and format is SRGB, use colour data directly with no conversions
+ /* If SRGB is disabled and format is SRGB, use color data directly with no conversions
* between linear and SRGB. */
MTLPixelFormat mtl_format = gpu_texture_format_to_metal(
color_attachment.texture->format_get());
@@ -687,7 +689,7 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
@autoreleasepool {
/* Prepare Render Pipeline Descriptor. */
- /* Setup function specialisation constants, used to modify and optimise
+ /* Setup function specialization constants, used to modify and optimize
* generated code based on current render pipeline configuration. */
MTLFunctionConstantValues *values = [[MTLFunctionConstantValues new] autorelease];
@@ -698,18 +700,18 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
pso_descriptor_.label = [NSString stringWithUTF8String:this->name];
/* Offset the bind index for Uniform buffers such that they begin after the VBO
- * buffer bind slots. MTL_uniform_buffer_base_index is passed as a function
- * specialisation constant, customised per unique pipeline state permutation.
+ * buffer bind slots. `MTL_uniform_buffer_base_index` is passed as a function
+ * specialization constant, customized per unique pipeline state permutation.
*
- * Note: For binding point compaction, we could use the number of VBOs present
- * in the current PSO configuration current_state.vertex_descriptor.num_vert_buffers).
+ * NOTE: For binding point compaction, we could use the number of VBOs present
+ * in the current PSO configuration `current_state.vertex_descriptor.num_vert_buffers`).
* However, it is more efficient to simply offset the uniform buffer base index to the
- * maximal number of VBO bind-points, as then UBO bindpoints for similar draw calls
+ * maximal number of VBO bind-points, as then UBO bind-points for similar draw calls
* will align and avoid the requirement for additional binding. */
int MTL_uniform_buffer_base_index = GPU_BATCH_VBO_MAX_LEN;
/* Null buffer index is used if an attribute is not found in the
- * bound VBOs VertexFormat. */
+ * bound VBOs #VertexFormat. */
int null_buffer_index = current_state.vertex_descriptor.num_vert_buffers;
bool using_null_buffer = false;
@@ -726,20 +728,21 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
else {
for (const uint i : IndexRange(current_state.vertex_descriptor.num_attributes)) {
- /* Metal backend attribute descriptor state. */
+ /* Metal back-end attribute descriptor state. */
MTLVertexAttributeDescriptorPSO &attribute_desc =
current_state.vertex_descriptor.attributes[i];
/* Flag format conversion */
- /* In some cases, Metal cannot implicity convert between data types.
- * In these instances, the fetch mode 'GPUVertFetchMode' as provided in the vertex format
+ /* In some cases, Metal cannot implicitly convert between data types.
+ * In these instances, the fetch mode #GPUVertFetchMode as provided in the vertex format
* is passed in, and used to populate function constants named: MTL_AttributeConvert0..15.
-
+ *
* It is then the responsibility of the vertex shader to perform any necessary type
* casting.
*
- * See mtl_shader.hh for more information. Relevant Metal API documentation:
- * https://developer.apple.com/documentation/metal/mtlvertexattributedescriptor/1516081-format?language=objc */
+ * See `mtl_shader.hh` for more information. Relevant Metal API documentation:
+ * https://developer.apple.com/documentation/metal/mtlvertexattributedescriptor/1516081-format?language=objc
+ */
if (attribute_desc.format == MTLVertexFormatInvalid) {
MTL_LOG_WARNING(
"MTLShader: baking pipeline state for '%s'- expected input attribute at "
@@ -766,7 +769,7 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
mtl_interface->name);
}
- /* Copy metal backend attribute descriptor state into PSO descriptor.
+ /* Copy metal back-end attribute descriptor state into PSO descriptor.
* NOTE: need to copy each element due to direct assignment restrictions.
* Also note */
MTLVertexAttributeDescriptor *mtl_attribute = desc.vertexDescriptor.attributes[i];
@@ -777,12 +780,12 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
}
for (const uint i : IndexRange(current_state.vertex_descriptor.num_vert_buffers)) {
- /* Metal backend state buffer layout. */
+ /* Metal back-end state buffer layout. */
const MTLVertexBufferLayoutDescriptorPSO &buf_layout =
current_state.vertex_descriptor.buffer_layouts[i];
- /* Copy metal backend buffer layout state into PSO descriptor.
+ /* Copy metal back-end buffer layout state into PSO descriptor.
* NOTE: need to copy each element due to copying from internal
- * backend descriptor to Metal API descriptor.*/
+ * back-end descriptor to Metal API descriptor. */
MTLVertexBufferLayoutDescriptor *mtl_buf_layout = desc.vertexDescriptor.layouts[i];
mtl_buf_layout.stepFunction = buf_layout.step_function;
@@ -801,7 +804,7 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
/* DEBUG: Missing/empty attributes. */
/* Attributes are normally mapped as part of the state setting based on the used
- * GPUVertFormat, however, if attribues have not been set, we can sort them out here. */
+ * #GPUVertFormat, however, if attributes have not been set, we can sort them out here. */
for (const uint i : IndexRange(mtl_interface->get_total_attributes())) {
const MTLShaderInputAttribute &attribute = mtl_interface->get_attribute(i);
MTLVertexAttributeDescriptor *current_attribute = desc.vertexDescriptor.attributes[i];
@@ -868,8 +871,8 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
float MTL_pointsize = pipeline_descriptor.point_size;
if (pipeline_descriptor.vertex_descriptor.prim_topology_class ==
MTLPrimitiveTopologyClassPoint) {
- /* IF pointsize is > 0.0, PROGRAM_POINT_SIZE is enabled, and gl_PointSize shader keyword
- overrides the value. Otherwise, if < 0.0, use global constant point size. */
+ /* `if pointsize is > 0.0`, PROGRAM_POINT_SIZE is enabled, and `gl_PointSize` shader keyword
+ * overrides the value. Otherwise, if < 0.0, use global constant point size. */
if (MTL_pointsize < 0.0) {
MTL_pointsize = fabsf(MTL_pointsize);
[values setConstantValue:&MTL_pointsize
@@ -926,7 +929,7 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
/* Setup pixel format state */
for (int color_attachment = 0; color_attachment < GPU_FB_MAX_COLOR_ATTACHMENT;
color_attachment++) {
- /* Fetch colour attachment pixel format in backend pipeline state. */
+ /* Fetch color attachment pixel format in back-end pipeline state. */
MTLPixelFormat pixel_format = current_state.color_attachment_format[color_attachment];
/* Populate MTL API PSO attachment descriptor. */
MTLRenderPipelineColorAttachmentDescriptor *col_attachment =
@@ -999,8 +1002,8 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
* This reflection data is used to contrast the binding information
* we know about in the interface against the bindings in the finalized
* PSO. This accounts for bindings which have been stripped out during
- * optimisation, and allows us to both avoid over-binding and also
- * allows us to veriy size-correctness for bindings, to ensure
+ * optimization, and allows us to both avoid over-binding and also
+ * allows us to verify size-correctness for bindings, to ensure
* that buffers bound are not smaller than the size of expected data. */
NSArray<MTLArgument *> *vert_args = [reflection_data vertexArguments];
@@ -1152,7 +1155,7 @@ void MTLShader::ssbo_vertex_fetch_bind_attributes_begin()
ssbo_vertex_attribute_bind_active_ = true;
ssbo_vertex_attribute_bind_mask_ = (1 << mtl_interface->get_total_attributes()) - 1;
- /* Reset tracking of actively used vbo bind slots for ssbo vertex fetch mode. */
+ /* Reset tracking of actively used VBO bind slots for SSBO vertex fetch mode. */
for (int i = 0; i < MTL_SSBO_VERTEX_FETCH_MAX_VBOS; i++) {
ssbo_vbo_slot_used_[i] = false;
}
diff --git a/source/blender/gpu/metal/mtl_shader_generator.hh b/source/blender/gpu/metal/mtl_shader_generator.hh
index c71504b84b7..43890ca0170 100644
--- a/source/blender/gpu/metal/mtl_shader_generator.hh
+++ b/source/blender/gpu/metal/mtl_shader_generator.hh
@@ -21,9 +21,9 @@
*
* 3) Generate MSL shader.
*
- * 4) Populate MTLShaderInterface, describing input/output structure, bindpoints, buffer size and
- * alignment, shader feature usage etc; Everything required by the Metal backend to successfully
- * enable use of shaders and GPU backend features.
+ * 4) Populate #MTLShaderInterface, describing input/output structure, bind-points, buffer size and
+ * alignment, shader feature usage etc; Everything required by the Metal back-end to
+ * successfully enable use of shaders and GPU back-end features.
*
*
*
@@ -33,27 +33,27 @@
* sampler bindings or argument buffers; at the top of the shader.
*
* 2) Inject common Metal headers.
- * - mtl_shader_defines.msl is used to map GLSL functions to MSL.
- * - mtl_shader_common.msl is added to ALL MSL shaders to provide
- * common functionality required by the backend. This primarily
+ * - `mtl_shader_defines.msl` is used to map GLSL functions to MSL.
+ * - `mtl_shader_common.msl` is added to ALL MSL shaders to provide
+ * common functionality required by the back-end. This primarily
* contains function-constant hooks, used in PSO generation.
*
* 3) Create a class Scope which wraps the GLSL shader. This is used to
* create a global per-thread scope around the shader source, to allow
- * access to common shader members (GLSL globals, shader inputs/outptus etc)
+ * access to common shader members (GLSL globals, shader inputs/outputs etc)
*
* 4) Generate shader interface structs and populate local members where required for:
- * - VertexInputs
- * - VertexOutputs
- * - Uniforms
- * - Uniform Blocks
- * - textures;
+ * - `VertexInputs`
+ * - `VertexOutputs`
+ * - `Uniforms`
+ * - `Uniform Blocks`
+ * - `textures` ;
* etc;
*
* 5) Inject GLSL source.
*
* 6) Generate MSL shader entry point function. Every Metal shader must have a
- * vertex/fragment/kernel entrypoint, which contains the function binding table.
+ * vertex/fragment/kernel entry-point, which contains the function binding table.
* This is where bindings are specified and passed into the shader.
*
* For converted shaders, the MSL entry-point will also instantiate a shader
@@ -61,47 +61,49 @@
*
* Finally, the shaders "main()" method will be called, and outputs are copied.
*
- * Note: For position outputs, the default output position will be converted to
+ * NOTE: For position outputs, the default output position will be converted to
* the Metal coordinate space, which involves flipping the Y coordinate and
* re-mapping the depth range between 0 and 1, as with Vulkan.
*
*
* The final shader structure looks as follows:
*
- * -- Shader defines --
- * #define USE_ARGUMENT_BUFFER_FOR_SAMPLERS 0
- * ... etc ...;
- *
- * class MetalShaderVertexImp {
- *
- * -- Common shader interface structs --
- * struct VertexIn {
- * vec4 pos [[attribute(0)]]
- * }
- * struct VertexOut {...}
- * struct PushConstantBlock {...}
- * struct drw_Globals {...}
- * ...
- *
- * -- GLSL source code --
- * ...
- * };
- *
- * vertex MetalShaderVertexImp::VertexOut vertex_function_entry(
- * MetalShaderVertexImp::VertexIn v_in [[stage_in]],
- * constant PushConstantBlock& globals [[buffer(MTL_uniform_buffer_base_index)]]) {
- *
- * MetalShaderVertexImp impl;
- * -- Copy input members into impl instance --
- * -- Execute GLSL main function --
- * impl.main();
- *
- * -- Copy outputs and return --
- * MetalShaderVertexImp::VertexOut out;
- * out.pos = impl.pos;
- * -- transform position to Metal coordinate system --
- * return v_out;
- * }
+ * \code{.cc}
+ * -- Shader defines --
+ * #define USE_ARGUMENT_BUFFER_FOR_SAMPLERS 0
+ * ... etc ...;
+ *
+ * class MetalShaderVertexImp {
+ *
+ * -- Common shader interface structs --
+ * struct VertexIn {
+ * vec4 pos [[attribute(0)]]
+ * }
+ * struct VertexOut {...}
+ * struct PushConstantBlock {...}
+ * struct drw_Globals {...}
+ * ...
+ *
+ * -- GLSL source code --
+ * ...
+ * };
+ *
+ * vertex MetalShaderVertexImp::VertexOut vertex_function_entry(
+ * MetalShaderVertexImp::VertexIn v_in [[stage_in]],
+ * constant PushConstantBlock& globals [[buffer(MTL_uniform_buffer_base_index)]]) {
+ *
+ * MetalShaderVertexImp impl;
+ * -- Copy input members into impl instance --
+ * -- Execute GLSL main function --
+ * impl.main();
+ *
+ * -- Copy outputs and return --
+ * MetalShaderVertexImp::VertexOut out;
+ * out.pos = impl.pos;
+ * -- transform position to Metal coordinate system --
+ * return v_out;
+ * }
+ * \endcode
*
* -- SSBO-vertex-fetchmode --
*
@@ -125,13 +127,14 @@
* significant performance loss from manual vertex assembly vs under-the-hood assembly.
*
* This mode works by passing the required vertex descriptor information into the shader
- * as uniform data, describing the type, stride, offset, stepmode and buffer index of each
- * attribute, such that the shader ssbo-vertex-fetch utility functions know how to extract data.
+ * as uniform data, describing the type, stride, offset, step-mode and buffer index of each
+ * attribute, such that the shader SSBO-vertex-fetch utility functions know how to extract data.
*
- * This also works with indexed rendering, by similarly binding the index buffer as a manul buffer.
+ * This also works with indexed rendering,
+ * by similarly binding the index buffer as a manual buffer.
*
- * When this mode is used, the code generation and shader interface generation varies to accomodate
- * the required features.
+ * When this mode is used, the code generation and shader interface generation varies to
+ * accommodate the required features.
*
* This mode can be enabled in a shader with:
*
@@ -363,7 +366,7 @@ class MSLGeneratorInterface {
blender::Vector<MSLVertexInputAttribute> vertex_input_attributes;
blender::Vector<MSLVertexOutputAttribute> vertex_output_varyings;
/* Should match vertex outputs, but defined separately as
- * some shader permutations will not utilise all inputs/outputs.
+ * some shader permutations will not utilize all inputs/outputs.
* Final shader uses the intersection between the two sets. */
blender::Vector<MSLVertexOutputAttribute> fragment_input_varyings;
blender::Vector<MSLFragmentOutputAttribute> fragment_outputs;
diff --git a/source/blender/gpu/metal/mtl_shader_generator.mm b/source/blender/gpu/metal/mtl_shader_generator.mm
index 37c1ddd6e7a..977e97dbd82 100644
--- a/source/blender/gpu/metal/mtl_shader_generator.mm
+++ b/source/blender/gpu/metal/mtl_shader_generator.mm
@@ -178,10 +178,12 @@ static bool is_program_word(const char *chr, int *len)
return true;
}
-/* Replace function parameter patterns containing:
+/**
+ * Replace function parameter patterns containing:
* `out vec3 somevar` with `THD vec3&somevar`.
- * which enables pass by reference via resolved macro:
- * thread vec3& somevar. */
+ * which enables pass by reference via resolved macro:
+ * `thread vec3& somevar`.
+ */
static void replace_outvars(std::string &str)
{
char *current_str_begin = &*str.begin();
@@ -205,7 +207,7 @@ static void replace_outvars(std::string &str)
/* Match found. */
bool is_array = (*(word_base2 + len2) == '[');
- /* Generate outvar pattern of form 'THD type&var' from original 'out vec4 var'. */
+ /* Generate out-variable pattern of form `THD type&var` from original `out vec4 var`. */
*start = 'T';
*(start + 1) = 'H';
*(start + 2) = 'D';
@@ -277,13 +279,15 @@ static bool balanced_braces(char *current_str_begin, char *current_str_end)
return (nested_bracket_depth == 0);
}
-/* Certain Constants (such as arrays, or pointer types) declared in Global-scope
- * end up being initialised per shader thread, resulting in high
+/**
+ * Certain Constants (such as arrays, or pointer types) declared in Global-scope
+ * end up being initialized per shader thread, resulting in high
* register pressure within the shader.
- * Here we flag occurences of these constants such that
+ * Here we flag occurrences of these constants such that
* they can be moved to a place where this is not a problem.
*
- * Constants declared within function-scope do not exhibit this problem. */
+ * Constants declared within function-scope do not exhibit this problem.
+ */
static void extract_global_scope_constants(std::string &str, std::stringstream &global_scope_out)
{
char *current_str_begin = &*str.begin();
@@ -395,8 +399,8 @@ static void print_resource(std::ostream &os, const ShaderCreateInfo::Resource &r
if (array_offset == -1) {
/* Create local class member as constant pointer reference to bound UBO buffer.
* Given usage within a shader follows ubo_name.ubo_element syntax, we can
- * dereference the pointer as the compiler will optimise this data fetch.
- * To do this, we also give the ubo name a postfix of `_local` to avoid
+ * dereference the pointer as the compiler will optimize this data fetch.
+ * To do this, we also give the UBO name a post-fix of `_local` to avoid
* macro accessor collisions. */
os << "constant " << res.uniformbuf.type_name << " *" << res.uniformbuf.name
<< "_local;\n";
@@ -434,7 +438,7 @@ std::string MTLShader::resources_declare(const ShaderCreateInfo &info) const
for (const ShaderCreateInfo::Resource &res : info.batch_resources_) {
print_resource(ss, res);
}
- /* Note: Push constant uniform data is generated during `generate_msl_from_glsl`
+ /* NOTE: Push constant uniform data is generated during `generate_msl_from_glsl`
* as the generated output is needed for all paths. This includes generation
* of the push constant data structure (struct PushConstantBlock).
* As all shader generation paths require creation of this. */
@@ -533,14 +537,14 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
return false;
}
- /* MSLGeneratorInterface is a class populated to describe all parameters, resources, bindings
+ /* #MSLGeneratorInterface is a class populated to describe all parameters, resources, bindings
* and features used by the source GLSL shader. This information is then used to generate the
* appropriate Metal entry points and perform any required source translation. */
MSLGeneratorInterface msl_iface(*this);
BLI_assert(shd_builder_ != nullptr);
- /* Populate MSLGeneratorInterface from Create-Info.
- * Note this is a seperate path as MSLGeneratorInterface can also be manually populated
+ /* Populate #MSLGeneratorInterface from Create-Info.
+ * NOTE: this is a separate path as #MSLGeneratorInterface can also be manually populated
* from parsing, if support for shaders without create-info is required. */
msl_iface.prepare_from_createinfo(info);
@@ -553,13 +557,13 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
/** Determine use of Transform Feedback. **/
msl_iface.uses_transform_feedback = false;
if (transform_feedback_type_ != GPU_SHADER_TFB_NONE) {
- /* Ensure TransformFeedback is configured correctly. */
+ /* Ensure #TransformFeedback is configured correctly. */
BLI_assert(tf_output_name_list_.size() > 0);
msl_iface.uses_transform_feedback = true;
}
/* Concatenate msl_shader_defines to provide functionality mapping
- * from GLSL to MSL. Also include additioanl GPU defines for
+ * from GLSL to MSL. Also include additional GPU defines for
* optional high-level feature support. */
const std::string msl_defines_string =
"#define GPU_ARB_texture_cube_map_array 1\n\
@@ -576,7 +580,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
* #pragma USE_SSBO_VERTEX_FETCH(Output Prim Type, num output vertices per input primitive)
*
* This will determine whether SSBO-vertex-fetch
- * mode is ued for this shader. Returns true if used, and populates output reference
+ * mode is used for this shader. Returns true if used, and populates output reference
* values with the output prim type and output number of vertices. */
MTLPrimitiveType vertex_fetch_ssbo_output_prim_type = MTLPrimitiveTypeTriangle;
uint32_t vertex_fetch_ssbo_num_output_verts = 0;
@@ -622,8 +626,8 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
/* NOTE(METAL): Currently still performing fallback string scan, as info->builtins_ does
* not always contain the usage flag. This can be removed once all appropriate create-info's
* have been updated. In some cases, this may incur a false positive if access is guarded
- * behind a macro. Though in these cases, unused code paths and paramters will be
- * optimised out by the Metal shader compiler. */
+ * behind a macro. Though in these cases, unused code paths and parameters will be
+ * optimized out by the Metal shader compiler. */
/** Identify usage of vertex-shader builtins. */
msl_iface.uses_gl_VertexID = bool(info->builtins_ & BuiltinBits::VERTEX_ID) ||
@@ -636,9 +640,10 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
std::string::npos ||
msl_iface.uses_ssbo_vertex_fetch_mode;
- /* instance ID in GL is [0, instancecount] in metal it is [base_instance,
- * base_instance+instance_count], so we need to offset instanceID by base instance in Metal --
- * Thus we expose the [[base_instance]] attribute if instance ID is used at all. */
+ /* instance ID in GL is `[0, instance_count]` in metal it is
+ * `[base_instance, base_instance + instance_count]`,
+   * so we need to offset the instance ID by the base instance in Metal --
+ * Thus we expose the `[[base_instance]]` attribute if instance ID is used at all. */
msl_iface.uses_gl_BaseInstanceARB = msl_iface.uses_gl_InstanceID ||
shd_builder_->glsl_vertex_source_.find(
"gl_BaseInstanceARB") != std::string::npos ||
@@ -706,7 +711,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
}
/**** METAL Shader source generation. ****/
- /* Setup stringstream for populaing generated MSL shader vertex/frag shaders. */
+ /* Setup `stringstream` for populating generated MSL shader vertex/frag shaders. */
std::stringstream ss_vertex;
std::stringstream ss_fragment;
@@ -753,7 +758,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
#ifndef NDEBUG
/* Performance warning: Extract global-scope expressions.
- * Note: This is dependent on stripping out comments
+ * NOTE: This is dependent on stripping out comments
* to remove false positives. */
remove_multiline_comments_func(shd_builder_->glsl_vertex_source_);
remove_singleline_comments_func(shd_builder_->glsl_vertex_source_);
@@ -786,7 +791,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
bool is_inside_struct = false;
if (!iface->instance_name.is_empty()) {
/* If shader stage interface has an instance name, then it
- * is using a struct foramt and as such we only need a local
+ * is using a struct format and as such we only need a local
* class member for the struct, not each element. */
ss_vertex << iface->name << " " << iface->instance_name << ";" << std::endl;
is_inside_struct = true;
@@ -822,7 +827,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
is_array,
array_len});
- /* Add to fragment-input interface.*/
+ /* Add to fragment-input interface. */
msl_iface.fragment_input_varyings.append(
{to_string(inout.type),
out_name.c_str(),
@@ -838,7 +843,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
if (!msl_iface.uses_ssbo_vertex_fetch_mode) {
ss_vertex << msl_iface.generate_msl_vertex_in_struct();
}
- /* Genrate Uniform data structs. */
+ /* Generate Uniform data structs. */
ss_vertex << msl_iface.generate_msl_uniform_structs(ShaderStage::VERTEX);
/* Conditionally use global GL variables. */
@@ -900,7 +905,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
/* Class Closing Bracket to end shader global scope. */
ss_vertex << "};" << std::endl;
- /* Generate Vertex shader entrypoint function containing resource bindings. */
+ /* Generate Vertex shader entry-point function containing resource bindings. */
ss_vertex << msl_iface.generate_msl_vertex_entry_stub();
/*** Generate FRAGMENT Stage. ***/
@@ -918,10 +923,8 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
#ifndef NDEBUG
/* Performance warning: Identify global-scope expressions.
- * These cause excessive register pressure due to global
- * arrays being instanciated per-thread.
- * Note: This is dependent on stripping out comments
- * to remove false positives. */
+ * These cause excessive register pressure due to global arrays being instantiated per-thread.
+ * NOTE: This is dependent on stripping out comments to remove false positives. */
remove_multiline_comments_func(shd_builder_->glsl_fragment_source_);
remove_singleline_comments_func(shd_builder_->glsl_fragment_source_);
extract_global_scope_constants(shd_builder_->glsl_fragment_source_, ss_fragment);
@@ -1000,7 +1003,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
/* Class Closing Bracket to end shader global scope. */
ss_fragment << "};" << std::endl;
- /* Generate Fragment entrypoint function. */
+ /* Generate Fragment entry-point function. */
ss_fragment << msl_iface.generate_msl_fragment_entry_stub();
}
@@ -1050,7 +1053,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
shader_debug_printf("[METAL] BSL Converted into MSL\n");
#ifndef NDEBUG
- /* In debug mode, we inject the name of the shader into the entrypoint function
+ /* In debug mode, we inject the name of the shader into the entry-point function
* name, as these are what show up in the Xcode GPU debugger. */
this->set_vertex_function_name(
[[NSString stringWithFormat:@"vertex_function_entry_%s", this->name] retain]);
@@ -1316,8 +1319,8 @@ bool MSLGeneratorInterface::use_argument_buffer_for_samplers() const
uint32_t MSLGeneratorInterface::num_samplers_for_stage(ShaderStage stage) const
{
- /* Note: Sampler bindings and argument buffer shared across stages,
- in case stages share texture/sampler bindings. */
+ /* NOTE: Sampler bindings and argument buffer shared across stages,
+ * in case stages share texture/sampler bindings. */
return texture_samplers.size();
}
@@ -1357,14 +1360,14 @@ std::string MSLGeneratorInterface::generate_msl_vertex_entry_stub()
std::stringstream out;
out << std::endl << "/*** AUTO-GENERATED MSL VERETX SHADER STUB. ***/" << std::endl;
- /* Undef texture defines from main source - avoid conflict with MSL texture. */
+  /* Undefine texture defines from main source - avoid conflict with MSL texture. */
out << "#undef texture" << std::endl;
out << "#undef textureLod" << std::endl;
/* Disable special case for booleans being treated as ints in GLSL. */
out << "#undef bool" << std::endl;
- /* Undef uniform mappings to avoid name collisions. */
+  /* Undefine uniform mappings to avoid name collisions. */
out << generate_msl_uniform_undefs(ShaderStage::VERTEX);
/* Generate function entry point signature w/ resource bindings and inputs. */
@@ -1414,8 +1417,8 @@ std::string MSLGeneratorInterface::generate_msl_vertex_entry_stub()
out << this->generate_msl_vertex_output_population();
/* Final point size,
- * This is only compiled if the MTL_global_pointsize is specified
- * as a function specialisation in the PSO. This is restricted to
+ * This is only compiled if the `MTL_global_pointsize` is specified
+ * as a function specialization in the PSO. This is restricted to
* point primitive types. */
out << "if(is_function_constant_defined(MTL_global_pointsize)){ output.pointsize = "
"(MTL_global_pointsize > 0.0)?MTL_global_pointsize:output.pointsize; }"
@@ -1437,14 +1440,14 @@ std::string MSLGeneratorInterface::generate_msl_fragment_entry_stub()
std::stringstream out;
out << std::endl << "/*** AUTO-GENERATED MSL FRAGMENT SHADER STUB. ***/" << std::endl;
- /* Undef texture defines from main source - avoid conflict with MSL texture*/
+ /* Undefine texture defines from main source - avoid conflict with MSL texture. */
out << "#undef texture" << std::endl;
out << "#undef textureLod" << std::endl;
- /* Disable special case for booleans being treated as ints in GLSL. */
+ /* Disable special case for booleans being treated as integers in GLSL. */
out << "#undef bool" << std::endl;
- /* Undef uniform mappings to avoid name collisions. */
+ /* Undefine uniform mappings to avoid name collisions. */
out << generate_msl_uniform_undefs(ShaderStage::FRAGMENT);
/* Generate function entry point signature w/ resource bindings and inputs. */
@@ -1529,9 +1532,9 @@ void MSLGeneratorInterface::generate_msl_textures_input_string(std::stringstream
}
/* Generate sampler signatures. */
- /* Note: Currently textures and samplers share indices across shading stages, so the limit is
+ /* NOTE: Currently textures and samplers share indices across shading stages, so the limit is
* shared.
- * If we exceed the hardware-supported limit, then follow a bindless model using argument
+ * If we exceed the hardware-supported limit, then follow a bind-less model using argument
* buffers. */
if (this->use_argument_buffer_for_samplers()) {
out << ",\n\tconstant SStruct& samplers [[buffer(MTL_uniform_buffer_base_index+"
@@ -1539,7 +1542,7 @@ void MSLGeneratorInterface::generate_msl_textures_input_string(std::stringstream
}
else {
/* Maximum Limit of samplers defined in the function argument table is
- * MTL_MAX_DEFAULT_SAMPLERS=16. */
+ * `MTL_MAX_DEFAULT_SAMPLERS=16`. */
BLI_assert(this->texture_samplers.size() <= MTL_MAX_DEFAULT_SAMPLERS);
for (const MSLTextureSampler &tex : this->texture_samplers) {
if (bool(tex.stage & stage)) {
@@ -1562,15 +1565,15 @@ void MSLGeneratorInterface::generate_msl_uniforms_input_string(std::stringstream
int ubo_index = 0;
for (const MSLUniformBlock &ubo : this->uniform_blocks) {
if (bool(ubo.stage & stage)) {
- /* For literal/existing global types, we do not need the class namespace accessor. */
+ /* For literal/existing global types, we do not need the class name-space accessor. */
out << ",\n\tconstant ";
if (!is_builtin_type(ubo.type_name)) {
out << get_stage_class_name(stage) << "::";
}
- /* UniformBuffer bind indices start at MTL_uniform_buffer_base_index+1, as
- * MTL_uniform_buffer_base_index is reserved for the PushConstantBlock (push constants).
+ /* #UniformBuffer bind indices start at `MTL_uniform_buffer_base_index + 1`, as
+       * `MTL_uniform_buffer_base_index` is reserved for the #PushConstantBlock (push constants).
* MTL_uniform_buffer_base_index is an offset depending on the number of unique VBOs
- * bound for the current PSO specialisation. */
+ * bound for the current PSO specialization. */
out << ubo.type_name << "* " << ubo.name << "[[buffer(MTL_uniform_buffer_base_index+"
<< (ubo_index + 1) << ")]]";
}
@@ -1682,7 +1685,7 @@ std::string MSLGeneratorInterface::generate_msl_uniform_structs(ShaderStage shad
return out.str();
}
-/* Note: Uniform macro definition vars can conflict with other parameters. */
+/* NOTE: Uniform macro definition vars can conflict with other parameters. */
std::string MSLGeneratorInterface::generate_msl_uniform_undefs(ShaderStage shader_stage)
{
std::stringstream out;
@@ -1787,7 +1790,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_out_struct(ShaderStage sh
}
}
else {
- /* Matrix types need to be expressed as their vector subcomponents. */
+ /* Matrix types need to be expressed as their vector sub-components. */
if (is_matrix_type(v_out.type)) {
BLI_assert(v_out.get_mtl_interpolation_qualifier() == " [[flat]]" &&
"Matrix varying types must have [[flat]] interpolation");
@@ -1807,18 +1810,17 @@ std::string MSLGeneratorInterface::generate_msl_vertex_out_struct(ShaderStage sh
/* Add gl_PointSize if written to. */
if (shader_stage == ShaderStage::VERTEX) {
if (this->uses_gl_PointSize) {
- /* If gl_PointSize is explicitly written to,
+ /* If `gl_PointSize` is explicitly written to,
* we will output the written value directly.
- * This value can still be overriden by the
- * global pointsize value. */
+ * This value can still be overridden by the
+ * global point-size value. */
out << "\tfloat pointsize [[point_size]];" << std::endl;
}
else {
- /* Otherwise, if pointsize is not written to inside the shader,
- * then its usage is controlled by whether the MTL_global_pointsize
+ /* Otherwise, if point-size is not written to inside the shader,
+ * then its usage is controlled by whether the `MTL_global_pointsize`
* function constant has been specified.
- * This function constant is enabled for all point primitives beign
- * rendered. */
+ * This function constant is enabled for all point primitives being rendered. */
out << "\tfloat pointsize [[point_size, function_constant(MTL_global_pointsize)]];"
<< std::endl;
}
@@ -1904,7 +1906,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_transform_feedback_out_st
}
}
else {
- /* Matrix types need to be expressed as their vector subcomponents. */
+ /* Matrix types need to be expressed as their vector sub-components. */
if (is_matrix_type(v_out.type)) {
BLI_assert(v_out.get_mtl_interpolation_qualifier() == " [[flat]]" &&
"Matrix varying types must have [[flat]] interpolation");
@@ -1980,10 +1982,10 @@ std::string MSLGeneratorInterface::generate_msl_uniform_block_population(ShaderS
/* Only include blocks which are used within this stage. */
if (bool(ubo.stage & stage)) {
/* Generate UBO reference assignment.
- * NOTE(Metal): We append `_local` postfix onto the class member name
+ * NOTE(Metal): We append `_local` post-fix onto the class member name
* for the ubo to avoid name collision with the UBO accessor macro.
- * We only need to add this postfix for the non-array access variant,
- * as the array is indexed directly, rather than requiring a dereference. */
+ * We only need to add this post-fix for the non-array access variant,
+ * as the array is indexed directly, rather than requiring a dereference. */
out << "\t"
<< ((stage == ShaderStage::VERTEX) ? "vertex_shader_instance." :
"fragment_shader_instance.")
@@ -2045,7 +2047,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_attribute_input_populatio
out << ");";
}
else {
- /* OpenGL uses the GPU_FETCH_* functions which can alter how an attribute value is
+ /* OpenGL uses the `GPU_FETCH_*` functions which can alter how an attribute value is
* interpreted. In Metal, we cannot support all implicit conversions within the vertex
* descriptor/vertex stage-in, so we need to perform value transformation on-read.
*
@@ -2055,10 +2057,10 @@ std::string MSLGeneratorInterface::generate_msl_vertex_attribute_input_populatio
* vertex data, depending on the specified GPU_FETCH_* mode for the current
* vertex format.
*
- * The fetch_mode is specified per-attribute using specialisation constants
+ * The fetch_mode is specified per-attribute using specialization constants
* on the PSO, wherein a unique set of constants is passed in per vertex
* buffer/format configuration. Efficiently enabling pass-through reads
- * if no special fetch is required. */
+ * if no special fetch is required. */
bool do_attribute_conversion_on_read = false;
std::string attribute_conversion_func_name = get_attribute_conversion_function(
&do_attribute_conversion_on_read, this->vertex_input_attributes[attribute].type);
@@ -2098,7 +2100,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_output_population()
<< std::endl;
}
- /* Output Pointsize. */
+ /* Output Point-size. */
if (this->uses_gl_PointSize) {
out << "\toutput.pointsize = vertex_shader_instance.gl_PointSize;" << std::endl;
}
@@ -2110,7 +2112,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_output_population()
<< std::endl;
}
- /* Output clipdistances. */
+ /* Output clip-distances. */
out << "#if defined(USE_CLIP_PLANES) || defined(USE_WORLD_CLIP_PLANES)" << std::endl;
if (this->clip_distances.size() > 1) {
for (int cd = 0; cd < this->clip_distances.size(); cd++) {
@@ -2384,7 +2386,7 @@ void MSLGeneratorInterface::resolve_input_attribute_locations()
/* Determine free location.
* Starting from 1 is slightly less efficient, however,
- * given mutli-sized attributes, an earlier slot may remain free.
+ * given multi-sized attributes, an earlier slot may remain free.
   * given GPU_VERT_ATTR_MAX_LEN is small, this won't matter. */
for (int loc = 0; loc < GPU_VERT_ATTR_MAX_LEN - (required_attr_slot_count - 1); loc++) {
@@ -2429,8 +2431,10 @@ void MSLGeneratorInterface::resolve_fragment_output_locations()
}
}
-/* Add string to name buffer. Utility function to be used in bake_shader_interface.
- * Returns the offset of the inserted name.*/
+/**
+ * Add string to name buffer. Utility function to be used in #bake_shader_interface.
+ * Returns the offset of the inserted name.
+ */
static uint32_t name_buffer_copystr(char **name_buffer_ptr,
const char *str_to_copy,
uint32_t &name_buffer_size,
@@ -2443,7 +2447,7 @@ static uint32_t name_buffer_copystr(char **name_buffer_ptr,
uint32_t ret_len = strlen(str_to_copy);
BLI_assert(ret_len > 0);
- /* If required name buffer size is larger, increase by atleast 128 bytes. */
+ /* If required name buffer size is larger, increase by at least 128 bytes. */
if (name_buffer_size + ret_len > name_buffer_size) {
name_buffer_size = name_buffer_size + max_ii(128, ret_len);
*name_buffer_ptr = (char *)MEM_reallocN(*name_buffer_ptr, name_buffer_size);
@@ -2467,7 +2471,7 @@ MTLShaderInterface *MSLGeneratorInterface::bake_shader_interface(const char *nam
interface->init();
/* Name buffer. */
- /* Initialise name buffer. */
+ /* Initialize name buffer. */
uint32_t name_buffer_size = 256;
uint32_t name_buffer_offset = 0;
interface->name_buffer_ = (char *)MEM_mallocN(name_buffer_size, "name_buffer");
@@ -2487,7 +2491,7 @@ MTLShaderInterface *MSLGeneratorInterface::bake_shader_interface(const char *nam
elem < get_matrix_location_count(this->vertex_input_attributes[attribute].type);
elem++) {
/* First attribute matches the core name -- subsequent attributes tagged with
- * __internal_<name><index>. */
+ * `__internal_<name><index>`. */
std::string _internal_name = (elem == 0) ?
this->vertex_input_attributes[attribute].name :
"__internal_" +
@@ -2582,7 +2586,7 @@ MTLShaderInterface *MSLGeneratorInterface::bake_shader_interface(const char *nam
this->get_sampler_argument_buffer_bind_index(ShaderStage::VERTEX),
this->get_sampler_argument_buffer_bind_index(ShaderStage::FRAGMENT));
- /* Map Metal bindings to standardised ShaderInput struct name/binding index. */
+ /* Map Metal bindings to standardized ShaderInput struct name/binding index. */
interface->prepare_common_shader_inputs();
/* Resize name buffer to save some memory. */
@@ -2694,7 +2698,7 @@ std::string MSLTextureSampler::get_msl_texture_type_str() const
return "texture_buffer";
}
default: {
- /* Unrecognised type. */
+ /* Unrecognized type. */
BLI_assert_unreachable();
return "ERROR";
}
@@ -2802,7 +2806,7 @@ std::string MSLTextureSampler::get_msl_wrapper_type_str() const
return "_mtl_combined_image_sampler_buffer";
}
default: {
- /* Unrecognised type. */
+ /* Unrecognized type. */
BLI_assert_unreachable();
return "ERROR";
}
@@ -2857,7 +2861,7 @@ std::string MSLTextureSampler::get_msl_return_type_str() const
}
default: {
- /* Unrecognised type. */
+ /* Unrecognized type. */
BLI_assert_unreachable();
return "ERROR";
}
diff --git a/source/blender/gpu/metal/mtl_shader_interface.hh b/source/blender/gpu/metal/mtl_shader_interface.hh
index 0f04c04031d..0da84cad997 100644
--- a/source/blender/gpu/metal/mtl_shader_interface.hh
+++ b/source/blender/gpu/metal/mtl_shader_interface.hh
@@ -23,33 +23,33 @@
namespace blender::gpu {
-/* MTLShaderInterface describes the layout and properties of a given shader,
+/* #MTLShaderInterface describes the layout and properties of a given shader,
* including input and output bindings, and any special properties or modes
* that the shader may require.
*
* -- Shader input/output bindings --
*
- * We require custom datastructures for the binding information in Metal.
+ * We require custom data-structures for the binding information in Metal.
* This is because certain bindings contain and require more information to
* be stored than can be tracked solely within the `ShaderInput` struct.
* e.g. data sizes and offsets.
*
* Upon interface completion, `prepare_common_shader_inputs` is used to
- * populate the global ShaderInput* array to enable correct functionality
+ * populate the global `ShaderInput*` array to enable correct functionality
* of shader binding location lookups. These returned locations act as indices
- * into the arrays stored here in the MTLShaderInterace, such that extraction
- * of required information can be performed within the backend.
+ * into the arrays stored here in the #MTLShaderInterface, such that extraction
+ * of required information can be performed within the back-end.
*
* e.g. `int loc = GPU_shader_get_uniform(...)`
- * `loc` will match the index into the MTLShaderUniform uniforms_[] array
+ * `loc` will match the index into the `MTLShaderUniform uniforms_[]` array
* to fetch the required Metal specific information.
*
*
*
* -- Argument Buffers and Argument Encoders --
*
- * We can use ArgumentBuffers (AB's) in Metal to extend the resource bind limitations
- * by providing bindless support.
+ * We can use #ArgumentBuffers (AB's) in Metal to extend the resource bind limitations
+ * by providing bind-less support.
*
* Argument Buffers are used for sampler bindings when the builtin
* sampler limit of 16 is exceeded, as in all cases for Blender,
@@ -60,8 +60,8 @@ namespace blender::gpu {
* In future, argument buffers may be extended to support other resource
* types, if overall bind limits are ever increased within Blender.
*
- * The ArgumentEncoder cache used to store the generated ArgumentEncoders for a given
- * shader permutation. The ArgumentEncoder is the resource used to write resource binding
+ * The #ArgumentEncoder cache used to store the generated #ArgumentEncoders for a given
+ * shader permutation. The #ArgumentEncoder is the resource used to write resource binding
* information to a specified buffer, and is unique to the shader's resource interface.
*/
@@ -107,7 +107,7 @@ struct MTLShaderInputAttribute {
struct MTLShaderUniformBlock {
uint32_t name_offset;
uint32_t size = 0;
- /* Buffer resouce bind index in shader [[buffer(index)]]. */
+ /* Buffer resource bind index in shader `[[buffer(index)]]`. */
uint32_t buffer_index;
/* Tracking for manual uniform addition. */
@@ -127,7 +127,7 @@ struct MTLShaderUniform {
struct MTLShaderTexture {
bool used;
uint32_t name_offset;
- /* Texture resource bind slot in shader [[texture(n)]]. */
+ /* Texture resource bind slot in shader `[[texture(n)]]`. */
int slot_index;
eGPUTextureType type;
ShaderStage stage_mask;
@@ -135,7 +135,7 @@ struct MTLShaderTexture {
struct MTLShaderSampler {
uint32_t name_offset;
- /* Sampler resource bind slot in shader [[sampler(n)]]. */
+ /* Sampler resource bind slot in shader `[[sampler(n)]]`. */
uint32_t slot_index = 0;
};
@@ -143,7 +143,7 @@ struct MTLShaderSampler {
MTLVertexFormat mtl_datatype_to_vertex_type(eMTLDataType type);
/**
- * Implementation of Shader interface for Metal Backend.
+ * Implementation of Shader interface for Metal Back-end.
**/
class MTLShaderInterface : public ShaderInterface {
@@ -157,7 +157,7 @@ class MTLShaderInterface : public ShaderInterface {
};
ArgumentEncoderCacheEntry arg_encoders_[ARGUMENT_ENCODERS_CACHE_SIZE] = {};
- /* Vertex input Attribues. */
+ /* Vertex input Attributes. */
uint32_t total_attributes_;
uint32_t total_vert_stride_;
MTLShaderInputAttribute attributes_[MTL_MAX_VERTEX_INPUT_ATTRIBUTES];
@@ -218,7 +218,7 @@ class MTLShaderInterface : public ShaderInterface {
uint32_t argument_buffer_bind_index_vert,
uint32_t argument_buffer_bind_index_frag);
- /* Prepare ShaderInput interface for binding resolution. */
+ /* Prepare #ShaderInput interface for binding resolution. */
void prepare_common_shader_inputs();
/* Fetch Uniforms. */
diff --git a/source/blender/gpu/metal/mtl_shader_interface.mm b/source/blender/gpu/metal/mtl_shader_interface.mm
index 1adf1210496..3703d5b5684 100644
--- a/source/blender/gpu/metal/mtl_shader_interface.mm
+++ b/source/blender/gpu/metal/mtl_shader_interface.mm
@@ -32,7 +32,7 @@ MTLShaderInterface::MTLShaderInterface(const char *name)
strcpy(this->name, name);
}
- /* Ensure ShaderInterface parameters are cleared. */
+ /* Ensure #ShaderInterface parameters are cleared. */
this->init();
}
@@ -64,7 +64,7 @@ void MTLShaderInterface::init()
sampler_argument_buffer_bind_index_vert_ = -1;
sampler_argument_buffer_bind_index_frag_ = -1;
- /* NULL initialise uniform location markers for builtins. */
+ /* NULL initialize uniform location markers for builtins. */
for (const int u : IndexRange(GPU_NUM_UNIFORMS)) {
builtins_[u] = -1;
}
@@ -76,7 +76,7 @@ void MTLShaderInterface::init()
textures_[tex].slot_index = -1;
}
- /* Null initialisation for argument encoders. */
+ /* Null initialization for argument encoders. */
for (const int i : IndexRange(ARGUMENT_ENCODERS_CACHE_SIZE)) {
arg_encoders_[i].encoder = nil;
arg_encoders_[i].buffer_index = -1;
@@ -117,7 +117,7 @@ uint32_t MTLShaderInterface::add_uniform_block(uint32_t name_offset,
MTLShaderUniformBlock &uni_block = ubos_[total_uniform_blocks_];
uni_block.name_offset = name_offset;
- /* We offset the buffer bidning index by one, as the first slot is reserved for push constant
+ /* We offset the buffer binding index by one, as the first slot is reserved for push constant
* data. */
uni_block.buffer_index = buffer_index + 1;
uni_block.size = size;
@@ -224,7 +224,7 @@ void MTLShaderInterface::map_builtins()
builtin_blocks_[ubo] = -1;
}
- /* Resolve and cache uniform locations for bultin uniforms. */
+ /* Resolve and cache uniform locations for builtin uniforms. */
for (const int u : IndexRange(GPU_NUM_UNIFORMS)) {
const ShaderInput *uni = this->uniform_get(builtin_uniform_name((GPUUniformBuiltin)u));
if (uni != nullptr) {
@@ -239,7 +239,7 @@ void MTLShaderInterface::map_builtins()
}
}
- /* Resolve and cache uniform locations for bultin uniform blocks. */
+ /* Resolve and cache uniform locations for builtin uniform blocks. */
for (const int u : IndexRange(GPU_NUM_UNIFORM_BLOCKS)) {
const ShaderInput *uni = this->ubo_get(builtin_uniform_block_name((GPUUniformBlockBuiltin)u));
@@ -255,16 +255,16 @@ void MTLShaderInterface::map_builtins()
}
}
-/* Populate ShaderInput struct based on interface. */
+/* Populate #ShaderInput struct based on interface. */
void MTLShaderInterface::prepare_common_shader_inputs()
{
- /* ShaderInput inputs_ maps a uniform name to an external
+ /* `ShaderInput inputs_` maps a uniform name to an external
* uniform location, which is used as an array index to look-up
- * information in the local MTLShaderInterface input structs.
+ * information in the local #MTLShaderInterface input structs.
*
- * ShaderInput population follows the ordering rules in gpu_shader_interface. */
+ * #ShaderInput population follows the ordering rules in #gpu_shader_interface. */
- /* Populate ShaderInterface counts. */
+ /* Populate #ShaderInterface counts. */
attr_len_ = this->get_total_attributes();
ubo_len_ = this->get_total_uniform_blocks();
uniform_len_ = this->get_total_uniforms() + this->get_total_textures();
@@ -272,8 +272,8 @@ void MTLShaderInterface::prepare_common_shader_inputs()
/* TODO(Metal): Support storage buffer bindings. Pending compute shader support. */
ssbo_len_ = 0;
- /* Calculate total inputs and allocate ShaderInput array. */
- /* NOTE: We use the existing name_buffer_ allocated for internal input structs. */
+ /* Calculate total inputs and allocate #ShaderInput array. */
+ /* NOTE: We use the existing `name_buffer_` allocated for internal input structs. */
int input_tot_len = attr_len_ + ubo_len_ + uniform_len_ + ssbo_len_;
inputs_ = (ShaderInput *)MEM_callocN(sizeof(ShaderInput) * input_tot_len, __func__);
ShaderInput *current_input = inputs_;
@@ -316,9 +316,9 @@ void MTLShaderInterface::prepare_common_shader_inputs()
}
/* Textures.
- * NOTE(Metal): Textures are externally treated as uniforms in gpu_shader_interface.
+ * NOTE(Metal): Textures are externally treated as uniforms in #gpu_shader_interface.
* Location for textures resolved as `binding` value. This
- * is the index into the local MTLShaderTexture textures[] array.
+ * is the index into the local `MTLShaderTexture textures[]` array.
*
* In MSL, we cannot trivially remap which texture slot a given texture
* handle points to, unlike in GLSL, where a uniform sampler/image can be updated
@@ -341,7 +341,7 @@ void MTLShaderInterface::prepare_common_shader_inputs()
* to ensure texture handles are not treated as standard uniforms in Metal. */
current_input->location = texture_index + total_uniforms_;
- /* Binding represents texture slot [[texture(n)]]. */
+ /* Binding represents texture slot `[[texture(n)]]`. */
current_input->binding = shd_tex.slot_index;
current_input++;
}
diff --git a/source/blender/gpu/metal/mtl_shader_interface_type.hh b/source/blender/gpu/metal/mtl_shader_interface_type.hh
index a8e651d8509..3c4c87ee25b 100644
--- a/source/blender/gpu/metal/mtl_shader_interface_type.hh
+++ b/source/blender/gpu/metal/mtl_shader_interface_type.hh
@@ -245,7 +245,7 @@ inline uint mtl_get_data_type_alignment(eMTLDataType type)
return 32;
default:
- BLI_assert_msg(false, "Unrecognised MTL datatype.");
+ BLI_assert_msg(false, "Unrecognized MTL datatype.");
return 0;
};
}
diff --git a/source/blender/gpu/metal/mtl_state.mm b/source/blender/gpu/metal/mtl_state.mm
index 85080041246..31182cf91d1 100644
--- a/source/blender/gpu/metal/mtl_state.mm
+++ b/source/blender/gpu/metal/mtl_state.mm
@@ -202,7 +202,7 @@ static MTLCompareFunction gpu_stencil_func_to_metal(eGPUStencilTest stencil_func
case GPU_STENCIL_ALWAYS:
return MTLCompareFunctionAlways;
default:
- BLI_assert(false && "Unrecognised eGPUStencilTest function");
+ BLI_assert(false && "Unrecognized eGPUStencilTest function");
break;
}
return MTLCompareFunctionAlways;
diff --git a/source/blender/gpu/metal/mtl_texture_util.mm b/source/blender/gpu/metal/mtl_texture_util.mm
index 25b30c6cb0e..928393fb39e 100644
--- a/source/blender/gpu/metal/mtl_texture_util.mm
+++ b/source/blender/gpu/metal/mtl_texture_util.mm
@@ -124,7 +124,7 @@ MTLPixelFormat gpu_texture_format_to_metal(eGPUTextureFormat tex_format)
return MTLPixelFormatDepth16Unorm;
default:
- BLI_assert(!"Unrecognised GPU pixel format!\n");
+ BLI_assert(!"Unrecognized GPU pixel format!\n");
return MTLPixelFormatRGBA8Unorm;
}
}
@@ -183,7 +183,7 @@ int get_mtl_format_bytesize(MTLPixelFormat tex_format)
return 2;
default:
- BLI_assert(!"Unrecognised GPU pixel format!\n");
+ BLI_assert(!"Unrecognized GPU pixel format!\n");
return 1;
}
}
@@ -238,7 +238,7 @@ int get_mtl_format_num_components(MTLPixelFormat tex_format)
return 1;
default:
- BLI_assert(!"Unrecognised GPU pixel format!\n");
+ BLI_assert(!"Unrecognized GPU pixel format!\n");
return 1;
}
}
@@ -632,7 +632,7 @@ id<MTLComputePipelineState> gpu::MTLTexture::mtl_texture_read_impl(
depth_scale_factor = 0xFFFFFFFFu;
break;
default:
- BLI_assert_msg(0, "Unrecognised mode");
+ BLI_assert_msg(0, "Unrecognized mode");
break;
}
}
diff --git a/source/blender/gpu/shaders/compositor/compositor_morphological_distance_feather.glsl b/source/blender/gpu/shaders/compositor/compositor_morphological_distance_feather.glsl
index 8034f4a3ebd..acdd8a40342 100644
--- a/source/blender/gpu/shaders/compositor/compositor_morphological_distance_feather.glsl
+++ b/source/blender/gpu/shaders/compositor/compositor_morphological_distance_feather.glsl
@@ -59,7 +59,7 @@ void main()
/* Start with the center value as the maximum/minimum distance and reassign to the true maximum
* or minimum in the search loop below. Additionally, the center falloff is always 1.0, so start
- * with that. */
+ * with that. */
float limit_distance = center_value;
float limit_distance_falloff = 1.0;
diff --git a/source/blender/gpu/shaders/compositor/compositor_morphological_distance_threshold.glsl b/source/blender/gpu/shaders/compositor/compositor_morphological_distance_threshold.glsl
index 5931c4f0271..e6625e7419f 100644
--- a/source/blender/gpu/shaders/compositor/compositor_morphological_distance_threshold.glsl
+++ b/source/blender/gpu/shaders/compositor/compositor_morphological_distance_threshold.glsl
@@ -58,7 +58,7 @@ void main()
* a texture loader with a fallback value. And since we don't want those values to affect the
* result, the fallback value is chosen such that the inner condition fails, which is when the
* sampled pixel and the center pixel are the same, so choose a fallback that will be considered
- * masked if the center pixel is masked and unmasked otherwise. */
+ * masked if the center pixel is masked and unmasked otherwise. */
vec4 fallback = vec4(is_center_masked ? 1.0 : 0.0);
/* Since the distance search window is limited to the given radius, the maximum possible squared
diff --git a/source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl b/source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl
index cf961b20b34..ab44dac93e6 100644
--- a/source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl
+++ b/source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl
@@ -4,7 +4,7 @@ void main()
{
ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
- /* Get the normalized coordinates of the pixel centers. */
+ /* Get the normalized coordinates of the pixel centers. */
vec2 normalized_texel = (vec2(texel) + vec2(0.5)) / vec2(texture_size(input_tx));
/* Sample the red and blue channels shifted by the dispersion amount. */
diff --git a/source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl b/source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl
index be984d81603..b8561e5f059 100644
--- a/source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl
+++ b/source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl
@@ -9,7 +9,7 @@ void main()
/* Transform the input image by transforming the domain coordinates with the inverse of input
* image's transformation. The inverse transformation is an affine matrix and thus the
- * coordinates should be in homogeneous coordinates. */
+ * coordinates should be in homogeneous coordinates. */
coordinates = (mat3(inverse_transformation) * vec3(coordinates, 1.0)).xy;
/* Since an input image with an identity transformation is supposed to be centered in the domain,
diff --git a/source/blender/gpu/shaders/gpu_shader_codegen_lib.glsl b/source/blender/gpu/shaders/gpu_shader_codegen_lib.glsl
index c0821085c8d..94707de71ed 100644
--- a/source/blender/gpu/shaders/gpu_shader_codegen_lib.glsl
+++ b/source/blender/gpu/shaders/gpu_shader_codegen_lib.glsl
@@ -187,7 +187,7 @@ struct ClosureTransparency {
struct GlobalData {
/** World position. */
vec3 P;
- /** Surface Normal. Normalized, overriden by bump displacement. */
+ /** Surface Normal. Normalized, overridden by bump displacement. */
vec3 N;
/** Raw interpolated normal (non-normalized) data. */
vec3 Ni;