Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/source
diff options
context:
space:
mode:
Diffstat (limited to 'source')
-rw-r--r--source/blender/blenfont/intern/blf_font.c12
-rw-r--r--source/blender/blenkernel/intern/anim_sys.c10
-rw-r--r--source/blender/blenkernel/intern/curves.cc54
-rw-r--r--source/blender/blenkernel/intern/gpencil_modifier.c2
-rw-r--r--source/blender/blenkernel/intern/main_namemap.cc2
-rw-r--r--source/blender/blenkernel/intern/subsurf_ccg.c2
-rw-r--r--source/blender/blenkernel/nla_private.h11
-rw-r--r--source/blender/blenlib/intern/BLI_kdopbvh.c2
-rw-r--r--source/blender/blenloader/BLO_readfile.h8
-rw-r--r--source/blender/compositor/realtime_compositor/COM_compile_state.hh2
-rw-r--r--source/blender/compositor/realtime_compositor/COM_shader_operation.hh2
-rw-r--r--source/blender/compositor/realtime_compositor/COM_simple_operation.hh2
-rw-r--r--source/blender/compositor/realtime_compositor/COM_utilities.hh38
-rw-r--r--source/blender/compositor/realtime_compositor/intern/operation.cc2
-rw-r--r--source/blender/compositor/realtime_compositor/intern/realize_on_domain_operation.cc2
-rw-r--r--source/blender/draw/engines/eevee/eevee_cryptomatte.c4
-rw-r--r--source/blender/draw/engines/eevee/shaders/closure_type_lib.glsl4
-rw-r--r--source/blender/draw/engines/eevee_next/eevee_defines.hh6
-rw-r--r--source/blender/draw/engines/eevee_next/eevee_film.cc2
-rw-r--r--source/blender/draw/engines/eevee_next/eevee_sync.cc2
-rw-r--r--source/blender/draw/engines/eevee_next/eevee_world.cc4
-rw-r--r--source/blender/draw/engines/eevee_next/shaders/eevee_depth_of_field_resolve_comp.glsl2
-rw-r--r--source/blender/draw/engines/eevee_next/shaders/eevee_depth_of_field_stabilize_comp.glsl2
-rw-r--r--source/blender/draw/engines/eevee_next/shaders/eevee_motion_blur_dilate_comp.glsl2
-rw-r--r--source/blender/draw/intern/draw_command.cc6
-rw-r--r--source/blender/draw/intern/draw_command_shared.hh4
-rw-r--r--source/blender/draw/intern/draw_curves.cc10
-rw-r--r--source/blender/draw/intern/draw_hair.cc18
-rw-r--r--source/blender/draw/intern/draw_manager.c2
-rw-r--r--source/blender/draw/intern/draw_manager.cc6
-rw-r--r--source/blender/draw/intern/draw_manager.hh16
-rw-r--r--source/blender/draw/intern/draw_pass.hh11
-rw-r--r--source/blender/draw/intern/draw_shader_shared.h6
-rw-r--r--source/blender/draw/intern/draw_view.hh4
-rw-r--r--source/blender/draw/intern/shaders/draw_command_generate_comp.glsl2
-rw-r--r--source/blender/editors/animation/keyframes_edit.c7
-rw-r--r--source/blender/editors/animation/keyingsets.c2
-rw-r--r--source/blender/editors/curve/editcurve.c2
-rw-r--r--source/blender/editors/curves/intern/curves_ops.cc2
-rw-r--r--source/blender/editors/interface/view2d.cc2
-rw-r--r--source/blender/editors/sculpt_paint/paint_image.cc4
-rw-r--r--source/blender/editors/sculpt_paint/paint_vertex.cc2
-rw-r--r--source/blender/editors/sculpt_paint/sculpt.c2
-rw-r--r--source/blender/editors/sculpt_paint/sculpt_undo.c4
-rw-r--r--source/blender/editors/space_node/node_add.cc2
-rw-r--r--source/blender/editors/space_node/node_relationships.cc2
-rw-r--r--source/blender/editors/space_outliner/outliner_edit.cc2
-rw-r--r--source/blender/editors/space_outliner/outliner_tools.cc2
-rw-r--r--source/blender/editors/space_sequencer/sequencer_drag_drop.c2
-rw-r--r--source/blender/editors/space_sequencer/sequencer_edit.c2
-rw-r--r--source/blender/editors/transform/transform_convert_armature.c2
-rw-r--r--source/blender/editors/transform/transform_mode.c2
-rw-r--r--source/blender/editors/transform/transform_snap_object.cc4
-rw-r--r--source/blender/geometry/intern/uv_parametrizer.cc8
-rw-r--r--source/blender/gpencil_modifiers/intern/lineart/lineart_cpu.c6
-rw-r--r--source/blender/gpencil_modifiers/intern/lineart/lineart_shadow.c2
-rw-r--r--source/blender/gpu/GPU_index_buffer.h4
-rw-r--r--source/blender/gpu/GPU_shader_shared_utils.h2
-rw-r--r--source/blender/gpu/intern/gpu_codegen.cc2
-rw-r--r--source/blender/gpu/intern/gpu_index_buffer.cc4
-rw-r--r--source/blender/gpu/intern/gpu_shader_create_info.hh18
-rw-r--r--source/blender/gpu/metal/mtl_capabilities.hh2
-rw-r--r--source/blender/gpu/metal/mtl_command_buffer.mm8
-rw-r--r--source/blender/gpu/metal/mtl_context.hh18
-rw-r--r--source/blender/gpu/metal/mtl_context.mm6
-rw-r--r--source/blender/gpu/metal/mtl_framebuffer.hh36
-rw-r--r--source/blender/gpu/metal/mtl_framebuffer.mm20
-rw-r--r--source/blender/gpu/metal/mtl_index_buffer.hh16
-rw-r--r--source/blender/gpu/metal/mtl_index_buffer.mm47
-rw-r--r--source/blender/gpu/metal/mtl_memory.hh6
-rw-r--r--source/blender/gpu/metal/mtl_pso_descriptor_state.hh12
-rw-r--r--source/blender/gpu/metal/mtl_shader.hh39
-rw-r--r--source/blender/gpu/metal/mtl_shader.mm85
-rw-r--r--source/blender/gpu/metal/mtl_shader_generator.hh111
-rw-r--r--source/blender/gpu/metal/mtl_shader_generator.mm160
-rw-r--r--source/blender/gpu/metal/mtl_shader_interface.hh32
-rw-r--r--source/blender/gpu/metal/mtl_shader_interface.mm32
-rw-r--r--source/blender/gpu/metal/mtl_shader_interface_type.hh2
-rw-r--r--source/blender/gpu/metal/mtl_state.mm2
-rw-r--r--source/blender/gpu/metal/mtl_texture_util.mm8
-rw-r--r--source/blender/gpu/shaders/compositor/compositor_morphological_distance_feather.glsl2
-rw-r--r--source/blender/gpu/shaders/compositor/compositor_morphological_distance_threshold.glsl2
-rw-r--r--source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl2
-rw-r--r--source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl2
-rw-r--r--source/blender/gpu/shaders/gpu_shader_codegen_lib.glsl2
-rw-r--r--source/blender/io/usd/intern/usd_reader_mesh.cc2
-rw-r--r--source/blender/io/wavefront_obj/exporter/obj_export_io.hh2
-rw-r--r--source/blender/makesdna/DNA_gpencil_modifier_types.h6
-rw-r--r--source/blender/makesrna/intern/rna_space.c2
-rw-r--r--source/blender/nodes/composite/nodes/node_composite_image.cc2
-rw-r--r--source/blender/nodes/shader/nodes/node_shader_bsdf_principled.cc2
-rw-r--r--source/blender/python/generic/py_capi_utils.c2
-rw-r--r--source/blender/windowmanager/intern/wm_event_system.cc2
93 files changed, 534 insertions, 495 deletions
diff --git a/source/blender/blenfont/intern/blf_font.c b/source/blender/blenfont/intern/blf_font.c
index 03629db0acd..3ddeaaaf1c7 100644
--- a/source/blender/blenfont/intern/blf_font.c
+++ b/source/blender/blenfont/intern/blf_font.c
@@ -223,7 +223,7 @@ void blf_batch_draw_begin(FontBLF *font)
g_batch.ofs[1] = font->pos[1];
}
else {
- /* Offset is baked in modelview mat. */
+ /* Offset is baked in model-view matrix. */
zero_v2_int(g_batch.ofs);
}
@@ -234,13 +234,13 @@ void blf_batch_draw_begin(FontBLF *font)
bool mat_changed = equals_m4m4(gpumat, g_batch.mat) == false;
if (mat_changed) {
- /* Modelviewmat is no longer the same.
- * Flush cache but with the previous mat. */
+ /* Model view matrix is no longer the same.
+ * Flush cache but with the previous matrix. */
GPU_matrix_push();
GPU_matrix_set(g_batch.mat);
}
- /* flush cache if config is not the same. */
+ /* Flush cache if configuration is not the same. */
if (mat_changed || font_changed || shader_changed) {
blf_batch_draw();
g_batch.simple_shader = simple_shader;
@@ -253,7 +253,7 @@ void blf_batch_draw_begin(FontBLF *font)
if (mat_changed) {
GPU_matrix_pop();
- /* Save for next memcmp. */
+ /* Save for next `memcmp`. */
memcpy(g_batch.mat, gpumat, sizeof(g_batch.mat));
}
}
@@ -279,7 +279,7 @@ static GPUTexture *blf_batch_cache_texture_load(void)
int offset_x = bitmap_len_landed % tex_width;
int offset_y = bitmap_len_landed / tex_width;
- /* TODO(germano): Update more than one row in a single call. */
+ /* TODO(@germano): Update more than one row in a single call. */
while (remain) {
int remain_row = tex_width - offset_x;
int width = remain > remain_row ? remain_row : remain;
diff --git a/source/blender/blenkernel/intern/anim_sys.c b/source/blender/blenkernel/intern/anim_sys.c
index 19fef1ce825..85ce647fcab 100644
--- a/source/blender/blenkernel/intern/anim_sys.c
+++ b/source/blender/blenkernel/intern/anim_sys.c
@@ -3624,16 +3624,6 @@ void nlasnapshot_blend_get_inverted_upper_snapshot(NlaEvalData *eval_data,
}
}
-/** Using \a blended_snapshot and \a upper_snapshot, we can solve for the \a r_lower_snapshot.
- *
- * Only channels that exist within \a blended_snapshot are processed.
- * Only blended values within the \a remap_domain are processed.
- *
- * Writes to \a r_upper_snapshot NlaEvalChannelSnapshot->remap_domain to match remapping success.
- *
- * Assumes caller marked upper values that are in the \a blend_domain. This determines whether the
- * blended value came directly from the lower snapshot or a result of blending.
- **/
void nlasnapshot_blend_get_inverted_lower_snapshot(NlaEvalData *eval_data,
NlaEvalSnapshot *blended_snapshot,
NlaEvalSnapshot *upper_snapshot,
diff --git a/source/blender/blenkernel/intern/curves.cc b/source/blender/blenkernel/intern/curves.cc
index 6211f6b7be6..c6e7bb72f53 100644
--- a/source/blender/blenkernel/intern/curves.cc
+++ b/source/blender/blenkernel/intern/curves.cc
@@ -199,33 +199,33 @@ static void curves_blend_read_expand(BlendExpander *expander, ID *id)
}
IDTypeInfo IDType_ID_CV = {
- /*id_code */ ID_CV,
- /*id_filter */ FILTER_ID_CV,
- /*main_listbase_index */ INDEX_ID_CV,
- /*struct_size */ sizeof(Curves),
- /*name */ "Curves",
- /*name_plural */ "hair_curves",
- /*translation_context */ BLT_I18NCONTEXT_ID_CURVES,
- /*flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE,
- /*asset_type_info */ nullptr,
-
- /*init_data */ curves_init_data,
- /*copy_data */ curves_copy_data,
- /*free_data */ curves_free_data,
- /*make_local */ nullptr,
- /*foreach_id */ curves_foreach_id,
- /*foreach_cache */ nullptr,
- /*foreach_path */ nullptr,
- /*owner_get */ nullptr,
-
- /*blend_write */ curves_blend_write,
- /*blend_read_data */ curves_blend_read_data,
- /*blend_read_lib */ curves_blend_read_lib,
- /*blend_read_expand */ curves_blend_read_expand,
-
- /*blend_read_undo_preserve */ nullptr,
-
- /*lib_override_apply_post */ nullptr,
+ /* id_code */ ID_CV,
+ /* id_filter */ FILTER_ID_CV,
+ /* main_listbase_index */ INDEX_ID_CV,
+ /* struct_size */ sizeof(Curves),
+ /* name */ "Curves",
+ /* name_plural */ "hair_curves",
+ /* translation_context */ BLT_I18NCONTEXT_ID_CURVES,
+ /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE,
+ /* asset_type_info */ nullptr,
+
+ /* init_data */ curves_init_data,
+ /* copy_data */ curves_copy_data,
+ /* free_data */ curves_free_data,
+ /* make_local */ nullptr,
+ /* foreach_id */ curves_foreach_id,
+ /* foreach_cache */ nullptr,
+ /* foreach_path */ nullptr,
+ /* owner_get */ nullptr,
+
+ /* blend_write */ curves_blend_write,
+ /* blend_read_data */ curves_blend_read_data,
+ /* blend_read_lib */ curves_blend_read_lib,
+ /* blend_read_expand */ curves_blend_read_expand,
+
+ /* blend_read_undo_preserve */ nullptr,
+
+ /* lib_override_apply_post */ nullptr,
};
void *BKE_curves_add(Main *bmain, const char *name)
diff --git a/source/blender/blenkernel/intern/gpencil_modifier.c b/source/blender/blenkernel/intern/gpencil_modifier.c
index 8ac268b26b0..33f84aff545 100644
--- a/source/blender/blenkernel/intern/gpencil_modifier.c
+++ b/source/blender/blenkernel/intern/gpencil_modifier.c
@@ -695,7 +695,7 @@ static void gpencil_copy_visible_frames_to_eval(Depsgraph *depsgraph, Scene *sce
gpl_eval->actframe = BKE_gpencil_layer_frame_get(gpl_eval, remap_cfra, GP_GETFRAME_USE_PREV);
}
/* Always copy active frame to eval, because the modifiers always evaluate the active frame,
- * even if it's not visible (e.g. the layer is hidden).*/
+ * even if it's not visible (e.g. the layer is hidden). */
if (gpl_eval->actframe != NULL) {
copy_frame_to_eval_ex(gpl_eval->actframe->runtime.gpf_orig, gpl_eval->actframe);
}
diff --git a/source/blender/blenkernel/intern/main_namemap.cc b/source/blender/blenkernel/intern/main_namemap.cc
index a164633af09..a600afb4ed1 100644
--- a/source/blender/blenkernel/intern/main_namemap.cc
+++ b/source/blender/blenkernel/intern/main_namemap.cc
@@ -228,7 +228,7 @@ static void main_namemap_populate(UniqueName_Map *name_map, struct Main *bmain,
/* Get the name map object used for the given Main/ID.
* Lazily creates and populates the contents of the name map, if ensure_created is true.
- * Note: if the contents are populated, the name of the given ID itself is not added. */
+ * NOTE: if the contents are populated, the name of the given ID itself is not added. */
static UniqueName_Map *get_namemap_for(Main *bmain, ID *id, bool ensure_created)
{
if (id->lib != nullptr) {
diff --git a/source/blender/blenkernel/intern/subsurf_ccg.c b/source/blender/blenkernel/intern/subsurf_ccg.c
index 52df555090f..88c260be9ba 100644
--- a/source/blender/blenkernel/intern/subsurf_ccg.c
+++ b/source/blender/blenkernel/intern/subsurf_ccg.c
@@ -1598,7 +1598,7 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm,
gridSize = ccgSubSurf_getGridSize(ss);
gridFaces = gridSize - 1;
gridCuts = gridSize - 2;
- /*gridInternalVerts = gridSideVerts * gridSideVerts; - as yet, unused */
+ // gridInternalVerts = gridSideVerts * gridSideVerts; /* As yet, unused. */
gridSideEdges = gridSize - 1;
gridInternalEdges = (gridSideEdges - 1) * gridSideEdges * 2;
diff --git a/source/blender/blenkernel/nla_private.h b/source/blender/blenkernel/nla_private.h
index c6fbdcc542c..96aecadd3f5 100644
--- a/source/blender/blenkernel/nla_private.h
+++ b/source/blender/blenkernel/nla_private.h
@@ -241,6 +241,17 @@ void nlasnapshot_blend_get_inverted_upper_snapshot(NlaEvalData *eval_data,
float upper_influence,
NlaEvalSnapshot *r_upper_snapshot);
+/**
+ * Using \a blended_snapshot and \a upper_snapshot, we can solve for the \a r_lower_snapshot.
+ *
+ * Only channels that exist within \a blended_snapshot are processed.
+ * Only blended values within the \a remap_domain are processed.
+ *
+ * Writes to \a r_upper_snapshot `NlaEvalChannelSnapshot->remap_domain` to match remapping success.
+ *
+ * Assumes caller marked upper values that are in the \a blend_domain. This determines whether the
+ * blended value came directly from the lower snapshot or a result of blending.
+ */
void nlasnapshot_blend_get_inverted_lower_snapshot(NlaEvalData *eval_data,
NlaEvalSnapshot *blended_snapshot,
NlaEvalSnapshot *upper_snapshot,
diff --git a/source/blender/blenlib/intern/BLI_kdopbvh.c b/source/blender/blenlib/intern/BLI_kdopbvh.c
index 62bf17bd415..a43b725b6e3 100644
--- a/source/blender/blenlib/intern/BLI_kdopbvh.c
+++ b/source/blender/blenlib/intern/BLI_kdopbvh.c
@@ -1385,7 +1385,7 @@ BVHTreeOverlap *BLI_bvhtree_overlap(
static bool tree_intersect_plane_test(const float *bv, const float plane[4])
{
- /* TODO(germano): Support other KDOP geometries. */
+ /* TODO(@germano): Support other KDOP geometries. */
const float bb_min[3] = {bv[0], bv[2], bv[4]};
const float bb_max[3] = {bv[1], bv[3], bv[5]};
float bb_near[3], bb_far[3];
diff --git a/source/blender/blenloader/BLO_readfile.h b/source/blender/blenloader/BLO_readfile.h
index 043f9ffd723..93040fa01ee 100644
--- a/source/blender/blenloader/BLO_readfile.h
+++ b/source/blender/blenloader/BLO_readfile.h
@@ -229,10 +229,10 @@ struct LinkNode *BLO_blendhandle_get_datablock_names(BlendHandle *bh,
* \return A BLI_linklist of `BLODataBlockInfo *`.
* The links and #BLODataBlockInfo.asset_data should be freed with MEM_freeN.
*/
-struct LinkNode * /*BLODataBlockInfo */ BLO_blendhandle_get_datablock_info(BlendHandle *bh,
- int ofblocktype,
- bool use_assets_only,
- int *r_tot_info_items);
+struct LinkNode * /*BLODataBlockInfo*/ BLO_blendhandle_get_datablock_info(BlendHandle *bh,
+ int ofblocktype,
+ bool use_assets_only,
+ int *r_tot_info_items);
/**
* Gets the previews of all the data-blocks in a file of a certain type
* (e.g. all the scene previews in a file).
diff --git a/source/blender/compositor/realtime_compositor/COM_compile_state.hh b/source/blender/compositor/realtime_compositor/COM_compile_state.hh
index ed6ad414e3b..924919bbef6 100644
--- a/source/blender/compositor/realtime_compositor/COM_compile_state.hh
+++ b/source/blender/compositor/realtime_compositor/COM_compile_state.hh
@@ -143,7 +143,7 @@ class CompileState {
* the give node. */
void add_node_to_shader_compile_unit(DNode node);
- /* Get a reference to the shader compile unit. */
+ /* Get a reference to the shader compile unit. */
ShaderCompileUnit &get_shader_compile_unit();
/* Clear the compile unit. This should be called once the compile unit is compiled to ready it to
diff --git a/source/blender/compositor/realtime_compositor/COM_shader_operation.hh b/source/blender/compositor/realtime_compositor/COM_shader_operation.hh
index a33dcbf25be..d03e52ac8f2 100644
--- a/source/blender/compositor/realtime_compositor/COM_shader_operation.hh
+++ b/source/blender/compositor/realtime_compositor/COM_shader_operation.hh
@@ -224,7 +224,7 @@ class ShaderOperation : public Operation {
*
* This method first generates the necessary code to load the inputs and store the outputs. Then,
* it creates a compute shader from the generated sources. Finally, it adds the necessary GPU
- * resources to the shader. */
+ * resources to the shader. */
static void generate_code(void *thunk, GPUMaterial *material, GPUCodegenOutput *code_generator);
/* Add an image in the shader for each of the declared outputs. Additionally, emit code to define
diff --git a/source/blender/compositor/realtime_compositor/COM_simple_operation.hh b/source/blender/compositor/realtime_compositor/COM_simple_operation.hh
index 1655e52ac9a..0061986ce42 100644
--- a/source/blender/compositor/realtime_compositor/COM_simple_operation.hh
+++ b/source/blender/compositor/realtime_compositor/COM_simple_operation.hh
@@ -15,7 +15,7 @@ namespace blender::realtime_compositor {
* A simple operation is an operation that takes exactly one input and computes exactly one output.
* Moreover, the output is guaranteed to only have a single user, that is, its reference count will
* be one. Such operations can be attached to the inputs of operations to pre-process the inputs to
- * prepare them before the operation is executed.*/
+ * prepare them before the operation is executed. */
class SimpleOperation : public Operation {
private:
/* The identifier of the output. This is constant for all operations. */
diff --git a/source/blender/compositor/realtime_compositor/COM_utilities.hh b/source/blender/compositor/realtime_compositor/COM_utilities.hh
index 614384bd573..25f9fd0c1b6 100644
--- a/source/blender/compositor/realtime_compositor/COM_utilities.hh
+++ b/source/blender/compositor/realtime_compositor/COM_utilities.hh
@@ -16,44 +16,54 @@ namespace blender::realtime_compositor {
using namespace nodes::derived_node_tree_types;
-/* Get the origin socket of the given node input. If the input is not linked, the socket itself is
+/**
+ * Get the origin socket of the given node input. If the input is not linked, the socket itself is
* returned. If the input is linked, the socket that is linked to it is returned, which could
* either be an input or an output. An input socket is returned when the given input is connected
- * to an unlinked input of a group input node. */
+ * to an unlinked input of a group input node.
+ */
DSocket get_input_origin_socket(DInputSocket input);
-/* Get the output socket linked to the given node input. If the input is not linked to an output, a
- * null output is returned. */
+/**
+ * Get the output socket linked to the given node input. If the input is not linked to an output,
+ * a null output is returned.
+ */
DOutputSocket get_output_linked_to_input(DInputSocket input);
-/* Get the result type that corresponds to the type of the given socket. */
+/** Get the result type that corresponds to the type of the given socket. */
ResultType get_node_socket_result_type(const bNodeSocket *socket);
-/* Returns true if any of the nodes linked to the given output satisfies the given condition, and
- * false otherwise. */
+/**
+ * Returns true if any of the nodes linked to the given output satisfies the given condition,
+ * and false otherwise.
+ */
bool is_output_linked_to_node_conditioned(DOutputSocket output,
FunctionRef<bool(DNode)> condition);
-/* Returns the number of inputs linked to the given output that satisfy the given condition. */
+/** Returns the number of inputs linked to the given output that satisfy the given condition. */
int number_of_inputs_linked_to_output_conditioned(DOutputSocket output,
FunctionRef<bool(DInputSocket)> condition);
-/* A node is a shader node if it defines a method to get a shader node operation. */
+/** A node is a shader node if it defines a method to get a shader node operation. */
bool is_shader_node(DNode node);
-/* Returns true if the given node is supported, that is, have an implementation. Returns false
- * otherwise. */
+/**
+ * Returns true if the given node is supported, that is, have an implementation.
+ * Returns false otherwise.
+ */
bool is_node_supported(DNode node);
-/* Get the input descriptor of the given input socket. */
+/** Get the input descriptor of the given input socket. */
InputDescriptor input_descriptor_from_input_socket(const bNodeSocket *socket);
-/* Dispatch the given compute shader in a 2D compute space such that the number of threads in both
+/**
+ * Dispatch the given compute shader in a 2D compute space such that the number of threads in both
* dimensions is as small as possible but at least covers the entirety of threads_range assuming
* the shader has a local group size given by local_size. That means that the number of threads
* might be a bit larger than threads_range, so shaders has to put that into consideration. A
* default local size of 16x16 is assumed, which is the optimal local size for many image
- * processing shaders. */
+ * processing shaders.
+ */
void compute_dispatch_threads_at_least(GPUShader *shader,
int2 threads_range,
int2 local_size = int2(16));
diff --git a/source/blender/compositor/realtime_compositor/intern/operation.cc b/source/blender/compositor/realtime_compositor/intern/operation.cc
index 42dd5aeebe8..fb02807d729 100644
--- a/source/blender/compositor/realtime_compositor/intern/operation.cc
+++ b/source/blender/compositor/realtime_compositor/intern/operation.cc
@@ -83,7 +83,7 @@ void Operation::add_and_evaluate_input_processors()
* because the construction of the input processors may depend on the result of previous input
* processors for all inputs. For instance, the realize on domain input processor considers the
* value of all inputs, so previous input processors for all inputs needs to be added and
- * evaluated first. */
+ * evaluated first. */
for (const StringRef &identifier : results_mapped_to_inputs_.keys()) {
SimpleOperation *single_value = ReduceToSingleValueOperation::construct_if_needed(
diff --git a/source/blender/compositor/realtime_compositor/intern/realize_on_domain_operation.cc b/source/blender/compositor/realtime_compositor/intern/realize_on_domain_operation.cc
index 47993060a74..817293c0fa6 100644
--- a/source/blender/compositor/realtime_compositor/intern/realize_on_domain_operation.cc
+++ b/source/blender/compositor/realtime_compositor/intern/realize_on_domain_operation.cc
@@ -41,7 +41,7 @@ void RealizeOnDomainOperation::execute()
const float3x3 local_transformation = input.domain().transformation *
domain_.transformation.inverted();
- /* Set the origin of the transformation to be the center of the domain. */
+ /* Set the origin of the transformation to be the center of the domain. */
const float3x3 transformation = float3x3::from_origin_transformation(
local_transformation, float2(domain_.size) / 2.0f);
diff --git a/source/blender/draw/engines/eevee/eevee_cryptomatte.c b/source/blender/draw/engines/eevee/eevee_cryptomatte.c
index fa70d2c6205..d805a039e8f 100644
--- a/source/blender/draw/engines/eevee/eevee_cryptomatte.c
+++ b/source/blender/draw/engines/eevee/eevee_cryptomatte.c
@@ -422,8 +422,8 @@ void EEVEE_cryptomatte_output_accumulate(EEVEE_ViewLayerData *UNUSED(sldata), EE
void EEVEE_cryptomatte_update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer)
{
/* NOTE: Name channels lowercase rgba so that compression rules check in OpenEXR DWA code uses
- * loseless compression. Reportedly this naming is the only one which works good from the
- * interoperability point of view. Using xyzw naming is not portable. */
+ * lossless compression. Reportedly this naming is the only one which works good from the
+ * interoperability point of view. Using XYZW naming is not portable. */
char cryptomatte_pass_name[MAX_NAME];
const short num_passes = eevee_cryptomatte_passes_per_layer(view_layer);
diff --git a/source/blender/draw/engines/eevee/shaders/closure_type_lib.glsl b/source/blender/draw/engines/eevee/shaders/closure_type_lib.glsl
index 4070ede116b..eeccb393a5c 100644
--- a/source/blender/draw/engines/eevee/shaders/closure_type_lib.glsl
+++ b/source/blender/draw/engines/eevee/shaders/closure_type_lib.glsl
@@ -6,8 +6,8 @@
#ifndef VOLUMETRICS
-uniform int outputSsrId; /*Default = 1;*/
-uniform int outputSssId; /*Default = 1;*/
+uniform int outputSsrId; /* Default = 1; */
+uniform int outputSssId; /* Default = 1; */
#endif
diff --git a/source/blender/draw/engines/eevee_next/eevee_defines.hh b/source/blender/draw/engines/eevee_next/eevee_defines.hh
index ec05cce3d02..2f338e707c0 100644
--- a/source/blender/draw/engines/eevee_next/eevee_defines.hh
+++ b/source/blender/draw/engines/eevee_next/eevee_defines.hh
@@ -83,20 +83,20 @@
#define RBUFS_AOV_COLOR_SLOT 5
#define RBUFS_AOV_VALUE_SLOT 6
-/* Uniform Bufs. */
+/* Uniform Buffers. */
/* Only during prepass. */
#define VELOCITY_CAMERA_PREV_BUF 3
#define VELOCITY_CAMERA_CURR_BUF 4
#define VELOCITY_CAMERA_NEXT_BUF 5
-/* Storage Bufs. */
+/* Storage Buffers. */
#define LIGHT_CULL_BUF_SLOT 0
#define LIGHT_BUF_SLOT 1
#define LIGHT_ZBIN_BUF_SLOT 2
#define LIGHT_TILE_BUF_SLOT 3
#define RBUFS_AOV_BUF_SLOT 5
#define SAMPLING_BUF_SLOT 6
-/* Only during prepass. */
+/* Only during pre-pass. */
#define VELOCITY_OBJ_PREV_BUF_SLOT 0
#define VELOCITY_OBJ_NEXT_BUF_SLOT 1
#define VELOCITY_GEO_PREV_BUF_SLOT 2
diff --git a/source/blender/draw/engines/eevee_next/eevee_film.cc b/source/blender/draw/engines/eevee_next/eevee_film.cc
index b0731ceec2f..4679889e59a 100644
--- a/source/blender/draw/engines/eevee_next/eevee_film.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_film.cc
@@ -270,7 +270,7 @@ void Film::init(const int2 &extent, const rcti *output_rect)
data_.any_render_pass_2 = (enabled_passes_ & color_passes_2) != 0;
}
{
- /* Set pass offsets. */
+ /* Set pass offsets. */
data_.display_id = aovs_info.display_id;
data_.display_is_value = aovs_info.display_is_value;
diff --git a/source/blender/draw/engines/eevee_next/eevee_sync.cc b/source/blender/draw/engines/eevee_next/eevee_sync.cc
index 6f1725a7120..5f8b87c24b9 100644
--- a/source/blender/draw/engines/eevee_next/eevee_sync.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_sync.cc
@@ -162,7 +162,7 @@ struct gpIterData {
static void gpencil_drawcall_flush(gpIterData &iter)
{
-#if 0 /* Incompatible with new darw manager. */
+#if 0 /* Incompatible with new draw manager. */
if (iter.geom != nullptr) {
geometry_call(iter.material->shading.sub_pass,
iter.ob,
diff --git a/source/blender/draw/engines/eevee_next/eevee_world.cc b/source/blender/draw/engines/eevee_next/eevee_world.cc
index 56cb0f127db..313c0bda42e 100644
--- a/source/blender/draw/engines/eevee_next/eevee_world.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_world.cc
@@ -42,10 +42,10 @@ DefaultWorldNodeTree::~DefaultWorldNodeTree()
MEM_SAFE_FREE(ntree_);
}
-/* Configure a default nodetree with the given world. */
+/* Configure a default node-tree with the given world. */
bNodeTree *DefaultWorldNodeTree::nodetree_get(::World *wo)
{
- /* WARNING: This function is not threadsafe. Which is not a problem for the moment. */
+ /* WARNING: This function is not thread-safe. Which is not a problem for the moment. */
copy_v3_fl3(color_socket_->value, wo->horr, wo->horg, wo->horb);
return ntree_;
}
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_depth_of_field_resolve_comp.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_depth_of_field_resolve_comp.glsl
index 8873a9da235..5123eb0c238 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_depth_of_field_resolve_comp.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_depth_of_field_resolve_comp.glsl
@@ -165,7 +165,7 @@ void main()
out_color = out_color * (1.0 - layer_weight) + layer_color;
}
- /* Fix float precision issue in alpha compositing. */
+ /* Fix float precision issue in alpha compositing. */
if (out_color.a > 0.99) {
out_color.a = 1.0;
}
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_depth_of_field_stabilize_comp.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_depth_of_field_stabilize_comp.glsl
index 5ffedf3068b..46a25b84840 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_depth_of_field_stabilize_comp.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_depth_of_field_stabilize_comp.glsl
@@ -83,7 +83,7 @@ void dof_cache_init()
barrier();
}
-/* Note: Sample color space is already in YCoCg space. */
+/* NOTE: Sample color space is already in YCoCg space. */
DofSample dof_fetch_input_sample(ivec2 offset)
{
ivec2 coord = offset + 1 + ivec2(gl_LocalInvocationID.xy);
diff --git a/source/blender/draw/engines/eevee_next/shaders/eevee_motion_blur_dilate_comp.glsl b/source/blender/draw/engines/eevee_next/shaders/eevee_motion_blur_dilate_comp.glsl
index c3606dca4f7..07139ea6a09 100644
--- a/source/blender/draw/engines/eevee_next/shaders/eevee_motion_blur_dilate_comp.glsl
+++ b/source/blender/draw/engines/eevee_next/shaders/eevee_motion_blur_dilate_comp.glsl
@@ -19,7 +19,7 @@ MotionRect compute_motion_rect(ivec2 tile, vec2 motion)
#if DEBUG_BYPASS_DILATION
return MotionRect(tile, ivec2(1));
#endif
- /* Ceil to number of tile touched.*/
+ /* Ceil to number of tile touched. */
ivec2 point1 = tile + ivec2(sign(motion) * ceil(abs(motion) / float(MOTION_BLUR_TILE_SIZE)));
ivec2 point2 = tile;
diff --git a/source/blender/draw/intern/draw_command.cc b/source/blender/draw/intern/draw_command.cc
index 7d5ea5c2048..ff69885b3b6 100644
--- a/source/blender/draw/intern/draw_command.cc
+++ b/source/blender/draw/intern/draw_command.cc
@@ -437,7 +437,7 @@ std::string DispatchIndirect::serialize() const
std::string Barrier::serialize() const
{
- /* TOOD(fclem): Better serialization... */
+ /* TODO(@fclem): Better serialization... */
return std::string(".barrier(") + std::to_string(type) + ")";
}
@@ -464,7 +464,7 @@ std::string Clear::serialize() const
std::string StateSet::serialize() const
{
- /* TOOD(fclem): Better serialization... */
+ /* TODO(@fclem): Better serialization... */
return std::string(".state_set(") + std::to_string(new_state) + ")";
}
@@ -562,7 +562,7 @@ void DrawMultiBuf::bind(RecordingState &state,
BLI_assert(batch_inst_len == 1);
UNUSED_VARS_NDEBUG(batch_inst_len);
- /* Now that we got the batch infos, we can set the counters to 0. */
+ /* Now that we got the batch information, we can set the counters to 0. */
group.total_counter = group.front_facing_counter = group.back_facing_counter = 0;
}
diff --git a/source/blender/draw/intern/draw_command_shared.hh b/source/blender/draw/intern/draw_command_shared.hh
index 22d1facfb09..9fbbe23f0ce 100644
--- a/source/blender/draw/intern/draw_command_shared.hh
+++ b/source/blender/draw/intern/draw_command_shared.hh
@@ -24,7 +24,7 @@ struct RecordingState;
* the same render state.
*/
struct DrawGroup {
- /** Index of next DrawGroup from the same header. */
+ /** Index of next #DrawGroup from the same header. */
uint next;
/** Index of the first instances after sorting. */
@@ -34,7 +34,7 @@ struct DrawGroup {
/** Number of non inverted scaling instances in this Group. */
uint front_facing_len;
- /** GPUBatch values to be copied to DrawCommand after sorting (if not overriden). */
+ /** #GPUBatch values to be copied to #DrawCommand after sorting (if not overridden). */
int vertex_len;
int vertex_first;
int base_index;
diff --git a/source/blender/draw/intern/draw_curves.cc b/source/blender/draw/intern/draw_curves.cc
index 9c4181b0161..a61769e7a63 100644
--- a/source/blender/draw/intern/draw_curves.cc
+++ b/source/blender/draw/intern/draw_curves.cc
@@ -478,9 +478,9 @@ void DRW_curves_update()
GPU_framebuffer_free(fb);
}
else {
- /* Note(Metal): If compute is not supported, bind a temporary framebuffer to avoid
+ /* NOTE(Metal): If compute is not supported, bind a temporary frame-buffer to avoid
* side-effects from rendering in the active buffer.
- * We also need to guarantee that a Framebuffer is active to perform any rendering work,
+ * We also need to guarantee that a Frame-buffer is active to perform any rendering work,
* even if there is no output */
GPUFrameBuffer *temp_fb = nullptr;
GPUFrameBuffer *prev_fb = nullptr;
@@ -488,7 +488,7 @@ void DRW_curves_update()
if (!GPU_compute_shader_support()) {
prev_fb = GPU_framebuffer_active_get();
char errorOut[256];
- /* if the framebuffer is invalid we need a dummy framebuffer to be bound. */
+ /* if the frame-buffer is invalid we need a dummy frame-buffer to be bound. */
if (!GPU_framebuffer_check_valid(prev_fb, errorOut)) {
int width = 64;
int height = 64;
@@ -510,11 +510,11 @@ void DRW_curves_update()
GPU_memory_barrier(GPU_BARRIER_SHADER_STORAGE);
}
- /* Release temporary framebuffer. */
+ /* Release temporary frame-buffer. */
if (temp_fb != nullptr) {
GPU_framebuffer_free(temp_fb);
}
- /* Rebind existing framebuffer */
+ /* Rebind existing frame-buffer */
if (prev_fb != nullptr) {
GPU_framebuffer_bind(prev_fb);
}
diff --git a/source/blender/draw/intern/draw_hair.cc b/source/blender/draw/intern/draw_hair.cc
index 69f123b95f3..ceee1c7cb48 100644
--- a/source/blender/draw/intern/draw_hair.cc
+++ b/source/blender/draw/intern/draw_hair.cc
@@ -59,7 +59,7 @@ static int g_tf_target_height;
static GPUVertBuf *g_dummy_vbo = nullptr;
static GPUTexture *g_dummy_texture = nullptr;
-static DRWPass *g_tf_pass; /* XXX can be a problem with multiple DRWManager in the future */
+static DRWPass *g_tf_pass; /* XXX can be a problem with multiple #DRWManager in the future */
static blender::draw::UniformBuffer<CurvesInfos> *g_dummy_curves_info = nullptr;
static GPUShader *hair_refine_shader_get(ParticleRefineShader refinement)
@@ -87,7 +87,7 @@ void DRW_hair_init(void)
const float vert[4] = {0.0f, 0.0f, 0.0f, 0.0f};
GPU_vertbuf_data_alloc(g_dummy_vbo, 1);
GPU_vertbuf_attr_fill(g_dummy_vbo, dummy_id, vert);
- /* Create vbo immediately to bind to texture buffer. */
+ /* Create VBO immediately to bind to texture buffer. */
GPU_vertbuf_use(g_dummy_vbo);
g_dummy_texture = GPU_texture_create_from_vertbuf("hair_dummy_attr", g_dummy_vbo);
@@ -247,7 +247,7 @@ DRWShadingGroup *DRW_shgroup_hair_create_sub(Object *object,
DRWShadingGroup *shgrp = DRW_shgroup_create_sub(shgrp_parent);
- /* TODO: optimize this. Only bind the ones GPUMaterial needs. */
+ /* TODO: optimize this. Only bind the ones #GPUMaterial needs. */
for (int i = 0; i < hair_cache->num_uv_layers; i++) {
for (int n = 0; n < MAX_LAYER_NAME_CT && hair_cache->uv_layer_names[i][n][0] != '\0'; n++) {
DRW_shgroup_uniform_texture(shgrp, hair_cache->uv_layer_names[i][n], hair_cache->uv_tex[i]);
@@ -373,17 +373,17 @@ void DRW_hair_update()
GPU_framebuffer_free(fb);
}
else {
- /* Note(Metal): If compute is not supported, bind a temporary framebuffer to avoid
+ /* NOTE(Metal): If compute is not supported, bind a temporary frame-buffer to avoid
* side-effects from rendering in the active buffer.
- * We also need to guarantee that a Framebuffer is active to perform any rendering work,
- * even if there is no output */
+ * We also need to guarantee that a frame-buffer is active to perform any rendering work,
+ * even if there is no output. */
GPUFrameBuffer *temp_fb = nullptr;
GPUFrameBuffer *prev_fb = nullptr;
if (GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_MAC, GPU_DRIVER_ANY, GPU_BACKEND_METAL)) {
if (!GPU_compute_shader_support()) {
prev_fb = GPU_framebuffer_active_get();
char errorOut[256];
- /* if the framebuffer is invalid we need a dummy framebuffer to be bound. */
+ /* if the frame-buffer is invalid we need a dummy frame-buffer to be bound. */
if (!GPU_framebuffer_check_valid(prev_fb, errorOut)) {
int width = 64;
int height = 64;
@@ -405,11 +405,11 @@ void DRW_hair_update()
GPU_memory_barrier(GPU_BARRIER_SHADER_STORAGE);
}
- /* Release temporary framebuffer. */
+ /* Release temporary frame-buffer. */
if (temp_fb != nullptr) {
GPU_framebuffer_free(temp_fb);
}
- /* Rebind existing framebuffer */
+ /* Rebind existing frame-buffer */
if (prev_fb != nullptr) {
GPU_framebuffer_bind(prev_fb);
}
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index 799d0544e34..6e05572a20b 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -2699,7 +2699,7 @@ void DRW_draw_select_id(Depsgraph *depsgraph, ARegion *region, View3D *v3d, cons
GPUViewport *viewport = WM_draw_region_get_viewport(region);
if (!viewport) {
/* Selection engine requires a viewport.
- * TODO(germano): This should be done internally in the engine. */
+ * TODO(@germano): This should be done internally in the engine. */
sel_ctx->is_dirty = true;
sel_ctx->objects_drawn_len = 0;
sel_ctx->index_drawn_len = 1;
diff --git a/source/blender/draw/intern/draw_manager.cc b/source/blender/draw/intern/draw_manager.cc
index 2841abb53e7..41ff974e835 100644
--- a/source/blender/draw/intern/draw_manager.cc
+++ b/source/blender/draw/intern/draw_manager.cc
@@ -27,8 +27,8 @@ Manager::~Manager()
void Manager::begin_sync()
{
- /* TODO: This means the reference is kept until further redraw or manager teardown. Instead, they
- * should be released after each draw loop. But for now, mimics old DRW behavior. */
+ /* TODO: This means the reference is kept until further redraw or manager tear-down. Instead,
+ * they should be released after each draw loop. But for now, mimics old DRW behavior. */
for (GPUTexture *texture : acquired_textures) {
/* Decrease refcount and free if 0. */
GPU_texture_free(texture);
@@ -37,7 +37,7 @@ void Manager::begin_sync()
acquired_textures.clear();
#ifdef DEBUG
- /* Detect non-init data. */
+ /* Detect uninitialized data. */
memset(matrix_buf.data(), 0xF0, resource_len_ * sizeof(*matrix_buf.data()));
memset(bounds_buf.data(), 0xF0, resource_len_ * sizeof(*bounds_buf.data()));
memset(infos_buf.data(), 0xF0, resource_len_ * sizeof(*infos_buf.data()));
diff --git a/source/blender/draw/intern/draw_manager.hh b/source/blender/draw/intern/draw_manager.hh
index 867b376702c..aff56b0307b 100644
--- a/source/blender/draw/intern/draw_manager.hh
+++ b/source/blender/draw/intern/draw_manager.hh
@@ -44,7 +44,10 @@ class Manager {
using ObjectBoundsBuf = StorageArrayBuffer<ObjectBounds, 128>;
using ObjectInfosBuf = StorageArrayBuffer<ObjectInfos, 128>;
using ObjectAttributeBuf = StorageArrayBuffer<ObjectAttribute, 128>;
- /** TODO(fclem): Remove once we get rid of old EEVEE codebase. DRW_RESOURCE_CHUNK_LEN = 512 */
+ /**
+ * TODO(@fclem): Remove once we get rid of old EEVEE code-base.
+ * `DRW_RESOURCE_CHUNK_LEN = 512`.
+ */
using ObjectAttributeLegacyBuf = UniformArrayBuffer<float4, 8 * 512>;
public:
@@ -77,11 +80,16 @@ class Manager {
* This is because attribute list is arbitrary.
*/
ObjectAttributeBuf attributes_buf;
- /** TODO(fclem): Remove once we get rid of old EEVEE codebase. Only here to satisfy bindings. */
+ /**
+ * TODO(@fclem): Remove once we get rid of old EEVEE code-base.
+ * Only here to satisfy bindings.
+ */
ObjectAttributeLegacyBuf attributes_buf_legacy;
- /** List of textures coming from Image data-blocks. They need to be refcounted in order to avoid
- * beeing freed in another thread. */
+ /**
+ * List of textures coming from Image data-blocks.
+ * They need to be reference-counted in order to avoid being freed in another thread.
+ */
Vector<GPUTexture *> acquired_textures;
private:
diff --git a/source/blender/draw/intern/draw_pass.hh b/source/blender/draw/intern/draw_pass.hh
index 65faa9febbc..e4b3a56c414 100644
--- a/source/blender/draw/intern/draw_pass.hh
+++ b/source/blender/draw/intern/draw_pass.hh
@@ -13,7 +13,7 @@
* submission is optimized for large number of draw calls. But has a significant overhead per
* #Pass. Use many #PassSub along with a main #Pass to reduce the overhead and allow groupings of
* commands. \note The draw call order inside a batch of multiple draw with the exact same state is
- * not guaranteed and is not even deterministic. Use a PassSimple or PassSortable if ordering is
+ * not guaranteed and is not even deterministic. Use a #PassSimple or #PassSortable if ordering is
* needed. \note As of now, it is also quite limited in the type of draw command it can record
* (no custom vertex count, no custom first vertex).
*
@@ -25,7 +25,7 @@
* A lightweight #Pass that lives inside a main #Pass. It can only be created from #Pass.sub()
* and is auto managed. This mean it can be created, filled and thrown away. A #PassSub reference
* is valid until the next #Pass.init() of the parent pass. Commands recorded inside a #PassSub are
- * inserted inside the parent #Pass where the sub have been created durring submission.
+ * inserted inside the parent #Pass where the sub have been created during submission.
*
* `PassSortable`:
* This is a sort of `PassMain` augmented with a per sub-pass sorting value. They can't directly
@@ -35,8 +35,9 @@
* \note A pass can be recorded once and resubmitted any number of time. This can be a good
* optimization for passes that are always the same for each frame. The only thing to be aware of
* is the life time of external resources. If a pass contains draw-calls with non default
- * ResourceHandle (not 0) or a reference to any non static resources (GPUBatch, PushConstant ref,
- * ResourceBind ref) it will have to be re-recorded if any of these reference becomes invalid.
+ * #ResourceHandle (not 0) or a reference to any non static resources
+ * (#GPUBatch, #PushConstant ref, #ResourceBind ref) it will have to be re-recorded
+ * if any of these reference becomes invalid.
*/
#include "BKE_image.h"
@@ -362,7 +363,7 @@ template<typename DrawCommandBufType> class Pass : public detail::PassBase<DrawC
* \{ */
/**
- * Normal pass type. No visibility or draw-call optimisation.
+ * Normal pass type. No visibility or draw-call optimization.
*/
// using PassSimple = detail::Pass<DrawCommandBuf>;
diff --git a/source/blender/draw/intern/draw_shader_shared.h b/source/blender/draw/intern/draw_shader_shared.h
index d43bfe6b159..bedbedcf438 100644
--- a/source/blender/draw/intern/draw_shader_shared.h
+++ b/source/blender/draw/intern/draw_shader_shared.h
@@ -131,7 +131,7 @@ struct ObjectInfos {
float4 color;
float4 infos;
#else
- /** Uploaded as center + size. Converted to mul+bias to local coord. */
+ /** Uploaded as center + size. Converted to mul+bias to local coord. */
float3 orco_add;
uint object_attrs_offset;
float3 orco_mul;
@@ -275,7 +275,7 @@ BLI_STATIC_ASSERT_ALIGN(DRWDebugPrintBuffer, 16)
/* Reuse first instance as row index as we don't use instancing. Equivalent to
* `DRWDebugPrintBuffer.command.i_first`. */
#define drw_debug_print_row_shared drw_debug_print_buf[3]
-/** Offset to the first data. Equal to: sizeof(DrawCommand) / sizeof(uint).
+/** Offset to the first data. Equal to: `sizeof(DrawCommand) / sizeof(uint)`.
* This is needed because we bind the whole buffer as a `uint` array. */
#define drw_debug_print_offset 8
@@ -308,7 +308,7 @@ BLI_STATIC_ASSERT_ALIGN(DRWDebugPrintBuffer, 16)
/* Equivalent to `DRWDebugDrawBuffer.command.v_count`. */
#define drw_debug_draw_v_count drw_debug_verts_buf[0].pos0
-/** Offset to the first data. Equal to: sizeof(DrawCommand) / sizeof(DRWDebugVert).
+/** Offset to the first data. Equal to: `sizeof(DrawCommand) / sizeof(DRWDebugVert)`.
* This is needed because we bind the whole buffer as a `DRWDebugVert` array. */
#define drw_debug_draw_offset 2
diff --git a/source/blender/draw/intern/draw_view.hh b/source/blender/draw/intern/draw_view.hh
index 82e74774a5a..27e7a7a0028 100644
--- a/source/blender/draw/intern/draw_view.hh
+++ b/source/blender/draw/intern/draw_view.hh
@@ -16,7 +16,7 @@ namespace blender::draw {
class Manager;
-/* TODO deduplicate. */
+/* TODO: de-duplicate. */
using ObjectBoundsBuf = StorageArrayBuffer<ObjectBounds, 128>;
/** \note Using uint4 for declaration but bound as uint. */
using VisibilityBuf = StorageArrayBuffer<uint4, 1, true>;
@@ -26,7 +26,7 @@ class View {
private:
UniformBuffer<ViewInfos> data_;
- /** Freezed version of data_ used for debugging culling. */
+ /** Frozen version of data_ used for debugging culling. */
UniformBuffer<ViewInfos> data_freeze_;
/** Result of the visibility computation. 1 bit per resource ID. */
VisibilityBuf visibility_buf_;
diff --git a/source/blender/draw/intern/shaders/draw_command_generate_comp.glsl b/source/blender/draw/intern/shaders/draw_command_generate_comp.glsl
index 70842e5bb81..3e640540777 100644
--- a/source/blender/draw/intern/shaders/draw_command_generate_comp.glsl
+++ b/source/blender/draw/intern/shaders/draw_command_generate_comp.glsl
@@ -28,7 +28,7 @@ void write_draw_call(DrawGroup group, uint group_id)
command_buf[group_id * 2 + 1] = cmd;
/* Reset the counters for a next command gen dispatch. Avoids resending the whole data just
- * for this purpose. Only the last thread will execute this so it is threadsafe. */
+ * for this purpose. Only the last thread will execute this so it is thread-safe. */
group_buf[group_id].front_facing_counter = 0u;
group_buf[group_id].back_facing_counter = 0u;
group_buf[group_id].total_counter = 0u;
diff --git a/source/blender/editors/animation/keyframes_edit.c b/source/blender/editors/animation/keyframes_edit.c
index 63bd5665459..2a94c5db439 100644
--- a/source/blender/editors/animation/keyframes_edit.c
+++ b/source/blender/editors/animation/keyframes_edit.c
@@ -1303,7 +1303,8 @@ void ANIM_fcurve_equalize_keyframes_loop(FCurve *fcu,
/* Perform handle equalization if mode is 'Both' or 'Left'. */
if (mode & EQUALIZE_HANDLES_LEFT) {
- /*If left handle type is 'Auto', 'Auto Clamped', or 'Vector', convert handles to 'Aligned'.*/
+ /* If left handle type is 'Auto', 'Auto Clamped', or 'Vector', convert handles to 'Aligned'.
+ */
if (ELEM(bezt->h1, HD_AUTO, HD_AUTO_ANIM, HD_VECT)) {
bezt->h1 = HD_ALIGN;
bezt->h2 = HD_ALIGN;
@@ -1319,8 +1320,8 @@ void ANIM_fcurve_equalize_keyframes_loop(FCurve *fcu,
/* Perform handle equalization if mode is 'Both' or 'Right'. */
if (mode & EQUALIZE_HANDLES_RIGHT) {
- /*If right handle type is 'Auto', 'Auto Clamped', or 'Vector', convert handles to
- * 'Aligned'.*/
+ /* If right handle type is 'Auto', 'Auto Clamped', or 'Vector', convert handles to
+ * 'Aligned'. */
if (ELEM(bezt->h2, HD_AUTO, HD_AUTO_ANIM, HD_VECT)) {
bezt->h1 = HD_ALIGN;
bezt->h2 = HD_ALIGN;
diff --git a/source/blender/editors/animation/keyingsets.c b/source/blender/editors/animation/keyingsets.c
index 967a324ef95..e6bcb404bcb 100644
--- a/source/blender/editors/animation/keyingsets.c
+++ b/source/blender/editors/animation/keyingsets.c
@@ -714,7 +714,7 @@ static void anim_keyingset_visit_for_search_impl(const bContext *C,
void *visit_user_data,
const bool use_poll)
{
- /* Poll requires context. */
+ /* Poll requires context. */
if (use_poll && (C == NULL)) {
return;
}
diff --git a/source/blender/editors/curve/editcurve.c b/source/blender/editors/curve/editcurve.c
index 164336c4b22..b8c0ea42daa 100644
--- a/source/blender/editors/curve/editcurve.c
+++ b/source/blender/editors/curve/editcurve.c
@@ -1989,7 +1989,7 @@ static int sel_to_copy_ints(const BPoint *bp,
}
if (selected_leg_count &&
/* Prevents leading and trailing unselected legs if all selected.
- * Unless it is extrusion from point or curve.*/
+ * Unless it is extrusion from point or curve. */
(selected_leg_count < max_j || max_j == 1)) {
/* Prepend unselected leg if more than one leg selected at the starting edge.
* max_j == 1 handles extrusion from point to curve and from curve to surface cases. */
diff --git a/source/blender/editors/curves/intern/curves_ops.cc b/source/blender/editors/curves/intern/curves_ops.cc
index 6582ce6e6d7..3b3a7ff7ba9 100644
--- a/source/blender/editors/curves/intern/curves_ops.cc
+++ b/source/blender/editors/curves/intern/curves_ops.cc
@@ -693,7 +693,7 @@ static int snap_curves_to_surface_exec(bContext *C, wmOperator *op)
BKE_report(op->reports, RPT_INFO, "Could not snap some curves to the surface");
}
- /* Refresh the entire window to also clear eventual modifier and nodes editor warnings.*/
+ /* Refresh the entire window to also clear eventual modifier and nodes editor warnings. */
WM_event_add_notifier(C, NC_WINDOW, nullptr);
return OPERATOR_FINISHED;
diff --git a/source/blender/editors/interface/view2d.cc b/source/blender/editors/interface/view2d.cc
index c2ca0ac8c72..bb459f227f9 100644
--- a/source/blender/editors/interface/view2d.cc
+++ b/source/blender/editors/interface/view2d.cc
@@ -150,7 +150,7 @@ static void view2d_masks(View2D *v2d, const rcti *mask_scroll)
}
/* Do not use mapped scroll here because we want to update scroller rects
- * even if they are not displayed. For init purposes. See T75003.*/
+ * even if they are not displayed. For initialization purposes. See T75003. */
scroll = v2d->scroll;
/* Scrollers are based off region-size:
diff --git a/source/blender/editors/sculpt_paint/paint_image.cc b/source/blender/editors/sculpt_paint/paint_image.cc
index 5a6ac9463e2..c852fd25bc4 100644
--- a/source/blender/editors/sculpt_paint/paint_image.cc
+++ b/source/blender/editors/sculpt_paint/paint_image.cc
@@ -161,7 +161,7 @@ void imapaint_image_update(
/* When buffer is partial updated the planes should be set to a larger value than 8. This will
* make sure that partial updating is working but uses more GPU memory as the gpu texture will
* have 4 channels. When so the whole texture needs to be reuploaded to the GPU using the new
- * texture format.*/
+ * texture format. */
if (ibuf != nullptr && ibuf->planes == 8) {
ibuf->planes = 32;
BKE_image_partial_update_mark_full_update(image);
@@ -172,7 +172,7 @@ void imapaint_image_update(
if (texpaint || (sima && sima->lock)) {
const int w = BLI_rcti_size_x(&imapaintpartial.dirty_region);
const int h = BLI_rcti_size_y(&imapaintpartial.dirty_region);
- /* Testing with partial update in uv editor too */
+ /* Testing with partial update in uv editor too. */
BKE_image_update_gputexture(
image, iuser, imapaintpartial.dirty_region.xmin, imapaintpartial.dirty_region.ymin, w, h);
}
diff --git a/source/blender/editors/sculpt_paint/paint_vertex.cc b/source/blender/editors/sculpt_paint/paint_vertex.cc
index c1a2a326d14..2d2033cac96 100644
--- a/source/blender/editors/sculpt_paint/paint_vertex.cc
+++ b/source/blender/editors/sculpt_paint/paint_vertex.cc
@@ -4141,7 +4141,7 @@ static bool paint_object_attributes_active_color_fill_ex(Object *ob,
if (!layer) {
return false;
}
- /* Store original #Mesh.editflag.*/
+ /* Store original #Mesh.editflag. */
const decltype(me->editflag) editflag = me->editflag;
if (!only_selected) {
me->editflag &= ~ME_EDIT_PAINT_FACE_SEL;
diff --git a/source/blender/editors/sculpt_paint/sculpt.c b/source/blender/editors/sculpt_paint/sculpt.c
index 74fd2f904e5..6ccb756099f 100644
--- a/source/blender/editors/sculpt_paint/sculpt.c
+++ b/source/blender/editors/sculpt_paint/sculpt.c
@@ -5560,7 +5560,7 @@ static int sculpt_brush_stroke_invoke(bContext *C, wmOperator *op, const wmEvent
* to avoid falling through to the translate operator in the
* global view3d keymap.
*
- * Note: BKE_object_is_visible_in_viewport is not working here (it returns false
+ * NOTE: #BKE_object_is_visible_in_viewport is not working here (it returns false
* if the object is in local view); instead, test for OB_HIDE_VIEWPORT directly.
*/
diff --git a/source/blender/editors/sculpt_paint/sculpt_undo.c b/source/blender/editors/sculpt_paint/sculpt_undo.c
index 07dbb5964bf..2fc49a24cc4 100644
--- a/source/blender/editors/sculpt_paint/sculpt_undo.c
+++ b/source/blender/editors/sculpt_paint/sculpt_undo.c
@@ -19,11 +19,11 @@
* At the end of the operator you should call SCULPT_undo_push_end.
*
* SCULPT_undo_push_end and ED_sculpt_undo_geometry_begin both take a
- * wmOperatorType as an argument. There are _ex versions that allow a custom
+ * #wmOperatorType as an argument. There are _ex versions that allow a custom
* name; try to avoid using them. These can break the redo panel since it requires
* the undo push have the same name as the calling operator.
*
- * Note: Sculpt undo steps are not appended to the global undo stack until
+ * NOTE: Sculpt undo steps are not appended to the global undo stack until
* the operator finishes. We use BKE_undosys_step_push_init_with_type to build
* a tentative undo step with is appended later when the operator ends.
* Operators must have the OPTYPE_UNDO flag set for this to work properly.
diff --git a/source/blender/editors/space_node/node_add.cc b/source/blender/editors/space_node/node_add.cc
index 7e46877d0ba..9949037479e 100644
--- a/source/blender/editors/space_node/node_add.cc
+++ b/source/blender/editors/space_node/node_add.cc
@@ -159,7 +159,7 @@ static int add_reroute_exec(bContext *C, wmOperator *op)
/* All link "cuts" that start at a particular output socket. Deduplicating new reroutes per
* output socket is useful because it allows reusing reroutes for connected intersections.
- * Further deduplication using the second map means we only have one cut per link.*/
+ * Further deduplication using the second map means we only have one cut per link. */
Map<bNodeSocket *, RerouteCutsForSocket> cuts_per_socket;
LISTBASE_FOREACH (bNodeLink *, link, &ntree.links) {
diff --git a/source/blender/editors/space_node/node_relationships.cc b/source/blender/editors/space_node/node_relationships.cc
index 067c01dcc58..c28b345b111 100644
--- a/source/blender/editors/space_node/node_relationships.cc
+++ b/source/blender/editors/space_node/node_relationships.cc
@@ -1465,7 +1465,7 @@ static int mute_links_exec(bContext *C, wmOperator *op)
nodeLinkSetMute(&ntree, link, !(link->flag & NODE_LINK_MUTED));
const bool muted = link->flag & NODE_LINK_MUTED;
- /* Propagate mute status downsteam past reroute nodes. */
+ /* Propagate mute status downstream past reroute nodes. */
if (link->tonode->is_reroute()) {
Stack<bNodeLink *> links;
links.push_multiple(link->tonode->output_sockets().first()->directly_linked_links());
diff --git a/source/blender/editors/space_outliner/outliner_edit.cc b/source/blender/editors/space_outliner/outliner_edit.cc
index cca6c9cc316..8618c2999c2 100644
--- a/source/blender/editors/space_outliner/outliner_edit.cc
+++ b/source/blender/editors/space_outliner/outliner_edit.cc
@@ -149,7 +149,7 @@ void OUTLINER_OT_highlight_update(wmOperatorType *ot)
void outliner_item_openclose(TreeElement *te, bool open, bool toggle_all)
{
- /* Only allow opening elements with children. */
+ /* Only allow opening elements with children. */
if (!(te->flag & TE_PRETEND_HAS_CHILDREN) && BLI_listbase_is_empty(&te->subtree)) {
return;
}
diff --git a/source/blender/editors/space_outliner/outliner_tools.cc b/source/blender/editors/space_outliner/outliner_tools.cc
index 4663df00a92..b0d24c88eea 100644
--- a/source/blender/editors/space_outliner/outliner_tools.cc
+++ b/source/blender/editors/space_outliner/outliner_tools.cc
@@ -919,7 +919,7 @@ struct OutlinerLibOverrideData {
* override), or an actual already existing override. */
Map<ID *, Vector<OutlinerLiboverrideDataIDRoot>> id_hierarchy_roots;
- /** All 'session_uuid' of all hierarchy root IDs used or created by the operation. */
+ /** All 'session_uuid' of all hierarchy root IDs used or created by the operation. */
Set<uint> id_hierarchy_roots_uid;
void id_root_add(ID *id_hierarchy_root_reference,
diff --git a/source/blender/editors/space_sequencer/sequencer_drag_drop.c b/source/blender/editors/space_sequencer/sequencer_drag_drop.c
index 7a3abbcbf21..c892e7d7e55 100644
--- a/source/blender/editors/space_sequencer/sequencer_drag_drop.c
+++ b/source/blender/editors/space_sequencer/sequencer_drag_drop.c
@@ -293,7 +293,7 @@ static void sequencer_drop_copy(bContext *C, wmDrag *drag, wmDropBox *drop)
SeqCollection *strips = SEQ_query_rendered_strips(
scene, channels, seqbase, scene->r.cfra, sseq->chanshown);
- /* Get the top most strip channel that is in view.*/
+ /* Get the top most strip channel that is in view. */
Sequence *seq;
int max_channel = -1;
SEQ_ITERATOR_FOREACH (seq, strips) {
diff --git a/source/blender/editors/space_sequencer/sequencer_edit.c b/source/blender/editors/space_sequencer/sequencer_edit.c
index 86dc9f566e5..38d61f02607 100644
--- a/source/blender/editors/space_sequencer/sequencer_edit.c
+++ b/source/blender/editors/space_sequencer/sequencer_edit.c
@@ -3076,7 +3076,7 @@ static int seq_cmp_time_startdisp_channel(void *thunk, const void *a, const void
int seq_a_start = SEQ_time_left_handle_frame_get(scene, seq_a);
int seq_b_start = SEQ_time_left_handle_frame_get(scene, seq_b);
- /* If strips have the same start frame favor the one with a higher channel.*/
+ /* If strips have the same start frame favor the one with a higher channel. */
if (seq_a_start == seq_b_start) {
return seq_a->machine > seq_b->machine;
}
diff --git a/source/blender/editors/transform/transform_convert_armature.c b/source/blender/editors/transform/transform_convert_armature.c
index 97d9ab2964a..d83cca15219 100644
--- a/source/blender/editors/transform/transform_convert_armature.c
+++ b/source/blender/editors/transform/transform_convert_armature.c
@@ -1356,7 +1356,7 @@ static void pose_transform_mirror_update(TransInfo *t, TransDataContainer *tc, O
}
mul_v3_m4v3(data->grabtarget, flip_mtx, td->loc);
if (pid) {
- /* TODO(germano): Relative Mirror support. */
+ /* TODO(@germano): Relative Mirror support. */
}
data->flag |= CONSTRAINT_IK_AUTO;
/* Add a temporary auto IK constraint here, as we will only temporarily active this
diff --git a/source/blender/editors/transform/transform_mode.c b/source/blender/editors/transform/transform_mode.c
index d8da7a11d28..10ea022757d 100644
--- a/source/blender/editors/transform/transform_mode.c
+++ b/source/blender/editors/transform/transform_mode.c
@@ -1214,7 +1214,7 @@ void transform_mode_init(TransInfo *t, wmOperator *op, const int mode)
transform_convert_mesh_customdatacorrect_init(t);
}
- /* TODO(germano): Some of these operations change the `t->mode`.
+ /* TODO(@germano): Some of these operations change the `t->mode`.
* This can be bad for Redo. */
// BLI_assert(t->mode == mode);
}
diff --git a/source/blender/editors/transform/transform_snap_object.cc b/source/blender/editors/transform/transform_snap_object.cc
index e4c152bc630..3bd090850f4 100644
--- a/source/blender/editors/transform/transform_snap_object.cc
+++ b/source/blender/editors/transform/transform_snap_object.cc
@@ -3408,8 +3408,8 @@ static eSnapMode transform_snap_context_project_view3d_mixed_impl(SnapObjectCont
bool use_occlusion_test = params->use_occlusion_test && !XRAY_ENABLED(v3d);
- /* Note: if both face raycast and face nearest are enabled, first find result of nearest, then
- * override with raycast. */
+ /* NOTE: if both face ray-cast and face nearest are enabled, first find result of nearest, then
+ * override with ray-cast. */
if ((snap_to_flag & SCE_SNAP_MODE_FACE_NEAREST) && !has_hit) {
has_hit = nearestWorldObjects(
sctx, params, init_co, prev_co, loc, no, &index, &ob_eval, obmat);
diff --git a/source/blender/geometry/intern/uv_parametrizer.cc b/source/blender/geometry/intern/uv_parametrizer.cc
index b7526d82ecc..4f763b09bef 100644
--- a/source/blender/geometry/intern/uv_parametrizer.cc
+++ b/source/blender/geometry/intern/uv_parametrizer.cc
@@ -4260,7 +4260,7 @@ void GEO_uv_parametrizer_average(ParamHandle *phandle,
for (int j = 0; j < max_iter; j++) {
/* An island could contain millions of polygons. When summing many small values, we need to
* use double precision in the accumulator to maintain accuracy. Note that the individual
- * calculations only need to be at single precision.*/
+ * calculations only need to be at single precision. */
double scale_cou = 0;
double scale_cov = 0;
double scale_cross = 0;
@@ -4275,11 +4275,11 @@ void GEO_uv_parametrizer_average(ParamHandle *phandle,
s[1][0] = vb->uv[0] - vc->uv[0];
s[1][1] = vb->uv[1] - vc->uv[1];
/* Find the "U" axis and "V" axis in triangle co-ordinates. Normally this would require
- * SVD, but in 2D we can use a cheaper matrix inversion instead.*/
+ * SVD, but in 2D we can use a cheaper matrix inversion instead. */
if (!invert_m2_m2(m, s)) {
continue;
}
- float cou[3], cov[3]; /* i.e. Texture "U" and texture "V" in 3D co-ordinates.*/
+ float cou[3], cov[3]; /* i.e. Texture "U" and texture "V" in 3D co-ordinates. */
for (int k = 0; k < 3; k++) {
cou[k] = m[0][0] * (va->co[k] - vc->co[k]) + m[0][1] * (vb->co[k] - vc->co[k]);
cov[k] = m[1][0] * (va->co[k] - vc->co[k]) + m[1][1] * (vb->co[k] - vc->co[k]);
@@ -4320,7 +4320,7 @@ void GEO_uv_parametrizer_average(ParamHandle *phandle,
const float tolerance = 1e-6f; /* Trade accuracy for performance. */
if (err < tolerance) {
- /* Too slow? Use Richardson Extrapolation to accelerate the convergence.*/
+ /* Too slow? Use Richardson Extrapolation to accelerate the convergence. */
break;
}
}
diff --git a/source/blender/gpencil_modifiers/intern/lineart/lineart_cpu.c b/source/blender/gpencil_modifiers/intern/lineart/lineart_cpu.c
index 6c3fadc1b29..95341a0eeb5 100644
--- a/source/blender/gpencil_modifiers/intern/lineart/lineart_cpu.c
+++ b/source/blender/gpencil_modifiers/intern/lineart/lineart_cpu.c
@@ -69,7 +69,7 @@ typedef struct LineartIsecThread {
int max;
int count_test;
- /* For individual thread reference.*/
+ /* For individual thread reference. */
LineartData *ld;
} LineartIsecThread;
@@ -1800,7 +1800,7 @@ static void lineart_add_edge_to_array_thread(LineartObjectInfo *obi, LineartEdge
}
/* NOTE: For simplicity, this function doesn't actually do anything if you already have data in
- * #pe. */
+ * #pe. */
void lineart_finalize_object_edge_array_reserve(LineartPendingEdges *pe, int count)
{
if (pe->max || pe->array || count == 0) {
@@ -4671,7 +4671,7 @@ void lineart_main_add_triangles(LineartData *ld)
}
/* Initialize per-thread data for thread task scheduling information and storing intersection
- * results. */
+ * results. */
LineartIsecData d = {0};
lineart_init_isec_thread(&d, ld, ld->thread_count);
diff --git a/source/blender/gpencil_modifiers/intern/lineart/lineart_shadow.c b/source/blender/gpencil_modifiers/intern/lineart/lineart_shadow.c
index bf42677d79c..257184bae1e 100644
--- a/source/blender/gpencil_modifiers/intern/lineart/lineart_shadow.c
+++ b/source/blender/gpencil_modifiers/intern/lineart/lineart_shadow.c
@@ -1303,7 +1303,7 @@ static void lineart_shadow_finalize_shadow_edges_task(
int v2i = (e[i].edge_identifier & LRT_OBINDEX_LOWER);
LineartVert *v = (LineartVert *)eln->pointer;
/* If the global position is close enough, use the original vertex to prevent flickering
- * caused by very slim boundary condition in point_triangle_relation().*/
+ * caused by very slim boundary condition in point_triangle_relation(). */
if (LRT_CLOSE_LOOSER_v3(e[i].v1->gloc, v[v1i].gloc)) {
e[i].v1 = &v[v1i];
}
diff --git a/source/blender/gpu/GPU_index_buffer.h b/source/blender/gpu/GPU_index_buffer.h
index e6345b1e43b..e5fefda527d 100644
--- a/source/blender/gpu/GPU_index_buffer.h
+++ b/source/blender/gpu/GPU_index_buffer.h
@@ -33,10 +33,10 @@ typedef struct GPUIndexBufBuilder {
uint32_t *data;
} GPUIndexBufBuilder;
-/* supports all primitive types. */
+/** Supports all primitive types. */
void GPU_indexbuf_init_ex(GPUIndexBufBuilder *, GPUPrimType, uint index_len, uint vertex_len);
-/* supports only GPU_PRIM_POINTS, GPU_PRIM_LINES and GPU_PRIM_TRIS. */
+/** Supports only #GPU_PRIM_POINTS, #GPU_PRIM_LINES and #GPU_PRIM_TRIS. */
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len);
GPUIndexBuf *GPU_indexbuf_build_on_device(uint index_len);
diff --git a/source/blender/gpu/GPU_shader_shared_utils.h b/source/blender/gpu/GPU_shader_shared_utils.h
index 1cfc4f8af31..96feed9e7d9 100644
--- a/source/blender/gpu/GPU_shader_shared_utils.h
+++ b/source/blender/gpu/GPU_shader_shared_utils.h
@@ -44,7 +44,7 @@
# define expf exp
# define bool1 bool
-/* Type name collision with Metal shading language - These typenames are already defined. */
+/* Type name collision with Metal shading language - These type-names are already defined. */
# ifndef GPU_METAL
# define float2 vec2
# define float3 vec3
diff --git a/source/blender/gpu/intern/gpu_codegen.cc b/source/blender/gpu/intern/gpu_codegen.cc
index b81345683b4..0102b8db5b2 100644
--- a/source/blender/gpu/intern/gpu_codegen.cc
+++ b/source/blender/gpu/intern/gpu_codegen.cc
@@ -355,7 +355,7 @@ void GPUCodegen::generate_resources()
GPUCodegenCreateInfo &info = *create_info;
/* Ref. T98190: Defines are optimizations for old compilers.
- * Might become unecessary with EEVEE-Next. */
+ * Might become unnecessary with EEVEE-Next. */
if (GPU_material_flag_get(&mat, GPU_MATFLAG_PRINCIPLED_CLEARCOAT)) {
info.define("PRINCIPLED_CLEARCOAT");
}
diff --git a/source/blender/gpu/intern/gpu_index_buffer.cc b/source/blender/gpu/intern/gpu_index_buffer.cc
index 08c31d0d589..3a66f547403 100644
--- a/source/blender/gpu/intern/gpu_index_buffer.cc
+++ b/source/blender/gpu/intern/gpu_index_buffer.cc
@@ -49,7 +49,7 @@ void GPU_indexbuf_init_ex(GPUIndexBufBuilder *builder,
* degenerative primitives when skipping primitives is required and will
* incur no additional performance cost for rendering. */
if (GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_MAC, GPU_DRIVER_ANY, GPU_BACKEND_METAL)) {
- /* We will still use restart-indices for point primtives and then
+ /* We will still use restart-indices for point primitives and then
* patch these during IndexBuf::init, as we cannot benefit from degenerative
* primitives to eliminate these. */
builder->restart_index_value = (is_restart_compatible(prim_type) ||
@@ -379,7 +379,7 @@ void IndexBuf::squeeze_indices_short(uint min_idx,
* clamp index to the maximum within the index range.
*
* `clamp_max_idx` represents the maximum possible index to clamp against. If primitive is
- * restart-compatible, we can just clamp against the primtive-restart value, otherwise, we
+ * restart-compatible, we can just clamp against the primitive-restart value, otherwise, we
* must assign to a valid index within the range.
*
* NOTE: For OpenGL we skip this by disabling clamping, as we still need to use
diff --git a/source/blender/gpu/intern/gpu_shader_create_info.hh b/source/blender/gpu/intern/gpu_shader_create_info.hh
index 3884c067c83..25a79dd26ac 100644
--- a/source/blender/gpu/intern/gpu_shader_create_info.hh
+++ b/source/blender/gpu/intern/gpu_shader_create_info.hh
@@ -32,7 +32,7 @@ namespace blender::gpu::shader {
#endif
enum class Type {
- /* Types supported natively across all GPU backends. */
+ /* Types supported natively across all GPU back-ends. */
FLOAT = 0,
VEC2,
VEC3,
@@ -48,12 +48,12 @@ enum class Type {
IVEC3,
IVEC4,
BOOL,
- /* Additionally supported types to enable data optimisation and native
- * support in some GPUBackends.
- * NOTE: These types must be representable in all APIs. E.g. VEC3_101010I2 is aliased as vec3 in
- * the GL backend, as implicit type conversions from packed normal attribute data to vec3 is
+ /* Additionally supported types to enable data optimization and native
+ * support in some GPU back-ends.
+ * NOTE: These types must be representable in all APIs. E.g. `VEC3_101010I2` is aliased as vec3
+ * in the GL back-end, as implicit type conversions from packed normal attribute data to vec3 is
* supported. UCHAR/CHAR types are natively supported in Metal and can be used to avoid
- * additional data conversions for GPU_COMP_U8 vertex attributes. */
+ * additional data conversions for `GPU_COMP_U8` vertex attributes. */
VEC3_101010I2,
UCHAR,
UCHAR2,
@@ -324,10 +324,10 @@ struct StageInterfaceInfo {
/**
* \brief Describe inputs & outputs, stage interfaces, resources and sources of a shader.
* If all data is correctly provided, this is all that is needed to create and compile
- * a GPUShader.
+ * a #GPUShader.
*
* IMPORTANT: All strings are references only. Make sure all the strings used by a
- * ShaderCreateInfo are not freed until it is consumed or deleted.
+ * #ShaderCreateInfo are not freed until it is consumed or deleted.
*/
struct ShaderCreateInfo {
/** Shader name for debugging. */
@@ -346,7 +346,7 @@ struct ShaderCreateInfo {
DepthWrite depth_write_ = DepthWrite::ANY;
/**
* Maximum length of all the resource names including each null terminator.
- * Only for names used by gpu::ShaderInterface.
+ * Only for names used by #gpu::ShaderInterface.
*/
size_t interface_names_size_ = 0;
/** Manually set builtins. */
diff --git a/source/blender/gpu/metal/mtl_capabilities.hh b/source/blender/gpu/metal/mtl_capabilities.hh
index 5e34d5352f1..36536438bf5 100644
--- a/source/blender/gpu/metal/mtl_capabilities.hh
+++ b/source/blender/gpu/metal/mtl_capabilities.hh
@@ -14,7 +14,7 @@ namespace gpu {
#define MTL_MAX_TEXTURE_SLOTS 128
#define MTL_MAX_SAMPLER_SLOTS MTL_MAX_TEXTURE_SLOTS
-/* Max limit without using bindless for samplers. */
+/* Max limit without using bind-less for samplers. */
#define MTL_MAX_DEFAULT_SAMPLERS 16
#define MTL_MAX_UNIFORM_BUFFER_BINDINGS 31
#define MTL_MAX_VERTEX_INPUT_ATTRIBUTES 31
diff --git a/source/blender/gpu/metal/mtl_command_buffer.mm b/source/blender/gpu/metal/mtl_command_buffer.mm
index 9a9a2d55103..0e13e8d4690 100644
--- a/source/blender/gpu/metal/mtl_command_buffer.mm
+++ b/source/blender/gpu/metal/mtl_command_buffer.mm
@@ -242,7 +242,7 @@ bool MTLCommandBufferManager::end_active_command_encoder()
active_render_command_encoder_ = nil;
active_command_encoder_type_ = MTL_NO_COMMAND_ENCODER;
- /* Reset associated framebuffer flag. */
+ /* Reset associated frame-buffer flag. */
active_frame_buffer_ = nullptr;
active_pass_descriptor_ = nullptr;
return true;
@@ -286,7 +286,7 @@ bool MTLCommandBufferManager::end_active_command_encoder()
id<MTLRenderCommandEncoder> MTLCommandBufferManager::ensure_begin_render_command_encoder(
MTLFrameBuffer *ctx_framebuffer, bool force_begin, bool *new_pass)
{
- /* Ensure valid framebuffer. */
+ /* Ensure valid frame-buffer. */
BLI_assert(ctx_framebuffer != nullptr);
/* Ensure active command buffer. */
@@ -299,10 +299,10 @@ id<MTLRenderCommandEncoder> MTLCommandBufferManager::ensure_begin_render_command
active_frame_buffer_ != ctx_framebuffer || force_begin) {
this->end_active_command_encoder();
- /* Determine if this is a re-bind of the same framebuffer. */
+ /* Determine if this is a re-bind of the same frame-buffer. */
bool is_rebind = (active_frame_buffer_ == ctx_framebuffer);
- /* Generate RenderPassDescriptor from bound framebuffer. */
+ /* Generate RenderPassDescriptor from bound frame-buffer. */
BLI_assert(ctx_framebuffer);
active_frame_buffer_ = ctx_framebuffer;
active_pass_descriptor_ = active_frame_buffer_->bake_render_pass_descriptor(
diff --git a/source/blender/gpu/metal/mtl_context.hh b/source/blender/gpu/metal/mtl_context.hh
index ccc648eab2a..e996193e722 100644
--- a/source/blender/gpu/metal/mtl_context.hh
+++ b/source/blender/gpu/metal/mtl_context.hh
@@ -175,9 +175,9 @@ struct MTLContextDepthStencilState {
bool has_depth_target;
bool has_stencil_target;
- /* TODO(Metal): Consider optimizing this function using memcmp.
+ /* TODO(Metal): Consider optimizing this function using `memcmp`.
* Un-used, but differing, stencil state leads to over-generation
- * of state objects when doing trivial compare. */
+ * of state objects when doing trivial compare. */
bool operator==(const MTLContextDepthStencilState &other) const
{
bool depth_state_equality = (has_depth_target == other.has_depth_target &&
@@ -358,7 +358,7 @@ typedef enum MTLPipelineStateDirtyFlag {
MTL_PIPELINE_STATE_NULL_FLAG = 0,
/* Whether we need to call setViewport. */
MTL_PIPELINE_STATE_VIEWPORT_FLAG = (1 << 0),
- /* Whether we need to call setScissor.*/
+ /* Whether we need to call setScissor. */
MTL_PIPELINE_STATE_SCISSOR_FLAG = (1 << 1),
/* Whether we need to update/rebind active depth stencil state. */
MTL_PIPELINE_STATE_DEPTHSTENCIL_FLAG = (1 << 2),
@@ -565,15 +565,15 @@ class MTLCommandBufferManager {
};
/** MTLContext -- Core render loop and state management. **/
-/* NOTE(Metal): Partial MTLContext stub to provide wrapper functionality
- * for work-in-progress MTL* classes. */
+/* NOTE(Metal): Partial #MTLContext stub to provide wrapper functionality
+ * for work-in-progress `MTL*` classes. */
class MTLContext : public Context {
friend class MTLBackend;
private:
- /* Null buffers for empty/unintialized bindings.
- * Null attribute buffer follows default attribute format of OpenGL Backend. */
+ /* Null buffers for empty/uninitialized bindings.
+ * Null attribute buffer follows default attribute format of OpenGL Back-end. */
id<MTLBuffer> null_buffer_; /* All zero's. */
id<MTLBuffer> null_attribute_buffer_; /* Value float4(0.0,0.0,0.0,1.0). */
@@ -581,7 +581,7 @@ class MTLContext : public Context {
MTLContextTextureUtils texture_utils_;
/* Texture Samplers. */
- /* Cache of generated MTLSamplerState objects based on permutations of `eGPUSamplerState`. */
+ /* Cache of generated #MTLSamplerState objects based on permutations of `eGPUSamplerState`. */
id<MTLSamplerState> sampler_state_cache_[GPU_SAMPLER_MAX];
id<MTLSamplerState> default_sampler_state_ = nil;
@@ -684,7 +684,7 @@ class MTLContext : public Context {
/* Flag whether the visibility buffer for query results
* has changed. This requires a new RenderPass in order
- * to update.*/
+ * to update. */
bool is_visibility_dirty() const;
/* Reset dirty flag state for visibility buffer. */
diff --git a/source/blender/gpu/metal/mtl_context.mm b/source/blender/gpu/metal/mtl_context.mm
index f14236bcb58..a66645e5fb5 100644
--- a/source/blender/gpu/metal/mtl_context.mm
+++ b/source/blender/gpu/metal/mtl_context.mm
@@ -32,7 +32,7 @@ MTLContext::MTLContext(void *ghost_window) : memory_manager(*this), main_command
debug::mtl_debug_init();
/* Device creation.
- * TODO(Metal): This is a temporary initialisation path to enable testing of features
+ * TODO(Metal): This is a temporary initialization path to enable testing of features
* and shader compilation tests. Future functionality should fetch the existing device
* from GHOST_ContextCGL.mm. Plumbing to be updated in future. */
this->device = MTLCreateSystemDefaultDevice();
@@ -40,7 +40,7 @@ MTLContext::MTLContext(void *ghost_window) : memory_manager(*this), main_command
/* Initialize command buffer state. */
this->main_command_buffer.prepare();
- /* Initialise imm and pipeline state */
+ /* Initialize IMM and pipeline state */
this->pipeline_state.initialised = false;
/* Frame management. */
@@ -199,7 +199,7 @@ id<MTLRenderCommandEncoder> MTLContext::ensure_begin_render_pass()
}
/* Ensure command buffer workload submissions are optimal --
- * Though do not split a batch mid-IMM recording */
+ * Though do not split a batch mid-IMM recording. */
/* TODO(Metal): Add IMM Check once MTLImmediate has been implemented. */
if (this->main_command_buffer.do_break_submission()/*&&
!((MTLImmediate *)(this->imm))->imm_is_recording()*/) {
diff --git a/source/blender/gpu/metal/mtl_framebuffer.hh b/source/blender/gpu/metal/mtl_framebuffer.hh
index d6e9fa76b70..434d1a15b43 100644
--- a/source/blender/gpu/metal/mtl_framebuffer.hh
+++ b/source/blender/gpu/metal/mtl_framebuffer.hh
@@ -40,7 +40,7 @@ struct MTLAttachment {
/**
* Implementation of FrameBuffer object using Metal.
- **/
+ */
class MTLFrameBuffer : public FrameBuffer {
private:
/* Context Handle. */
@@ -54,24 +54,32 @@ class MTLFrameBuffer : public FrameBuffer {
bool use_multilayered_rendering_ = false;
/* State. */
- /* Whether global framebuffer properties have changed and require
- * re-generation of MTLRenderPassDescriptor/RenderCommandEncoders. */
+
+ /**
+ * Whether global frame-buffer properties have changed and require
+ * re-generation of #MTLRenderPassDescriptor / #RenderCommandEncoders.
+ */
bool is_dirty_;
- /* Whether loadstore properties have changed (only affects certain cached configs). */
+ /** Whether `loadstore` properties have changed (only affects certain cached configurations). */
bool is_loadstore_dirty_;
- /* Context that the latest modified state was last applied to.
- * If this does not match current ctx, re-apply state. */
+ /**
+ * Context that the latest modified state was last applied to.
+ * If this does not match current ctx, re-apply state.
+ */
MTLContext *dirty_state_ctx_;
- /* Whether a clear is pending -- Used to toggle between clear and load FB configurations
+ /**
+ * Whether a clear is pending -- Used to toggle between clear and load FB configurations
* (without dirtying the state) - Frame-buffer load config is used if no `GPU_clear_*` command
- * was issued after binding the FrameBuffer. */
+ * was issued after binding the #FrameBuffer.
+ */
bool has_pending_clear_;
- /* Render Pass Descriptors:
- * There are 3 MTLRenderPassDescriptors for different ways in which a frame-buffer
+ /**
+ * Render Pass Descriptors:
+ * There are 3 #MTLRenderPassDescriptors for different ways in which a frame-buffer
* can be configured:
* [0] = CLEAR CONFIG -- Used when a GPU_framebuffer_clear_* command has been issued.
* [1] = LOAD CONFIG -- Used if bound, but no clear is required.
@@ -89,17 +97,17 @@ class MTLFrameBuffer : public FrameBuffer {
MTLRenderPassDescriptor *framebuffer_descriptor_[MTL_FB_CONFIG_MAX];
MTLRenderPassColorAttachmentDescriptor
*colour_attachment_descriptors_[GPU_FB_MAX_COLOR_ATTACHMENT];
- /* Whether MTLRenderPassDescriptor[N] requires updating with latest state. */
+ /** Whether `MTLRenderPassDescriptor[N]` requires updating with latest state. */
bool descriptor_dirty_[MTL_FB_CONFIG_MAX];
- /* Whether SRGB is enabled for this framebuffer configuration. */
+ /** Whether SRGB is enabled for this frame-buffer configuration. */
bool srgb_enabled_;
- /* Whether the primary Frame-buffer attachment is an SRGB target or not. */
+ /** Whether the primary Frame-buffer attachment is an SRGB target or not. */
bool is_srgb_;
public:
/**
* Create a conventional framebuffer to attach texture to.
- **/
+ */
MTLFrameBuffer(MTLContext *ctx, const char *name);
~MTLFrameBuffer();
diff --git a/source/blender/gpu/metal/mtl_framebuffer.mm b/source/blender/gpu/metal/mtl_framebuffer.mm
index 515dd70e5de..975e78fc466 100644
--- a/source/blender/gpu/metal/mtl_framebuffer.mm
+++ b/source/blender/gpu/metal/mtl_framebuffer.mm
@@ -885,12 +885,12 @@ bool MTLFrameBuffer::add_color_attachment(gpu::MTLTexture *texture,
mtl_color_attachments_[slot].depth_plane = 0;
break;
default:
- MTL_LOG_ERROR("MTLFrameBuffer::add_color_attachment Unrecognised texture type %u\n",
+ MTL_LOG_ERROR("MTLFrameBuffer::add_color_attachment Unrecognized texture type %u\n",
texture->type_);
break;
}
- /* Update Framebuffer Resolution. */
+ /* Update Frame-buffer Resolution. */
int width_of_miplayer, height_of_miplayer;
if (miplevel <= 0) {
width_of_miplayer = texture->width_get();
@@ -1007,11 +1007,11 @@ bool MTLFrameBuffer::add_depth_attachment(gpu::MTLTexture *texture, int miplevel
mtl_depth_attachment_.depth_plane = 0;
break;
default:
- BLI_assert(false && "Unrecognised texture type");
+ BLI_assert(false && "Unrecognized texture type");
break;
}
- /* Update Framebuffer Resolution. */
+ /* Update Frame-buffer Resolution. */
int width_of_miplayer, height_of_miplayer;
if (miplevel <= 0) {
width_of_miplayer = texture->width_get();
@@ -1022,7 +1022,7 @@ bool MTLFrameBuffer::add_depth_attachment(gpu::MTLTexture *texture, int miplevel
height_of_miplayer = max_ii(texture->height_get() >> miplevel, 1);
}
- /* Update Framebuffer Resolution. */
+ /* Update Frame-buffer Resolution. */
if (width_ == 0 || height_ == 0) {
this->size_set(width_of_miplayer, height_of_miplayer);
this->scissor_reset();
@@ -1129,11 +1129,11 @@ bool MTLFrameBuffer::add_stencil_attachment(gpu::MTLTexture *texture, int miplev
mtl_stencil_attachment_.depth_plane = 0;
break;
default:
- BLI_assert(false && "Unrecognised texture type");
+ BLI_assert(false && "Unrecognized texture type");
break;
}
- /* Update Framebuffer Resolution. */
+ /* Update Frame-buffer Resolution. */
int width_of_miplayer, height_of_miplayer;
if (miplevel <= 0) {
width_of_miplayer = texture->width_get();
@@ -1144,7 +1144,7 @@ bool MTLFrameBuffer::add_stencil_attachment(gpu::MTLTexture *texture, int miplev
height_of_miplayer = max_ii(texture->height_get() >> miplevel, 1);
}
- /* Update Framebuffer Resolution. */
+ /* Update Frame-buffer Resolution. */
if (width_ == 0 || height_ == 0) {
this->size_set(width_of_miplayer, height_of_miplayer);
this->scissor_reset();
@@ -1376,7 +1376,7 @@ bool MTLFrameBuffer::reset_clear_state()
/** \} */
/* -------------------------------------------------------------------- */
-/** \ Fetch values and Framebuffer status
+/** \name Fetch values and Frame-buffer status
* \{ */
bool MTLFrameBuffer::has_attachment_at_slot(uint slot)
@@ -1506,7 +1506,7 @@ MTLRenderPassDescriptor *MTLFrameBuffer::bake_render_pass_descriptor(bool load_c
BLI_assert(metal_ctx && metal_ctx->get_inside_frame());
UNUSED_VARS_NDEBUG(metal_ctx);
- /* If Framebuffer has been modified, regenerate descriptor. */
+ /* If Frame-buffer has been modified, regenerate descriptor. */
if (is_dirty_) {
/* Clear all configs. */
for (int config = 0; config < 3; config++) {
diff --git a/source/blender/gpu/metal/mtl_index_buffer.hh b/source/blender/gpu/metal/mtl_index_buffer.hh
index 5182eeab5e3..fde26b16927 100644
--- a/source/blender/gpu/metal/mtl_index_buffer.hh
+++ b/source/blender/gpu/metal/mtl_index_buffer.hh
@@ -25,13 +25,13 @@ class MTLIndexBuf : public IndexBuf {
#ifndef NDEBUG
/* Flags whether point index buffer has been compacted
- * to remove false retart indices. */
+ * to remove false restart indices. */
bool point_restarts_stripped_ = false;
#endif
- /* Optimised index buffers.
+ /* Optimized index buffers.
* NOTE(Metal): This optimization encodes a new index buffer following
- * TriangleList topology. Parsing of Index buffers is more optimal
+ * #TriangleList topology. Parsing of Index buffers is more optimal
* when not using restart-compatible primitive topology types. */
GPUPrimType optimized_primitive_type_;
gpu::MTLBuffer *optimized_ibo_ = nullptr;
@@ -52,13 +52,13 @@ class MTLIndexBuf : public IndexBuf {
void upload_data() override;
void update_sub(uint32_t start, uint32_t len, const void *data) override;
- /* get_index_buffer can conditionally return an optimized index buffer of a
+ /* #get_index_buffer can conditionally return an optimized index buffer of a
* differing format, if it is concluded that optimization is preferred
* for the given inputs.
- * Index buffer optimization is used to replace restart-compatbiele
- * primitive types with non-restart-compatible ones such as TriangleList and
- * LineList. This improves GPU execution for these types significantly, while
- * only incuring a small performance penalty.
+ * Index buffer optimization is used to replace restart-compatible
+ * primitive types with non-restart-compatible ones such as #TriangleList and
+ * #LineList. This improves GPU execution for these types significantly, while
+ * only incurring a small performance penalty.
*
* This is also used to emulate unsupported topology types
* such as triangle fan. */
diff --git a/source/blender/gpu/metal/mtl_index_buffer.mm b/source/blender/gpu/metal/mtl_index_buffer.mm
index 4a7875aaeb0..99795d7bbd9 100644
--- a/source/blender/gpu/metal/mtl_index_buffer.mm
+++ b/source/blender/gpu/metal/mtl_index_buffer.mm
@@ -40,7 +40,7 @@ void MTLIndexBuf::bind_as_ssbo(uint32_t binding)
/* Ensure we have a valid IBO. */
BLI_assert(this->ibo_);
- /* TODO(Metal): Support index buffer SSBOs. Dependent on compute impl. */
+ /* TODO(Metal): Support index buffer SSBO's. Dependent on compute implementation. */
MTL_LOG_WARNING("MTLIndexBuf::bind_as_ssbo not yet implemented!\n");
}
@@ -58,17 +58,17 @@ const uint32_t *MTLIndexBuf::read() const
void MTLIndexBuf::upload_data()
{
- /* Handle subrange upload. */
+ /* Handle sub-range upload. */
if (is_subrange_) {
MTLIndexBuf *mtlsrc = static_cast<MTLIndexBuf *>(src_);
mtlsrc->upload_data();
#ifndef NDEBUG
BLI_assert_msg(!mtlsrc->point_restarts_stripped_,
- "Cannot use subrange on stripped point buffer.");
+ "Cannot use sub-range on stripped point buffer.");
#endif
- /* If parent subrange allocation has changed,
+ /* If parent sub-range allocation has changed,
* update our index buffer. */
if (alloc_size_ != mtlsrc->alloc_size_ || ibo_ != mtlsrc->ibo_) {
@@ -154,7 +154,7 @@ void MTLIndexBuf::update_sub(uint32_t start, uint32_t len, const void *data)
destinationOffset:start
size:len];
- /* Synchronise changes back to host to ensure CPU-side data is up-to-date for non
+ /* Synchronize changes back to host to ensure CPU-side data is up-to-date for non
* Shared buffers. */
if (dest_buffer.storageMode == MTLStorageModeManaged) {
[enc synchronizeResource:dest_buffer];
@@ -177,8 +177,9 @@ void MTLIndexBuf::flag_can_optimize(bool can_optimize)
/** \} */
-/** \name Index buffer optimization and topology emulation.
- * Index buffer optimization and emulation. Optimise index buffers by
+/** \name Index buffer optimization and topology emulation
+ *
+ * Index buffer optimization and emulation. Optimize index buffers by
* eliminating restart-indices.
* Emulate unsupported index types e.g. Triangle Fan and Line Loop.
* \{ */
@@ -189,7 +190,7 @@ static uint32_t populate_optimized_tri_strip_buf(Span<T> original_data,
MutableSpan<T> output_data,
uint32_t input_index_len)
{
- /* Generate TriangleList from TriangleStrip. */
+ /* Generate #TriangleList from #TriangleStrip. */
uint32_t current_vert_len = 0;
uint32_t current_output_ind = 0;
T indices[3];
@@ -202,13 +203,12 @@ static uint32_t populate_optimized_tri_strip_buf(Span<T> original_data,
}
else {
if (current_vert_len < 3) {
- /* prepare first triangle.
- * Cache indices before genrating a triangle,
- * in case we have bad primitive-restarts. */
+ /* Prepare first triangle.
+ * Cache indices before generating a triangle, in case we have bad primitive-restarts. */
indices[current_vert_len] = current_index;
}
- /* emit triangle once we reach 3 input verts in current strip. */
+ /* Emit triangle once we reach 3 input verts in current strip. */
if (current_vert_len == 3) {
/* First triangle in strip. */
output_data[current_output_ind++] = indices[0];
@@ -247,7 +247,7 @@ static uint32_t populate_emulated_tri_fan_buf(Span<T> original_data,
MutableSpan<T> output_data,
uint32_t input_index_len)
{
- /* Generate TriangleList from TriangleFan. */
+ /* Generate #TriangleList from #TriangleFan. */
T base_prim_ind_val = 0;
uint32_t current_vert_len = 0;
uint32_t current_output_ind = 0;
@@ -261,9 +261,8 @@ static uint32_t populate_emulated_tri_fan_buf(Span<T> original_data,
}
else {
if (current_vert_len < 3) {
- /* prepare first triangle.
- * Cache indices before genrating a triangle,
- * in case we have bad primitive-restarts. */
+ /* Prepare first triangle.
+ * Cache indices before generating a triangle, in case we have bad primitive-restarts. */
indices[current_vert_len] = current_index;
}
@@ -298,7 +297,7 @@ id<MTLBuffer> MTLIndexBuf::get_index_buffer(GPUPrimType &in_out_primitive_type,
uint32_t &in_out_v_count)
{
/* Determine whether to return the original index buffer, or whether we
- * should emulate an unsupported primitive type, or optimisze a restart-
+ * should emulate an unsupported primitive type, or optimize a restart-
* compatible type for faster performance. */
bool should_optimize_or_emulate = (in_out_primitive_type == GPU_PRIM_TRI_FAN) ||
(in_out_primitive_type == GPU_PRIM_TRI_STRIP);
@@ -411,16 +410,16 @@ id<MTLBuffer> MTLIndexBuf::get_index_buffer(GPUPrimType &in_out_primitive_type,
} break;
case GPU_PRIM_LINE_STRIP: {
- /* TOOD(Metal): Line strip topology types would benefit from optimization to remove
+ /* TODO(Metal): Line strip topology types would benefit from optimization to remove
* primitive restarts, however, these do not occur frequently, nor with
* significant geometry counts. */
- MTL_LOG_INFO("TODO: Primitive topology: Optimise line strip topology types\n");
+ MTL_LOG_INFO("TODO: Primitive topology: Optimize line strip topology types\n");
} break;
case GPU_PRIM_LINE_LOOP: {
- /* TOOD(Metal): Line Loop primitive type requires use of optimized index buffer for
- * emulation, if used with indexed rendering. This path is currently not hit as LineLoop
- * does not currently appear to be used alongisde an index buffer. */
+ /* TODO(Metal): Line Loop primitive type requires use of optimized index buffer for
+ * emulation, if used with indexed rendering. This path is currently not hit as #LineLoop
+ * does not currently appear to be used alongside an index buffer. */
MTL_LOG_WARNING(
"TODO: Primitive topology: Line Loop Index buffer optimization required for "
"emulation.\n");
@@ -465,9 +464,9 @@ void MTLIndexBuf::strip_restart_indices()
* length. Primitive restarts are invalid in Metal for non-restart-compatible
* primitive types. We also cannot just use zero unlike for Lines and Triangles,
* as we cannot create de-generative point primitives to hide geometry, as each
- * point is indepednent.
+ * point is independent.
* Instead, we must remove these hidden indices from the index buffer.
- * Note: This happens prior to index squeezing so operate on 32-bit indices. */
+ * NOTE: This happens prior to index squeezing so operate on 32-bit indices. */
MutableSpan<uint32_t> uint_idx(static_cast<uint32_t *>(data_), index_len_);
for (uint i = 0; i < index_len_; i++) {
if (uint_idx[i] == 0xFFFFFFFFu) {
diff --git a/source/blender/gpu/metal/mtl_memory.hh b/source/blender/gpu/metal/mtl_memory.hh
index dc5417dc11a..df80df6543f 100644
--- a/source/blender/gpu/metal/mtl_memory.hh
+++ b/source/blender/gpu/metal/mtl_memory.hh
@@ -41,7 +41,7 @@
* Each frame, the next scratch buffer is reset, then later flushed upon
* command buffer submission.
*
- * Note: This is allocated per-context due to allocations being tied
+ * NOTE: This is allocated per-context due to allocations being tied
* to workload submissions and context-specific submissions.
*
* Examples of scratch buffer usage are:
@@ -73,7 +73,7 @@
* to ensure they are not prematurely re-used before they have finished being
* used by the GPU.
*
- * Note: The MTLBufferPool is a global construct which can be fetched from anywhere.
+ * NOTE: The MTLBufferPool is a global construct which can be fetched from anywhere.
*
* Usage:
* MTLContext::get_global_memory_manager(); - static routine to fetch global memory manager.
@@ -273,7 +273,7 @@ struct CompareMTLBuffer {
* when the next MTLSafeFreeList is created, to allow the existing pool to be released once
* the reference count hits zero after submitted command buffers complete.
*
- * Note: the Metal API independently tracks resources used by command buffers for the purpose of
+ * NOTE: the Metal API independently tracks resources used by command buffers for the purpose of
* keeping resources alive while in-use by the driver and CPU, however, this differs from the
* MTLSafeFreeList mechanism in the Metal backend, which exists for the purpose of allowing
* previously allocated MTLBuffer resources to be re-used. This allows us to save on the expensive
diff --git a/source/blender/gpu/metal/mtl_pso_descriptor_state.hh b/source/blender/gpu/metal/mtl_pso_descriptor_state.hh
index 010349eddbf..1906350679a 100644
--- a/source/blender/gpu/metal/mtl_pso_descriptor_state.hh
+++ b/source/blender/gpu/metal/mtl_pso_descriptor_state.hh
@@ -147,7 +147,7 @@ struct MTLRenderPipelineStateDescriptor {
* new PSO for the current shader.
*
* Unlike the 'MTLContextGlobalShaderPipelineState', this struct contains a subset of
- * parameters used to distinguish between unique PSOs. This struct is hashable and only contains
+ * parameters used to distinguish between unique PSOs. This struct is hash-able and only contains
* those parameters which are required by PSO generation. Non-unique state such as bound
* resources is not tracked here, as it does not require a unique PSO permutation if changed. */
@@ -155,7 +155,7 @@ struct MTLRenderPipelineStateDescriptor {
MTLVertexDescriptor vertex_descriptor;
/* Render Target attachment state.
- * Assign to MTLPixelFormatInvalid if not used. */
+ * Assign to #MTLPixelFormatInvalid if not used. */
int num_color_attachments;
MTLPixelFormat color_attachment_format[GPU_FB_MAX_COLOR_ATTACHMENT];
MTLPixelFormat depth_attachment_format;
@@ -170,7 +170,7 @@ struct MTLRenderPipelineStateDescriptor {
MTLBlendFactor src_alpha_blend_factor;
MTLBlendFactor src_rgb_blend_factor;
- /* Global colour write mask as this cannot be specified per attachment. */
+ /* Global color write mask as this cannot be specified per attachment. */
MTLColorWriteMask color_write_mask;
/* Point size required by point primitives. */
@@ -210,7 +210,7 @@ struct MTLRenderPipelineStateDescriptor {
uint64_t hash() const
{
- /* NOTE(Metal): Current setup aims to minimise overlap of parameters
+ /* NOTE(Metal): Current setup aims to minimize overlap of parameters
* which are more likely to be different, to ensure earlier hash
* differences without having to fallback to comparisons.
* Though this could likely be further improved to remove
@@ -226,7 +226,7 @@ struct MTLRenderPipelineStateDescriptor {
/* Only include elements in Hash if they are needed - avoids variable null assignments
* influencing hash. */
if (this->num_color_attachments > 0) {
- hash ^= (uint64_t)this->color_write_mask << 22; /* 4 bit bitmask. */
+ hash ^= (uint64_t)this->color_write_mask << 22; /* 4 bit bit-mask. */
hash ^= (uint64_t)this->alpha_blend_op << 26; /* Up to 4 (3 bits). */
hash ^= (uint64_t)this->rgb_blend_op << 29; /* Up to 4 (3 bits). */
hash ^= (uint64_t)this->dest_alpha_blend_factor << 32; /* Up to 18 (5 bits). */
@@ -247,4 +247,4 @@ struct MTLRenderPipelineStateDescriptor {
}
};
-} // namespace blender::gpu \ No newline at end of file
+} // namespace blender::gpu
diff --git a/source/blender/gpu/metal/mtl_shader.hh b/source/blender/gpu/metal/mtl_shader.hh
index cdbcd7c68f6..64d9d1cf849 100644
--- a/source/blender/gpu/metal/mtl_shader.hh
+++ b/source/blender/gpu/metal/mtl_shader.hh
@@ -56,7 +56,7 @@ struct MTLBufferArgumentData {
/* Metal Render Pipeline State Instance. */
struct MTLRenderPipelineStateInstance {
- /* Function instances with specialisation.
+ /* Function instances with specialization.
* Required for argument encoder construction. */
id<MTLFunction> vert;
id<MTLFunction> frag;
@@ -78,7 +78,7 @@ struct MTLRenderPipelineStateInstance {
/** Reflection Data.
* Currently used to verify whether uniform buffers of incorrect sizes being bound, due to left
* over bindings being used for slots that did not need updating for a particular draw. Metal
- * Backend over-generates bindings due to detecting their presence, though in many cases, the
+ * Back-end over-generates bindings due to detecting their presence, though in many cases, the
* bindings in the source are not all used for a given shader.
* This information can also be used to eliminate redundant/unused bindings. */
bool reflection_data_available;
@@ -86,7 +86,7 @@ struct MTLRenderPipelineStateInstance {
blender::Vector<MTLBufferArgumentData> buffer_bindings_reflection_data_frag;
};
-/* MTLShaderBuilder source wrapper used during initial compilation. */
+/* #MTLShaderBuilder source wrapper used during initial compilation. */
struct MTLShaderBuilder {
NSString *msl_source_vert_ = @"";
NSString *msl_source_frag_ = @"";
@@ -100,17 +100,17 @@ struct MTLShaderBuilder {
};
/**
- * MTLShader implements shader compilation, Pipeline State Object (PSO)
+ * #MTLShader implements shader compilation, Pipeline State Object (PSO)
* creation for rendering and uniform data binding.
* Shaders can either be created from native MSL, or generated
- * from a GLSL source shader using GPUShaderCreateInfo.
+ * from a GLSL source shader using #GPUShaderCreateInfo.
*
* Shader creation process:
- * - Create MTLShader:
- * - Convert GLSL to MSL source if required.
- * - set MSL source.
- * - set Vertex/Fragment function names.
- * - Create and populate MTLShaderInterface.
+ * - Create #MTLShader:
+ * - Convert GLSL to MSL source if required.
+ * - set MSL source.
+ * - set Vertex/Fragment function names.
+ * - Create and populate #MTLShaderInterface.
**/
class MTLShader : public Shader {
friend shader::ShaderCreateInfo;
@@ -164,7 +164,7 @@ class MTLShader : public Shader {
* and perform vertex assembly manually, rather than using Stage-in.
* This is used to give a vertex shader full access to all of the
* vertex data.
- * This is primarily used for optimisation techniques and
+ * This is primarily used for optimization techniques and
* alternative solutions for Geometry-shaders which are unsupported
* by Metal. */
bool use_ssbo_vertex_fetch_mode_ = false;
@@ -315,7 +315,7 @@ class MTLShader : public Shader {
* and the type specified in the shader source.
*
* e.g. vec3 to vec4 expansion, or vec4 to vec2 truncation.
- * Note: Vector expansion will replace empty elements with the values
+ * NOTE: Vector expansion will replace empty elements with the values
* (0,0,0,1).
*
* If implicit format resize is not possible, this function
@@ -591,18 +591,19 @@ inline bool mtl_vertex_format_resize(MTLVertexFormat mtl_format,
return out_vert_format != MTLVertexFormatInvalid;
}
-/* Returns whether the METAL API can internally convert between the input type of data in the
+/**
+ * Returns whether the METAL API can internally convert between the input type of data in the
* incoming vertex buffer and the format used by the vertex attribute inside the shader.
*
* - Returns TRUE if the type can be converted internally, along with returning the appropriate
- * type to be passed into the MTLVertexAttributeDescriptorPSO.
+ * type to be passed into the #MTLVertexAttributeDescriptorPSO.
*
* - Returns FALSE if the type cannot be converted internally e.g. casting Int4 to Float4.
*
* If implicit conversion is not possible, then we can fallback to performing manual attribute
- * conversion using the special attribute read function specialisations in the shader.
+ * conversion using the special attribute read function specializations in the shader.
* These functions selectively convert between types based on the specified vertex
- * attribute 'GPUVertFetchMode fetch_mode' e.g. GPU_FETCH_INT.
+ * attribute `GPUVertFetchMode fetch_mode` e.g. `GPU_FETCH_INT`.
*/
inline bool mtl_convert_vertex_format(MTLVertexFormat shader_attrib_format,
GPUVertCompType component_type,
@@ -1026,7 +1027,7 @@ inline uint comp_count_from_vert_format(MTLVertexFormat vert_format)
case MTLVertexFormatInt1010102Normalized:
default:
- BLI_assert_msg(false, "Unrecognised attribute type. Add types to switch as needed.");
+ BLI_assert_msg(false, "Unrecognized attribute type. Add types to switch as needed.");
return 0;
}
}
@@ -1086,7 +1087,7 @@ inline GPUVertFetchMode fetchmode_from_vert_format(MTLVertexFormat vert_format)
return GPU_FETCH_INT_TO_FLOAT_UNIT;
default:
- BLI_assert_msg(false, "Unrecognised attribute type. Add types to switch as needed.");
+ BLI_assert_msg(false, "Unrecognized attribute type. Add types to switch as needed.");
return GPU_FETCH_FLOAT;
}
}
@@ -1156,7 +1157,7 @@ inline GPUVertCompType comp_type_from_vert_format(MTLVertexFormat vert_format)
return GPU_COMP_I10;
default:
- BLI_assert_msg(false, "Unrecognised attribute type. Add types to switch as needed.");
+ BLI_assert_msg(false, "Unrecognized attribute type. Add types to switch as needed.");
return GPU_COMP_F32;
}
}
diff --git a/source/blender/gpu/metal/mtl_shader.mm b/source/blender/gpu/metal/mtl_shader.mm
index 1824057c9a2..23097f312f0 100644
--- a/source/blender/gpu/metal/mtl_shader.mm
+++ b/source/blender/gpu/metal/mtl_shader.mm
@@ -51,7 +51,7 @@ MTLShader::MTLShader(MTLContext *ctx, const char *name) : Shader(name)
shd_builder_ = new MTLShaderBuilder();
#ifndef NDEBUG
- /* Remove invalid symbols from shader name to ensure debug entrypoint function name is valid. */
+ /* Remove invalid symbols from shader name to ensure debug entry-point function name is valid. */
for (uint i : IndexRange(strlen(this->name))) {
char c = this->name[i];
if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
@@ -123,7 +123,7 @@ MTLShader::~MTLShader()
}
pso_cache_.clear();
- /* NOTE(Metal): ShaderInterface deletion is handled in the super destructor ~Shader(). */
+ /* NOTE(Metal): #ShaderInterface deletion is handled in the super destructor `~Shader()`. */
}
valid_ = false;
@@ -247,7 +247,7 @@ bool MTLShader::finalize(const shader::ShaderCreateInfo *info)
break;
}
- /* Concatenate common src. */
+ /* Concatenate common source. */
NSString *str = [NSString stringWithUTF8String:datatoc_mtl_shader_common_msl];
NSString *source_with_header_a = [str stringByAppendingString:source_to_compile];
@@ -343,9 +343,9 @@ bool MTLShader::transform_feedback_enable(GPUVertBuf *buf)
BLI_assert(buf);
transform_feedback_active_ = true;
transform_feedback_vertbuf_ = buf;
- /* TODO(Metal): Enable this assertion once MTLVertBuf lands. */
- /*BLI_assert(static_cast<MTLVertBuf *>(unwrap(transform_feedback_vertbuf_))->get_usage_type() ==
- GPU_USAGE_DEVICE_ONLY);*/
+ /* TODO(Metal): Enable this assertion once #MTLVertBuf lands. */
+ // BLI_assert(static_cast<MTLVertBuf *>(unwrap(transform_feedback_vertbuf_))->get_usage_type() ==
+ // GPU_USAGE_DEVICE_ONLY);
return true;
}
@@ -560,7 +560,7 @@ void MTLShader::vertformat_from_shader(GPUVertFormat *format) const
/** \} */
/* -------------------------------------------------------------------- */
-/** \name METAL Custom behaviour
+/** \name METAL Custom Behavior
* \{ */
void MTLShader::set_vertex_function_name(NSString *vert_function_name)
@@ -584,7 +584,7 @@ void MTLShader::shader_source_from_msl(NSString *input_vertex_source,
void MTLShader::set_interface(MTLShaderInterface *interface)
{
- /* Assign gpu::Shader superclass interface. */
+ /* Assign gpu::Shader super-class interface. */
Shader::interface = interface;
}
@@ -593,22 +593,24 @@ void MTLShader::set_interface(MTLShaderInterface *interface)
/* -------------------------------------------------------------------- */
/** \name Bake Pipeline State Objects
* \{ */
-/* Bakes or fetches a pipeline state using the current
- * MTLRenderPipelineStateDescriptor state.
+
+/**
+ * Bakes or fetches a pipeline state using the current
+ * #MTLRenderPipelineStateDescriptor state.
*
* This state contains information on shader inputs/outputs, such
* as the vertex descriptor, used to control vertex assembly for
* current vertex data, and active render target information,
- * decsribing the output attachment pixel formats.
+ * describing the output attachment pixel formats.
*
- * Other rendering parameters such as global pointsize, blend state, color mask
- * etc; are also used. See mtl_shader.h for full MLRenderPipelineStateDescriptor.
+ * Other rendering parameters such as global point-size, blend state, color mask
+ * etc; are also used. See mtl_shader.h for full #MLRenderPipelineStateDescriptor.
*/
MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
MTLContext *ctx, MTLPrimitiveTopologyClass prim_type)
{
/* NOTE(Metal): PSO cache can be accessed from multiple threads, though these operations should
- * be thread-safe due to organisation of high-level renderer. If there are any issues, then
+ * be thread-safe due to organization of high-level renderer. If there are any issues, then
* access can be guarded as appropriate. */
BLI_assert(this);
MTLShaderInterface *mtl_interface = this->get_interface();
@@ -616,9 +618,9 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
BLI_assert(this->is_valid());
/* NOTE(Metal): Vertex input assembly description will have been populated externally
- * via MTLBatch or MTLImmediate during binding or draw. */
+ * via #MTLBatch or #MTLImmediate during binding or draw. */
- /* Resolve Context Framebuffer state. */
+ /* Resolve Context Frame-buffer state. */
MTLFrameBuffer *framebuffer = ctx->get_current_framebuffer();
/* Update global pipeline descriptor. */
@@ -631,7 +633,7 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
MTLAttachment color_attachment = framebuffer->get_color_attachment(attachment);
if (color_attachment.used) {
- /* If SRGB is disabled and format is SRGB, use colour data directly with no conversions
+ /* If SRGB is disabled and format is SRGB, use color data directly with no conversions
* between linear and SRGB. */
MTLPixelFormat mtl_format = gpu_texture_format_to_metal(
color_attachment.texture->format_get());
@@ -687,7 +689,7 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
@autoreleasepool {
/* Prepare Render Pipeline Descriptor. */
- /* Setup function specialisation constants, used to modify and optimise
+ /* Setup function specialization constants, used to modify and optimize
* generated code based on current render pipeline configuration. */
MTLFunctionConstantValues *values = [[MTLFunctionConstantValues new] autorelease];
@@ -698,18 +700,18 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
pso_descriptor_.label = [NSString stringWithUTF8String:this->name];
/* Offset the bind index for Uniform buffers such that they begin after the VBO
- * buffer bind slots. MTL_uniform_buffer_base_index is passed as a function
- * specialisation constant, customised per unique pipeline state permutation.
+ * buffer bind slots. `MTL_uniform_buffer_base_index` is passed as a function
+ * specialization constant, customized per unique pipeline state permutation.
*
- * Note: For binding point compaction, we could use the number of VBOs present
- * in the current PSO configuration current_state.vertex_descriptor.num_vert_buffers).
+ * NOTE: For binding point compaction, we could use the number of VBOs present
+ * in the current PSO configuration `current_state.vertex_descriptor.num_vert_buffers`).
* However, it is more efficient to simply offset the uniform buffer base index to the
- * maximal number of VBO bind-points, as then UBO bindpoints for similar draw calls
+ * maximal number of VBO bind-points, as then UBO bind-points for similar draw calls
* will align and avoid the requirement for additional binding. */
int MTL_uniform_buffer_base_index = GPU_BATCH_VBO_MAX_LEN;
/* Null buffer index is used if an attribute is not found in the
- * bound VBOs VertexFormat. */
+ * bound VBOs #VertexFormat. */
int null_buffer_index = current_state.vertex_descriptor.num_vert_buffers;
bool using_null_buffer = false;
@@ -726,20 +728,21 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
else {
for (const uint i : IndexRange(current_state.vertex_descriptor.num_attributes)) {
- /* Metal backend attribute descriptor state. */
+ /* Metal back-end attribute descriptor state. */
MTLVertexAttributeDescriptorPSO &attribute_desc =
current_state.vertex_descriptor.attributes[i];
/* Flag format conversion */
- /* In some cases, Metal cannot implicity convert between data types.
- * In these instances, the fetch mode 'GPUVertFetchMode' as provided in the vertex format
+ /* In some cases, Metal cannot implicitly convert between data types.
+ * In these instances, the fetch mode #GPUVertFetchMode as provided in the vertex format
* is passed in, and used to populate function constants named: MTL_AttributeConvert0..15.
-
+ *
* It is then the responsibility of the vertex shader to perform any necessary type
* casting.
*
- * See mtl_shader.hh for more information. Relevant Metal API documentation:
- * https://developer.apple.com/documentation/metal/mtlvertexattributedescriptor/1516081-format?language=objc */
+ * See `mtl_shader.hh` for more information. Relevant Metal API documentation:
+ * https://developer.apple.com/documentation/metal/mtlvertexattributedescriptor/1516081-format?language=objc
+ */
if (attribute_desc.format == MTLVertexFormatInvalid) {
MTL_LOG_WARNING(
"MTLShader: baking pipeline state for '%s'- expected input attribute at "
@@ -766,7 +769,7 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
mtl_interface->name);
}
- /* Copy metal backend attribute descriptor state into PSO descriptor.
+ /* Copy metal back-end attribute descriptor state into PSO descriptor.
* NOTE: need to copy each element due to direct assignment restrictions.
* Also note */
MTLVertexAttributeDescriptor *mtl_attribute = desc.vertexDescriptor.attributes[i];
@@ -777,12 +780,12 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
}
for (const uint i : IndexRange(current_state.vertex_descriptor.num_vert_buffers)) {
- /* Metal backend state buffer layout. */
+ /* Metal back-end state buffer layout. */
const MTLVertexBufferLayoutDescriptorPSO &buf_layout =
current_state.vertex_descriptor.buffer_layouts[i];
- /* Copy metal backend buffer layout state into PSO descriptor.
+ /* Copy metal back-end buffer layout state into PSO descriptor.
* NOTE: need to copy each element due to copying from internal
- * backend descriptor to Metal API descriptor.*/
+ * back-end descriptor to Metal API descriptor. */
MTLVertexBufferLayoutDescriptor *mtl_buf_layout = desc.vertexDescriptor.layouts[i];
mtl_buf_layout.stepFunction = buf_layout.step_function;
@@ -801,7 +804,7 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
/* DEBUG: Missing/empty attributes. */
/* Attributes are normally mapped as part of the state setting based on the used
- * GPUVertFormat, however, if attribues have not been set, we can sort them out here. */
+ * #GPUVertFormat, however, if attributes have not been set, we can sort them out here. */
for (const uint i : IndexRange(mtl_interface->get_total_attributes())) {
const MTLShaderInputAttribute &attribute = mtl_interface->get_attribute(i);
MTLVertexAttributeDescriptor *current_attribute = desc.vertexDescriptor.attributes[i];
@@ -868,8 +871,8 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
float MTL_pointsize = pipeline_descriptor.point_size;
if (pipeline_descriptor.vertex_descriptor.prim_topology_class ==
MTLPrimitiveTopologyClassPoint) {
- /* IF pointsize is > 0.0, PROGRAM_POINT_SIZE is enabled, and gl_PointSize shader keyword
- overrides the value. Otherwise, if < 0.0, use global constant point size. */
+ /* `if pointsize is > 0.0`, PROGRAM_POINT_SIZE is enabled, and `gl_PointSize` shader keyword
+ * overrides the value. Otherwise, if < 0.0, use global constant point size. */
if (MTL_pointsize < 0.0) {
MTL_pointsize = fabsf(MTL_pointsize);
[values setConstantValue:&MTL_pointsize
@@ -926,7 +929,7 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
/* Setup pixel format state */
for (int color_attachment = 0; color_attachment < GPU_FB_MAX_COLOR_ATTACHMENT;
color_attachment++) {
- /* Fetch colour attachment pixel format in backend pipeline state. */
+ /* Fetch color attachment pixel format in back-end pipeline state. */
MTLPixelFormat pixel_format = current_state.color_attachment_format[color_attachment];
/* Populate MTL API PSO attachment descriptor. */
MTLRenderPipelineColorAttachmentDescriptor *col_attachment =
@@ -999,8 +1002,8 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
* This reflection data is used to contrast the binding information
* we know about in the interface against the bindings in the finalized
* PSO. This accounts for bindings which have been stripped out during
- * optimisation, and allows us to both avoid over-binding and also
- * allows us to veriy size-correctness for bindings, to ensure
+ * optimization, and allows us to both avoid over-binding and also
+ * allows us to verify size-correctness for bindings, to ensure
* that buffers bound are not smaller than the size of expected data. */
NSArray<MTLArgument *> *vert_args = [reflection_data vertexArguments];
@@ -1152,7 +1155,7 @@ void MTLShader::ssbo_vertex_fetch_bind_attributes_begin()
ssbo_vertex_attribute_bind_active_ = true;
ssbo_vertex_attribute_bind_mask_ = (1 << mtl_interface->get_total_attributes()) - 1;
- /* Reset tracking of actively used vbo bind slots for ssbo vertex fetch mode. */
+ /* Reset tracking of actively used VBO bind slots for SSBO vertex fetch mode. */
for (int i = 0; i < MTL_SSBO_VERTEX_FETCH_MAX_VBOS; i++) {
ssbo_vbo_slot_used_[i] = false;
}
diff --git a/source/blender/gpu/metal/mtl_shader_generator.hh b/source/blender/gpu/metal/mtl_shader_generator.hh
index c71504b84b7..43890ca0170 100644
--- a/source/blender/gpu/metal/mtl_shader_generator.hh
+++ b/source/blender/gpu/metal/mtl_shader_generator.hh
@@ -21,9 +21,9 @@
*
* 3) Generate MSL shader.
*
- * 4) Populate MTLShaderInterface, describing input/output structure, bindpoints, buffer size and
- * alignment, shader feature usage etc; Everything required by the Metal backend to successfully
- * enable use of shaders and GPU backend features.
+ * 4) Populate #MTLShaderInterface, describing input/output structure, bind-points, buffer size and
+ * alignment, shader feature usage etc; Everything required by the Metal back-end to
+ * successfully enable use of shaders and GPU back-end features.
*
*
*
@@ -33,27 +33,27 @@
* sampler bindings or argument buffers; at the top of the shader.
*
* 2) Inject common Metal headers.
- * - mtl_shader_defines.msl is used to map GLSL functions to MSL.
- * - mtl_shader_common.msl is added to ALL MSL shaders to provide
- * common functionality required by the backend. This primarily
+ * - `mtl_shader_defines.msl` is used to map GLSL functions to MSL.
+ * - `mtl_shader_common.msl` is added to ALL MSL shaders to provide
+ * common functionality required by the back-end. This primarily
* contains function-constant hooks, used in PSO generation.
*
* 3) Create a class Scope which wraps the GLSL shader. This is used to
* create a global per-thread scope around the shader source, to allow
- * access to common shader members (GLSL globals, shader inputs/outptus etc)
+ * access to common shader members (GLSL globals, shader inputs/outputs etc)
*
* 4) Generate shader interface structs and populate local members where required for:
- * - VertexInputs
- * - VertexOutputs
- * - Uniforms
- * - Uniform Blocks
- * - textures;
+ * - `VertexInputs`
+ * - `VertexOutputs`
+ * - `Uniforms`
+ * - `Uniform Blocks`
+ * - `textures` ;
* etc;
*
* 5) Inject GLSL source.
*
* 6) Generate MSL shader entry point function. Every Metal shader must have a
- * vertex/fragment/kernel entrypoint, which contains the function binding table.
+ * vertex/fragment/kernel entry-point, which contains the function binding table.
* This is where bindings are specified and passed into the shader.
*
* For converted shaders, the MSL entry-point will also instantiate a shader
@@ -61,47 +61,49 @@
*
* Finally, the shaders "main()" method will be called, and outputs are copied.
*
- * Note: For position outputs, the default output position will be converted to
+ * NOTE: For position outputs, the default output position will be converted to
* the Metal coordinate space, which involves flipping the Y coordinate and
* re-mapping the depth range between 0 and 1, as with Vulkan.
*
*
* The final shader structure looks as follows:
*
- * -- Shader defines --
- * #define USE_ARGUMENT_BUFFER_FOR_SAMPLERS 0
- * ... etc ...;
- *
- * class MetalShaderVertexImp {
- *
- * -- Common shader interface structs --
- * struct VertexIn {
- * vec4 pos [[attribute(0)]]
- * }
- * struct VertexOut {...}
- * struct PushConstantBlock {...}
- * struct drw_Globals {...}
- * ...
- *
- * -- GLSL source code --
- * ...
- * };
- *
- * vertex MetalShaderVertexImp::VertexOut vertex_function_entry(
- * MetalShaderVertexImp::VertexIn v_in [[stage_in]],
- * constant PushConstantBlock& globals [[buffer(MTL_uniform_buffer_base_index)]]) {
- *
- * MetalShaderVertexImp impl;
- * -- Copy input members into impl instance --
- * -- Execute GLSL main function --
- * impl.main();
- *
- * -- Copy outputs and return --
- * MetalShaderVertexImp::VertexOut out;
- * out.pos = impl.pos;
- * -- transform position to Metal coordinate system --
- * return v_out;
- * }
+ * \code{.cc}
+ * -- Shader defines --
+ * #define USE_ARGUMENT_BUFFER_FOR_SAMPLERS 0
+ * ... etc ...;
+ *
+ * class MetalShaderVertexImp {
+ *
+ * -- Common shader interface structs --
+ * struct VertexIn {
+ * vec4 pos [[attribute(0)]]
+ * }
+ * struct VertexOut {...}
+ * struct PushConstantBlock {...}
+ * struct drw_Globals {...}
+ * ...
+ *
+ * -- GLSL source code --
+ * ...
+ * };
+ *
+ * vertex MetalShaderVertexImp::VertexOut vertex_function_entry(
+ * MetalShaderVertexImp::VertexIn v_in [[stage_in]],
+ * constant PushConstantBlock& globals [[buffer(MTL_uniform_buffer_base_index)]]) {
+ *
+ * MetalShaderVertexImp impl;
+ * -- Copy input members into impl instance --
+ * -- Execute GLSL main function --
+ * impl.main();
+ *
+ * -- Copy outputs and return --
+ * MetalShaderVertexImp::VertexOut out;
+ * out.pos = impl.pos;
+ * -- transform position to Metal coordinate system --
+ * return v_out;
+ * }
+ * \endcode
*
* -- SSBO-vertex-fetchmode --
*
@@ -125,13 +127,14 @@
* significant performance loss from manual vertex assembly vs under-the-hood assembly.
*
* This mode works by passing the required vertex descriptor information into the shader
- * as uniform data, describing the type, stride, offset, stepmode and buffer index of each
- * attribute, such that the shader ssbo-vertex-fetch utility functions know how to extract data.
+ * as uniform data, describing the type, stride, offset, step-mode and buffer index of each
+ * attribute, such that the shader SSBO-vertex-fetch utility functions know how to extract data.
*
- * This also works with indexed rendering, by similarly binding the index buffer as a manul buffer.
+ * This also works with indexed rendering,
+ * by similarly binding the index buffer as a manual buffer.
*
- * When this mode is used, the code generation and shader interface generation varies to accomodate
- * the required features.
+ * When this mode is used, the code generation and shader interface generation varies to
+ * accommodate the required features.
*
* This mode can be enabled in a shader with:
*
@@ -363,7 +366,7 @@ class MSLGeneratorInterface {
blender::Vector<MSLVertexInputAttribute> vertex_input_attributes;
blender::Vector<MSLVertexOutputAttribute> vertex_output_varyings;
/* Should match vertex outputs, but defined separately as
- * some shader permutations will not utilise all inputs/outputs.
+ * some shader permutations will not utilize all inputs/outputs.
* Final shader uses the intersection between the two sets. */
blender::Vector<MSLVertexOutputAttribute> fragment_input_varyings;
blender::Vector<MSLFragmentOutputAttribute> fragment_outputs;
diff --git a/source/blender/gpu/metal/mtl_shader_generator.mm b/source/blender/gpu/metal/mtl_shader_generator.mm
index 37c1ddd6e7a..977e97dbd82 100644
--- a/source/blender/gpu/metal/mtl_shader_generator.mm
+++ b/source/blender/gpu/metal/mtl_shader_generator.mm
@@ -178,10 +178,12 @@ static bool is_program_word(const char *chr, int *len)
return true;
}
-/* Replace function parameter patterns containing:
+/**
+ * Replace function parameter patterns containing:
* `out vec3 somevar` with `THD vec3&somevar`.
- * which enables pass by reference via resolved macro:
- * thread vec3& somevar. */
+ * which enables pass by reference via resolved macro:
+ * `thread vec3& somevar`.
+ */
static void replace_outvars(std::string &str)
{
char *current_str_begin = &*str.begin();
@@ -205,7 +207,7 @@ static void replace_outvars(std::string &str)
/* Match found. */
bool is_array = (*(word_base2 + len2) == '[');
- /* Generate outvar pattern of form 'THD type&var' from original 'out vec4 var'. */
+ /* Generate out-variable pattern of form `THD type&var` from original `out vec4 var`. */
*start = 'T';
*(start + 1) = 'H';
*(start + 2) = 'D';
@@ -277,13 +279,15 @@ static bool balanced_braces(char *current_str_begin, char *current_str_end)
return (nested_bracket_depth == 0);
}
-/* Certain Constants (such as arrays, or pointer types) declared in Global-scope
- * end up being initialised per shader thread, resulting in high
+/**
+ * Certain Constants (such as arrays, or pointer types) declared in Global-scope
+ * end up being initialized per shader thread, resulting in high
* register pressure within the shader.
- * Here we flag occurences of these constants such that
+ * Here we flag occurrences of these constants such that
* they can be moved to a place where this is not a problem.
*
- * Constants declared within function-scope do not exhibit this problem. */
+ * Constants declared within function-scope do not exhibit this problem.
+ */
static void extract_global_scope_constants(std::string &str, std::stringstream &global_scope_out)
{
char *current_str_begin = &*str.begin();
@@ -395,8 +399,8 @@ static void print_resource(std::ostream &os, const ShaderCreateInfo::Resource &r
if (array_offset == -1) {
/* Create local class member as constant pointer reference to bound UBO buffer.
* Given usage within a shader follows ubo_name.ubo_element syntax, we can
- * dereference the pointer as the compiler will optimise this data fetch.
- * To do this, we also give the ubo name a postfix of `_local` to avoid
+ * dereference the pointer as the compiler will optimize this data fetch.
+ * To do this, we also give the UBO name a post-fix of `_local` to avoid
* macro accessor collisions. */
os << "constant " << res.uniformbuf.type_name << " *" << res.uniformbuf.name
<< "_local;\n";
@@ -434,7 +438,7 @@ std::string MTLShader::resources_declare(const ShaderCreateInfo &info) const
for (const ShaderCreateInfo::Resource &res : info.batch_resources_) {
print_resource(ss, res);
}
- /* Note: Push constant uniform data is generated during `generate_msl_from_glsl`
+ /* NOTE: Push constant uniform data is generated during `generate_msl_from_glsl`
* as the generated output is needed for all paths. This includes generation
* of the push constant data structure (struct PushConstantBlock).
* As all shader generation paths require creation of this. */
@@ -533,14 +537,14 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
return false;
}
- /* MSLGeneratorInterface is a class populated to describe all parameters, resources, bindings
+ /* #MSLGeneratorInterface is a class populated to describe all parameters, resources, bindings
* and features used by the source GLSL shader. This information is then used to generate the
* appropriate Metal entry points and perform any required source translation. */
MSLGeneratorInterface msl_iface(*this);
BLI_assert(shd_builder_ != nullptr);
- /* Populate MSLGeneratorInterface from Create-Info.
- * Note this is a seperate path as MSLGeneratorInterface can also be manually populated
+ /* Populate #MSLGeneratorInterface from Create-Info.
+ * NOTE: this is a separate path as #MSLGeneratorInterface can also be manually populated
* from parsing, if support for shaders without create-info is required. */
msl_iface.prepare_from_createinfo(info);
@@ -553,13 +557,13 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
/** Determine use of Transform Feedback. **/
msl_iface.uses_transform_feedback = false;
if (transform_feedback_type_ != GPU_SHADER_TFB_NONE) {
- /* Ensure TransformFeedback is configured correctly. */
+ /* Ensure #TransformFeedback is configured correctly. */
BLI_assert(tf_output_name_list_.size() > 0);
msl_iface.uses_transform_feedback = true;
}
/* Concatenate msl_shader_defines to provide functionality mapping
- * from GLSL to MSL. Also include additioanl GPU defines for
+ * from GLSL to MSL. Also include additional GPU defines for
* optional high-level feature support. */
const std::string msl_defines_string =
"#define GPU_ARB_texture_cube_map_array 1\n\
@@ -576,7 +580,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
* #pragma USE_SSBO_VERTEX_FETCH(Output Prim Type, num output vertices per input primitive)
*
* This will determine whether SSBO-vertex-fetch
- * mode is ued for this shader. Returns true if used, and populates output reference
+ * mode is used for this shader. Returns true if used, and populates output reference
* values with the output prim type and output number of vertices. */
MTLPrimitiveType vertex_fetch_ssbo_output_prim_type = MTLPrimitiveTypeTriangle;
uint32_t vertex_fetch_ssbo_num_output_verts = 0;
@@ -622,8 +626,8 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
/* NOTE(METAL): Currently still performing fallback string scan, as info->builtins_ does
* not always contain the usage flag. This can be removed once all appropriate create-info's
* have been updated. In some cases, this may incur a false positive if access is guarded
- * behind a macro. Though in these cases, unused code paths and paramters will be
- * optimised out by the Metal shader compiler. */
+ * behind a macro. Though in these cases, unused code paths and parameters will be
+ * optimized out by the Metal shader compiler. */
/** Identify usage of vertex-shader builtins. */
msl_iface.uses_gl_VertexID = bool(info->builtins_ & BuiltinBits::VERTEX_ID) ||
@@ -636,9 +640,10 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
std::string::npos ||
msl_iface.uses_ssbo_vertex_fetch_mode;
- /* instance ID in GL is [0, instancecount] in metal it is [base_instance,
- * base_instance+instance_count], so we need to offset instanceID by base instance in Metal --
- * Thus we expose the [[base_instance]] attribute if instance ID is used at all. */
+ /* instance ID in GL is `[0, instance_count]` in metal it is
+ * `[base_instance, base_instance + instance_count]`,
+ * so we need to offset instance_ID by base instance in Metal --
+ * Thus we expose the `[[base_instance]]` attribute if instance ID is used at all. */
msl_iface.uses_gl_BaseInstanceARB = msl_iface.uses_gl_InstanceID ||
shd_builder_->glsl_vertex_source_.find(
"gl_BaseInstanceARB") != std::string::npos ||
@@ -706,7 +711,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
}
/**** METAL Shader source generation. ****/
- /* Setup stringstream for populaing generated MSL shader vertex/frag shaders. */
+ /* Setup `stringstream` for populating generated MSL shader vertex/frag shaders. */
std::stringstream ss_vertex;
std::stringstream ss_fragment;
@@ -753,7 +758,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
#ifndef NDEBUG
/* Performance warning: Extract global-scope expressions.
- * Note: This is dependent on stripping out comments
+ * NOTE: This is dependent on stripping out comments
* to remove false positives. */
remove_multiline_comments_func(shd_builder_->glsl_vertex_source_);
remove_singleline_comments_func(shd_builder_->glsl_vertex_source_);
@@ -786,7 +791,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
bool is_inside_struct = false;
if (!iface->instance_name.is_empty()) {
/* If shader stage interface has an instance name, then it
- * is using a struct foramt and as such we only need a local
+ * is using a struct format and as such we only need a local
* class member for the struct, not each element. */
ss_vertex << iface->name << " " << iface->instance_name << ";" << std::endl;
is_inside_struct = true;
@@ -822,7 +827,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
is_array,
array_len});
- /* Add to fragment-input interface.*/
+ /* Add to fragment-input interface. */
msl_iface.fragment_input_varyings.append(
{to_string(inout.type),
out_name.c_str(),
@@ -838,7 +843,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
if (!msl_iface.uses_ssbo_vertex_fetch_mode) {
ss_vertex << msl_iface.generate_msl_vertex_in_struct();
}
- /* Genrate Uniform data structs. */
+ /* Generate Uniform data structs. */
ss_vertex << msl_iface.generate_msl_uniform_structs(ShaderStage::VERTEX);
/* Conditionally use global GL variables. */
@@ -900,7 +905,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
/* Class Closing Bracket to end shader global scope. */
ss_vertex << "};" << std::endl;
- /* Generate Vertex shader entrypoint function containing resource bindings. */
+ /* Generate Vertex shader entry-point function containing resource bindings. */
ss_vertex << msl_iface.generate_msl_vertex_entry_stub();
/*** Generate FRAGMENT Stage. ***/
@@ -918,10 +923,8 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
#ifndef NDEBUG
/* Performance warning: Identify global-scope expressions.
- * These cause excessive register pressure due to global
- * arrays being instanciated per-thread.
- * Note: This is dependent on stripping out comments
- * to remove false positives. */
+ * These cause excessive register pressure due to global arrays being instantiated per-thread.
+ * NOTE: This is dependent on stripping out comments to remove false positives. */
remove_multiline_comments_func(shd_builder_->glsl_fragment_source_);
remove_singleline_comments_func(shd_builder_->glsl_fragment_source_);
extract_global_scope_constants(shd_builder_->glsl_fragment_source_, ss_fragment);
@@ -1000,7 +1003,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
/* Class Closing Bracket to end shader global scope. */
ss_fragment << "};" << std::endl;
- /* Generate Fragment entrypoint function. */
+ /* Generate Fragment entry-point function. */
ss_fragment << msl_iface.generate_msl_fragment_entry_stub();
}
@@ -1050,7 +1053,7 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
shader_debug_printf("[METAL] BSL Converted into MSL\n");
#ifndef NDEBUG
- /* In debug mode, we inject the name of the shader into the entrypoint function
+ /* In debug mode, we inject the name of the shader into the entry-point function
* name, as these are what show up in the Xcode GPU debugger. */
this->set_vertex_function_name(
[[NSString stringWithFormat:@"vertex_function_entry_%s", this->name] retain]);
@@ -1316,8 +1319,8 @@ bool MSLGeneratorInterface::use_argument_buffer_for_samplers() const
uint32_t MSLGeneratorInterface::num_samplers_for_stage(ShaderStage stage) const
{
- /* Note: Sampler bindings and argument buffer shared across stages,
- in case stages share texture/sampler bindings. */
+ /* NOTE: Sampler bindings and argument buffer shared across stages,
+ * in case stages share texture/sampler bindings. */
return texture_samplers.size();
}
@@ -1357,14 +1360,14 @@ std::string MSLGeneratorInterface::generate_msl_vertex_entry_stub()
std::stringstream out;
out << std::endl << "/*** AUTO-GENERATED MSL VERETX SHADER STUB. ***/" << std::endl;
- /* Undef texture defines from main source - avoid conflict with MSL texture. */
+ /* Un-define texture defines from main source - avoid conflict with MSL texture. */
out << "#undef texture" << std::endl;
out << "#undef textureLod" << std::endl;
/* Disable special case for booleans being treated as ints in GLSL. */
out << "#undef bool" << std::endl;
- /* Undef uniform mappings to avoid name collisions. */
+ /* Un-define uniform mappings to avoid name collisions. */
out << generate_msl_uniform_undefs(ShaderStage::VERTEX);
/* Generate function entry point signature w/ resource bindings and inputs. */
@@ -1414,8 +1417,8 @@ std::string MSLGeneratorInterface::generate_msl_vertex_entry_stub()
out << this->generate_msl_vertex_output_population();
/* Final point size,
- * This is only compiled if the MTL_global_pointsize is specified
- * as a function specialisation in the PSO. This is restricted to
+ * This is only compiled if the `MTL_global_pointsize` is specified
+ * as a function specialization in the PSO. This is restricted to
* point primitive types. */
out << "if(is_function_constant_defined(MTL_global_pointsize)){ output.pointsize = "
"(MTL_global_pointsize > 0.0)?MTL_global_pointsize:output.pointsize; }"
@@ -1437,14 +1440,14 @@ std::string MSLGeneratorInterface::generate_msl_fragment_entry_stub()
std::stringstream out;
out << std::endl << "/*** AUTO-GENERATED MSL FRAGMENT SHADER STUB. ***/" << std::endl;
- /* Undef texture defines from main source - avoid conflict with MSL texture*/
+ /* Undefine texture defines from main source - avoid conflict with MSL texture. */
out << "#undef texture" << std::endl;
out << "#undef textureLod" << std::endl;
- /* Disable special case for booleans being treated as ints in GLSL. */
+ /* Disable special case for booleans being treated as integers in GLSL. */
out << "#undef bool" << std::endl;
- /* Undef uniform mappings to avoid name collisions. */
+ /* Undefine uniform mappings to avoid name collisions. */
out << generate_msl_uniform_undefs(ShaderStage::FRAGMENT);
/* Generate function entry point signature w/ resource bindings and inputs. */
@@ -1529,9 +1532,9 @@ void MSLGeneratorInterface::generate_msl_textures_input_string(std::stringstream
}
/* Generate sampler signatures. */
- /* Note: Currently textures and samplers share indices across shading stages, so the limit is
+ /* NOTE: Currently textures and samplers share indices across shading stages, so the limit is
* shared.
- * If we exceed the hardware-supported limit, then follow a bindless model using argument
+ * If we exceed the hardware-supported limit, then follow a bind-less model using argument
* buffers. */
if (this->use_argument_buffer_for_samplers()) {
out << ",\n\tconstant SStruct& samplers [[buffer(MTL_uniform_buffer_base_index+"
@@ -1539,7 +1542,7 @@ void MSLGeneratorInterface::generate_msl_textures_input_string(std::stringstream
}
else {
/* Maximum Limit of samplers defined in the function argument table is
- * MTL_MAX_DEFAULT_SAMPLERS=16. */
+ * `MTL_MAX_DEFAULT_SAMPLERS=16`. */
BLI_assert(this->texture_samplers.size() <= MTL_MAX_DEFAULT_SAMPLERS);
for (const MSLTextureSampler &tex : this->texture_samplers) {
if (bool(tex.stage & stage)) {
@@ -1562,15 +1565,15 @@ void MSLGeneratorInterface::generate_msl_uniforms_input_string(std::stringstream
int ubo_index = 0;
for (const MSLUniformBlock &ubo : this->uniform_blocks) {
if (bool(ubo.stage & stage)) {
- /* For literal/existing global types, we do not need the class namespace accessor. */
+ /* For literal/existing global types, we do not need the class name-space accessor. */
out << ",\n\tconstant ";
if (!is_builtin_type(ubo.type_name)) {
out << get_stage_class_name(stage) << "::";
}
- /* UniformBuffer bind indices start at MTL_uniform_buffer_base_index+1, as
- * MTL_uniform_buffer_base_index is reserved for the PushConstantBlock (push constants).
+ /* #UniformBuffer bind indices start at `MTL_uniform_buffer_base_index + 1`, as
+ * MTL_uniform_buffer_base_index is reserved for the #PushConstantBlock (push constants).
* MTL_uniform_buffer_base_index is an offset depending on the number of unique VBOs
- * bound for the current PSO specialisation. */
+ * bound for the current PSO specialization. */
out << ubo.type_name << "* " << ubo.name << "[[buffer(MTL_uniform_buffer_base_index+"
<< (ubo_index + 1) << ")]]";
}
@@ -1682,7 +1685,7 @@ std::string MSLGeneratorInterface::generate_msl_uniform_structs(ShaderStage shad
return out.str();
}
-/* Note: Uniform macro definition vars can conflict with other parameters. */
+/* NOTE: Uniform macro definition vars can conflict with other parameters. */
std::string MSLGeneratorInterface::generate_msl_uniform_undefs(ShaderStage shader_stage)
{
std::stringstream out;
@@ -1787,7 +1790,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_out_struct(ShaderStage sh
}
}
else {
- /* Matrix types need to be expressed as their vector subcomponents. */
+ /* Matrix types need to be expressed as their vector sub-components. */
if (is_matrix_type(v_out.type)) {
BLI_assert(v_out.get_mtl_interpolation_qualifier() == " [[flat]]" &&
"Matrix varying types must have [[flat]] interpolation");
@@ -1807,18 +1810,17 @@ std::string MSLGeneratorInterface::generate_msl_vertex_out_struct(ShaderStage sh
/* Add gl_PointSize if written to. */
if (shader_stage == ShaderStage::VERTEX) {
if (this->uses_gl_PointSize) {
- /* If gl_PointSize is explicitly written to,
+ /* If `gl_PointSize` is explicitly written to,
* we will output the written value directly.
- * This value can still be overriden by the
- * global pointsize value. */
+ * This value can still be overridden by the
+ * global point-size value. */
out << "\tfloat pointsize [[point_size]];" << std::endl;
}
else {
- /* Otherwise, if pointsize is not written to inside the shader,
- * then its usage is controlled by whether the MTL_global_pointsize
+ /* Otherwise, if point-size is not written to inside the shader,
+ * then its usage is controlled by whether the `MTL_global_pointsize`
* function constant has been specified.
- * This function constant is enabled for all point primitives beign
- * rendered. */
+ * This function constant is enabled for all point primitives being rendered. */
out << "\tfloat pointsize [[point_size, function_constant(MTL_global_pointsize)]];"
<< std::endl;
}
@@ -1904,7 +1906,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_transform_feedback_out_st
}
}
else {
- /* Matrix types need to be expressed as their vector subcomponents. */
+ /* Matrix types need to be expressed as their vector sub-components. */
if (is_matrix_type(v_out.type)) {
BLI_assert(v_out.get_mtl_interpolation_qualifier() == " [[flat]]" &&
"Matrix varying types must have [[flat]] interpolation");
@@ -1980,10 +1982,10 @@ std::string MSLGeneratorInterface::generate_msl_uniform_block_population(ShaderS
/* Only include blocks which are used within this stage. */
if (bool(ubo.stage & stage)) {
/* Generate UBO reference assignment.
- * NOTE(Metal): We append `_local` postfix onto the class member name
+ * NOTE(Metal): We append `_local` post-fix onto the class member name
* for the ubo to avoid name collision with the UBO accessor macro.
- * We only need to add this postfix for the non-array access variant,
- * as the array is indexed directly, rather than requiring a dereference. */
+ * We only need to add this post-fix for the non-array access variant,
+ * as the array is indexed directly, rather than requiring a dereference. */
out << "\t"
<< ((stage == ShaderStage::VERTEX) ? "vertex_shader_instance." :
"fragment_shader_instance.")
@@ -2045,7 +2047,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_attribute_input_populatio
out << ");";
}
else {
- /* OpenGL uses the GPU_FETCH_* functions which can alter how an attribute value is
+ /* OpenGL uses the `GPU_FETCH_*` functions which can alter how an attribute value is
* interpreted. In Metal, we cannot support all implicit conversions within the vertex
* descriptor/vertex stage-in, so we need to perform value transformation on-read.
*
@@ -2055,10 +2057,10 @@ std::string MSLGeneratorInterface::generate_msl_vertex_attribute_input_populatio
* vertex data, depending on the specified GPU_FETCH_* mode for the current
* vertex format.
*
- * The fetch_mode is specified per-attribute using specialisation constants
+ * The fetch_mode is specified per-attribute using specialization constants
* on the PSO, wherein a unique set of constants is passed in per vertex
* buffer/format configuration. Efficiently enabling pass-through reads
- * if no special fetch is required. */
+ * if no special fetch is required. */
bool do_attribute_conversion_on_read = false;
std::string attribute_conversion_func_name = get_attribute_conversion_function(
&do_attribute_conversion_on_read, this->vertex_input_attributes[attribute].type);
@@ -2098,7 +2100,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_output_population()
<< std::endl;
}
- /* Output Pointsize. */
+ /* Output Point-size. */
if (this->uses_gl_PointSize) {
out << "\toutput.pointsize = vertex_shader_instance.gl_PointSize;" << std::endl;
}
@@ -2110,7 +2112,7 @@ std::string MSLGeneratorInterface::generate_msl_vertex_output_population()
<< std::endl;
}
- /* Output clipdistances. */
+ /* Output clip-distances. */
out << "#if defined(USE_CLIP_PLANES) || defined(USE_WORLD_CLIP_PLANES)" << std::endl;
if (this->clip_distances.size() > 1) {
for (int cd = 0; cd < this->clip_distances.size(); cd++) {
@@ -2384,7 +2386,7 @@ void MSLGeneratorInterface::resolve_input_attribute_locations()
/* Determine free location.
* Starting from 1 is slightly less efficient, however,
- * given mutli-sized attributes, an earlier slot may remain free.
+ * given multi-sized attributes, an earlier slot may remain free.
* given GPU_VERT_ATTR_MAX_LEN is small, this wont matter. */
for (int loc = 0; loc < GPU_VERT_ATTR_MAX_LEN - (required_attr_slot_count - 1); loc++) {
@@ -2429,8 +2431,10 @@ void MSLGeneratorInterface::resolve_fragment_output_locations()
}
}
-/* Add string to name buffer. Utility function to be used in bake_shader_interface.
- * Returns the offset of the inserted name.*/
+/**
+ * Add string to name buffer. Utility function to be used in bake_shader_interface.
+ * Returns the offset of the inserted name.
+ */
static uint32_t name_buffer_copystr(char **name_buffer_ptr,
const char *str_to_copy,
uint32_t &name_buffer_size,
@@ -2443,7 +2447,7 @@ static uint32_t name_buffer_copystr(char **name_buffer_ptr,
uint32_t ret_len = strlen(str_to_copy);
BLI_assert(ret_len > 0);
- /* If required name buffer size is larger, increase by atleast 128 bytes. */
+ /* If required name buffer size is larger, increase by at least 128 bytes. */
if (name_buffer_size + ret_len > name_buffer_size) {
name_buffer_size = name_buffer_size + max_ii(128, ret_len);
*name_buffer_ptr = (char *)MEM_reallocN(*name_buffer_ptr, name_buffer_size);
@@ -2467,7 +2471,7 @@ MTLShaderInterface *MSLGeneratorInterface::bake_shader_interface(const char *nam
interface->init();
/* Name buffer. */
- /* Initialise name buffer. */
+ /* Initialize name buffer. */
uint32_t name_buffer_size = 256;
uint32_t name_buffer_offset = 0;
interface->name_buffer_ = (char *)MEM_mallocN(name_buffer_size, "name_buffer");
@@ -2487,7 +2491,7 @@ MTLShaderInterface *MSLGeneratorInterface::bake_shader_interface(const char *nam
elem < get_matrix_location_count(this->vertex_input_attributes[attribute].type);
elem++) {
/* First attribute matches the core name -- subsequent attributes tagged with
- * __internal_<name><index>. */
+ * `__internal_<name><index>`. */
std::string _internal_name = (elem == 0) ?
this->vertex_input_attributes[attribute].name :
"__internal_" +
@@ -2582,7 +2586,7 @@ MTLShaderInterface *MSLGeneratorInterface::bake_shader_interface(const char *nam
this->get_sampler_argument_buffer_bind_index(ShaderStage::VERTEX),
this->get_sampler_argument_buffer_bind_index(ShaderStage::FRAGMENT));
- /* Map Metal bindings to standardised ShaderInput struct name/binding index. */
+ /* Map Metal bindings to standardized ShaderInput struct name/binding index. */
interface->prepare_common_shader_inputs();
/* Resize name buffer to save some memory. */
@@ -2694,7 +2698,7 @@ std::string MSLTextureSampler::get_msl_texture_type_str() const
return "texture_buffer";
}
default: {
- /* Unrecognised type. */
+ /* Unrecognized type. */
BLI_assert_unreachable();
return "ERROR";
}
@@ -2802,7 +2806,7 @@ std::string MSLTextureSampler::get_msl_wrapper_type_str() const
return "_mtl_combined_image_sampler_buffer";
}
default: {
- /* Unrecognised type. */
+ /* Unrecognized type. */
BLI_assert_unreachable();
return "ERROR";
}
@@ -2857,7 +2861,7 @@ std::string MSLTextureSampler::get_msl_return_type_str() const
}
default: {
- /* Unrecognised type. */
+ /* Unrecognized type. */
BLI_assert_unreachable();
return "ERROR";
}
diff --git a/source/blender/gpu/metal/mtl_shader_interface.hh b/source/blender/gpu/metal/mtl_shader_interface.hh
index 0f04c04031d..0da84cad997 100644
--- a/source/blender/gpu/metal/mtl_shader_interface.hh
+++ b/source/blender/gpu/metal/mtl_shader_interface.hh
@@ -23,33 +23,33 @@
namespace blender::gpu {
-/* MTLShaderInterface describes the layout and properties of a given shader,
+/* #MTLShaderInterface describes the layout and properties of a given shader,
* including input and output bindings, and any special properties or modes
* that the shader may require.
*
* -- Shader input/output bindings --
*
- * We require custom datastructures for the binding information in Metal.
+ * We require custom data-structures for the binding information in Metal.
* This is because certain bindings contain and require more information to
* be stored than can be tracked solely within the `ShaderInput` struct.
* e.g. data sizes and offsets.
*
* Upon interface completion, `prepare_common_shader_inputs` is used to
- * populate the global ShaderInput* array to enable correct functionality
+ * populate the global `ShaderInput*` array to enable correct functionality
* of shader binding location lookups. These returned locations act as indices
- * into the arrays stored here in the MTLShaderInterace, such that extraction
- * of required information can be performed within the backend.
+ * into the arrays stored here in the #MTLShaderInterface, such that extraction
+ * of required information can be performed within the back-end.
*
* e.g. `int loc = GPU_shader_get_uniform(...)`
- * `loc` will match the index into the MTLShaderUniform uniforms_[] array
+ * `loc` will match the index into the `MTLShaderUniform uniforms_[]` array
* to fetch the required Metal specific information.
*
*
*
* -- Argument Buffers and Argument Encoders --
*
- * We can use ArgumentBuffers (AB's) in Metal to extend the resource bind limitations
- * by providing bindless support.
+ * We can use #ArgumentBuffers (AB's) in Metal to extend the resource bind limitations
+ * by providing bind-less support.
*
* Argument Buffers are used for sampler bindings when the builtin
* sampler limit of 16 is exceeded, as in all cases for Blender,
@@ -60,8 +60,8 @@ namespace blender::gpu {
* In future, argument buffers may be extended to support other resource
* types, if overall bind limits are ever increased within Blender.
*
- * The ArgumentEncoder cache used to store the generated ArgumentEncoders for a given
- * shader permutation. The ArgumentEncoder is the resource used to write resource binding
+ * The #ArgumentEncoder cache used to store the generated #ArgumentEncoders for a given
+ * shader permutation. The #ArgumentEncoder is the resource used to write resource binding
* information to a specified buffer, and is unique to the shader's resource interface.
*/
@@ -107,7 +107,7 @@ struct MTLShaderInputAttribute {
struct MTLShaderUniformBlock {
uint32_t name_offset;
uint32_t size = 0;
- /* Buffer resouce bind index in shader [[buffer(index)]]. */
+ /* Buffer resource bind index in shader `[[buffer(index)]]`. */
uint32_t buffer_index;
/* Tracking for manual uniform addition. */
@@ -127,7 +127,7 @@ struct MTLShaderUniform {
struct MTLShaderTexture {
bool used;
uint32_t name_offset;
- /* Texture resource bind slot in shader [[texture(n)]]. */
+ /* Texture resource bind slot in shader `[[texture(n)]]`. */
int slot_index;
eGPUTextureType type;
ShaderStage stage_mask;
@@ -135,7 +135,7 @@ struct MTLShaderTexture {
struct MTLShaderSampler {
uint32_t name_offset;
- /* Sampler resource bind slot in shader [[sampler(n)]]. */
+ /* Sampler resource bind slot in shader `[[sampler(n)]]`. */
uint32_t slot_index = 0;
};
@@ -143,7 +143,7 @@ struct MTLShaderSampler {
MTLVertexFormat mtl_datatype_to_vertex_type(eMTLDataType type);
/**
- * Implementation of Shader interface for Metal Backend.
+ * Implementation of Shader interface for Metal Back-end.
**/
class MTLShaderInterface : public ShaderInterface {
@@ -157,7 +157,7 @@ class MTLShaderInterface : public ShaderInterface {
};
ArgumentEncoderCacheEntry arg_encoders_[ARGUMENT_ENCODERS_CACHE_SIZE] = {};
- /* Vertex input Attribues. */
+ /* Vertex input Attributes. */
uint32_t total_attributes_;
uint32_t total_vert_stride_;
MTLShaderInputAttribute attributes_[MTL_MAX_VERTEX_INPUT_ATTRIBUTES];
@@ -218,7 +218,7 @@ class MTLShaderInterface : public ShaderInterface {
uint32_t argument_buffer_bind_index_vert,
uint32_t argument_buffer_bind_index_frag);
- /* Prepare ShaderInput interface for binding resolution. */
+ /* Prepare #ShaderInput interface for binding resolution. */
void prepare_common_shader_inputs();
/* Fetch Uniforms. */
diff --git a/source/blender/gpu/metal/mtl_shader_interface.mm b/source/blender/gpu/metal/mtl_shader_interface.mm
index 1adf1210496..3703d5b5684 100644
--- a/source/blender/gpu/metal/mtl_shader_interface.mm
+++ b/source/blender/gpu/metal/mtl_shader_interface.mm
@@ -32,7 +32,7 @@ MTLShaderInterface::MTLShaderInterface(const char *name)
strcpy(this->name, name);
}
- /* Ensure ShaderInterface parameters are cleared. */
+ /* Ensure #ShaderInterface parameters are cleared. */
this->init();
}
@@ -64,7 +64,7 @@ void MTLShaderInterface::init()
sampler_argument_buffer_bind_index_vert_ = -1;
sampler_argument_buffer_bind_index_frag_ = -1;
- /* NULL initialise uniform location markers for builtins. */
+ /* NULL initialize uniform location markers for builtins. */
for (const int u : IndexRange(GPU_NUM_UNIFORMS)) {
builtins_[u] = -1;
}
@@ -76,7 +76,7 @@ void MTLShaderInterface::init()
textures_[tex].slot_index = -1;
}
- /* Null initialisation for argument encoders. */
+ /* Null initialization for argument encoders. */
for (const int i : IndexRange(ARGUMENT_ENCODERS_CACHE_SIZE)) {
arg_encoders_[i].encoder = nil;
arg_encoders_[i].buffer_index = -1;
@@ -117,7 +117,7 @@ uint32_t MTLShaderInterface::add_uniform_block(uint32_t name_offset,
MTLShaderUniformBlock &uni_block = ubos_[total_uniform_blocks_];
uni_block.name_offset = name_offset;
- /* We offset the buffer bidning index by one, as the first slot is reserved for push constant
+ /* We offset the buffer binding index by one, as the first slot is reserved for push constant
* data. */
uni_block.buffer_index = buffer_index + 1;
uni_block.size = size;
@@ -224,7 +224,7 @@ void MTLShaderInterface::map_builtins()
builtin_blocks_[ubo] = -1;
}
- /* Resolve and cache uniform locations for bultin uniforms. */
+ /* Resolve and cache uniform locations for builtin uniforms. */
for (const int u : IndexRange(GPU_NUM_UNIFORMS)) {
const ShaderInput *uni = this->uniform_get(builtin_uniform_name((GPUUniformBuiltin)u));
if (uni != nullptr) {
@@ -239,7 +239,7 @@ void MTLShaderInterface::map_builtins()
}
}
- /* Resolve and cache uniform locations for bultin uniform blocks. */
+ /* Resolve and cache uniform locations for builtin uniform blocks. */
for (const int u : IndexRange(GPU_NUM_UNIFORM_BLOCKS)) {
const ShaderInput *uni = this->ubo_get(builtin_uniform_block_name((GPUUniformBlockBuiltin)u));
@@ -255,16 +255,16 @@ void MTLShaderInterface::map_builtins()
}
}
-/* Populate ShaderInput struct based on interface. */
+/* Populate #ShaderInput struct based on interface. */
void MTLShaderInterface::prepare_common_shader_inputs()
{
- /* ShaderInput inputs_ maps a uniform name to an external
+ /* `ShaderInput inputs_` maps a uniform name to an external
* uniform location, which is used as an array index to look-up
- * information in the local MTLShaderInterface input structs.
+ * information in the local #MTLShaderInterface input structs.
*
- * ShaderInput population follows the ordering rules in gpu_shader_interface. */
+ * #ShaderInput population follows the ordering rules in #gpu_shader_interface. */
- /* Populate ShaderInterface counts. */
+ /* Populate #ShaderInterface counts. */
attr_len_ = this->get_total_attributes();
ubo_len_ = this->get_total_uniform_blocks();
uniform_len_ = this->get_total_uniforms() + this->get_total_textures();
@@ -272,8 +272,8 @@ void MTLShaderInterface::prepare_common_shader_inputs()
/* TODO(Metal): Support storage buffer bindings. Pending compute shader support. */
ssbo_len_ = 0;
- /* Calculate total inputs and allocate ShaderInput array. */
- /* NOTE: We use the existing name_buffer_ allocated for internal input structs. */
+ /* Calculate total inputs and allocate #ShaderInput array. */
+ /* NOTE: We use the existing `name_buffer_` allocated for internal input structs. */
int input_tot_len = attr_len_ + ubo_len_ + uniform_len_ + ssbo_len_;
inputs_ = (ShaderInput *)MEM_callocN(sizeof(ShaderInput) * input_tot_len, __func__);
ShaderInput *current_input = inputs_;
@@ -316,9 +316,9 @@ void MTLShaderInterface::prepare_common_shader_inputs()
}
/* Textures.
- * NOTE(Metal): Textures are externally treated as uniforms in gpu_shader_interface.
+ * NOTE(Metal): Textures are externally treated as uniforms in #gpu_shader_interface.
* Location for textures resolved as `binding` value. This
- * is the index into the local MTLShaderTexture textures[] array.
+ * is the index into the local `MTLShaderTexture textures[]` array.
*
* In MSL, we cannot trivially remap which texture slot a given texture
* handle points to, unlike in GLSL, where a uniform sampler/image can be updated
@@ -341,7 +341,7 @@ void MTLShaderInterface::prepare_common_shader_inputs()
* to ensure texture handles are not treated as standard uniforms in Metal. */
current_input->location = texture_index + total_uniforms_;
- /* Binding represents texture slot [[texture(n)]]. */
+ /* Binding represents texture slot `[[texture(n)]]`. */
current_input->binding = shd_tex.slot_index;
current_input++;
}
diff --git a/source/blender/gpu/metal/mtl_shader_interface_type.hh b/source/blender/gpu/metal/mtl_shader_interface_type.hh
index a8e651d8509..3c4c87ee25b 100644
--- a/source/blender/gpu/metal/mtl_shader_interface_type.hh
+++ b/source/blender/gpu/metal/mtl_shader_interface_type.hh
@@ -245,7 +245,7 @@ inline uint mtl_get_data_type_alignment(eMTLDataType type)
return 32;
default:
- BLI_assert_msg(false, "Unrecognised MTL datatype.");
+ BLI_assert_msg(false, "Unrecognized MTL datatype.");
return 0;
};
}
diff --git a/source/blender/gpu/metal/mtl_state.mm b/source/blender/gpu/metal/mtl_state.mm
index 85080041246..31182cf91d1 100644
--- a/source/blender/gpu/metal/mtl_state.mm
+++ b/source/blender/gpu/metal/mtl_state.mm
@@ -202,7 +202,7 @@ static MTLCompareFunction gpu_stencil_func_to_metal(eGPUStencilTest stencil_func
case GPU_STENCIL_ALWAYS:
return MTLCompareFunctionAlways;
default:
- BLI_assert(false && "Unrecognised eGPUStencilTest function");
+ BLI_assert(false && "Unrecognized eGPUStencilTest function");
break;
}
return MTLCompareFunctionAlways;
diff --git a/source/blender/gpu/metal/mtl_texture_util.mm b/source/blender/gpu/metal/mtl_texture_util.mm
index 25b30c6cb0e..928393fb39e 100644
--- a/source/blender/gpu/metal/mtl_texture_util.mm
+++ b/source/blender/gpu/metal/mtl_texture_util.mm
@@ -124,7 +124,7 @@ MTLPixelFormat gpu_texture_format_to_metal(eGPUTextureFormat tex_format)
return MTLPixelFormatDepth16Unorm;
default:
- BLI_assert(!"Unrecognised GPU pixel format!\n");
+ BLI_assert(!"Unrecognized GPU pixel format!\n");
return MTLPixelFormatRGBA8Unorm;
}
}
@@ -183,7 +183,7 @@ int get_mtl_format_bytesize(MTLPixelFormat tex_format)
return 2;
default:
- BLI_assert(!"Unrecognised GPU pixel format!\n");
+ BLI_assert(!"Unrecognized GPU pixel format!\n");
return 1;
}
}
@@ -238,7 +238,7 @@ int get_mtl_format_num_components(MTLPixelFormat tex_format)
return 1;
default:
- BLI_assert(!"Unrecognised GPU pixel format!\n");
+ BLI_assert(!"Unrecognized GPU pixel format!\n");
return 1;
}
}
@@ -632,7 +632,7 @@ id<MTLComputePipelineState> gpu::MTLTexture::mtl_texture_read_impl(
depth_scale_factor = 0xFFFFFFFFu;
break;
default:
- BLI_assert_msg(0, "Unrecognised mode");
+ BLI_assert_msg(0, "Unrecognized mode");
break;
}
}
diff --git a/source/blender/gpu/shaders/compositor/compositor_morphological_distance_feather.glsl b/source/blender/gpu/shaders/compositor/compositor_morphological_distance_feather.glsl
index 8034f4a3ebd..acdd8a40342 100644
--- a/source/blender/gpu/shaders/compositor/compositor_morphological_distance_feather.glsl
+++ b/source/blender/gpu/shaders/compositor/compositor_morphological_distance_feather.glsl
@@ -59,7 +59,7 @@ void main()
/* Start with the center value as the maximum/minimum distance and reassign to the true maximum
* or minimum in the search loop below. Additionally, the center falloff is always 1.0, so start
- * with that. */
+ * with that. */
float limit_distance = center_value;
float limit_distance_falloff = 1.0;
diff --git a/source/blender/gpu/shaders/compositor/compositor_morphological_distance_threshold.glsl b/source/blender/gpu/shaders/compositor/compositor_morphological_distance_threshold.glsl
index 5931c4f0271..e6625e7419f 100644
--- a/source/blender/gpu/shaders/compositor/compositor_morphological_distance_threshold.glsl
+++ b/source/blender/gpu/shaders/compositor/compositor_morphological_distance_threshold.glsl
@@ -58,7 +58,7 @@ void main()
* a texture loader with a fallback value. And since we don't want those values to affect the
* result, the fallback value is chosen such that the inner condition fails, which is when the
* sampled pixel and the center pixel are the same, so choose a fallback that will be considered
- * masked if the center pixel is masked and unmasked otherwise. */
+ * masked if the center pixel is masked and unmasked otherwise. */
vec4 fallback = vec4(is_center_masked ? 1.0 : 0.0);
/* Since the distance search window is limited to the given radius, the maximum possible squared
diff --git a/source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl b/source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl
index cf961b20b34..ab44dac93e6 100644
--- a/source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl
+++ b/source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl
@@ -4,7 +4,7 @@ void main()
{
ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
- /* Get the normalized coordinates of the pixel centers. */
+ /* Get the normalized coordinates of the pixel centers. */
vec2 normalized_texel = (vec2(texel) + vec2(0.5)) / vec2(texture_size(input_tx));
/* Sample the red and blue channels shifted by the dispersion amount. */
diff --git a/source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl b/source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl
index be984d81603..b8561e5f059 100644
--- a/source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl
+++ b/source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl
@@ -9,7 +9,7 @@ void main()
/* Transform the input image by transforming the domain coordinates with the inverse of input
* image's transformation. The inverse transformation is an affine matrix and thus the
- * coordinates should be in homogeneous coordinates. */
+ * coordinates should be in homogeneous coordinates. */
coordinates = (mat3(inverse_transformation) * vec3(coordinates, 1.0)).xy;
/* Since an input image with an identity transformation is supposed to be centered in the domain,
diff --git a/source/blender/gpu/shaders/gpu_shader_codegen_lib.glsl b/source/blender/gpu/shaders/gpu_shader_codegen_lib.glsl
index c0821085c8d..94707de71ed 100644
--- a/source/blender/gpu/shaders/gpu_shader_codegen_lib.glsl
+++ b/source/blender/gpu/shaders/gpu_shader_codegen_lib.glsl
@@ -187,7 +187,7 @@ struct ClosureTransparency {
struct GlobalData {
/** World position. */
vec3 P;
- /** Surface Normal. Normalized, overriden by bump displacement. */
+ /** Surface Normal. Normalized, overridden by bump displacement. */
vec3 N;
/** Raw interpolated normal (non-normalized) data. */
vec3 Ni;
diff --git a/source/blender/io/usd/intern/usd_reader_mesh.cc b/source/blender/io/usd/intern/usd_reader_mesh.cc
index 86e3aeece5d..cde7ab5b628 100644
--- a/source/blender/io/usd/intern/usd_reader_mesh.cc
+++ b/source/blender/io/usd/intern/usd_reader_mesh.cc
@@ -103,7 +103,7 @@ static Material *find_existing_material(
return mat_iter->second;
}
/* We can't find the Blender material which was previously created for this USD
- * material, which should never happen. */
+ * material, which should never happen. */
BLI_assert_unreachable();
}
}
diff --git a/source/blender/io/wavefront_obj/exporter/obj_export_io.hh b/source/blender/io/wavefront_obj/exporter/obj_export_io.hh
index cc0f7c0824c..59ee7bd32c0 100644
--- a/source/blender/io/wavefront_obj/exporter/obj_export_io.hh
+++ b/source/blender/io/wavefront_obj/exporter/obj_export_io.hh
@@ -186,7 +186,7 @@ class FormatHandler : NonCopyable, NonMovable {
{
write_impl("illum {}\n", mode);
}
- /* Note: options, if present, will have its own leading space. */
+ /* NOTE: options, if present, will have its own leading space. */
void write_mtl_map(const char *type, StringRef options, StringRef value)
{
write_impl("{}{} {}\n", type, options, value);
diff --git a/source/blender/makesdna/DNA_gpencil_modifier_types.h b/source/blender/makesdna/DNA_gpencil_modifier_types.h
index 7f8e436f007..ca1eac0bde8 100644
--- a/source/blender/makesdna/DNA_gpencil_modifier_types.h
+++ b/source/blender/makesdna/DNA_gpencil_modifier_types.h
@@ -1000,9 +1000,9 @@ typedef enum eLineartGpencilModifierSource {
typedef enum eLineartGpencilModifierShadowFilter {
/* These options need to be ordered in this way because those latter options requires line art to
- run a few extra stages. Having those values set up this way will allow
- #BKE_gpencil_get_lineart_modifier_limits() to find out maximum stages needed in multiple
- cached line art modifiers. */
+ * run a few extra stages. Having those values set up this way will allow
+ * #BKE_gpencil_get_lineart_modifier_limits() to find out maximum stages needed in multiple
+ * cached line art modifiers. */
LRT_SHADOW_FILTER_NONE = 0,
LRT_SHADOW_FILTER_ILLUMINATED = 1,
LRT_SHADOW_FILTER_SHADED = 2,
diff --git a/source/blender/makesrna/intern/rna_space.c b/source/blender/makesrna/intern/rna_space.c
index 5f2e3c4d1a0..51acb4da5c3 100644
--- a/source/blender/makesrna/intern/rna_space.c
+++ b/source/blender/makesrna/intern/rna_space.c
@@ -4737,7 +4737,7 @@ static void rna_def_space_view3d_overlay(BlenderRNA *brna)
RNA_def_property_ui_text(prop, "Opacity", "Vertex Paint mix factor");
RNA_def_property_update(prop, NC_SPACE | ND_SPACE_VIEW3D, "rna_GPencil_update");
- /* Developper Debug overlay */
+ /* Developer Debug overlay */
prop = RNA_def_property(srna, "use_debug_freeze_view_culling", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, NULL, "debug_flag", V3D_DEBUG_FREEZE_CULLING);
diff --git a/source/blender/nodes/composite/nodes/node_composite_image.cc b/source/blender/nodes/composite/nodes/node_composite_image.cc
index b6bd263b150..4d1eff0b940 100644
--- a/source/blender/nodes/composite/nodes/node_composite_image.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_image.cc
@@ -535,7 +535,7 @@ class ImageOperation : public NodeOperation {
/* Get a copy of the image user that is appropriate to retrieve the image buffer for the output
* with the given identifier. This essentially sets the appropriate pass and view indices that
- * corresponds to the output. */
+ * corresponds to the output. */
ImageUser compute_image_user_for_output(StringRef identifier)
{
ImageUser image_user = *get_image_user();
diff --git a/source/blender/nodes/shader/nodes/node_shader_bsdf_principled.cc b/source/blender/nodes/shader/nodes/node_shader_bsdf_principled.cc
index 2f75b7b533f..7b72d4b9be4 100644
--- a/source/blender/nodes/shader/nodes/node_shader_bsdf_principled.cc
+++ b/source/blender/nodes/shader/nodes/node_shader_bsdf_principled.cc
@@ -172,7 +172,7 @@ static int node_shader_gpu_bsdf_principled(GPUMaterial *mat,
}
/* Ref. T98190: Defines are optimizations for old compilers.
- * Might become unecessary with EEVEE-Next. */
+ * Might become unnecessary with EEVEE-Next. */
if (use_diffuse == false && use_refract == false && use_clear == true) {
flag |= GPU_MATFLAG_PRINCIPLED_CLEARCOAT;
}
diff --git a/source/blender/python/generic/py_capi_utils.c b/source/blender/python/generic/py_capi_utils.c
index 4c842d82972..007a2fdbb8e 100644
--- a/source/blender/python/generic/py_capi_utils.c
+++ b/source/blender/python/generic/py_capi_utils.c
@@ -928,7 +928,7 @@ PyObject *PyC_ExceptionBuffer(void)
PySys_SetObject("stderr", string_io);
PyErr_Restore(error_type, error_value, error_traceback);
- /* Printing clears (call #PyErr_Clear as well to ensure it's cleared). */
+ /* Printing clears (call #PyErr_Clear as well to ensure it's cleared). */
Py_XINCREF(error_type);
Py_XINCREF(error_value);
Py_XINCREF(error_traceback);
diff --git a/source/blender/windowmanager/intern/wm_event_system.cc b/source/blender/windowmanager/intern/wm_event_system.cc
index 3054708fbdb..bc19e2c09c3 100644
--- a/source/blender/windowmanager/intern/wm_event_system.cc
+++ b/source/blender/windowmanager/intern/wm_event_system.cc
@@ -5259,7 +5259,7 @@ void wm_event_add_ghostevent(wmWindowManager *wm, wmWindow *win, int type, void
event.prev_val = event.val;
/* Always use modifiers from the active window since
- changes to modifiers aren't sent to inactive windows, see: T66088. */
+ * changes to modifiers aren't sent to inactive windows, see: T66088. */
if ((wm->winactive != win) && (wm->winactive && wm->winactive->eventstate)) {
event.modifier = wm->winactive->eventstate->modifier;
event.keymodifier = wm->winactive->eventstate->keymodifier;