git.blender.org/blender.git
author     Michael Jones <michael_p_jones@apple.com>  2021-10-14 15:53:40 +0300
committer  Michael Jones <michael_p_jones@apple.com>  2021-10-14 18:14:43 +0300
commit     a0f269f682dab848afc80cd322d04a0c4a815cae (patch)
tree       0978b1888273fbaa2d14550bde484c5247fa89ff /intern/cycles/kernel/kernel_path_state.h
parent     47caeb8c26686e24ea7e694f94fabee44f3d2dca (diff)
Cycles: Kernel address space changes for MSL
This is the first of a sequence of changes to support compiling Cycles kernels as MSL (Metal Shading Language) in preparation for a Metal GPU device implementation.

MSL requires that all pointer types be declared with explicit address space attributes (device, thread, etc...). There is already precedent for this with Cycles' address space macros (ccl_global, ccl_private, etc...), therefore the first step of MSL-enablement is to apply these consistently. Line-for-line this represents the largest change required to enable MSL. Applying this change first will simplify future patches as well as offering the emergent benefit of enhanced descriptiveness.

The vast majority of deltas in this patch fall into one of two cases:

- Ensuring ccl_private is specified for thread-local pointer types
- Ensuring ccl_global is specified for device-wide pointer types

Additionally, the ccl_addr_space qualifier can be removed. Prior to Cycles X, ccl_addr_space was used as a context-dependent address space qualifier, but now it is either redundant (e.g. in struct typedefs), or can be replaced by ccl_global in the case of pointer types. Associated function variants (e.g. lcg_step_float_addrspace) are also redundant.

In cases where address space qualifiers are chained with "const", this patch places the address space qualifier first. The rationale for this is that the choice of address space is likely to have the greater impact on runtime performance and overall architecture.

The final part of this patch is the addition of a metal/compat.h header. This is partially complete and will be extended in future patches, paving the way for the full Metal implementation.

Ref T92212

Reviewed By: brecht

Maniphest Tasks: T92212

Differential Revision: https://developer.blender.org/D12864
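For illustration only (this sketch is not taken from the patch or from the actual metal/compat.h): one way such address-space macros could map onto MSL qualifiers. The macro names follow the existing Cycles conventions; the definitions below are assumptions, not quotes from the header.

/* Illustrative sketch -- definitions assumed, the real metal/compat.h may differ. */
#ifdef __METAL_VERSION__
/* MSL requires explicit address space attributes on pointer types. */
#  define ccl_global device   /* device-wide memory */
#  define ccl_private thread  /* thread-local memory */
#else
/* Backends without explicit address spaces: the qualifiers expand to nothing. */
#  define ccl_global
#  define ccl_private
#endif

With macros like these applied consistently, a helper such as path_state_rng_1D (see the hunks below) keeps a single signature across backends, with the address space qualifier placed before "const" as described above.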
Diffstat (limited to 'intern/cycles/kernel/kernel_path_state.h')
-rw-r--r--  intern/cycles/kernel/kernel_path_state.h  39
1 file changed, 22 insertions(+), 17 deletions(-)
diff --git a/intern/cycles/kernel/kernel_path_state.h b/intern/cycles/kernel/kernel_path_state.h
index ebb2c0df4f1..e04ed5b1cc1 100644
--- a/intern/cycles/kernel/kernel_path_state.h
+++ b/intern/cycles/kernel/kernel_path_state.h
@@ -32,7 +32,7 @@ ccl_device_inline void path_state_init_queues(INTEGRATOR_STATE_ARGS)
/* Minimalistic initialization of the path state, which is needed for early outputs in the
* integrator initialization to work. */
ccl_device_inline void path_state_init(INTEGRATOR_STATE_ARGS,
- const ccl_global KernelWorkTile *ccl_restrict tile,
+ ccl_global const KernelWorkTile *ccl_restrict tile,
const int x,
const int y)
{
@@ -281,14 +281,16 @@ typedef struct RNGState {
int sample;
} RNGState;
-ccl_device_inline void path_state_rng_load(INTEGRATOR_STATE_CONST_ARGS, RNGState *rng_state)
+ccl_device_inline void path_state_rng_load(INTEGRATOR_STATE_CONST_ARGS,
+ ccl_private RNGState *rng_state)
{
rng_state->rng_hash = INTEGRATOR_STATE(path, rng_hash);
rng_state->rng_offset = INTEGRATOR_STATE(path, rng_offset);
rng_state->sample = INTEGRATOR_STATE(path, sample);
}
-ccl_device_inline void shadow_path_state_rng_load(INTEGRATOR_STATE_CONST_ARGS, RNGState *rng_state)
+ccl_device_inline void shadow_path_state_rng_load(INTEGRATOR_STATE_CONST_ARGS,
+ ccl_private RNGState *rng_state)
{
const uint shadow_bounces = INTEGRATOR_STATE(shadow_path, transparent_bounce) -
INTEGRATOR_STATE(path, transparent_bounce);
@@ -298,23 +300,26 @@ ccl_device_inline void shadow_path_state_rng_load(INTEGRATOR_STATE_CONST_ARGS, R
rng_state->sample = INTEGRATOR_STATE(path, sample);
}
-ccl_device_inline float path_state_rng_1D(const KernelGlobals *kg,
- const RNGState *rng_state,
+ccl_device_inline float path_state_rng_1D(ccl_global const KernelGlobals *kg,
+ ccl_private const RNGState *rng_state,
int dimension)
{
return path_rng_1D(
kg, rng_state->rng_hash, rng_state->sample, rng_state->rng_offset + dimension);
}
-ccl_device_inline void path_state_rng_2D(
- const KernelGlobals *kg, const RNGState *rng_state, int dimension, float *fx, float *fy)
+ccl_device_inline void path_state_rng_2D(ccl_global const KernelGlobals *kg,
+ ccl_private const RNGState *rng_state,
+ int dimension,
+ ccl_private float *fx,
+ ccl_private float *fy)
{
path_rng_2D(
kg, rng_state->rng_hash, rng_state->sample, rng_state->rng_offset + dimension, fx, fy);
}
-ccl_device_inline float path_state_rng_1D_hash(const KernelGlobals *kg,
- const RNGState *rng_state,
+ccl_device_inline float path_state_rng_1D_hash(ccl_global const KernelGlobals *kg,
+ ccl_private const RNGState *rng_state,
uint hash)
{
/* Use a hash instead of dimension, this is not great but avoids adding
@@ -324,8 +329,8 @@ ccl_device_inline float path_state_rng_1D_hash(const KernelGlobals *kg,
kg, cmj_hash_simple(rng_state->rng_hash, hash), rng_state->sample, rng_state->rng_offset);
}
-ccl_device_inline float path_branched_rng_1D(const KernelGlobals *kg,
- const RNGState *rng_state,
+ccl_device_inline float path_branched_rng_1D(ccl_global const KernelGlobals *kg,
+ ccl_private const RNGState *rng_state,
int branch,
int num_branches,
int dimension)
@@ -336,13 +341,13 @@ ccl_device_inline float path_branched_rng_1D(const KernelGlobals *kg,
rng_state->rng_offset + dimension);
}
-ccl_device_inline void path_branched_rng_2D(const KernelGlobals *kg,
- const RNGState *rng_state,
+ccl_device_inline void path_branched_rng_2D(ccl_global const KernelGlobals *kg,
+ ccl_private const RNGState *rng_state,
int branch,
int num_branches,
int dimension,
- float *fx,
- float *fy)
+ ccl_private float *fx,
+ ccl_private float *fy)
{
path_rng_2D(kg,
rng_state->rng_hash,
@@ -355,8 +360,8 @@ ccl_device_inline void path_branched_rng_2D(const KernelGlobals *kg,
/* Utility functions to get light termination value,
* since it might not be needed in many cases.
*/
-ccl_device_inline float path_state_rng_light_termination(const KernelGlobals *kg,
- const RNGState *state)
+ccl_device_inline float path_state_rng_light_termination(ccl_global const KernelGlobals *kg,
+ ccl_private const RNGState *state)
{
if (kernel_data.integrator.light_inv_rr_threshold > 0.0f) {
return path_state_rng_1D(kg, state, PRNG_LIGHT_TERMINATE);