git.blender.org/blender.git
Diffstat (limited to 'intern/cycles/kernel/svm/svm_closure.h')
 intern/cycles/kernel/svm/svm_closure.h | 164 ++++++++++++++++----------
 1 file changed, 102 insertions(+), 62 deletions(-)
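
The change below adds explicit address-space qualifiers (ccl_private, ccl_global) to kernel pointers and to the casts on closure allocations. As a minimal sketch of that pattern, assuming the usual convention that these macros expand to nothing in CPU builds and to the target's address-space keywords on GPU backends; the macro definitions and the stub allocator here are hypothetical stand-ins, not Blender's actual headers:

/* Illustrative sketch only: stand-in definitions showing why pointer
 * declarations in the kernel carry an explicit address space.  In a CPU
 * build the qualifiers vanish; a GPU backend would map them to its own
 * address-space keywords (private vs. device/global memory). */
#ifndef __KERNEL_GPU__
#  define ccl_private /* empty on CPU */
#  define ccl_global  /* empty on CPU */
#endif

typedef struct MicrofacetBsdf {
  float alpha_x, alpha_y;
} MicrofacetBsdf;

/* Hypothetical allocator standing in for bsdf_alloc(): returns untyped
 * private-memory storage that the caller casts to the concrete closure. */
static ccl_private void *bsdf_alloc_stub(ccl_private char *pool, unsigned long size)
{
  (void)size;
  return (ccl_private void *)pool;
}

int main(void)
{
  static MicrofacetBsdf storage; /* stand-in for the kernel's closure pool */

  /* The pattern applied throughout this diff: both the pointer variable and
   * the cast carry the address space, so the same source compiles unchanged
   * on backends where private and global memory are distinct types. */
  ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc_stub(
      (ccl_private char *)&storage, sizeof(MicrofacetBsdf));
  bsdf->alpha_x = 0.5f;
  bsdf->alpha_y = 0.5f;
  return 0;
}
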
diff --git a/intern/cycles/kernel/svm/svm_closure.h b/intern/cycles/kernel/svm/svm_closure.h
index e55f76a4400..87be73bb2cc 100644
--- a/intern/cycles/kernel/svm/svm_closure.h
+++ b/intern/cycles/kernel/svm/svm_closure.h
@@ -18,8 +18,12 @@ CCL_NAMESPACE_BEGIN
/* Closure Nodes */
-ccl_device void svm_node_glass_setup(
- ShaderData *sd, MicrofacetBsdf *bsdf, int type, float eta, float roughness, bool refract)
+ccl_device void svm_node_glass_setup(ccl_private ShaderData *sd,
+ ccl_private MicrofacetBsdf *bsdf,
+ int type,
+ float eta,
+ float roughness,
+ bool refract)
{
if (type == CLOSURE_BSDF_SHARP_GLASS_ID) {
if (refract) {
@@ -58,8 +62,12 @@ ccl_device void svm_node_glass_setup(
}
template<uint node_feature_mask, ShaderType shader_type>
-ccl_device_noinline int svm_node_closure_bsdf(
- const KernelGlobals *kg, ShaderData *sd, float *stack, uint4 node, int path_flag, int offset)
+ccl_device_noinline int svm_node_closure_bsdf(ccl_global const KernelGlobals *kg,
+ ccl_private ShaderData *sd,
+ ccl_private float *stack,
+ uint4 node,
+ int path_flag,
+ int offset)
{
uint type, param1_offset, param2_offset;
@@ -213,8 +221,8 @@ ccl_device_noinline int svm_node_closure_bsdf(
if (subsurface <= CLOSURE_WEIGHT_CUTOFF && diffuse_weight > CLOSURE_WEIGHT_CUTOFF) {
float3 diff_weight = weight * base_color * diffuse_weight;
- PrincipledDiffuseBsdf *bsdf = (PrincipledDiffuseBsdf *)bsdf_alloc(
- sd, sizeof(PrincipledDiffuseBsdf), diff_weight);
+ ccl_private PrincipledDiffuseBsdf *bsdf = (ccl_private PrincipledDiffuseBsdf *)
+ bsdf_alloc(sd, sizeof(PrincipledDiffuseBsdf), diff_weight);
if (bsdf) {
bsdf->N = N;
@@ -225,7 +233,7 @@ ccl_device_noinline int svm_node_closure_bsdf(
}
}
else if (subsurface > CLOSURE_WEIGHT_CUTOFF) {
- Bssrdf *bssrdf = bssrdf_alloc(sd, subsurf_weight);
+ ccl_private Bssrdf *bssrdf = bssrdf_alloc(sd, subsurf_weight);
if (bssrdf) {
bssrdf->radius = subsurface_radius * subsurface;
@@ -247,7 +255,7 @@ ccl_device_noinline int svm_node_closure_bsdf(
if (diffuse_weight > CLOSURE_WEIGHT_CUTOFF) {
float3 diff_weight = weight * base_color * diffuse_weight;
- PrincipledDiffuseBsdf *bsdf = (PrincipledDiffuseBsdf *)bsdf_alloc(
+ ccl_private PrincipledDiffuseBsdf *bsdf = (ccl_private PrincipledDiffuseBsdf *)bsdf_alloc(
sd, sizeof(PrincipledDiffuseBsdf), diff_weight);
if (bsdf) {
@@ -273,7 +281,7 @@ ccl_device_noinline int svm_node_closure_bsdf(
float3 sheen_weight = weight * sheen * sheen_color * diffuse_weight;
- PrincipledSheenBsdf *bsdf = (PrincipledSheenBsdf *)bsdf_alloc(
+ ccl_private PrincipledSheenBsdf *bsdf = (ccl_private PrincipledSheenBsdf *)bsdf_alloc(
sd, sizeof(PrincipledSheenBsdf), sheen_weight);
if (bsdf) {
@@ -292,11 +300,12 @@ ccl_device_noinline int svm_node_closure_bsdf(
(specular > CLOSURE_WEIGHT_CUTOFF || metallic > CLOSURE_WEIGHT_CUTOFF)) {
float3 spec_weight = weight * specular_weight;
- MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc(
+ ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
sd, sizeof(MicrofacetBsdf), spec_weight);
- MicrofacetExtra *extra = (bsdf != NULL) ? (MicrofacetExtra *)closure_alloc_extra(
- sd, sizeof(MicrofacetExtra)) :
- NULL;
+ ccl_private MicrofacetExtra *extra =
+ (bsdf != NULL) ?
+ (ccl_private MicrofacetExtra *)closure_alloc_extra(sd, sizeof(MicrofacetExtra)) :
+ NULL;
if (bsdf && extra) {
bsdf->N = N;
@@ -355,11 +364,12 @@ ccl_device_noinline int svm_node_closure_bsdf(
if (kernel_data.integrator.caustics_reflective || (path_flag & PATH_RAY_DIFFUSE) == 0)
# endif
{
- MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc(
+ ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
sd, sizeof(MicrofacetBsdf), glass_weight * fresnel);
- MicrofacetExtra *extra = (bsdf != NULL) ? (MicrofacetExtra *)closure_alloc_extra(
- sd, sizeof(MicrofacetExtra)) :
- NULL;
+ ccl_private MicrofacetExtra *extra =
+ (bsdf != NULL) ? (ccl_private MicrofacetExtra *)closure_alloc_extra(
+ sd, sizeof(MicrofacetExtra)) :
+ NULL;
if (bsdf && extra) {
bsdf->N = N;
@@ -384,7 +394,7 @@ ccl_device_noinline int svm_node_closure_bsdf(
if (kernel_data.integrator.caustics_refractive || (path_flag & PATH_RAY_DIFFUSE) == 0)
# endif
{
- MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc(
+ ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
sd, sizeof(MicrofacetBsdf), base_color * glass_weight * (1.0f - fresnel));
if (bsdf) {
bsdf->N = N;
@@ -407,11 +417,12 @@ ccl_device_noinline int svm_node_closure_bsdf(
}
}
else { /* use multi-scatter GGX */
- MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc(
+ ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
sd, sizeof(MicrofacetBsdf), glass_weight);
- MicrofacetExtra *extra = (bsdf != NULL) ? (MicrofacetExtra *)closure_alloc_extra(
- sd, sizeof(MicrofacetExtra)) :
- NULL;
+ ccl_private MicrofacetExtra *extra =
+ (bsdf != NULL) ? (ccl_private MicrofacetExtra *)closure_alloc_extra(
+ sd, sizeof(MicrofacetExtra)) :
+ NULL;
if (bsdf && extra) {
bsdf->N = N;
@@ -440,10 +451,12 @@ ccl_device_noinline int svm_node_closure_bsdf(
if (kernel_data.integrator.caustics_reflective || (path_flag & PATH_RAY_DIFFUSE) == 0) {
# endif
if (clearcoat > CLOSURE_WEIGHT_CUTOFF) {
- MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc(sd, sizeof(MicrofacetBsdf), weight);
- MicrofacetExtra *extra = (bsdf != NULL) ? (MicrofacetExtra *)closure_alloc_extra(
- sd, sizeof(MicrofacetExtra)) :
- NULL;
+ ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
+ sd, sizeof(MicrofacetBsdf), weight);
+ ccl_private MicrofacetExtra *extra =
+ (bsdf != NULL) ?
+ (ccl_private MicrofacetExtra *)closure_alloc_extra(sd, sizeof(MicrofacetExtra)) :
+ NULL;
if (bsdf && extra) {
bsdf->N = clearcoat_normal;
@@ -471,7 +484,8 @@ ccl_device_noinline int svm_node_closure_bsdf(
#endif /* __PRINCIPLED__ */
case CLOSURE_BSDF_DIFFUSE_ID: {
float3 weight = sd->svm_closure_weight * mix_weight;
- OrenNayarBsdf *bsdf = (OrenNayarBsdf *)bsdf_alloc(sd, sizeof(OrenNayarBsdf), weight);
+ ccl_private OrenNayarBsdf *bsdf = (ccl_private OrenNayarBsdf *)bsdf_alloc(
+ sd, sizeof(OrenNayarBsdf), weight);
if (bsdf) {
bsdf->N = N;
@@ -479,7 +493,7 @@ ccl_device_noinline int svm_node_closure_bsdf(
float roughness = param1;
if (roughness == 0.0f) {
- sd->flag |= bsdf_diffuse_setup((DiffuseBsdf *)bsdf);
+ sd->flag |= bsdf_diffuse_setup((ccl_private DiffuseBsdf *)bsdf);
}
else {
bsdf->roughness = roughness;
@@ -490,7 +504,8 @@ ccl_device_noinline int svm_node_closure_bsdf(
}
case CLOSURE_BSDF_TRANSLUCENT_ID: {
float3 weight = sd->svm_closure_weight * mix_weight;
- DiffuseBsdf *bsdf = (DiffuseBsdf *)bsdf_alloc(sd, sizeof(DiffuseBsdf), weight);
+ ccl_private DiffuseBsdf *bsdf = (ccl_private DiffuseBsdf *)bsdf_alloc(
+ sd, sizeof(DiffuseBsdf), weight);
if (bsdf) {
bsdf->N = N;
@@ -513,7 +528,8 @@ ccl_device_noinline int svm_node_closure_bsdf(
break;
#endif
float3 weight = sd->svm_closure_weight * mix_weight;
- MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc(sd, sizeof(MicrofacetBsdf), weight);
+ ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
+ sd, sizeof(MicrofacetBsdf), weight);
if (!bsdf) {
break;
@@ -559,7 +575,8 @@ ccl_device_noinline int svm_node_closure_bsdf(
sd->flag |= bsdf_microfacet_ggx_setup(bsdf);
else if (type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID) {
kernel_assert(stack_valid(data_node.w));
- bsdf->extra = (MicrofacetExtra *)closure_alloc_extra(sd, sizeof(MicrofacetExtra));
+ bsdf->extra = (ccl_private MicrofacetExtra *)closure_alloc_extra(sd,
+ sizeof(MicrofacetExtra));
if (bsdf->extra) {
bsdf->extra->color = stack_load_float3(stack, data_node.w);
bsdf->extra->cspec0 = make_float3(0.0f, 0.0f, 0.0f);
@@ -581,7 +598,8 @@ ccl_device_noinline int svm_node_closure_bsdf(
break;
#endif
float3 weight = sd->svm_closure_weight * mix_weight;
- MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc(sd, sizeof(MicrofacetBsdf), weight);
+ ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
+ sd, sizeof(MicrofacetBsdf), weight);
if (bsdf) {
bsdf->N = N;
@@ -639,7 +657,7 @@ ccl_device_noinline int svm_node_closure_bsdf(
if (kernel_data.integrator.caustics_reflective || (path_flag & PATH_RAY_DIFFUSE) == 0)
#endif
{
- MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc(
+ ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
sd, sizeof(MicrofacetBsdf), weight * fresnel);
if (bsdf) {
@@ -655,7 +673,7 @@ ccl_device_noinline int svm_node_closure_bsdf(
if (kernel_data.integrator.caustics_refractive || (path_flag & PATH_RAY_DIFFUSE) == 0)
#endif
{
- MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc(
+ ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
sd, sizeof(MicrofacetBsdf), weight * (1.0f - fresnel));
if (bsdf) {
@@ -675,12 +693,14 @@ ccl_device_noinline int svm_node_closure_bsdf(
break;
#endif
float3 weight = sd->svm_closure_weight * mix_weight;
- MicrofacetBsdf *bsdf = (MicrofacetBsdf *)bsdf_alloc(sd, sizeof(MicrofacetBsdf), weight);
+ ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
+ sd, sizeof(MicrofacetBsdf), weight);
if (!bsdf) {
break;
}
- MicrofacetExtra *extra = (MicrofacetExtra *)closure_alloc_extra(sd, sizeof(MicrofacetExtra));
+ ccl_private MicrofacetExtra *extra = (ccl_private MicrofacetExtra *)closure_alloc_extra(
+ sd, sizeof(MicrofacetExtra));
if (!extra) {
break;
}
@@ -706,7 +726,8 @@ ccl_device_noinline int svm_node_closure_bsdf(
}
case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID: {
float3 weight = sd->svm_closure_weight * mix_weight;
- VelvetBsdf *bsdf = (VelvetBsdf *)bsdf_alloc(sd, sizeof(VelvetBsdf), weight);
+ ccl_private VelvetBsdf *bsdf = (ccl_private VelvetBsdf *)bsdf_alloc(
+ sd, sizeof(VelvetBsdf), weight);
if (bsdf) {
bsdf->N = N;
@@ -724,7 +745,8 @@ ccl_device_noinline int svm_node_closure_bsdf(
#endif
case CLOSURE_BSDF_DIFFUSE_TOON_ID: {
float3 weight = sd->svm_closure_weight * mix_weight;
- ToonBsdf *bsdf = (ToonBsdf *)bsdf_alloc(sd, sizeof(ToonBsdf), weight);
+ ccl_private ToonBsdf *bsdf = (ccl_private ToonBsdf *)bsdf_alloc(
+ sd, sizeof(ToonBsdf), weight);
if (bsdf) {
bsdf->N = N;
@@ -771,11 +793,11 @@ ccl_device_noinline int svm_node_closure_bsdf(
random = stack_load_float_default(stack, random_ofs, data_node3.y);
}
- PrincipledHairBSDF *bsdf = (PrincipledHairBSDF *)bsdf_alloc(
+ ccl_private PrincipledHairBSDF *bsdf = (ccl_private PrincipledHairBSDF *)bsdf_alloc(
sd, sizeof(PrincipledHairBSDF), weight);
if (bsdf) {
- PrincipledHairExtra *extra = (PrincipledHairExtra *)closure_alloc_extra(
- sd, sizeof(PrincipledHairExtra));
+ ccl_private PrincipledHairExtra *extra = (ccl_private PrincipledHairExtra *)
+ closure_alloc_extra(sd, sizeof(PrincipledHairExtra));
if (!extra)
break;
@@ -854,7 +876,8 @@ ccl_device_noinline int svm_node_closure_bsdf(
case CLOSURE_BSDF_HAIR_TRANSMISSION_ID: {
float3 weight = sd->svm_closure_weight * mix_weight;
- HairBsdf *bsdf = (HairBsdf *)bsdf_alloc(sd, sizeof(HairBsdf), weight);
+ ccl_private HairBsdf *bsdf = (ccl_private HairBsdf *)bsdf_alloc(
+ sd, sizeof(HairBsdf), weight);
if (bsdf) {
bsdf->N = N;
@@ -889,7 +912,7 @@ ccl_device_noinline int svm_node_closure_bsdf(
case CLOSURE_BSSRDF_RANDOM_WALK_ID:
case CLOSURE_BSSRDF_RANDOM_WALK_FIXED_RADIUS_ID: {
float3 weight = sd->svm_closure_weight * mix_weight;
- Bssrdf *bssrdf = bssrdf_alloc(sd, weight);
+ ccl_private Bssrdf *bssrdf = bssrdf_alloc(sd, weight);
if (bssrdf) {
/* disable in case of diffuse ancestor, can't see it well then and
@@ -921,9 +944,9 @@ ccl_device_noinline int svm_node_closure_bsdf(
}
template<ShaderType shader_type>
-ccl_device_noinline void svm_node_closure_volume(const KernelGlobals *kg,
- ShaderData *sd,
- float *stack,
+ccl_device_noinline void svm_node_closure_volume(ccl_global const KernelGlobals *kg,
+ ccl_private ShaderData *sd,
+ ccl_private float *stack,
uint4 node)
{
#ifdef __VOLUME__
@@ -958,7 +981,7 @@ ccl_device_noinline void svm_node_closure_volume(const KernelGlobals *kg,
/* Add closure for volume scattering. */
if (type == CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID) {
- HenyeyGreensteinVolume *volume = (HenyeyGreensteinVolume *)bsdf_alloc(
+ ccl_private HenyeyGreensteinVolume *volume = (ccl_private HenyeyGreensteinVolume *)bsdf_alloc(
sd, sizeof(HenyeyGreensteinVolume), weight);
if (volume) {
@@ -976,8 +999,12 @@ ccl_device_noinline void svm_node_closure_volume(const KernelGlobals *kg,
}
template<ShaderType shader_type>
-ccl_device_noinline int svm_node_principled_volume(
- const KernelGlobals *kg, ShaderData *sd, float *stack, uint4 node, int path_flag, int offset)
+ccl_device_noinline int svm_node_principled_volume(ccl_global const KernelGlobals *kg,
+ ccl_private ShaderData *sd,
+ ccl_private float *stack,
+ uint4 node,
+ int path_flag,
+ int offset)
{
#ifdef __VOLUME__
uint4 value_node = read_node(kg, &offset);
@@ -1023,7 +1050,7 @@ ccl_device_noinline int svm_node_principled_volume(
}
/* Add closure for volume scattering. */
- HenyeyGreensteinVolume *volume = (HenyeyGreensteinVolume *)bsdf_alloc(
+ ccl_private HenyeyGreensteinVolume *volume = (ccl_private HenyeyGreensteinVolume *)bsdf_alloc(
sd, sizeof(HenyeyGreensteinVolume), color * density);
if (volume) {
float anisotropy = (stack_valid(anisotropy_offset)) ?
@@ -1087,7 +1114,9 @@ ccl_device_noinline int svm_node_principled_volume(
return offset;
}
-ccl_device_noinline void svm_node_closure_emission(ShaderData *sd, float *stack, uint4 node)
+ccl_device_noinline void svm_node_closure_emission(ccl_private ShaderData *sd,
+ ccl_private float *stack,
+ uint4 node)
{
uint mix_weight_offset = node.y;
float3 weight = sd->svm_closure_weight;
@@ -1104,7 +1133,9 @@ ccl_device_noinline void svm_node_closure_emission(ShaderData *sd, float *stack,
emission_setup(sd, weight);
}
-ccl_device_noinline void svm_node_closure_background(ShaderData *sd, float *stack, uint4 node)
+ccl_device_noinline void svm_node_closure_background(ccl_private ShaderData *sd,
+ ccl_private float *stack,
+ uint4 node)
{
uint mix_weight_offset = node.y;
float3 weight = sd->svm_closure_weight;
@@ -1121,7 +1152,9 @@ ccl_device_noinline void svm_node_closure_background(ShaderData *sd, float *stac
background_setup(sd, weight);
}
-ccl_device_noinline void svm_node_closure_holdout(ShaderData *sd, float *stack, uint4 node)
+ccl_device_noinline void svm_node_closure_holdout(ccl_private ShaderData *sd,
+ ccl_private float *stack,
+ uint4 node)
{
uint mix_weight_offset = node.y;
@@ -1142,26 +1175,28 @@ ccl_device_noinline void svm_node_closure_holdout(ShaderData *sd, float *stack,
/* Closure Nodes */
-ccl_device_inline void svm_node_closure_store_weight(ShaderData *sd, float3 weight)
+ccl_device_inline void svm_node_closure_store_weight(ccl_private ShaderData *sd, float3 weight)
{
sd->svm_closure_weight = weight;
}
-ccl_device void svm_node_closure_set_weight(ShaderData *sd, uint r, uint g, uint b)
+ccl_device void svm_node_closure_set_weight(ccl_private ShaderData *sd, uint r, uint g, uint b)
{
float3 weight = make_float3(__uint_as_float(r), __uint_as_float(g), __uint_as_float(b));
svm_node_closure_store_weight(sd, weight);
}
-ccl_device void svm_node_closure_weight(ShaderData *sd, float *stack, uint weight_offset)
+ccl_device void svm_node_closure_weight(ccl_private ShaderData *sd,
+ ccl_private float *stack,
+ uint weight_offset)
{
float3 weight = stack_load_float3(stack, weight_offset);
svm_node_closure_store_weight(sd, weight);
}
-ccl_device_noinline void svm_node_emission_weight(const KernelGlobals *kg,
- ShaderData *sd,
- float *stack,
+ccl_device_noinline void svm_node_emission_weight(ccl_global const KernelGlobals *kg,
+ ccl_private ShaderData *sd,
+ ccl_private float *stack,
uint4 node)
{
uint color_offset = node.y;
@@ -1173,7 +1208,9 @@ ccl_device_noinline void svm_node_emission_weight(const KernelGlobals *kg,
svm_node_closure_store_weight(sd, weight);
}
-ccl_device_noinline void svm_node_mix_closure(ShaderData *sd, float *stack, uint4 node)
+ccl_device_noinline void svm_node_mix_closure(ccl_private ShaderData *sd,
+ ccl_private float *stack,
+ uint4 node)
{
/* fetch weight from blend input, previous mix closures,
* and write to stack to be used by closure nodes later */
@@ -1195,8 +1232,11 @@ ccl_device_noinline void svm_node_mix_closure(ShaderData *sd, float *stack, uint
/* (Bump) normal */
-ccl_device void svm_node_set_normal(
- const KernelGlobals *kg, ShaderData *sd, float *stack, uint in_direction, uint out_normal)
+ccl_device void svm_node_set_normal(ccl_global const KernelGlobals *kg,
+ ccl_private ShaderData *sd,
+ ccl_private float *stack,
+ uint in_direction,
+ uint out_normal)
{
float3 normal = stack_load_float3(stack, in_direction);
sd->N = normal;