author    | Brecht Van Lommel <brecht@blender.org> | 2020-06-10 19:55:33 +0300
committer | Brecht Van Lommel <brecht@blender.org> | 2020-06-22 14:28:01 +0300
commit    | d1ef5146d72d40f97fdcbf28e96da49193c21dea (patch)
tree      | 7a19a24bd6b809c7de72b4e2499d62b8740e639a /intern/cycles/kernel/geom
parent    | 1de0e13af619e405f351bf42924f819dc3a9bc44 (diff)
Cycles: remove SIMD BVH optimizations, to be replaced by Embree
Ref T73778
Depends on D8011
Maniphest Tasks: T73778
Differential Revision: https://developer.blender.org/D8012
Diffstat (limited to 'intern/cycles/kernel/geom')
-rw-r--r-- | intern/cycles/kernel/geom/geom_curve_intersect.h    | 138
-rw-r--r-- | intern/cycles/kernel/geom/geom_motion_curve.h       |  84
-rw-r--r-- | intern/cycles/kernel/geom/geom_object.h             |  80
-rw-r--r-- | intern/cycles/kernel/geom/geom_triangle_intersect.h | 427
4 files changed, 18 insertions, 711 deletions
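
The hand-written SSE/AVX2 traversal and intersection paths removed in the diff below are superseded by Embree's own vectorized BVH (see D8011 and T73778). As a rough illustration of what takes over that work — this is not the Cycles/Embree integration code, and `scene` is simply assumed to be an already built and committed RTCScene — a minimal Embree 3 single-ray query looks like this:

```cpp
#include <embree3/rtcore.h>
#include <cmath>

/* Trace one ray against a scene that has already been built and committed
 * with rtcCommitScene(). Returns true if any geometry was hit. */
bool scene_intersect_example(RTCScene scene, const float P[3], const float D[3])
{
  RTCRayHit rayhit;
  rayhit.ray.org_x = P[0];
  rayhit.ray.org_y = P[1];
  rayhit.ray.org_z = P[2];
  rayhit.ray.dir_x = D[0];
  rayhit.ray.dir_y = D[1];
  rayhit.ray.dir_z = D[2];
  rayhit.ray.tnear = 0.0f;
  rayhit.ray.tfar = INFINITY; /* Clipped to the hit distance on a hit. */
  rayhit.ray.time = 0.0f;
  rayhit.ray.mask = 0xFFFFFFFF;
  rayhit.ray.id = 0;
  rayhit.ray.flags = 0;
  rayhit.hit.geomID = RTC_INVALID_GEOMETRY_ID;
  rayhit.hit.instID[0] = RTC_INVALID_GEOMETRY_ID;

  RTCIntersectContext context;
  rtcInitIntersectContext(&context);

  /* Embree traverses its own BVH with whatever SIMD width the CPU offers,
   * which is why the per-ISA (SSE/AVX2) intersection code below can be
   * dropped from the kernel. */
  rtcIntersect1(scene, &context, &rayhit);

  return rayhit.hit.geomID != RTC_INVALID_GEOMETRY_ID;
}
```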
diff --git a/intern/cycles/kernel/geom/geom_curve_intersect.h b/intern/cycles/kernel/geom/geom_curve_intersect.h
index 88963bea6ef..87ed0bf201f 100644
--- a/intern/cycles/kernel/geom/geom_curve_intersect.h
+++ b/intern/cycles/kernel/geom/geom_curve_intersect.h
@@ -18,13 +18,6 @@ CCL_NAMESPACE_BEGIN

 #ifdef __HAIR__

-# ifdef __KERNEL_SSE2__
-ccl_device_inline ssef transform_point_T3(const ssef t[3], const ssef &a)
-{
-  return madd(shuffle<0>(a), t[0], madd(shuffle<1>(a), t[1], shuffle<2>(a) * t[2]));
-}
-# endif
-
 /* On CPU pass P and dir by reference to aligned vector. */
 ccl_device_forceinline bool curve_intersect(KernelGlobals *kg,
                                             Intersection *isect,
@@ -55,108 +48,6 @@ ccl_device_forceinline bool curve_intersect(KernelGlobals *kg,
   int flags = kernel_data.curve.curveflags;
   int prim = kernel_tex_fetch(__prim_index, curveAddr);

-# ifdef __KERNEL_SSE2__
-  ssef vdir = load4f(dir);
-  ssef vcurve_coef[4];
-  const float3 *curve_coef = (float3 *)vcurve_coef;
-
-  {
-    ssef dtmp = vdir * vdir;
-    ssef d_ss = mm_sqrt(dtmp + shuffle<2>(dtmp));
-    ssef rd_ss = load1f_first(1.0f) / d_ss;
-
-    ssei v00vec = load4i((ssei *)&kg->__curves.data[prim]);
-    int2 &v00 = (int2 &)v00vec;
-
-    int k0 = v00.x + segment;
-    int k1 = k0 + 1;
-    int ka = max(k0 - 1, v00.x);
-    int kb = min(k1 + 1, v00.x + v00.y - 1);
-
-# if defined(__KERNEL_AVX2__) && defined(__KERNEL_SSE__) && \
-     (!defined(_MSC_VER) || _MSC_VER > 1800)
-    avxf P_curve_0_1, P_curve_2_3;
-    if (is_curve_primitive) {
-      P_curve_0_1 = _mm256_loadu2_m128(&kg->__curve_keys.data[k0].x, &kg->__curve_keys.data[ka].x);
-      P_curve_2_3 = _mm256_loadu2_m128(&kg->__curve_keys.data[kb].x, &kg->__curve_keys.data[k1].x);
-    }
-    else {
-      int fobject = (object == OBJECT_NONE) ? kernel_tex_fetch(__prim_object, curveAddr) : object;
-      motion_curve_keys_avx(kg, fobject, prim, time, ka, k0, k1, kb, &P_curve_0_1, &P_curve_2_3);
-    }
-# else /* __KERNEL_AVX2__ */
-    ssef P_curve[4];
-
-    if (is_curve_primitive) {
-      P_curve[0] = load4f(&kg->__curve_keys.data[ka].x);
-      P_curve[1] = load4f(&kg->__curve_keys.data[k0].x);
-      P_curve[2] = load4f(&kg->__curve_keys.data[k1].x);
-      P_curve[3] = load4f(&kg->__curve_keys.data[kb].x);
-    }
-    else {
-      int fobject = (object == OBJECT_NONE) ? kernel_tex_fetch(__prim_object, curveAddr) : object;
-      motion_curve_keys(kg, fobject, prim, time, ka, k0, k1, kb, (float4 *)&P_curve);
-    }
-# endif /* __KERNEL_AVX2__ */
-
-    ssef rd_sgn = set_sign_bit<0, 1, 1, 1>(shuffle<0>(rd_ss));
-    ssef mul_zxxy = shuffle<2, 0, 0, 1>(vdir) * rd_sgn;
-    ssef mul_yz = shuffle<1, 2, 1, 2>(vdir) * mul_zxxy;
-    ssef mul_shuf = shuffle<0, 1, 2, 3>(mul_zxxy, mul_yz);
-    ssef vdir0 = vdir & cast(ssei(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0));
-
-    ssef htfm0 = shuffle<0, 2, 0, 3>(mul_shuf, vdir0);
-    ssef htfm1 = shuffle<1, 0, 1, 3>(load1f_first(extract<0>(d_ss)), vdir0);
-    ssef htfm2 = shuffle<1, 3, 2, 3>(mul_shuf, vdir0);
-
-# if defined(__KERNEL_AVX2__) && defined(__KERNEL_SSE__) && \
-     (!defined(_MSC_VER) || _MSC_VER > 1800)
-    const avxf vPP = _mm256_broadcast_ps(&P.m128);
-    const avxf htfm00 = avxf(htfm0.m128, htfm0.m128);
-    const avxf htfm11 = avxf(htfm1.m128, htfm1.m128);
-    const avxf htfm22 = avxf(htfm2.m128, htfm2.m128);
-
-    const avxf p01 = madd(
-        shuffle<0>(P_curve_0_1 - vPP),
-        htfm00,
-        madd(shuffle<1>(P_curve_0_1 - vPP), htfm11, shuffle<2>(P_curve_0_1 - vPP) * htfm22));
-    const avxf p23 = madd(
-        shuffle<0>(P_curve_2_3 - vPP),
-        htfm00,
-        madd(shuffle<1>(P_curve_2_3 - vPP), htfm11, shuffle<2>(P_curve_2_3 - vPP) * htfm22));
-
-    const ssef p0 = _mm256_castps256_ps128(p01);
-    const ssef p1 = _mm256_extractf128_ps(p01, 1);
-    const ssef p2 = _mm256_castps256_ps128(p23);
-    const ssef p3 = _mm256_extractf128_ps(p23, 1);
-
-    const ssef P_curve_1 = _mm256_extractf128_ps(P_curve_0_1, 1);
-    r_st = ((float4 &)P_curve_1).w;
-    const ssef P_curve_2 = _mm256_castps256_ps128(P_curve_2_3);
-    r_en = ((float4 &)P_curve_2).w;
-# else /* __KERNEL_AVX2__ */
-    ssef htfm[] = {htfm0, htfm1, htfm2};
-    ssef vP = load4f(P);
-    ssef p0 = transform_point_T3(htfm, P_curve[0] - vP);
-    ssef p1 = transform_point_T3(htfm, P_curve[1] - vP);
-    ssef p2 = transform_point_T3(htfm, P_curve[2] - vP);
-    ssef p3 = transform_point_T3(htfm, P_curve[3] - vP);
-
-    r_st = ((float4 &)P_curve[1]).w;
-    r_en = ((float4 &)P_curve[2]).w;
-# endif /* __KERNEL_AVX2__ */
-
-    float fc = 0.71f;
-    ssef vfc = ssef(fc);
-    ssef vfcxp3 = vfc * p3;
-
-    vcurve_coef[0] = p1;
-    vcurve_coef[1] = vfc * (p2 - p0);
-    vcurve_coef[2] = madd(
-        ssef(fc * 2.0f), p0, madd(ssef(fc - 3.0f), p1, msub(ssef(3.0f - 2.0f * fc), p2, vfcxp3)));
-    vcurve_coef[3] = msub(ssef(fc - 2.0f), p2 - p1, msub(vfc, p0, vfcxp3));
-  }
-# else
   float3 curve_coef[4];

   /* curve Intersection check */
@@ -212,7 +103,6 @@ ccl_device_forceinline bool curve_intersect(KernelGlobals *kg,
     r_st = P_curve[1].w;
     r_en = P_curve[2].w;
   }
-# endif

   float r_curr = max(r_st, r_en);

@@ -275,23 +165,6 @@ ccl_device_forceinline bool curve_intersect(KernelGlobals *kg,
     const float i_st = tree * resol;
     const float i_en = i_st + (level * resol);

-# ifdef __KERNEL_SSE2__
-    ssef vi_st = ssef(i_st), vi_en = ssef(i_en);
-    ssef vp_st = madd(madd(madd(vcurve_coef[3], vi_st, vcurve_coef[2]), vi_st, vcurve_coef[1]),
-                      vi_st,
-                      vcurve_coef[0]);
-    ssef vp_en = madd(madd(madd(vcurve_coef[3], vi_en, vcurve_coef[2]), vi_en, vcurve_coef[1]),
-                      vi_en,
-                      vcurve_coef[0]);
-
-    ssef vbmin = min(vp_st, vp_en);
-    ssef vbmax = max(vp_st, vp_en);
-
-    float3 &bmin = (float3 &)vbmin, &bmax = (float3 &)vbmax;
-    float &bminx = bmin.x, &bminy = bmin.y, &bminz = bmin.z;
-    float &bmaxx = bmax.x, &bmaxy = bmax.y, &bmaxz = bmax.z;
-    float3 &p_st = (float3 &)vp_st, &p_en = (float3 &)vp_en;
-# else
     float3 p_st = ((curve_coef[3] * i_st + curve_coef[2]) * i_st + curve_coef[1]) * i_st +
                   curve_coef[0];
     float3 p_en = ((curve_coef[3] * i_en + curve_coef[2]) * i_en + curve_coef[1]) * i_en +
@@ -303,7 +176,6 @@ ccl_device_forceinline bool curve_intersect(KernelGlobals *kg,
     float bmaxy = max(p_st.y, p_en.y);
     float bminz = min(p_st.z, p_en.z);
     float bmaxz = max(p_st.z, p_en.z);
-# endif

     if (xextrem[0] >= i_st && xextrem[0] <= i_en) {
       bminx = min(bminx, xextrem[1]);
@@ -351,23 +223,13 @@ ccl_device_forceinline bool curve_intersect(KernelGlobals *kg,

     if (flags & CURVE_KN_RIBBONS) {
       float3 tg = (p_en - p_st);
-# ifdef __KERNEL_SSE__
-      const float3 tg_sq = tg * tg;
-      float w = tg_sq.x + tg_sq.y;
-# else
       float w = tg.x * tg.x + tg.y * tg.y;
-# endif
       if (w == 0) {
         tree++;
         level = tree & -tree;
         continue;
       }
-# ifdef __KERNEL_SSE__
-      const float3 p_sttg = p_st * tg;
-      w = -(p_sttg.x + p_sttg.y) / w;
-# else
       w = -(p_st.x * tg.x + p_st.y * tg.y) / w;
-# endif
       w = saturate(w);

       /* compute u on the curve segment */
diff --git a/intern/cycles/kernel/geom/geom_motion_curve.h b/intern/cycles/kernel/geom/geom_motion_curve.h
index dd7429c02bd..0e7a05eaac2 100644
--- a/intern/cycles/kernel/geom/geom_motion_curve.h
+++ b/intern/cycles/kernel/geom/geom_motion_curve.h
@@ -106,15 +106,15 @@ ccl_device_inline void motion_curve_keys(
 }

 ccl_device_inline void motion_curve_keys_for_step(KernelGlobals *kg,
-                                                           int offset,
-                                                           int numkeys,
-                                                           int numsteps,
-                                                           int step,
-                                                           int k0,
-                                                           int k1,
-                                                           int k2,
-                                                           int k3,
-                                                           float4 keys[4])
+                                                   int offset,
+                                                   int numkeys,
+                                                   int numsteps,
+                                                   int step,
+                                                   int k0,
+                                                   int k1,
+                                                   int k2,
+                                                   int k3,
+                                                   float4 keys[4])
 {
   if (step == numsteps) {
     /* center step: regular key location */
@@ -139,14 +139,14 @@ ccl_device_inline void motion_curve_keys_for_step(KernelGlobals *kg,

 /* return 2 curve key locations */
 ccl_device_inline void motion_curve_keys(KernelGlobals *kg,
-                                                  int object,
-                                                  int prim,
-                                                  float time,
-                                                  int k0,
-                                                  int k1,
-                                                  int k2,
-                                                  int k3,
-                                                  float4 keys[4])
+                                         int object,
+                                         int prim,
+                                         float time,
+                                         int k0,
+                                         int k1,
+                                         int k2,
+                                         int k3,
+                                         float4 keys[4])
 {
   /* get motion info */
   int numsteps, numkeys;
@@ -166,8 +166,7 @@ ccl_device_inline void motion_curve_keys(KernelGlobals *kg,
   float4 next_keys[4];

   motion_curve_keys_for_step(kg, offset, numkeys, numsteps, step, k0, k1, k2, k3, keys);
-  motion_curve_keys_for_step(
-      kg, offset, numkeys, numsteps, step + 1, k0, k1, k2, k3, next_keys);
+  motion_curve_keys_for_step(kg, offset, numkeys, numsteps, step + 1, k0, k1, k2, k3, next_keys);

   /* interpolate between steps */
   keys[0] = (1.0f - t) * keys[0] + t * next_keys[0];
@@ -176,53 +175,6 @@ ccl_device_inline void motion_curve_keys(KernelGlobals *kg,
   keys[3] = (1.0f - t) * keys[3] + t * next_keys[3];
 }

-# if defined(__KERNEL_AVX2__) && defined(__KERNEL_SSE__)
-/* Similar to above, but returns keys as pair of two AVX registers with each
- * holding two float4.
- */
-ccl_device_inline void motion_curve_keys_avx(KernelGlobals *kg,
-                                             int object,
-                                             int prim,
-                                             float time,
-                                             int k0,
-                                             int k1,
-                                             int k2,
-                                             int k3,
-                                             avxf *out_keys_0_1,
-                                             avxf *out_keys_2_3)
-{
-  /* Get motion info. */
-  int numsteps, numkeys;
-  object_motion_info(kg, object, &numsteps, NULL, &numkeys);
-
-  /* Figure out which steps we need to fetch and their interpolation factor. */
-  int maxstep = numsteps * 2;
-  int step = min((int)(time * maxstep), maxstep - 1);
-  float t = time * maxstep - step;
-
-  /* Find attribute. */
-  AttributeElement elem;
-  int offset = find_attribute_curve_motion(kg, object, ATTR_STD_MOTION_VERTEX_POSITION, &elem);
-  kernel_assert(offset != ATTR_STD_NOT_FOUND);
-
-  /* Fetch key coordinates. */
-  float4 next_keys[4];
-  float4 keys[4];
-  motion_curve_keys_for_step(kg, offset, numkeys, numsteps, step, k0, k1, k2, k3, keys);
-  motion_curve_keys_for_step(
-      kg, offset, numkeys, numsteps, step + 1, k0, k1, k2, k3, next_keys);
-
-  const avxf keys_0_1 = avxf(keys[0].m128, keys[1].m128);
-  const avxf keys_2_3 = avxf(keys[2].m128, keys[3].m128);
-  const avxf next_keys_0_1 = avxf(next_keys[0].m128, next_keys[1].m128);
-  const avxf next_keys_2_3 = avxf(next_keys[2].m128, next_keys[3].m128);
-
-  /* Interpolate between steps. */
-  *out_keys_0_1 = (1.0f - t) * keys_0_1 + t * next_keys_0_1;
-  *out_keys_2_3 = (1.0f - t) * keys_2_3 + t * next_keys_2_3;
-}
-# endif
-
 #endif

 CCL_NAMESPACE_END
diff --git a/intern/cycles/kernel/geom/geom_object.h b/intern/cycles/kernel/geom/geom_object.h
index 3aa68e1f84e..614e2e3b92b 100644
--- a/intern/cycles/kernel/geom/geom_object.h
+++ b/intern/cycles/kernel/geom/geom_object.h
@@ -411,25 +411,10 @@ ccl_device float3 particle_angular_velocity(KernelGlobals *kg, int particle)

 ccl_device_inline float3 bvh_clamp_direction(float3 dir)
 {
-  /* clamp absolute values by exp2f(-80.0f) to avoid division by zero when calculating inverse
-   * direction */
-#if defined(__KERNEL_SSE__) && defined(__KERNEL_SSE2__)
-  const ssef oopes(8.271806E-25f, 8.271806E-25f, 8.271806E-25f, 0.0f);
-  const ssef mask = _mm_cmpgt_ps(fabs(dir), oopes);
-  const ssef signdir = signmsk(dir.m128) | oopes;
-# ifndef __KERNEL_AVX__
-  ssef res = mask & ssef(dir);
-  res = _mm_or_ps(res, _mm_andnot_ps(mask, signdir));
-# else
-  ssef res = _mm_blendv_ps(signdir, dir, mask);
-# endif
-  return float3(res);
-#else /* __KERNEL_SSE__ && __KERNEL_SSE2__ */
   const float ooeps = 8.271806E-25f;
   return make_float3((fabsf(dir.x) > ooeps) ? dir.x : copysignf(ooeps, dir.x),
                      (fabsf(dir.y) > ooeps) ? dir.y : copysignf(ooeps, dir.y),
                      (fabsf(dir.z) > ooeps) ? dir.z : copysignf(ooeps, dir.z));
-#endif /* __KERNEL_SSE__ && __KERNEL_SSE2__ */
 }

 ccl_device_inline float3 bvh_inverse_direction(float3 dir)
@@ -457,38 +442,6 @@ ccl_device_inline float bvh_instance_push(
   return t;
 }

-#ifdef __QBVH__
-/* Same as above, but optimized for QBVH scene intersection,
- * which needs to modify two max distances.
- *
- * TODO(sergey): Investigate if passing NULL instead of t1 gets optimized
- * so we can avoid having this duplication.
- */
-ccl_device_inline void qbvh_instance_push(KernelGlobals *kg,
-                                          int object,
-                                          const Ray *ray,
-                                          float3 *P,
-                                          float3 *dir,
-                                          float3 *idir,
-                                          float *t,
-                                          float *t1)
-{
-  Transform tfm = object_fetch_transform(kg, object, OBJECT_INVERSE_TRANSFORM);
-
-  *P = transform_point(&tfm, ray->P);
-
-  float len;
-  *dir = bvh_clamp_direction(normalize_len(transform_direction(&tfm, ray->D), &len));
-  *idir = bvh_inverse_direction(*dir);
-
-  if (*t != FLT_MAX)
-    *t *= len;
-
-  if (*t1 != -FLT_MAX)
-    *t1 *= len;
-}
-#endif
-
 /* Transorm ray to exit static object in BVH */

 ccl_device_inline float bvh_instance_pop(
@@ -551,39 +504,6 @@ ccl_device_inline float bvh_instance_motion_push(KernelGlobals *kg,
   return t;
 }

-# ifdef __QBVH__
-/* Same as above, but optimized for QBVH scene intersection,
- * which needs to modify two max distances.
- *
- * TODO(sergey): Investigate if passing NULL instead of t1 gets optimized
- * so we can avoid having this duplication.
- */
-ccl_device_inline void qbvh_instance_motion_push(KernelGlobals *kg,
-                                                 int object,
-                                                 const Ray *ray,
-                                                 float3 *P,
-                                                 float3 *dir,
-                                                 float3 *idir,
-                                                 float *t,
-                                                 float *t1,
-                                                 Transform *itfm)
-{
-  object_fetch_transform_motion_test(kg, object, ray->time, itfm);
-
-  *P = transform_point(itfm, ray->P);
-
-  float len;
-  *dir = bvh_clamp_direction(normalize_len(transform_direction(itfm, ray->D), &len));
-  *idir = bvh_inverse_direction(*dir);
-
-  if (*t != FLT_MAX)
-    *t *= len;
-
-  if (*t1 != -FLT_MAX)
-    *t1 *= len;
-}
-# endif
-
 /* Transorm ray to exit motion blurred object in BVH */

 ccl_device_inline float bvh_instance_motion_pop(KernelGlobals *kg,
diff --git a/intern/cycles/kernel/geom/geom_triangle_intersect.h b/intern/cycles/kernel/geom/geom_triangle_intersect.h
index 6604806f73b..b0cce274b94 100644
--- a/intern/cycles/kernel/geom/geom_triangle_intersect.h
+++ b/intern/cycles/kernel/geom/geom_triangle_intersect.h
@@ -71,433 +71,6 @@ ccl_device_inline bool triangle_intersect(KernelGlobals *kg,
   return false;
 }

-#ifdef __KERNEL_AVX2__
-# define cross256(A, B, C, D) _mm256_fmsub_ps(A, B, _mm256_mul_ps(C, D))
-ccl_device_inline int ray_triangle_intersect8(KernelGlobals *kg,
-                                              float3 ray_P,
-                                              float3 ray_dir,
-                                              Intersection **isect,
-                                              uint visibility,
-                                              int object,
-                                              __m256 *triA,
-                                              __m256 *triB,
-                                              __m256 *triC,
-                                              int prim_addr,
-                                              int prim_num,
-                                              uint *num_hits,
-                                              uint max_hits,
-                                              int *num_hits_in_instance,
-                                              float isect_t)
-{
-
-  const unsigned char prim_num_mask = (1 << prim_num) - 1;
-
-  const __m256i zero256 = _mm256_setzero_si256();
-
-  const __m256 Px256 = _mm256_set1_ps(ray_P.x);
-  const __m256 Py256 = _mm256_set1_ps(ray_P.y);
-  const __m256 Pz256 = _mm256_set1_ps(ray_P.z);
-
-  const __m256 dirx256 = _mm256_set1_ps(ray_dir.x);
-  const __m256 diry256 = _mm256_set1_ps(ray_dir.y);
-  const __m256 dirz256 = _mm256_set1_ps(ray_dir.z);
-
-  /* Calculate vertices relative to ray origin. */
-  __m256 v0_x_256 = _mm256_sub_ps(triC[0], Px256);
-  __m256 v0_y_256 = _mm256_sub_ps(triC[1], Py256);
-  __m256 v0_z_256 = _mm256_sub_ps(triC[2], Pz256);
-
-  __m256 v1_x_256 = _mm256_sub_ps(triA[0], Px256);
-  __m256 v1_y_256 = _mm256_sub_ps(triA[1], Py256);
-  __m256 v1_z_256 = _mm256_sub_ps(triA[2], Pz256);
-
-  __m256 v2_x_256 = _mm256_sub_ps(triB[0], Px256);
-  __m256 v2_y_256 = _mm256_sub_ps(triB[1], Py256);
-  __m256 v2_z_256 = _mm256_sub_ps(triB[2], Pz256);
-
-  __m256 v0_v1_x_256 = _mm256_add_ps(v0_x_256, v1_x_256);
-  __m256 v0_v1_y_256 = _mm256_add_ps(v0_y_256, v1_y_256);
-  __m256 v0_v1_z_256 = _mm256_add_ps(v0_z_256, v1_z_256);
-
-  __m256 v0_v2_x_256 = _mm256_add_ps(v0_x_256, v2_x_256);
-  __m256 v0_v2_y_256 = _mm256_add_ps(v0_y_256, v2_y_256);
-  __m256 v0_v2_z_256 = _mm256_add_ps(v0_z_256, v2_z_256);
-
-  __m256 v1_v2_x_256 = _mm256_add_ps(v1_x_256, v2_x_256);
-  __m256 v1_v2_y_256 = _mm256_add_ps(v1_y_256, v2_y_256);
-  __m256 v1_v2_z_256 = _mm256_add_ps(v1_z_256, v2_z_256);
-
-  /* Calculate triangle edges. */
-  __m256 e0_x_256 = _mm256_sub_ps(v2_x_256, v0_x_256);
-  __m256 e0_y_256 = _mm256_sub_ps(v2_y_256, v0_y_256);
-  __m256 e0_z_256 = _mm256_sub_ps(v2_z_256, v0_z_256);
-
-  __m256 e1_x_256 = _mm256_sub_ps(v0_x_256, v1_x_256);
-  __m256 e1_y_256 = _mm256_sub_ps(v0_y_256, v1_y_256);
-  __m256 e1_z_256 = _mm256_sub_ps(v0_z_256, v1_z_256);
-
-  __m256 e2_x_256 = _mm256_sub_ps(v1_x_256, v2_x_256);
-  __m256 e2_y_256 = _mm256_sub_ps(v1_y_256, v2_y_256);
-  __m256 e2_z_256 = _mm256_sub_ps(v1_z_256, v2_z_256);
-
-  /* Perform edge tests. */
-  /* cross (AyBz - AzBy, AzBx -AxBz, AxBy - AyBx) */
-  __m256 U_x_256 = cross256(v0_v2_y_256, e0_z_256, v0_v2_z_256, e0_y_256);
-  __m256 U_y_256 = cross256(v0_v2_z_256, e0_x_256, v0_v2_x_256, e0_z_256);
-  __m256 U_z_256 = cross256(v0_v2_x_256, e0_y_256, v0_v2_y_256, e0_x_256);
-  /* vertical dot */
-  __m256 U_256 = _mm256_mul_ps(U_x_256, dirx256);
-  U_256 = _mm256_fmadd_ps(U_y_256, diry256, U_256);
-  U_256 = _mm256_fmadd_ps(U_z_256, dirz256, U_256);
-
-  __m256 V_x_256 = cross256(v0_v1_y_256, e1_z_256, v0_v1_z_256, e1_y_256);
-  __m256 V_y_256 = cross256(v0_v1_z_256, e1_x_256, v0_v1_x_256, e1_z_256);
-  __m256 V_z_256 = cross256(v0_v1_x_256, e1_y_256, v0_v1_y_256, e1_x_256);
-  /* vertical dot */
-  __m256 V_256 = _mm256_mul_ps(V_x_256, dirx256);
-  V_256 = _mm256_fmadd_ps(V_y_256, diry256, V_256);
-  V_256 = _mm256_fmadd_ps(V_z_256, dirz256, V_256);
-
-  __m256 W_x_256 = cross256(v1_v2_y_256, e2_z_256, v1_v2_z_256, e2_y_256);
-  __m256 W_y_256 = cross256(v1_v2_z_256, e2_x_256, v1_v2_x_256, e2_z_256);
-  __m256 W_z_256 = cross256(v1_v2_x_256, e2_y_256, v1_v2_y_256, e2_x_256);
-  /* vertical dot */
-  __m256 W_256 = _mm256_mul_ps(W_x_256, dirx256);
-  W_256 = _mm256_fmadd_ps(W_y_256, diry256, W_256);
-  W_256 = _mm256_fmadd_ps(W_z_256, dirz256, W_256);
-
-  __m256i U_256_1 = _mm256_srli_epi32(_mm256_castps_si256(U_256), 31);
-  __m256i V_256_1 = _mm256_srli_epi32(_mm256_castps_si256(V_256), 31);
-  __m256i W_256_1 = _mm256_srli_epi32(_mm256_castps_si256(W_256), 31);
-  __m256i UVW_256_1 = _mm256_add_epi32(_mm256_add_epi32(U_256_1, V_256_1), W_256_1);
-
-  const __m256i one256 = _mm256_set1_epi32(1);
-  const __m256i two256 = _mm256_set1_epi32(2);
-
-  __m256i mask_minmaxUVW_256 = _mm256_or_si256(_mm256_cmpeq_epi32(one256, UVW_256_1),
-                                               _mm256_cmpeq_epi32(two256, UVW_256_1));
-
-  unsigned char mask_minmaxUVW_pos = _mm256_movemask_ps(_mm256_castsi256_ps(mask_minmaxUVW_256));
-  if ((mask_minmaxUVW_pos & prim_num_mask) == prim_num_mask) {  // all bits set
-    return false;
-  }
-
-  /* Calculate geometry normal and denominator. */
-  __m256 Ng1_x_256 = cross256(e1_y_256, e0_z_256, e1_z_256, e0_y_256);
-  __m256 Ng1_y_256 = cross256(e1_z_256, e0_x_256, e1_x_256, e0_z_256);
-  __m256 Ng1_z_256 = cross256(e1_x_256, e0_y_256, e1_y_256, e0_x_256);
-
-  Ng1_x_256 = _mm256_add_ps(Ng1_x_256, Ng1_x_256);
-  Ng1_y_256 = _mm256_add_ps(Ng1_y_256, Ng1_y_256);
-  Ng1_z_256 = _mm256_add_ps(Ng1_z_256, Ng1_z_256);
-
-  /* vertical dot */
-  __m256 den_256 = _mm256_mul_ps(Ng1_x_256, dirx256);
-  den_256 = _mm256_fmadd_ps(Ng1_y_256, diry256, den_256);
-  den_256 = _mm256_fmadd_ps(Ng1_z_256, dirz256, den_256);
-
-  /* Perform depth test. */
-  __m256 T_256 = _mm256_mul_ps(Ng1_x_256, v0_x_256);
-  T_256 = _mm256_fmadd_ps(Ng1_y_256, v0_y_256, T_256);
-  T_256 = _mm256_fmadd_ps(Ng1_z_256, v0_z_256, T_256);
-
-  const __m256i c0x80000000 = _mm256_set1_epi32(0x80000000);
-  __m256i sign_den_256 = _mm256_and_si256(_mm256_castps_si256(den_256), c0x80000000);
-
-  __m256 sign_T_256 = _mm256_castsi256_ps(
-      _mm256_xor_si256(_mm256_castps_si256(T_256), sign_den_256));
-
-  unsigned char mask_sign_T = _mm256_movemask_ps(sign_T_256);
-  if (((mask_minmaxUVW_pos | mask_sign_T) & prim_num_mask) == prim_num_mask) {
-    return false;
-  }
-
-  __m256 xor_signmask_256 = _mm256_castsi256_ps(
-      _mm256_xor_si256(_mm256_castps_si256(den_256), sign_den_256));
-
-  ccl_align(32) float den8[8], U8[8], V8[8], T8[8], sign_T8[8], xor_signmask8[8];
-  ccl_align(32) unsigned int mask_minmaxUVW8[8];
-
-  if (visibility == PATH_RAY_SHADOW_OPAQUE) {
-    __m256i mask_final_256 = _mm256_cmpeq_epi32(mask_minmaxUVW_256, zero256);
-    __m256i maskden256 = _mm256_cmpeq_epi32(_mm256_castps_si256(den_256), zero256);
-    __m256i mask0 = _mm256_cmpgt_epi32(zero256, _mm256_castps_si256(sign_T_256));
-    __m256 rayt_256 = _mm256_set1_ps((*isect)->t);
-    __m256i mask1 = _mm256_cmpgt_epi32(
-        _mm256_castps_si256(sign_T_256),
-        _mm256_castps_si256(_mm256_mul_ps(
-            _mm256_castsi256_ps(_mm256_xor_si256(_mm256_castps_si256(den_256), sign_den_256)),
-            rayt_256)));
-    mask0 = _mm256_or_si256(mask1, mask0);
-    mask_final_256 = _mm256_andnot_si256(mask0, mask_final_256);  //(~mask_minmaxUVW_pos) &(~mask)
-    mask_final_256 = _mm256_andnot_si256(
-        maskden256, mask_final_256);  //(~mask_minmaxUVW_pos) &(~mask) & (~maskden)
-    int mask_final = _mm256_movemask_ps(_mm256_castsi256_ps(mask_final_256));
-    if ((mask_final & prim_num_mask) == 0) {
-      return false;
-    }
-    while (mask_final != 0) {
-      const int i = __bscf(mask_final);
-      if (i >= prim_num) {
-        return false;
-      }
-# ifdef __VISIBILITY_FLAG__
-      if ((kernel_tex_fetch(__prim_visibility, (prim_addr + i)) & visibility) == 0) {
-        continue;
-      }
-# endif
-      __m256 inv_den_256 = _mm256_rcp_ps(den_256);
-      U_256 = _mm256_mul_ps(U_256, inv_den_256);
-      V_256 = _mm256_mul_ps(V_256, inv_den_256);
-      T_256 = _mm256_mul_ps(T_256, inv_den_256);
-      _mm256_store_ps(U8, U_256);
-      _mm256_store_ps(V8, V_256);
-      _mm256_store_ps(T8, T_256);
-      (*isect)->u = U8[i];
-      (*isect)->v = V8[i];
-      (*isect)->t = T8[i];
-      (*isect)->prim = (prim_addr + i);
-      (*isect)->object = object;
-      (*isect)->type = PRIMITIVE_TRIANGLE;
-      return true;
-    }
-    return false;
-  }
-  else {
-    _mm256_store_ps(den8, den_256);
-    _mm256_store_ps(U8, U_256);
-    _mm256_store_ps(V8, V_256);
-    _mm256_store_ps(T8, T_256);
-
-    _mm256_store_ps(sign_T8, sign_T_256);
-    _mm256_store_ps(xor_signmask8, xor_signmask_256);
-    _mm256_store_si256((__m256i *)mask_minmaxUVW8, mask_minmaxUVW_256);
-
-    int ret = false;
-
-    if (visibility == PATH_RAY_SHADOW) {
-      for (int i = 0; i < prim_num; i++) {
-        if (mask_minmaxUVW8[i]) {
-          continue;
-        }
-# ifdef __VISIBILITY_FLAG__
-        if ((kernel_tex_fetch(__prim_visibility, (prim_addr + i)) & visibility) == 0) {
-          continue;
-        }
-# endif
-        if ((sign_T8[i] < 0.0f) || (sign_T8[i] > (*isect)->t * xor_signmask8[i])) {
-          continue;
-        }
-        if (!den8[i]) {
-          continue;
-        }
-        const float inv_den = 1.0f / den8[i];
-        (*isect)->u = U8[i] * inv_den;
-        (*isect)->v = V8[i] * inv_den;
-        (*isect)->t = T8[i] * inv_den;
-        (*isect)->prim = (prim_addr + i);
-        (*isect)->object = object;
-        (*isect)->type = PRIMITIVE_TRIANGLE;
-        const int prim = kernel_tex_fetch(__prim_index, (*isect)->prim);
-        int shader = 0;
-# ifdef __HAIR__
-        if (kernel_tex_fetch(__prim_type, (*isect)->prim) & PRIMITIVE_ALL_TRIANGLE)
-# endif
-        {
-          shader = kernel_tex_fetch(__tri_shader, prim);
-        }
-# ifdef __HAIR__
-        else {
-          float4 str = kernel_tex_fetch(__curves, prim);
-          shader = __float_as_int(str.z);
-        }
-# endif
-        const int flag = kernel_tex_fetch(__shaders, (shader & SHADER_MASK)).flags;
-        /* If no transparent shadows, all light is blocked. */
-        if (!(flag & SD_HAS_TRANSPARENT_SHADOW)) {
-          return 2;
-        }
-        /* If maximum number of hits reached, block all light. */
-        else if (num_hits == NULL || *num_hits == max_hits) {
-          return 2;
-        }
-        /* Move on to next entry in intersections array. */
-        ret = true;
-        (*isect)++;
-        (*num_hits)++;
-        (*num_hits_in_instance)++;
-        (*isect)->t = isect_t;
-      }
-    }
-    else {
-      for (int i = 0; i < prim_num; i++) {
-        if (mask_minmaxUVW8[i]) {
-          continue;
-        }
-# ifdef __VISIBILITY_FLAG__
-        if ((kernel_tex_fetch(__prim_visibility, (prim_addr + i)) & visibility) == 0) {
-          continue;
-        }
-# endif
-        if ((sign_T8[i] < 0.0f) || (sign_T8[i] > (*isect)->t * xor_signmask8[i])) {
-          continue;
-        }
-        if (!den8[i]) {
-          continue;
-        }
-        const float inv_den = 1.0f / den8[i];
-        (*isect)->u = U8[i] * inv_den;
-        (*isect)->v = V8[i] * inv_den;
-        (*isect)->t = T8[i] * inv_den;
-        (*isect)->prim = (prim_addr + i);
-        (*isect)->object = object;
-        (*isect)->type = PRIMITIVE_TRIANGLE;
-        ret = true;
-      }
-    }
-    return ret;
-  }
-}
-
-ccl_device_inline int triangle_intersect8(KernelGlobals *kg,
-                                          Intersection **isect,
-                                          float3 P,
-                                          float3 dir,
-                                          uint visibility,
-                                          int object,
-                                          int prim_addr,
-                                          int prim_num,
-                                          uint *num_hits,
-                                          uint max_hits,
-                                          int *num_hits_in_instance,
-                                          float isect_t)
-{
-  __m128 tri_a[8], tri_b[8], tri_c[8];
-  __m256 tritmp[12], tri[12];
-  __m256 triA[3], triB[3], triC[3];
-
-  int i, r;
-
-  uint tri_vindex = kernel_tex_fetch(__prim_tri_index, prim_addr);
-  for (i = 0; i < prim_num; i++) {
-    tri_a[i] = *(__m128 *)&kg->__prim_tri_verts.data[tri_vindex++];
-    tri_b[i] = *(__m128 *)&kg->__prim_tri_verts.data[tri_vindex++];
-    tri_c[i] = *(__m128 *)&kg->__prim_tri_verts.data[tri_vindex++];
-  }
-  // create 9 or 12 placeholders
-  tri[0] = _mm256_castps128_ps256(tri_a[0]);  //_mm256_zextps128_ps256
-  tri[1] = _mm256_castps128_ps256(tri_b[0]);  //_mm256_zextps128_ps256
-  tri[2] = _mm256_castps128_ps256(tri_c[0]);  //_mm256_zextps128_ps256
-
-  tri[3] = _mm256_castps128_ps256(tri_a[1]);  //_mm256_zextps128_ps256
-  tri[4] = _mm256_castps128_ps256(tri_b[1]);  //_mm256_zextps128_ps256
-  tri[5] = _mm256_castps128_ps256(tri_c[1]);  //_mm256_zextps128_ps256
-
-  tri[6] = _mm256_castps128_ps256(tri_a[2]);  //_mm256_zextps128_ps256
-  tri[7] = _mm256_castps128_ps256(tri_b[2]);  //_mm256_zextps128_ps256
-  tri[8] = _mm256_castps128_ps256(tri_c[2]);  //_mm256_zextps128_ps256
-
-  if (prim_num > 3) {
-    tri[9] = _mm256_castps128_ps256(tri_a[3]);   //_mm256_zextps128_ps256
-    tri[10] = _mm256_castps128_ps256(tri_b[3]);  //_mm256_zextps128_ps256
-    tri[11] = _mm256_castps128_ps256(tri_c[3]);  //_mm256_zextps128_ps256
-  }
-
-  for (i = 4, r = 0; i < prim_num; i++, r += 3) {
-    tri[r] = _mm256_insertf128_ps(tri[r], tri_a[i], 1);
-    tri[r + 1] = _mm256_insertf128_ps(tri[r + 1], tri_b[i], 1);
-    tri[r + 2] = _mm256_insertf128_ps(tri[r + 2], tri_c[i], 1);
-  }
-
-  //------------------------------------------------
-  // 0!  Xa0 Ya0 Za0 1 Xa4 Ya4 Za4 1
-  // 1!  Xb0 Yb0 Zb0 1 Xb4 Yb4 Zb4 1
-  // 2!  Xc0 Yc0 Zc0 1 Xc4 Yc4 Zc4 1
-
-  // 3!  Xa1 Ya1 Za1 1 Xa5 Ya5 Za5 1
-  // 4!  Xb1 Yb1 Zb1 1 Xb5 Yb5 Zb5 1
-  // 5!  Xc1 Yc1 Zc1 1 Xc5 Yc5 Zc5 1
-
-  // 6!  Xa2 Ya2 Za2 1 Xa6 Ya6 Za6 1
-  // 7!  Xb2 Yb2 Zb2 1 Xb6 Yb6 Zb6 1
-  // 8!  Xc2 Yc2 Zc2 1 Xc6 Yc6 Zc6 1
-
-  // 9!  Xa3 Ya3 Za3 1 Xa7 Ya7 Za7 1
-  // 10! Xb3 Yb3 Zb3 1 Xb7 Yb7 Zb7 1
-  // 11! Xc3 Yc3 Zc3 1 Xc7 Yc7 Zc7 1
-
-  //"transpose"
-  tritmp[0] = _mm256_unpacklo_ps(tri[0], tri[3]);  // 0!  Xa0 Xa1 Ya0 Ya1 Xa4 Xa5 Ya4 Ya5
-  tritmp[1] = _mm256_unpackhi_ps(tri[0], tri[3]);  // 1!  Za0 Za1 1 1 Za4 Za5 1 1
-
-  tritmp[2] = _mm256_unpacklo_ps(tri[6], tri[9]);  // 2!  Xa2 Xa3 Ya2 Ya3 Xa6 Xa7 Ya6 Ya7
-  tritmp[3] = _mm256_unpackhi_ps(tri[6], tri[9]);  // 3!  Za2 Za3 1 1 Za6 Za7 1 1
-
-  tritmp[4] = _mm256_unpacklo_ps(tri[1], tri[4]);  // 4!  Xb0 Xb1 Yb0 Yb1 Xb4 Xb5 Yb4 Yb5
-  tritmp[5] = _mm256_unpackhi_ps(tri[1], tri[4]);  // 5!  Zb0 Zb1 1 1 Zb4 Zb5 1 1
-
-  tritmp[6] = _mm256_unpacklo_ps(tri[7], tri[10]);  // 6!  Xb2 Xb3 Yb2 Yb3 Xb6 Xb7 Yb6 Yb7
-  tritmp[7] = _mm256_unpackhi_ps(tri[7], tri[10]);  // 7!  Zb2 Zb3 1 1 Zb6 Zb7 1 1
-
-  tritmp[8] = _mm256_unpacklo_ps(tri[2], tri[5]);  // 8!  Xc0 Xc1 Yc0 Yc1 Xc4 Xc5 Yc4 Yc5
-  tritmp[9] = _mm256_unpackhi_ps(tri[2], tri[5]);  // 9!  Zc0 Zc1 1 1 Zc4 Zc5 1 1
-
-  tritmp[10] = _mm256_unpacklo_ps(tri[8], tri[11]);  // 10! Xc2 Xc3 Yc2 Yc3 Xc6 Xc7 Yc6 Yc7
-  tritmp[11] = _mm256_unpackhi_ps(tri[8], tri[11]);  // 11! Zc2 Zc3 1 1 Zc6 Zc7 1 1
-
-  /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
-  triA[0] = _mm256_castpd_ps(
-      _mm256_unpacklo_pd(_mm256_castps_pd(tritmp[0]),
-                         _mm256_castps_pd(tritmp[2])));  // Xa0 Xa1 Xa2 Xa3 Xa4 Xa5 Xa6 Xa7
-  triA[1] = _mm256_castpd_ps(
-      _mm256_unpackhi_pd(_mm256_castps_pd(tritmp[0]),
-                         _mm256_castps_pd(tritmp[2])));  // Ya0 Ya1 Ya2 Ya3 Ya4 Ya5 Ya6 Ya7
-  triA[2] = _mm256_castpd_ps(
-      _mm256_unpacklo_pd(_mm256_castps_pd(tritmp[1]),
-                         _mm256_castps_pd(tritmp[3])));  // Za0 Za1 Za2 Za3 Za4 Za5 Za6 Za7
-
-  triB[0] = _mm256_castpd_ps(
-      _mm256_unpacklo_pd(_mm256_castps_pd(tritmp[4]),
-                         _mm256_castps_pd(tritmp[6])));  // Xb0 Xb1 Xb2 Xb3 Xb4 Xb5 Xb5 Xb7
-  triB[1] = _mm256_castpd_ps(
-      _mm256_unpackhi_pd(_mm256_castps_pd(tritmp[4]),
-                         _mm256_castps_pd(tritmp[6])));  // Yb0 Yb1 Yb2 Yb3 Yb4 Yb5 Yb5 Yb7
-  triB[2] = _mm256_castpd_ps(
-      _mm256_unpacklo_pd(_mm256_castps_pd(tritmp[5]),
-                         _mm256_castps_pd(tritmp[7])));  // Zb0 Zb1 Zb2 Zb3 Zb4 Zb5 Zb5 Zb7
-
-  triC[0] = _mm256_castpd_ps(
-      _mm256_unpacklo_pd(_mm256_castps_pd(tritmp[8]),
-                         _mm256_castps_pd(tritmp[10])));  // Xc0 Xc1 Xc2 Xc3 Xc4 Xc5 Xc6 Xc7
-  triC[1] = _mm256_castpd_ps(
-      _mm256_unpackhi_pd(_mm256_castps_pd(tritmp[8]),
-                         _mm256_castps_pd(tritmp[10])));  // Yc0 Yc1 Yc2 Yc3 Yc4 Yc5 Yc6 Yc7
-  triC[2] = _mm256_castpd_ps(
-      _mm256_unpacklo_pd(_mm256_castps_pd(tritmp[9]),
-                         _mm256_castps_pd(tritmp[11])));  // Zc0 Zc1 Zc2 Zc3 Zc4 Zc5 Zc6 Zc7
-
-  /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
-
-  int result = ray_triangle_intersect8(kg,
-                                       P,
-                                       dir,
-                                       isect,
-                                       visibility,
-                                       object,
-                                       triA,
-                                       triB,
-                                       triC,
-                                       prim_addr,
-                                       prim_num,
-                                       num_hits,
-                                       max_hits,
-                                       num_hits_in_instance,
-                                       isect_t);
-  return result;
-}
-
-#endif /* __KERNEL_AVX2__ */
-
 /* Special ray intersection routines for subsurface scattering. In that case we
  * only want to intersect with primitives in the same object, and if case of
  * multiple hits we pick a single random primitive as the intersection point.
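
For reference, the removed ray_triangle_intersect8() evaluates a plane/edge-function triangle test for eight triangles at once: the ray hits a triangle only when U, V and W agree in sign, and the depth test compares the scaled distance T against the ray extent before any division by den. The following is a hedged, minimal scalar sketch of that same test; the helper type and function names (Vec3, v_cross, ray_triangle_scalar) are hypothetical and this is not the Cycles routine:

```cpp
#include <cmath>

struct Vec3 {
  float x, y, z;
};

static Vec3 v_sub(Vec3 a, Vec3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static Vec3 v_add(Vec3 a, Vec3 b) { return {a.x + b.x, a.y + b.y, a.z + b.z}; }
static Vec3 v_cross(Vec3 a, Vec3 b)
{
  return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x};
}
static float v_dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

/* Returns true on a hit with t in [0, tmax]; u/v/t mirror what the 8-wide
 * code stores into the Intersection. */
bool ray_triangle_scalar(Vec3 P, Vec3 dir, float tmax,
                         Vec3 tri_a, Vec3 tri_b, Vec3 tri_c,
                         float *u, float *v, float *t)
{
  /* Vertices relative to the ray origin, and the triangle edges. */
  const Vec3 v0 = v_sub(tri_c, P), v1 = v_sub(tri_a, P), v2 = v_sub(tri_b, P);
  const Vec3 e0 = v_sub(v2, v0), e1 = v_sub(v0, v1), e2 = v_sub(v1, v2);

  /* Edge functions: the ray passes inside the triangle only if U, V and W
   * all have the same sign. */
  const float U = v_dot(v_cross(v_add(v2, v0), e0), dir);
  const float V = v_dot(v_cross(v_add(v0, v1), e1), dir);
  const float W = v_dot(v_cross(v_add(v1, v2), e2), dir);
  if (fminf(fminf(U, V), W) < 0.0f && fmaxf(fmaxf(U, V), W) > 0.0f) {
    return false;
  }

  /* Doubled geometric normal, denominator and scaled depth, as in the 8-wide
   * code: U + V + W == den, so dividing by den yields barycentric u, v and t. */
  const Vec3 Ng1 = v_cross(e1, e0);
  const Vec3 Ng = v_add(Ng1, Ng1);
  const float den = v_dot(Ng, dir);
  const float T = v_dot(Ng, v0);

  /* Depth test without dividing: reject t < 0 or t > tmax, handling den < 0. */
  const float sign_T = (den < 0.0f) ? -T : T;
  if (den == 0.0f || sign_T < 0.0f || sign_T > tmax * fabsf(den)) {
    return false;
  }

  const float inv_den = 1.0f / den;
  *u = U * inv_den;
  *v = V * inv_den;
  *t = T * inv_den;
  return true;
}
```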