git.blender.org/blender.git
Diffstat (limited to 'intern/cycles/blender')
-rw-r--r--  intern/cycles/blender/CMakeLists.txt       |  18
-rw-r--r--  intern/cycles/blender/addon/presets.py     |  26
-rw-r--r--  intern/cycles/blender/addon/properties.py  |  37
-rw-r--r--  intern/cycles/blender/addon/ui.py          |  24
-rw-r--r--  intern/cycles/blender/camera.cpp           |  13
-rw-r--r--  intern/cycles/blender/curves.cpp           | 169
-rw-r--r--  intern/cycles/blender/device.cpp           |   4
-rw-r--r--  intern/cycles/blender/display_driver.cpp   | 175
-rw-r--r--  intern/cycles/blender/display_driver.h     |  23
-rw-r--r--  intern/cycles/blender/geometry.cpp         |   8
-rw-r--r--  intern/cycles/blender/mesh.cpp             | 538
-rw-r--r--  intern/cycles/blender/object.cpp           |   8
-rw-r--r--  intern/cycles/blender/pointcloud.cpp       |  61
-rw-r--r--  intern/cycles/blender/python.cpp           |  13
-rw-r--r--  intern/cycles/blender/session.cpp          |  24
-rw-r--r--  intern/cycles/blender/shader.cpp           | 141
-rw-r--r--  intern/cycles/blender/sync.cpp             |  37
-rw-r--r--  intern/cycles/blender/sync.h               |   5
-rw-r--r--  intern/cycles/blender/util.h               |  10
19 files changed, 807 insertions, 527 deletions
diff --git a/intern/cycles/blender/CMakeLists.txt b/intern/cycles/blender/CMakeLists.txt
index 4919b99cfe0..666b0077a72 100644
--- a/intern/cycles/blender/CMakeLists.txt
+++ b/intern/cycles/blender/CMakeLists.txt
@@ -3,18 +3,19 @@
set(INC
..
- ../../glew-mx
../../guardedalloc
../../mikktspace
../../../source/blender/makesdna
../../../source/blender/makesrna
../../../source/blender/blenlib
+ ../../../source/blender/gpu
+ ../../../source/blender/render
${CMAKE_BINARY_DIR}/source/blender/makesrna/intern
)
set(INC_SYS
+ ${Epoxy_INCLUDE_DIRS}
${PYTHON_INCLUDE_DIRS}
- ${GLEW_INCLUDE_DIR}
)
set(SRC
@@ -64,6 +65,9 @@ set(LIB
cycles_subd
cycles_util
+ bf_intern_mikktspace
+
+ ${Epoxy_LIBRARIES}
${PYTHON_LINKFLAGS}
${PYTHON_LIBRARIES}
)
@@ -87,8 +91,6 @@ set(ADDON_FILES
addon/version_update.py
)
-add_definitions(${GL_DEFINITIONS})
-
if(WITH_CYCLES_DEVICE_HIP)
add_definitions(-DWITH_HIP)
endif()
@@ -101,6 +103,10 @@ if(WITH_MOD_FLUID)
add_definitions(-DWITH_FLUID)
endif()
+if(WITH_TBB)
+ add_definitions(-DWITH_TBB)
+endif()
+
if(WITH_OPENVDB)
add_definitions(-DWITH_OPENVDB ${OPENVDB_DEFINITIONS})
list(APPEND INC_SYS
@@ -128,10 +134,6 @@ if(WITH_OPENIMAGEDENOISE)
)
endif()
-if(WITH_EXPERIMENTAL_FEATURES)
- add_definitions(-DWITH_NEW_CURVES_TYPE)
-endif()
-
blender_add_lib(bf_intern_cycles "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")
add_dependencies(bf_intern_cycles bf_rna)
diff --git a/intern/cycles/blender/addon/presets.py b/intern/cycles/blender/addon/presets.py
index cc6d574da99..e1f08c07eaf 100644
--- a/intern/cycles/blender/addon/presets.py
+++ b/intern/cycles/blender/addon/presets.py
@@ -84,10 +84,36 @@ class AddPresetViewportSampling(AddPresetBase, Operator):
preset_subdir = "cycles/viewport_sampling"
+class AddPresetPerformance(AddPresetBase, Operator):
+    '''Add a Performance Preset'''
+ bl_idname = "render.cycles_performance_preset_add"
+ bl_label = "Add Performance Preset"
+ preset_menu = "CYCLES_PT_performance_presets"
+
+ preset_defines = [
+ "render = bpy.context.scene.render"
+ "cycles = bpy.context.scene.cycles"
+ ]
+
+ preset_values = [
+ "render.threads_mode",
+ "render.use_persistent_data",
+ "cycles.debug_use_spatial_splits",
+ "cycles.debug_use_compact_bvh",
+ "cycles.debug_use_hair_bvh",
+ "cycles.debug_bvh_time_steps",
+ "cycles.use_auto_tile",
+ "cycles.tile_size",
+ ]
+
+ preset_subdir = "cycles/performance"
+
+
classes = (
AddPresetIntegrator,
AddPresetSampling,
AddPresetViewportSampling,
+ AddPresetPerformance,
)
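
For context, once this operator is registered the new performance preset can also be created from Blender's Python console; a minimal sketch, assuming the addon is enabled (the preset name is hypothetical):

    import bpy

    # Saves the current performance settings as a preset file under
    # scripts/presets/cycles/performance/ (name is hypothetical).
    bpy.ops.render.cycles_performance_preset_add(name="Fast BVH")

Applying a preset afterwards goes through the CYCLES_PT_performance_presets menu added in ui.py below, which executes the saved preset script.
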
diff --git a/intern/cycles/blender/addon/properties.py b/intern/cycles/blender/addon/properties.py
index b444a806f8d..699c90183fe 100644
--- a/intern/cycles/blender/addon/properties.py
+++ b/intern/cycles/blender/addon/properties.py
@@ -81,7 +81,7 @@ enum_use_layer_samples = (
)
enum_sampling_pattern = (
- ('SOBOL', "Sobol", "Use Sobol random sampling pattern", 0),
+ ('SOBOL', "Sobol-Burley", "Use Sobol-Burley random sampling pattern", 0),
('PROGRESSIVE_MULTI_JITTER', "Progressive Multi-Jitter", "Use Progressive Multi-Jitter random sampling pattern", 1),
)
@@ -118,7 +118,8 @@ enum_device_type = (
('CUDA', "CUDA", "CUDA", 1),
('OPTIX', "OptiX", "OptiX", 3),
('HIP', "HIP", "HIP", 4),
- ('METAL', "Metal", "Metal", 5)
+ ('METAL', "Metal", "Metal", 5),
+ ('ONEAPI', "oneAPI", "oneAPI", 6)
)
enum_texture_limit = (
@@ -380,7 +381,7 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
sampling_pattern: EnumProperty(
name="Sampling Pattern",
- description="Random sampling pattern used by the integrator. When adaptive sampling is enabled, Progressive Multi-Jitter is always used instead of Sobol",
+ description="Random sampling pattern used by the integrator. When adaptive sampling is enabled, Progressive Multi-Jitter is always used instead of Sobol-Burley",
items=enum_sampling_pattern,
default='PROGRESSIVE_MULTI_JITTER',
)
@@ -692,7 +693,7 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
debug_use_compact_bvh: BoolProperty(
name="Use Compact BVH",
description="Use compact BVH structure (uses less ram but renders slower)",
- default=True,
+ default=False,
)
debug_bvh_time_steps: IntProperty(
name="BVH Time Steps",
@@ -1397,7 +1398,8 @@ class CyclesPreferences(bpy.types.AddonPreferences):
def get_device_types(self, context):
import _cycles
- has_cuda, has_optix, has_hip, has_metal = _cycles.get_device_types()
+ has_cuda, has_optix, has_hip, has_metal, has_oneapi = _cycles.get_device_types()
+
list = [('NONE', "None", "Don't use compute device", 0)]
if has_cuda:
list.append(('CUDA', "CUDA", "Use CUDA for GPU acceleration", 1))
@@ -1407,6 +1409,8 @@ class CyclesPreferences(bpy.types.AddonPreferences):
list.append(('HIP', "HIP", "Use HIP for GPU acceleration", 4))
if has_metal:
list.append(('METAL', "Metal", "Use Metal for GPU acceleration", 5))
+ if has_oneapi:
+ list.append(('ONEAPI', "oneAPI", "Use oneAPI for GPU acceleration", 6))
return list
@@ -1438,7 +1442,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
def update_device_entries(self, device_list):
for device in device_list:
- if not device[1] in {'CUDA', 'OPTIX', 'CPU', 'HIP', 'METAL'}:
+ if not device[1] in {'CUDA', 'OPTIX', 'CPU', 'HIP', 'METAL', 'ONEAPI'}:
continue
# Try to find existing Device entry
entry = self.find_existing_device_entry(device)
@@ -1482,7 +1486,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
import _cycles
# Ensure `self.devices` is not re-allocated when the second call to
# get_devices_for_type is made, freeing items from the first list.
- for device_type in ('CUDA', 'OPTIX', 'HIP', 'METAL'):
+ for device_type in ('CUDA', 'OPTIX', 'HIP', 'METAL', 'ONEAPI'):
self.update_device_entries(_cycles.available_devices(device_type))
# Deprecated: use refresh_devices instead.
@@ -1545,18 +1549,31 @@ class CyclesPreferences(bpy.types.AddonPreferences):
elif device_type == 'HIP':
import sys
if sys.platform[:3] == "win":
- col.label(text="Requires discrete AMD GPU with RDNA architecture", icon='BLANK1')
+ col.label(text="Requires AMD GPU with Vega or RDNA architecture", icon='BLANK1')
col.label(text="and AMD Radeon Pro 21.Q4 driver or newer", icon='BLANK1')
elif sys.platform.startswith("linux"):
- col.label(text="Requires discrete AMD GPU with RDNA architecture", icon='BLANK1')
+ col.label(text="Requires AMD GPU with Vega or RDNA architecture", icon='BLANK1')
col.label(text="and AMD driver version 22.10 or newer", icon='BLANK1')
+ elif device_type == 'ONEAPI':
+ import sys
+ col.label(text="Requires Intel GPU with Xe-HPG architecture", icon='BLANK1')
+ if sys.platform.startswith("win"):
+ col.label(text="and Windows driver version 101.3268 or newer", icon='BLANK1')
+ elif sys.platform.startswith("linux"):
+ col.label(text="and Linux driver version xx.xx.23570 or newer", icon='BLANK1')
elif device_type == 'METAL':
col.label(text="Requires Apple Silicon with macOS 12.2 or newer", icon='BLANK1')
col.label(text="or AMD with macOS 12.3 or newer", icon='BLANK1')
return
for device in devices:
- box.prop(device, "use", text=device.name)
+ import unicodedata
+ box.prop(
+ device, "use", text=device.name
+ .replace('(TM)', unicodedata.lookup('TRADE MARK SIGN'))
+ .replace('(R)', unicodedata.lookup('REGISTERED SIGN'))
+ .replace('(C)', unicodedata.lookup('COPYRIGHT SIGN'))
+ )
def draw_impl(self, layout, context):
row = layout.row()
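
As a side note on the device-name cleanup above: unicodedata.lookup resolves the named characters at runtime, so the ASCII markers in driver-reported device names are displayed with their proper symbols. A minimal standalone sketch (the device name string is hypothetical):

    import unicodedata

    name = "Intel(R) Arc(TM) A770 Graphics"  # hypothetical device name
    pretty = (name
              .replace('(TM)', unicodedata.lookup('TRADE MARK SIGN'))
              .replace('(R)', unicodedata.lookup('REGISTERED SIGN'))
              .replace('(C)', unicodedata.lookup('COPYRIGHT SIGN')))
    print(pretty)  # Intel® Arc™ A770 Graphics
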
diff --git a/intern/cycles/blender/addon/ui.py b/intern/cycles/blender/addon/ui.py
index 88be546746d..ee284dd899a 100644
--- a/intern/cycles/blender/addon/ui.py
+++ b/intern/cycles/blender/addon/ui.py
@@ -43,6 +43,12 @@ class CYCLES_PT_integrator_presets(CyclesPresetPanel):
preset_add_operator = "render.cycles_integrator_preset_add"
+class CYCLES_PT_performance_presets(CyclesPresetPanel):
+ bl_label = "Performance Presets"
+ preset_subdir = "cycles/performance"
+ preset_add_operator = "render.cycles_performance_preset_add"
+
+
class CyclesButtonsPanel:
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
@@ -111,6 +117,12 @@ def use_optix(context):
return (get_device_type(context) == 'OPTIX' and cscene.device == 'GPU')
+def use_oneapi(context):
+ cscene = context.scene.cycles
+
+ return (get_device_type(context) == 'ONEAPI' and cscene.device == 'GPU')
+
+
def use_multi_device(context):
cscene = context.scene.cycles
if cscene.device != 'GPU':
@@ -284,7 +296,6 @@ class CYCLES_RENDER_PT_sampling_advanced(CyclesButtonsPanel, Panel):
row.prop(cscene, "use_animated_seed", text="", icon='TIME')
col = layout.column(align=True)
- col.active = not (cscene.use_adaptive_sampling and cscene.use_preview_adaptive_sampling)
col.prop(cscene, "sampling_pattern", text="Pattern")
col = layout.column(align=True)
@@ -293,6 +304,7 @@ class CYCLES_RENDER_PT_sampling_advanced(CyclesButtonsPanel, Panel):
layout.separator()
heading = layout.column(align=True, heading="Scrambling Distance")
+ heading.active = cscene.sampling_pattern != 'SOBOL'
heading.prop(cscene, "auto_scrambling_distance", text="Automatic")
heading.prop(cscene, "preview_scrambling_distance", text="Viewport")
heading.prop(cscene, "scrambling_distance", text="Multiplier")
@@ -618,6 +630,9 @@ class CYCLES_RENDER_PT_performance(CyclesButtonsPanel, Panel):
bl_label = "Performance"
bl_options = {'DEFAULT_CLOSED'}
+ def draw_header_preset(self, context):
+ CYCLES_PT_performance_presets.draw_panel_header(self.layout)
+
def draw(self, context):
pass
@@ -937,6 +952,8 @@ class CYCLES_CAMERA_PT_dof(CyclesButtonsPanel, Panel):
col = split.column()
col.prop(dof, "focus_object", text="Focus Object")
+ if dof.focus_object and dof.focus_object.type == 'ARMATURE':
+ col.prop_search(dof, "focus_subtarget", dof.focus_object.data, "bones", text="Focus Bone")
sub = col.row()
sub.active = dof.focus_object is None
@@ -1196,7 +1213,7 @@ class CYCLES_OBJECT_PT_lightgroup(CyclesButtonsPanel, Panel):
sub.prop_search(ob, "lightgroup", view_layer, "lightgroups", text="Light Group", results_are_suggestions=True)
sub = row.column(align=True)
- sub.active = bool(ob.lightgroup) and not any(lg.name == ob.lightgroup for lg in view_layer.lightgroups)
+ sub.enabled = bool(ob.lightgroup) and not any(lg.name == ob.lightgroup for lg in view_layer.lightgroups)
sub.operator("scene.view_layer_add_lightgroup", icon='ADD', text="").name = ob.lightgroup
@@ -1634,7 +1651,7 @@ class CYCLES_WORLD_PT_settings_light_group(CyclesButtonsPanel, Panel):
)
sub = row.column(align=True)
- sub.active = bool(world.lightgroup) and not any(lg.name == world.lightgroup for lg in view_layer.lightgroups)
+ sub.enabled = bool(world.lightgroup) and not any(lg.name == world.lightgroup for lg in view_layer.lightgroups)
sub.operator("scene.view_layer_add_lightgroup", icon='ADD', text="").name = world.lightgroup
@@ -2263,6 +2280,7 @@ classes = (
CYCLES_PT_sampling_presets,
CYCLES_PT_viewport_sampling_presets,
CYCLES_PT_integrator_presets,
+ CYCLES_PT_performance_presets,
CYCLES_RENDER_PT_sampling,
CYCLES_RENDER_PT_sampling_viewport,
CYCLES_RENDER_PT_sampling_viewport_denoise,
diff --git a/intern/cycles/blender/camera.cpp b/intern/cycles/blender/camera.cpp
index 402fd7c4ec6..6926c833096 100644
--- a/intern/cycles/blender/camera.cpp
+++ b/intern/cycles/blender/camera.cpp
@@ -143,11 +143,20 @@ static float blender_camera_focal_distance(BL::RenderEngine &b_engine,
if (!b_dof_object)
return b_camera.dof().focus_distance();
+ Transform dofmat = get_transform(b_dof_object.matrix_world());
+
+ string focus_subtarget = b_camera.dof().focus_subtarget();
+ if (b_dof_object.pose() && !focus_subtarget.empty()) {
+ BL::PoseBone b_bone = b_dof_object.pose().bones[focus_subtarget];
+ if (b_bone) {
+ dofmat = dofmat * get_transform(b_bone.matrix());
+ }
+ }
+
/* for dof object, return distance along camera Z direction */
BL::Array<float, 16> b_ob_matrix;
b_engine.camera_model_matrix(b_ob, bcam->use_spherical_stereo, b_ob_matrix);
Transform obmat = transform_clear_scale(get_transform(b_ob_matrix));
- Transform dofmat = get_transform(b_dof_object.matrix_world());
float3 view_dir = normalize(transform_get_column(&obmat, 2));
float3 dof_dir = transform_get_column(&obmat, 3) - transform_get_column(&dofmat, 3);
return fabsf(dot(view_dir, dof_dir));
@@ -643,7 +652,7 @@ void BlenderSync::sync_camera_motion(
/* TODO(sergey): De-duplicate calculation with camera sync. */
float fov = 2.0f * atanf((0.5f * sensor_size) / bcam.lens / aspectratio);
if (fov != cam->get_fov()) {
- VLOG(3) << "Camera " << b_ob.name() << " FOV change detected.";
+ VLOG_WORK << "Camera " << b_ob.name() << " FOV change detected.";
if (motion_time == 0.0f) {
cam->set_fov(fov);
}
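
For reference, the focus bone used by this camera sync is driven by the existing camera DOF settings and can be set from Python as well; a minimal sketch (object and bone names are hypothetical):

    import bpy

    cam = bpy.context.scene.camera.data
    cam.dof.use_dof = True
    cam.dof.focus_object = bpy.data.objects["Rig"]  # hypothetical armature object
    cam.dof.focus_subtarget = "head"                # hypothetical bone name

Cycles then measures the focus distance along the camera view axis to the bone's world-space position, as done in blender_camera_focal_distance() above.
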
diff --git a/intern/cycles/blender/curves.cpp b/intern/cycles/blender/curves.cpp
index 4e9f4f62087..6158ed78598 100644
--- a/intern/cycles/blender/curves.cpp
+++ b/intern/cycles/blender/curves.cpp
@@ -55,7 +55,7 @@ static bool ObtainCacheParticleData(
return false;
Transform tfm = get_transform(b_ob->matrix_world());
- Transform itfm = transform_quick_inverse(tfm);
+ Transform itfm = transform_inverse(tfm);
for (BL::Modifier &b_mod : b_ob->modifiers) {
if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
@@ -341,7 +341,7 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
/* check allocation */
if ((hair->get_curve_keys().size() != num_keys) || (hair->num_curves() != num_curves)) {
- VLOG(1) << "Hair memory allocation failed, clearing data.";
+ VLOG_WARNING << "Hair memory allocation failed, clearing data.";
hair->clear(true);
}
}
@@ -397,7 +397,7 @@ static void export_hair_motion_validate_attribute(Hair *hair,
if (num_motion_keys != num_keys || !have_motion) {
/* No motion or hair "topology" changed, remove attributes again. */
if (num_motion_keys != num_keys) {
- VLOG(1) << "Hair topology changed, removing motion attribute.";
+ VLOG_WORK << "Hair topology changed, removing motion attribute.";
}
hair->attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
}
@@ -613,8 +613,6 @@ void BlenderSync::sync_particle_hair(
}
}
-#ifdef WITH_NEW_CURVES_TYPE
-
static std::optional<BL::FloatAttribute> find_curves_radius_attribute(BL::Curves b_curves)
{
for (BL::Attribute &b_attribute : b_curves.attributes) {
@@ -632,6 +630,25 @@ static std::optional<BL::FloatAttribute> find_curves_radius_attribute(BL::Curves
return std::nullopt;
}
+static BL::FloatVectorAttribute find_curves_position_attribute(BL::Curves b_curves)
+{
+ for (BL::Attribute &b_attribute : b_curves.attributes) {
+ if (b_attribute.name() != "position") {
+ continue;
+ }
+ if (b_attribute.domain() != BL::Attribute::domain_POINT) {
+ continue;
+ }
+ if (b_attribute.data_type() != BL::Attribute::data_type_FLOAT_VECTOR) {
+ continue;
+ }
+ return BL::FloatVectorAttribute{b_attribute};
+ }
+ /* The position attribute must exist. */
+ assert(false);
+ return BL::FloatVectorAttribute{b_curves.attributes[0]};
+}
+
template<typename TypeInCycles, typename GetValueAtIndex>
static void fill_generic_attribute(BL::Curves &b_curves,
TypeInCycles *data,
@@ -690,6 +707,21 @@ static void attr_create_motion(Hair *hair, BL::Attribute &b_attribute, const flo
}
}
+static void attr_create_uv(AttributeSet &attributes,
+ BL::Curves &b_curves,
+ BL::Attribute &b_attribute,
+ const ustring name)
+{
+ BL::Float2Attribute b_float2_attribute{b_attribute};
+ Attribute *attr = attributes.add(ATTR_STD_UV, name);
+
+ float2 *data = attr->data_float2();
+ fill_generic_attribute(b_curves, data, ATTR_ELEMENT_CURVE, [&](int i) {
+ BL::Array<float, 2> v = b_float2_attribute.data[i].vector();
+ return make_float2(v[0], v[1]);
+ });
+}
+
static void attr_create_generic(Scene *scene,
Hair *hair,
BL::Curves &b_curves,
@@ -698,12 +730,26 @@ static void attr_create_generic(Scene *scene,
{
AttributeSet &attributes = hair->attributes;
static const ustring u_velocity("velocity");
+ const bool need_uv = hair->need_attribute(scene, ATTR_STD_UV);
+ bool have_uv = false;
for (BL::Attribute &b_attribute : b_curves.attributes) {
const ustring name{b_attribute.name().c_str()};
+ const BL::Attribute::domain_enum b_domain = b_attribute.domain();
+ const BL::Attribute::data_type_enum b_data_type = b_attribute.data_type();
+
if (need_motion && name == u_velocity) {
attr_create_motion(hair, b_attribute, motion_scale);
+ continue;
+ }
+
+ /* Weak, use first float2 attribute as standard UV. */
+ if (need_uv && !have_uv && b_data_type == BL::Attribute::data_type_FLOAT2 &&
+ b_domain == BL::Attribute::domain_CURVE) {
+ attr_create_uv(attributes, b_curves, b_attribute, name);
+ have_uv = true;
+ continue;
}
if (!hair->need_attribute(scene, name)) {
@@ -713,9 +759,6 @@ static void attr_create_generic(Scene *scene,
continue;
}
- const BL::Attribute::domain_enum b_domain = b_attribute.domain();
- const BL::Attribute::data_type_enum b_data_type = b_attribute.data_type();
-
AttributeElement element = ATTR_ELEMENT_NONE;
switch (b_domain) {
case BL::Attribute::domain_POINT:
@@ -795,16 +838,16 @@ static void attr_create_generic(Scene *scene,
}
}
-static float4 hair_point_as_float4(BL::Curves b_curves,
+static float4 hair_point_as_float4(BL::FloatVectorAttribute b_attr_position,
std::optional<BL::FloatAttribute> b_attr_radius,
const int index)
{
- float4 mP = float3_to_float4(get_float3(b_curves.position_data[index].vector()));
- mP.w = b_attr_radius ? b_attr_radius->data[index].value() : 0.0f;
+ float4 mP = float3_to_float4(get_float3(b_attr_position.data[index].vector()));
+ mP.w = b_attr_radius ? b_attr_radius->data[index].value() : 0.005f;
return mP;
}
-static float4 interpolate_hair_points(BL::Curves b_curves,
+static float4 interpolate_hair_points(BL::FloatVectorAttribute b_attr_position,
std::optional<BL::FloatAttribute> b_attr_radius,
const int first_point_index,
const int num_points,
@@ -814,8 +857,8 @@ static float4 interpolate_hair_points(BL::Curves b_curves,
const int point_a = clamp((int)curve_t, 0, num_points - 1);
const int point_b = min(point_a + 1, num_points - 1);
const float t = curve_t - (float)point_a;
- return lerp(hair_point_as_float4(b_curves, b_attr_radius, first_point_index + point_a),
- hair_point_as_float4(b_curves, b_attr_radius, first_point_index + point_b),
+ return lerp(hair_point_as_float4(b_attr_position, b_attr_radius, first_point_index + point_a),
+ hair_point_as_float4(b_attr_position, b_attr_radius, first_point_index + point_b),
t);
}
@@ -827,78 +870,84 @@ static void export_hair_curves(Scene *scene,
{
/* TODO: optimize so we can straight memcpy arrays from Blender? */
+ const int num_keys = b_curves.points.length();
+ const int num_curves = b_curves.curves.length();
+
+ hair->resize_curves(num_curves, num_keys);
+
+ float3 *curve_keys = hair->get_curve_keys().data();
+ float *curve_radius = hair->get_curve_radius().data();
+ int *curve_first_key = hair->get_curve_first_key().data();
+ int *curve_shader = hair->get_curve_shader().data();
+
/* Add requested attributes. */
- Attribute *attr_intercept = NULL;
- Attribute *attr_length = NULL;
- Attribute *attr_random = NULL;
+ float *attr_intercept = NULL;
+ float *attr_length = NULL;
+ float *attr_random = NULL;
if (hair->need_attribute(scene, ATTR_STD_CURVE_INTERCEPT)) {
- attr_intercept = hair->attributes.add(ATTR_STD_CURVE_INTERCEPT);
+ attr_intercept = hair->attributes.add(ATTR_STD_CURVE_INTERCEPT)->data_float();
}
if (hair->need_attribute(scene, ATTR_STD_CURVE_LENGTH)) {
- attr_length = hair->attributes.add(ATTR_STD_CURVE_LENGTH);
+ attr_length = hair->attributes.add(ATTR_STD_CURVE_LENGTH)->data_float();
}
if (hair->need_attribute(scene, ATTR_STD_CURVE_RANDOM)) {
- attr_random = hair->attributes.add(ATTR_STD_CURVE_RANDOM);
+ attr_random = hair->attributes.add(ATTR_STD_CURVE_RANDOM)->data_float();
}
- /* Reserve memory. */
- const int num_keys = b_curves.points.length();
- const int num_curves = b_curves.curves.length();
-
- hair->reserve_curves(num_curves, num_keys);
-
+ BL::FloatVectorAttribute b_attr_position = find_curves_position_attribute(b_curves);
std::optional<BL::FloatAttribute> b_attr_radius = find_curves_radius_attribute(b_curves);
/* Export curves and points. */
- vector<float> points_length;
-
for (int i = 0; i < num_curves; i++) {
const int first_point_index = b_curves.curve_offset_data[i].value();
const int num_points = b_curves.curve_offset_data[i + 1].value() - first_point_index;
float3 prev_co = zero_float3();
float length = 0.0f;
- if (attr_intercept) {
- points_length.clear();
- points_length.reserve(num_points);
- }
/* Position and radius. */
- for (int i = 0; i < num_points; i++) {
- const float3 co = get_float3(b_curves.position_data[first_point_index + i].vector());
- const float radius = b_attr_radius ? b_attr_radius->data[first_point_index + i].value() :
- 0.0f;
- hair->add_curve_key(co, radius);
-
- if (attr_intercept) {
- if (i > 0) {
+ for (int j = 0; j < num_points; j++) {
+ const int point_offset = first_point_index + j;
+ const float3 co = get_float3(b_attr_position.data[point_offset].vector());
+ const float radius = b_attr_radius ? b_attr_radius->data[point_offset].value() : 0.005f;
+
+ curve_keys[point_offset] = co;
+ curve_radius[point_offset] = radius;
+
+ if (attr_length || attr_intercept) {
+ if (j > 0) {
length += len(co - prev_co);
- points_length.push_back(length);
}
prev_co = co;
+
+ if (attr_intercept) {
+ attr_intercept[point_offset] = length;
+ }
}
}
/* Normalized 0..1 attribute along curve. */
- if (attr_intercept) {
- for (int i = 0; i < num_points; i++) {
- attr_intercept->add((length == 0.0f) ? 0.0f : points_length[i] / length);
+ if (attr_intercept && length > 0.0f) {
+ for (int j = 1; j < num_points; j++) {
+ const int point_offset = first_point_index + j;
+ attr_intercept[point_offset] /= length;
}
}
+ /* Curve length. */
if (attr_length) {
- attr_length->add(length);
+ attr_length[i] = length;
}
/* Random number per curve. */
if (attr_random != NULL) {
- attr_random->add(hash_uint2_to_float(i, 0));
+ attr_random[i] = hash_uint2_to_float(i, 0);
}
/* Curve. */
- const int shader_index = 0;
- hair->add_curve(first_point_index, shader_index);
+ curve_shader[i] = 0;
+ curve_first_key[i] = first_point_index;
}
attr_create_generic(scene, hair, b_curves, need_motion, motion_scale);
@@ -923,6 +972,7 @@ static void export_hair_curves_motion(Hair *hair, BL::Curves b_curves, int motio
int num_motion_keys = 0;
int curve_index = 0;
+ BL::FloatVectorAttribute b_attr_position = find_curves_position_attribute(b_curves);
std::optional<BL::FloatAttribute> b_attr_radius = find_curves_radius_attribute(b_curves);
for (int i = 0; i < num_curves; i++) {
@@ -938,7 +988,7 @@ static void export_hair_curves_motion(Hair *hair, BL::Curves b_curves, int motio
int point_index = first_point_index + i;
if (point_index < num_keys) {
- mP[num_motion_keys] = hair_point_as_float4(b_curves, b_attr_radius, point_index);
+ mP[num_motion_keys] = hair_point_as_float4(b_attr_position, b_attr_radius, point_index);
num_motion_keys++;
if (!have_motion) {
@@ -958,7 +1008,7 @@ static void export_hair_curves_motion(Hair *hair, BL::Curves b_curves, int motio
for (int i = 0; i < curve.num_keys; i++) {
const float step = i * step_size;
mP[num_motion_keys] = interpolate_hair_points(
- b_curves, b_attr_radius, first_point_index, num_points, step);
+ b_attr_position, b_attr_radius, first_point_index, num_points, step);
num_motion_keys++;
}
have_motion = true;
@@ -990,15 +1040,6 @@ void BlenderSync::sync_hair(Hair *hair, BObjectInfo &b_ob_info, bool motion, int
export_hair_curves(scene, hair, b_curves, need_motion, motion_scale);
}
}
-#else
-void BlenderSync::sync_hair(Hair *hair, BObjectInfo &b_ob_info, bool motion, int motion_step)
-{
- (void)hair;
- (void)b_ob_info;
- (void)motion;
- (void)motion_step;
-}
-#endif
void BlenderSync::sync_hair(BL::Depsgraph b_depsgraph, BObjectInfo &b_ob_info, Hair *hair)
{
@@ -1010,14 +1051,11 @@ void BlenderSync::sync_hair(BL::Depsgraph b_depsgraph, BObjectInfo &b_ob_info, H
new_hair.set_used_shaders(used_shaders);
if (view_layer.use_hair) {
-#ifdef WITH_NEW_CURVES_TYPE
if (b_ob_info.object_data.is_a(&RNA_Curves)) {
/* Hair object. */
sync_hair(&new_hair, b_ob_info, false);
}
- else
-#endif
- {
+ else {
/* Particle hair. */
bool need_undeformed = new_hair.need_attribute(scene, ATTR_STD_GENERATED);
BL::Mesh b_mesh = object_to_mesh(
@@ -1064,15 +1102,12 @@ void BlenderSync::sync_hair_motion(BL::Depsgraph b_depsgraph,
/* Export deformed coordinates. */
if (ccl::BKE_object_is_deform_modified(b_ob_info, b_scene, preview)) {
-#ifdef WITH_NEW_CURVES_TYPE
if (b_ob_info.object_data.is_a(&RNA_Curves)) {
/* Hair object. */
sync_hair(hair, b_ob_info, true, motion_step);
return;
}
- else
-#endif
- {
+ else {
/* Particle hair. */
BL::Mesh b_mesh = object_to_mesh(
b_data, b_ob_info, b_depsgraph, false, Mesh::SUBDIVISION_NONE);
diff --git a/intern/cycles/blender/device.cpp b/intern/cycles/blender/device.cpp
index 38effa329a5..22beca898f1 100644
--- a/intern/cycles/blender/device.cpp
+++ b/intern/cycles/blender/device.cpp
@@ -15,6 +15,7 @@ enum ComputeDevice {
COMPUTE_DEVICE_OPTIX = 3,
COMPUTE_DEVICE_HIP = 4,
COMPUTE_DEVICE_METAL = 5,
+ COMPUTE_DEVICE_ONEAPI = 6,
COMPUTE_DEVICE_NUM
};
@@ -76,6 +77,9 @@ DeviceInfo blender_device_info(BL::Preferences &b_preferences, BL::Scene &b_scen
else if (compute_device == COMPUTE_DEVICE_METAL) {
mask |= DEVICE_MASK_METAL;
}
+ else if (compute_device == COMPUTE_DEVICE_ONEAPI) {
+ mask |= DEVICE_MASK_ONEAPI;
+ }
vector<DeviceInfo> devices = Device::available_devices(mask);
/* Match device preferences and available devices. */
diff --git a/intern/cycles/blender/display_driver.cpp b/intern/cycles/blender/display_driver.cpp
index ee67073a9a4..e2be4f85a9b 100644
--- a/intern/cycles/blender/display_driver.cpp
+++ b/intern/cycles/blender/display_driver.cpp
@@ -7,21 +7,9 @@
#include "util/log.h"
#include "util/opengl.h"
-extern "C" {
-struct RenderEngine;
+#include "GPU_platform.h"
-bool RE_engine_has_render_context(struct RenderEngine *engine);
-void RE_engine_render_context_enable(struct RenderEngine *engine);
-void RE_engine_render_context_disable(struct RenderEngine *engine);
-
-bool DRW_opengl_context_release();
-void DRW_opengl_context_activate(bool drw_state);
-
-void *WM_opengl_context_create();
-void WM_opengl_context_activate(void *gl_context);
-void WM_opengl_context_dispose(void *gl_context);
-void WM_opengl_context_release(void *context);
-}
+#include "RE_engine.h"
CCL_NAMESPACE_BEGIN
@@ -507,6 +495,7 @@ class DrawTileAndPBO {
DrawTile tile;
GLPixelBufferObject buffer_object;
+ bool need_update_texture_pixels = false;
};
/* --------------------------------------------------------------------
@@ -556,18 +545,21 @@ struct BlenderDisplayDriver::Tiles {
}
};
-BlenderDisplayDriver::BlenderDisplayDriver(BL::RenderEngine &b_engine, BL::Scene &b_scene)
+BlenderDisplayDriver::BlenderDisplayDriver(BL::RenderEngine &b_engine,
+ BL::Scene &b_scene,
+ const bool background)
: b_engine_(b_engine),
+ background_(background),
display_shader_(BlenderDisplayShader::create(b_engine, b_scene)),
tiles_(make_unique<Tiles>())
{
/* Create context while on the main thread. */
- gl_context_create();
+ gpu_context_create();
}
BlenderDisplayDriver::~BlenderDisplayDriver()
{
- gl_resources_destroy();
+ gpu_resources_destroy();
}
/* --------------------------------------------------------------------
@@ -585,6 +577,8 @@ void BlenderDisplayDriver::next_tile_begin()
/* Moving to the next tile without giving render data for the current tile is not an expected
* situation. */
DCHECK(!need_clear_);
+ /* Texture should have been updated from the PBO at this point. */
+ DCHECK(!tiles_->current_tile.need_update_texture_pixels);
tiles_->finished_tiles.tiles.emplace_back(std::move(tiles_->current_tile.tile));
}
@@ -596,12 +590,12 @@ bool BlenderDisplayDriver::update_begin(const Params &params,
/* Note that it's the responsibility of BlenderDisplayDriver to ensure updating and drawing
* the texture does not happen at the same time. This is achieved indirectly.
*
- * When enabling the OpenGL context, it uses an internal mutex lock DST.gl_context_lock.
+ * When enabling the OpenGL context, it uses an internal mutex lock DST.gpu_context_lock.
* This same lock is also held when do_draw() is called, which together ensure mutual
* exclusion.
*
* This locking is not performed on the Cycles side, because that would cause lock inversion. */
- if (!gl_context_enable()) {
+ if (!gpu_context_enable()) {
return false;
}
@@ -622,13 +616,13 @@ bool BlenderDisplayDriver::update_begin(const Params &params,
if (!tiles_->gl_resources_ensure()) {
tiles_->gl_resources_destroy();
- gl_context_disable();
+ gpu_context_disable();
return false;
}
if (!tiles_->current_tile.gl_resources_ensure()) {
tiles_->current_tile.gl_resources_destroy();
- gl_context_disable();
+ gpu_context_disable();
return false;
}
@@ -702,13 +696,23 @@ void BlenderDisplayDriver::update_end()
* One concern with this approach is that if the update happens more often than drawing then
* doing the unpack here occupies GPU transfer for no good reason. However, the render scheduler
* takes care of ensuring updates don't happen that often. In regular applications redraw will
- * happen much more often than this update. */
- update_tile_texture_pixels(tiles_->current_tile);
+ * happen much more often than this update.
+ *
+ * On some older GPUs on macOS, there is a driver crash when updating the texture for viewport
+ * renders while Blender is drawing. As a workaround, update the texture during draw, under the
+ * assumption that there is no graphics interop on macOS and the viewport render has a single tile. */
+ if (!background_ &&
+ GPU_type_matches_ex(GPU_DEVICE_NVIDIA, GPU_OS_MAC, GPU_DRIVER_ANY, GPU_BACKEND_ANY)) {
+ tiles_->current_tile.need_update_texture_pixels = true;
+ }
+ else {
+ update_tile_texture_pixels(tiles_->current_tile);
+ }
gl_upload_sync_ = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();
- gl_context_disable();
+ gpu_context_disable();
}
/* --------------------------------------------------------------------
@@ -756,12 +760,12 @@ BlenderDisplayDriver::GraphicsInterop BlenderDisplayDriver::graphics_interop_get
void BlenderDisplayDriver::graphics_interop_activate()
{
- gl_context_enable();
+ gpu_context_enable();
}
void BlenderDisplayDriver::graphics_interop_deactivate()
{
- gl_context_disable();
+ gpu_context_disable();
}
/* --------------------------------------------------------------------
@@ -895,7 +899,7 @@ void BlenderDisplayDriver::flush()
* If we don't do this, the NVIDIA driver hangs for a few seconds for when ending 3D viewport
* rendering, for unknown reasons. This was found with NVIDIA driver version 470.73 and a Quadro
* RTX 6000 on Linux. */
- if (!gl_context_enable()) {
+ if (!gpu_context_enable()) {
return;
}
@@ -907,17 +911,12 @@ void BlenderDisplayDriver::flush()
glWaitSync((GLsync)gl_render_sync_, 0, GL_TIMEOUT_IGNORED);
}
- gl_context_disable();
+ gpu_context_disable();
}
void BlenderDisplayDriver::draw(const Params &params)
{
- /* See do_update_begin() for why no locking is required here. */
- const bool transparent = true; // TODO(sergey): Derive this from Film.
-
- if (use_gl_context_) {
- gl_context_mutex_.lock();
- }
+ gpu_context_lock();
if (need_clear_) {
/* Texture is requested to be cleared and was not yet cleared.
@@ -925,9 +924,7 @@ void BlenderDisplayDriver::draw(const Params &params)
* Do early return which should be equivalent of drawing all-zero texture.
* Watch out for the lock though so that the clear happening during update is properly
* synchronized here. */
- if (use_gl_context_) {
- gl_context_mutex_.unlock();
- }
+ gpu_context_unlock();
return;
}
@@ -935,10 +932,8 @@ void BlenderDisplayDriver::draw(const Params &params)
glWaitSync((GLsync)gl_upload_sync_, 0, GL_TIMEOUT_IGNORED);
}
- if (transparent) {
- glEnable(GL_BLEND);
- glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
- }
+ glEnable(GL_BLEND);
+ glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glActiveTexture(GL_TEXTURE0);
@@ -957,6 +952,11 @@ void BlenderDisplayDriver::draw(const Params &params)
glEnableVertexAttribArray(texcoord_attribute);
glEnableVertexAttribArray(position_attribute);
+ if (tiles_->current_tile.need_update_texture_pixels) {
+ update_tile_texture_pixels(tiles_->current_tile);
+ tiles_->current_tile.need_update_texture_pixels = false;
+ }
+
draw_tile(zoom_,
texcoord_attribute,
position_attribute,
@@ -975,103 +975,60 @@ void BlenderDisplayDriver::draw(const Params &params)
glDeleteVertexArrays(1, &vertex_array_object);
- if (transparent) {
- glDisable(GL_BLEND);
- }
+ glDisable(GL_BLEND);
gl_render_sync_ = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glFlush();
- if (VLOG_IS_ON(5)) {
- VLOG(5) << "Number of textures: " << GLTexture::num_used;
- VLOG(5) << "Number of PBOs: " << GLPixelBufferObject::num_used;
- }
+ gpu_context_unlock();
- if (use_gl_context_) {
- gl_context_mutex_.unlock();
- }
+ VLOG_DEVICE_STATS << "Display driver number of textures: " << GLTexture::num_used;
+ VLOG_DEVICE_STATS << "Display driver number of PBOs: " << GLPixelBufferObject::num_used;
}
-void BlenderDisplayDriver::gl_context_create()
+void BlenderDisplayDriver::gpu_context_create()
{
- /* When rendering in viewport there is no render context available via engine.
- * Check whether own context is to be created here.
- *
- * NOTE: If the `b_engine_`'s context is not available, we are expected to be on a main thread
- * here. */
- use_gl_context_ = !RE_engine_has_render_context(
- reinterpret_cast<RenderEngine *>(b_engine_.ptr.data));
-
- if (use_gl_context_) {
- const bool drw_state = DRW_opengl_context_release();
- gl_context_ = WM_opengl_context_create();
- if (gl_context_) {
- /* On Windows an old context is restored after creation, and subsequent release of context
- * generates a Win32 error. Harmless for users, but annoying to have possible misleading
- * error prints in the console. */
-#ifndef _WIN32
- WM_opengl_context_release(gl_context_);
-#endif
- }
- else {
- LOG(ERROR) << "Error creating OpenGL context.";
- }
-
- DRW_opengl_context_activate(drw_state);
+ if (!RE_engine_gpu_context_create(reinterpret_cast<RenderEngine *>(b_engine_.ptr.data))) {
+ LOG(ERROR) << "Error creating OpenGL context.";
}
}
-bool BlenderDisplayDriver::gl_context_enable()
+bool BlenderDisplayDriver::gpu_context_enable()
{
- if (use_gl_context_) {
- if (!gl_context_) {
- return false;
- }
- gl_context_mutex_.lock();
- WM_opengl_context_activate(gl_context_);
- return true;
- }
-
- RE_engine_render_context_enable(reinterpret_cast<RenderEngine *>(b_engine_.ptr.data));
- return true;
+ return RE_engine_gpu_context_enable(reinterpret_cast<RenderEngine *>(b_engine_.ptr.data));
}
-void BlenderDisplayDriver::gl_context_disable()
+void BlenderDisplayDriver::gpu_context_disable()
{
- if (use_gl_context_) {
- if (gl_context_) {
- WM_opengl_context_release(gl_context_);
- gl_context_mutex_.unlock();
- }
- return;
- }
-
- RE_engine_render_context_disable(reinterpret_cast<RenderEngine *>(b_engine_.ptr.data));
+ RE_engine_gpu_context_disable(reinterpret_cast<RenderEngine *>(b_engine_.ptr.data));
}
-void BlenderDisplayDriver::gl_context_dispose()
+void BlenderDisplayDriver::gpu_context_destroy()
{
- if (gl_context_) {
- const bool drw_state = DRW_opengl_context_release();
+ RE_engine_gpu_context_destroy(reinterpret_cast<RenderEngine *>(b_engine_.ptr.data));
+}
- WM_opengl_context_activate(gl_context_);
- WM_opengl_context_dispose(gl_context_);
+void BlenderDisplayDriver::gpu_context_lock()
+{
+ RE_engine_gpu_context_lock(reinterpret_cast<RenderEngine *>(b_engine_.ptr.data));
+}
- DRW_opengl_context_activate(drw_state);
- }
+void BlenderDisplayDriver::gpu_context_unlock()
+{
+ RE_engine_gpu_context_unlock(reinterpret_cast<RenderEngine *>(b_engine_.ptr.data));
}
-void BlenderDisplayDriver::gl_resources_destroy()
+void BlenderDisplayDriver::gpu_resources_destroy()
{
- gl_context_enable();
+ gpu_context_enable();
tiles_->current_tile.gl_resources_destroy();
tiles_->finished_tiles.gl_resources_destroy_and_clear();
tiles_->gl_resources_destroy();
- gl_context_disable();
+ gpu_context_disable();
- gl_context_dispose();
+ gpu_context_destroy();
}
CCL_NAMESPACE_END
diff --git a/intern/cycles/blender/display_driver.h b/intern/cycles/blender/display_driver.h
index 58867d08e19..4df40269daf 100644
--- a/intern/cycles/blender/display_driver.h
+++ b/intern/cycles/blender/display_driver.h
@@ -89,7 +89,7 @@ class BlenderDisplaySpaceShader : public BlenderDisplayShader {
/* Display driver implementation which is specific for Blender viewport integration. */
class BlenderDisplayDriver : public DisplayDriver {
public:
- BlenderDisplayDriver(BL::RenderEngine &b_engine, BL::Scene &b_scene);
+ BlenderDisplayDriver(BL::RenderEngine &b_engine, BL::Scene &b_scene, const bool background);
~BlenderDisplayDriver();
virtual void graphics_interop_activate() override;
@@ -115,23 +115,18 @@ class BlenderDisplayDriver : public DisplayDriver {
virtual void flush() override;
/* Helper function which allocates new GPU context. */
- void gl_context_create();
- bool gl_context_enable();
- void gl_context_disable();
- void gl_context_dispose();
+ void gpu_context_create();
+ bool gpu_context_enable();
+ void gpu_context_disable();
+ void gpu_context_destroy();
+ void gpu_context_lock();
+ void gpu_context_unlock();
/* Destroy all GPU resources which are being used by this object. */
- void gl_resources_destroy();
+ void gpu_resources_destroy();
BL::RenderEngine b_engine_;
-
- /* OpenGL context which is used the render engine doesn't have its own. */
- void *gl_context_ = nullptr;
- /* The when Blender RenderEngine side context is not available and the DisplayDriver is to create
- * its own context. */
- bool use_gl_context_ = false;
- /* Mutex used to guard the `gl_context_`. */
- thread_mutex gl_context_mutex_;
+ bool background_;
/* Content of the display is to be filled with zeroes. */
std::atomic<bool> need_clear_ = true;
diff --git a/intern/cycles/blender/geometry.cpp b/intern/cycles/blender/geometry.cpp
index 215860f59e6..fc03ca6e489 100644
--- a/intern/cycles/blender/geometry.cpp
+++ b/intern/cycles/blender/geometry.cpp
@@ -18,11 +18,7 @@ CCL_NAMESPACE_BEGIN
static Geometry::Type determine_geom_type(BObjectInfo &b_ob_info, bool use_particle_hair)
{
-#ifdef WITH_NEW_CURVES_TYPE
if (b_ob_info.object_data.is_a(&RNA_Curves) || use_particle_hair) {
-#else
- if (use_particle_hair) {
-#endif
return Geometry::HAIR;
}
@@ -217,11 +213,7 @@ void BlenderSync::sync_geometry_motion(BL::Depsgraph &b_depsgraph,
if (progress.get_cancel())
return;
-#ifdef WITH_NEW_CURVES_TYPE
if (b_ob_info.object_data.is_a(&RNA_Curves) || use_particle_hair) {
-#else
- if (use_particle_hair) {
-#endif
Hair *hair = static_cast<Hair *>(geom);
sync_hair_motion(b_depsgraph, b_ob_info, hair, motion_step);
}
diff --git a/intern/cycles/blender/mesh.cpp b/intern/cycles/blender/mesh.cpp
index e2db52cc5c1..1d1eadebc39 100644
--- a/intern/cycles/blender/mesh.cpp
+++ b/intern/cycles/blender/mesh.cpp
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright 2011-2022 Blender Foundation */
+#include <optional>
+
#include "blender/session.h"
#include "blender/sync.h"
#include "blender/util.h"
@@ -22,22 +24,23 @@
#include "util/log.h"
#include "util/math.h"
-#include "mikktspace.h"
+#include "mikktspace.hh"
+
+#include "DNA_meshdata_types.h"
CCL_NAMESPACE_BEGIN
/* Tangent Space */
-struct MikkUserData {
- MikkUserData(const BL::Mesh &b_mesh,
- const char *layer_name,
- const Mesh *mesh,
- float3 *tangent,
- float *tangent_sign)
+template<bool is_subd> struct MikkMeshWrapper {
+ MikkMeshWrapper(const BL::Mesh &b_mesh,
+ const char *layer_name,
+ const Mesh *mesh,
+ float3 *tangent,
+ float *tangent_sign)
: mesh(mesh), texface(NULL), orco(NULL), tangent(tangent), tangent_sign(tangent_sign)
{
- const AttributeSet &attributes = (mesh->get_num_subd_faces()) ? mesh->subd_attributes :
- mesh->attributes;
+ const AttributeSet &attributes = is_subd ? mesh->subd_attributes : mesh->attributes;
Attribute *attr_vN = attributes.find(ATTR_STD_VERTEX_NORMAL);
vertex_normal = attr_vN->data_float3();
@@ -47,7 +50,9 @@ struct MikkUserData {
if (attr_orco) {
orco = attr_orco->data_float3();
+ float3 orco_size;
mesh_texture_space(*(BL::Mesh *)&b_mesh, orco_loc, orco_size);
+ inv_orco_size = 1.0f / orco_size;
}
}
else {
@@ -58,160 +63,126 @@ struct MikkUserData {
}
}
- const Mesh *mesh;
- int num_faces;
-
- float3 *vertex_normal;
- float2 *texface;
- float3 *orco;
- float3 orco_loc, orco_size;
-
- float3 *tangent;
- float *tangent_sign;
-};
-
-static int mikk_get_num_faces(const SMikkTSpaceContext *context)
-{
- const MikkUserData *userdata = (const MikkUserData *)context->m_pUserData;
- if (userdata->mesh->get_num_subd_faces()) {
- return userdata->mesh->get_num_subd_faces();
- }
- else {
- return userdata->mesh->num_triangles();
+ int GetNumFaces()
+ {
+ if constexpr (is_subd) {
+ return mesh->get_num_subd_faces();
+ }
+ else {
+ return mesh->num_triangles();
+ }
}
-}
-static int mikk_get_num_verts_of_face(const SMikkTSpaceContext *context, const int face_num)
-{
- const MikkUserData *userdata = (const MikkUserData *)context->m_pUserData;
- if (userdata->mesh->get_num_subd_faces()) {
- const Mesh *mesh = userdata->mesh;
- return mesh->get_subd_num_corners()[face_num];
- }
- else {
- return 3;
+ int GetNumVerticesOfFace(const int face_num)
+ {
+ if constexpr (is_subd) {
+ return mesh->get_subd_num_corners()[face_num];
+ }
+ else {
+ return 3;
+ }
}
-}
-static int mikk_vertex_index(const Mesh *mesh, const int face_num, const int vert_num)
-{
- if (mesh->get_num_subd_faces()) {
- const Mesh::SubdFace &face = mesh->get_subd_face(face_num);
- return mesh->get_subd_face_corners()[face.start_corner + vert_num];
- }
- else {
- return mesh->get_triangles()[face_num * 3 + vert_num];
+ int CornerIndex(const int face_num, const int vert_num)
+ {
+ if constexpr (is_subd) {
+ const Mesh::SubdFace &face = mesh->get_subd_face(face_num);
+ return face.start_corner + vert_num;
+ }
+ else {
+ return face_num * 3 + vert_num;
+ }
}
-}
-static int mikk_corner_index(const Mesh *mesh, const int face_num, const int vert_num)
-{
- if (mesh->get_num_subd_faces()) {
- const Mesh::SubdFace &face = mesh->get_subd_face(face_num);
- return face.start_corner + vert_num;
- }
- else {
- return face_num * 3 + vert_num;
+ int VertexIndex(const int face_num, const int vert_num)
+ {
+ int corner = CornerIndex(face_num, vert_num);
+ if constexpr (is_subd) {
+ return mesh->get_subd_face_corners()[corner];
+ }
+ else {
+ return mesh->get_triangles()[corner];
+ }
}
-}
-
-static void mikk_get_position(const SMikkTSpaceContext *context,
- float P[3],
- const int face_num,
- const int vert_num)
-{
- const MikkUserData *userdata = (const MikkUserData *)context->m_pUserData;
- const Mesh *mesh = userdata->mesh;
- const int vertex_index = mikk_vertex_index(mesh, face_num, vert_num);
- const float3 vP = mesh->get_verts()[vertex_index];
- P[0] = vP.x;
- P[1] = vP.y;
- P[2] = vP.z;
-}
-static void mikk_get_texture_coordinate(const SMikkTSpaceContext *context,
- float uv[2],
- const int face_num,
- const int vert_num)
-{
- const MikkUserData *userdata = (const MikkUserData *)context->m_pUserData;
- const Mesh *mesh = userdata->mesh;
- if (userdata->texface != NULL) {
- const int corner_index = mikk_corner_index(mesh, face_num, vert_num);
- float2 tfuv = userdata->texface[corner_index];
- uv[0] = tfuv.x;
- uv[1] = tfuv.y;
- }
- else if (userdata->orco != NULL) {
- const int vertex_index = mikk_vertex_index(mesh, face_num, vert_num);
- const float3 orco_loc = userdata->orco_loc;
- const float3 orco_size = userdata->orco_size;
- const float3 orco = (userdata->orco[vertex_index] + orco_loc) / orco_size;
-
- const float2 tmp = map_to_sphere(orco);
- uv[0] = tmp.x;
- uv[1] = tmp.y;
- }
- else {
- uv[0] = 0.0f;
- uv[1] = 0.0f;
+ mikk::float3 GetPosition(const int face_num, const int vert_num)
+ {
+ const float3 vP = mesh->get_verts()[VertexIndex(face_num, vert_num)];
+ return mikk::float3(vP.x, vP.y, vP.z);
}
-}
-static void mikk_get_normal(const SMikkTSpaceContext *context,
- float N[3],
- const int face_num,
- const int vert_num)
-{
- const MikkUserData *userdata = (const MikkUserData *)context->m_pUserData;
- const Mesh *mesh = userdata->mesh;
- float3 vN;
- if (mesh->get_num_subd_faces()) {
- const Mesh::SubdFace &face = mesh->get_subd_face(face_num);
- if (face.smooth) {
- const int vertex_index = mikk_vertex_index(mesh, face_num, vert_num);
- vN = userdata->vertex_normal[vertex_index];
+ mikk::float3 GetTexCoord(const int face_num, const int vert_num)
+ {
+ /* TODO: Check whether introducing a template boolean in order to
+ * turn this into a constexpr is worth it. */
+ if (texface != NULL) {
+ const int corner_index = CornerIndex(face_num, vert_num);
+ float2 tfuv = texface[corner_index];
+ return mikk::float3(tfuv.x, tfuv.y, 1.0f);
+ }
+ else if (orco != NULL) {
+ const int vertex_index = VertexIndex(face_num, vert_num);
+ const float2 uv = map_to_sphere((orco[vertex_index] + orco_loc) * inv_orco_size);
+ return mikk::float3(uv.x, uv.y, 1.0f);
}
else {
- vN = face.normal(mesh);
+ return mikk::float3(0.0f, 0.0f, 1.0f);
}
}
- else {
- if (mesh->get_smooth()[face_num]) {
- const int vertex_index = mikk_vertex_index(mesh, face_num, vert_num);
- vN = userdata->vertex_normal[vertex_index];
+
+ mikk::float3 GetNormal(const int face_num, const int vert_num)
+ {
+ float3 vN;
+ if (is_subd) {
+ const Mesh::SubdFace &face = mesh->get_subd_face(face_num);
+ if (face.smooth) {
+ const int vertex_index = VertexIndex(face_num, vert_num);
+ vN = vertex_normal[vertex_index];
+ }
+ else {
+ vN = face.normal(mesh);
+ }
}
else {
- const Mesh::Triangle tri = mesh->get_triangle(face_num);
- vN = tri.compute_normal(&mesh->get_verts()[0]);
+ if (mesh->get_smooth()[face_num]) {
+ const int vertex_index = VertexIndex(face_num, vert_num);
+ vN = vertex_normal[vertex_index];
+ }
+ else {
+ const Mesh::Triangle tri = mesh->get_triangle(face_num);
+ vN = tri.compute_normal(&mesh->get_verts()[0]);
+ }
}
+ return mikk::float3(vN.x, vN.y, vN.z);
}
- N[0] = vN.x;
- N[1] = vN.y;
- N[2] = vN.z;
-}
-static void mikk_set_tangent_space(const SMikkTSpaceContext *context,
- const float T[],
- const float sign,
- const int face_num,
- const int vert_num)
-{
- MikkUserData *userdata = (MikkUserData *)context->m_pUserData;
- const Mesh *mesh = userdata->mesh;
- const int corner_index = mikk_corner_index(mesh, face_num, vert_num);
- userdata->tangent[corner_index] = make_float3(T[0], T[1], T[2]);
- if (userdata->tangent_sign != NULL) {
- userdata->tangent_sign[corner_index] = sign;
+ void SetTangentSpace(const int face_num, const int vert_num, mikk::float3 T, bool orientation)
+ {
+ const int corner_index = CornerIndex(face_num, vert_num);
+ tangent[corner_index] = make_float3(T.x, T.y, T.z);
+ if (tangent_sign != NULL) {
+ tangent_sign[corner_index] = orientation ? 1.0f : -1.0f;
+ }
}
-}
+
+ const Mesh *mesh;
+ int num_faces;
+
+ float3 *vertex_normal;
+ float2 *texface;
+ float3 *orco;
+ float3 orco_loc, inv_orco_size;
+
+ float3 *tangent;
+ float *tangent_sign;
+};
static void mikk_compute_tangents(
const BL::Mesh &b_mesh, const char *layer_name, Mesh *mesh, bool need_sign, bool active_render)
{
/* Create tangent attributes. */
- AttributeSet &attributes = (mesh->get_num_subd_faces()) ? mesh->subd_attributes :
- mesh->attributes;
+ const bool is_subd = mesh->get_num_subd_faces();
+ AttributeSet &attributes = is_subd ? mesh->subd_attributes : mesh->attributes;
Attribute *attr;
ustring name;
if (layer_name != NULL) {
@@ -247,24 +218,18 @@ static void mikk_compute_tangents(
}
tangent_sign = attr_sign->data_float();
}
+
/* Setup userdata. */
- MikkUserData userdata(b_mesh, layer_name, mesh, tangent, tangent_sign);
- /* Setup interface. */
- SMikkTSpaceInterface sm_interface;
- memset(&sm_interface, 0, sizeof(sm_interface));
- sm_interface.m_getNumFaces = mikk_get_num_faces;
- sm_interface.m_getNumVerticesOfFace = mikk_get_num_verts_of_face;
- sm_interface.m_getPosition = mikk_get_position;
- sm_interface.m_getTexCoord = mikk_get_texture_coordinate;
- sm_interface.m_getNormal = mikk_get_normal;
- sm_interface.m_setTSpaceBasic = mikk_set_tangent_space;
- /* Setup context. */
- SMikkTSpaceContext context;
- memset(&context, 0, sizeof(context));
- context.m_pUserData = &userdata;
- context.m_pInterface = &sm_interface;
- /* Compute tangents. */
- genTangSpaceDefault(&context);
+ if (is_subd) {
+ MikkMeshWrapper<true> userdata(b_mesh, layer_name, mesh, tangent, tangent_sign);
+ /* Compute tangents. */
+ mikk::Mikktspace(userdata).genTangSpace();
+ }
+ else {
+ MikkMeshWrapper<false> userdata(b_mesh, layer_name, mesh, tangent, tangent_sign);
+ /* Compute tangents. */
+ mikk::Mikktspace(userdata).genTangSpace();
+ }
}
template<typename TypeInCycles, typename GetValueAtIndex>
@@ -277,10 +242,15 @@ static void fill_generic_attribute(BL::Mesh &b_mesh,
switch (b_domain) {
case BL::Attribute::domain_CORNER: {
if (subdivision) {
- for (BL::MeshPolygon &p : b_mesh.polygons) {
- int n = p.loop_total();
- for (int i = 0; i < n; i++) {
- *data = get_value_at_index(p.loop_start() + i);
+ const int polys_num = b_mesh.polygons.length();
+ if (polys_num == 0) {
+ return;
+ }
+ const MPoly *polys = static_cast<const MPoly *>(b_mesh.polygons[0].ptr.data);
+ for (int i = 0; i < polys_num; i++) {
+ const MPoly &b_poly = polys[i];
+ for (int j = 0; j < b_poly.totloop; j++) {
+ *data = get_value_at_index(b_poly.loopstart + j);
data++;
}
}
@@ -297,27 +267,32 @@ static void fill_generic_attribute(BL::Mesh &b_mesh,
break;
}
case BL::Attribute::domain_EDGE: {
+ const size_t edges_num = b_mesh.edges.length();
+ if (edges_num == 0) {
+ return;
+ }
if constexpr (std::is_same_v<TypeInCycles, uchar4>) {
/* uchar4 edge attributes do not exist, and averaging in place
* would not work. */
assert(0);
}
else {
- /* Average edge attributes at vertices. */
- const size_t num_verts = b_mesh.vertices.length();
- vector<int> count(num_verts, 0);
-
- for (BL::MeshEdge &e : b_mesh.edges) {
- BL::Array<int, 2> vertices = e.vertices();
- TypeInCycles value = get_value_at_index(e.index());
+ const MEdge *edges = static_cast<const MEdge *>(b_mesh.edges[0].ptr.data);
+ const size_t verts_num = b_mesh.vertices.length();
+ vector<int> count(verts_num, 0);
- data[vertices[0]] += value;
- data[vertices[1]] += value;
- count[vertices[0]]++;
- count[vertices[1]]++;
+ /* Average edge attributes at vertices. */
+ for (int i = 0; i < edges_num; i++) {
+ TypeInCycles value = get_value_at_index(i);
+
+ const MEdge &b_edge = edges[i];
+ data[b_edge.v1] += value;
+ data[b_edge.v2] += value;
+ count[b_edge.v1]++;
+ count[b_edge.v2]++;
}
- for (size_t i = 0; i < num_verts; i++) {
+ for (size_t i = 0; i < verts_num; i++) {
if (count[i] > 1) {
data[i] /= (float)count[i];
}
@@ -601,6 +576,12 @@ static void attr_create_uv_map(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh)
static void attr_create_subd_uv_map(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh, bool subdivide_uvs)
{
+ const int polys_num = b_mesh.polygons.length();
+ if (polys_num == 0) {
+ return;
+ }
+ const MPoly *polys = static_cast<const MPoly *>(b_mesh.polygons[0].ptr.data);
+
if (!b_mesh.uv_layers.empty()) {
BL::Mesh::uv_layers_iterator l;
int i = 0;
@@ -634,10 +615,10 @@ static void attr_create_subd_uv_map(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh,
float2 *fdata = uv_attr->data_float2();
- for (BL::MeshPolygon &p : b_mesh.polygons) {
- int n = p.loop_total();
- for (int j = 0; j < n; j++) {
- *(fdata++) = get_float2(l->data[p.loop_start() + j].uv());
+ for (int i = 0; i < polys_num; i++) {
+ const MPoly &b_poly = polys[i];
+ for (int j = 0; j < b_poly.totloop; j++) {
+ *(fdata++) = get_float2(l->data[b_poly.loopstart + j].uv());
}
}
}
@@ -700,6 +681,8 @@ static void attr_create_pointiness(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh, b
if (num_verts == 0) {
return;
}
+ const MVert *verts = static_cast<const MVert *>(b_mesh.vertices[0].ptr.data);
+
/* STEP 1: Find out duplicated vertices and point duplicates to a single
* original vertex.
*/
@@ -752,10 +735,12 @@ static void attr_create_pointiness(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh, b
*/
vector<float3> vert_normal(num_verts, zero_float3());
/* First we accumulate all vertex normals in the original index. */
+ const float(*b_vert_normals)[3] = static_cast<const float(*)[3]>(
+ b_mesh.vertex_normals[0].ptr.data);
for (int vert_index = 0; vert_index < num_verts; ++vert_index) {
- const float3 normal = get_float3(b_mesh.vertices[vert_index].normal());
+ const float *b_vert_normal = b_vert_normals[vert_index];
const int orig_index = vert_orig_index[vert_index];
- vert_normal[orig_index] += normal;
+ vert_normal[orig_index] += make_float3(b_vert_normal[0], b_vert_normal[1], b_vert_normal[2]);
}
/* Then we normalize the accumulated result and flush it to all duplicates
* as well.
@@ -768,18 +753,24 @@ static void attr_create_pointiness(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh, b
vector<int> counter(num_verts, 0);
vector<float> raw_data(num_verts, 0.0f);
vector<float3> edge_accum(num_verts, zero_float3());
- BL::Mesh::edges_iterator e;
EdgeMap visited_edges;
- int edge_index = 0;
memset(&counter[0], 0, sizeof(int) * counter.size());
- for (b_mesh.edges.begin(e); e != b_mesh.edges.end(); ++e, ++edge_index) {
- const int v0 = vert_orig_index[b_mesh.edges[edge_index].vertices()[0]],
- v1 = vert_orig_index[b_mesh.edges[edge_index].vertices()[1]];
+
+ const MEdge *edges = static_cast<MEdge *>(b_mesh.edges[0].ptr.data);
+ const int edges_num = b_mesh.edges.length();
+
+ for (int i = 0; i < edges_num; i++) {
+ const MEdge &b_edge = edges[i];
+ const int v0 = vert_orig_index[b_edge.v1];
+ const int v1 = vert_orig_index[b_edge.v2];
if (visited_edges.exists(v0, v1)) {
continue;
}
visited_edges.insert(v0, v1);
- float3 co0 = get_float3(b_mesh.vertices[v0].co()), co1 = get_float3(b_mesh.vertices[v1].co());
+ const MVert &b_vert_0 = verts[v0];
+ const MVert &b_vert_1 = verts[v1];
+ float3 co0 = make_float3(b_vert_0.co[0], b_vert_0.co[1], b_vert_0.co[2]);
+ float3 co1 = make_float3(b_vert_1.co[0], b_vert_1.co[1], b_vert_1.co[2]);
float3 edge = normalize(co1 - co0);
edge_accum[v0] += edge;
edge_accum[v1] += -edge;
@@ -807,11 +798,11 @@ static void attr_create_pointiness(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh, b
float *data = attr->data_float();
memcpy(data, &raw_data[0], sizeof(float) * raw_data.size());
memset(&counter[0], 0, sizeof(int) * counter.size());
- edge_index = 0;
visited_edges.clear();
- for (b_mesh.edges.begin(e); e != b_mesh.edges.end(); ++e, ++edge_index) {
- const int v0 = vert_orig_index[b_mesh.edges[edge_index].vertices()[0]],
- v1 = vert_orig_index[b_mesh.edges[edge_index].vertices()[1]];
+ for (int i = 0; i < edges_num; i++) {
+ const MEdge &b_edge = edges[i];
+ const int v0 = vert_orig_index[b_edge.v1];
+ const int v1 = vert_orig_index[b_edge.v2];
if (visited_edges.exists(v0, v1)) {
continue;
}
@@ -850,6 +841,7 @@ static void attr_create_random_per_island(Scene *scene,
return;
}
+ const int polys_num = b_mesh.polygons.length();
int number_of_vertices = b_mesh.vertices.length();
if (number_of_vertices == 0) {
return;
@@ -857,8 +849,11 @@ static void attr_create_random_per_island(Scene *scene,
DisjointSet vertices_sets(number_of_vertices);
- for (BL::MeshEdge &e : b_mesh.edges) {
- vertices_sets.join(e.vertices()[0], e.vertices()[1]);
+ const MEdge *edges = static_cast<MEdge *>(b_mesh.edges[0].ptr.data);
+ const int edges_num = b_mesh.edges.length();
+
+ for (int i = 0; i < edges_num; i++) {
+ vertices_sets.join(edges[i].v1, edges[i].v2);
}
AttributeSet &attributes = (subdivision) ? mesh->subd_attributes : mesh->attributes;
@@ -871,14 +866,37 @@ static void attr_create_random_per_island(Scene *scene,
}
}
else {
- for (BL::MeshPolygon &p : b_mesh.polygons) {
- data[p.index()] = hash_uint_to_float(vertices_sets.find(p.vertices()[0]));
+ if (polys_num != 0) {
+ const MPoly *polys = static_cast<const MPoly *>(b_mesh.polygons[0].ptr.data);
+ const MLoop *loops = static_cast<const MLoop *>(b_mesh.loops[0].ptr.data);
+ for (int i = 0; i < polys_num; i++) {
+ const MPoly &b_poly = polys[i];
+ const MLoop &b_loop = loops[b_poly.loopstart];
+ data[i] = hash_uint_to_float(vertices_sets.find(b_loop.v));
+ }
}
}
}
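The per-island attribute relies on a disjoint-set structure: joining the two vertices of every edge partitions the mesh into connected islands, and hashing one representative vertex per face then gives every island a stable random value. A minimal union-find sketch with path compression (hypothetical standalone type, not the Cycles DisjointSet class):

#include <numeric>
#include <vector>

struct UnionFindSketch {
  std::vector<int> parent;

  explicit UnionFindSketch(int size) : parent(size)
  {
    std::iota(parent.begin(), parent.end(), 0); /* Every vertex starts as its own island. */
  }

  int find(int v)
  {
    /* Path compression: point the whole chain directly at the root. */
    return (parent[v] == v) ? v : (parent[v] = find(parent[v]));
  }

  void join(int a, int b)
  {
    parent[find(a)] = find(b);
  }
};

After joining every edge, find() returns the same representative for all vertices of an island, so hashing the first loop vertex of each face yields a constant value per island.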
/* Create Mesh */
+static std::optional<BL::IntAttribute> find_material_index_attribute(BL::Mesh b_mesh)
+{
+ for (BL::Attribute &b_attribute : b_mesh.attributes) {
+ if (b_attribute.domain() != BL::Attribute::domain_FACE) {
+ continue;
+ }
+ if (b_attribute.data_type() != BL::Attribute::data_type_INT) {
+ continue;
+ }
+ if (b_attribute.name() != "material_index") {
+ continue;
+ }
+ return BL::IntAttribute{b_attribute};
+ }
+ return std::nullopt;
+}
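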
+
static void create_mesh(Scene *scene,
Mesh *mesh,
BL::Mesh &b_mesh,
@@ -890,6 +908,7 @@ static void create_mesh(Scene *scene,
{
/* count vertices and faces */
int numverts = b_mesh.vertices.length();
+ const int polys_num = b_mesh.polygons.length();
int numfaces = (!subdivision) ? b_mesh.loop_triangles.length() : b_mesh.polygons.length();
int numtris = 0;
int numcorners = 0;
@@ -902,13 +921,17 @@ static void create_mesh(Scene *scene,
return;
}
+ const MVert *verts = static_cast<const MVert *>(b_mesh.vertices[0].ptr.data);
+
if (!subdivision) {
numtris = numfaces;
}
else {
- for (BL::MeshPolygon &p : b_mesh.polygons) {
- numngons += (p.loop_total() == 4) ? 0 : 1;
- numcorners += p.loop_total();
+ const MPoly *polys = static_cast<const MPoly *>(b_mesh.polygons[0].ptr.data);
+ for (int i = 0; i < polys_num; i++) {
+ const MPoly &b_poly = polys[i];
+ numngons += (b_poly.totloop == 4) ? 0 : 1;
+ numcorners += b_poly.totloop;
}
}
@@ -920,17 +943,23 @@ static void create_mesh(Scene *scene,
mesh->reserve_mesh(numverts, numtris);
/* create vertex coordinates and normals */
- BL::Mesh::vertices_iterator v;
- for (b_mesh.vertices.begin(v); v != b_mesh.vertices.end(); ++v)
- mesh->add_vertex(get_float3(v->co()));
+ for (int i = 0; i < numverts; i++) {
+ const MVert &b_vert = verts[i];
+ mesh->add_vertex(make_float3(b_vert.co[0], b_vert.co[1], b_vert.co[2]));
+ }
AttributeSet &attributes = (subdivision) ? mesh->subd_attributes : mesh->attributes;
Attribute *attr_N = attributes.add(ATTR_STD_VERTEX_NORMAL);
float3 *N = attr_N->data_float3();
- for (b_mesh.vertices.begin(v); v != b_mesh.vertices.end(); ++v, ++N)
- *N = get_float3(v->normal());
- N = attr_N->data_float3();
+ if (subdivision || !use_loop_normals) {
+ const float(*b_vert_normals)[3] = static_cast<const float(*)[3]>(
+ b_mesh.vertex_normals[0].ptr.data);
+ for (int i = 0; i < numverts; i++) {
+ const float *b_vert_normal = b_vert_normals[i];
+ N[i] = make_float3(b_vert_normal[0], b_vert_normal[1], b_vert_normal[2]);
+ }
+ }
/* create generated coordinates from undeformed coordinates */
const bool need_default_tangent = (subdivision == false) && (b_mesh.uv_layers.empty()) &&
@@ -945,19 +974,30 @@ static void create_mesh(Scene *scene,
float3 *generated = attr->data_float3();
size_t i = 0;
+ BL::Mesh::vertices_iterator v;
for (b_mesh.vertices.begin(v); v != b_mesh.vertices.end(); ++v) {
generated[i++] = get_float3(v->undeformed_co()) * size - loc;
}
}
+ std::optional<BL::IntAttribute> material_indices = find_material_index_attribute(b_mesh);
+ auto get_material_index = [&](const int poly_index) -> int {
+ if (material_indices) {
+ return clamp(material_indices->data[poly_index].value(), 0, used_shaders.size() - 1);
+ }
+ return 0;
+ };
+
/* create faces */
+ const MPoly *polys = static_cast<const MPoly *>(b_mesh.polygons[0].ptr.data);
if (!subdivision) {
for (BL::MeshLoopTriangle &t : b_mesh.loop_triangles) {
- BL::MeshPolygon p = b_mesh.polygons[t.polygon_index()];
+ const int poly_index = t.polygon_index();
+ const MPoly &b_poly = polys[poly_index];
int3 vi = get_int3(t.vertices());
- int shader = clamp(p.material_index(), 0, used_shaders.size() - 1);
- bool smooth = p.use_smooth() || use_loop_normals;
+ int shader = get_material_index(poly_index);
+ bool smooth = (b_poly.flag & ME_SMOOTH) || use_loop_normals;
if (use_loop_normals) {
BL::Array<float, 9> loop_normals = t.split_normals();
@@ -977,15 +1017,19 @@ static void create_mesh(Scene *scene,
else {
vector<int> vi;
- for (BL::MeshPolygon &p : b_mesh.polygons) {
- int n = p.loop_total();
- int shader = clamp(p.material_index(), 0, used_shaders.size() - 1);
- bool smooth = p.use_smooth() || use_loop_normals;
+ const MLoop *loops = static_cast<const MLoop *>(b_mesh.loops[0].ptr.data);
+
+ for (int i = 0; i < numfaces; i++) {
+ const MPoly &b_poly = polys[i];
+ int n = b_poly.totloop;
+ int shader = get_material_index(i);
+ bool smooth = (b_poly.flag & ME_SMOOTH) || use_loop_normals;
vi.resize(n);
for (int i = 0; i < n; i++) {
/* NOTE: Autosmooth is already taken care of. */
- vi[i] = b_mesh.loops[p.loop_start() + i].vertex_index();
+
+ vi[i] = loops[b_poly.loopstart + i].v;
}
/* create subd faces */
@@ -1038,27 +1082,33 @@ static void create_subd_mesh(Scene *scene,
create_mesh(scene, mesh, b_mesh, used_shaders, need_motion, motion_scale, true, subdivide_uvs);
- /* export creases */
- size_t num_creases = 0;
+ const int edges_num = b_mesh.edges.length();
+
+ if (edges_num != 0) {
+ size_t num_creases = 0;
+ const MEdge *edges = static_cast<MEdge *>(b_mesh.edges[0].ptr.data);
- for (BL::MeshEdge &e : b_mesh.edges) {
- if (e.crease() != 0.0f) {
- num_creases++;
+ for (int i = 0; i < edges_num; i++) {
+ const MEdge &b_edge = edges[i];
+ if (b_edge.crease != 0) {
+ num_creases++;
+ }
}
- }
- mesh->reserve_subd_creases(num_creases);
+ mesh->reserve_subd_creases(num_creases);
- for (BL::MeshEdge &e : b_mesh.edges) {
- if (e.crease() != 0.0f) {
- mesh->add_edge_crease(e.vertices()[0], e.vertices()[1], e.crease());
+ for (int i = 0; i < edges_num; i++) {
+ const MEdge &b_edge = edges[i];
+ if (b_edge.crease != 0) {
+ mesh->add_edge_crease(b_edge.v1, b_edge.v2, float(b_edge.crease) / 255.0f);
+ }
}
- }
- for (BL::MeshVertexCreaseLayer &c : b_mesh.vertex_creases) {
- for (int i = 0; i < c.data.length(); ++i) {
- if (c.data[i].value() != 0.0f) {
- mesh->add_vertex_crease(i, c.data[i].value());
+ for (BL::MeshVertexCreaseLayer &c : b_mesh.vertex_creases) {
+ for (int i = 0; i < c.data.length(); ++i) {
+ if (c.data[i].value() != 0.0f) {
+ mesh->add_vertex_crease(i, c.data[i].value());
+ }
}
}
}
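The subdivision creases are read from the byte-sized MEdge::crease field, so the export converts them back to the 0..1 float range the crease API expects. A small sketch of that conversion (hypothetical helper name):

/* Map the unsigned byte crease (0..255) to the float range used by
 * add_edge_crease(): 0 means no crease, 255 means a fully sharp edge. */
static inline float crease_byte_to_float(unsigned char crease)
{
  return float(crease) / 255.0f;
}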
@@ -1179,6 +1229,12 @@ void BlenderSync::sync_mesh_motion(BL::Depsgraph b_depsgraph,
/* TODO(sergey): Perform preliminary check for number of vertices. */
if (b_mesh) {
+ const int b_verts_num = b_mesh.vertices.length();
+ if (b_verts_num == 0) {
+ free_object_to_mesh(b_data, b_ob_info, b_mesh);
+ return;
+ }
+
/* Export deformed coordinates. */
/* Find attributes. */
Attribute *attr_mP = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
@@ -1196,33 +1252,41 @@ void BlenderSync::sync_mesh_motion(BL::Depsgraph b_depsgraph,
/* Load vertex data from mesh. */
float3 *mP = attr_mP->data_float3() + motion_step * numverts;
float3 *mN = (attr_mN) ? attr_mN->data_float3() + motion_step * numverts : NULL;
+
+ const MVert *verts = static_cast<const MVert *>(b_mesh.vertices[0].ptr.data);
+
/* NOTE: We don't copy more than the existing number of vertices to prevent
* possible memory corruption.
*/
- BL::Mesh::vertices_iterator v;
- int i = 0;
- for (b_mesh.vertices.begin(v); v != b_mesh.vertices.end() && i < numverts; ++v, ++i) {
- mP[i] = get_float3(v->co());
- if (mN)
- mN[i] = get_float3(v->normal());
+ for (int i = 0; i < std::min<size_t>(b_verts_num, numverts); i++) {
+ const MVert &b_vert = verts[i];
+ mP[i] = make_float3(b_vert.co[0], b_vert.co[1], b_vert.co[2]);
+ }
+ if (mN) {
+ const float(*b_vert_normals)[3] = static_cast<const float(*)[3]>(
+ b_mesh.vertex_normals[0].ptr.data);
+ for (int i = 0; i < std::min<size_t>(b_verts_num, numverts); i++) {
+ const float *b_vert_normal = b_vert_normals[i];
+ mN[i] = make_float3(b_vert_normal[0], b_vert_normal[1], b_vert_normal[2]);
+ }
}
if (new_attribute) {
/* In case of new attribute, we verify if there really was any motion. */
- if (b_mesh.vertices.length() != numverts ||
+ if (b_verts_num != numverts ||
memcmp(mP, &mesh->get_verts()[0], sizeof(float3) * numverts) == 0) {
/* no motion, remove attributes again */
- if (b_mesh.vertices.length() != numverts) {
- VLOG(1) << "Topology differs, disabling motion blur for object " << ob_name;
+ if (b_verts_num != numverts) {
+ VLOG_WARNING << "Topology differs, disabling motion blur for object " << ob_name;
}
else {
- VLOG(1) << "No actual deformation motion for object " << ob_name;
+ VLOG_DEBUG << "No actual deformation motion for object " << ob_name;
}
mesh->attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
if (attr_mN)
mesh->attributes.remove(ATTR_STD_MOTION_VERTEX_NORMAL);
}
else if (motion_step > 0) {
- VLOG(1) << "Filling deformation motion for object " << ob_name;
+ VLOG_DEBUG << "Filling deformation motion for object " << ob_name;
/* motion, fill up previous steps that we might have skipped because
* they had no motion, but we need them anyway now */
float3 *P = &mesh->get_verts()[0];
@@ -1235,9 +1299,9 @@ void BlenderSync::sync_mesh_motion(BL::Depsgraph b_depsgraph,
}
}
else {
- if (b_mesh.vertices.length() != numverts) {
- VLOG(1) << "Topology differs, discarding motion blur for object " << ob_name << " at time "
- << motion_step;
+ if (b_verts_num != numverts) {
+ VLOG_WARNING << "Topology differs, discarding motion blur for object " << ob_name
+ << " at time " << motion_step;
memcpy(mP, &mesh->get_verts()[0], sizeof(float3) * numverts);
if (mN != NULL) {
memcpy(mN, attr_N->data_float3(), sizeof(float3) * numverts);
diff --git a/intern/cycles/blender/object.cpp b/intern/cycles/blender/object.cpp
index 9b08b564b25..109408c354d 100644
--- a/intern/cycles/blender/object.cpp
+++ b/intern/cycles/blender/object.cpp
@@ -66,12 +66,6 @@ bool BlenderSync::object_is_geometry(BObjectInfo &b_ob_info)
return true;
}
- /* Other object types that are not meshes but evaluate to meshes are presented to render engines
- * as separate instance objects. Metaballs have not been affected by that change yet. */
- if (type == BL::Object::type_META) {
- return true;
- }
-
return b_ob_data.is_a(&RNA_Mesh);
}
@@ -762,7 +756,7 @@ void BlenderSync::sync_motion(BL::RenderSettings &b_render,
continue;
}
- VLOG(1) << "Synchronizing motion for the relative time " << relative_time << ".";
+ VLOG_WORK << "Synchronizing motion for the relative time " << relative_time << ".";
/* fixed shutter time to get previous and next frame for motion pass */
float shuttertime = scene->motion_shutter_time();
diff --git a/intern/cycles/blender/pointcloud.cpp b/intern/cycles/blender/pointcloud.cpp
index 0312ad87a70..b4e90859877 100644
--- a/intern/cycles/blender/pointcloud.cpp
+++ b/intern/cycles/blender/pointcloud.cpp
@@ -1,8 +1,10 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright 2011-2022 Blender Foundation */
-#include "scene/pointcloud.h"
+#include <optional>
+
#include "scene/attribute.h"
+#include "scene/pointcloud.h"
#include "scene/scene.h"
#include "blender/sync.h"
@@ -138,6 +140,36 @@ static void copy_attributes(PointCloud *pointcloud,
}
}
+static std::optional<BL::FloatAttribute> find_radius_attribute(BL::PointCloud b_pointcloud)
+{
+ for (BL::Attribute &b_attribute : b_pointcloud.attributes) {
+ if (b_attribute.name() != "radius") {
+ continue;
+ }
+ if (b_attribute.data_type() != BL::Attribute::data_type_FLOAT) {
+ continue;
+ }
+ return BL::FloatAttribute{b_attribute};
+ }
+ return std::nullopt;
+}
+
+static BL::FloatVectorAttribute find_position_attribute(BL::PointCloud b_pointcloud)
+{
+ for (BL::Attribute &b_attribute : b_pointcloud.attributes) {
+ if (b_attribute.name() != "position") {
+ continue;
+ }
+ if (b_attribute.data_type() != BL::Attribute::data_type_FLOAT_VECTOR) {
+ continue;
+ }
+ return BL::FloatVectorAttribute{b_attribute};
+ }
+ /* The position attribute must exist. */
+ assert(false);
+ return BL::FloatVectorAttribute{b_pointcloud.attributes[0]};
+}
+
static void export_pointcloud(Scene *scene,
PointCloud *pointcloud,
BL::PointCloud b_pointcloud,
@@ -156,18 +188,18 @@ static void export_pointcloud(Scene *scene,
const int num_points = b_pointcloud.points.length();
pointcloud->reserve(num_points);
+ BL::FloatVectorAttribute b_attr_position = find_position_attribute(b_pointcloud);
+ std::optional<BL::FloatAttribute> b_attr_radius = find_radius_attribute(b_pointcloud);
+
/* Export points. */
- BL::PointCloud::points_iterator b_point_iter;
- for (b_pointcloud.points.begin(b_point_iter); b_point_iter != b_pointcloud.points.end();
- ++b_point_iter) {
- BL::Point b_point = *b_point_iter;
- const float3 co = get_float3(b_point.co());
- const float radius = b_point.radius();
+ for (int i = 0; i < num_points; i++) {
+ const float3 co = get_float3(b_attr_position.data[i].vector());
+ const float radius = b_attr_radius ? b_attr_radius->data[i].value() : 0.0f;
pointcloud->add_point(co, radius);
/* Random number per point. */
if (attr_random != NULL) {
- attr_random->add(hash_uint2_to_float(b_point.index(), 0));
+ attr_random->add(hash_uint2_to_float(i, 0));
}
}
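Point positions and radii now come from generic attributes instead of the old per-point accessors: "position" is required, while "radius" is optional and falls back to zero when the attribute is missing. A standalone sketch of that fallback, with hypothetical array types in place of the RNA attribute wrappers:

#include <array>
#include <optional>
#include <vector>

using PointPosition = std::array<float, 3>;

/* Read one point, defaulting to a zero radius when no "radius" attribute exists. */
static void read_point(const std::vector<PointPosition> &positions,
                       const std::optional<std::vector<float>> &radii,
                       const int i,
                       PointPosition &co,
                       float &radius)
{
  co = positions[i];
  radius = radii ? (*radii)[i] : 0.0f;
}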
@@ -195,14 +227,15 @@ static void export_pointcloud_motion(PointCloud *pointcloud,
int num_motion_points = 0;
const array<float3> &pointcloud_points = pointcloud->get_points();
- BL::PointCloud::points_iterator b_point_iter;
- for (b_pointcloud.points.begin(b_point_iter); b_point_iter != b_pointcloud.points.end();
- ++b_point_iter) {
- BL::Point b_point = *b_point_iter;
+ BL::FloatVectorAttribute b_attr_position = find_position_attribute(b_pointcloud);
+ std::optional<BL::FloatAttribute> b_attr_radius = find_radius_attribute(b_pointcloud);
+ for (int i = 0; i < num_points; i++) {
if (num_motion_points < num_points) {
- float3 P = get_float3(b_point.co());
- P.w = b_point.radius();
+ const float3 co = get_float3(b_attr_position.data[i].vector());
+ const float radius = b_attr_radius ? b_attr_radius->data[i].value() : 0.0f;
+ float3 P = co;
+ P.w = radius;
mP[num_motion_points] = P;
have_motion = have_motion || (P != pointcloud_points[num_motion_points]);
num_motion_points++;
diff --git a/intern/cycles/blender/python.cpp b/intern/cycles/blender/python.cpp
index 7bd1ad2cafe..1e33b0b7207 100644
--- a/intern/cycles/blender/python.cpp
+++ b/intern/cycles/blender/python.cpp
@@ -59,8 +59,6 @@ static void debug_flags_sync_from_scene(BL::Scene b_scene)
{
DebugFlagsRef flags = DebugFlags();
PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");
- /* Synchronize shared flags. */
- flags.viewport_static_bvh = get_enum(cscene, "debug_bvh_type");
/* Synchronize CPU flags. */
flags.cpu.avx2 = get_boolean(cscene, "debug_use_cpu_avx2");
flags.cpu.avx = get_boolean(cscene, "debug_use_cpu_avx");
@@ -140,8 +138,6 @@ static PyObject *init_func(PyObject * /*self*/, PyObject *args)
BlenderSession::headless = headless;
- DebugFlags().running_inside_blender = true;
-
Py_RETURN_NONE;
}
@@ -871,18 +867,20 @@ static PyObject *enable_print_stats_func(PyObject * /*self*/, PyObject * /*args*
static PyObject *get_device_types_func(PyObject * /*self*/, PyObject * /*args*/)
{
vector<DeviceType> device_types = Device::available_types();
- bool has_cuda = false, has_optix = false, has_hip = false, has_metal = false;
+ bool has_cuda = false, has_optix = false, has_hip = false, has_metal = false, has_oneapi = false;
foreach (DeviceType device_type, device_types) {
has_cuda |= (device_type == DEVICE_CUDA);
has_optix |= (device_type == DEVICE_OPTIX);
has_hip |= (device_type == DEVICE_HIP);
has_metal |= (device_type == DEVICE_METAL);
+ has_oneapi |= (device_type == DEVICE_ONEAPI);
}
- PyObject *list = PyTuple_New(4);
+ PyObject *list = PyTuple_New(5);
PyTuple_SET_ITEM(list, 0, PyBool_FromLong(has_cuda));
PyTuple_SET_ITEM(list, 1, PyBool_FromLong(has_optix));
PyTuple_SET_ITEM(list, 2, PyBool_FromLong(has_hip));
PyTuple_SET_ITEM(list, 3, PyBool_FromLong(has_metal));
+ PyTuple_SET_ITEM(list, 4, PyBool_FromLong(has_oneapi));
return list;
}
@@ -914,6 +912,9 @@ static PyObject *set_device_override_func(PyObject * /*self*/, PyObject *arg)
else if (override == "METAL") {
BlenderSession::device_override = DEVICE_MASK_METAL;
}
+ else if (override == "ONEAPI") {
+ BlenderSession::device_override = DEVICE_MASK_ONEAPI;
+ }
else {
printf("\nError: %s is not a valid Cycles device.\n", override.c_str());
Py_RETURN_FALSE;
diff --git a/intern/cycles/blender/session.cpp b/intern/cycles/blender/session.cpp
index 87f051ba50b..321771b67a5 100644
--- a/intern/cycles/blender/session.cpp
+++ b/intern/cycles/blender/session.cpp
@@ -110,7 +110,8 @@ void BlenderSession::create_session()
{
const SessionParams session_params = BlenderSync::get_session_params(
b_engine, b_userpref, b_scene, background);
- const SceneParams scene_params = BlenderSync::get_scene_params(b_scene, background);
+ const SceneParams scene_params = BlenderSync::get_scene_params(
+ b_scene, background, use_developer_ui);
const bool session_pause = BlenderSync::get_session_pause(b_scene, background);
/* reset status/progress */
@@ -196,7 +197,8 @@ void BlenderSession::reset_session(BL::BlendData &b_data, BL::Depsgraph &b_depsg
const SessionParams session_params = BlenderSync::get_session_params(
b_engine, b_userpref, b_scene, background);
- const SceneParams scene_params = BlenderSync::get_scene_params(b_scene, background);
+ const SceneParams scene_params = BlenderSync::get_scene_params(
+ b_scene, background, use_developer_ui);
if (scene->params.modified(scene_params) || session->params.modified(session_params) ||
!this->b_render.use_persistent_data()) {
@@ -458,8 +460,8 @@ void BlenderSession::render(BL::Depsgraph &b_depsgraph_)
double total_time, render_time;
session->progress.get_time(total_time, render_time);
- VLOG(1) << "Total render time: " << total_time;
- VLOG(1) << "Render time (without synchronization): " << render_time;
+ VLOG_INFO << "Total render time: " << total_time;
+ VLOG_INFO << "Render time (without synchronization): " << render_time;
}
void BlenderSession::render_frame_finish()
@@ -657,6 +659,7 @@ void BlenderSession::bake(BL::Depsgraph &b_depsgraph_,
session->set_display_driver(nullptr);
session->set_output_driver(make_unique<BlenderOutputDriver>(b_engine));
+ session->full_buffer_written_cb = [&](string_view filename) { full_buffer_written(filename); };
/* Sync scene. */
BL::Object b_camera_override(b_engine.camera_override());
@@ -698,6 +701,10 @@ void BlenderSession::bake(BL::Depsgraph &b_depsgraph_,
BufferParams buffer_params;
buffer_params.width = bake_width;
buffer_params.height = bake_height;
+ buffer_params.window_width = bake_width;
+ buffer_params.window_height = bake_height;
+ /* Unique layer name for multi-image baking. */
+ buffer_params.layer = string_printf("bake_%d\n", (int)full_buffer_files_.size());
/* Update session. */
session->reset(session_params, buffer_params);
@@ -711,8 +718,6 @@ void BlenderSession::bake(BL::Depsgraph &b_depsgraph_,
session->start();
session->wait();
}
-
- session->set_output_driver(nullptr);
}
void BlenderSession::synchronize(BL::Depsgraph &b_depsgraph_)
@@ -724,7 +729,8 @@ void BlenderSession::synchronize(BL::Depsgraph &b_depsgraph_)
/* on session/scene parameter changes, we recreate session entirely */
const SessionParams session_params = BlenderSync::get_session_params(
b_engine, b_userpref, b_scene, background);
- const SceneParams scene_params = BlenderSync::get_scene_params(b_scene, background);
+ const SceneParams scene_params = BlenderSync::get_scene_params(
+ b_scene, background, use_developer_ui);
const bool session_pause = BlenderSync::get_session_pause(b_scene, background);
if (session->params.modified(session_params) || scene->params.modified(scene_params)) {
@@ -1056,8 +1062,8 @@ void BlenderSession::ensure_display_driver_if_needed()
return;
}
- unique_ptr<BlenderDisplayDriver> display_driver = make_unique<BlenderDisplayDriver>(b_engine,
- b_scene);
+ unique_ptr<BlenderDisplayDriver> display_driver = make_unique<BlenderDisplayDriver>(
+ b_engine, b_scene, background);
display_driver_ = display_driver.get();
session->set_display_driver(move(display_driver));
}
diff --git a/intern/cycles/blender/shader.cpp b/intern/cycles/blender/shader.cpp
index 81a64457c88..9505f4ba58f 100644
--- a/intern/cycles/blender/shader.cpp
+++ b/intern/cycles/blender/shader.cpp
@@ -248,6 +248,13 @@ static void get_tex_mapping(TextureNode *mapping, BL::TexMapping &b_mapping)
mapping->set_tex_mapping_z_mapping((TextureMapping::Mapping)b_mapping.mapping_z());
}
+static bool is_image_animated(BL::Image::source_enum b_image_source, BL::ImageUser &b_image_user)
+{
+ return (b_image_source == BL::Image::source_MOVIE ||
+ b_image_source == BL::Image::source_SEQUENCE) &&
+ b_image_user.use_auto_refresh();
+}
+
static ShaderNode *add_node(Scene *scene,
BL::RenderEngine &b_engine,
BL::BlendData &b_data,
@@ -343,6 +350,33 @@ static ShaderNode *add_node(Scene *scene,
mix->set_use_clamp(b_mix_node.use_clamp());
node = mix;
}
+ else if (b_node.is_a(&RNA_ShaderNodeMix)) {
+ BL::ShaderNodeMix b_mix_node(b_node);
+ if (b_mix_node.data_type() == BL::ShaderNodeMix::data_type_VECTOR) {
+ if (b_mix_node.factor_mode() == BL::ShaderNodeMix::factor_mode_UNIFORM) {
+ MixVectorNode *mix_node = graph->create_node<MixVectorNode>();
+ mix_node->set_use_clamp(b_mix_node.clamp_factor());
+ node = mix_node;
+ }
+ else {
+ MixVectorNonUniformNode *mix_node = graph->create_node<MixVectorNonUniformNode>();
+ mix_node->set_use_clamp(b_mix_node.clamp_factor());
+ node = mix_node;
+ }
+ }
+ else if (b_mix_node.data_type() == BL::ShaderNodeMix::data_type_RGBA) {
+ MixColorNode *mix_node = graph->create_node<MixColorNode>();
+ mix_node->set_blend_type((NodeMix)b_mix_node.blend_type());
+ mix_node->set_use_clamp(b_mix_node.clamp_factor());
+ mix_node->set_use_clamp_result(b_mix_node.clamp_result());
+ node = mix_node;
+ }
+ else {
+ MixFloatNode *mix_node = graph->create_node<MixFloatNode>();
+ mix_node->set_use_clamp(b_mix_node.clamp_factor());
+ node = mix_node;
+ }
+ }
else if (b_node.is_a(&RNA_ShaderNodeSeparateRGB)) {
node = graph->create_node<SeparateRGBNode>();
}
@@ -748,10 +782,11 @@ static ShaderNode *add_node(Scene *scene,
get_tex_mapping(image, b_texture_mapping);
if (b_image) {
+ BL::Image::source_enum b_image_source = b_image.source();
PointerRNA colorspace_ptr = b_image.colorspace_settings().ptr;
image->set_colorspace(ustring(get_enum_identifier(colorspace_ptr, "name")));
- image->set_animated(b_image_node.image_user().use_auto_refresh());
+ image->set_animated(is_image_animated(b_image_source, b_image_user));
image->set_alpha_type(get_image_alpha_type(b_image));
array<int> tiles;
@@ -763,9 +798,9 @@ static ShaderNode *add_node(Scene *scene,
/* Builtin images will use callback-based reading because
 * they can only be loaded correctly from the Blender side.
 */
- bool is_builtin = b_image.packed_file() || b_image.source() == BL::Image::source_GENERATED ||
- b_image.source() == BL::Image::source_MOVIE ||
- (b_engine.is_preview() && b_image.source() != BL::Image::source_SEQUENCE);
+ bool is_builtin = b_image.packed_file() || b_image_source == BL::Image::source_GENERATED ||
+ b_image_source == BL::Image::source_MOVIE ||
+ (b_engine.is_preview() && b_image_source != BL::Image::source_SEQUENCE);
if (is_builtin) {
/* for builtin images we're using image datablock name to find an image to
@@ -776,7 +811,7 @@ static ShaderNode *add_node(Scene *scene,
*/
int scene_frame = b_scene.frame_current();
int image_frame = image_user_frame_number(b_image_user, b_image, scene_frame);
- if (b_image.source() != BL::Image::source_TILED) {
+ if (b_image_source != BL::Image::source_TILED) {
image->handle = scene->image_manager->add_image(
new BlenderImageLoader(b_image, image_frame, 0, b_engine.is_preview()),
image->image_params());
@@ -794,7 +829,7 @@ static ShaderNode *add_node(Scene *scene,
}
else {
ustring filename = ustring(
- image_user_file_path(b_image_user, b_image, b_scene.frame_current()));
+ image_user_file_path(b_data, b_image_user, b_image, b_scene.frame_current()));
image->set_filename(filename);
}
}
@@ -812,15 +847,15 @@ static ShaderNode *add_node(Scene *scene,
get_tex_mapping(env, b_texture_mapping);
if (b_image) {
+ BL::Image::source_enum b_image_source = b_image.source();
PointerRNA colorspace_ptr = b_image.colorspace_settings().ptr;
env->set_colorspace(ustring(get_enum_identifier(colorspace_ptr, "name")));
-
- env->set_animated(b_env_node.image_user().use_auto_refresh());
+ env->set_animated(is_image_animated(b_image_source, b_image_user));
env->set_alpha_type(get_image_alpha_type(b_image));
- bool is_builtin = b_image.packed_file() || b_image.source() == BL::Image::source_GENERATED ||
- b_image.source() == BL::Image::source_MOVIE ||
- (b_engine.is_preview() && b_image.source() != BL::Image::source_SEQUENCE);
+ bool is_builtin = b_image.packed_file() || b_image_source == BL::Image::source_GENERATED ||
+ b_image_source == BL::Image::source_MOVIE ||
+ (b_engine.is_preview() && b_image_source != BL::Image::source_SEQUENCE);
if (is_builtin) {
int scene_frame = b_scene.frame_current();
@@ -831,7 +866,7 @@ static ShaderNode *add_node(Scene *scene,
}
else {
env->set_filename(
- ustring(image_user_file_path(b_image_user, b_image, b_scene.frame_current())));
+ ustring(image_user_file_path(b_data, b_image_user, b_image, b_scene.frame_current())));
}
}
node = env;
@@ -928,8 +963,22 @@ static ShaderNode *add_node(Scene *scene,
sky->set_sun_disc(b_sky_node.sun_disc());
sky->set_sun_size(b_sky_node.sun_size());
sky->set_sun_intensity(b_sky_node.sun_intensity());
- sky->set_sun_elevation(b_sky_node.sun_elevation());
- sky->set_sun_rotation(b_sky_node.sun_rotation());
+ /* Patch the sun position so the daylight cycle can be animated while keeping the shading
+ * code simple. */
+ float sun_rotation = b_sky_node.sun_rotation();
+ /* Wrap into [-2PI..2PI] range. */
+ float sun_elevation = fmodf(b_sky_node.sun_elevation(), M_2PI_F);
+ /* Wrap into [-PI..PI] range. */
+ if (fabsf(sun_elevation) >= M_PI_F) {
+ sun_elevation -= copysignf(2.0f, sun_elevation) * M_PI_F;
+ }
+ /* Wrap into [-PI/2..PI/2] range while keeping the same absolute position. */
+ if (sun_elevation >= M_PI_2_F || sun_elevation <= -M_PI_2_F) {
+ sun_elevation = copysignf(M_PI_F, sun_elevation) - sun_elevation;
+ sun_rotation += M_PI_F;
+ }
+ sky->set_sun_elevation(sun_elevation);
+ sky->set_sun_rotation(sun_rotation);
sky->set_altitude(b_sky_node.altitude());
sky->set_air_density(b_sky_node.air_density());
sky->set_dust_density(b_sky_node.dust_density());
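The elevation patching above keeps arbitrary animated values in a range the sky model handles: the angle is folded into [-PI/2, PI/2] while the rotation is shifted by PI so the sun keeps the same apparent position. A standalone sketch of the same folding, with the constants written out instead of taken from the Cycles math headers:

#include <cmath>

static void wrap_sun(float &elevation, float &rotation)
{
  const float pi = 3.14159265358979323846f;
  elevation = std::fmod(elevation, 2.0f * pi);             /* Wrap into (-2PI, 2PI). */
  if (std::fabs(elevation) >= pi) {                        /* Fold into (-PI, PI). */
    elevation -= std::copysign(2.0f, elevation) * pi;
  }
  if (elevation >= pi / 2.0f || elevation <= -pi / 2.0f) { /* Fold into [-PI/2, PI/2]. */
    elevation = std::copysign(pi, elevation) - elevation;
    rotation += pi;
  }
}

Example: an elevation of 2.0 rad (past the zenith) becomes PI - 2.0 = 1.14 rad with the rotation shifted by PI, i.e. the sun ends up behind the original azimuth at the same height.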
@@ -1050,7 +1099,9 @@ static bool node_use_modified_socket_name(ShaderNode *node)
return true;
}
-static ShaderInput *node_find_input_by_name(ShaderNode *node, BL::NodeSocket &b_socket)
+static ShaderInput *node_find_input_by_name(BL::Node b_node,
+ ShaderNode *node,
+ BL::NodeSocket &b_socket)
{
string name = b_socket.identifier();
ShaderInput *input = node->input(name.c_str());
@@ -1060,6 +1111,35 @@ static ShaderInput *node_find_input_by_name(ShaderNode *node, BL::NodeSocket &b_
if (string_startswith(name, "Shader")) {
string_replace(name, "Shader", "Closure");
}
+
+ /* Map the Mix node's internal socket names to the Cycles shader node inputs. */
+ if (b_node.is_a(&RNA_ShaderNodeMix)) {
+ if (string_endswith(name, "Factor_Float")) {
+ string_replace(name, "Factor_Float", "Factor");
+ }
+ else if (string_endswith(name, "Factor_Vector")) {
+ string_replace(name, "Factor_Vector", "Factor");
+ }
+ else if (string_endswith(name, "A_Float")) {
+ string_replace(name, "A_Float", "A");
+ }
+ else if (string_endswith(name, "B_Float")) {
+ string_replace(name, "B_Float", "B");
+ }
+ else if (string_endswith(name, "A_Color")) {
+ string_replace(name, "A_Color", "A");
+ }
+ else if (string_endswith(name, "B_Color")) {
+ string_replace(name, "B_Color", "B");
+ }
+ else if (string_endswith(name, "A_Vector")) {
+ string_replace(name, "A_Vector", "A");
+ }
+ else if (string_endswith(name, "B_Vector")) {
+ string_replace(name, "B_Vector", "B");
+ }
+ }
+
input = node->input(name.c_str());
if (!input) {
@@ -1089,7 +1169,9 @@ static ShaderInput *node_find_input_by_name(ShaderNode *node, BL::NodeSocket &b_
return input;
}
-static ShaderOutput *node_find_output_by_name(ShaderNode *node, BL::NodeSocket &b_socket)
+static ShaderOutput *node_find_output_by_name(BL::Node b_node,
+ ShaderNode *node,
+ BL::NodeSocket &b_socket)
{
string name = b_socket.identifier();
ShaderOutput *output = node->output(name.c_str());
@@ -1100,6 +1182,21 @@ static ShaderOutput *node_find_output_by_name(ShaderNode *node, BL::NodeSocket &
name = "Closure";
output = node->output(name.c_str());
}
+ /* Map the internal socket name for the shader output. */
+ if (b_node.is_a(&RNA_ShaderNodeMix)) {
+ if (string_endswith(name, "Result_Float")) {
+ string_replace(name, "Result_Float", "Result");
+ output = node->output(name.c_str());
+ }
+ else if (string_endswith(name, "Result_Color")) {
+ string_replace(name, "Result_Color", "Result");
+ output = node->output(name.c_str());
+ }
+ else if (string_endswith(name, "Result_Vector")) {
+ string_replace(name, "Result_Vector", "Result");
+ output = node->output(name.c_str());
+ }
+ }
}
return output;
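The new Mix node exposes typed RNA socket identifiers ("A_Float", "B_Color", "Result_Vector", ...) while the Cycles nodes keep the plain names ("A", "B", "Result"), hence the suffix mapping above. A sketch of the same idea using only the standard library (hypothetical helper, not the util_string API):

#include <string>

/* If `name` ends with `suffix`, replace the whole identifier with the plain
 * Cycles socket name and report success. */
static bool map_typed_socket_name(std::string &name,
                                  const std::string &suffix,
                                  const std::string &plain)
{
  if (name.size() >= suffix.size() &&
      name.compare(name.size() - suffix.size(), suffix.size(), suffix) == 0) {
    name = plain;
    return true;
  }
  return false;
}

/* Example: map_typed_socket_name(name, "Factor_Vector", "Factor") turns the vector-mode
 * factor identifier into the single "Factor" input of the mix node. */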
@@ -1245,7 +1342,11 @@ static void add_nodes(Scene *scene,
if (node) {
/* map node sockets for linking */
for (BL::NodeSocket &b_input : b_node.inputs) {
- ShaderInput *input = node_find_input_by_name(node, b_input);
+ if (b_input.is_unavailable()) {
+ /* Skip unavailable sockets. */
+ continue;
+ }
+ ShaderInput *input = node_find_input_by_name(b_node, node, b_input);
if (!input) {
/* XXX should not happen, report error? */
continue;
@@ -1255,7 +1356,11 @@ static void add_nodes(Scene *scene,
set_default_value(input, b_input, b_data, b_ntree);
}
for (BL::NodeSocket &b_output : b_node.outputs) {
- ShaderOutput *output = node_find_output_by_name(node, b_output);
+ if (b_output.is_unavailable()) {
+ /* Skip unavailable sockets. */
+ continue;
+ }
+ ShaderOutput *output = node_find_output_by_name(b_node, node, b_output);
if (!output) {
/* XXX should not happen, report error? */
continue;
diff --git a/intern/cycles/blender/sync.cpp b/intern/cycles/blender/sync.cpp
index 1028c940772..6081c4626f0 100644
--- a/intern/cycles/blender/sync.cpp
+++ b/intern/cycles/blender/sync.cpp
@@ -285,7 +285,7 @@ void BlenderSync::sync_data(BL::RenderSettings &b_render,
free_data_after_sync(b_depsgraph);
- VLOG(1) << "Total time spent synchronizing data: " << timer.get_time();
+ VLOG_INFO << "Total time spent synchronizing data: " << timer.get_time();
has_updates_ = false;
}
@@ -343,7 +343,7 @@ void BlenderSync::sync_integrator(BL::ViewLayer &b_view_layer, bool background)
integrator->set_light_sampling_threshold(get_float(cscene, "light_sampling_threshold"));
SamplingPattern sampling_pattern = (SamplingPattern)get_enum(
- cscene, "sampling_pattern", SAMPLING_NUM_PATTERNS, SAMPLING_PATTERN_SOBOL);
+ cscene, "sampling_pattern", SAMPLING_NUM_PATTERNS, SAMPLING_PATTERN_PMJ);
integrator->set_sampling_pattern(sampling_pattern);
int samples = 1;
@@ -385,12 +385,13 @@ void BlenderSync::sync_integrator(BL::ViewLayer &b_view_layer, bool background)
/* Only use scrambling distance in the viewport if user wants to. */
bool preview_scrambling_distance = get_boolean(cscene, "preview_scrambling_distance");
- if (preview && !preview_scrambling_distance) {
+ if ((preview && !preview_scrambling_distance) ||
+ sampling_pattern == SAMPLING_PATTERN_SOBOL_BURLEY) {
scrambling_distance = 1.0f;
}
if (scrambling_distance != 1.0f) {
- VLOG(3) << "Using scrambling distance: " << scrambling_distance;
+ VLOG_INFO << "Using scrambling distance: " << scrambling_distance;
}
integrator->set_scrambling_distance(scrambling_distance);
@@ -412,7 +413,15 @@ void BlenderSync::sync_integrator(BL::ViewLayer &b_view_layer, bool background)
integrator->set_direct_light_sampling_type(direct_light_sampling_type);
#endif
- const DenoiseParams denoise_params = get_denoise_params(b_scene, b_view_layer, background);
+ DenoiseParams denoise_params = get_denoise_params(b_scene, b_view_layer, background);
+
+ /* No denoising support for vertex color baking: the vertices packed into the image
+ * buffer have no relation to their neighbors. */
+ if (scene->bake_manager->get_baking() &&
+ b_scene.render().bake().target() != BL::BakeSettings::target_IMAGE_TEXTURES) {
+ denoise_params.use = false;
+ }
+
integrator->set_use_denoise(denoise_params.use);
/* Only update denoiser parameters if the denoiser is actually used. This allows to tweak
@@ -671,14 +680,18 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
}
/* Cryptomatte stores two ID/weight pairs per RGBA layer.
- * User facing parameter is the number of pairs. */
+ * User facing parameter is the number of pairs.
+ *
+ * NOTE: Name the channels with lowercase "rgba" so that the compression-rules check in the
+ * OpenEXR DWA code uses lossless compression. Reportedly this is the only naming that works
+ * well from the interoperability point of view; using XYZW naming is not portable. */
int crypto_depth = divide_up(min(16, b_view_layer.pass_cryptomatte_depth()), 2);
scene->film->set_cryptomatte_depth(crypto_depth);
CryptomatteType cryptomatte_passes = CRYPT_NONE;
if (b_view_layer.use_pass_cryptomatte_object()) {
for (int i = 0; i < crypto_depth; i++) {
string passname = cryptomatte_prefix + string_printf("Object%02d", i);
- b_engine.add_pass(passname.c_str(), 4, "RGBA", b_view_layer.name().c_str());
+ b_engine.add_pass(passname.c_str(), 4, "rgba", b_view_layer.name().c_str());
pass_add(scene, PASS_CRYPTOMATTE, passname.c_str());
}
cryptomatte_passes = (CryptomatteType)(cryptomatte_passes | CRYPT_OBJECT);
@@ -686,7 +699,7 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
if (b_view_layer.use_pass_cryptomatte_material()) {
for (int i = 0; i < crypto_depth; i++) {
string passname = cryptomatte_prefix + string_printf("Material%02d", i);
- b_engine.add_pass(passname.c_str(), 4, "RGBA", b_view_layer.name().c_str());
+ b_engine.add_pass(passname.c_str(), 4, "rgba", b_view_layer.name().c_str());
pass_add(scene, PASS_CRYPTOMATTE, passname.c_str());
}
cryptomatte_passes = (CryptomatteType)(cryptomatte_passes | CRYPT_MATERIAL);
@@ -694,7 +707,7 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
if (b_view_layer.use_pass_cryptomatte_asset()) {
for (int i = 0; i < crypto_depth; i++) {
string passname = cryptomatte_prefix + string_printf("Asset%02d", i);
- b_engine.add_pass(passname.c_str(), 4, "RGBA", b_view_layer.name().c_str());
+ b_engine.add_pass(passname.c_str(), 4, "rgba", b_view_layer.name().c_str());
pass_add(scene, PASS_CRYPTOMATTE, passname.c_str());
}
cryptomatte_passes = (CryptomatteType)(cryptomatte_passes | CRYPT_ASSET);
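Each Cryptomatte RGBA pass stores two ID/weight pairs, so the pass count is the user-facing depth (clamped to 16) rounded up to whole pairs. A small sketch of that arithmetic, assuming divide_up is the usual ceiling division:

/* Number of RGBA passes needed for a given Cryptomatte depth. */
static inline int cryptomatte_pass_count(int user_depth)
{
  const int depth = (user_depth < 16) ? user_depth : 16; /* Clamp to 16 levels. */
  return (depth + 1) / 2; /* Ceiling division: e.g. depth 6 -> 3 passes, depth 7 -> 4. */
}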
@@ -793,7 +806,9 @@ void BlenderSync::free_data_after_sync(BL::Depsgraph &b_depsgraph)
/* Scene Parameters */
-SceneParams BlenderSync::get_scene_params(BL::Scene &b_scene, bool background)
+SceneParams BlenderSync::get_scene_params(BL::Scene &b_scene,
+ const bool background,
+ const bool use_developer_ui)
{
SceneParams params;
PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");
@@ -804,7 +819,7 @@ SceneParams BlenderSync::get_scene_params(BL::Scene &b_scene, bool background)
else if (shadingsystem == 1)
params.shadingsystem = SHADINGSYSTEM_OSL;
- if (background || DebugFlags().viewport_static_bvh)
+ if (background || (use_developer_ui && get_enum(cscene, "debug_bvh_type")))
params.bvh_type = BVH_TYPE_STATIC;
else
params.bvh_type = BVH_TYPE_DYNAMIC;
diff --git a/intern/cycles/blender/sync.h b/intern/cycles/blender/sync.h
index 5cc18452ac1..ae6c2420e55 100644
--- a/intern/cycles/blender/sync.h
+++ b/intern/cycles/blender/sync.h
@@ -7,6 +7,7 @@
#include "MEM_guardedalloc.h"
#include "RNA_access.h"
#include "RNA_blender_cpp.h"
+#include "RNA_path.h"
#include "RNA_types.h"
#include "blender/id_map.h"
@@ -83,7 +84,9 @@ class BlenderSync {
}
/* get parameters */
- static SceneParams get_scene_params(BL::Scene &b_scene, bool background);
+ static SceneParams get_scene_params(BL::Scene &b_scene,
+ const bool background,
+ const bool use_developer_ui);
static SessionParams get_session_params(BL::RenderEngine &b_engine,
BL::Preferences &b_userpref,
BL::Scene &b_scene,
diff --git a/intern/cycles/blender/util.h b/intern/cycles/blender/util.h
index 49cecb6d0f3..dbdfbaddaf1 100644
--- a/intern/cycles/blender/util.h
+++ b/intern/cycles/blender/util.h
@@ -21,7 +21,8 @@
extern "C" {
void BKE_image_user_frame_calc(void *ima, void *iuser, int cfra);
-void BKE_image_user_file_path_ex(void *iuser, void *ima, char *path, bool resolve_udim);
+void BKE_image_user_file_path_ex(
+ void *bmain, void *iuser, void *ima, char *path, bool resolve_udim, bool resolve_multiview);
unsigned char *BKE_image_get_pixels_for_frame(void *image, int frame, int tile);
float *BKE_image_get_float_pixels_for_frame(void *image, int frame, int tile);
}
@@ -281,12 +282,15 @@ static inline int render_resolution_y(BL::RenderSettings &b_render)
return b_render.resolution_y() * b_render.resolution_percentage() / 100;
}
-static inline string image_user_file_path(BL::ImageUser &iuser, BL::Image &ima, int cfra)
+static inline string image_user_file_path(BL::BlendData &data,
+ BL::ImageUser &iuser,
+ BL::Image &ima,
+ int cfra)
{
char filepath[1024];
iuser.tile(0);
BKE_image_user_frame_calc(ima.ptr.data, iuser.ptr.data, cfra);
- BKE_image_user_file_path_ex(iuser.ptr.data, ima.ptr.data, filepath, false);
+ BKE_image_user_file_path_ex(data.ptr.data, iuser.ptr.data, ima.ptr.data, filepath, false, true);
return string(filepath);
}