Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
Diffstat (limited to 'intern/cycles/blender')
-rw-r--r--intern/cycles/blender/addon/properties.py46
-rw-r--r--intern/cycles/blender/addon/ui.py23
-rw-r--r--intern/cycles/blender/blender_device.cpp7
-rw-r--r--intern/cycles/blender/blender_python.cpp10
-rw-r--r--intern/cycles/blender/blender_sync.cpp2
5 files changed, 65 insertions, 23 deletions
diff --git a/intern/cycles/blender/addon/properties.py b/intern/cycles/blender/addon/properties.py
index 93f8f76cd6a..8623b38a271 100644
--- a/intern/cycles/blender/addon/properties.py
+++ b/intern/cycles/blender/addon/properties.py
@@ -137,6 +137,7 @@ enum_world_mis = (
enum_device_type = (
('CPU', "CPU", "CPU", 0),
('CUDA', "CUDA", "CUDA", 1),
+ ('OPTIX', "OptiX", "OptiX", 3),
('OPENCL', "OpenCL", "OpenCL", 2)
)
@@ -740,6 +741,8 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
debug_use_cuda_adaptive_compile: BoolProperty(name="Adaptive Compile", default=False)
debug_use_cuda_split_kernel: BoolProperty(name="Split Kernel", default=False)
+ debug_optix_cuda_streams: IntProperty(name="CUDA Streams", default=1, min=1)
+
debug_opencl_kernel_type: EnumProperty(
name="OpenCL Kernel Type",
default='DEFAULT',
@@ -1400,10 +1403,12 @@ class CyclesPreferences(bpy.types.AddonPreferences):
def get_device_types(self, context):
import _cycles
- has_cuda, has_opencl = _cycles.get_device_types()
+ has_cuda, has_optix, has_opencl = _cycles.get_device_types()
list = [('NONE', "None", "Don't use compute device", 0)]
if has_cuda:
list.append(('CUDA', "CUDA", "Use CUDA for GPU acceleration", 1))
+ if has_optix:
+ list.append(('OPTIX', "OptiX", "Use OptiX for GPU acceleration", 3))
if has_opencl:
list.append(('OPENCL', "OpenCL", "Use OpenCL for GPU acceleration", 2))
return list
@@ -1424,7 +1429,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
def update_device_entries(self, device_list):
for device in device_list:
- if not device[1] in {'CUDA', 'OPENCL', 'CPU'}:
+ if not device[1] in {'CUDA', 'OPTIX', 'OPENCL', 'CPU'}:
continue
# Try to find existing Device entry
entry = self.find_existing_device_entry(device)
@@ -1439,8 +1444,8 @@ class CyclesPreferences(bpy.types.AddonPreferences):
# Update name in case it changed
entry.name = device[0]
- # Gets all devices types by default.
- def get_devices(self, compute_device_type=''):
+ # Gets all devices types for a compute device type.
+ def get_devices_for_type(self, compute_device_type):
import _cycles
# Layout of the device tuples: (Name, Type, Persistent ID)
device_list = _cycles.available_devices(compute_device_type)
@@ -1449,20 +1454,23 @@ class CyclesPreferences(bpy.types.AddonPreferences):
# hold pointers to a resized array.
self.update_device_entries(device_list)
# Sort entries into lists
- cuda_devices = []
- opencl_devices = []
+ devices = []
cpu_devices = []
for device in device_list:
entry = self.find_existing_device_entry(device)
- if entry.type == 'CUDA':
- cuda_devices.append(entry)
- elif entry.type == 'OPENCL':
- opencl_devices.append(entry)
+ if entry.type == compute_device_type:
+ devices.append(entry)
elif entry.type == 'CPU':
cpu_devices.append(entry)
# Extend all GPU devices with CPU.
- cuda_devices.extend(cpu_devices)
- opencl_devices.extend(cpu_devices)
+ if compute_device_type in ('CUDA', 'OPENCL'):
+ devices.extend(cpu_devices)
+ return devices
+
+ # For backwards compatibility, only has CUDA and OpenCL.
+ def get_devices(self, compute_device_type=''):
+ cuda_devices = self.get_devices_for_type('CUDA')
+ opencl_devices = self.get_devices_for_type('OPENCL')
return cuda_devices, opencl_devices
def get_num_gpu_devices(self):
@@ -1498,16 +1506,24 @@ class CyclesPreferences(bpy.types.AddonPreferences):
for device in devices:
box.prop(device, "use", text=device.name)
+ if device_type == 'OPTIX':
+ col = box.column(align=True)
+ col.label(text="OptiX support is experimental", icon='INFO')
+ col.label(text="Not all Cycles features are supported yet", icon='BLANK1')
+
+
def draw_impl(self, layout, context):
row = layout.row()
row.prop(self, "compute_device_type", expand=True)
- cuda_devices, opencl_devices = self.get_devices(self.compute_device_type)
+ devices = self.get_devices_for_type(self.compute_device_type)
row = layout.row()
if self.compute_device_type == 'CUDA':
- self._draw_devices(row, 'CUDA', cuda_devices)
+ self._draw_devices(row, 'CUDA', devices)
+ elif self.compute_device_type == 'OPTIX':
+ self._draw_devices(row, 'OPTIX', devices)
elif self.compute_device_type == 'OPENCL':
- self._draw_devices(row, 'OPENCL', opencl_devices)
+ self._draw_devices(row, 'OPENCL', devices)
def draw(self, context):
self.draw_impl(self.layout, context)
diff --git a/intern/cycles/blender/addon/ui.py b/intern/cycles/blender/addon/ui.py
index 200b08f93cb..44ed28e9e02 100644
--- a/intern/cycles/blender/addon/ui.py
+++ b/intern/cycles/blender/addon/ui.py
@@ -88,10 +88,16 @@ def use_cuda(context):
return (get_device_type(context) == 'CUDA' and cscene.device == 'GPU')
+def use_optix(context):
+ cscene = context.scene.cycles
+
+ return (get_device_type(context) == 'OPTIX' and cscene.device == 'GPU')
+
+
def use_branched_path(context):
cscene = context.scene.cycles
- return (cscene.progressive == 'BRANCHED_PATH')
+ return (cscene.progressive == 'BRANCHED_PATH' and not use_optix(context))
def use_sample_all_lights(context):
@@ -168,7 +174,8 @@ class CYCLES_RENDER_PT_sampling(CyclesButtonsPanel, Panel):
layout.use_property_split = True
layout.use_property_decorate = False
- layout.prop(cscene, "progressive")
+ if not use_optix(context):
+ layout.prop(cscene, "progressive")
if cscene.progressive == 'PATH' or use_branched_path(context) is False:
col = layout.column(align=True)
@@ -1763,6 +1770,10 @@ class CYCLES_RENDER_PT_bake(CyclesButtonsPanel, Panel):
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'CYCLES'}
+ @classmethod
+ def poll(cls, context):
+ return not use_optix(context)
+
def draw(self, context):
layout = self.layout
layout.use_property_split = True
@@ -1947,7 +1958,13 @@ class CYCLES_RENDER_PT_debug(CyclesButtonsPanel, Panel):
col.separator()
col = layout.column()
- col.label(text='OpenCL Flags:')
+ col.label(text="OptiX Flags:")
+ col.prop(cscene, "debug_optix_cuda_streams")
+
+ col.separator()
+
+ col = layout.column()
+ col.label(text="OpenCL Flags:")
col.prop(cscene, "debug_opencl_device_type", text="Device")
col.prop(cscene, "debug_use_opencl_debug", text="Debug")
col.prop(cscene, "debug_opencl_mem_limit")
diff --git a/intern/cycles/blender/blender_device.cpp b/intern/cycles/blender/blender_device.cpp
index 98fc0c6dec4..111fc8d5192 100644
--- a/intern/cycles/blender/blender_device.cpp
+++ b/intern/cycles/blender/blender_device.cpp
@@ -61,7 +61,8 @@ DeviceInfo blender_device_info(BL::Preferences &b_preferences, BL::Scene &b_scen
COMPUTE_DEVICE_CPU = 0,
COMPUTE_DEVICE_CUDA = 1,
COMPUTE_DEVICE_OPENCL = 2,
- COMPUTE_DEVICE_NUM = 3,
+ COMPUTE_DEVICE_OPTIX = 3,
+ COMPUTE_DEVICE_NUM = 4,
};
ComputeDevice compute_device = (ComputeDevice)get_enum(
@@ -73,6 +74,10 @@ DeviceInfo blender_device_info(BL::Preferences &b_preferences, BL::Scene &b_scen
if (compute_device == COMPUTE_DEVICE_CUDA) {
mask |= DEVICE_MASK_CUDA;
}
+ else if (compute_device == COMPUTE_DEVICE_OPTIX) {
+ /* Cannot use CPU and OptiX device at the same time right now, so replace mask. */
+ mask = DEVICE_MASK_OPTIX;
+ }
else if (compute_device == COMPUTE_DEVICE_OPENCL) {
mask |= DEVICE_MASK_OPENCL;
}
diff --git a/intern/cycles/blender/blender_python.cpp b/intern/cycles/blender/blender_python.cpp
index 2bea6b34772..335d4daf09c 100644
--- a/intern/cycles/blender/blender_python.cpp
+++ b/intern/cycles/blender/blender_python.cpp
@@ -81,6 +81,8 @@ bool debug_flags_sync_from_scene(BL::Scene b_scene)
/* Synchronize CUDA flags. */
flags.cuda.adaptive_compile = get_boolean(cscene, "debug_use_cuda_adaptive_compile");
flags.cuda.split_kernel = get_boolean(cscene, "debug_use_cuda_split_kernel");
+ /* Synchronize OptiX flags. */
+ flags.optix.cuda_streams = get_int(cscene, "debug_optix_cuda_streams");
/* Synchronize OpenCL device type. */
switch (get_enum(cscene, "debug_opencl_device_type")) {
case 0:
@@ -960,14 +962,16 @@ static PyObject *enable_print_stats_func(PyObject * /*self*/, PyObject * /*args*
static PyObject *get_device_types_func(PyObject * /*self*/, PyObject * /*args*/)
{
vector<DeviceType> device_types = Device::available_types();
- bool has_cuda = false, has_opencl = false;
+ bool has_cuda = false, has_optix = false, has_opencl = false;
foreach (DeviceType device_type, device_types) {
has_cuda |= (device_type == DEVICE_CUDA);
+ has_optix |= (device_type == DEVICE_OPTIX);
has_opencl |= (device_type == DEVICE_OPENCL);
}
- PyObject *list = PyTuple_New(2);
+ PyObject *list = PyTuple_New(3);
PyTuple_SET_ITEM(list, 0, PyBool_FromLong(has_cuda));
- PyTuple_SET_ITEM(list, 1, PyBool_FromLong(has_opencl));
+ PyTuple_SET_ITEM(list, 1, PyBool_FromLong(has_optix));
+ PyTuple_SET_ITEM(list, 2, PyBool_FromLong(has_opencl));
return list;
}
diff --git a/intern/cycles/blender/blender_sync.cpp b/intern/cycles/blender/blender_sync.cpp
index 8b7c66363d9..1a166d171bc 100644
--- a/intern/cycles/blender/blender_sync.cpp
+++ b/intern/cycles/blender/blender_sync.cpp
@@ -758,7 +758,7 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine &b_engine,
preview_samples = preview_samples * preview_samples;
}
- if (get_enum(cscene, "progressive") == 0) {
+ if (get_enum(cscene, "progressive") == 0 && (params.device.type != DEVICE_OPTIX)) {
if (background) {
params.samples = aa_samples;
}