diff options
author | Lukas Stockner <lukas.stockner@freenet.de> | 2016-11-07 04:33:53 +0300 |
---|---|---|
committer | Lukas Stockner <lukas.stockner@freenet.de> | 2016-11-07 05:19:29 +0300 |
commit | dd921238d9223f550d3043313c9c38d07620de5d (patch) | |
tree | 762c114b9ccda879a2826be6c2205cb5a20665d3 /intern/cycles/blender | |
parent | f89fbf580eae6202cef9da08756fd415ca34a8f3 (diff) |
Cycles: Refactor Device selection to allow individual GPU compute device selection
Previously, it was only possible to choose a single GPU or all GPUs of a given type (CUDA or OpenCL).
Now, a toggle button is displayed for every device.
These settings are tied to the PCI Bus ID of the devices, so they're consistent across hardware addition and removal (but not when swapping/moving cards).
From the code perspective, the more important change is that now, the compute device properties are stored in the Addon preferences of the Cycles addon, instead of directly in the User Preferences.
This allows for a cleaner implementation, removing the Cycles C API functions that were called by the RNA code to specify the enum items.
Note that this change is neither backwards- nor forwards-compatible, but since it's only a User Preference no existing files are broken.
Reviewers: #cycles, brecht
Reviewed By: #cycles, brecht
Subscribers: brecht, juicyfruit, mib2berlin, Blendify
Differential Revision: https://developer.blender.org/D2338
Diffstat (limited to 'intern/cycles/blender')
-rw-r--r-- | intern/cycles/blender/CCL_api.h | 11 | ||||
-rw-r--r-- | intern/cycles/blender/addon/properties.py | 110 | ||||
-rw-r--r-- | intern/cycles/blender/addon/ui.py | 36 | ||||
-rw-r--r-- | intern/cycles/blender/blender_python.cpp | 91 | ||||
-rw-r--r-- | intern/cycles/blender/blender_sync.cpp | 47 |
5 files changed, 194 insertions, 101 deletions
diff --git a/intern/cycles/blender/CCL_api.h b/intern/cycles/blender/CCL_api.h index d3a68c4db4f..233ffc8802c 100644 --- a/intern/cycles/blender/CCL_api.h +++ b/intern/cycles/blender/CCL_api.h @@ -21,17 +21,6 @@ extern "C" { #endif -/* returns a list of devices for selection, array is empty identifier - * terminated and must not be freed */ - -typedef struct CCLDeviceInfo { - char identifier[128]; - char name[512]; - int value; -} CCLDeviceInfo; - -CCLDeviceInfo *CCL_compute_device_list(int device_type); - /* create python module _cycles used by addon */ void *CCL_python_module_init(void); diff --git a/intern/cycles/blender/addon/properties.py b/intern/cycles/blender/addon/properties.py index a8ab9100ded..27c9b922042 100644 --- a/intern/cycles/blender/addon/properties.py +++ b/intern/cycles/blender/addon/properties.py @@ -21,7 +21,8 @@ from bpy.props import (BoolProperty, EnumProperty, FloatProperty, IntProperty, - PointerProperty) + PointerProperty, + StringProperty) # enums @@ -122,6 +123,12 @@ enum_volume_interpolation = ( ('CUBIC', "Cubic", "Smoothed high quality interpolation, but slower") ) +enum_device_type = ( + ('CPU', "CPU", "CPU", 0), + ('CUDA', "CUDA", "CUDA", 1), + ('OPENCL', "OpenCL", "OpenCL", 2) + ) + class CyclesRenderSettings(bpy.types.PropertyGroup): @classmethod @@ -1130,6 +1137,103 @@ class CyclesCurveSettings(bpy.types.PropertyGroup): del bpy.types.ParticleSettings.cycles +class CyclesDeviceSettings(bpy.types.PropertyGroup): + @classmethod + def register(cls): + cls.id = StringProperty(name="ID") + cls.name = StringProperty(name="Name") + cls.use = BoolProperty(name="Use", default=True) + cls.type = EnumProperty(name="Type", items=enum_device_type, default='CUDA') + + +class CyclesPreferences(bpy.types.AddonPreferences): + bl_idname = __package__ + + def get_device_types(self, context): + import _cycles + has_cuda, has_opencl = _cycles.get_device_types() + list = [('NONE', "None", "Don't use compute device", 0)] + if has_cuda: + 
list.append(('CUDA', "CUDA", "Use CUDA for GPU acceleration", 1)) + if has_opencl: + list.append(('OPENCL', "OpenCL", "Use OpenCL for GPU acceleration", 2)) + return list + + compute_device_type = EnumProperty( + name="Compute Device Type", + description="Device to use for computation (rendering with Cycles)", + items=get_device_types, + ) + + devices = bpy.props.CollectionProperty(type=CyclesDeviceSettings) + + def get_devices(self): + import _cycles + # Layout of the device tuples: (Name, Type, Internal ID, Persistent ID) + device_list = _cycles.available_devices() + + cuda_devices = [] + opencl_devices = [] + for device in device_list: + if not device[1] in {'CUDA', 'OPENCL'}: + continue + + entry = None + # Try to find existing Device entry + for dev in self.devices: + if dev.id == device[2] and dev.type == device[1]: + entry = dev + break + # Create new entry if no existing one was found + if not entry: + entry = self.devices.add() + entry.id = device[2] + entry.name = device[0] + entry.type = device[1] + + # Sort entries into lists + if entry.type == 'CUDA': + cuda_devices.append(entry) + elif entry.type == 'OPENCL': + opencl_devices.append(entry) + return cuda_devices, opencl_devices + + + def has_active_device(self): + import _cycles + device_list = _cycles.available_devices() + for device in device_list: + if device[1] != self.compute_device_type: + continue + if any(dev.use and dev.id == device[2] for dev in self.devices): + return True + return False + + + def draw_impl(self, layout, context): + layout.label(text="Compute Device:") + layout.row().prop(self, "compute_device_type", expand=True) + + cuda_devices, opencl_devices = self.get_devices() + row = layout.row() + + if cuda_devices: + col = row.column(align=True) + col.label(text="CUDA devices:") + for device in cuda_devices: + col.prop(device, "use", text=device.name, toggle=True) + + if opencl_devices: + col = row.column(align=True) + col.label(text="OpenCL devices:") + for device in 
opencl_devices: + col.prop(device, "use", text=device.name, toggle=True) + + + def draw(self, context): + self.draw_impl(self.layout, context) + + def register(): bpy.utils.register_class(CyclesRenderSettings) bpy.utils.register_class(CyclesCameraSettings) @@ -1141,6 +1245,8 @@ def register(): bpy.utils.register_class(CyclesObjectSettings) bpy.utils.register_class(CyclesCurveRenderSettings) bpy.utils.register_class(CyclesCurveSettings) + bpy.utils.register_class(CyclesDeviceSettings) + bpy.utils.register_class(CyclesPreferences) def unregister(): @@ -1154,3 +1260,5 @@ def unregister(): bpy.utils.unregister_class(CyclesVisibilitySettings) bpy.utils.unregister_class(CyclesCurveRenderSettings) bpy.utils.unregister_class(CyclesCurveSettings) + bpy.utils.unregister_class(CyclesDeviceSettings) + bpy.utils.unregister_class(CyclesPreferences) diff --git a/intern/cycles/blender/addon/ui.py b/intern/cycles/blender/addon/ui.py index 4942a71ce4d..d9ad7d967a6 100644 --- a/intern/cycles/blender/addon/ui.py +++ b/intern/cycles/blender/addon/ui.py @@ -53,25 +53,26 @@ class CyclesButtonsPanel: return rd.engine in cls.COMPAT_ENGINES +def get_device_type(context): + return context.user_preferences.addons[__package__].preferences.compute_device_type + + def use_cpu(context): cscene = context.scene.cycles - device_type = context.user_preferences.system.compute_device_type - return (device_type == 'NONE' or cscene.device == 'CPU') + return (get_device_type(context) == 'NONE' or cscene.device == 'CPU') def use_opencl(context): cscene = context.scene.cycles - device_type = context.user_preferences.system.compute_device_type - return (device_type == 'OPENCL' and cscene.device == 'GPU') + return (get_device_type(context) == 'OPENCL' and cscene.device == 'GPU') def use_cuda(context): cscene = context.scene.cycles - device_type = context.user_preferences.system.compute_device_type - return (device_type == 'CUDA' and cscene.device == 'GPU') + return (get_device_type(context) == 'CUDA' and 
cscene.device == 'GPU') def use_branched_path(context): @@ -85,6 +86,14 @@ def use_sample_all_lights(context): return cscene.sample_all_lights_direct or cscene.sample_all_lights_indirect +def show_device_selection(context): + type = get_device_type(context) + if type == 'NETWORK': + return True + if not type in {'CUDA', 'OPENCL'}: + return False + return context.user_preferences.addons[__package__].preferences.has_active_device() + def draw_samples_info(layout, context): cscene = context.scene.cycles @@ -141,7 +150,6 @@ class CyclesRender_PT_sampling(CyclesButtonsPanel, Panel): scene = context.scene cscene = scene.cycles - device_type = context.user_preferences.system.compute_device_type row = layout.row(align=True) row.menu("CYCLES_MT_sampling_presets", text=bpy.types.CYCLES_MT_sampling_presets.bl_label) @@ -150,7 +158,7 @@ class CyclesRender_PT_sampling(CyclesButtonsPanel, Panel): row = layout.row() sub = row.row() - sub.active = device_type != 'OPENCL' or use_cpu(context) + sub.active = get_device_type(context) != 'OPENCL' or use_cpu(context) sub.prop(cscene, "progressive", text="") row.prop(cscene, "use_square_samples") @@ -364,6 +372,8 @@ class CyclesRender_PT_performance(CyclesButtonsPanel, Panel): rd = scene.render cscene = scene.cycles + context.user_preferences.addons['cycles'].preferences.draw_impl(layout, context) + split = layout.split() col = split.column(align=True) @@ -1606,9 +1616,13 @@ def draw_device(self, context): layout.prop(cscene, "feature_set") - device_type = context.user_preferences.system.compute_device_type - if device_type in {'CUDA', 'OPENCL', 'NETWORK'}: - layout.prop(cscene, "device") + split = layout.split(percentage=1/3) + split.label("Device:") + row = split.row(align=True) + sub = row.split(align=True) + sub.active = show_device_selection(context) + sub.prop(cscene, "device", text="") + row.operator("wm.addon_userpref_show", text="Preferences", icon='PREFERENCES').module = __package__ if engine.with_osl() and use_cpu(context): 
layout.prop(cscene, "shading_system") diff --git a/intern/cycles/blender/blender_python.cpp b/intern/cycles/blender/blender_python.cpp index a50f5edb1df..438abc49f88 100644 --- a/intern/cycles/blender/blender_python.cpp +++ b/intern/cycles/blender/blender_python.cpp @@ -40,10 +40,6 @@ CCL_NAMESPACE_BEGIN namespace { -/* Device list stored static (used by compute_device_list()). */ -static ccl::vector<CCLDeviceInfo> device_list; -static ccl::DeviceType device_type = DEVICE_NONE; - /* Flag describing whether debug flags were synchronized from scene. */ bool debug_flags_set = false; @@ -195,7 +191,6 @@ static PyObject *exit_func(PyObject * /*self*/, PyObject * /*args*/) ShaderManager::free_memory(); TaskScheduler::free_memory(); Device::free_memory(); - device_list.free_memory(); Py_RETURN_NONE; } @@ -389,7 +384,12 @@ static PyObject *available_devices_func(PyObject * /*self*/, PyObject * /*args*/ for(size_t i = 0; i < devices.size(); i++) { DeviceInfo& device = devices[i]; - PyTuple_SET_ITEM(ret, i, PyUnicode_FromString(device.description.c_str())); + string type_name = Device::string_from_type(device.type); + PyObject *device_tuple = PyTuple_New(3); + PyTuple_SET_ITEM(device_tuple, 0, PyUnicode_FromString(device.description.c_str())); + PyTuple_SET_ITEM(device_tuple, 1, PyUnicode_FromString(type_name.c_str())); + PyTuple_SET_ITEM(device_tuple, 2, PyUnicode_FromString(device.id.c_str())); + PyTuple_SET_ITEM(ret, i, device_tuple); } return ret; @@ -676,6 +676,20 @@ static PyObject *set_resumable_chunks_func(PyObject * /*self*/, PyObject *args) Py_RETURN_NONE; } +static PyObject *get_device_types_func(PyObject * /*self*/, PyObject * /*args*/) +{ + vector<DeviceInfo>& devices = Device::available_devices(); + bool has_cuda = false, has_opencl = false; + for(int i = 0; i < devices.size(); i++) { + has_cuda |= (devices[i].type == DEVICE_CUDA); + has_opencl |= (devices[i].type == DEVICE_OPENCL); + } + PyObject *list = PyTuple_New(2); + PyTuple_SET_ITEM(list, 0, 
PyBool_FromLong(has_cuda)); + PyTuple_SET_ITEM(list, 1, PyBool_FromLong(has_opencl)); + return list; +} + static PyMethodDef methods[] = { {"init", init_func, METH_VARARGS, ""}, {"exit", exit_func, METH_VARARGS, ""}, @@ -703,6 +717,9 @@ static PyMethodDef methods[] = { /* Resumable render */ {"set_resumable_chunks", set_resumable_chunks_func, METH_VARARGS, ""}, + /* Compute Device selection */ + {"get_device_types", get_device_types_func, METH_VARARGS, ""}, + {NULL, NULL, 0, NULL}, }; @@ -715,47 +732,6 @@ static struct PyModuleDef module = { NULL, NULL, NULL, NULL }; -static CCLDeviceInfo *compute_device_list(DeviceType type) -{ - /* create device list if it's not already done */ - if(type != device_type) { - ccl::vector<DeviceInfo>& devices = ccl::Device::available_devices(); - - device_type = type; - device_list.clear(); - - /* add devices */ - int i = 0; - - foreach(DeviceInfo& info, devices) { - if(info.type == type || - (info.type == DEVICE_MULTI && info.multi_devices[0].type == type)) - { - CCLDeviceInfo cinfo; - - strncpy(cinfo.identifier, info.id.c_str(), sizeof(cinfo.identifier)); - cinfo.identifier[info.id.length()] = '\0'; - - strncpy(cinfo.name, info.description.c_str(), sizeof(cinfo.name)); - cinfo.name[info.description.length()] = '\0'; - - cinfo.value = i++; - - device_list.push_back(cinfo); - } - } - - /* null terminate */ - if(!device_list.empty()) { - CCLDeviceInfo cinfo = {"", "", 0}; - device_list.push_back(cinfo); - } - } - - return (device_list.empty())? 
NULL: &device_list[0]; -} - - CCL_NAMESPACE_END void *CCL_python_module_init() @@ -794,24 +770,3 @@ void *CCL_python_module_init() return (void*)mod; } - -CCLDeviceInfo *CCL_compute_device_list(int device_type) -{ - ccl::DeviceType type; - switch(device_type) { - case 0: - type = ccl::DEVICE_CUDA; - break; - case 1: - type = ccl::DEVICE_OPENCL; - break; - case 2: - type = ccl::DEVICE_NETWORK; - break; - default: - type = ccl::DEVICE_NONE; - break; - } - return ccl::compute_device_list(type); -} - diff --git a/intern/cycles/blender/blender_sync.cpp b/intern/cycles/blender/blender_sync.cpp index bc5c3bb8096..6e466826c35 100644 --- a/intern/cycles/blender/blender_sync.cpp +++ b/intern/cycles/blender/blender_sync.cpp @@ -531,7 +531,12 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine, vector<DeviceInfo>& devices = Device::available_devices(); /* device default CPU */ - params.device = devices[0]; + foreach(DeviceInfo& device, devices) { + if(device.type == DEVICE_CPU) { + params.device = device; + break; + } + } if(get_enum(cscene, "device") == 2) { /* find network device */ @@ -540,17 +545,39 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine, params.device = info; } else if(get_enum(cscene, "device") == 1) { - /* find GPU device with given id */ - PointerRNA systemptr = b_userpref.system().ptr; - PropertyRNA *deviceprop = RNA_struct_find_property(&systemptr, "compute_device"); - int device_id = b_userpref.system().compute_device(); + PointerRNA b_preferences; - const char *id; + BL::UserPreferences::addons_iterator b_addon_iter; + for(b_userpref.addons.begin(b_addon_iter); b_addon_iter != b_userpref.addons.end(); ++b_addon_iter) { + if(b_addon_iter->module() == "cycles") { + b_preferences = b_addon_iter->preferences().ptr; + break; + } + } - if(RNA_property_enum_identifier(NULL, &systemptr, deviceprop, device_id, &id)) { - foreach(DeviceInfo& info, devices) - if(info.id == id) - params.device = info; + int 
compute_device = get_enum(b_preferences, "compute_device_type"); + + if(compute_device != 0) { + vector<DeviceInfo> used_devices; + RNA_BEGIN(&b_preferences, device, "devices") { + if(get_enum(device, "type") == compute_device && get_boolean(device, "use")) { + string id = get_string(device, "id"); + foreach(DeviceInfo& info, devices) { + if(info.id == id) { + used_devices.push_back(info); + break; + } + } + } + } RNA_END + + if(used_devices.size() == 1) { + params.device = used_devices[0]; + } + else if(used_devices.size() > 1) { + params.device = Device::get_multi_device(used_devices); + } + /* Else keep using the CPU device that was set before. */ } } |