git.blender.org/blender.git
author    Lukas Stockner <lukas.stockner@freenet.de>  2016-11-07 04:33:53 +0300
committer Lukas Stockner <lukas.stockner@freenet.de>  2016-11-07 05:19:29 +0300
commit    dd921238d9223f550d3043313c9c38d07620de5d (patch)
tree      762c114b9ccda879a2826be6c2205cb5a20665d3 /intern
parent    f89fbf580eae6202cef9da08756fd415ca34a8f3 (diff)
Cycles: Refactor Device selection to allow individual GPU compute device selection
Previously, it was only possible to choose a single GPU or all GPUs of a given type (CUDA or OpenCL). Now, a toggle button is displayed for every device.

These settings are tied to the PCI Bus ID of the devices, so they stay consistent when hardware is added or removed (but not when cards are swapped or moved between slots).

From the code perspective, the more important change is that the compute device properties are now stored in the Addon Preferences of the Cycles addon, instead of directly in the User Preferences. This allows for a cleaner implementation and removes the Cycles C API functions that were called by the RNA code to specify the enum items.

Note that this change is neither backwards- nor forwards-compatible, but since it only affects a User Preference, no existing files are broken.

Reviewers: #cycles, brecht

Reviewed By: #cycles, brecht

Subscribers: brecht, juicyfruit, mib2berlin, Blendify

Differential Revision: https://developer.blender.org/D2338
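
For reference, a minimal sketch (not part of the patch) of how the per-device selection introduced here can be driven from Blender's Python console, assuming the Cycles addon is enabled and at least one CUDA device is present:

    import bpy

    # The compute device settings now live in the Cycles addon preferences
    # instead of bpy.context.user_preferences.system.
    prefs = bpy.context.user_preferences.addons['cycles'].preferences

    # Backend choice; the enum items are built from _cycles.get_device_types().
    prefs.compute_device_type = 'CUDA'

    # get_devices() syncs the persistent per-device entries with
    # _cycles.available_devices() and returns (cuda_devices, opencl_devices).
    cuda_devices, opencl_devices = prefs.get_devices()
    for device in cuda_devices:
        print(device.id, device.name)
        device.use = True  # same flag the new toggle buttons control

    # Rendering on the enabled GPUs still requires the scene-level switch.
    bpy.context.scene.cycles.device = 'GPU'
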
Diffstat (limited to 'intern')
-rw-r--r--  intern/cycles/blender/CCL_api.h             |  11
-rw-r--r--  intern/cycles/blender/addon/properties.py   | 110
-rw-r--r--  intern/cycles/blender/addon/ui.py           |  36
-rw-r--r--  intern/cycles/blender/blender_python.cpp    |  91
-rw-r--r--  intern/cycles/blender/blender_sync.cpp      |  47
-rw-r--r--  intern/cycles/device/device.cpp             |  54
-rw-r--r--  intern/cycles/device/device.h               |   9
-rw-r--r--  intern/cycles/device/device_cuda.cpp        |   7
-rw-r--r--  intern/cycles/device/device_intern.h        |   1
-rw-r--r--  intern/cycles/device/device_multi.cpp       | 115
-rw-r--r--  intern/cycles/device/device_opencl.cpp      |   7
-rw-r--r--  intern/cycles/device/opencl/opencl.h        |   9
-rw-r--r--  intern/cycles/device/opencl/opencl_util.cpp |  25
-rw-r--r--  intern/cycles/render/session.h              |   3
14 files changed, 281 insertions(+), 244 deletions(-)
diff --git a/intern/cycles/blender/CCL_api.h b/intern/cycles/blender/CCL_api.h
index d3a68c4db4f..233ffc8802c 100644
--- a/intern/cycles/blender/CCL_api.h
+++ b/intern/cycles/blender/CCL_api.h
@@ -21,17 +21,6 @@
extern "C" {
#endif
-/* returns a list of devices for selection, array is empty identifier
- * terminated and must not be freed */
-
-typedef struct CCLDeviceInfo {
- char identifier[128];
- char name[512];
- int value;
-} CCLDeviceInfo;
-
-CCLDeviceInfo *CCL_compute_device_list(int device_type);
-
/* create python module _cycles used by addon */
void *CCL_python_module_init(void);
diff --git a/intern/cycles/blender/addon/properties.py b/intern/cycles/blender/addon/properties.py
index a8ab9100ded..27c9b922042 100644
--- a/intern/cycles/blender/addon/properties.py
+++ b/intern/cycles/blender/addon/properties.py
@@ -21,7 +21,8 @@ from bpy.props import (BoolProperty,
EnumProperty,
FloatProperty,
IntProperty,
- PointerProperty)
+ PointerProperty,
+ StringProperty)
# enums
@@ -122,6 +123,12 @@ enum_volume_interpolation = (
('CUBIC', "Cubic", "Smoothed high quality interpolation, but slower")
)
+enum_device_type = (
+ ('CPU', "CPU", "CPU", 0),
+ ('CUDA', "CUDA", "CUDA", 1),
+ ('OPENCL', "OpenCL", "OpenCL", 2)
+ )
+
class CyclesRenderSettings(bpy.types.PropertyGroup):
@classmethod
@@ -1130,6 +1137,103 @@ class CyclesCurveSettings(bpy.types.PropertyGroup):
del bpy.types.ParticleSettings.cycles
+class CyclesDeviceSettings(bpy.types.PropertyGroup):
+ @classmethod
+ def register(cls):
+ cls.id = StringProperty(name="ID")
+ cls.name = StringProperty(name="Name")
+ cls.use = BoolProperty(name="Use", default=True)
+ cls.type = EnumProperty(name="Type", items=enum_device_type, default='CUDA')
+
+
+class CyclesPreferences(bpy.types.AddonPreferences):
+ bl_idname = __package__
+
+ def get_device_types(self, context):
+ import _cycles
+ has_cuda, has_opencl = _cycles.get_device_types()
+ list = [('NONE', "None", "Don't use compute device", 0)]
+ if has_cuda:
+ list.append(('CUDA', "CUDA", "Use CUDA for GPU acceleration", 1))
+ if has_opencl:
+ list.append(('OPENCL', "OpenCL", "Use OpenCL for GPU acceleration", 2))
+ return list
+
+ compute_device_type = EnumProperty(
+ name="Compute Device Type",
+ description="Device to use for computation (rendering with Cycles)",
+ items=get_device_types,
+ )
+
+ devices = bpy.props.CollectionProperty(type=CyclesDeviceSettings)
+
+ def get_devices(self):
+ import _cycles
+ # Layout of the device tuples: (Name, Type, Persistent ID)
+ device_list = _cycles.available_devices()
+
+ cuda_devices = []
+ opencl_devices = []
+ for device in device_list:
+ if not device[1] in {'CUDA', 'OPENCL'}:
+ continue
+
+ entry = None
+ # Try to find existing Device entry
+ for dev in self.devices:
+ if dev.id == device[2] and dev.type == device[1]:
+ entry = dev
+ break
+ # Create new entry if no existing one was found
+ if not entry:
+ entry = self.devices.add()
+ entry.id = device[2]
+ entry.name = device[0]
+ entry.type = device[1]
+
+ # Sort entries into lists
+ if entry.type == 'CUDA':
+ cuda_devices.append(entry)
+ elif entry.type == 'OPENCL':
+ opencl_devices.append(entry)
+ return cuda_devices, opencl_devices
+
+
+ def has_active_device(self):
+ import _cycles
+ device_list = _cycles.available_devices()
+ for device in device_list:
+ if device[1] != self.compute_device_type:
+ continue
+ if any(dev.use and dev.id == device[2] for dev in self.devices):
+ return True
+ return False
+
+
+ def draw_impl(self, layout, context):
+ layout.label(text="Compute Device:")
+ layout.row().prop(self, "compute_device_type", expand=True)
+
+ cuda_devices, opencl_devices = self.get_devices()
+ row = layout.row()
+
+ if cuda_devices:
+ col = row.column(align=True)
+ col.label(text="CUDA devices:")
+ for device in cuda_devices:
+ col.prop(device, "use", text=device.name, toggle=True)
+
+ if opencl_devices:
+ col = row.column(align=True)
+ col.label(text="OpenCL devices:")
+ for device in opencl_devices:
+ col.prop(device, "use", text=device.name, toggle=True)
+
+
+ def draw(self, context):
+ self.draw_impl(self.layout, context)
+
+
def register():
bpy.utils.register_class(CyclesRenderSettings)
bpy.utils.register_class(CyclesCameraSettings)
@@ -1141,6 +1245,8 @@ def register():
bpy.utils.register_class(CyclesObjectSettings)
bpy.utils.register_class(CyclesCurveRenderSettings)
bpy.utils.register_class(CyclesCurveSettings)
+ bpy.utils.register_class(CyclesDeviceSettings)
+ bpy.utils.register_class(CyclesPreferences)
def unregister():
@@ -1154,3 +1260,5 @@ def unregister():
bpy.utils.unregister_class(CyclesVisibilitySettings)
bpy.utils.unregister_class(CyclesCurveRenderSettings)
bpy.utils.unregister_class(CyclesCurveSettings)
+ bpy.utils.unregister_class(CyclesDeviceSettings)
+ bpy.utils.unregister_class(CyclesPreferences)
diff --git a/intern/cycles/blender/addon/ui.py b/intern/cycles/blender/addon/ui.py
index 4942a71ce4d..d9ad7d967a6 100644
--- a/intern/cycles/blender/addon/ui.py
+++ b/intern/cycles/blender/addon/ui.py
@@ -53,25 +53,26 @@ class CyclesButtonsPanel:
return rd.engine in cls.COMPAT_ENGINES
+def get_device_type(context):
+ return context.user_preferences.addons[__package__].preferences.compute_device_type
+
+
def use_cpu(context):
cscene = context.scene.cycles
- device_type = context.user_preferences.system.compute_device_type
- return (device_type == 'NONE' or cscene.device == 'CPU')
+ return (get_device_type(context) == 'NONE' or cscene.device == 'CPU')
def use_opencl(context):
cscene = context.scene.cycles
- device_type = context.user_preferences.system.compute_device_type
- return (device_type == 'OPENCL' and cscene.device == 'GPU')
+ return (get_device_type(context) == 'OPENCL' and cscene.device == 'GPU')
def use_cuda(context):
cscene = context.scene.cycles
- device_type = context.user_preferences.system.compute_device_type
- return (device_type == 'CUDA' and cscene.device == 'GPU')
+ return (get_device_type(context) == 'CUDA' and cscene.device == 'GPU')
def use_branched_path(context):
@@ -85,6 +86,14 @@ def use_sample_all_lights(context):
return cscene.sample_all_lights_direct or cscene.sample_all_lights_indirect
+def show_device_selection(context):
+ type = get_device_type(context)
+ if type == 'NETWORK':
+ return True
+ if not type in {'CUDA', 'OPENCL'}:
+ return False
+ return context.user_preferences.addons[__package__].preferences.has_active_device()
+
def draw_samples_info(layout, context):
cscene = context.scene.cycles
@@ -141,7 +150,6 @@ class CyclesRender_PT_sampling(CyclesButtonsPanel, Panel):
scene = context.scene
cscene = scene.cycles
- device_type = context.user_preferences.system.compute_device_type
row = layout.row(align=True)
row.menu("CYCLES_MT_sampling_presets", text=bpy.types.CYCLES_MT_sampling_presets.bl_label)
@@ -150,7 +158,7 @@ class CyclesRender_PT_sampling(CyclesButtonsPanel, Panel):
row = layout.row()
sub = row.row()
- sub.active = device_type != 'OPENCL' or use_cpu(context)
+ sub.active = get_device_type(context) != 'OPENCL' or use_cpu(context)
sub.prop(cscene, "progressive", text="")
row.prop(cscene, "use_square_samples")
@@ -364,6 +372,8 @@ class CyclesRender_PT_performance(CyclesButtonsPanel, Panel):
rd = scene.render
cscene = scene.cycles
+ context.user_preferences.addons['cycles'].preferences.draw_impl(layout, context)
+
split = layout.split()
col = split.column(align=True)
@@ -1606,9 +1616,13 @@ def draw_device(self, context):
layout.prop(cscene, "feature_set")
- device_type = context.user_preferences.system.compute_device_type
- if device_type in {'CUDA', 'OPENCL', 'NETWORK'}:
- layout.prop(cscene, "device")
+ split = layout.split(percentage=1/3)
+ split.label("Device:")
+ row = split.row(align=True)
+ sub = row.split(align=True)
+ sub.active = show_device_selection(context)
+ sub.prop(cscene, "device", text="")
+ row.operator("wm.addon_userpref_show", text="Preferences", icon='PREFERENCES').module = __package__
if engine.with_osl() and use_cpu(context):
layout.prop(cscene, "shading_system")
diff --git a/intern/cycles/blender/blender_python.cpp b/intern/cycles/blender/blender_python.cpp
index a50f5edb1df..438abc49f88 100644
--- a/intern/cycles/blender/blender_python.cpp
+++ b/intern/cycles/blender/blender_python.cpp
@@ -40,10 +40,6 @@ CCL_NAMESPACE_BEGIN
namespace {
-/* Device list stored static (used by compute_device_list()). */
-static ccl::vector<CCLDeviceInfo> device_list;
-static ccl::DeviceType device_type = DEVICE_NONE;
-
/* Flag describing whether debug flags were synchronized from scene. */
bool debug_flags_set = false;
@@ -195,7 +191,6 @@ static PyObject *exit_func(PyObject * /*self*/, PyObject * /*args*/)
ShaderManager::free_memory();
TaskScheduler::free_memory();
Device::free_memory();
- device_list.free_memory();
Py_RETURN_NONE;
}
@@ -389,7 +384,12 @@ static PyObject *available_devices_func(PyObject * /*self*/, PyObject * /*args*/
for(size_t i = 0; i < devices.size(); i++) {
DeviceInfo& device = devices[i];
- PyTuple_SET_ITEM(ret, i, PyUnicode_FromString(device.description.c_str()));
+ string type_name = Device::string_from_type(device.type);
+ PyObject *device_tuple = PyTuple_New(3);
+ PyTuple_SET_ITEM(device_tuple, 0, PyUnicode_FromString(device.description.c_str()));
+ PyTuple_SET_ITEM(device_tuple, 1, PyUnicode_FromString(type_name.c_str()));
+ PyTuple_SET_ITEM(device_tuple, 2, PyUnicode_FromString(device.id.c_str()));
+ PyTuple_SET_ITEM(ret, i, device_tuple);
}
return ret;
@@ -676,6 +676,20 @@ static PyObject *set_resumable_chunks_func(PyObject * /*self*/, PyObject *args)
Py_RETURN_NONE;
}
+static PyObject *get_device_types_func(PyObject * /*self*/, PyObject * /*args*/)
+{
+ vector<DeviceInfo>& devices = Device::available_devices();
+ bool has_cuda = false, has_opencl = false;
+ for(int i = 0; i < devices.size(); i++) {
+ has_cuda |= (devices[i].type == DEVICE_CUDA);
+ has_opencl |= (devices[i].type == DEVICE_OPENCL);
+ }
+ PyObject *list = PyTuple_New(2);
+ PyTuple_SET_ITEM(list, 0, PyBool_FromLong(has_cuda));
+ PyTuple_SET_ITEM(list, 1, PyBool_FromLong(has_opencl));
+ return list;
+}
+
static PyMethodDef methods[] = {
{"init", init_func, METH_VARARGS, ""},
{"exit", exit_func, METH_VARARGS, ""},
@@ -703,6 +717,9 @@ static PyMethodDef methods[] = {
/* Resumable render */
{"set_resumable_chunks", set_resumable_chunks_func, METH_VARARGS, ""},
+ /* Compute Device selection */
+ {"get_device_types", get_device_types_func, METH_VARARGS, ""},
+
{NULL, NULL, 0, NULL},
};
@@ -715,47 +732,6 @@ static struct PyModuleDef module = {
NULL, NULL, NULL, NULL
};
-static CCLDeviceInfo *compute_device_list(DeviceType type)
-{
- /* create device list if it's not already done */
- if(type != device_type) {
- ccl::vector<DeviceInfo>& devices = ccl::Device::available_devices();
-
- device_type = type;
- device_list.clear();
-
- /* add devices */
- int i = 0;
-
- foreach(DeviceInfo& info, devices) {
- if(info.type == type ||
- (info.type == DEVICE_MULTI && info.multi_devices[0].type == type))
- {
- CCLDeviceInfo cinfo;
-
- strncpy(cinfo.identifier, info.id.c_str(), sizeof(cinfo.identifier));
- cinfo.identifier[info.id.length()] = '\0';
-
- strncpy(cinfo.name, info.description.c_str(), sizeof(cinfo.name));
- cinfo.name[info.description.length()] = '\0';
-
- cinfo.value = i++;
-
- device_list.push_back(cinfo);
- }
- }
-
- /* null terminate */
- if(!device_list.empty()) {
- CCLDeviceInfo cinfo = {"", "", 0};
- device_list.push_back(cinfo);
- }
- }
-
- return (device_list.empty())? NULL: &device_list[0];
-}
-
-
CCL_NAMESPACE_END
void *CCL_python_module_init()
@@ -794,24 +770,3 @@ void *CCL_python_module_init()
return (void*)mod;
}
-
-CCLDeviceInfo *CCL_compute_device_list(int device_type)
-{
- ccl::DeviceType type;
- switch(device_type) {
- case 0:
- type = ccl::DEVICE_CUDA;
- break;
- case 1:
- type = ccl::DEVICE_OPENCL;
- break;
- case 2:
- type = ccl::DEVICE_NETWORK;
- break;
- default:
- type = ccl::DEVICE_NONE;
- break;
- }
- return ccl::compute_device_list(type);
-}
-
diff --git a/intern/cycles/blender/blender_sync.cpp b/intern/cycles/blender/blender_sync.cpp
index bc5c3bb8096..6e466826c35 100644
--- a/intern/cycles/blender/blender_sync.cpp
+++ b/intern/cycles/blender/blender_sync.cpp
@@ -531,7 +531,12 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine,
vector<DeviceInfo>& devices = Device::available_devices();
/* device default CPU */
- params.device = devices[0];
+ foreach(DeviceInfo& device, devices) {
+ if(device.type == DEVICE_CPU) {
+ params.device = device;
+ break;
+ }
+ }
if(get_enum(cscene, "device") == 2) {
/* find network device */
@@ -540,17 +545,39 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine,
params.device = info;
}
else if(get_enum(cscene, "device") == 1) {
- /* find GPU device with given id */
- PointerRNA systemptr = b_userpref.system().ptr;
- PropertyRNA *deviceprop = RNA_struct_find_property(&systemptr, "compute_device");
- int device_id = b_userpref.system().compute_device();
+ PointerRNA b_preferences;
- const char *id;
+ BL::UserPreferences::addons_iterator b_addon_iter;
+ for(b_userpref.addons.begin(b_addon_iter); b_addon_iter != b_userpref.addons.end(); ++b_addon_iter) {
+ if(b_addon_iter->module() == "cycles") {
+ b_preferences = b_addon_iter->preferences().ptr;
+ break;
+ }
+ }
- if(RNA_property_enum_identifier(NULL, &systemptr, deviceprop, device_id, &id)) {
- foreach(DeviceInfo& info, devices)
- if(info.id == id)
- params.device = info;
+ int compute_device = get_enum(b_preferences, "compute_device_type");
+
+ if(compute_device != 0) {
+ vector<DeviceInfo> used_devices;
+ RNA_BEGIN(&b_preferences, device, "devices") {
+ if(get_enum(device, "type") == compute_device && get_boolean(device, "use")) {
+ string id = get_string(device, "id");
+ foreach(DeviceInfo& info, devices) {
+ if(info.id == id) {
+ used_devices.push_back(info);
+ break;
+ }
+ }
+ }
+ } RNA_END
+
+ if(used_devices.size() == 1) {
+ params.device = used_devices[0];
+ }
+ else if(used_devices.size() > 1) {
+ params.device = Device::get_multi_device(used_devices);
+ }
+ /* Else keep using the CPU device that was set before. */
}
}
diff --git a/intern/cycles/device/device.cpp b/intern/cycles/device/device.cpp
index 909ec7a6d60..ff9387b0a8a 100644
--- a/intern/cycles/device/device.cpp
+++ b/intern/cycles/device/device.cpp
@@ -258,33 +258,33 @@ Device *Device::create(DeviceInfo& info, Stats &stats, bool background)
DeviceType Device::type_from_string(const char *name)
{
- if(strcmp(name, "cpu") == 0)
+ if(strcmp(name, "CPU") == 0)
return DEVICE_CPU;
- else if(strcmp(name, "cuda") == 0)
+ else if(strcmp(name, "CUDA") == 0)
return DEVICE_CUDA;
- else if(strcmp(name, "opencl") == 0)
+ else if(strcmp(name, "OPENCL") == 0)
return DEVICE_OPENCL;
- else if(strcmp(name, "network") == 0)
+ else if(strcmp(name, "NETWORK") == 0)
return DEVICE_NETWORK;
- else if(strcmp(name, "multi") == 0)
+ else if(strcmp(name, "MULTI") == 0)
return DEVICE_MULTI;
-
+
return DEVICE_NONE;
}
string Device::string_from_type(DeviceType type)
{
if(type == DEVICE_CPU)
- return "cpu";
+ return "CPU";
else if(type == DEVICE_CUDA)
- return "cuda";
+ return "CUDA";
else if(type == DEVICE_OPENCL)
- return "opencl";
+ return "OPENCL";
else if(type == DEVICE_NETWORK)
- return "network";
+ return "NETWORK";
else if(type == DEVICE_MULTI)
- return "multi";
-
+ return "MULTI";
+
return "";
}
@@ -307,9 +307,6 @@ vector<DeviceType>& Device::available_types()
#ifdef WITH_NETWORK
types.push_back(DEVICE_NETWORK);
#endif
-#ifdef WITH_MULTI
- types.push_back(DEVICE_MULTI);
-#endif
need_types_update = false;
}
@@ -331,10 +328,6 @@ vector<DeviceInfo>& Device::available_devices()
device_opencl_info(devices);
#endif
-#ifdef WITH_MULTI
- device_multi_info(devices);
-#endif
-
device_cpu_info(devices);
#ifdef WITH_NETWORK
@@ -368,6 +361,29 @@ string Device::device_capabilities()
return capabilities;
}
+DeviceInfo Device::get_multi_device(vector<DeviceInfo> subdevices)
+{
+ assert(subdevices.size() > 1);
+
+ DeviceInfo info;
+ info.type = DEVICE_MULTI;
+ info.id = "MULTI";
+ info.description = "Multi Device";
+ info.multi_devices = subdevices;
+ info.num = 0;
+
+ info.has_bindless_textures = true;
+ info.pack_images = false;
+ foreach(DeviceInfo &device, subdevices) {
+ assert(device.type == info.multi_devices[0].type);
+
+ info.pack_images |= device.pack_images;
+ info.has_bindless_textures &= device.has_bindless_textures;
+ }
+
+ return info;
+}
+
void Device::tag_update()
{
need_types_update = true;
diff --git a/intern/cycles/device/device.h b/intern/cycles/device/device.h
index 77dc1fa9713..b9bdffa2618 100644
--- a/intern/cycles/device/device.h
+++ b/intern/cycles/device/device.h
@@ -49,7 +49,7 @@ class DeviceInfo {
public:
DeviceType type;
string description;
- string id;
+ string id; /* used for user preferences, should stay fixed with changing hardware config */
int num;
bool display_device;
bool advanced_shading;
@@ -69,6 +69,12 @@ public:
has_bindless_textures = false;
use_split_kernel = false;
}
+
+ bool operator==(const DeviceInfo &info) {
+ /* Multiple Devices with the same ID would be very bad. */
+ assert(id != info.id || (type == info.type && num == info.num && description == info.description));
+ return id == info.id;
+ }
};
class DeviceRequestedFeatures {
@@ -282,6 +288,7 @@ public:
static vector<DeviceType>& available_types();
static vector<DeviceInfo>& available_devices();
static string device_capabilities();
+ static DeviceInfo get_multi_device(vector<DeviceInfo> subdevices);
/* Tag devices lists for update. */
static void tag_update();
diff --git a/intern/cycles/device/device_cuda.cpp b/intern/cycles/device/device_cuda.cpp
index 73c9221e6a2..a4818aa3b8d 100644
--- a/intern/cycles/device/device_cuda.cpp
+++ b/intern/cycles/device/device_cuda.cpp
@@ -1408,13 +1408,18 @@ void device_cuda_info(vector<DeviceInfo>& devices)
info.type = DEVICE_CUDA;
info.description = string(name);
- info.id = string_printf("CUDA_%d", num);
info.num = num;
info.advanced_shading = (major >= 2);
info.has_bindless_textures = (major >= 3);
info.pack_images = false;
+ int pci_location[3] = {0, 0, 0};
+ cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
+ cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
+ cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
+ info.id = string_printf("CUDA_%s_%04x:%02x:%02x", name, pci_location[0], pci_location[1], pci_location[2]);
+
/* if device has a kernel timeout, assume it is used for display */
if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
info.description += " (Display)";
diff --git a/intern/cycles/device/device_intern.h b/intern/cycles/device/device_intern.h
index 47584ae6d22..de487649045 100644
--- a/intern/cycles/device/device_intern.h
+++ b/intern/cycles/device/device_intern.h
@@ -33,7 +33,6 @@ void device_cpu_info(vector<DeviceInfo>& devices);
void device_opencl_info(vector<DeviceInfo>& devices);
void device_cuda_info(vector<DeviceInfo>& devices);
void device_network_info(vector<DeviceInfo>& devices);
-void device_multi_info(vector<DeviceInfo>& devices);
string device_cpu_capabilities(void);
string device_opencl_capabilities(void);
diff --git a/intern/cycles/device/device_multi.cpp b/intern/cycles/device/device_multi.cpp
index ef257358b22..48fd159d508 100644
--- a/intern/cycles/device/device_multi.cpp
+++ b/intern/cycles/device/device_multi.cpp
@@ -350,120 +350,5 @@ Device *device_multi_create(DeviceInfo& info, Stats &stats, bool background)
return new MultiDevice(info, stats, background);
}
-static bool device_multi_add(vector<DeviceInfo>& devices, DeviceType type, bool with_display, bool with_advanced_shading, const char *id_fmt, int num)
-{
- DeviceInfo info;
-
- /* create map to find duplicate descriptions */
- map<string, int> dupli_map;
- map<string, int>::iterator dt;
- int num_added = 0, num_display = 0;
-
- info.advanced_shading = with_advanced_shading;
- info.pack_images = false;
- info.has_bindless_textures = true;
-
- foreach(DeviceInfo& subinfo, devices) {
- if(subinfo.type == type) {
- if(subinfo.advanced_shading != info.advanced_shading)
- continue;
- if(subinfo.display_device) {
- if(with_display)
- num_display++;
- else
- continue;
- }
-
- string key = subinfo.description;
-
- if(dupli_map.find(key) == dupli_map.end())
- dupli_map[key] = 1;
- else
- dupli_map[key]++;
-
- info.multi_devices.push_back(subinfo);
- if(subinfo.display_device)
- info.display_device = true;
- info.pack_images = info.pack_images || subinfo.pack_images;
- info.has_bindless_textures = info.has_bindless_textures && subinfo.has_bindless_textures;
- num_added++;
- }
- }
-
- if(num_added <= 1 || (with_display && num_display == 0))
- return false;
-
- /* generate string */
- stringstream desc;
- vector<string> last_tokens;
- bool first = true;
-
- for(dt = dupli_map.begin(); dt != dupli_map.end(); dt++) {
- if(!first) desc << " + ";
- first = false;
-
- /* get name and count */
- string name = dt->first;
- int count = dt->second;
-
- /* strip common prefixes */
- vector<string> tokens;
- string_split(tokens, dt->first);
-
- if(tokens.size() > 1) {
- int i;
-
- for(i = 0; i < tokens.size() && i < last_tokens.size(); i++)
- if(tokens[i] != last_tokens[i])
- break;
-
- name = "";
- for(; i < tokens.size(); i++) {
- name += tokens[i];
- if(i != tokens.size() - 1)
- name += " ";
- }
- }
-
- last_tokens = tokens;
-
- /* add */
- if(count > 1)
- desc << name << " (" << count << "x)";
- else
- desc << name;
- }
-
- /* add info */
- info.type = DEVICE_MULTI;
- info.description = desc.str();
- info.id = string_printf(id_fmt, num);
- info.display_device = with_display;
- info.num = 0;
-
- if(with_display)
- devices.push_back(info);
- else
- devices.insert(devices.begin(), info);
-
- return true;
-}
-
-void device_multi_info(vector<DeviceInfo>& devices)
-{
- int num = 0;
-
- if(!device_multi_add(devices, DEVICE_CUDA, false, true, "CUDA_MULTI_%d", num++))
- device_multi_add(devices, DEVICE_CUDA, false, false, "CUDA_MULTI_%d", num++);
- if(!device_multi_add(devices, DEVICE_CUDA, true, true, "CUDA_MULTI_%d", num++))
- device_multi_add(devices, DEVICE_CUDA, true, false, "CUDA_MULTI_%d", num++);
-
- num = 0;
- if(!device_multi_add(devices, DEVICE_OPENCL, false, true, "OPENCL_MULTI_%d", num++))
- device_multi_add(devices, DEVICE_OPENCL, false, false, "OPENCL_MULTI_%d", num++);
- if(!device_multi_add(devices, DEVICE_OPENCL, true, true, "OPENCL_MULTI_%d", num++))
- device_multi_add(devices, DEVICE_OPENCL, true, false, "OPENCL_MULTI_%d", num++);
-}
-
CCL_NAMESPACE_END
diff --git a/intern/cycles/device/device_opencl.cpp b/intern/cycles/device/device_opencl.cpp
index 45cf6b074e9..ba94c592a5f 100644
--- a/intern/cycles/device/device_opencl.cpp
+++ b/intern/cycles/device/device_opencl.cpp
@@ -83,17 +83,22 @@ void device_opencl_info(vector<DeviceInfo>& devices)
const string& platform_name = platform_device.platform_name;
const cl_device_type device_type = platform_device.device_type;
const string& device_name = platform_device.device_name;
+ string hardware_id = platform_device.hardware_id;
+ if(hardware_id == "") {
+ hardware_id = string_printf("ID_%d", num_devices);
+ }
+
DeviceInfo info;
info.type = DEVICE_OPENCL;
info.description = string_remove_trademark(string(device_name));
info.num = num_devices;
- info.id = string_printf("OPENCL_%d", info.num);
/* We don't know if it's used for display, but assume it is. */
info.display_device = true;
info.advanced_shading = OpenCLInfo::kernel_use_advanced_shading(platform_name);
info.pack_images = true;
info.use_split_kernel = OpenCLInfo::kernel_use_split(platform_name,
device_type);
+ info.id = string("OPENCL_") + platform_name + "_" + device_name + "_" + hardware_id;
devices.push_back(info);
num_devices++;
}
diff --git a/intern/cycles/device/opencl/opencl.h b/intern/cycles/device/opencl/opencl.h
index 30a35acbb2a..054ac9014f0 100644
--- a/intern/cycles/device/opencl/opencl.h
+++ b/intern/cycles/device/opencl/opencl.h
@@ -55,17 +55,20 @@ struct OpenCLPlatformDevice {
const string& platform_name,
cl_device_id device_id,
cl_device_type device_type,
- const string& device_name)
+ const string& device_name,
+ const string& hardware_id)
: platform_id(platform_id),
platform_name(platform_name),
device_id(device_id),
device_type(device_type),
- device_name(device_name) {}
+ device_name(device_name),
+ hardware_id(hardware_id) {}
cl_platform_id platform_id;
string platform_name;
cl_device_id device_id;
cl_device_type device_type;
string device_name;
+ string hardware_id;
};
/* Contains all static OpenCL helper functions. */
@@ -83,6 +86,8 @@ public:
string *error = NULL);
static bool device_version_check(cl_device_id device,
string *error = NULL);
+ static string get_hardware_id(string platform_name,
+ cl_device_id device_id);
static void get_usable_devices(vector<OpenCLPlatformDevice> *usable_devices,
bool force_all = false);
};
diff --git a/intern/cycles/device/opencl/opencl_util.cpp b/intern/cycles/device/opencl/opencl_util.cpp
index e425ae8e2e8..36eb70b8c85 100644
--- a/intern/cycles/device/opencl/opencl_util.cpp
+++ b/intern/cycles/device/opencl/opencl_util.cpp
@@ -661,6 +661,27 @@ bool OpenCLInfo::device_version_check(cl_device_id device,
return true;
}
+string OpenCLInfo::get_hardware_id(string platform_name, cl_device_id device_id)
+{
+ if(platform_name == "AMD Accelerated Parallel Processing" || platform_name == "Apple") {
+ /* Use cl_amd_device_topology extension. */
+ cl_char topology[24];
+ if(clGetDeviceInfo(device_id, 0x4037, sizeof(topology), topology, NULL) == CL_SUCCESS && topology[0] == 1) {
+ return string_printf("%02x:%02x.%01x", topology[21], topology[22], topology[23]);
+ }
+ }
+ else if(platform_name == "NVIDIA CUDA") {
+ /* Use two undocumented options of the cl_nv_device_attribute_query extension. */
+ cl_int bus_id, slot_id;
+ if(clGetDeviceInfo(device_id, 0x4008, sizeof(cl_int), &bus_id, NULL) == CL_SUCCESS &&
+ clGetDeviceInfo(device_id, 0x4009, sizeof(cl_int), &slot_id, NULL) == CL_SUCCESS) {
+ return string_printf("%02x:%02x.%01x", bus_id, slot_id>>3, slot_id & 0x7);
+ }
+ }
+ /* No general way to get a hardware ID from OpenCL => give up. */
+ return "";
+}
+
void OpenCLInfo::get_usable_devices(vector<OpenCLPlatformDevice> *usable_devices,
bool force_all)
{
@@ -773,11 +794,13 @@ void OpenCLInfo::get_usable_devices(vector<OpenCLPlatformDevice> *usable_devices
continue;
}
FIRST_VLOG(2) << "Adding new device " << device_name << ".";
+ string hardware_id = get_hardware_id(platform_name, device_id);
usable_devices->push_back(OpenCLPlatformDevice(platform_id,
platform_name,
device_id,
device_type,
- device_name));
+ device_name,
+ hardware_id));
}
else {
FIRST_VLOG(2) << "Ignoring device " << device_name
diff --git a/intern/cycles/render/session.h b/intern/cycles/render/session.h
index 8bff0f9ed15..1db4692e171 100644
--- a/intern/cycles/render/session.h
+++ b/intern/cycles/render/session.h
@@ -89,8 +89,7 @@ public:
}
bool modified(const SessionParams& params)
- { return !(device.type == params.device.type
- && device.id == params.device.id
+ { return !(device == params.device
&& background == params.background
&& progressive_refine == params.progressive_refine
&& output_path == params.output_path