-rw-r--r--   intern/cycles/blender/CCL_api.h                    |  11
-rw-r--r--   intern/cycles/blender/addon/properties.py          | 110
-rw-r--r--   intern/cycles/blender/addon/ui.py                   |  36
-rw-r--r--   intern/cycles/blender/blender_python.cpp           |  91
-rw-r--r--   intern/cycles/blender/blender_sync.cpp             |  47
-rw-r--r--   intern/cycles/device/device.cpp                    |  54
-rw-r--r--   intern/cycles/device/device.h                      |   9
-rw-r--r--   intern/cycles/device/device_cuda.cpp               |   7
-rw-r--r--   intern/cycles/device/device_intern.h               |   1
-rw-r--r--   intern/cycles/device/device_multi.cpp              | 115
-rw-r--r--   intern/cycles/device/device_opencl.cpp             |   7
-rw-r--r--   intern/cycles/device/opencl/opencl.h               |   9
-rw-r--r--   intern/cycles/device/opencl/opencl_util.cpp        |  25
-rw-r--r--   intern/cycles/render/session.h                     |   3
-rw-r--r--   release/scripts/startup/bl_operators/wm.py         |  29
-rw-r--r--   release/scripts/startup/bl_ui/space_userpref.py    |   7
-rw-r--r--   source/blender/makesrna/intern/rna_userdef.c       | 107
-rw-r--r--   source/blenderplayer/bad_level_call_stubs/stubs.c  |   5
18 files changed, 310 insertions, 363 deletions
diff --git a/intern/cycles/blender/CCL_api.h b/intern/cycles/blender/CCL_api.h
index d3a68c4db4f..233ffc8802c 100644
--- a/intern/cycles/blender/CCL_api.h
+++ b/intern/cycles/blender/CCL_api.h
@@ -21,17 +21,6 @@
extern "C" {
#endif
-/* returns a list of devices for selection, array is empty identifier
- * terminated and must not be freed */
-
-typedef struct CCLDeviceInfo {
- char identifier[128];
- char name[512];
- int value;
-} CCLDeviceInfo;
-
-CCLDeviceInfo *CCL_compute_device_list(int device_type);
-
/* create python module _cycles used by addon */
void *CCL_python_module_init(void);
diff --git a/intern/cycles/blender/addon/properties.py b/intern/cycles/blender/addon/properties.py
index a8ab9100ded..27c9b922042 100644
--- a/intern/cycles/blender/addon/properties.py
+++ b/intern/cycles/blender/addon/properties.py
@@ -21,7 +21,8 @@ from bpy.props import (BoolProperty,
EnumProperty,
FloatProperty,
IntProperty,
- PointerProperty)
+ PointerProperty,
+ StringProperty)
# enums
@@ -122,6 +123,12 @@ enum_volume_interpolation = (
('CUBIC', "Cubic", "Smoothed high quality interpolation, but slower")
)
+enum_device_type = (
+ ('CPU', "CPU", "CPU", 0),
+ ('CUDA', "CUDA", "CUDA", 1),
+ ('OPENCL', "OpenCL", "OpenCL", 2)
+ )
+
class CyclesRenderSettings(bpy.types.PropertyGroup):
@classmethod
@@ -1130,6 +1137,103 @@ class CyclesCurveSettings(bpy.types.PropertyGroup):
del bpy.types.ParticleSettings.cycles
+class CyclesDeviceSettings(bpy.types.PropertyGroup):
+ @classmethod
+ def register(cls):
+ cls.id = StringProperty(name="ID")
+ cls.name = StringProperty(name="Name")
+ cls.use = BoolProperty(name="Use", default=True)
+ cls.type = EnumProperty(name="Type", items=enum_device_type, default='CUDA')
+
+
+class CyclesPreferences(bpy.types.AddonPreferences):
+ bl_idname = __package__
+
+ def get_device_types(self, context):
+ import _cycles
+ has_cuda, has_opencl = _cycles.get_device_types()
+ list = [('NONE', "None", "Don't use compute device", 0)]
+ if has_cuda:
+ list.append(('CUDA', "CUDA", "Use CUDA for GPU acceleration", 1))
+ if has_opencl:
+ list.append(('OPENCL', "OpenCL", "Use OpenCL for GPU acceleration", 2))
+ return list
+
+ compute_device_type = EnumProperty(
+ name="Compute Device Type",
+ description="Device to use for computation (rendering with Cycles)",
+ items=get_device_types,
+ )
+
+ devices = bpy.props.CollectionProperty(type=CyclesDeviceSettings)
+
+ def get_devices(self):
+ import _cycles
+ # Layout of the device tuples: (Name, Type, Internal ID, Persistent ID)
+ device_list = _cycles.available_devices()
+
+ cuda_devices = []
+ opencl_devices = []
+ for device in device_list:
+ if not device[1] in {'CUDA', 'OPENCL'}:
+ continue
+
+ entry = None
+ # Try to find existing Device entry
+ for dev in self.devices:
+ if dev.id == device[2] and dev.type == device[1]:
+ entry = dev
+ break
+ # Create new entry if no existing one was found
+ if not entry:
+ entry = self.devices.add()
+ entry.id = device[2]
+ entry.name = device[0]
+ entry.type = device[1]
+
+ # Sort entries into lists
+ if entry.type == 'CUDA':
+ cuda_devices.append(entry)
+ elif entry.type == 'OPENCL':
+ opencl_devices.append(entry)
+ return cuda_devices, opencl_devices
+
+
+ def has_active_device(self):
+ import _cycles
+ device_list = _cycles.available_devices()
+ for device in device_list:
+ if device[1] != self.compute_device_type:
+ continue
+ if any(dev.use and dev.id == device[2] for dev in self.devices):
+ return True
+ return False
+
+
+ def draw_impl(self, layout, context):
+ layout.label(text="Compute Device:")
+ layout.row().prop(self, "compute_device_type", expand=True)
+
+ cuda_devices, opencl_devices = self.get_devices()
+ row = layout.row()
+
+ if cuda_devices:
+ col = row.column(align=True)
+ col.label(text="CUDA devices:")
+ for device in cuda_devices:
+ col.prop(device, "use", text=device.name, toggle=True)
+
+ if opencl_devices:
+ col = row.column(align=True)
+ col.label(text="OpenCL devices:")
+ for device in opencl_devices:
+ col.prop(device, "use", text=device.name, toggle=True)
+
+
+ def draw(self, context):
+ self.draw_impl(self.layout, context)
+
+
def register():
bpy.utils.register_class(CyclesRenderSettings)
bpy.utils.register_class(CyclesCameraSettings)
@@ -1141,6 +1245,8 @@ def register():
bpy.utils.register_class(CyclesObjectSettings)
bpy.utils.register_class(CyclesCurveRenderSettings)
bpy.utils.register_class(CyclesCurveSettings)
+ bpy.utils.register_class(CyclesDeviceSettings)
+ bpy.utils.register_class(CyclesPreferences)
def unregister():
@@ -1154,3 +1260,5 @@ def unregister():
bpy.utils.unregister_class(CyclesVisibilitySettings)
bpy.utils.unregister_class(CyclesCurveRenderSettings)
bpy.utils.unregister_class(CyclesCurveSettings)
+ bpy.utils.unregister_class(CyclesDeviceSettings)
+ bpy.utils.unregister_class(CyclesPreferences)
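
With this change the device selection lives in the add-on's own preferences instead of user_preferences.system. A minimal sketch of reading the new settings from a script (illustrative only, not part of the patch; Blender 2.7x API names):

    import bpy

    prefs = bpy.context.user_preferences.addons['cycles'].preferences
    print("Compute device type:", prefs.compute_device_type)

    # get_devices() refreshes the devices collection from _cycles.available_devices()
    # and returns the CUDA and OpenCL entries separately.
    cuda_devices, opencl_devices = prefs.get_devices()
    for dev in cuda_devices + opencl_devices:
        print(dev.type, dev.id, dev.name, "enabled" if dev.use else "disabled")
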
diff --git a/intern/cycles/blender/addon/ui.py b/intern/cycles/blender/addon/ui.py
index 4942a71ce4d..d9ad7d967a6 100644
--- a/intern/cycles/blender/addon/ui.py
+++ b/intern/cycles/blender/addon/ui.py
@@ -53,25 +53,26 @@ class CyclesButtonsPanel:
return rd.engine in cls.COMPAT_ENGINES
+def get_device_type(context):
+ return context.user_preferences.addons[__package__].preferences.compute_device_type
+
+
def use_cpu(context):
cscene = context.scene.cycles
- device_type = context.user_preferences.system.compute_device_type
- return (device_type == 'NONE' or cscene.device == 'CPU')
+ return (get_device_type(context) == 'NONE' or cscene.device == 'CPU')
def use_opencl(context):
cscene = context.scene.cycles
- device_type = context.user_preferences.system.compute_device_type
- return (device_type == 'OPENCL' and cscene.device == 'GPU')
+ return (get_device_type(context) == 'OPENCL' and cscene.device == 'GPU')
def use_cuda(context):
cscene = context.scene.cycles
- device_type = context.user_preferences.system.compute_device_type
- return (device_type == 'CUDA' and cscene.device == 'GPU')
+ return (get_device_type(context) == 'CUDA' and cscene.device == 'GPU')
def use_branched_path(context):
@@ -85,6 +86,14 @@ def use_sample_all_lights(context):
return cscene.sample_all_lights_direct or cscene.sample_all_lights_indirect
+def show_device_selection(context):
+ type = get_device_type(context)
+ if type == 'NETWORK':
+ return True
+ if not type in {'CUDA', 'OPENCL'}:
+ return False
+ return context.user_preferences.addons[__package__].preferences.has_active_device()
+
def draw_samples_info(layout, context):
cscene = context.scene.cycles
@@ -141,7 +150,6 @@ class CyclesRender_PT_sampling(CyclesButtonsPanel, Panel):
scene = context.scene
cscene = scene.cycles
- device_type = context.user_preferences.system.compute_device_type
row = layout.row(align=True)
row.menu("CYCLES_MT_sampling_presets", text=bpy.types.CYCLES_MT_sampling_presets.bl_label)
@@ -150,7 +158,7 @@ class CyclesRender_PT_sampling(CyclesButtonsPanel, Panel):
row = layout.row()
sub = row.row()
- sub.active = device_type != 'OPENCL' or use_cpu(context)
+ sub.active = get_device_type(context) != 'OPENCL' or use_cpu(context)
sub.prop(cscene, "progressive", text="")
row.prop(cscene, "use_square_samples")
@@ -364,6 +372,8 @@ class CyclesRender_PT_performance(CyclesButtonsPanel, Panel):
rd = scene.render
cscene = scene.cycles
+ context.user_preferences.addons['cycles'].preferences.draw_impl(layout, context)
+
split = layout.split()
col = split.column(align=True)
@@ -1606,9 +1616,13 @@ def draw_device(self, context):
layout.prop(cscene, "feature_set")
- device_type = context.user_preferences.system.compute_device_type
- if device_type in {'CUDA', 'OPENCL', 'NETWORK'}:
- layout.prop(cscene, "device")
+ split = layout.split(percentage=1/3)
+ split.label("Device:")
+ row = split.row(align=True)
+ sub = row.split(align=True)
+ sub.active = show_device_selection(context)
+ sub.prop(cscene, "device", text="")
+ row.operator("wm.addon_userpref_show", text="Preferences", icon='PREFERENCES').module = __package__
if engine.with_osl() and use_cpu(context):
layout.prop(cscene, "shading_system")
diff --git a/intern/cycles/blender/blender_python.cpp b/intern/cycles/blender/blender_python.cpp
index a50f5edb1df..438abc49f88 100644
--- a/intern/cycles/blender/blender_python.cpp
+++ b/intern/cycles/blender/blender_python.cpp
@@ -40,10 +40,6 @@ CCL_NAMESPACE_BEGIN
namespace {
-/* Device list stored static (used by compute_device_list()). */
-static ccl::vector<CCLDeviceInfo> device_list;
-static ccl::DeviceType device_type = DEVICE_NONE;
-
/* Flag describing whether debug flags were synchronized from scene. */
bool debug_flags_set = false;
@@ -195,7 +191,6 @@ static PyObject *exit_func(PyObject * /*self*/, PyObject * /*args*/)
ShaderManager::free_memory();
TaskScheduler::free_memory();
Device::free_memory();
- device_list.free_memory();
Py_RETURN_NONE;
}
@@ -389,7 +384,12 @@ static PyObject *available_devices_func(PyObject * /*self*/, PyObject * /*args*/
for(size_t i = 0; i < devices.size(); i++) {
DeviceInfo& device = devices[i];
- PyTuple_SET_ITEM(ret, i, PyUnicode_FromString(device.description.c_str()));
+ string type_name = Device::string_from_type(device.type);
+ PyObject *device_tuple = PyTuple_New(3);
+ PyTuple_SET_ITEM(device_tuple, 0, PyUnicode_FromString(device.description.c_str()));
+ PyTuple_SET_ITEM(device_tuple, 1, PyUnicode_FromString(type_name.c_str()));
+ PyTuple_SET_ITEM(device_tuple, 2, PyUnicode_FromString(device.id.c_str()));
+ PyTuple_SET_ITEM(ret, i, device_tuple);
}
return ret;
@@ -676,6 +676,20 @@ static PyObject *set_resumable_chunks_func(PyObject * /*self*/, PyObject *args)
Py_RETURN_NONE;
}
+static PyObject *get_device_types_func(PyObject * /*self*/, PyObject * /*args*/)
+{
+ vector<DeviceInfo>& devices = Device::available_devices();
+ bool has_cuda = false, has_opencl = false;
+ for(int i = 0; i < devices.size(); i++) {
+ has_cuda |= (devices[i].type == DEVICE_CUDA);
+ has_opencl |= (devices[i].type == DEVICE_OPENCL);
+ }
+ PyObject *list = PyTuple_New(2);
+ PyTuple_SET_ITEM(list, 0, PyBool_FromLong(has_cuda));
+ PyTuple_SET_ITEM(list, 1, PyBool_FromLong(has_opencl));
+ return list;
+}
+
static PyMethodDef methods[] = {
{"init", init_func, METH_VARARGS, ""},
{"exit", exit_func, METH_VARARGS, ""},
@@ -703,6 +717,9 @@ static PyMethodDef methods[] = {
/* Resumable render */
{"set_resumable_chunks", set_resumable_chunks_func, METH_VARARGS, ""},
+ /* Compute Device selection */
+ {"get_device_types", get_device_types_func, METH_VARARGS, ""},
+
{NULL, NULL, 0, NULL},
};
@@ -715,47 +732,6 @@ static struct PyModuleDef module = {
NULL, NULL, NULL, NULL
};
-static CCLDeviceInfo *compute_device_list(DeviceType type)
-{
- /* create device list if it's not already done */
- if(type != device_type) {
- ccl::vector<DeviceInfo>& devices = ccl::Device::available_devices();
-
- device_type = type;
- device_list.clear();
-
- /* add devices */
- int i = 0;
-
- foreach(DeviceInfo& info, devices) {
- if(info.type == type ||
- (info.type == DEVICE_MULTI && info.multi_devices[0].type == type))
- {
- CCLDeviceInfo cinfo;
-
- strncpy(cinfo.identifier, info.id.c_str(), sizeof(cinfo.identifier));
- cinfo.identifier[info.id.length()] = '\0';
-
- strncpy(cinfo.name, info.description.c_str(), sizeof(cinfo.name));
- cinfo.name[info.description.length()] = '\0';
-
- cinfo.value = i++;
-
- device_list.push_back(cinfo);
- }
- }
-
- /* null terminate */
- if(!device_list.empty()) {
- CCLDeviceInfo cinfo = {"", "", 0};
- device_list.push_back(cinfo);
- }
- }
-
- return (device_list.empty())? NULL: &device_list[0];
-}
-
-
CCL_NAMESPACE_END
void *CCL_python_module_init()
@@ -794,24 +770,3 @@ void *CCL_python_module_init()
return (void*)mod;
}
-
-CCLDeviceInfo *CCL_compute_device_list(int device_type)
-{
- ccl::DeviceType type;
- switch(device_type) {
- case 0:
- type = ccl::DEVICE_CUDA;
- break;
- case 1:
- type = ccl::DEVICE_OPENCL;
- break;
- case 2:
- type = ccl::DEVICE_NETWORK;
- break;
- default:
- type = ccl::DEVICE_NONE;
- break;
- }
- return ccl::compute_device_list(type);
-}
-
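
After this change available_devices() returns structured tuples instead of bare description strings. Roughly, from within Blender (illustrative sketch, not part of the patch):

    import _cycles

    # Each entry is the 3-tuple built in available_devices_func():
    # (description, device type string, persistent id).
    for name, dev_type, dev_id in _cycles.available_devices():
        print(dev_type, dev_id, name)

    # get_device_types() reports whether any CUDA / OpenCL device is present.
    has_cuda, has_opencl = _cycles.get_device_types()
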
diff --git a/intern/cycles/blender/blender_sync.cpp b/intern/cycles/blender/blender_sync.cpp
index bc5c3bb8096..6e466826c35 100644
--- a/intern/cycles/blender/blender_sync.cpp
+++ b/intern/cycles/blender/blender_sync.cpp
@@ -531,7 +531,12 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine,
vector<DeviceInfo>& devices = Device::available_devices();
/* device default CPU */
- params.device = devices[0];
+ foreach(DeviceInfo& device, devices) {
+ if(device.type == DEVICE_CPU) {
+ params.device = device;
+ break;
+ }
+ }
if(get_enum(cscene, "device") == 2) {
/* find network device */
@@ -540,17 +545,39 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine& b_engine,
params.device = info;
}
else if(get_enum(cscene, "device") == 1) {
- /* find GPU device with given id */
- PointerRNA systemptr = b_userpref.system().ptr;
- PropertyRNA *deviceprop = RNA_struct_find_property(&systemptr, "compute_device");
- int device_id = b_userpref.system().compute_device();
+ PointerRNA b_preferences;
- const char *id;
+ BL::UserPreferences::addons_iterator b_addon_iter;
+ for(b_userpref.addons.begin(b_addon_iter); b_addon_iter != b_userpref.addons.end(); ++b_addon_iter) {
+ if(b_addon_iter->module() == "cycles") {
+ b_preferences = b_addon_iter->preferences().ptr;
+ break;
+ }
+ }
- if(RNA_property_enum_identifier(NULL, &systemptr, deviceprop, device_id, &id)) {
- foreach(DeviceInfo& info, devices)
- if(info.id == id)
- params.device = info;
+ int compute_device = get_enum(b_preferences, "compute_device_type");
+
+ if(compute_device != 0) {
+ vector<DeviceInfo> used_devices;
+ RNA_BEGIN(&b_preferences, device, "devices") {
+ if(get_enum(device, "type") == compute_device && get_boolean(device, "use")) {
+ string id = get_string(device, "id");
+ foreach(DeviceInfo& info, devices) {
+ if(info.id == id) {
+ used_devices.push_back(info);
+ break;
+ }
+ }
+ }
+ } RNA_END
+
+ if(used_devices.size() == 1) {
+ params.device = used_devices[0];
+ }
+ else if(used_devices.size() > 1) {
+ params.device = Device::get_multi_device(used_devices);
+ }
+ /* Else keep using the CPU device that was set before. */
}
}
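
In pseudocode, the GPU branch of the selection logic above boils down to the following (hypothetical helper names, shown only to summarise the C++ code; make_multi_device stands in for Device::get_multi_device):

    def resolve_render_device(prefs, available_devices, cpu_device):
        if prefs.compute_device_type == 'NONE':
            return cpu_device
        used = []
        for dev in prefs.devices:
            if dev.use and dev.type == prefs.compute_device_type:
                match = next((info for info in available_devices if info.id == dev.id), None)
                if match is not None:
                    used.append(match)
        if len(used) > 1:
            return make_multi_device(used)   # mirrors Device::get_multi_device()
        if used:
            return used[0]
        return cpu_device                    # nothing enabled: keep the CPU fallback
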
diff --git a/intern/cycles/device/device.cpp b/intern/cycles/device/device.cpp
index 909ec7a6d60..ff9387b0a8a 100644
--- a/intern/cycles/device/device.cpp
+++ b/intern/cycles/device/device.cpp
@@ -258,33 +258,33 @@ Device *Device::create(DeviceInfo& info, Stats &stats, bool background)
DeviceType Device::type_from_string(const char *name)
{
- if(strcmp(name, "cpu") == 0)
+ if(strcmp(name, "CPU") == 0)
return DEVICE_CPU;
- else if(strcmp(name, "cuda") == 0)
+ else if(strcmp(name, "CUDA") == 0)
return DEVICE_CUDA;
- else if(strcmp(name, "opencl") == 0)
+ else if(strcmp(name, "OPENCL") == 0)
return DEVICE_OPENCL;
- else if(strcmp(name, "network") == 0)
+ else if(strcmp(name, "NETWORK") == 0)
return DEVICE_NETWORK;
- else if(strcmp(name, "multi") == 0)
+ else if(strcmp(name, "MULTI") == 0)
return DEVICE_MULTI;
-
+
return DEVICE_NONE;
}
string Device::string_from_type(DeviceType type)
{
if(type == DEVICE_CPU)
- return "cpu";
+ return "CPU";
else if(type == DEVICE_CUDA)
- return "cuda";
+ return "CUDA";
else if(type == DEVICE_OPENCL)
- return "opencl";
+ return "OPENCL";
else if(type == DEVICE_NETWORK)
- return "network";
+ return "NETWORK";
else if(type == DEVICE_MULTI)
- return "multi";
-
+ return "MULTI";
+
return "";
}
@@ -307,9 +307,6 @@ vector<DeviceType>& Device::available_types()
#ifdef WITH_NETWORK
types.push_back(DEVICE_NETWORK);
#endif
-#ifdef WITH_MULTI
- types.push_back(DEVICE_MULTI);
-#endif
need_types_update = false;
}
@@ -331,10 +328,6 @@ vector<DeviceInfo>& Device::available_devices()
device_opencl_info(devices);
#endif
-#ifdef WITH_MULTI
- device_multi_info(devices);
-#endif
-
device_cpu_info(devices);
#ifdef WITH_NETWORK
@@ -368,6 +361,29 @@ string Device::device_capabilities()
return capabilities;
}
+DeviceInfo Device::get_multi_device(vector<DeviceInfo> subdevices)
+{
+ assert(subdevices.size() > 1);
+
+ DeviceInfo info;
+ info.type = DEVICE_MULTI;
+ info.id = "MULTI";
+ info.description = "Multi Device";
+ info.multi_devices = subdevices;
+ info.num = 0;
+
+ info.has_bindless_textures = true;
+ info.pack_images = false;
+ foreach(DeviceInfo &device, subdevices) {
+ assert(device.type == info.multi_devices[0].type);
+
+ info.pack_images |= device.pack_images;
+ info.has_bindless_textures &= device.has_bindless_textures;
+ }
+
+ return info;
+}
+
void Device::tag_update()
{
need_types_update = true;
diff --git a/intern/cycles/device/device.h b/intern/cycles/device/device.h
index 77dc1fa9713..b9bdffa2618 100644
--- a/intern/cycles/device/device.h
+++ b/intern/cycles/device/device.h
@@ -49,7 +49,7 @@ class DeviceInfo {
public:
DeviceType type;
string description;
- string id;
+ string id; /* used for user preferences, should stay fixed with changing hardware config */
int num;
bool display_device;
bool advanced_shading;
@@ -69,6 +69,12 @@ public:
has_bindless_textures = false;
use_split_kernel = false;
}
+
+ bool operator==(const DeviceInfo &info) {
+ /* Multiple Devices with the same ID would be very bad. */
+ assert(id != info.id || (type == info.type && num == info.num && description == info.description));
+ return id == info.id;
+ }
};
class DeviceRequestedFeatures {
@@ -282,6 +288,7 @@ public:
static vector<DeviceType>& available_types();
static vector<DeviceInfo>& available_devices();
static string device_capabilities();
+ static DeviceInfo get_multi_device(vector<DeviceInfo> subdevices);
/* Tag devices lists for update. */
static void tag_update();
diff --git a/intern/cycles/device/device_cuda.cpp b/intern/cycles/device/device_cuda.cpp
index 73c9221e6a2..a4818aa3b8d 100644
--- a/intern/cycles/device/device_cuda.cpp
+++ b/intern/cycles/device/device_cuda.cpp
@@ -1408,13 +1408,18 @@ void device_cuda_info(vector<DeviceInfo>& devices)
info.type = DEVICE_CUDA;
info.description = string(name);
- info.id = string_printf("CUDA_%d", num);
info.num = num;
info.advanced_shading = (major >= 2);
info.has_bindless_textures = (major >= 3);
info.pack_images = false;
+ int pci_location[3] = {0, 0, 0};
+ cuDeviceGetAttribute(&pci_location[0], CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, num);
+ cuDeviceGetAttribute(&pci_location[1], CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, num);
+ cuDeviceGetAttribute(&pci_location[2], CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, num);
+ info.id = string_printf("CUDA_%s_%04x:%02x:%02x", name, pci_location[0], pci_location[1], pci_location[2]);
+
/* if device has a kernel timeout, assume it is used for display */
if(cuDeviceGetAttribute(&attr, CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT, num) == CUDA_SUCCESS && attr == 1) {
info.description += " (Display)";
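
The CUDA id is now derived from the device name and PCI location rather than the enumeration index, so it stays stable when devices are added or reordered. For illustration, with made-up values (not part of the patch):

    # Hypothetical example of the new persistent CUDA id:
    name, domain, bus, device = "GeForce GTX 1080", 0x0000, 0x01, 0x00
    device_id = "CUDA_%s_%04x:%02x:%02x" % (name, domain, bus, device)
    # -> "CUDA_GeForce GTX 1080_0000:01:00"
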
diff --git a/intern/cycles/device/device_intern.h b/intern/cycles/device/device_intern.h
index 47584ae6d22..de487649045 100644
--- a/intern/cycles/device/device_intern.h
+++ b/intern/cycles/device/device_intern.h
@@ -33,7 +33,6 @@ void device_cpu_info(vector<DeviceInfo>& devices);
void device_opencl_info(vector<DeviceInfo>& devices);
void device_cuda_info(vector<DeviceInfo>& devices);
void device_network_info(vector<DeviceInfo>& devices);
-void device_multi_info(vector<DeviceInfo>& devices);
string device_cpu_capabilities(void);
string device_opencl_capabilities(void);
diff --git a/intern/cycles/device/device_multi.cpp b/intern/cycles/device/device_multi.cpp
index ef257358b22..48fd159d508 100644
--- a/intern/cycles/device/device_multi.cpp
+++ b/intern/cycles/device/device_multi.cpp
@@ -350,120 +350,5 @@ Device *device_multi_create(DeviceInfo& info, Stats &stats, bool background)
return new MultiDevice(info, stats, background);
}
-static bool device_multi_add(vector<DeviceInfo>& devices, DeviceType type, bool with_display, bool with_advanced_shading, const char *id_fmt, int num)
-{
- DeviceInfo info;
-
- /* create map to find duplicate descriptions */
- map<string, int> dupli_map;
- map<string, int>::iterator dt;
- int num_added = 0, num_display = 0;
-
- info.advanced_shading = with_advanced_shading;
- info.pack_images = false;
- info.has_bindless_textures = true;
-
- foreach(DeviceInfo& subinfo, devices) {
- if(subinfo.type == type) {
- if(subinfo.advanced_shading != info.advanced_shading)
- continue;
- if(subinfo.display_device) {
- if(with_display)
- num_display++;
- else
- continue;
- }
-
- string key = subinfo.description;
-
- if(dupli_map.find(key) == dupli_map.end())
- dupli_map[key] = 1;
- else
- dupli_map[key]++;
-
- info.multi_devices.push_back(subinfo);
- if(subinfo.display_device)
- info.display_device = true;
- info.pack_images = info.pack_images || subinfo.pack_images;
- info.has_bindless_textures = info.has_bindless_textures && subinfo.has_bindless_textures;
- num_added++;
- }
- }
-
- if(num_added <= 1 || (with_display && num_display == 0))
- return false;
-
- /* generate string */
- stringstream desc;
- vector<string> last_tokens;
- bool first = true;
-
- for(dt = dupli_map.begin(); dt != dupli_map.end(); dt++) {
- if(!first) desc << " + ";
- first = false;
-
- /* get name and count */
- string name = dt->first;
- int count = dt->second;
-
- /* strip common prefixes */
- vector<string> tokens;
- string_split(tokens, dt->first);
-
- if(tokens.size() > 1) {
- int i;
-
- for(i = 0; i < tokens.size() && i < last_tokens.size(); i++)
- if(tokens[i] != last_tokens[i])
- break;
-
- name = "";
- for(; i < tokens.size(); i++) {
- name += tokens[i];
- if(i != tokens.size() - 1)
- name += " ";
- }
- }
-
- last_tokens = tokens;
-
- /* add */
- if(count > 1)
- desc << name << " (" << count << "x)";
- else
- desc << name;
- }
-
- /* add info */
- info.type = DEVICE_MULTI;
- info.description = desc.str();
- info.id = string_printf(id_fmt, num);
- info.display_device = with_display;
- info.num = 0;
-
- if(with_display)
- devices.push_back(info);
- else
- devices.insert(devices.begin(), info);
-
- return true;
-}
-
-void device_multi_info(vector<DeviceInfo>& devices)
-{
- int num = 0;
-
- if(!device_multi_add(devices, DEVICE_CUDA, false, true, "CUDA_MULTI_%d", num++))
- device_multi_add(devices, DEVICE_CUDA, false, false, "CUDA_MULTI_%d", num++);
- if(!device_multi_add(devices, DEVICE_CUDA, true, true, "CUDA_MULTI_%d", num++))
- device_multi_add(devices, DEVICE_CUDA, true, false, "CUDA_MULTI_%d", num++);
-
- num = 0;
- if(!device_multi_add(devices, DEVICE_OPENCL, false, true, "OPENCL_MULTI_%d", num++))
- device_multi_add(devices, DEVICE_OPENCL, false, false, "OPENCL_MULTI_%d", num++);
- if(!device_multi_add(devices, DEVICE_OPENCL, true, true, "OPENCL_MULTI_%d", num++))
- device_multi_add(devices, DEVICE_OPENCL, true, false, "OPENCL_MULTI_%d", num++);
-}
-
CCL_NAMESPACE_END
diff --git a/intern/cycles/device/device_opencl.cpp b/intern/cycles/device/device_opencl.cpp
index 45cf6b074e9..ba94c592a5f 100644
--- a/intern/cycles/device/device_opencl.cpp
+++ b/intern/cycles/device/device_opencl.cpp
@@ -83,17 +83,22 @@ void device_opencl_info(vector<DeviceInfo>& devices)
const string& platform_name = platform_device.platform_name;
const cl_device_type device_type = platform_device.device_type;
const string& device_name = platform_device.device_name;
+ string hardware_id = platform_device.hardware_id;
+ if(hardware_id == "") {
+ hardware_id = string_printf("ID_%d", num_devices);
+ }
+
DeviceInfo info;
info.type = DEVICE_OPENCL;
info.description = string_remove_trademark(string(device_name));
info.num = num_devices;
- info.id = string_printf("OPENCL_%d", info.num);
/* We don't know if it's used for display, but assume it is. */
info.display_device = true;
info.advanced_shading = OpenCLInfo::kernel_use_advanced_shading(platform_name);
info.pack_images = true;
info.use_split_kernel = OpenCLInfo::kernel_use_split(platform_name,
device_type);
+ info.id = string("OPENCL_") + platform_name + "_" + device_name + "_" + hardware_id;
devices.push_back(info);
num_devices++;
}
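
The OpenCL id is likewise composed from the platform name, device name and a PCI-derived hardware id (falling back to "ID_<n>" when none is available). A made-up example of the resulting string (illustrative only):

    # Hypothetical example of the new persistent OpenCL id:
    platform_name = "NVIDIA CUDA"
    device_name = "GeForce GTX 1080"
    hardware_id = "01:00.0"   # bus:slot.function from get_hardware_id(), or "ID_<n>" fallback
    device_id = "OPENCL_" + platform_name + "_" + device_name + "_" + hardware_id
    # -> "OPENCL_NVIDIA CUDA_GeForce GTX 1080_01:00.0"
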
diff --git a/intern/cycles/device/opencl/opencl.h b/intern/cycles/device/opencl/opencl.h
index 30a35acbb2a..054ac9014f0 100644
--- a/intern/cycles/device/opencl/opencl.h
+++ b/intern/cycles/device/opencl/opencl.h
@@ -55,17 +55,20 @@ struct OpenCLPlatformDevice {
const string& platform_name,
cl_device_id device_id,
cl_device_type device_type,
- const string& device_name)
+ const string& device_name,
+ const string& hardware_id)
: platform_id(platform_id),
platform_name(platform_name),
device_id(device_id),
device_type(device_type),
- device_name(device_name) {}
+ device_name(device_name),
+ hardware_id(hardware_id) {}
cl_platform_id platform_id;
string platform_name;
cl_device_id device_id;
cl_device_type device_type;
string device_name;
+ string hardware_id;
};
/* Contains all static OpenCL helper functions. */
@@ -83,6 +86,8 @@ public:
string *error = NULL);
static bool device_version_check(cl_device_id device,
string *error = NULL);
+ static string get_hardware_id(string platform_name,
+ cl_device_id device_id);
static void get_usable_devices(vector<OpenCLPlatformDevice> *usable_devices,
bool force_all = false);
};
diff --git a/intern/cycles/device/opencl/opencl_util.cpp b/intern/cycles/device/opencl/opencl_util.cpp
index e425ae8e2e8..36eb70b8c85 100644
--- a/intern/cycles/device/opencl/opencl_util.cpp
+++ b/intern/cycles/device/opencl/opencl_util.cpp
@@ -661,6 +661,27 @@ bool OpenCLInfo::device_version_check(cl_device_id device,
return true;
}
+string OpenCLInfo::get_hardware_id(string platform_name, cl_device_id device_id)
+{
+ if(platform_name == "AMD Accelerated Parallel Processing" || platform_name == "Apple") {
+ /* Use cl_amd_device_topology extension. */
+ cl_char topology[24];
+ if(clGetDeviceInfo(device_id, 0x4037, sizeof(topology), topology, NULL) == CL_SUCCESS && topology[0] == 1) {
+ return string_printf("%02x:%02x.%01x", topology[21], topology[22], topology[23]);
+ }
+ }
+ else if(platform_name == "NVIDIA CUDA") {
+ /* Use two undocumented options of the cl_nv_device_attribute_query extension. */
+ cl_int bus_id, slot_id;
+ if(clGetDeviceInfo(device_id, 0x4008, sizeof(cl_int), &bus_id, NULL) == CL_SUCCESS &&
+ clGetDeviceInfo(device_id, 0x4009, sizeof(cl_int), &slot_id, NULL) == CL_SUCCESS) {
+ return string_printf("%02x:%02x.%01x", bus_id, slot_id>>3, slot_id & 0x7);
+ }
+ }
+ /* No general way to get a hardware ID from OpenCL => give up. */
+ return "";
+}
+
void OpenCLInfo::get_usable_devices(vector<OpenCLPlatformDevice> *usable_devices,
bool force_all)
{
@@ -773,11 +794,13 @@ void OpenCLInfo::get_usable_devices(vector<OpenCLPlatformDevice> *usable_devices
continue;
}
FIRST_VLOG(2) << "Adding new device " << device_name << ".";
+ string hardware_id = get_hardware_id(platform_name, device_id);
usable_devices->push_back(OpenCLPlatformDevice(platform_id,
platform_name,
device_id,
device_type,
- device_name));
+ device_name,
+ hardware_id));
}
else {
FIRST_VLOG(2) << "Ignoring device " << device_name
diff --git a/intern/cycles/render/session.h b/intern/cycles/render/session.h
index 8bff0f9ed15..1db4692e171 100644
--- a/intern/cycles/render/session.h
+++ b/intern/cycles/render/session.h
@@ -89,8 +89,7 @@ public:
}
bool modified(const SessionParams& params)
- { return !(device.type == params.device.type
- && device.id == params.device.id
+ { return !(device == params.device
&& background == params.background
&& progressive_refine == params.progressive_refine
&& output_path == params.output_path
diff --git a/release/scripts/startup/bl_operators/wm.py b/release/scripts/startup/bl_operators/wm.py
index 1c97d213e05..343fcdb0d22 100644
--- a/release/scripts/startup/bl_operators/wm.py
+++ b/release/scripts/startup/bl_operators/wm.py
@@ -2163,3 +2163,32 @@ class WM_OT_addon_expand(Operator):
info["show_expanded"] = not info["show_expanded"]
return {'FINISHED'}
+
+class WM_OT_addon_userpref_show(Operator):
+ "Show add-on user preferences"
+ bl_idname = "wm.addon_userpref_show"
+ bl_label = ""
+ bl_options = {'INTERNAL'}
+
+ module = StringProperty(
+ name="Module",
+ description="Module name of the add-on to expand",
+ )
+
+ def execute(self, context):
+ import addon_utils
+
+ module_name = self.module
+
+ modules = addon_utils.modules(refresh=False)
+ mod = addon_utils.addons_fake_modules.get(module_name)
+ if mod is not None:
+ info = addon_utils.module_bl_info(mod)
+ info["show_expanded"] = True
+
+ bpy.context.user_preferences.active_section = 'ADDONS'
+ context.window_manager.addon_filter = 'All'
+ context.window_manager.addon_search = info["name"]
+ bpy.ops.screen.userpref_show('INVOKE_DEFAULT')
+
+ return {'FINISHED'}
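
The Cycles UI invokes this operator from the render device row (see ui.py above); the equivalent call from a script would be (illustrative only):

    import bpy

    # Opens the User Preferences in the Add-ons section, with the given
    # add-on expanded and the filter set to its name.
    bpy.ops.wm.addon_userpref_show(module="cycles")
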
diff --git a/release/scripts/startup/bl_ui/space_userpref.py b/release/scripts/startup/bl_ui/space_userpref.py
index dcafac66fca..ab3ec3559e5 100644
--- a/release/scripts/startup/bl_ui/space_userpref.py
+++ b/release/scripts/startup/bl_ui/space_userpref.py
@@ -429,13 +429,6 @@ class USERPREF_PT_system(Panel):
col.separator()
- if hasattr(system, "compute_device_type"):
- col.label(text="Compute Device:")
- col.row().prop(system, "compute_device_type", expand=True)
- sub = col.row()
- sub.active = system.compute_device_type != 'CPU'
- sub.prop(system, "compute_device", text="")
-
if hasattr(system, "opensubdiv_compute_type"):
col.label(text="OpenSubdiv compute:")
col.row().prop(system, "opensubdiv_compute_type", text="")
diff --git a/source/blender/makesrna/intern/rna_userdef.c b/source/blender/makesrna/intern/rna_userdef.c
index 73a5ae59557..3040fab63ce 100644
--- a/source/blender/makesrna/intern/rna_userdef.c
+++ b/source/blender/makesrna/intern/rna_userdef.c
@@ -52,15 +52,6 @@
#include "BLT_lang.h"
#include "GPU_buffers.h"
-#ifdef WITH_CYCLES
-static EnumPropertyItem compute_device_type_items[] = {
- {USER_COMPUTE_DEVICE_NONE, "NONE", 0, "None", "Don't use compute device"},
- {USER_COMPUTE_DEVICE_CUDA, "CUDA", 0, "CUDA", "Use CUDA for GPU acceleration"},
- {USER_COMPUTE_DEVICE_OPENCL, "OPENCL", 0, "OpenCL", "Use OpenCL for GPU acceleration"},
- { 0, NULL, 0, NULL, NULL}
-};
-#endif
-
#ifdef WITH_OPENSUBDIV
static EnumPropertyItem opensubdiv_compute_type_items[] = {
{USER_OPENSUBDIV_COMPUTE_NONE, "NONE", 0, "None", ""},
@@ -124,8 +115,6 @@ static EnumPropertyItem rna_enum_language_default_items[] = {
#include "UI_interface.h"
-#include "CCL_api.h"
-
#ifdef WITH_OPENSUBDIV
# include "opensubdiv_capi.h"
#endif
@@ -476,78 +465,6 @@ static PointerRNA rna_Theme_space_list_generic_get(PointerRNA *ptr)
}
-#ifdef WITH_CYCLES
-static EnumPropertyItem *rna_userdef_compute_device_type_itemf(bContext *UNUSED(C), PointerRNA *UNUSED(ptr),
- PropertyRNA *UNUSED(prop), bool *r_free)
-{
- EnumPropertyItem *item = NULL;
- int totitem = 0;
-
- /* add supported device types */
- RNA_enum_items_add_value(&item, &totitem, compute_device_type_items, USER_COMPUTE_DEVICE_NONE);
- if (CCL_compute_device_list(0))
- RNA_enum_items_add_value(&item, &totitem, compute_device_type_items, USER_COMPUTE_DEVICE_CUDA);
- if (CCL_compute_device_list(1))
- RNA_enum_items_add_value(&item, &totitem, compute_device_type_items, USER_COMPUTE_DEVICE_OPENCL);
-
- RNA_enum_item_end(&item, &totitem);
- *r_free = true;
-
- return item;
-}
-
-static int rna_userdef_compute_device_get(PointerRNA *UNUSED(ptr))
-{
- if (U.compute_device_type == USER_COMPUTE_DEVICE_NONE)
- return 0;
-
- return U.compute_device_id;
-}
-
-static EnumPropertyItem *rna_userdef_compute_device_itemf(bContext *UNUSED(C), PointerRNA *UNUSED(ptr),
- PropertyRNA *UNUSED(prop), bool *r_free)
-{
- EnumPropertyItem tmp = {0, "", 0, "", ""};
- EnumPropertyItem *item = NULL;
- int totitem = 0;
-
- if (U.compute_device_type == USER_COMPUTE_DEVICE_NONE) {
- /* only add a single CPU device */
- tmp.value = 0;
- tmp.name = "CPU";
- tmp.identifier = "CPU";
- RNA_enum_item_add(&item, &totitem, &tmp);
- }
- else {
- /* get device list from cycles. it would be good to make this generic
- * once we have more subsystems using opencl, for now this is easiest */
- int opencl = (U.compute_device_type == USER_COMPUTE_DEVICE_OPENCL);
- CCLDeviceInfo *devices = CCL_compute_device_list(opencl);
- int a;
-
- if (devices) {
- for (a = 0; devices[a].identifier[0]; a++) {
- tmp.value = devices[a].value;
- tmp.identifier = devices[a].identifier;
- tmp.name = devices[a].name;
- RNA_enum_item_add(&item, &totitem, &tmp);
- }
- }
- else {
- tmp.value = 0;
- tmp.name = "CPU";
- tmp.identifier = "CPU";
- RNA_enum_item_add(&item, &totitem, &tmp);
- }
- }
-
- RNA_enum_item_end(&item, &totitem);
- *r_free = true;
-
- return item;
-}
-#endif
-
#ifdef WITH_OPENSUBDIV
static EnumPropertyItem *rna_userdef_opensubdiv_compute_type_itemf(bContext *UNUSED(C), PointerRNA *UNUSED(ptr),
PropertyRNA *UNUSED(prop), bool *r_free)
@@ -3967,13 +3884,6 @@ static void rna_def_userdef_system(BlenderRNA *brna)
{0, NULL, 0, NULL, NULL}
};
-#ifdef WITH_CYCLES
- static EnumPropertyItem compute_device_items[] = {
- {0, "CPU", 0, "CPU", ""},
- { 0, NULL, 0, NULL, NULL}
- };
-#endif
-
static EnumPropertyItem image_draw_methods[] = {
{IMAGE_DRAW_METHOD_2DTEXTURE, "2DTEXTURE", 0, "2D Texture", "Use CPU for display transform and draw image with 2D texture"},
{IMAGE_DRAW_METHOD_GLSL, "GLSL", 0, "GLSL", "Use GLSL shaders for display transform and draw image with 2D texture"},
@@ -4265,23 +4175,6 @@ static void rna_def_userdef_system(BlenderRNA *brna)
"Draw tool/property regions over the main region, when using Triple Buffer");
RNA_def_property_update(prop, 0, "rna_userdef_dpi_update");
-#ifdef WITH_CYCLES
- prop = RNA_def_property(srna, "compute_device_type", PROP_ENUM, PROP_NONE);
- RNA_def_property_flag(prop, PROP_ENUM_NO_CONTEXT);
- RNA_def_property_enum_sdna(prop, NULL, "compute_device_type");
- RNA_def_property_enum_items(prop, compute_device_type_items);
- RNA_def_property_enum_funcs(prop, NULL, NULL, "rna_userdef_compute_device_type_itemf");
- RNA_def_property_ui_text(prop, "Compute Device Type", "Device to use for computation (rendering with Cycles)");
- RNA_def_property_update(prop, NC_SPACE | ND_SPACE_PROPERTIES, NULL);
-
- prop = RNA_def_property(srna, "compute_device", PROP_ENUM, PROP_NONE);
- RNA_def_property_flag(prop, PROP_ENUM_NO_CONTEXT);
- RNA_def_property_enum_sdna(prop, NULL, "compute_device_id");
- RNA_def_property_enum_items(prop, compute_device_items);
- RNA_def_property_enum_funcs(prop, "rna_userdef_compute_device_get", NULL, "rna_userdef_compute_device_itemf");
- RNA_def_property_ui_text(prop, "Compute Device", "Device to use for computation");
-#endif
-
#ifdef WITH_OPENSUBDIV
prop = RNA_def_property(srna, "opensubdiv_compute_type", PROP_ENUM, PROP_NONE);
RNA_def_property_flag(prop, PROP_ENUM_NO_CONTEXT);
diff --git a/source/blenderplayer/bad_level_call_stubs/stubs.c b/source/blenderplayer/bad_level_call_stubs/stubs.c
index d8a4ddc8d4f..6040dfff644 100644
--- a/source/blenderplayer/bad_level_call_stubs/stubs.c
+++ b/source/blenderplayer/bad_level_call_stubs/stubs.c
@@ -142,7 +142,6 @@ struct wmWindowManager;
# pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
-#include "../../intern/cycles/blender/CCL_api.h"
#include "../../intern/dualcon/dualcon.h"
#include "../../intern/elbeem/extern/elbeem.h"
#include "../blender/blenkernel/BKE_modifier.h"
@@ -770,10 +769,6 @@ void *dualcon(const DualConInput *input_mesh,
float scale,
int depth) RET_ZERO
-/* intern/cycles */
-struct CCLDeviceInfo;
-struct CCLDeviceInfo *CCL_compute_device_list(int opencl) RET_NULL
-
/* compositor */
void COM_execute(RenderData *rd, Scene *scene, bNodeTree *editingtree, int rendering,
const ColorManagedViewSettings *viewSettings, const ColorManagedDisplaySettings *displaySettings,