Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPatrick Mours <pmours@nvidia.com>2020-06-08 18:16:10 +0300
committerPatrick Mours <pmours@nvidia.com>2020-06-08 18:55:49 +0300
commit9f7d84b656fbb56966620ecc249ce5bc7089a1d1 (patch)
treed0a022feae43f6db2166cf5214b56cce99b96a60 /intern/cycles/blender/addon
parent0a907657d4d525d320e0c8518f583b7210736214 (diff)
Cycles: Add support for P2P memory distribution (e.g. via NVLink)
This change modifies the multi-device implementation to support memory distribution across devices, to reduce the overall memory footprint of large scenes and allow scenes to fit entirely into combined GPU memory that previously had to fall back to host memory. Reviewed By: brecht Differential Revision: https://developer.blender.org/D7426
Diffstat (limited to 'intern/cycles/blender/addon')
-rw-r--r--intern/cycles/blender/addon/properties.py27
1 file changed, 20 insertions, 7 deletions
diff --git a/intern/cycles/blender/addon/properties.py b/intern/cycles/blender/addon/properties.py
index da18ac7c693..1635afab210 100644
--- a/intern/cycles/blender/addon/properties.py
+++ b/intern/cycles/blender/addon/properties.py
@@ -1535,6 +1535,12 @@ class CyclesPreferences(bpy.types.AddonPreferences):
devices: bpy.props.CollectionProperty(type=CyclesDeviceSettings)
+ peer_memory: BoolProperty(
+ name="Distribute memory across devices",
+ description="Make more room for large scenes to fit by distributing memory across interconnected devices (e.g. via NVLink) rather than duplicating it",
+ default=False,
+ )
+
def find_existing_device_entry(self, device):
for device_entry in self.devices:
if device_entry.id == device[2] and device_entry.type == device[1]:
@@ -1632,14 +1638,21 @@ class CyclesPreferences(bpy.types.AddonPreferences):
row = layout.row()
row.prop(self, "compute_device_type", expand=True)
- devices = self.get_devices_for_type(self.compute_device_type)
+ if self.compute_device_type == 'NONE':
+ return
row = layout.row()
- if self.compute_device_type == 'CUDA':
- self._draw_devices(row, 'CUDA', devices)
- elif self.compute_device_type == 'OPTIX':
- self._draw_devices(row, 'OPTIX', devices)
- elif self.compute_device_type == 'OPENCL':
- self._draw_devices(row, 'OPENCL', devices)
+ devices = self.get_devices_for_type(self.compute_device_type)
+ self._draw_devices(row, self.compute_device_type, devices)
+
+ import _cycles
+ has_peer_memory = 0
+ for device in _cycles.available_devices(self.compute_device_type):
+ if device[3] and self.find_existing_device_entry(device).use:
+ has_peer_memory += 1
+ if has_peer_memory > 1:
+ row = layout.row()
+ row.use_property_split = True
+ row.prop(self, "peer_memory")
def draw(self, context):
self.draw_impl(self.layout, context)