Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPatrick Mours <pmours@nvidia.com>2019-11-28 15:57:02 +0300
committerPatrick Mours <pmours@nvidia.com>2019-11-28 15:57:02 +0300
commit70a32adfeb41f9b3fc376a933fbdc2d11bb73c15 (patch)
tree5f1d6639c093b818022969ce776d5dabc24ed695 /intern/cycles/device/device_multi.cpp
parentd4835b88b23bce6c28d8514e35d771408d5ef18b (diff)
Fix assert in Cycles memory statistics when using OptiX on multiple GPUs
The acceleration structure built by OptiX may differ between GPUs, so we cannot assume the memory size is the same for all of them. This fixes that by moving the memory management for all OptiX acceleration structures into the responsibility of each device (this was already the case for BLAS previously; now it applies to the TLAS too).
Diffstat (limited to 'intern/cycles/device/device_multi.cpp')
-rw-r--r--intern/cycles/device/device_multi.cpp12
1 files changed, 2 insertions, 10 deletions
diff --git a/intern/cycles/device/device_multi.cpp b/intern/cycles/device/device_multi.cpp
index ac71be9dbea..b8587eb0a62 100644
--- a/intern/cycles/device/device_multi.cpp
+++ b/intern/cycles/device/device_multi.cpp
@@ -153,21 +153,13 @@ class MultiDevice : public Device {
return result;
}
- bool build_optix_bvh(BVH *bvh, device_memory &mem)
+ bool build_optix_bvh(BVH *bvh)
{
- device_ptr key = unique_key++;
-
// Broadcast acceleration structure build to all devices
foreach (SubDevice &sub, devices) {
- mem.device = sub.device;
- if (!sub.device->build_optix_bvh(bvh, mem))
+ if (!sub.device->build_optix_bvh(bvh))
return false;
- sub.ptr_map[key] = mem.device_pointer;
}
-
- mem.device = this;
- mem.device_pointer = key;
- stats.mem_alloc(mem.device_size);
return true;
}