Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/intern
diff options
context:
space:
mode:
authorPatrick Mours <pmours@nvidia.com>2019-11-25 20:36:55 +0300
committerPatrick Mours <pmours@nvidia.com>2019-11-25 20:36:55 +0300
commit03cdfc2ff6df61a247b90c4ad8bb1b26034b4505 (patch)
tree2cff9b889733d8e9659690d592cf63c3248dc87a /intern
parentb374b24f1b2dea4360f318fe3c86bede00bdaae4 (diff)
Fix potential access to deleted memory in OptiX kernel loading code
Calling "OptiXDevice::load_kernels" multiple times would call "optixPipelineDestroy" on a pipeline pointer that may have already been deleted previously (since the PIP_SHADER_EVAL pipeline is only created conditionally). This change also avoids a CUDA kernel reload every time this is called. The CUDA kernels are precompiled and don't change, so there is no need to reload them every time.
Diffstat (limited to 'intern')
-rw-r--r--intern/cycles/device/device_optix.cpp35
1 file changed, 19 insertions, 16 deletions
diff --git a/intern/cycles/device/device_optix.cpp b/intern/cycles/device/device_optix.cpp
index 831cd8c3148..e10bab5a0d8 100644
--- a/intern/cycles/device/device_optix.cpp
+++ b/intern/cycles/device/device_optix.cpp
@@ -329,16 +329,17 @@ class OptiXDevice : public Device {
const CUDAContextScope scope(cuda_context);
- // Unload any existing modules first
- if (cuda_module != NULL)
- cuModuleUnload(cuda_module);
- if (cuda_filter_module != NULL)
- cuModuleUnload(cuda_filter_module);
- if (optix_module != NULL)
+ // Unload existing OptiX module and pipelines first
+ if (optix_module != NULL) {
optixModuleDestroy(optix_module);
- for (unsigned int i = 0; i < NUM_PIPELINES; ++i)
- if (pipelines[i] != NULL)
+ optix_module = NULL;
+ }
+ for (unsigned int i = 0; i < NUM_PIPELINES; ++i) {
+ if (pipelines[i] != NULL) {
optixPipelineDestroy(pipelines[i]);
+ pipelines[i] = NULL;
+ }
+ }
OptixModuleCompileOptions module_options;
module_options.maxRegisterCount = 0; // Do not set an explicit register limit
@@ -399,16 +400,18 @@ class OptiXDevice : public Device {
cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, info.num);
cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, info.num);
- string cubin_data;
- const string cubin_filename = string_printf("lib/kernel_sm_%d%d.cubin", major, minor);
- if (!path_read_text(path_get(cubin_filename), cubin_data)) {
- set_error("Failed loading pre-compiled CUDA kernel " + cubin_filename + ".");
- return false;
- }
+ if (cuda_module == NULL) { // Avoid reloading module if it was already loaded
+ string cubin_data;
+ const string cubin_filename = string_printf("lib/kernel_sm_%d%d.cubin", major, minor);
+ if (!path_read_text(path_get(cubin_filename), cubin_data)) {
+ set_error("Failed loading pre-compiled CUDA kernel " + cubin_filename + ".");
+ return false;
+ }
- check_result_cuda_ret(cuModuleLoadData(&cuda_module, cubin_data.data()));
+ check_result_cuda_ret(cuModuleLoadData(&cuda_module, cubin_data.data()));
+ }
- if (requested_features.use_denoising) {
+ if (requested_features.use_denoising && cuda_filter_module == NULL) {
string filter_data;
const string filter_filename = string_printf("lib/filter_sm_%d%d.cubin", major, minor);
if (!path_read_text(path_get(filter_filename), filter_data)) {