Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLukas Toenne <lukas.toenne@googlemail.com>2012-09-11 15:41:51 +0400
committerLukas Toenne <lukas.toenne@googlemail.com>2012-09-11 15:41:51 +0400
commit31ed71cb6b86278eb5c15de65eb8e2ed800821e1 (patch)
tree9ce654327b030ee0351a8aacf2ef17cdd18c4a1d
parent2cdfa33137b69b82c8354dd5d0bd58acfe2d89d9 (diff)
Performance fix for Cycles: Don't wait in the main UI thread when resetting devices.
When the scene is updated Cycles resets the renderer device, cancelling all existing tasks. The main thread would wait for all running tasks to finish before continuing. This is ok when tasks can actually cancel in a timely fashion. For OSL however, this does not work, since the OSL shader group optimization takes quite a bit of time and cannot easily be cancelled once running (on my crappy machine in full debug mode: ~0.12 seconds for simple node trees). This would lead to very laggy UI behavior and make it difficult to accurately control elements such as sliders. This patch removes the wait condition from the device->task_cancel method. Instead it just sets the do_cancel flag and returns. To avoid backlog in the task pool of the device it will return early from the BlenderSession::sync function while the reset is going on (tested in Session::resetting). Once all existing tasks have finished the do_cancel flag is finally cleared again (checked in TaskPool::num_decrease). Care has to be taken to avoid race conditions on the do_cancel flag, since it can now be modified outside the TaskPool::cancel function itself. For this purpose the scope of the TaskPool::num_mutex locks has been extended, in most cases the mutex is now locked by the TaskPool itself before calling TaskScheduler methods, instead of only locking inside the num_increase/num_decrease functions themselves. The only occurrence of a lock outside of the TaskPool methods is in TaskScheduler::thread_run. This patch is most useful in combination with the OSL renderer mode, so it can probably wait until after the 2.64 release. SVM tasks tend to be cancelled quickly, so the effect is less noticeable.
-rw-r--r--intern/cycles/blender/blender_session.cpp6
-rw-r--r--intern/cycles/device/device.h1
-rw-r--r--intern/cycles/device/device_cpu.cpp5
-rw-r--r--intern/cycles/device/device_cuda.cpp5
-rw-r--r--intern/cycles/device/device_multi.cpp8
-rw-r--r--intern/cycles/device/device_opencl.cpp5
-rw-r--r--intern/cycles/render/session.cpp19
-rw-r--r--intern/cycles/render/session.h3
-rw-r--r--intern/cycles/util/util_task.cpp33
9 files changed, 69 insertions, 16 deletions
diff --git a/intern/cycles/blender/blender_session.cpp b/intern/cycles/blender/blender_session.cpp
index 7b80c520e72..5930a2800bf 100644
--- a/intern/cycles/blender/blender_session.cpp
+++ b/intern/cycles/blender/blender_session.cpp
@@ -372,6 +372,12 @@ void BlenderSession::synchronize()
return;
}
+ /* if the session is still resetting the device come back later */
+ if(session->resetting()) {
+ tag_update();
+ return;
+ }
+
/* increase samples, but never decrease */
session->set_samples(session_params.samples);
session->set_pause(BlenderSync::get_session_pause(b_scene, background));
diff --git a/intern/cycles/device/device.h b/intern/cycles/device/device.h
index 2ee2e044618..8e3bc408399 100644
--- a/intern/cycles/device/device.h
+++ b/intern/cycles/device/device.h
@@ -115,6 +115,7 @@ public:
virtual void task_add(DeviceTask& task) = 0;
virtual void task_wait() = 0;
virtual void task_cancel() = 0;
+ virtual bool task_cancelled() = 0;
/* opengl drawing */
virtual void draw_pixels(device_memory& mem, int y, int w, int h,
diff --git a/intern/cycles/device/device_cpu.cpp b/intern/cycles/device/device_cpu.cpp
index 4c54671b0d0..e2f612ee233 100644
--- a/intern/cycles/device/device_cpu.cpp
+++ b/intern/cycles/device/device_cpu.cpp
@@ -273,6 +273,11 @@ public:
{
task_pool.cancel();
}
+
+ bool task_cancelled()
+ {
+ return task_pool.cancelled();
+ }
};
Device *device_cpu_create(DeviceInfo& info, int threads)
diff --git a/intern/cycles/device/device_cuda.cpp b/intern/cycles/device/device_cuda.cpp
index c8dcfdc2f3d..acc1086cc35 100644
--- a/intern/cycles/device/device_cuda.cpp
+++ b/intern/cycles/device/device_cuda.cpp
@@ -892,6 +892,11 @@ public:
{
task_pool.cancel();
}
+
+ bool task_cancelled()
+ {
+ return task_pool.cancelled();
+ }
};
Device *device_cuda_create(DeviceInfo& info, bool background)
diff --git a/intern/cycles/device/device_multi.cpp b/intern/cycles/device/device_multi.cpp
index 546ffe5e4b9..4923e5c9e66 100644
--- a/intern/cycles/device/device_multi.cpp
+++ b/intern/cycles/device/device_multi.cpp
@@ -312,6 +312,14 @@ public:
foreach(SubDevice& sub, devices)
sub.device->task_cancel();
}
+
+ bool task_cancelled()
+ {
+ foreach(SubDevice& sub, devices)
+ if (sub.device->task_cancelled())
+ return true;
+ return false;
+ }
};
Device *device_multi_create(DeviceInfo& info, bool background)
diff --git a/intern/cycles/device/device_opencl.cpp b/intern/cycles/device/device_opencl.cpp
index 3c78b4895ae..e1cfb9e7ade 100644
--- a/intern/cycles/device/device_opencl.cpp
+++ b/intern/cycles/device/device_opencl.cpp
@@ -724,6 +724,11 @@ public:
{
task_pool.cancel();
}
+
+ bool task_cancelled()
+ {
+ return task_pool.cancelled();
+ }
};
Device *device_opencl_create(DeviceInfo& info, bool background)
diff --git a/intern/cycles/render/session.cpp b/intern/cycles/render/session.cpp
index 0efbcb8afd3..05c57ba48ec 100644
--- a/intern/cycles/render/session.cpp
+++ b/intern/cycles/render/session.cpp
@@ -140,6 +140,12 @@ void Session::reset_gpu(BufferParams& buffer_params, int samples)
pause_cond.notify_all();
}
+bool Session::resetting_gpu() const
+{
+ /* no need to wait for gpu device */
+ return false;
+}
+
bool Session::draw_gpu(BufferParams& buffer_params)
{
/* block for buffer access */
@@ -290,6 +296,11 @@ void Session::reset_cpu(BufferParams& buffer_params, int samples)
pause_cond.notify_all();
}
+bool Session::resetting_cpu() const
+{
+ return device->task_cancelled();
+}
+
bool Session::draw_cpu(BufferParams& buffer_params)
{
thread_scoped_lock display_lock(display_mutex);
@@ -584,6 +595,14 @@ void Session::reset(BufferParams& buffer_params, int samples)
reset_cpu(buffer_params, samples);
}
+bool Session::resetting() const
+{
+ if(device_use_gl)
+ return resetting_gpu();
+ else
+ return resetting_cpu();
+}
+
void Session::set_samples(int samples)
{
if(samples != params.samples) {
diff --git a/intern/cycles/render/session.h b/intern/cycles/render/session.h
index 7b01357a2b7..e45753d22a0 100644
--- a/intern/cycles/render/session.h
+++ b/intern/cycles/render/session.h
@@ -116,6 +116,7 @@ public:
bool ready_to_reset();
void reset(BufferParams& params, int samples);
+ bool resetting() const;
void set_samples(int samples);
void set_pause(bool pause);
@@ -139,10 +140,12 @@ protected:
void run_cpu();
bool draw_cpu(BufferParams& params);
void reset_cpu(BufferParams& params, int samples);
+ bool resetting_cpu() const;
void run_gpu();
bool draw_gpu(BufferParams& params);
void reset_gpu(BufferParams& params, int samples);
+ bool resetting_gpu() const;
bool acquire_tile(Device *tile_device, RenderTile& tile);
void update_tile_sample(RenderTile& tile);
diff --git a/intern/cycles/util/util_task.cpp b/intern/cycles/util/util_task.cpp
index ea0abd6f54f..2b209c135f4 100644
--- a/intern/cycles/util/util_task.cpp
+++ b/intern/cycles/util/util_task.cpp
@@ -38,6 +38,8 @@ TaskPool::~TaskPool()
void TaskPool::push(Task *task, bool front)
{
+ thread_scoped_lock num_lock(num_mutex);
+
TaskScheduler::Entry entry;
entry.task = task;
@@ -102,22 +104,17 @@ void TaskPool::wait_work()
void TaskPool::cancel()
{
+ thread_scoped_lock num_lock(num_mutex);
+
do_cancel = true;
TaskScheduler::clear(this);
-
- {
- thread_scoped_lock num_lock(num_mutex);
-
- while(num)
- num_cond.wait(num_lock);
- }
-
- do_cancel = false;
}
void TaskPool::stop()
{
+ thread_scoped_lock num_lock(num_mutex);
+
TaskScheduler::clear(this);
assert(num == 0);
@@ -130,20 +127,20 @@ bool TaskPool::cancelled()
void TaskPool::num_decrease(int done)
{
- num_mutex.lock();
num -= done;
-
assert(num >= 0);
- if(num == 0)
+
+ if(num == 0) {
+ do_cancel = false;
+
num_cond.notify_all();
-
- num_mutex.unlock();
+ }
}
void TaskPool::num_increase()
{
- thread_scoped_lock num_lock(num_mutex);
num++;
+
num_cond.notify_all();
}
@@ -239,7 +236,11 @@ void TaskScheduler::thread_run(int thread_id)
delete entry.task;
/* notify pool task was done */
- entry.pool->num_decrease(1);
+ {
+ /* not called from TaskPool, have to explicitly lock the mutex here */
+ thread_scoped_lock num_lock(entry.pool->num_mutex);
+ entry.pool->num_decrease(1);
+ }
}
}