git.blender.org/blender.git
author    Brecht Van Lommel <brechtvanlommel@pandora.be>  2013-08-31 03:09:22 +0400
committer Brecht Van Lommel <brechtvanlommel@pandora.be>  2013-08-31 03:09:22 +0400
commit    6785874e7adf5ef15e7a28b134b2bd4e8b3a8988 (patch)
tree      12adcc0c7313c9376919cf748f3310f02b67a9c7 /intern/cycles
parent    f477c0e535168b22509514e7421e6fc5ed5bf1ac (diff)
Fix #36137: cycles render not using all GPUs when the number of GPUs is larger
than the number of CPU threads
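
As the commit message and the changes below suggest, the shared TaskScheduler behind TaskPool spawns only as many worker threads as there are CPU threads, so with more GPUs than CPU threads some devices' tasks sat queued behind others. The new DedicatedTaskPool gives each GPU device its own worker thread. The following minimal sketch of the intended usage is illustrative only: ExampleDevice and run_task are made-up names, and the run callback is assumed to be a no-argument callable, matching the task->run() call added in util_task.cpp.

/* Illustrative sketch only: how a device-like class would use the new pool.
 * Each instance owns a DedicatedTaskPool, so its tasks run on a dedicated
 * worker thread instead of competing for the shared TaskScheduler workers. */
class ExampleDevice {
public:
	void task_add()
	{
		/* queue work on this device's own worker thread; the callback is
		 * bound with function_bind as elsewhere in Cycles */
		task_pool.push(function_bind(&ExampleDevice::run_task, this));
	}

	void task_wait()
	{
		task_pool.wait();	/* DedicatedTaskPool::wait(), not TaskPool::wait_work() */
	}

	void task_cancel()
	{
		task_pool.cancel();	/* drop queued tasks, keep the worker thread alive */
	}

private:
	void run_task()
	{
		/* actual device work would go here */
	}

	DedicatedTaskPool task_pool;	/* one dedicated worker per device */
};
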
Diffstat (limited to 'intern/cycles')
-rw-r--r--  intern/cycles/device/device_cuda.cpp    |   4
-rw-r--r--  intern/cycles/device/device_opencl.cpp  |   4
-rw-r--r--  intern/cycles/util/util_task.cpp        | 150
-rw-r--r--  intern/cycles/util/util_task.h          |  53
4 files changed, 202 insertions(+), 9 deletions(-)
diff --git a/intern/cycles/device/device_cuda.cpp b/intern/cycles/device/device_cuda.cpp
index a60512577cf..c1b5a8bfcea 100644
--- a/intern/cycles/device/device_cuda.cpp
+++ b/intern/cycles/device/device_cuda.cpp
@@ -37,7 +37,7 @@ CCL_NAMESPACE_BEGIN
class CUDADevice : public Device
{
public:
- TaskPool task_pool;
+ DedicatedTaskPool task_pool;
CUdevice cuDevice;
CUcontext cuContext;
CUmodule cuModule;
@@ -976,7 +976,7 @@ public:
void task_wait()
{
- task_pool.wait_work();
+ task_pool.wait();
}
void task_cancel()
diff --git a/intern/cycles/device/device_opencl.cpp b/intern/cycles/device/device_opencl.cpp
index 01c6f0e3ff7..e800b3f6442 100644
--- a/intern/cycles/device/device_opencl.cpp
+++ b/intern/cycles/device/device_opencl.cpp
@@ -314,7 +314,7 @@ public:
class OpenCLDevice : public Device
{
public:
- TaskPool task_pool;
+ DedicatedTaskPool task_pool;
cl_context cxContext;
cl_command_queue cqCommandQueue;
cl_platform_id cpPlatform;
@@ -1100,7 +1100,7 @@ public:
void task_wait()
{
- task_pool.wait_work();
+ task_pool.wait();
}
void task_cancel()
diff --git a/intern/cycles/util/util_task.cpp b/intern/cycles/util/util_task.cpp
index 42007f44043..4426ea0fef1 100644
--- a/intern/cycles/util/util_task.cpp
+++ b/intern/cycles/util/util_task.cpp
@@ -168,7 +168,7 @@ void TaskPool::num_increase()
thread_mutex TaskScheduler::mutex;
int TaskScheduler::users = 0;
vector<thread*> TaskScheduler::threads;
-volatile bool TaskScheduler::do_exit = false;
+bool TaskScheduler::do_exit = false;
list<TaskScheduler::Entry> TaskScheduler::queue;
thread_mutex TaskScheduler::queue_mutex;
@@ -298,5 +298,153 @@ void TaskScheduler::clear(TaskPool *pool)
pool->num_decrease(done);
}
+/* Dedicated Task Pool */
+
+DedicatedTaskPool::DedicatedTaskPool()
+{
+ do_cancel = false;
+ do_exit = false;
+ num = 0;
+
+ worker_thread = new thread(function_bind(&DedicatedTaskPool::thread_run, this));
+}
+
+DedicatedTaskPool::~DedicatedTaskPool()
+{
+ stop();
+ worker_thread->join();
+ delete worker_thread;
+}
+
+void DedicatedTaskPool::push(Task *task, bool front)
+{
+ num_increase();
+
+ /* add task to queue */
+ queue_mutex.lock();
+ if(front)
+ queue.push_front(task);
+ else
+ queue.push_back(task);
+
+ queue_cond.notify_one();
+ queue_mutex.unlock();
+}
+
+void DedicatedTaskPool::push(const TaskRunFunction& run, bool front)
+{
+ push(new Task(run), front);
+}
+
+void DedicatedTaskPool::wait()
+{
+ thread_scoped_lock num_lock(num_mutex);
+
+ while(num)
+ num_cond.wait(num_lock);
+}
+
+void DedicatedTaskPool::cancel()
+{
+ do_cancel = true;
+
+ clear();
+ wait();
+
+ do_cancel = false;
+}
+
+void DedicatedTaskPool::stop()
+{
+ clear();
+
+ do_exit = true;
+ queue_cond.notify_all();
+
+ wait();
+
+ assert(num == 0);
+}
+
+bool DedicatedTaskPool::cancelled()
+{
+ return do_cancel;
+}
+
+void DedicatedTaskPool::num_decrease(int done)
+{
+ num_mutex.lock();
+ num -= done;
+
+ assert(num >= 0);
+ if(num == 0)
+ num_cond.notify_all();
+
+ num_mutex.unlock();
+}
+
+void DedicatedTaskPool::num_increase()
+{
+ thread_scoped_lock num_lock(num_mutex);
+ num++;
+ num_cond.notify_all();
+}
+
+bool DedicatedTaskPool::thread_wait_pop(Task*& task)
+{
+ thread_scoped_lock queue_lock(queue_mutex);
+
+ while(queue.empty() && !do_exit)
+ queue_cond.wait(queue_lock);
+
+ if(queue.empty()) {
+ assert(do_exit);
+ return false;
+ }
+
+ task = queue.front();
+ queue.pop_front();
+
+ return true;
+}
+
+void DedicatedTaskPool::thread_run()
+{
+ Task *task;
+
+ /* keep popping off tasks */
+ while(thread_wait_pop(task)) {
+ /* run task */
+ task->run();
+
+ /* delete task */
+ delete task;
+
+ /* notify task was done */
+ num_decrease(1);
+ }
+}
+
+void DedicatedTaskPool::clear()
+{
+ thread_scoped_lock queue_lock(queue_mutex);
+
+ /* erase all tasks from the queue */
+ list<Task*>::iterator it = queue.begin();
+ int done = 0;
+
+ while(it != queue.end()) {
+ done++;
+ delete *it;
+
+ it = queue.erase(it);
+ }
+
+ queue_lock.unlock();
+
+ /* notify done */
+ num_decrease(done);
+}
+
CCL_NAMESPACE_END
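
To make the implementation above easier to follow outside of Cycles' own thread wrappers, here is a condensed standalone sketch of the same single-worker pattern using plain C++11 primitives. It mirrors the push / thread_wait_pop / num bookkeeping of DedicatedTaskPool but leaves out clear(), cancel() and the Task object; every name in it is illustrative rather than part of the Cycles code.

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

/* Single dedicated worker: tasks go into a queue protected by queue_mutex,
 * the worker blocks on queue_cond until work arrives or exit is requested,
 * and a separate num/num_cond pair lets wait() block until every pushed
 * task has finished running. */
class MiniDedicatedPool {
public:
	MiniDedicatedPool()
	: num(0), do_exit(false), worker(&MiniDedicatedPool::thread_run, this) {}

	~MiniDedicatedPool()
	{
		{
			std::lock_guard<std::mutex> lock(queue_mutex);
			do_exit = true;		/* tell the worker to stop once the queue drains */
		}
		queue_cond.notify_all();
		worker.join();
	}

	void push(std::function<void()> task)
	{
		{
			std::lock_guard<std::mutex> lock(num_mutex);
			num++;			/* count the task as pending */
		}
		{
			std::lock_guard<std::mutex> lock(queue_mutex);
			queue.push_back(std::move(task));
		}
		queue_cond.notify_one();
	}

	void wait()
	{
		std::unique_lock<std::mutex> lock(num_mutex);
		num_cond.wait(lock, [this] { return num == 0; });
	}

private:
	bool thread_wait_pop(std::function<void()>& task)
	{
		std::unique_lock<std::mutex> lock(queue_mutex);
		queue_cond.wait(lock, [this] { return !queue.empty() || do_exit; });

		if(queue.empty())
			return false;	/* exit requested and nothing left to run */

		task = std::move(queue.front());
		queue.pop_front();
		return true;
	}

	void thread_run()
	{
		std::function<void()> task;

		while(thread_wait_pop(task)) {
			task();		/* run the task */

			std::lock_guard<std::mutex> lock(num_mutex);
			if(--num == 0)
				num_cond.notify_all();	/* wake anyone blocked in wait() */
		}
	}

	std::mutex num_mutex, queue_mutex;
	std::condition_variable num_cond, queue_cond;
	std::deque<std::function<void()>> queue;
	int num;
	bool do_exit;
	std::thread worker;	/* declared last so it starts after the other members */
};

The real DedicatedTaskPool additionally drains and deletes queued tasks in clear() and exposes cancel()/cancelled() for cooperative cancellation, as shown in the diff above.
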
diff --git a/intern/cycles/util/util_task.h b/intern/cycles/util/util_task.h
index 11829ab0ae0..22515e3e433 100644
--- a/intern/cycles/util/util_task.h
+++ b/intern/cycles/util/util_task.h
@@ -50,7 +50,7 @@ public:
* pool, we can wait for all tasks to be done, or cancel them before they are
* done.
*
- * The run callback that actually executes the task may be create like this:
+ * The run callback that actually executes the task may be created like this:
* function_bind(&MyClass::task_execute, this, _1, _2) */
class TaskPool
@@ -77,8 +77,8 @@ protected:
thread_mutex num_mutex;
thread_condition_variable num_cond;
- volatile int num;
- volatile bool do_cancel;
+ int num;
+ bool do_cancel;
};
/* Task Scheduler
@@ -109,7 +109,7 @@ protected:
static thread_mutex mutex;
static int users;
static vector<thread*> threads;
- static volatile bool do_exit;
+ static bool do_exit;
static list<Entry> queue;
static thread_mutex queue_mutex;
@@ -122,6 +122,51 @@ protected:
static void clear(TaskPool *pool);
};
+/* Dedicated Task Pool
+ *
+ * Like a TaskPool, but will launch one dedicated thread to execute all tasks.
+ *
+ * The run callback that actually executes the task may be created like this:
+ * function_bind(&MyClass::task_execute, this, _1, _2) */
+
+class DedicatedTaskPool
+{
+public:
+ DedicatedTaskPool();
+ ~DedicatedTaskPool();
+
+ void push(Task *task, bool front = false);
+ void push(const TaskRunFunction& run, bool front = false);
+
+ void wait(); /* wait until all tasks are done */
+ void cancel(); /* cancel all tasks, keep worker thread running */
+ void stop(); /* stop worker thread */
+
+ bool cancelled(); /* for worker thread, test if cancelled */
+
+protected:
+ void num_decrease(int done);
+ void num_increase();
+
+ void thread_run();
+ bool thread_wait_pop(Task*& entry);
+
+ void clear();
+
+ thread_mutex num_mutex;
+ thread_condition_variable num_cond;
+
+ list<Task*> queue;
+ thread_mutex queue_mutex;
+ thread_condition_variable queue_cond;
+
+ int num;
+ bool do_cancel;
+ bool do_exit;
+
+ thread *worker_thread;
+};
+
CCL_NAMESPACE_END
#endif
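
Finally, a hedged sketch of the two remaining entry points declared in the header above: pushing a pre-built Task at the front of the queue and polling cancelled() from inside a long-running callback so that cancel() can take effect early. LongJob and its members are made-up names; only Task, DedicatedTaskPool, push(), cancelled() and cancel() come from this patch, and the no-argument callback signature is the same assumption as before.

/* Illustrative sketch only. A long-running callback can poll cancelled()
 * so that DedicatedTaskPool::cancel() takes effect before the task finishes. */
class LongJob {
public:
	LongJob(DedicatedTaskPool& pool_) : pool(pool_) {}

	void schedule()
	{
		/* front = true puts the task ahead of already queued work */
		pool.push(new Task(function_bind(&LongJob::run, this)), true);
	}

	void run()
	{
		for(int i = 0; i < 1000; i++) {
			if(pool.cancelled())
				return;		/* stop early when cancel() was requested */

			/* ... one chunk of work ... */
		}
	}

private:
	DedicatedTaskPool& pool;
};

Note that, as in thread_run() and clear() above, the pool takes ownership of the pushed Task and deletes it once it has run or been cleared.
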