Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/intern
diff options
context:
space:
mode:
authorBrecht Van Lommel <brechtvanlommel@gmail.com>2019-01-18 21:15:38 +0300
committerBrecht Van Lommel <brechtvanlommel@gmail.com>2019-01-18 22:58:56 +0300
commitc5eb10b1104bb0f15695be5d4394bbb8303ad092 (patch)
tree276f02cb02328bde7e080352313ae5b9d0cbd938 /intern
parent1e3203986c0ae0abf27399336300e676190747bb (diff)
Fix T60585: Cycles not using all cores on threadripper, after recent changes.
Diffstat (limited to 'intern')
-rw-r--r--intern/cycles/util/util_task.cpp22
-rw-r--r--intern/cycles/util/util_thread.cpp5
-rw-r--r--intern/cycles/util/util_thread.h4
3 files changed, 14 insertions, 17 deletions
diff --git a/intern/cycles/util/util_task.cpp b/intern/cycles/util/util_task.cpp
index 4241c4aa8cc..6260d8d13ab 100644
--- a/intern/cycles/util/util_task.cpp
+++ b/intern/cycles/util/util_task.cpp
@@ -225,9 +225,9 @@ int get_num_total_processors(const vector<int>& num_per_node_processors)
/* Assign every thread a node on which it should be running, for the best
 * performance. */
-void distribute_threads_on_nodes(const vector<thread*>& threads)
+vector<int> distribute_threads_on_nodes(const int num_threads)
{
- const int num_threads = threads.size();
+ vector<int> thread_nodes(num_threads, -1);
const int num_active_group_processors =
system_cpu_num_active_group_processors();
VLOG(1) << "Detected " << num_active_group_processors << " processors "
@@ -241,14 +241,14 @@ void distribute_threads_on_nodes(const vector<thread*>& threads)
 * have two Cycles/Blender instances running manually set to different
 * dies on a CPU. */
VLOG(1) << "Not setting thread group affinity.";
- return;
+ return thread_nodes;
}
vector<int> num_per_node_processors;
get_per_node_num_processors(&num_per_node_processors);
if(num_per_node_processors.size() == 0) {
/* Error was already reported, here we can't do anything, so we simply
 * leave default affinity to all the worker threads. */
- return;
+ return thread_nodes;
}
const int num_nodes = num_per_node_processors.size();
int thread_index = 0;
@@ -273,11 +273,11 @@ void distribute_threads_on_nodes(const vector<thread*>& threads)
{
VLOG(1) << "Scheduling thread " << thread_index << " to node "
<< current_node_index << ".";
- threads[thread_index]->schedule_to_node(current_node_index);
+ thread_nodes[thread_index] = current_node_index;
++thread_index;
if(thread_index == num_threads) {
/* All threads are scheduled on their nodes. */
- return;
+ return thread_nodes;
}
}
++current_node_index;
@@ -305,6 +305,8 @@ void distribute_threads_on_nodes(const vector<thread*>& threads)
++thread_index;
current_node_index = (current_node_index + 1) % num_nodes;
}
+
+ return thread_nodes;
}
} // namespace
@@ -325,13 +327,17 @@ void TaskScheduler::init(int num_threads)
num_threads = system_cpu_thread_count();
}
VLOG(1) << "Creating pool of " << num_threads << " threads.";
+
+ /* Compute distribution on NUMA nodes. */
+ vector<int> thread_nodes = distribute_threads_on_nodes(num_threads);
+
/* Launch threads that will be waiting for work. */
threads.resize(num_threads);
for(int thread_index = 0; thread_index < num_threads; ++thread_index) {
threads[thread_index] = new thread(
- function_bind(&TaskScheduler::thread_run, thread_index + 1));
+ function_bind(&TaskScheduler::thread_run, thread_index + 1),
+ thread_nodes[thread_index]);
}
- distribute_threads_on_nodes(threads);
}
void TaskScheduler::exit()
diff --git a/intern/cycles/util/util_thread.cpp b/intern/cycles/util/util_thread.cpp
index 1880eefcb9c..4d30e3f564f 100644
--- a/intern/cycles/util/util_thread.cpp
+++ b/intern/cycles/util/util_thread.cpp
@@ -58,9 +58,4 @@ bool thread::join()
}
}
-void thread::schedule_to_node(int node)
-{
- node_ = node;
-}
-
CCL_NAMESPACE_END
diff --git a/intern/cycles/util/util_thread.h b/intern/cycles/util/util_thread.h
index d21a7a8c773..9ae9af25e6b 100644
--- a/intern/cycles/util/util_thread.h
+++ b/intern/cycles/util/util_thread.h
@@ -54,10 +54,6 @@ public:
static void *run(void *arg);
bool join();
- /* For an existing thread descriptor which is NOT running yet, assign node
- * on which it should be running. */
- void schedule_to_node(int node);
-
protected:
function<void()> run_cb_;
std::thread thread_;