Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSergey Sharybin <sergey.vfx@gmail.com>2016-11-22 18:03:16 +0300
committerSergey Sharybin <sergey.vfx@gmail.com>2016-11-22 18:03:16 +0300
commit751573ce6fff547b6a53769245814bc2d3fed17f (patch)
treec7a48856b85986eac308b7f4afc3eaf747686fe2 /intern/cycles/util/util_task.cpp
parentcb694d6595c78767874baf021096cc217858c85b (diff)
Fix T50034: Blender changes processor affinity unauthorized
Diffstat (limited to 'intern/cycles/util/util_task.cpp')
-rw-r--r--intern/cycles/util/util_task.cpp34
1 file changed, 31 insertions, 3 deletions
diff --git a/intern/cycles/util/util_task.cpp b/intern/cycles/util/util_task.cpp
index 352ba81c95a..0d1fed3ebbf 100644
--- a/intern/cycles/util/util_task.cpp
+++ b/intern/cycles/util/util_task.cpp
@@ -195,7 +195,8 @@ void TaskScheduler::init(int num_threads)
if(users == 0) {
do_exit = false;
- if(num_threads == 0) {
+ const bool use_auto_threads = (num_threads == 0);
+ if(use_auto_threads) {
/* automatic number of threads */
num_threads = system_cpu_thread_count();
}
@@ -204,7 +205,18 @@ void TaskScheduler::init(int num_threads)
/* launch threads that will be waiting for work */
threads.resize(num_threads);
- int num_groups = system_cpu_group_count();
+ const int num_groups = system_cpu_group_count();
+ unsigned short num_process_groups;
+ vector<unsigned short> process_groups;
+ int current_group_threads;
+ if(num_groups > 1) {
+ process_groups.resize(num_groups);
+ num_process_groups = system_cpu_process_groups(num_groups,
+ &process_groups[0]);
+ if(num_process_groups == 1) {
+ current_group_threads = system_cpu_group_thread_count(process_groups[0]);
+ }
+ }
int thread_index = 0;
for(int group = 0; group < num_groups; ++group) {
/* NOTE: That's not really efficient from threading point of view,
@@ -218,9 +230,25 @@ void TaskScheduler::init(int num_threads)
group_thread < num_group_threads && thread_index < threads.size();
++group_thread, ++thread_index)
{
+ /* NOTE: Thread group of -1 means we would not force thread affinity. */
+ int thread_group;
+ if(num_groups == 1) {
+ /* Use default affinity if there's only one CPU group in the system. */
+ thread_group = -1;
+ }
+ else if(use_auto_threads &&
+ num_process_groups == 1 &&
+ num_threads <= current_group_threads)
+ {
+ /* If we fit into curent CPU group we also don't force any affinity. */
+ thread_group = -1;
+ }
+ else {
+ thread_group = group;
+ }
threads[thread_index] = new thread(function_bind(&TaskScheduler::thread_run,
thread_index + 1),
- group);
+ thread_group);
}
}
}