git.blender.org/blender.git

author    Brecht Van Lommel <brecht@blender.org>  2021-10-31 15:18:28 +0300
committer Brecht Van Lommel <brecht@blender.org>  2021-11-01 10:36:50 +0300
commit    806521f7037a5a50bba9d332ab5de3b0172c5a22
tree      ac075e3972e0b844ff76a8382f328a814e8dc319
parent    154a06077773a5236d388cc47cdaee9d312abd0a

Fix T92671: confusing Cycles debug logs about CPU architecture

Instead of printing debug flags that list various CPU and GPU settings which may or may not be used, print when we are actually using them. This includes CPU kernel types, OptiX debugging, and CUDA and HIP adaptive compilation. BVH type was already printed.
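
The pattern this moves toward, in a minimal sketch (struct, flag, and function names here are illustrative, not the actual Cycles API): rather than dumping every flag at startup, each code path logs its own option at the moment it takes effect.

    /* Illustrative sketch only; names are hypothetical. */
    #include <iostream>

    struct DeviceFlags {
      bool adaptive_compile = false;
    };

    static void load_kernels(const DeviceFlags &flags)
    {
      /* Before: a startup dump printed adaptive_compile (and every other
       * flag) whether or not this path ever ran. After: the message is
       * emitted only when the option is actually in use. */
      if (flags.adaptive_compile) {
        std::cout << "Using adaptive kernel compilation.\n";
      }
    }

    int main()
    {
      DeviceFlags flags;
      flags.adaptive_compile = true;
      load_kernels(flags); /* prints the message once, at point of use */
      return 0;
    }
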
 intern/cycles/blender/python.cpp                  |  5
 intern/cycles/device/cpu/device_impl.cpp          |  3
 intern/cycles/device/cuda/device_impl.cpp         | 10
 intern/cycles/device/hip/device_impl.cpp          | 12
 intern/cycles/device/optix/device_impl.cpp        |  1
 intern/cycles/integrator/path_trace_work_gpu.cpp  |  4
 intern/cycles/util/debug.cpp                      | 22
 intern/cycles/util/debug.h                        |  2
 8 files changed, 19 insertions(+), 40 deletions(-)
diff --git a/intern/cycles/blender/python.cpp b/intern/cycles/blender/python.cpp
index 20bf6385999..bb9b0a74424 100644
--- a/intern/cycles/blender/python.cpp
+++ b/intern/cycles/blender/python.cpp
@@ -157,8 +157,6 @@ static PyObject *init_func(PyObject * /*self*/, PyObject *args)
DebugFlags().running_inside_blender = true;
- VLOG(2) << "Debug flags initialized to:\n" << DebugFlags();
-
Py_RETURN_NONE;
}
@@ -885,8 +883,6 @@ static PyObject *debug_flags_update_func(PyObject * /*self*/, PyObject *args)
debug_flags_sync_from_scene(b_scene);
- VLOG(2) << "Debug flags set to:\n" << DebugFlags();
-
debug_flags_set = true;
Py_RETURN_NONE;
@@ -896,7 +892,6 @@ static PyObject *debug_flags_reset_func(PyObject * /*self*/, PyObject * /*args*/
{
debug_flags_reset();
if (debug_flags_set) {
- VLOG(2) << "Debug flags reset to:\n" << DebugFlags();
debug_flags_set = false;
}
Py_RETURN_NONE;
diff --git a/intern/cycles/device/cpu/device_impl.cpp b/intern/cycles/device/cpu/device_impl.cpp
index dbad332f896..d494b40f71d 100644
--- a/intern/cycles/device/cpu/device_impl.cpp
+++ b/intern/cycles/device/cpu/device_impl.cpp
@@ -68,8 +68,7 @@ CPUDevice::CPUDevice(const DeviceInfo &info_, Stats &stats_, Profiler &profiler_
{
/* Pick any kernel, all of them are supposed to have same level of microarchitecture
* optimization. */
- VLOG(1) << "Will be using " << kernels.integrator_init_from_camera.get_uarch_name()
- << " kernels.";
+ VLOG(1) << "Using " << kernels.integrator_init_from_camera.get_uarch_name() << " CPU kernels.";
if (info.cpu_threads == 0) {
info.cpu_threads = TaskScheduler::num_threads();
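
For context on the log line above: the uarch name comes from whichever CPU kernel variant was selected at load time. A rough sketch of that kind of dispatch, with hypothetical feature-test parameters (the real selection lives inside Cycles' kernel wrappers, not shown here):

    /* Hypothetical sketch of micro-architecture kernel selection;
     * has_avx2/has_sse41 stand in for real CPU feature checks. */
    const char *cpu_kernel_uarch_name(bool has_avx2, bool has_sse41)
    {
      if (has_avx2) {
        return "AVX2";
      }
      if (has_sse41) {
        return "SSE4.1";
      }
      return "SSE2"; /* x86-64 baseline kernels */
    }
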
diff --git a/intern/cycles/device/cuda/device_impl.cpp b/intern/cycles/device/cuda/device_impl.cpp
index 2f9a1394ad8..2bb0592bcc5 100644
--- a/intern/cycles/device/cuda/device_impl.cpp
+++ b/intern/cycles/device/cuda/device_impl.cpp
@@ -378,7 +378,9 @@ string CUDADevice::compile_kernel(const uint kernel_features,
cubin.c_str(),
common_cflags.c_str());
- printf("Compiling CUDA kernel ...\n%s\n", command.c_str());
+ printf("Compiling %sCUDA kernel ...\n%s\n",
+ (use_adaptive_compilation()) ? "adaptive " : "",
+ command.c_str());
# ifdef _WIN32
command = "call " + command;
@@ -405,13 +407,15 @@ string CUDADevice::compile_kernel(const uint kernel_features,
bool CUDADevice::load_kernels(const uint kernel_features)
{
- /* TODO(sergey): Support kernels re-load for CUDA devices.
+ /* TODO(sergey): Support kernel re-load for CUDA devices with adaptive compilation.
*
* Currently re-loading kernel will invalidate memory pointers,
* causing problems in cuCtxSynchronize.
*/
if (cuModule) {
- VLOG(1) << "Skipping kernel reload, not currently supported.";
+ if (use_adaptive_compilation()) {
+ VLOG(1) << "Skipping CUDA kernel reload for adaptive compilation, not currently supported.";
+ }
return true;
}
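
Adaptive compilation, which the reworked messages above now call out explicitly, recompiles the kernel with only the features a scene needs. A hedged sketch of the idea (the command and defines are illustrative, not the exact flags Cycles passes):

    /* Illustrative only: adaptive compilation narrows the kernel feature
     * set by appending extra -D defines to the kernel compile command. */
    #include <string>

    std::string build_kernel_command(bool adaptive, const std::string &feature_defines)
    {
      std::string command = "nvcc -arch=sm_70 -cubin kernel.cu";
      if (adaptive) {
        /* e.g. feature_defines = "-D__NO_HAIR__ -D__NO_VOLUME__" */
        command += " " + feature_defines;
      }
      return command;
    }
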
diff --git a/intern/cycles/device/hip/device_impl.cpp b/intern/cycles/device/hip/device_impl.cpp
index 31b7b07383b..1ea387513d5 100644
--- a/intern/cycles/device/hip/device_impl.cpp
+++ b/intern/cycles/device/hip/device_impl.cpp
@@ -360,7 +360,9 @@ string HIPDevice::compile_kernel(const uint kernel_features,
source_path.c_str(),
fatbin.c_str());
- printf("Compiling HIP kernel ...\n%s\n", command.c_str());
+ printf("Compiling %sHIP kernel ...\n%s\n",
+ (use_adaptive_compilation()) ? "adaptive " : "",
+ command.c_str());
# ifdef _WIN32
command = "call " + command;
@@ -387,13 +389,15 @@ string HIPDevice::compile_kernel(const uint kernel_features,
bool HIPDevice::load_kernels(const uint kernel_features)
{
- /* TODO(sergey): Support kernels re-load for HIP devices.
+ /* TODO(sergey): Support kernel re-load for HIP devices with adaptive compilation.
*
* Currently re-loading kernel will invalidate memory pointers,
* causing problems in hipCtxSynchronize.
*/
if (hipModule) {
- VLOG(1) << "Skipping kernel reload, not currently supported.";
+ if (use_adaptive_compilation()) {
+ VLOG(1) << "Skipping HIP kernel reload for adaptive compilation, not currently supported.";
+ }
return true;
}
diff --git a/intern/cycles/device/optix/device_impl.cpp b/intern/cycles/device/optix/device_impl.cpp
index e9164cc0a76..9b9a5ac0de7 100644
--- a/intern/cycles/device/optix/device_impl.cpp
+++ b/intern/cycles/device/optix/device_impl.cpp
@@ -91,6 +91,7 @@ OptiXDevice::OptiXDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
};
# endif
if (DebugFlags().optix.use_debug) {
+ VLOG(1) << "Using OptiX debug mode.";
options.validationMode = OPTIX_DEVICE_CONTEXT_VALIDATION_MODE_ALL;
}
optix_assert(optixDeviceContextCreate(cuContext, &options, &context));
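
The new VLOG line makes it visible when OptiX validation is active, which matters because the validation layer adds noticeable runtime overhead. Stripped of the surrounding class, context creation with validation enabled looks roughly like this (sketch; assumes <optix.h>, an initialized OptiX function table, and an existing CUDA context; error checking elided):

    /* Sketch: create an OptiX device context with the validation layer
     * enabled, as the hunk above logs. */
    OptixDeviceContext create_debug_context(CUcontext cu_context)
    {
      OptixDeviceContextOptions options = {};
      options.validationMode = OPTIX_DEVICE_CONTEXT_VALIDATION_MODE_ALL;
      OptixDeviceContext context = nullptr;
      optixDeviceContextCreate(cu_context, &options, &context);
      return context;
    }
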
diff --git a/intern/cycles/integrator/path_trace_work_gpu.cpp b/intern/cycles/integrator/path_trace_work_gpu.cpp
index 251bec0dc8f..dfc1362ab09 100644
--- a/intern/cycles/integrator/path_trace_work_gpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_gpu.cpp
@@ -807,10 +807,10 @@ bool PathTraceWorkGPU::should_use_graphics_interop()
interop_use_ = device->should_use_graphics_interop();
if (interop_use_) {
- VLOG(2) << "Will be using graphics interop GPU display update.";
+ VLOG(2) << "Using graphics interop GPU display update.";
}
else {
- VLOG(2) << "Will be using naive GPU display update.";
+ VLOG(2) << "Using naive GPU display update.";
}
interop_use_checked_ = true;
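
The surrounding function uses a check-once pattern: the interop decision is computed on first call and cached, so each of the messages above appears at most once per session. A minimal sketch of that pattern (member state simplified to statics; the query helper is a hypothetical stand-in):

    bool query_interop_support(); /* hypothetical device query */

    /* Sketch of the cached one-time check used above. */
    bool should_use_graphics_interop()
    {
      static bool checked = false;
      static bool use_interop = false;
      if (!checked) {
        use_interop = query_interop_support();
        checked = true;
      }
      return use_interop;
    }
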
diff --git a/intern/cycles/util/debug.cpp b/intern/cycles/util/debug.cpp
index b49df3d42bc..7d5b6d4e54e 100644
--- a/intern/cycles/util/debug.cpp
+++ b/intern/cycles/util/debug.cpp
@@ -99,26 +99,4 @@ void DebugFlags::reset()
optix.reset();
}
-std::ostream &operator<<(std::ostream &os, DebugFlagsConstRef debug_flags)
-{
- os << "CPU flags:\n"
- << " AVX2 : " << string_from_bool(debug_flags.cpu.avx2) << "\n"
- << " AVX : " << string_from_bool(debug_flags.cpu.avx) << "\n"
- << " SSE4.1 : " << string_from_bool(debug_flags.cpu.sse41) << "\n"
- << " SSE3 : " << string_from_bool(debug_flags.cpu.sse3) << "\n"
- << " SSE2 : " << string_from_bool(debug_flags.cpu.sse2) << "\n"
- << " BVH layout : " << bvh_layout_name(debug_flags.cpu.bvh_layout) << "\n";
-
- os << "CUDA flags:\n"
- << " Adaptive Compile : " << string_from_bool(debug_flags.cuda.adaptive_compile) << "\n";
-
- os << "OptiX flags:\n"
- << " Debug : " << string_from_bool(debug_flags.optix.use_debug) << "\n";
-
- os << "HIP flags:\n"
- << " HIP streams : " << string_from_bool(debug_flags.hip.adaptive_compile) << "\n";
-
- return os;
-}
-
CCL_NAMESPACE_END
diff --git a/intern/cycles/util/debug.h b/intern/cycles/util/debug.h
index 58b2b047261..548c67600e5 100644
--- a/intern/cycles/util/debug.h
+++ b/intern/cycles/util/debug.h
@@ -160,8 +160,6 @@ inline DebugFlags &DebugFlags()
return DebugFlags::get();
}
-std::ostream &operator<<(std::ostream &os, DebugFlagsConstRef debug_flags);
-
CCL_NAMESPACE_END
#endif /* __UTIL_DEBUG_H__ */
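
For reference, the VLOG messages touched by this commit are only visible when Blender is built with logging support and run with Cycles debug logging enabled, e.g. blender --debug-cycles --verbose 2, where the number sets the verbosity level that gates the VLOG(1) and VLOG(2) calls above.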