git.blender.org/blender.git
author    Brecht Van Lommel <brecht@blender.org>  2022-06-16 20:39:13 +0300
committer Brecht Van Lommel <brecht@blender.org>  2022-06-17 15:08:14 +0300
commit    2c1bffa286b9551c0533081cb16f497058974b03 (patch)
tree      d0b12d9a78bc798fcfa1754cbc7077f9b3bbf9f7 /intern/cycles/device
parent    24246d98707096f16d5ab48f673f49354eac87a1 (diff)
Cleanup: add verbose logging category names instead of numbers
And use them more consistently than before.
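
For reference, the renames below replace numeric VLOG(<level>) calls with named categories (VLOG_WARNING, VLOG_INFO, VLOG_WORK, VLOG_DEVICE_STATS). Their definitions are not part of this diff, which is limited to 'intern/cycles/device'; the following is only a plausible sketch, assuming the names wrap glog-style verbosity levels consistent with how they are used in the hunks below:

    /* Sketch of named verbose logging categories (assumed levels, not part of this diff). */
    /* Warnings about degraded or unexpected behavior. */
    #define VLOG_WARNING VLOG(1)
    /* General information: devices found, kernels selected, features used. */
    #define VLOG_INFO VLOG(2)
    /* Work being performed: buffer/texture allocations, data movement. */
    #define VLOG_WORK VLOG(3)
    /* Detailed device and queue timing statistics. */
    #define VLOG_DEVICE_STATS VLOG(4)
    #define VLOG_DEVICE_STATS_IS_ON VLOG_IS_ON(4)

With a mapping like this, raising the verbosity threshold enables progressively more detailed output: warnings first, then general info, then per-allocation work logging, then device statistics.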
Diffstat (limited to 'intern/cycles/device')
-rw-r--r--  intern/cycles/device/cpu/device_impl.cpp   | 22
-rw-r--r--  intern/cycles/device/cuda/device.cpp        | 28
-rw-r--r--  intern/cycles/device/cuda/device_impl.cpp   | 41
-rw-r--r--  intern/cycles/device/cuda/queue.cpp         |  6
-rw-r--r--  intern/cycles/device/device.cpp             |  6
-rw-r--r--  intern/cycles/device/hip/device.cpp         | 25
-rw-r--r--  intern/cycles/device/hip/device_impl.cpp    | 36
-rw-r--r--  intern/cycles/device/hip/queue.cpp          |  6
-rw-r--r--  intern/cycles/device/metal/device_impl.mm   | 18
-rw-r--r--  intern/cycles/device/metal/queue.mm         |  4
-rw-r--r--  intern/cycles/device/optix/device.cpp       |  6
-rw-r--r--  intern/cycles/device/optix/device_impl.cpp  |  6
-rw-r--r--  intern/cycles/device/queue.cpp              | 21
13 files changed, 115 insertions, 110 deletions
diff --git a/intern/cycles/device/cpu/device_impl.cpp b/intern/cycles/device/cpu/device_impl.cpp
index 612c391f7d5..0a4eb089037 100644
--- a/intern/cycles/device/cpu/device_impl.cpp
+++ b/intern/cycles/device/cpu/device_impl.cpp
@@ -55,8 +55,8 @@ CPUDevice::CPUDevice(const DeviceInfo &info_, Stats &stats_, Profiler &profiler_
{
/* Pick any kernel, all of them are supposed to have same level of microarchitecture
* optimization. */
- VLOG(1) << "Using " << get_cpu_kernels().integrator_init_from_camera.get_uarch_name()
- << " CPU kernels.";
+ VLOG_INFO << "Using " << get_cpu_kernels().integrator_init_from_camera.get_uarch_name()
+ << " CPU kernels.";
if (info.cpu_threads == 0) {
info.cpu_threads = TaskScheduler::max_concurrency();
@@ -111,9 +111,9 @@ void CPUDevice::mem_alloc(device_memory &mem)
}
else {
if (mem.name) {
- VLOG(1) << "Buffer allocate: " << mem.name << ", "
- << string_human_readable_number(mem.memory_size()) << " bytes. ("
- << string_human_readable_size(mem.memory_size()) << ")";
+ VLOG_WORK << "Buffer allocate: " << mem.name << ", "
+ << string_human_readable_number(mem.memory_size()) << " bytes. ("
+ << string_human_readable_size(mem.memory_size()) << ")";
}
if (mem.type == MEM_DEVICE_ONLY || !mem.host_pointer) {
@@ -205,9 +205,9 @@ void CPUDevice::const_copy_to(const char *name, void *host, size_t size)
void CPUDevice::global_alloc(device_memory &mem)
{
- VLOG(1) << "Global memory allocate: " << mem.name << ", "
- << string_human_readable_number(mem.memory_size()) << " bytes. ("
- << string_human_readable_size(mem.memory_size()) << ")";
+ VLOG_WORK << "Global memory allocate: " << mem.name << ", "
+ << string_human_readable_number(mem.memory_size()) << " bytes. ("
+ << string_human_readable_size(mem.memory_size()) << ")";
kernel_global_memory_copy(&kernel_globals, mem.name, mem.host_pointer, mem.data_size);
@@ -227,9 +227,9 @@ void CPUDevice::global_free(device_memory &mem)
void CPUDevice::tex_alloc(device_texture &mem)
{
- VLOG(1) << "Texture allocate: " << mem.name << ", "
- << string_human_readable_number(mem.memory_size()) << " bytes. ("
- << string_human_readable_size(mem.memory_size()) << ")";
+ VLOG_WORK << "Texture allocate: " << mem.name << ", "
+ << string_human_readable_number(mem.memory_size()) << " bytes. ("
+ << string_human_readable_size(mem.memory_size()) << ")";
mem.device_pointer = (device_ptr)mem.host_pointer;
mem.device_size = mem.memory_size();
diff --git a/intern/cycles/device/cuda/device.cpp b/intern/cycles/device/cuda/device.cpp
index 400490336d6..5a213c45b71 100644
--- a/intern/cycles/device/cuda/device.cpp
+++ b/intern/cycles/device/cuda/device.cpp
@@ -29,24 +29,25 @@ bool device_cuda_init()
initialized = true;
int cuew_result = cuewInit(CUEW_INIT_CUDA);
if (cuew_result == CUEW_SUCCESS) {
- VLOG(1) << "CUEW initialization succeeded";
+ VLOG_INFO << "CUEW initialization succeeded";
if (CUDADevice::have_precompiled_kernels()) {
- VLOG(1) << "Found precompiled kernels";
+ VLOG_INFO << "Found precompiled kernels";
result = true;
}
else if (cuewCompilerPath() != NULL) {
- VLOG(1) << "Found CUDA compiler " << cuewCompilerPath();
+ VLOG_INFO << "Found CUDA compiler " << cuewCompilerPath();
result = true;
}
else {
- VLOG(1) << "Neither precompiled kernels nor CUDA compiler was found,"
- << " unable to use CUDA";
+ VLOG_INFO << "Neither precompiled kernels nor CUDA compiler was found,"
+ << " unable to use CUDA";
}
}
else {
- VLOG(1) << "CUEW initialization failed: "
- << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED) ? "Error setting up atexit() handler" :
- "Error opening the library");
+ VLOG_WARNING << "CUEW initialization failed: "
+ << ((cuew_result == CUEW_ERROR_ATEXIT_FAILED) ?
+ "Error setting up atexit() handler" :
+ "Error opening the library");
}
return result;
@@ -121,7 +122,8 @@ void device_cuda_info(vector<DeviceInfo> &devices)
int major;
cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, num);
if (major < 3) {
- VLOG(1) << "Ignoring device \"" << name << "\", this graphics card is no longer supported.";
+ VLOG_INFO << "Ignoring device \"" << name
+ << "\", this graphics card is no longer supported.";
continue;
}
@@ -166,21 +168,21 @@ void device_cuda_info(vector<DeviceInfo> &devices)
* Windows 10 even when it is, due to an issue in application profiles.
* Detect case where we expect it to be available and override. */
if (preempt_attr == 0 && (major >= 6) && system_windows_version_at_least(10, 17134)) {
- VLOG(1) << "Assuming device has compute preemption on Windows 10.";
+ VLOG_INFO << "Assuming device has compute preemption on Windows 10.";
preempt_attr = 1;
}
if (timeout_attr && !preempt_attr) {
- VLOG(1) << "Device is recognized as display.";
+ VLOG_INFO << "Device is recognized as display.";
info.description += " (Display)";
info.display_device = true;
display_devices.push_back(info);
}
else {
- VLOG(1) << "Device has compute preemption or is not used for display.";
+ VLOG_INFO << "Device has compute preemption or is not used for display.";
devices.push_back(info);
}
- VLOG(1) << "Added device \"" << name << "\" with id \"" << info.id << "\".";
+ VLOG_INFO << "Added device \"" << name << "\" with id \"" << info.id << "\".";
}
if (!display_devices.empty())
diff --git a/intern/cycles/device/cuda/device_impl.cpp b/intern/cycles/device/cuda/device_impl.cpp
index cb7e909a2d5..e75224abe90 100644
--- a/intern/cycles/device/cuda/device_impl.cpp
+++ b/intern/cycles/device/cuda/device_impl.cpp
@@ -244,9 +244,9 @@ string CUDADevice::compile_kernel(const uint kernel_features,
if (!use_adaptive_compilation()) {
if (!force_ptx) {
const string cubin = path_get(string_printf("lib/%s_sm_%d%d.cubin", name, major, minor));
- VLOG(1) << "Testing for pre-compiled kernel " << cubin << ".";
+ VLOG_INFO << "Testing for pre-compiled kernel " << cubin << ".";
if (path_exists(cubin)) {
- VLOG(1) << "Using precompiled kernel.";
+ VLOG_INFO << "Using precompiled kernel.";
return cubin;
}
}
@@ -256,9 +256,9 @@ string CUDADevice::compile_kernel(const uint kernel_features,
while (ptx_major >= 3) {
const string ptx = path_get(
string_printf("lib/%s_compute_%d%d.ptx", name, ptx_major, ptx_minor));
- VLOG(1) << "Testing for pre-compiled kernel " << ptx << ".";
+ VLOG_INFO << "Testing for pre-compiled kernel " << ptx << ".";
if (path_exists(ptx)) {
- VLOG(1) << "Using precompiled kernel.";
+ VLOG_INFO << "Using precompiled kernel.";
return ptx;
}
@@ -287,9 +287,9 @@ string CUDADevice::compile_kernel(const uint kernel_features,
const string cubin_file = string_printf(
"cycles_%s_%s_%d%d_%s.%s", name, kernel_arch, major, minor, kernel_md5.c_str(), kernel_ext);
const string cubin = path_cache_get(path_join("kernels", cubin_file));
- VLOG(1) << "Testing for locally compiled kernel " << cubin << ".";
+ VLOG_INFO << "Testing for locally compiled kernel " << cubin << ".";
if (path_exists(cubin)) {
- VLOG(1) << "Using locally compiled kernel.";
+ VLOG_INFO << "Using locally compiled kernel.";
return cubin;
}
@@ -323,7 +323,7 @@ string CUDADevice::compile_kernel(const uint kernel_features,
}
const int nvcc_cuda_version = cuewCompilerVersion();
- VLOG(1) << "Found nvcc " << nvcc << ", CUDA version " << nvcc_cuda_version << ".";
+ VLOG_INFO << "Found nvcc " << nvcc << ", CUDA version " << nvcc_cuda_version << ".";
if (nvcc_cuda_version < 101) {
printf(
"Unsupported CUDA version %d.%d detected, "
@@ -399,7 +399,8 @@ bool CUDADevice::load_kernels(const uint kernel_features)
*/
if (cuModule) {
if (use_adaptive_compilation()) {
- VLOG(1) << "Skipping CUDA kernel reload for adaptive compilation, not currently supported.";
+ VLOG_INFO
+ << "Skipping CUDA kernel reload for adaptive compilation, not currently supported.";
}
return true;
}
@@ -481,8 +482,8 @@ void CUDADevice::reserve_local_memory(const uint kernel_features)
cuMemGetInfo(&free_after, &total);
}
- VLOG(1) << "Local memory reserved " << string_human_readable_number(free_before - free_after)
- << " bytes. (" << string_human_readable_size(free_before - free_after) << ")";
+ VLOG_INFO << "Local memory reserved " << string_human_readable_number(free_before - free_after)
+ << " bytes. (" << string_human_readable_size(free_before - free_after) << ")";
# if 0
/* For testing mapped host memory, fill up device memory. */
@@ -513,7 +514,7 @@ void CUDADevice::init_host_memory()
}
}
else {
- VLOG(1) << "Mapped host memory disabled, failed to get system RAM";
+ VLOG_WARNING << "Mapped host memory disabled, failed to get system RAM";
map_host_limit = 0;
}
@@ -524,8 +525,8 @@ void CUDADevice::init_host_memory()
device_working_headroom = 32 * 1024 * 1024LL; // 32MB
device_texture_headroom = 128 * 1024 * 1024LL; // 128MB
- VLOG(1) << "Mapped host memory limit set to " << string_human_readable_number(map_host_limit)
- << " bytes. (" << string_human_readable_size(map_host_limit) << ")";
+ VLOG_INFO << "Mapped host memory limit set to " << string_human_readable_number(map_host_limit)
+ << " bytes. (" << string_human_readable_size(map_host_limit) << ")";
}
void CUDADevice::load_texture_info()
@@ -593,7 +594,7 @@ void CUDADevice::move_textures_to_host(size_t size, bool for_texture)
* multiple CUDA devices could be moving the memory. The
* first one will do it, and the rest will adopt the pointer. */
if (max_mem) {
- VLOG(1) << "Move memory from device to host: " << max_mem->name;
+ VLOG_WORK << "Move memory from device to host: " << max_mem->name;
static thread_mutex move_mutex;
thread_scoped_lock lock(move_mutex);
@@ -701,9 +702,9 @@ CUDADevice::CUDAMem *CUDADevice::generic_alloc(device_memory &mem, size_t pitch_
}
if (mem.name) {
- VLOG(1) << "Buffer allocate: " << mem.name << ", "
- << string_human_readable_number(mem.memory_size()) << " bytes. ("
- << string_human_readable_size(mem.memory_size()) << ")" << status;
+ VLOG_WORK << "Buffer allocate: " << mem.name << ", "
+ << string_human_readable_number(mem.memory_size()) << " bytes. ("
+ << string_human_readable_size(mem.memory_size()) << ")" << status;
}
mem.device_pointer = (device_ptr)device_pointer;
@@ -1008,9 +1009,9 @@ void CUDADevice::tex_alloc(device_texture &mem)
desc.NumChannels = mem.data_elements;
desc.Flags = 0;
- VLOG(1) << "Array 3D allocate: " << mem.name << ", "
- << string_human_readable_number(mem.memory_size()) << " bytes. ("
- << string_human_readable_size(mem.memory_size()) << ")";
+ VLOG_WORK << "Array 3D allocate: " << mem.name << ", "
+ << string_human_readable_number(mem.memory_size()) << " bytes. ("
+ << string_human_readable_size(mem.memory_size()) << ")";
cuda_assert(cuArray3DCreate(&array_3d, &desc));
diff --git a/intern/cycles/device/cuda/queue.cpp b/intern/cycles/device/cuda/queue.cpp
index 38c71866ad0..5912e68a92b 100644
--- a/intern/cycles/device/cuda/queue.cpp
+++ b/intern/cycles/device/cuda/queue.cpp
@@ -39,12 +39,12 @@ int CUDADeviceQueue::num_concurrent_states(const size_t state_size) const
num_states = max((int)(num_states * factor), 1024);
}
else {
- VLOG(3) << "CYCLES_CONCURRENT_STATES_FACTOR evaluated to 0";
+ VLOG_DEVICE_STATS << "CYCLES_CONCURRENT_STATES_FACTOR evaluated to 0";
}
}
- VLOG(3) << "GPU queue concurrent states: " << num_states << ", using up to "
- << string_human_readable_size(num_states * state_size);
+ VLOG_DEVICE_STATS << "GPU queue concurrent states: " << num_states << ", using up to "
+ << string_human_readable_size(num_states * state_size);
return num_states;
}
diff --git a/intern/cycles/device/device.cpp b/intern/cycles/device/device.cpp
index ea5b3c6dc8c..82c7881da5f 100644
--- a/intern/cycles/device/device.cpp
+++ b/intern/cycles/device/device.cpp
@@ -325,8 +325,8 @@ DeviceInfo Device::get_multi_device(const vector<DeviceInfo> &subdevices,
int orig_cpu_threads = (threads) ? threads : TaskScheduler::max_concurrency();
int cpu_threads = max(orig_cpu_threads - (subdevices.size() - 1), size_t(0));
- VLOG(1) << "CPU render threads reduced from " << orig_cpu_threads << " to " << cpu_threads
- << ", to dedicate to GPU.";
+ VLOG_INFO << "CPU render threads reduced from " << orig_cpu_threads << " to "
+ << cpu_threads << ", to dedicate to GPU.";
if (cpu_threads >= 1) {
DeviceInfo cpu_device = device;
@@ -338,7 +338,7 @@ DeviceInfo Device::get_multi_device(const vector<DeviceInfo> &subdevices,
}
}
else {
- VLOG(1) << "CPU render threads disabled for interactive render.";
+ VLOG_INFO << "CPU render threads disabled for interactive render.";
continue;
}
}
diff --git a/intern/cycles/device/hip/device.cpp b/intern/cycles/device/hip/device.cpp
index d6a5ed9c419..3c9c73e7db0 100644
--- a/intern/cycles/device/hip/device.cpp
+++ b/intern/cycles/device/hip/device.cpp
@@ -29,30 +29,31 @@ bool device_hip_init()
initialized = true;
int hipew_result = hipewInit(HIPEW_INIT_HIP);
if (hipew_result == HIPEW_SUCCESS) {
- VLOG(1) << "HIPEW initialization succeeded";
+ VLOG_INFO << "HIPEW initialization succeeded";
if (HIPDevice::have_precompiled_kernels()) {
- VLOG(1) << "Found precompiled kernels";
+ VLOG_INFO << "Found precompiled kernels";
result = true;
}
else if (hipewCompilerPath() != NULL) {
- VLOG(1) << "Found HIPCC " << hipewCompilerPath();
+ VLOG_INFO << "Found HIPCC " << hipewCompilerPath();
result = true;
}
else {
- VLOG(1) << "Neither precompiled kernels nor HIPCC was found,"
- << " unable to use HIP";
+ VLOG_INFO << "Neither precompiled kernels nor HIPCC was found,"
+ << " unable to use HIP";
}
}
else {
if (hipew_result == HIPEW_ERROR_ATEXIT_FAILED) {
- VLOG(1) << "HIPEW initialization failed: Error setting up atexit() handler";
+ VLOG_WARNING << "HIPEW initialization failed: Error setting up atexit() handler";
}
else if (hipew_result == HIPEW_ERROR_OLD_DRIVER) {
- VLOG(1) << "HIPEW initialization failed: Driver version too old, requires AMD Radeon Pro "
- "21.Q4 driver or newer";
+ VLOG_WARNING
+ << "HIPEW initialization failed: Driver version too old, requires AMD Radeon Pro "
+ "21.Q4 driver or newer";
}
else {
- VLOG(1) << "HIPEW initialization failed: Error opening HIP dynamic library";
+ VLOG_WARNING << "HIPEW initialization failed: Error opening HIP dynamic library";
}
}
@@ -165,16 +166,16 @@ void device_hip_info(vector<DeviceInfo> &devices)
hipDeviceGetAttribute(&timeout_attr, hipDeviceAttributeKernelExecTimeout, num);
if (timeout_attr && !preempt_attr) {
- VLOG(1) << "Device is recognized as display.";
+ VLOG_INFO << "Device is recognized as display.";
info.description += " (Display)";
info.display_device = true;
display_devices.push_back(info);
}
else {
- VLOG(1) << "Device has compute preemption or is not used for display.";
+ VLOG_INFO << "Device has compute preemption or is not used for display.";
devices.push_back(info);
}
- VLOG(1) << "Added device \"" << name << "\" with id \"" << info.id << "\".";
+ VLOG_INFO << "Added device \"" << name << "\" with id \"" << info.id << "\".";
}
if (!display_devices.empty())
diff --git a/intern/cycles/device/hip/device_impl.cpp b/intern/cycles/device/hip/device_impl.cpp
index ea68c821166..652c1001f85 100644
--- a/intern/cycles/device/hip/device_impl.cpp
+++ b/intern/cycles/device/hip/device_impl.cpp
@@ -233,9 +233,9 @@ string HIPDevice::compile_kernel(const uint kernel_features, const char *name, c
/* Attempt to use kernel provided with Blender. */
if (!use_adaptive_compilation()) {
const string fatbin = path_get(string_printf("lib/%s_%s.fatbin", name, arch));
- VLOG(1) << "Testing for pre-compiled kernel " << fatbin << ".";
+ VLOG_INFO << "Testing for pre-compiled kernel " << fatbin << ".";
if (path_exists(fatbin)) {
- VLOG(1) << "Using precompiled kernel.";
+ VLOG_INFO << "Using precompiled kernel.";
return fatbin;
}
}
@@ -265,9 +265,9 @@ string HIPDevice::compile_kernel(const uint kernel_features, const char *name, c
const string include_path = source_path;
const string fatbin_file = string_printf("cycles_%s_%s_%s", name, arch, kernel_md5.c_str());
const string fatbin = path_cache_get(path_join("kernels", fatbin_file));
- VLOG(1) << "Testing for locally compiled kernel " << fatbin << ".";
+ VLOG_INFO << "Testing for locally compiled kernel " << fatbin << ".";
if (path_exists(fatbin)) {
- VLOG(1) << "Using locally compiled kernel.";
+ VLOG_INFO << "Using locally compiled kernel.";
return fatbin;
}
@@ -301,7 +301,7 @@ string HIPDevice::compile_kernel(const uint kernel_features, const char *name, c
}
const int hipcc_hip_version = hipewCompilerVersion();
- VLOG(1) << "Found hipcc " << hipcc << ", HIP version " << hipcc_hip_version << ".";
+ VLOG_INFO << "Found hipcc " << hipcc << ", HIP version " << hipcc_hip_version << ".";
if (hipcc_hip_version < 40) {
printf(
"Unsupported HIP version %d.%d detected, "
@@ -361,7 +361,7 @@ bool HIPDevice::load_kernels(const uint kernel_features)
*/
if (hipModule) {
if (use_adaptive_compilation()) {
- VLOG(1) << "Skipping HIP kernel reload for adaptive compilation, not currently supported.";
+ VLOG_INFO << "Skipping HIP kernel reload for adaptive compilation, not currently supported.";
}
return true;
}
@@ -444,8 +444,8 @@ void HIPDevice::reserve_local_memory(const uint kernel_features)
hipMemGetInfo(&free_after, &total);
}
- VLOG(1) << "Local memory reserved " << string_human_readable_number(free_before - free_after)
- << " bytes. (" << string_human_readable_size(free_before - free_after) << ")";
+ VLOG_INFO << "Local memory reserved " << string_human_readable_number(free_before - free_after)
+ << " bytes. (" << string_human_readable_size(free_before - free_after) << ")";
# if 0
/* For testing mapped host memory, fill up device memory. */
@@ -476,7 +476,7 @@ void HIPDevice::init_host_memory()
}
}
else {
- VLOG(1) << "Mapped host memory disabled, failed to get system RAM";
+ VLOG_WARNING << "Mapped host memory disabled, failed to get system RAM";
map_host_limit = 0;
}
@@ -487,8 +487,8 @@ void HIPDevice::init_host_memory()
device_working_headroom = 32 * 1024 * 1024LL; // 32MB
device_texture_headroom = 128 * 1024 * 1024LL; // 128MB
- VLOG(1) << "Mapped host memory limit set to " << string_human_readable_number(map_host_limit)
- << " bytes. (" << string_human_readable_size(map_host_limit) << ")";
+ VLOG_INFO << "Mapped host memory limit set to " << string_human_readable_number(map_host_limit)
+ << " bytes. (" << string_human_readable_size(map_host_limit) << ")";
}
void HIPDevice::load_texture_info()
@@ -556,7 +556,7 @@ void HIPDevice::move_textures_to_host(size_t size, bool for_texture)
* multiple HIP devices could be moving the memory. The
* first one will do it, and the rest will adopt the pointer. */
if (max_mem) {
- VLOG(1) << "Move memory from device to host: " << max_mem->name;
+ VLOG_WORK << "Move memory from device to host: " << max_mem->name;
static thread_mutex move_mutex;
thread_scoped_lock lock(move_mutex);
@@ -658,9 +658,9 @@ HIPDevice::HIPMem *HIPDevice::generic_alloc(device_memory &mem, size_t pitch_pad
}
if (mem.name) {
- VLOG(1) << "Buffer allocate: " << mem.name << ", "
- << string_human_readable_number(mem.memory_size()) << " bytes. ("
- << string_human_readable_size(mem.memory_size()) << ")" << status;
+ VLOG_WORK << "Buffer allocate: " << mem.name << ", "
+ << string_human_readable_number(mem.memory_size()) << " bytes. ("
+ << string_human_readable_size(mem.memory_size()) << ")" << status;
}
mem.device_pointer = (device_ptr)device_pointer;
@@ -966,9 +966,9 @@ void HIPDevice::tex_alloc(device_texture &mem)
desc.NumChannels = mem.data_elements;
desc.Flags = 0;
- VLOG(1) << "Array 3D allocate: " << mem.name << ", "
- << string_human_readable_number(mem.memory_size()) << " bytes. ("
- << string_human_readable_size(mem.memory_size()) << ")";
+ VLOG_WORK << "Array 3D allocate: " << mem.name << ", "
+ << string_human_readable_number(mem.memory_size()) << " bytes. ("
+ << string_human_readable_size(mem.memory_size()) << ")";
hip_assert(hipArray3DCreate((hArray *)&array_3d, &desc));
diff --git a/intern/cycles/device/hip/queue.cpp b/intern/cycles/device/hip/queue.cpp
index 6c2c2c29624..8b3d963a32f 100644
--- a/intern/cycles/device/hip/queue.cpp
+++ b/intern/cycles/device/hip/queue.cpp
@@ -39,12 +39,12 @@ int HIPDeviceQueue::num_concurrent_states(const size_t state_size) const
num_states = max((int)(num_states * factor), 1024);
}
else {
- VLOG(3) << "CYCLES_CONCURRENT_STATES_FACTOR evaluated to 0";
+ VLOG_DEVICE_STATS << "CYCLES_CONCURRENT_STATES_FACTOR evaluated to 0";
}
}
- VLOG(3) << "GPU queue concurrent states: " << num_states << ", using up to "
- << string_human_readable_size(num_states * state_size);
+ VLOG_DEVICE_STATS << "GPU queue concurrent states: " << num_states << ", using up to "
+ << string_human_readable_size(num_states * state_size);
return num_states;
}
diff --git a/intern/cycles/device/metal/device_impl.mm b/intern/cycles/device/metal/device_impl.mm
index 086bf0af979..a0ac677beda 100644
--- a/intern/cycles/device/metal/device_impl.mm
+++ b/intern/cycles/device/metal/device_impl.mm
@@ -411,9 +411,9 @@ MetalDevice::MetalMem *MetalDevice::generic_alloc(device_memory &mem)
}
if (mem.name) {
- VLOG(2) << "Buffer allocate: " << mem.name << ", "
- << string_human_readable_number(mem.memory_size()) << " bytes. ("
- << string_human_readable_size(mem.memory_size()) << ")";
+ VLOG_WORK << "Buffer allocate: " << mem.name << ", "
+ << string_human_readable_number(mem.memory_size()) << " bytes. ("
+ << string_human_readable_size(mem.memory_size()) << ")";
}
mem.device_size = metal_buffer.allocatedSize;
@@ -800,9 +800,9 @@ void MetalDevice::tex_alloc(device_texture &mem)
desc.textureType = MTLTextureType3D;
desc.depth = mem.data_depth;
- VLOG(2) << "Texture 3D allocate: " << mem.name << ", "
- << string_human_readable_number(mem.memory_size()) << " bytes. ("
- << string_human_readable_size(mem.memory_size()) << ")";
+ VLOG_WORK << "Texture 3D allocate: " << mem.name << ", "
+ << string_human_readable_number(mem.memory_size()) << " bytes. ("
+ << string_human_readable_size(mem.memory_size()) << ")";
mtlTexture = [mtlDevice newTextureWithDescriptor:desc];
assert(mtlTexture);
@@ -834,9 +834,9 @@ void MetalDevice::tex_alloc(device_texture &mem)
desc.storageMode = storage_mode;
desc.usage = MTLTextureUsageShaderRead;
- VLOG(2) << "Texture 2D allocate: " << mem.name << ", "
- << string_human_readable_number(mem.memory_size()) << " bytes. ("
- << string_human_readable_size(mem.memory_size()) << ")";
+ VLOG_WORK << "Texture 2D allocate: " << mem.name << ", "
+ << string_human_readable_number(mem.memory_size()) << " bytes. ("
+ << string_human_readable_size(mem.memory_size()) << ")";
mtlTexture = [mtlDevice newTextureWithDescriptor:desc];
assert(mtlTexture);
diff --git a/intern/cycles/device/metal/queue.mm b/intern/cycles/device/metal/queue.mm
index 0e260886abb..55db7c5afce 100644
--- a/intern/cycles/device/metal/queue.mm
+++ b/intern/cycles/device/metal/queue.mm
@@ -311,8 +311,8 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
return false;
}
- VLOG(3) << "Metal queue launch " << device_kernel_as_string(kernel) << ", work_size "
- << work_size;
+ VLOG_DEVICE_STATS << "Metal queue launch " << device_kernel_as_string(kernel) << ", work_size "
+ << work_size;
id<MTLComputeCommandEncoder> mtlComputeCommandEncoder = get_compute_encoder(kernel);
diff --git a/intern/cycles/device/optix/device.cpp b/intern/cycles/device/optix/device.cpp
index 70810bae10d..68ca21374fd 100644
--- a/intern/cycles/device/optix/device.cpp
+++ b/intern/cycles/device/optix/device.cpp
@@ -31,12 +31,12 @@ bool device_optix_init()
const OptixResult result = optixInit();
if (result == OPTIX_ERROR_UNSUPPORTED_ABI_VERSION) {
- VLOG(1) << "OptiX initialization failed because the installed NVIDIA driver is too old. "
- "Please update to the latest driver first!";
+ VLOG_WARNING << "OptiX initialization failed because the installed NVIDIA driver is too old. "
+ "Please update to the latest driver first!";
return false;
}
else if (result != OPTIX_SUCCESS) {
- VLOG(1) << "OptiX initialization failed with error code " << (unsigned int)result;
+ VLOG_WARNING << "OptiX initialization failed with error code " << (unsigned int)result;
return false;
}
diff --git a/intern/cycles/device/optix/device_impl.cpp b/intern/cycles/device/optix/device_impl.cpp
index 9ab9bbb59c5..53697db5c04 100644
--- a/intern/cycles/device/optix/device_impl.cpp
+++ b/intern/cycles/device/optix/device_impl.cpp
@@ -278,7 +278,7 @@ OptiXDevice::OptiXDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
};
# endif
if (DebugFlags().optix.use_debug) {
- VLOG(1) << "Using OptiX debug mode.";
+ VLOG_INFO << "Using OptiX debug mode.";
options.validationMode = OPTIX_DEVICE_CONTEXT_VALIDATION_MODE_ALL;
}
optix_assert(optixDeviceContextCreate(cuContext, &options, &context));
@@ -1392,11 +1392,11 @@ bool OptiXDevice::build_optix_bvh(BVHOptiX *bvh,
/* The build flags have to match the ones used to query the built-in curve intersection
program (see optixBuiltinISModuleGet above) */
build_input.type == OPTIX_BUILD_INPUT_TYPE_CURVES) {
- VLOG(2) << "Using fast to trace OptiX BVH";
+ VLOG_INFO << "Using fast to trace OptiX BVH";
options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_TRACE | OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
}
else {
- VLOG(2) << "Using fast to update OptiX BVH";
+ VLOG_INFO << "Using fast to update OptiX BVH";
options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_BUILD | OPTIX_BUILD_FLAG_ALLOW_UPDATE;
}
diff --git a/intern/cycles/device/queue.cpp b/intern/cycles/device/queue.cpp
index de65047ed6a..cc0cf0ccf84 100644
--- a/intern/cycles/device/queue.cpp
+++ b/intern/cycles/device/queue.cpp
@@ -19,7 +19,7 @@ DeviceQueue::DeviceQueue(Device *device)
DeviceQueue::~DeviceQueue()
{
- if (VLOG_IS_ON(3)) {
+ if (VLOG_DEVICE_STATS_IS_ON) {
/* Print kernel execution times sorted by time. */
vector<pair<DeviceKernelMask, double>> stats_sorted;
for (const auto &stat : stats_kernel_time_) {
@@ -32,17 +32,18 @@ DeviceQueue::~DeviceQueue()
return a.second > b.second;
});
- VLOG(3) << "GPU queue stats:";
+ VLOG_DEVICE_STATS << "GPU queue stats:";
for (const auto &[mask, time] : stats_sorted) {
- VLOG(3) << " " << std::setfill(' ') << std::setw(10) << std::fixed << std::setprecision(5)
- << std::right << time << "s: " << device_kernel_mask_as_string(mask);
+ VLOG_DEVICE_STATS << " " << std::setfill(' ') << std::setw(10) << std::fixed
+ << std::setprecision(5) << std::right << time
+ << "s: " << device_kernel_mask_as_string(mask);
}
}
}
void DeviceQueue::debug_init_execution()
{
- if (VLOG_IS_ON(3)) {
+ if (VLOG_DEVICE_STATS_IS_ON) {
last_sync_time_ = time_dt();
}
@@ -51,9 +52,9 @@ void DeviceQueue::debug_init_execution()
void DeviceQueue::debug_enqueue(DeviceKernel kernel, const int work_size)
{
- if (VLOG_IS_ON(3)) {
- VLOG(4) << "GPU queue launch " << device_kernel_as_string(kernel) << ", work_size "
- << work_size;
+ if (VLOG_DEVICE_STATS_IS_ON) {
+ VLOG_DEVICE_STATS << "GPU queue launch " << device_kernel_as_string(kernel) << ", work_size "
+ << work_size;
}
last_kernels_enqueued_ |= (uint64_t(1) << (uint64_t)kernel);
@@ -61,10 +62,10 @@ void DeviceQueue::debug_enqueue(DeviceKernel kernel, const int work_size)
void DeviceQueue::debug_synchronize()
{
- if (VLOG_IS_ON(3)) {
+ if (VLOG_DEVICE_STATS_IS_ON) {
const double new_time = time_dt();
const double elapsed_time = new_time - last_sync_time_;
- VLOG(4) << "GPU queue synchronize, elapsed " << std::setw(10) << elapsed_time << "s";
+ VLOG_DEVICE_STATS << "GPU queue synchronize, elapsed " << std::setw(10) << elapsed_time << "s";
stats_kernel_time_[last_kernels_enqueued_] += elapsed_time;