Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorXavier Hallade <xavier.hallade@intel.com>2022-06-22 13:22:15 +0300
committerXavier Hallade <xavier.hallade@intel.com>2022-06-22 13:22:15 +0300
commit063055b3a5a2b2d6c4b1f6bd8a1c4ef163718321 (patch)
tree782f1ad25f2177f7e2f6bde2e55447146974548f /intern/cycles
parent803cb750275d867a1355f9526750d92a5e2551a7 (diff)
Cleanup: use C-style comments in oneAPI implementation
Diffstat (limited to 'intern/cycles')
-rw-r--r--intern/cycles/device/oneapi/device.cpp28
-rw-r--r--intern/cycles/device/oneapi/device_impl.cpp42
-rw-r--r--intern/cycles/device/oneapi/device_impl.h4
-rw-r--r--intern/cycles/device/oneapi/queue.cpp20
-rw-r--r--intern/cycles/kernel/device/gpu/parallel_active_index.h18
-rw-r--r--intern/cycles/kernel/device/oneapi/compat.h12
-rw-r--r--intern/cycles/kernel/device/oneapi/context_begin.h4
-rw-r--r--intern/cycles/kernel/device/oneapi/device_id.h4
-rw-r--r--intern/cycles/kernel/device/oneapi/dll_interface_template.h8
-rw-r--r--intern/cycles/kernel/device/oneapi/kernel.cpp32
-rw-r--r--intern/cycles/kernel/device/oneapi/kernel.h8
11 files changed, 90 insertions, 90 deletions
diff --git a/intern/cycles/device/oneapi/device.cpp b/intern/cycles/device/oneapi/device.cpp
index 326bdfabe92..b6f0f0c2b42 100644
--- a/intern/cycles/device/oneapi/device.cpp
+++ b/intern/cycles/device/oneapi/device.cpp
@@ -47,7 +47,7 @@ bool device_oneapi_init()
# endif
void *lib_handle = LOAD_ONEAPI_SHARED_LIBRARY(lib_path.c_str());
- // This shouldn't happens, but still make sense to have a branch for this
+ /* This shouldn't happen, but it still makes sense to have a branch for this. */
if (lib_handle == NULL) {
LOG(ERROR) << "oneAPI kernel shared library cannot be loaded for some reason. This should not "
"happen, however, it occurs hence oneAPI rendering will be disabled";
@@ -69,16 +69,16 @@ bool device_oneapi_init()
VLOG_INFO << "oneAPI kernel shared library has been loaded successfully";
- // We need to have this oneapi kernel shared library during all life-span of the Blender.
- // So it is not unloaded because of this.
- // FREE_SHARED_LIBRARY(lib_handle);
+ /* We need to have this oneapi kernel shared library during all life-span of the Blender.
+ * So it is not unloaded because of this.
+ * FREE_SHARED_LIBRARY(lib_handle); */
- // NOTE(@nsirgien): we need to enable JIT cache from here and
- // right now this cache policy is controlled by env. variables
- // NOTE(hallade) we also disable use of copy engine as it
- // improves stability as of intel/llvm sycl-nightly/20220529.
- // All these env variable can be set beforehand by end-users and
- // will in that case -not- be overwritten.
+ /* NOTE(@nsirgien): we need to enable JIT cache from here and
+ * right now this cache policy is controlled by env. variables. */
+ /* NOTE(hallade) we also disable use of copy engine as it
+ * improves stability as of intel/llvm sycl-nightly/20220529.
+ * All these env variables can be set beforehand by end-users and
+ * will in that case -not- be overwritten. */
# ifdef _WIN32
if (getenv("SYCL_CACHE_PERSISTENT") == nullptr) {
_putenv_s("SYCL_CACHE_PERSISTENT", "1");
@@ -136,7 +136,7 @@ static void device_iterator_cb(const char *id, const char *name, int num, void *
info.description = name;
info.num = num;
- // NOTE(@nsirgien): Should be unique at least on proper oneapi installation
+ /* NOTE(@nsirgien): Should be unique at least on proper oneapi installation. */
info.id = id;
info.has_nanovdb = true;
@@ -144,11 +144,11 @@ static void device_iterator_cb(const char *id, const char *name, int num, void *
info.has_gpu_queue = true;
- // NOTE(@nsirgien): oneAPI right now is focused on one device usage. In future it maybe will
- // change, but right now peer access from one device to another device is not supported
+ /* NOTE(@nsirgien): oneAPI right now is focused on one device usage. In future it maybe will
+ * change, but right now peer access from one device to another device is not supported. */
info.has_peer_memory = false;
- // NOTE(@nsirgien): Seems not possible to know from SYCL/oneAPI or Level0.
+ /* NOTE(@nsirgien): Seems not possible to know from SYCL/oneAPI or Level0. */
info.display_device = false;
devices->push_back(info);
diff --git a/intern/cycles/device/oneapi/device_impl.cpp b/intern/cycles/device/oneapi/device_impl.cpp
index a2f79fa1bed..5a53f1a45be 100644
--- a/intern/cycles/device/oneapi/device_impl.cpp
+++ b/intern/cycles/device/oneapi/device_impl.cpp
@@ -35,7 +35,7 @@ OneapiDevice::OneapiDevice(const DeviceInfo &info,
oneapi_dll.oneapi_set_error_cb(queue_error_cb, &oneapi_error_string);
- // Oneapi calls should be initialised on this moment;
+ /* Oneapi calls should be initialised on this moment. */
assert(oneapi_dll.oneapi_create_queue != nullptr);
bool is_finished_ok = oneapi_dll.oneapi_create_queue(device_queue, info.num);
@@ -93,8 +93,8 @@ BVHLayoutMask OneapiDevice::get_bvh_layout_mask() const
bool OneapiDevice::load_kernels(const uint requested_features)
{
assert(device_queue);
- // NOTE(@nsirgien): oneAPI can support compilation of kernel code with sertain feature set
- // with specialization constants, but it hasn't been implemented yet.
+ /* NOTE(@nsirgien): oneAPI can support compilation of kernel code with certain feature set
+ * with specialization constants, but it hasn't been implemented yet. */
(void)requested_features;
bool is_finished_ok = oneapi_dll.oneapi_run_test_kernel(device_queue);
@@ -120,20 +120,20 @@ void OneapiDevice::generic_alloc(device_memory &mem)
{
size_t memory_size = mem.memory_size();
- // TODO(@nsirgien): In future, if scene doesn't fit into device memory, then
- // we can use USM host memory.
- // Because of the expected performance impact, implementation of this has had a low priority
- // and is not implemented yet.
+ /* TODO(@nsirgien): In future, if scene doesn't fit into device memory, then
+ * we can use USM host memory.
+ * Because of the expected performance impact, implementation of this has had a low priority
+ * and is not implemented yet. */
assert(device_queue);
- // NOTE(@nsirgien): There are three types of Unified Shared Memory (USM) in oneAPI: host, device
- // and shared. For new project it maybe more beneficial to use USM shared memory, because it
- // provides automatic migration mechanism in order to allow to use the same pointer on host and
- // on device, without need to worry about explicit memory transfer operations. But for
- // Blender/Cycles this type of memory is not very suitable in current application architecture,
- // because Cycles already uses two different pointer for host activity and device activity, and
- // also has to perform all needed memory transfer operations. So, USM device memory
- // type has been used for oneAPI device in order to better fit in Cycles architecture.
+ /* NOTE(@nsirgien): There are three types of Unified Shared Memory (USM) in oneAPI: host, device
+ * and shared. For a new project it may be more beneficial to use USM shared memory, because it
+ * provides automatic migration mechanism in order to allow to use the same pointer on host and
+ * on device, without need to worry about explicit memory transfer operations. But for
+ * Blender/Cycles this type of memory is not very suitable in current application architecture,
+ * because Cycles already uses two different pointers for host activity and device activity, and
+ * also has to perform all needed memory transfer operations. So, USM device memory
+ * type has been used for oneAPI device in order to better fit in Cycles architecture. */
void *device_pointer = oneapi_dll.oneapi_usm_alloc_device(device_queue, memory_size);
if (device_pointer == nullptr) {
size_t max_memory_on_device = oneapi_dll.oneapi_get_memcapacity(device_queue);
@@ -156,14 +156,14 @@ void OneapiDevice::generic_copy_to(device_memory &mem)
{
size_t memory_size = mem.memory_size();
- // copy operation from host shouldn't be requested if there is no memory allocated on host.
+ /* Copy operation from host shouldn't be requested if there is no memory allocated on host. */
assert(mem.host_pointer);
assert(device_queue);
oneapi_dll.oneapi_usm_memcpy(
device_queue, (void *)mem.device_pointer, (void *)mem.host_pointer, memory_size);
}
-// TODO: Make sycl::queue part of OneapiQueue and avoid using pointers to sycl::queue.
+/* TODO: Make sycl::queue part of OneapiQueue and avoid using pointers to sycl::queue. */
SyclQueue *OneapiDevice::sycl_queue()
{
return device_queue;
@@ -376,7 +376,7 @@ void OneapiDevice::tex_alloc(device_texture &mem)
generic_alloc(mem);
generic_copy_to(mem);
- // Resize if needed. Also, in case of resize - allocate in advance for future allocs.
+ /* Resize if needed. Also, in case of resize - allocate in advance for future allocs. */
const uint slot = mem.slot;
if (slot >= texture_info.size()) {
texture_info.resize(slot + 128);
@@ -390,7 +390,7 @@ void OneapiDevice::tex_alloc(device_texture &mem)
void OneapiDevice::tex_free(device_texture &mem)
{
- // There is no texture memory in SYCL.
+ /* There is no texture memory in SYCL. */
if (mem.device_pointer) {
generic_free(mem);
}
@@ -403,8 +403,8 @@ unique_ptr<DeviceQueue> OneapiDevice::gpu_queue_create()
bool OneapiDevice::should_use_graphics_interop()
{
- // NOTE(@nsirgien): oneAPI doesn't yet support direct writing into graphics API objects, so
- // return false.
+ /* NOTE(@nsirgien): oneAPI doesn't yet support direct writing into graphics API objects, so
+ * return false. */
return false;
}
diff --git a/intern/cycles/device/oneapi/device_impl.h b/intern/cycles/device/oneapi/device_impl.h
index e3bdaa959af..af5cbce4332 100644
--- a/intern/cycles/device/oneapi/device_impl.h
+++ b/intern/cycles/device/oneapi/device_impl.h
@@ -89,8 +89,8 @@ class OneapiDevice : public Device {
virtual unique_ptr<DeviceQueue> gpu_queue_create() override;
- // NOTE(@nsirgien): Create this methods to avoid some compilation problems on Windows with host
- // side compilation (MSVC)
+ /* NOTE(@nsirgien): Create these methods to avoid some compilation problems on Windows with host
+ * side compilation (MSVC). */
void *usm_aligned_alloc_host(size_t memory_size, size_t alignment);
void usm_free(void *usm_ptr);
};
diff --git a/intern/cycles/device/oneapi/queue.cpp b/intern/cycles/device/oneapi/queue.cpp
index 52a8a429208..42e2408ee7a 100644
--- a/intern/cycles/device/oneapi/queue.cpp
+++ b/intern/cycles/device/oneapi/queue.cpp
@@ -38,20 +38,20 @@ int OneapiDeviceQueue::num_concurrent_states(const size_t state_size) const
{
int num_states;
- // TODO: implement and use get_num_multiprocessors and get_max_num_threads_per_multiprocessor.
+ /* TODO: implement and use get_num_multiprocessors and get_max_num_threads_per_multiprocessor. */
const size_t compute_units = oneapi_dll_.oneapi_get_compute_units_amount(
oneapi_device_->sycl_queue());
if (compute_units >= 128) {
- // dGPU path, make sense to allocate more states, because it will be dedicated GPU memory
+ /* dGPU path, make sense to allocate more states, because it will be dedicated GPU memory. */
int base = 1024 * 1024;
- // linear dependency (with coefficient less that 1) from amount of compute units
+ /* linear dependency (with coefficient less that 1) from amount of compute units. */
num_states = (base * (compute_units / 128)) * 3 / 4;
- // Limit amount of integrator states by one quarter of device memory, because
- // other allocations will need some space as well
- // TODO: base this calculation on the how many states what the GPU is actually capable of
- // running, with some headroom to improve occupancy. If the texture don't fit, offload into
- // unified memory.
+ /* Limit amount of integrator states by one quarter of device memory, because
+ * other allocations will need some space as well
+ * TODO: base this calculation on how many states the GPU is actually capable of
+ * running, with some headroom to improve occupancy. If the textures don't fit, offload into
+ * unified memory. */
size_t states_memory_size = num_states * state_size;
size_t device_memory_amount =
(oneapi_dll_.oneapi_get_memcapacity)(oneapi_device_->sycl_queue());
@@ -60,8 +60,8 @@ int OneapiDeviceQueue::num_concurrent_states(const size_t state_size) const
}
}
else {
- // iGPU path - no really need to allocate a lot of integrator states, because it is shared GPU
- // memory
+ /* iGPU path - no real need to allocate a lot of integrator states because it is shared GPU
+ * memory. */
num_states = 1024 * 512;
}
diff --git a/intern/cycles/kernel/device/gpu/parallel_active_index.h b/intern/cycles/kernel/device/gpu/parallel_active_index.h
index 66851c51105..cf53bf85067 100644
--- a/intern/cycles/kernel/device/gpu/parallel_active_index.h
+++ b/intern/cycles/kernel/device/gpu/parallel_active_index.h
@@ -68,11 +68,11 @@ void gpu_parallel_active_index_array_impl(const uint num_states,
int[GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE + 1]>(item_id.get_group());
int *warp_offset = *ptr;
- // NOTE(@nsirgien): Here we calculate the same value as below but
- // faster for DPC++ : seems CUDA converting "%", "/", "*" based calculations below into
- // something faster already but DPC++ doesn't, so it's better to use
- // direct request of needed parameters - switching from this computation to computation below
- // will cause 2.5x performance slowdown.
+ /* NOTE(@nsirgien): Here we calculate the same value as below but
+ * faster for DPC++ : seems CUDA converting "%", "/", "*" based calculations below into
+ * something faster already but DPC++ doesn't, so it's better to use
+ * direct request of needed parameters - switching from this computation to computation below
+ * will cause 2.5x performance slowdown. */
const uint thread_index = item_id.get_local_id(0);
const uint thread_warp = item_id.get_sub_group().get_local_id();
@@ -123,8 +123,8 @@ __device__ void gpu_parallel_active_index_array_impl(const uint num_states,
}
#ifdef __KERNEL_ONEAPI__
- // NOTE(@nsirgien): For us here only local memory writing (warp_offset) is important,
- // so faster local barriers can be used.
+ /* NOTE(@nsirgien): For us here only local memory writing (warp_offset) is important,
+ * so faster local barriers can be used. */
ccl_gpu_local_syncthreads();
#else
ccl_gpu_syncthreads();
@@ -146,8 +146,8 @@ __device__ void gpu_parallel_active_index_array_impl(const uint num_states,
}
#ifdef __KERNEL_ONEAPI__
- // NOTE(@nsirgien): For us here only important local memory writing (warp_offset),
- // so faster local barriers can be used.
+ /* NOTE(@nsirgien): For us here only important local memory writing (warp_offset),
+ * so faster local barriers can be used. */
ccl_gpu_local_syncthreads();
#else
ccl_gpu_syncthreads();
diff --git a/intern/cycles/kernel/device/oneapi/compat.h b/intern/cycles/kernel/device/oneapi/compat.h
index ff232f1d025..30b0f088ede 100644
--- a/intern/cycles/kernel/device/oneapi/compat.h
+++ b/intern/cycles/kernel/device/oneapi/compat.h
@@ -48,7 +48,7 @@
#define kernel_assert(cond)
#define ccl_may_alias
-// clang-format off
+/* clang-format off */
/* kernel.h adapters */
#define ccl_gpu_kernel(block_num_threads, thread_num_registers)
@@ -146,7 +146,7 @@ void oneapi_kernel_##name(KernelGlobalsGPU *ccl_restrict kg, \
/* GPU texture objects */
-// clang-format on
+/* clang-format on */
/* Types */
/* It's not possible to use sycl types like sycl::float3, sycl::int3, etc
@@ -192,10 +192,10 @@ ccl_always_inline float3 make_float3(float x)
#include "util/half.h"
#include "util/types.h"
-// NOTE(@nsirgien): Declaring these functions after types headers is very important because they
-// include oneAPI headers, which transitively include math.h headers which will cause redefintions
-// of the math defines because math.h also uses them and having them defined before math.h include
-// - it actually UB
+/* NOTE(@nsirgien): Declaring these functions after types headers is very important because they
+ * include oneAPI headers, which transitively include math.h headers which will cause redefinitions
+ * of the math defines because math.h also uses them and having them defined before math.h include
+ * is actually UB. */
/* Use fast math functions - get them from sycl::native namespace for native math function
* implementations */
#define cosf(x) sycl::native::cos(((float)(x)))
diff --git a/intern/cycles/kernel/device/oneapi/context_begin.h b/intern/cycles/kernel/device/oneapi/context_begin.h
index f729ffd0870..6d6f8cec4ca 100644
--- a/intern/cycles/kernel/device/oneapi/context_begin.h
+++ b/intern/cycles/kernel/device/oneapi/context_begin.h
@@ -6,8 +6,8 @@
# include <nanovdb/util/SampleFromVoxels.h>
#endif
-// clang-format off
+/* clang-format off */
struct ONEAPIKernelContext : public KernelGlobalsGPU {
public:
# include "kernel/device/oneapi/image.h"
- // clang-format on
+ /* clang-format on */
diff --git a/intern/cycles/kernel/device/oneapi/device_id.h b/intern/cycles/kernel/device/oneapi/device_id.h
index d27c553497e..b4c94ac27a2 100644
--- a/intern/cycles/kernel/device/oneapi/device_id.h
+++ b/intern/cycles/kernel/device/oneapi/device_id.h
@@ -3,8 +3,8 @@
#pragma once
-// from public source :
-// https://gitlab.freedesktop.org/mesa/mesa/-/blob/main/include/pci_ids/iris_pci_ids.h
+/* from public source :
+ * https://gitlab.freedesktop.org/mesa/mesa/-/blob/main/include/pci_ids/iris_pci_ids.h */
const static std::set<uint32_t> intel_arc_alchemist_device_ids = {
0x4f80, 0x4f81, 0x4f82, 0x4f83, 0x4f84, 0x4f87, 0x4f88, 0x5690, 0x5691,
0x5692, 0x5693, 0x5694, 0x5695, 0x5696, 0x5697, 0x56a0, 0x56a1, 0x56a2,
diff --git a/intern/cycles/kernel/device/oneapi/dll_interface_template.h b/intern/cycles/kernel/device/oneapi/dll_interface_template.h
index 63ca51b6e80..2d740b4c64a 100644
--- a/intern/cycles/kernel/device/oneapi/dll_interface_template.h
+++ b/intern/cycles/kernel/device/oneapi/dll_interface_template.h
@@ -1,4 +1,4 @@
-// device_capabilities() returns a C string that must be free'd with oneapi_free()
+/* device_capabilities() returns a C string that must be free'd with oneapi_free(). */
DLL_INTERFACE_CALL(oneapi_device_capabilities, char *)
DLL_INTERFACE_CALL(oneapi_free, void, void *)
DLL_INTERFACE_CALL(oneapi_get_memcapacity, size_t, SyclQueue *queue)
@@ -26,9 +26,9 @@ DLL_INTERFACE_CALL(oneapi_usm_memset,
DLL_INTERFACE_CALL(oneapi_run_test_kernel, bool, SyclQueue *queue)
-// Operation with Kernel globals structure - map of global/constant allocation, which filles before
-// render/kernel execution As we don't know in cycles sizeof this - Cycles will manage just as
-// pointer
+/* Operations with the Kernel globals structure - a map of global/constant allocations - filled
+ * before render/kernel execution. As Cycles doesn't know its size, Cycles will manage it just as
+ * a pointer. */
DLL_INTERFACE_CALL(oneapi_kernel_globals_size, bool, SyclQueue *queue, size_t &kernel_global_size)
DLL_INTERFACE_CALL(oneapi_set_global_memory,
void,
diff --git a/intern/cycles/kernel/device/oneapi/kernel.cpp b/intern/cycles/kernel/device/oneapi/kernel.cpp
index 6352fd9977f..8681b00a26d 100644
--- a/intern/cycles/kernel/device/oneapi/kernel.cpp
+++ b/intern/cycles/kernel/device/oneapi/kernel.cpp
@@ -3,7 +3,7 @@
#ifdef WITH_ONEAPI
-// clang-format off
+/* clang-format off */
# include "kernel.h"
# include <iostream>
# include <map>
@@ -19,7 +19,7 @@
# include "kernel/device/oneapi/kernel_templates.h"
# include "kernel/device/gpu/kernel.h"
-// clang-format on
+/* clang-format on */
static OneAPIErrorCallback s_error_cb = nullptr;
static void *s_error_user_ptr = nullptr;
@@ -214,7 +214,7 @@ void oneapi_set_global_memory(SyclQueue *queue_,
std::string matched_name(memory_name);
-// This macros will change global ptr of KernelGlobals via name matching
+/* This macro will change global ptr of KernelGlobals via name matching. */
# define KERNEL_DATA_ARRAY(type, name) \
else if (#name == matched_name) \
{ \
@@ -238,8 +238,8 @@ void oneapi_set_global_memory(SyclQueue *queue_,
# undef KERNEL_DATA_ARRAY
}
-// TODO: Move device information to OneapiDevice initialized on creation and use it.
-// TODO: Move below function to oneapi/queue.cpp
+/* TODO: Move device information to OneapiDevice initialized on creation and use it. */
+/* TODO: Move below function to oneapi/queue.cpp. */
size_t oneapi_kernel_preferred_local_size(SyclQueue *queue_,
const DeviceKernel kernel,
const size_t kernel_global_size)
@@ -318,7 +318,7 @@ bool oneapi_enqueue_kernel(KernelContext *kernel_context,
device_kernel == DEVICE_KERNEL_INTEGRATOR_COMPACT_PATHS_ARRAY ||
device_kernel == DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_PATHS_ARRAY) {
int num_states = *((int *)(args[0]));
- // Round up to the next work-group
+ /* Round up to the next work-group. */
size_t groups_count = (num_states + local_size - 1) / local_size;
/* NOTE(@nsirgien): Because for now non-uniform workgroups don't work on most of
oneAPI devices, here is extending of work size to match uniform requirements */
@@ -706,8 +706,8 @@ static std::vector<sycl::device> oneapi_available_devices()
if (getenv("CYCLES_ONEAPI_ALL_DEVICES") != nullptr)
allow_all_devices = true;
- // Host device is useful only for debugging at the moment
- // so we hide this device with default build settings
+ /* Host device is useful only for debugging at the moment
+ * so we hide this device with default build settings. */
# ifdef WITH_ONEAPI_SYCL_HOST_ENABLED
bool allow_host = true;
# else
@@ -718,7 +718,7 @@ static std::vector<sycl::device> oneapi_available_devices()
std::vector<sycl::device> available_devices;
for (const sycl::platform &platform : oneapi_platforms) {
- // ignore OpenCL platforms to avoid using the same devices through both Level-Zero and OpenCL
+ /* ignore OpenCL platforms to avoid using the same devices through both Level-Zero and OpenCL. */
if (platform.get_backend() == sycl::backend::opencl) {
continue;
}
@@ -729,7 +729,7 @@ static std::vector<sycl::device> oneapi_available_devices()
for (const sycl::device &device : oneapi_devices) {
if (allow_all_devices) {
- // still filter out host device if build doesn't support it.
+ /* still filter out host device if build doesn't support it. */
if (allow_host || !device.is_host()) {
available_devices.push_back(device);
}
@@ -737,9 +737,9 @@ static std::vector<sycl::device> oneapi_available_devices()
else {
bool filter_out = false;
- // For now we support all Intel(R) Arc(TM) devices
- // and any future GPU with more than 128 execution units
- // official support can be broaden to older and smaller GPUs once ready
+ /* For now we support all Intel(R) Arc(TM) devices
+ * and any future GPU with more than 128 execution units
+ * official support can be broadened to older and smaller GPUs once ready. */
if (device.is_gpu() && platform.get_backend() == sycl::backend::ext_oneapi_level_zero) {
ze_device_handle_t ze_device = sycl::get_native<sycl::backend::ext_oneapi_level_zero>(
device);
@@ -752,7 +752,7 @@ static std::vector<sycl::device> oneapi_available_devices()
if (!is_dg2 || number_of_eus < 128)
filter_out = true;
- // if not already filtered out, check driver version
+ /* if not already filtered out, check driver version. */
if (!filter_out) {
int driver_build_version = parse_driver_build_version(device);
if ((driver_build_version > 100000 &&
@@ -836,8 +836,8 @@ char *oneapi_device_capabilities()
GET_NUM_ATTR(address_bits)
GET_NUM_ATTR(max_mem_alloc_size)
- // NOTE(@nsirgien): Implementation doesn't use image support as bindless images aren't
- // supported so we always return false, even if device supports HW texture usage acceleration
+ /* NOTE(@nsirgien): Implementation doesn't use image support as bindless images aren't
+ * supported so we always return false, even if device supports HW texture usage acceleration. */
bool image_support = false;
WRITE_ATTR("image_support", (size_t)image_support)
diff --git a/intern/cycles/kernel/device/oneapi/kernel.h b/intern/cycles/kernel/device/oneapi/kernel.h
index 70aca382f21..c5f853742ed 100644
--- a/intern/cycles/kernel/device/oneapi/kernel.h
+++ b/intern/cycles/kernel/device/oneapi/kernel.h
@@ -7,8 +7,8 @@
# include <stddef.h>
-// NOTE(@nsirgien): Should match underlying type in the declaration inside "kernel/types.h"
-// TODO: use kernel/types.h directly
+/* NOTE(@nsirgien): Should match underlying type in the declaration inside "kernel/types.h"
+ * TODO: use kernel/types.h directly. */
enum DeviceKernel : int;
# ifndef CYCLES_KERNEL_ONEAPI_EXPORT
@@ -33,9 +33,9 @@ typedef void (*OneAPIDeviceIteratorCallback)(const char *id,
typedef void (*OneAPIErrorCallback)(const char *error, void *user_ptr);
struct KernelContext {
- // Queue, associated with selected device
+ /* Queue, associated with selected device */
SyclQueue *queue;
- // Pointer to USM device memory with all global/constant allocation on this device
+ /* Pointer to USM device memory with all global/constant allocation on this device */
void *kernel_globals;
};