diff options
author | Nikita Sirgienko <nikita.sirgienko@intel.com> | 2022-07-06 18:26:23 +0300 |
---|---|---|
committer | Nikita Sirgienko <nikita.sirgienko@intel.com> | 2022-07-06 18:26:23 +0300 |
commit | 0df574b55ee9cf1b6c22a3a6a6cc0ef3a5c1fe83 (patch) | |
tree | bbc8c5cf42ce71dd7d1dfd34cd4add04914c59f9 /intern/cycles/kernel/device | |
parent | 6636edbb00942a1a04bdf6f3cb843a1636ffa8b4 (diff) |
Cycles: Improve occupancy for Intel GPUs
Initially, the oneAPI implementation waited after each memory
operation, even when there was no need to. Now the implementation
waits only when it is really necessary — this has improved
performance noticeably for some scenes and slightly for the rest.
Diffstat (limited to 'intern/cycles/kernel/device')
-rw-r--r-- | intern/cycles/kernel/device/oneapi/kernel.cpp | 32 |
1 files changed, 30 insertions, 2 deletions
diff --git a/intern/cycles/kernel/device/oneapi/kernel.cpp b/intern/cycles/kernel/device/oneapi/kernel.cpp index 11a551e822e..ec979db2455 100644 --- a/intern/cycles/kernel/device/oneapi/kernel.cpp +++ b/intern/cycles/kernel/device/oneapi/kernel.cpp @@ -103,8 +103,13 @@ bool oneapi_usm_memcpy(SyclQueue *queue_, void *dest, void *src, size_t num_byte sycl::queue *queue = reinterpret_cast<sycl::queue *>(queue_); oneapi_check_usm(queue_, dest, true); oneapi_check_usm(queue_, src, true); + sycl::event mem_event = queue->memcpy(dest, src, num_bytes); +#ifdef WITH_CYCLES_DEBUG try { - sycl::event mem_event = queue->memcpy(dest, src, num_bytes); + /* NOTE(@nsirgien) Waiting on memory operation may give more preciese error + * messages in case of the problems, but due to impact on occupancy + * make sense enable it only during cycles debugging + */ mem_event.wait_and_throw(); return true; } @@ -114,6 +119,20 @@ bool oneapi_usm_memcpy(SyclQueue *queue_, void *dest, void *src, size_t num_byte } return false; } +#else + sycl::usm::alloc dest_type = get_pointer_type(dest, queue->get_context()); + sycl::usm::alloc src_type = get_pointer_type(src, queue->get_context()); + bool from_device_to_host + = dest_type == sycl::usm::alloc::host && src_type == sycl::usm::alloc::device; + bool host_or_device_memop_with_offset + = dest_type == sycl::usm::alloc::unknown || src_type == sycl::usm::alloc::unknown; + /* NOTE(@sirgienko) Host-side blocking wait on this operations is mandatory, host + * may don't wait until end of transfer before using the memory. 
+ */ + if(from_device_to_host || host_or_device_memop_with_offset) + mem_event.wait(); + return true; +#endif } bool oneapi_usm_memset(SyclQueue *queue_, void *usm_ptr, unsigned char value, size_t num_bytes) @@ -121,8 +140,13 @@ bool oneapi_usm_memset(SyclQueue *queue_, void *usm_ptr, unsigned char value, si assert(queue_); sycl::queue *queue = reinterpret_cast<sycl::queue *>(queue_); oneapi_check_usm(queue_, usm_ptr, true); + sycl::event mem_event = queue->memset(usm_ptr, value, num_bytes); +#ifdef WITH_CYCLES_DEBUG try { - sycl::event mem_event = queue->memset(usm_ptr, value, num_bytes); + /* NOTE(@nsirgien) Waiting on memory operation may give more preciese error + * messages in case of the problems, but due to impact on occupancy + * make sense enable it only during cycles debugging + */ mem_event.wait_and_throw(); return true; } @@ -132,6 +156,10 @@ bool oneapi_usm_memset(SyclQueue *queue_, void *usm_ptr, unsigned char value, si } return false; } +#else + (void)mem_event; + return true; +#endif } bool oneapi_queue_synchronize(SyclQueue *queue_) |