/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2011-2022 Blender Foundation */

#ifdef WITH_HIP

#  include <climits>
#  include <limits.h>
#  include <stdio.h>
#  include <stdlib.h>
#  include <string.h>

#  include "device/hip/device_impl.h"

#  include "util/debug.h"
#  include "util/foreach.h"
#  include "util/log.h"
#  include "util/map.h"
#  include "util/md5.h"
#  include "util/path.h"
#  include "util/string.h"
#  include "util/system.h"
#  include "util/time.h"
#  include "util/types.h"
#  include "util/windows.h"

#  include "kernel/device/hip/globals.h"

CCL_NAMESPACE_BEGIN

class HIPDevice;

bool HIPDevice::have_precompiled_kernels()
{
  string fatbins_path = path_get("lib");
  return path_exists(fatbins_path);
}

BVHLayoutMask HIPDevice::get_bvh_layout_mask() const
{
  return BVH_LAYOUT_BVH2;
}

void HIPDevice::set_error(const string &error)
{
  Device::set_error(error);

  if (first_error) {
    fprintf(stderr, "\nRefer to the Cycles GPU rendering documentation for possible solutions:\n");
    fprintf(stderr,
            "https://docs.blender.org/manual/en/latest/render/cycles/gpu_rendering.html\n\n");
    first_error = false;
  }
}

HIPDevice::HIPDevice(const DeviceInfo &info, Stats &stats, Profiler &profiler)
    : Device(info, stats, profiler), texture_info(this, "texture_info", MEM_GLOBAL)
{
  first_error = true;

  hipDevId = info.num;
  hipDevice = 0;
  hipContext = 0;

  hipModule = 0;

  need_texture_info = false;

  device_texture_headroom = 0;
  device_working_headroom = 0;
  move_texture_to_host = false;
  map_host_limit = 0;
  map_host_used = 0;
  can_map_host = 0;
  pitch_alignment = 0;

  /* Initialize HIP. */
  hipError_t result = hipInit(0);
  if (result != hipSuccess) {
    set_error(string_printf("Failed to initialize HIP runtime (%s)", hipewErrorString(result)));
    return;
  }

  /* Setup device and context. */
  result = hipDeviceGet(&hipDevice, hipDevId);
  if (result != hipSuccess) {
    set_error(string_printf("Failed to get HIP device handle from ordinal (%s)",
                            hipewErrorString(result)));
    return;
  }

  /* hipDeviceMapHost for mapping host memory when out of device memory.
   * hipDeviceLmemResizeToMax for reserving local memory ahead of render,
   * so we can predict which memory to map to host. */
  hip_assert(hipDeviceGetAttribute(&can_map_host, hipDeviceAttributeCanMapHostMemory, hipDevice));

  hip_assert(
      hipDeviceGetAttribute(&pitch_alignment, hipDeviceAttributeTexturePitchAlignment, hipDevice));

  unsigned int ctx_flags = hipDeviceLmemResizeToMax;
  if (can_map_host) {
    ctx_flags |= hipDeviceMapHost;
    init_host_memory();
  }

  /* Create context. */
  result = hipCtxCreate(&hipContext, ctx_flags, hipDevice);

  if (result != hipSuccess) {
    set_error(string_printf("Failed to create HIP context (%s)", hipewErrorString(result)));
    return;
  }

  int major, minor;
  hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, hipDevId);
  hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, hipDevId);
  hipDevArchitecture = major * 100 + minor * 10;

  /* Pop context set by hipCtxCreate. */
  hipCtxPopCurrent(NULL);
}

HIPDevice::~HIPDevice()
{
  texture_info.free();

  hip_assert(hipCtxDestroy(hipContext));
}

bool HIPDevice::support_device(const uint /*kernel_features*/)
{
  if (hipSupportsDevice(hipDevId)) {
    return true;
  }
  else {
    /* We only support Navi and above. */
    hipDeviceProp_t props;
    hipGetDeviceProperties(&props, hipDevId);

    set_error(string_printf("HIP backend requires AMD RDNA graphics card or up, but found %s.",
                            props.name));
    return false;
  }
}

bool HIPDevice::check_peer_access(Device *peer_device)
{
  if (peer_device == this) {
    return false;
  }
  if (peer_device->info.type != DEVICE_HIP && peer_device->info.type != DEVICE_OPTIX) {
    return false;
  }

  HIPDevice *const peer_device_hip = static_cast<HIPDevice *>(peer_device);

  int can_access = 0;
  hip_assert(hipDeviceCanAccessPeer(&can_access, hipDevice, peer_device_hip->hipDevice));
  if (can_access == 0) {
    return false;
  }

  // Ensure array access over the link is possible as well (for 3D textures)
  hip_assert(hipDeviceGetP2PAttribute(
      &can_access, hipDevP2PAttrHipArrayAccessSupported, hipDevice, peer_device_hip->hipDevice));
  if (can_access == 0) {
    return false;
  }

  // Enable peer access in both directions
  {
    const HIPContextScope scope(this);
    hipError_t result = hipCtxEnablePeerAccess(peer_device_hip->hipContext, 0);
    if (result != hipSuccess) {
      set_error(string_printf("Failed to enable peer access on HIP context (%s)",
                              hipewErrorString(result)));
      return false;
    }
  }
  {
    const HIPContextScope scope(peer_device_hip);
    hipError_t result = hipCtxEnablePeerAccess(hipContext, 0);
    if (result != hipSuccess) {
      set_error(string_printf("Failed to enable peer access on HIP context (%s)",
                              hipewErrorString(result)));
      return false;
    }
  }

  return true;
}

bool HIPDevice::use_adaptive_compilation()
{
  return DebugFlags().hip.adaptive_compile;
}

/* Common HIPCC flags which stay the same regardless of shading model and
 * kernel sources MD5, and only depend on compiler or compilation settings. */
string HIPDevice::compile_kernel_get_common_cflags(const uint kernel_features)
{
  const int machine = system_cpu_bits();
  const string source_path = path_get("source");
  const string include_path = source_path;
  string cflags = string_printf(
      "-m%d "
      "--use_fast_math "
      "-DHIPCC "
      "-I\"%s\"",
      machine,
      include_path.c_str());
  if (use_adaptive_compilation()) {
    cflags += " -D__KERNEL_FEATURES__=" + to_string(kernel_features);
  }
  return cflags;
}

string HIPDevice::compile_kernel(const uint kernel_features, const char *name, const char *base)
{
  /* Compute kernel name. */
  int major, minor;
  hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, hipDevId);
  hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, hipDevId);
  hipDeviceProp_t props;
  hipGetDeviceProperties(&props, hipDevId);

  /* gcnArchName can contain tokens after the arch name with features, i.e.
   * `gfx1010:sramecc-:xnack-`, so we tokenize it to get the first part. */
  char *arch = strtok(props.gcnArchName, ":");
  if (arch == NULL) {
    arch = props.gcnArchName;
  }

  /* Attempt to use kernel provided with Blender. */
  if (!use_adaptive_compilation()) {
    const string fatbin = path_get(string_printf("lib/%s_%s.fatbin", name, arch));
    VLOG_INFO << "Testing for pre-compiled kernel " << fatbin << ".";
    if (path_exists(fatbin)) {
      VLOG_INFO << "Using precompiled kernel.";
      return fatbin;
    }
  }

  /* Try to use locally compiled kernel. */
  string source_path = path_get("source");
  const string source_md5 = path_files_md5_hash(source_path);

  /* We include cflags into md5 so changing hip toolkit or changing other
   * compiler command line arguments makes sure fatbin gets re-built. */
  string common_cflags = compile_kernel_get_common_cflags(kernel_features);
  const string kernel_md5 = util_md5_string(source_md5 + common_cflags);

  const char *const kernel_ext = "genco";
  std::string options;
#  ifdef _WIN32
  options.append("Wno-parentheses-equality -Wno-unused-value --hipcc-func-supp -ffast-math");
#  else
  options.append("Wno-parentheses-equality -Wno-unused-value --hipcc-func-supp -O3 -ffast-math");
#  endif
#  ifdef _DEBUG
  options.append(" -save-temps");
#  endif
  options.append(" --amdgpu-target=").append(arch);

  const string include_path = source_path;
  const string fatbin_file = string_printf("cycles_%s_%s_%s", name, arch, kernel_md5.c_str());
  const string fatbin = path_cache_get(path_join("kernels", fatbin_file));
  VLOG_INFO << "Testing for locally compiled kernel " << fatbin << ".";
  if (path_exists(fatbin)) {
    VLOG_INFO << "Using locally compiled kernel.";
    return fatbin;
  }

#  ifdef _WIN32
  if (!use_adaptive_compilation() && have_precompiled_kernels()) {
    if (!hipSupportsDevice(hipDevId)) {
      set_error(
          string_printf("HIP backend requires compute capability 10.1 or up, but found %d.%d. "
                        "Your GPU is not supported.",
                        major,
                        minor));
    }
    else {
      set_error(
          string_printf("HIP binary kernel for this graphics card compute "
                        "capability (%d.%d) not found.",
                        major,
                        minor));
    }
    return string();
  }
#  endif

  /* Compile. */
  const char *const hipcc = hipewCompilerPath();
  if (hipcc == NULL) {
    set_error(
        "HIP hipcc compiler not found. "
        "Install HIP toolkit in default location.");
    return string();
  }

  const int hipcc_hip_version = hipewCompilerVersion();
  VLOG_INFO << "Found hipcc " << hipcc << ", HIP version " << hipcc_hip_version << ".";
  if (hipcc_hip_version < 40) {
    printf(
        "Unsupported HIP version %d.%d detected, "
        "you need HIP 4.0 or newer.\n",
        hipcc_hip_version / 10,
        hipcc_hip_version % 10);
    return string();
  }

  double starttime = time_dt();

  path_create_directories(fatbin);

  source_path = path_join(path_join(source_path, "kernel"),
                          path_join("device", path_join(base, string_printf("%s.cpp", name))));

  string command = string_printf("%s -%s -I %s --%s %s -o \"%s\"",
                                 hipcc,
                                 options.c_str(),
                                 include_path.c_str(),
                                 kernel_ext,
                                 source_path.c_str(),
                                 fatbin.c_str());

  printf("Compiling %sHIP kernel ...\n%s\n",
         (use_adaptive_compilation()) ? "adaptive " : "",
         command.c_str());

#  ifdef _WIN32
  command = "call " + command;
#  endif
  if (system(command.c_str()) != 0) {
    set_error(
        "Failed to execute compilation command, "
        "see console for details.");
    return string();
  }

  /* Verify if compilation succeeded. */
  if (!path_exists(fatbin)) {
    set_error(
        "HIP kernel compilation failed, "
        "see console for details.");
    return string();
  }

  printf("Kernel compilation finished in %.2lfs.\n", time_dt() - starttime);

  return fatbin;
}

bool HIPDevice::load_kernels(const uint kernel_features)
{
  /* TODO(sergey): Support kernels re-load for HIP devices adaptive compile.
   *
   * Currently re-loading kernels will invalidate memory pointers. */
  if (hipModule) {
    if (use_adaptive_compilation()) {
      VLOG_INFO << "Skipping HIP kernel reload for adaptive compilation, not currently supported.";
    }
    return true;
  }

  /* check if hip init succeeded */
  if (hipContext == 0)
    return false;

  /* check if GPU is supported */
  if (!support_device(kernel_features)) {
    return false;
  }

  /* get kernel */
  const char *kernel_name = "kernel";
  string fatbin = compile_kernel(kernel_features, kernel_name);
  if (fatbin.empty())
    return false;

  /* open module */
  HIPContextScope scope(this);

  string fatbin_data;
  hipError_t result;

  if (path_read_text(fatbin, fatbin_data))
    result = hipModuleLoadData(&hipModule, fatbin_data.c_str());
  else
    result = hipErrorFileNotFound;

  if (result != hipSuccess)
    set_error(string_printf(
        "Failed to load HIP kernel from '%s' (%s)", fatbin.c_str(), hipewErrorString(result)));

  if (result == hipSuccess) {
    kernels.load(this);
    reserve_local_memory(kernel_features);
  }

  return (result == hipSuccess);
}

void HIPDevice::reserve_local_memory(const uint kernel_features)
{
  /* Together with hipDeviceLmemResizeToMax, this reserves local memory
   * needed for kernel launches, so that we can reliably figure out when
   * to allocate scene data in mapped host memory. */
  size_t total = 0, free_before = 0, free_after = 0;

  {
    HIPContextScope scope(this);
    hipMemGetInfo(&free_before, &total);
  }

  {
    /* Use the biggest kernel for estimation. */
    const DeviceKernel test_kernel = (kernel_features & KERNEL_FEATURE_NODE_RAYTRACE) ?
                                         DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE :
                                     (kernel_features & KERNEL_FEATURE_MNEE) ?
                                         DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_MNEE :
                                         DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE;

    /* Launch kernel, using just 1 block appears sufficient to reserve memory for all
     * multiprocessors. It would be good to do this in parallel for the multi GPU case
     * still to make it faster. */
    HIPDeviceQueue queue(this);

    device_ptr d_path_index = 0;
    device_ptr d_render_buffer = 0;
    int d_work_size = 0;
    DeviceKernelArguments args(&d_path_index, &d_render_buffer, &d_work_size);

    queue.init_execution();
    queue.enqueue(test_kernel, 1, args);
    queue.synchronize();
  }

  {
    HIPContextScope scope(this);
    hipMemGetInfo(&free_after, &total);
  }

  VLOG_INFO << "Local memory reserved " << string_human_readable_number(free_before - free_after)
            << " bytes. (" << string_human_readable_size(free_before - free_after) << ")";

#  if 0
  /* For testing mapped host memory, fill up device memory. */
  const size_t keep_mb = 1024;

  while (free_after > keep_mb * 1024 * 1024LL) {
    hipDeviceptr_t tmp;
    hip_assert(hipMalloc(&tmp, 10 * 1024 * 1024LL));
    hipMemGetInfo(&free_after, &total);
  }
#  endif
}

void HIPDevice::init_host_memory()
{
  /* Limit amount of host mapped memory, because allocating too much can
   * cause system instability. Leave at least half or 4 GB of system
   * memory free, whichever is smaller. */
  size_t default_limit = 4 * 1024 * 1024 * 1024LL;
  size_t system_ram = system_physical_ram();

  if (system_ram > 0) {
    if (system_ram / 2 > default_limit) {
      map_host_limit = system_ram - default_limit;
    }
    else {
      map_host_limit = system_ram / 2;
    }
  }
  else {
    VLOG_WARNING << "Mapped host memory disabled, failed to get system RAM";
    map_host_limit = 0;
  }

  /* Amount of device memory to keep free after texture memory
   * and working memory allocations respectively. We set the working
   * memory limit headroom lower so that some space is left after all
   * texture memory allocations. */
  device_working_headroom = 32 * 1024 * 1024LL;   // 32MB
  device_texture_headroom = 128 * 1024 * 1024LL;  // 128MB

  VLOG_INFO << "Mapped host memory limit set to " << string_human_readable_number(map_host_limit)
            << " bytes. (" << string_human_readable_size(map_host_limit) << ")";
}

void HIPDevice::load_texture_info()
{
  if (need_texture_info) {
    /* Unset flag before copying, so this does not loop indefinitely if the copy below calls
     * into 'move_textures_to_host' (which calls 'load_texture_info' again). */
    need_texture_info = false;
    texture_info.copy_to_device();
  }
}

void HIPDevice::move_textures_to_host(size_t size, bool for_texture)
{
  /* Break out of recursive call, which can happen when moving memory on a multi device. */
  static bool any_device_moving_textures_to_host = false;
  if (any_device_moving_textures_to_host) {
    return;
  }

  /* Signal to reallocate textures in host memory only. */
  move_texture_to_host = true;

  while (size > 0) {
    /* Find suitable memory allocation to move. */
    device_memory *max_mem = NULL;
    size_t max_size = 0;
    bool max_is_image = false;

    thread_scoped_lock lock(hip_mem_map_mutex);
    foreach (HIPMemMap::value_type &pair, hip_mem_map) {
      device_memory &mem = *pair.first;
      HIPMem *cmem = &pair.second;

      /* Can only move textures allocated on this device (and not those from peer devices).
       * And need to ignore memory that is already on the host. */
      if (!mem.is_resident(this) || cmem->use_mapped_host) {
        continue;
      }

      bool is_texture = (mem.type == MEM_TEXTURE || mem.type == MEM_GLOBAL) &&
                        (&mem != &texture_info);
      bool is_image = is_texture && (mem.data_height > 1);

      /* Can't move this type of memory. */
      if (!is_texture || cmem->array) {
        continue;
      }

      /* For other textures, only move image textures. */
      if (for_texture && !is_image) {
        continue;
      }

      /* Try to move largest allocation, prefer moving images. */
      if (is_image > max_is_image || (is_image == max_is_image && mem.device_size > max_size)) {
        max_is_image = is_image;
        max_size = mem.device_size;
        max_mem = &mem;
      }
    }
    lock.unlock();

    /* Move to host memory. This part is mutex protected since
     * multiple HIP devices could be moving the memory. The
     * first one will do it, and the rest will adopt the pointer. */
    if (max_mem) {
      VLOG_WORK << "Move memory from device to host: " << max_mem->name;

      static thread_mutex move_mutex;
      thread_scoped_lock lock(move_mutex);

      any_device_moving_textures_to_host = true;

      /* Potentially need to call back into multi device, so pointer mapping
       * and peer devices are updated. This is also necessary since the device
       * pointer may just be a key here, so cannot be accessed and freed directly.
       * Unfortunately it does mean that memory is reallocated on all other
       * devices as well, which is potentially dangerous when still in use (since
       * a thread rendering on another device would only be caught in this mutex
       * if it so happens to do an allocation at the same time as well). */
      max_mem->device_copy_to();
      size = (max_size >= size) ? 0 : size - max_size;

      any_device_moving_textures_to_host = false;
    }
    else {
      break;
    }
  }

  /* Unset flag before texture info is reloaded, since it should stay in device memory. */
  move_texture_to_host = false;

  /* Update texture info array with new pointers. */
  load_texture_info();
}

HIPDevice::HIPMem *HIPDevice::generic_alloc(device_memory &mem, size_t pitch_padding)
{
  HIPContextScope scope(this);

  hipDeviceptr_t device_pointer = 0;
  size_t size = mem.memory_size() + pitch_padding;

  hipError_t mem_alloc_result = hipErrorOutOfMemory;
  const char *status = "";

  /* First try allocating in device memory, respecting headroom. We make
   * an exception for texture info. It is small and frequently accessed,
   * so treat it as working memory.
   *
   * If there is not enough room for working memory, we will try to move
   * textures to host memory, assuming the performance impact would have
   * been worse for working memory. */
  bool is_texture = (mem.type == MEM_TEXTURE || mem.type == MEM_GLOBAL) && (&mem != &texture_info);
  bool is_image = is_texture && (mem.data_height > 1);

  size_t headroom = (is_texture) ? device_texture_headroom : device_working_headroom;

  size_t total = 0, free = 0;
  hipMemGetInfo(&free, &total);

  /* Move textures to host memory if needed. */
  if (!move_texture_to_host && !is_image && (size + headroom) >= free && can_map_host) {
    move_textures_to_host(size + headroom - free, is_texture);
    hipMemGetInfo(&free, &total);
  }

  /* Allocate in device memory. */
  if (!move_texture_to_host && (size + headroom) < free) {
    mem_alloc_result = hipMalloc(&device_pointer, size);
    if (mem_alloc_result == hipSuccess) {
      status = " in device memory";
    }
  }

  /* Fall back to mapped host memory if needed and possible. */
  void *shared_pointer = 0;

  if (mem_alloc_result != hipSuccess && can_map_host) {
    if (mem.shared_pointer) {
      /* Another device already allocated host memory. */
      mem_alloc_result = hipSuccess;
      shared_pointer = mem.shared_pointer;
    }
    else if (map_host_used + size < map_host_limit) {
      /* Allocate host memory ourselves. */
      mem_alloc_result = hipHostMalloc(
          &shared_pointer, size, hipHostMallocMapped | hipHostMallocWriteCombined);

      assert((mem_alloc_result == hipSuccess && shared_pointer != 0) ||
             (mem_alloc_result != hipSuccess && shared_pointer == 0));
    }

    if (mem_alloc_result == hipSuccess) {
      hip_assert(hipHostGetDevicePointer(&device_pointer, shared_pointer, 0));
      map_host_used += size;
      status = " in host memory";
    }
  }

  if (mem_alloc_result != hipSuccess) {
    status = " failed, out of device and host memory";
    set_error("System is out of GPU and shared host memory");
  }

  if (mem.name) {
    VLOG_WORK << "Buffer allocate: " << mem.name << ", "
              << string_human_readable_number(mem.memory_size()) << " bytes. ("
              << string_human_readable_size(mem.memory_size()) << ")" << status;
  }

  mem.device_pointer = (device_ptr)device_pointer;
  mem.device_size = size;
  stats.mem_alloc(size);

  if (!mem.device_pointer) {
    return NULL;
  }

  /* Insert into map of allocations. */
  thread_scoped_lock lock(hip_mem_map_mutex);
  HIPMem *cmem = &hip_mem_map[&mem];
  if (shared_pointer != 0) {
    /* Replace host pointer with our host allocation. Only works if
     * HIP memory layout is the same and has no pitch padding. Also
     * does not work if we move textures to host during a render,
     * since other devices might be using the memory. */

    if (!move_texture_to_host && pitch_padding == 0 && mem.host_pointer &&
        mem.host_pointer != shared_pointer) {
      memcpy(shared_pointer, mem.host_pointer, size);

      /* A call to device_memory::host_free() should be preceded by
       * a call to device_memory::device_free() for host memory
       * allocated by a device to be handled properly.
       * Two exceptions are here and a call in OptiXDevice::generic_alloc(), where
       * the current host memory can be assumed to be allocated by
       * device_memory::host_alloc(), not by a device. */
      mem.host_free();
      mem.host_pointer = shared_pointer;
    }
    mem.shared_pointer = shared_pointer;
    mem.shared_counter++;
    cmem->use_mapped_host = true;
  }
  else {
    cmem->use_mapped_host = false;
  }

  return cmem;
}

void HIPDevice::generic_copy_to(device_memory &mem)
{
  if (!mem.host_pointer || !mem.device_pointer) {
    return;
  }

  /* If use_mapped_host of mem is false, the current device only uses device memory allocated by
   * hipMalloc regardless of mem.host_pointer and mem.shared_pointer, and should copy data from
   * mem.host_pointer. */
  thread_scoped_lock lock(hip_mem_map_mutex);
  if (!hip_mem_map[&mem].use_mapped_host || mem.host_pointer != mem.shared_pointer) {
    const HIPContextScope scope(this);
    hip_assert(
        hipMemcpyHtoD((hipDeviceptr_t)mem.device_pointer, mem.host_pointer, mem.memory_size()));
  }
}

void HIPDevice::generic_free(device_memory &mem)
{
  if (mem.device_pointer) {
    HIPContextScope scope(this);
    thread_scoped_lock lock(hip_mem_map_mutex);
    DCHECK(hip_mem_map.find(&mem) != hip_mem_map.end());
    const HIPMem &cmem = hip_mem_map[&mem];

    /* If cmem.use_mapped_host is true, reference counting is used
     * to safely free a mapped host memory. */
    if (cmem.use_mapped_host) {
      assert(mem.shared_pointer);
      if (mem.shared_pointer) {
        assert(mem.shared_counter > 0);
        if (--mem.shared_counter == 0) {
          if (mem.host_pointer == mem.shared_pointer) {
            mem.host_pointer = 0;
          }
          hipHostFree(mem.shared_pointer);
          mem.shared_pointer = 0;
        }
      }
      map_host_used -= mem.device_size;
    }
    else {
      /* Free device memory. */
      hip_assert(hipFree(mem.device_pointer));
    }

    stats.mem_free(mem.device_size);
    mem.device_pointer = 0;
    mem.device_size = 0;

    hip_mem_map.erase(hip_mem_map.find(&mem));
  }
}

void HIPDevice::mem_alloc(device_memory &mem)
{
  if (mem.type == MEM_TEXTURE) {
    assert(!"mem_alloc not supported for textures.");
  }
  else if (mem.type == MEM_GLOBAL) {
    assert(!"mem_alloc not supported for global memory.");
  }
  else {
    generic_alloc(mem);
  }
}

void HIPDevice::mem_copy_to(device_memory &mem)
{
  if (mem.type == MEM_GLOBAL) {
    global_free(mem);
    global_alloc(mem);
  }
  else if (mem.type == MEM_TEXTURE) {
    tex_free((device_texture &)mem);
    tex_alloc((device_texture &)mem);
  }
  else {
    if (!mem.device_pointer) {
      generic_alloc(mem);
    }
    generic_copy_to(mem);
  }
}

void HIPDevice::mem_copy_from(device_memory &mem, size_t y, size_t w, size_t h, size_t elem)
{
  if (mem.type == MEM_TEXTURE || mem.type == MEM_GLOBAL) {
    assert(!"mem_copy_from not supported for textures.");
  }
  else if (mem.host_pointer) {
    const size_t size = elem * w * h;
    const size_t offset = elem * y * w;

    if (mem.device_pointer) {
      const HIPContextScope scope(this);
      hip_assert(hipMemcpyDtoH(
          (char *)mem.host_pointer + offset, (hipDeviceptr_t)mem.device_pointer + offset, size));
    }
    else {
      memset((char *)mem.host_pointer + offset, 0, size);
    }
  }
}

void HIPDevice::mem_zero(device_memory &mem)
{
  if (!mem.device_pointer) {
    mem_alloc(mem);
  }
  if (!mem.device_pointer) {
    return;
  }

  /* If use_mapped_host of mem is false, mem.device_pointer currently refers to device memory
   * regardless of mem.host_pointer and mem.shared_pointer. */
  thread_scoped_lock lock(hip_mem_map_mutex);
  if (!hip_mem_map[&mem].use_mapped_host || mem.host_pointer != mem.shared_pointer) {
    const HIPContextScope scope(this);
    hip_assert(hipMemsetD8((hipDeviceptr_t)mem.device_pointer, 0, mem.memory_size()));
  }
  else if (mem.host_pointer) {
    memset(mem.host_pointer, 0, mem.memory_size());
  }
}

void HIPDevice::mem_free(device_memory &mem)
{
  if (mem.type == MEM_GLOBAL) {
    global_free(mem);
  }
  else if (mem.type == MEM_TEXTURE) {
    tex_free((device_texture &)mem);
  }
  else {
    generic_free(mem);
  }
}

device_ptr HIPDevice::mem_alloc_sub_ptr(device_memory &mem, size_t offset, size_t /*size*/)
{
  return (device_ptr)(((char *)mem.device_pointer) + mem.memory_elements_size(offset));
}

void HIPDevice::const_copy_to(const char *name, void *host, size_t size)
{
  HIPContextScope scope(this);
  hipDeviceptr_t mem;
  size_t bytes;

  hip_assert(hipModuleGetGlobal(&mem, &bytes, hipModule, "kernel_params"));
  assert(bytes == sizeof(KernelParamsHIP));

  /* Update data storage pointers in launch parameters. */
#  define KERNEL_DATA_ARRAY(data_type, data_name) \
    if (strcmp(name, #data_name) == 0) { \
      hip_assert(hipMemcpyHtoD(mem + offsetof(KernelParamsHIP, data_name), host, size)); \
      return; \
    }
  KERNEL_DATA_ARRAY(KernelData, data)
  KERNEL_DATA_ARRAY(IntegratorStateGPU, integrator_state)
#  include "kernel/data_arrays.h"
#  undef KERNEL_DATA_ARRAY
}

void HIPDevice::global_alloc(device_memory &mem)
{
  if (mem.is_resident(this)) {
    generic_alloc(mem);
    generic_copy_to(mem);
  }

  const_copy_to(mem.name, &mem.device_pointer, sizeof(mem.device_pointer));
}

void HIPDevice::global_free(device_memory &mem)
{
  if (mem.is_resident(this) && mem.device_pointer) {
    generic_free(mem);
  }
}

void HIPDevice::tex_alloc(device_texture &mem)
{
  HIPContextScope scope(this);

  size_t dsize = datatype_size(mem.data_type);
  size_t size = mem.memory_size();

  hipTextureAddressMode address_mode = hipAddressModeWrap;
  switch (mem.info.extension) {
    case EXTENSION_REPEAT:
      address_mode = hipAddressModeWrap;
      break;
    case EXTENSION_EXTEND:
      address_mode = hipAddressModeClamp;
      break;
    case EXTENSION_CLIP:
      /* TODO(@arya): setting this to Mode Clamp instead of Mode Border
       * because it's unsupported in HIP. */
      address_mode = hipAddressModeClamp;
      break;
    default:
      assert(0);
      break;
  }

  hipTextureFilterMode filter_mode;
  if (mem.info.interpolation == INTERPOLATION_CLOSEST) {
    filter_mode = hipFilterModePoint;
  }
  else {
    filter_mode = hipFilterModeLinear;
  }

  /* Image Texture Storage */
  hipArray_Format format;
  switch (mem.data_type) {
    case TYPE_UCHAR:
      format = HIP_AD_FORMAT_UNSIGNED_INT8;
      break;
    case TYPE_UINT16:
      format = HIP_AD_FORMAT_UNSIGNED_INT16;
      break;
    case TYPE_UINT:
      format = HIP_AD_FORMAT_UNSIGNED_INT32;
      break;
    case TYPE_INT:
      format = HIP_AD_FORMAT_SIGNED_INT32;
      break;
    case TYPE_FLOAT:
      format = HIP_AD_FORMAT_FLOAT;
      break;
    case TYPE_HALF:
      format = HIP_AD_FORMAT_HALF;
      break;
    default:
      assert(0);
      return;
  }

  HIPMem *cmem = NULL;
  hArray array_3d = NULL;
  size_t src_pitch = mem.data_width * dsize * mem.data_elements;
  size_t dst_pitch = src_pitch;

  if (!mem.is_resident(this)) {
    thread_scoped_lock lock(hip_mem_map_mutex);
    cmem = &hip_mem_map[&mem];
    cmem->texobject = 0;

    if (mem.data_depth > 1) {
      array_3d = (hArray)mem.device_pointer;
      cmem->array = array_3d;
    }
    else if (mem.data_height > 0) {
      dst_pitch = align_up(src_pitch, pitch_alignment);
    }
  }
  else if (mem.data_depth > 1) {
    /* 3D texture using array, there is no API for linear memory. */
    HIP_ARRAY3D_DESCRIPTOR desc;

    desc.Width = mem.data_width;
    desc.Height = mem.data_height;
    desc.Depth = mem.data_depth;
    desc.Format = format;
    desc.NumChannels = mem.data_elements;
    desc.Flags = 0;

    VLOG_WORK << "Array 3D allocate: " << mem.name << ", "
              << string_human_readable_number(mem.memory_size()) << " bytes. ("
              << string_human_readable_size(mem.memory_size()) << ")";

    hip_assert(hipArray3DCreate((hArray *)&array_3d, &desc));

    if (!array_3d) {
      return;
    }

    HIP_MEMCPY3D param;
    memset(&param, 0, sizeof(HIP_MEMCPY3D));
    param.dstMemoryType = hipMemoryTypeArray;
    param.dstArray = array_3d;
    param.srcMemoryType = hipMemoryTypeHost;
    param.srcHost = mem.host_pointer;
    param.srcPitch = src_pitch;
    param.WidthInBytes = param.srcPitch;
    param.Height = mem.data_height;
    param.Depth = mem.data_depth;

    hip_assert(hipDrvMemcpy3D(&param));

    mem.device_pointer = (device_ptr)array_3d;
    mem.device_size = size;
    stats.mem_alloc(size);

    thread_scoped_lock lock(hip_mem_map_mutex);
    cmem = &hip_mem_map[&mem];
    cmem->texobject = 0;
    cmem->array = array_3d;
  }
  else if (mem.data_height > 0) {
    /* 2D texture, using pitch aligned linear memory. */
    dst_pitch = align_up(src_pitch, pitch_alignment);
    size_t dst_size = dst_pitch * mem.data_height;

    cmem = generic_alloc(mem, dst_size - mem.memory_size());
    if (!cmem) {
      return;
    }

    hip_Memcpy2D param;
    memset(&param, 0, sizeof(param));
    param.dstMemoryType = hipMemoryTypeDevice;
    param.dstDevice = mem.device_pointer;
    param.dstPitch = dst_pitch;
    param.srcMemoryType = hipMemoryTypeHost;
    param.srcHost = mem.host_pointer;
    param.srcPitch = src_pitch;
    param.WidthInBytes = param.srcPitch;
    param.Height = mem.data_height;

    hip_assert(hipDrvMemcpy2DUnaligned(&param));
  }
  else {
    /* 1D texture, using linear memory. */
    cmem = generic_alloc(mem);
    if (!cmem) {
      return;
    }

    hip_assert(hipMemcpyHtoD(mem.device_pointer, mem.host_pointer, size));
  }

  /* Resize once. */
  const uint slot = mem.slot;
  if (slot >= texture_info.size()) {
    /* Allocate some slots in advance, to reduce amount
     * of re-allocations. */
    texture_info.resize(slot + 128);
  }

  /* Set Mapping and tag that we need to (re-)upload to device. */
  texture_info[slot] = mem.info;
  need_texture_info = true;

  if (mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT &&
      mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT3 &&
      mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FPN &&
      mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FP16) {
    /* Bindless textures. */
    hipResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));

    if (array_3d) {
      resDesc.resType = hipResourceTypeArray;
      resDesc.res.array.h_Array = array_3d;
      resDesc.flags = 0;
    }
    else if (mem.data_height > 0) {
      resDesc.resType = hipResourceTypePitch2D;
      resDesc.res.pitch2D.devPtr = mem.device_pointer;
      resDesc.res.pitch2D.format = format;
      resDesc.res.pitch2D.numChannels = mem.data_elements;
      resDesc.res.pitch2D.height = mem.data_height;
      resDesc.res.pitch2D.width = mem.data_width;
      resDesc.res.pitch2D.pitchInBytes = dst_pitch;
    }
    else {
      resDesc.resType = hipResourceTypeLinear;
      resDesc.res.linear.devPtr = mem.device_pointer;
      resDesc.res.linear.format = format;
      resDesc.res.linear.numChannels = mem.data_elements;
      resDesc.res.linear.sizeInBytes = mem.device_size;
    }

    hipTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = address_mode;
    texDesc.addressMode[1] = address_mode;
    texDesc.addressMode[2] = address_mode;
    texDesc.filterMode = filter_mode;
    texDesc.flags = HIP_TRSF_NORMALIZED_COORDINATES;

    thread_scoped_lock lock(hip_mem_map_mutex);
    cmem = &hip_mem_map[&mem];

    hip_assert(hipTexObjectCreate(&cmem->texobject, &resDesc, &texDesc, NULL));

    texture_info[slot].data = (uint64_t)cmem->texobject;
  }
  else {
    texture_info[slot].data = (uint64_t)mem.device_pointer;
  }
}

void HIPDevice::tex_free(device_texture &mem)
{
  if (mem.device_pointer) {
    HIPContextScope scope(this);
    thread_scoped_lock lock(hip_mem_map_mutex);
    DCHECK(hip_mem_map.find(&mem) != hip_mem_map.end());
    const HIPMem &cmem = hip_mem_map[&mem];

    if (cmem.texobject) {
      /* Free bindless texture. */
      hipTexObjectDestroy(cmem.texobject);
    }

    if (!mem.is_resident(this)) {
      /* Do not free memory here, since it was allocated on a different device. */
      hip_mem_map.erase(hip_mem_map.find(&mem));
    }
    else if (cmem.array) {
      /* Free array. */
      hipArrayDestroy(cmem.array);
      stats.mem_free(mem.device_size);
      mem.device_pointer = 0;
      mem.device_size = 0;

      hip_mem_map.erase(hip_mem_map.find(&mem));
    }
    else {
      lock.unlock();
      generic_free(mem);
    }
  }
}

unique_ptr<DeviceQueue> HIPDevice::gpu_queue_create()
{
  return make_unique<HIPDeviceQueue>(this);
}

bool HIPDevice::should_use_graphics_interop()
{
  /* Check whether this device is part of OpenGL context.
   *
   * Using HIP device for graphics interoperability which is not part of the OpenGL context is
   * possible, but from the empirical measurements it can be considerably slower than using naive
   * pixels copy. */

  /* Disable graphics interop for now, because of driver bug in 21.40.
   * See T92972. */
#  if 0
  HIPContextScope scope(this);

  int num_all_devices = 0;
  hip_assert(hipGetDeviceCount(&num_all_devices));

  if (num_all_devices == 0) {
    return false;
  }

  vector<hipDevice_t> gl_devices(num_all_devices);
  uint num_gl_devices = 0;
  hipGLGetDevices(&num_gl_devices, gl_devices.data(), num_all_devices, hipGLDeviceListAll);

  for (hipDevice_t gl_device : gl_devices) {
    if (gl_device == hipDevice) {
      return true;
    }
  }
#  endif

  return false;
}

int HIPDevice::get_num_multiprocessors()
{
  return get_device_default_attribute(hipDeviceAttributeMultiprocessorCount, 0);
}

int HIPDevice::get_max_num_threads_per_multiprocessor()
{
  return get_device_default_attribute(hipDeviceAttributeMaxThreadsPerMultiProcessor, 0);
}

bool HIPDevice::get_device_attribute(hipDeviceAttribute_t attribute, int *value)
{
  HIPContextScope scope(this);

  return hipDeviceGetAttribute(value, attribute, hipDevice) == hipSuccess;
}

int HIPDevice::get_device_default_attribute(hipDeviceAttribute_t attribute, int default_value)
{
  int value = 0;
  if (!get_device_attribute(attribute, &value)) {
    return default_value;
  }
  return value;
}

CCL_NAMESPACE_END

#endif