git.blender.org/blender.git
author     Brecht Van Lommel <brecht@blender.org>  2022-06-17 18:16:37 +0300
committer  Brecht Van Lommel <brecht@blender.org>  2022-06-20 13:30:48 +0300
commit     ff1883307f12a8b734bfcf87b01743dc73afae75 (patch)
tree       95fbecc1e681e89f6a5d030cb5f5f96879dc7fa7 /intern/cycles/device/cuda/device_impl.cpp
parent     b73a52302edcd99f086fc26fc62a8ed4db29d562 (diff)
Cleanup: renaming and consistency for kernel data
* Rename "texture" to "data array". This has not used textures for a long time, there are just global memory arrays now. (On old CUDA GPUs there was a cache for textures but not global memory, so we used to put all data in textures.) * For CUDA and HIP, put globals in KernelParams struct like other devices. * Drop __ prefix for data array names, no possibility for naming conflict now that these are in a struct.
Diffstat (limited to 'intern/cycles/device/cuda/device_impl.cpp')
-rw-r--r--  intern/cycles/device/cuda/device_impl.cpp  | 21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/intern/cycles/device/cuda/device_impl.cpp b/intern/cycles/device/cuda/device_impl.cpp
index e75224abe90..00851a8e91c 100644
--- a/intern/cycles/device/cuda/device_impl.cpp
+++ b/intern/cycles/device/cuda/device_impl.cpp
@@ -23,6 +23,8 @@
# include "util/types.h"
# include "util/windows.h"
+# include "kernel/device/cuda/globals.h"
+
CCL_NAMESPACE_BEGIN
class CUDADevice;
@@ -51,7 +53,7 @@ void CUDADevice::set_error(const string &error)
}
CUDADevice::CUDADevice(const DeviceInfo &info, Stats &stats, Profiler &profiler)
- : Device(info, stats, profiler), texture_info(this, "__texture_info", MEM_GLOBAL)
+ : Device(info, stats, profiler), texture_info(this, "texture_info", MEM_GLOBAL)
{
first_error = true;
@@ -900,9 +902,19 @@ void CUDADevice::const_copy_to(const char *name, void *host, size_t size)
CUdeviceptr mem;
size_t bytes;
- cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, name));
- // assert(bytes == size);
- cuda_assert(cuMemcpyHtoD(mem, host, size));
+ cuda_assert(cuModuleGetGlobal(&mem, &bytes, cuModule, "kernel_params"));
+ assert(bytes == sizeof(KernelParamsCUDA));
+
+ /* Update data storage pointers in launch parameters. */
+# define KERNEL_DATA_ARRAY(data_type, data_name) \
+ if (strcmp(name, #data_name) == 0) { \
+ cuda_assert(cuMemcpyHtoD(mem + offsetof(KernelParamsCUDA, data_name), host, size)); \
+ return; \
+ }
+ KERNEL_DATA_ARRAY(KernelData, data)
+ KERNEL_DATA_ARRAY(IntegratorStateGPU, integrator_state)
+# include "kernel/data_arrays.h"
+# undef KERNEL_DATA_ARRAY
}
void CUDADevice::global_alloc(device_memory &mem)
@@ -926,7 +938,6 @@ void CUDADevice::tex_alloc(device_texture &mem)
{
CUDAContextScope scope(this);
- string bind_name = mem.name;
size_t dsize = datatype_size(mem.data_type);
size_t size = mem.memory_size();