/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2021-2022 Blender Foundation */

#pragma once

CCL_NAMESPACE_BEGIN

/* Given an array of states, build an array of indices for which the states
 * are active.
 *
 * Shared memory requirement is `sizeof(int) * (number_of_warps + 1)`. */
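
/* For example, given 8 states with activity flags [1, 0, 0, 1, 1, 0, 1, 0]
 * processed by a single block and `*num_indices` initially zero, the result is
 * indices = [0, 3, 4, 6] and *num_indices = 4. Within a block the active
 * indices are written in ascending order; blocks reserve their output ranges
 * atomically, so the relative order of different blocks in `indices` is not
 * guaranteed. */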

#include "util/atomic.h"

#ifdef __HIP__
#  define GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE 1024
#else
#  define GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE 512
#endif
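
/* With the defaults above, the shared memory requirement stated earlier stays small:
 * assuming 32-wide warps on CUDA/OptiX, a 512-thread block needs
 * (512 / 32 + 1) * sizeof(int) = 68 bytes, and assuming 64-wide wavefronts on HIP,
 * a 1024-thread block needs (1024 / 64 + 1) * sizeof(int) = 68 bytes. */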

/* TODO: abstract more device differences, define ccl_gpu_local_syncthreads,
 * ccl_gpu_thread_warp, ccl_gpu_warp_index, ccl_gpu_num_warps for all devices
 * and keep device-specific code in compat.h. */

#ifdef __KERNEL_ONEAPI__
#  ifdef WITH_ONEAPI_SYCL_HOST_ENABLED
template<typename IsActiveOp>
void cpu_serial_active_index_array_impl(const uint num_states,
                                        ccl_global int *ccl_restrict indices,
                                        ccl_global int *ccl_restrict num_indices,
                                        IsActiveOp is_active_op)
{
  int write_index = 0;
  for (int state_index = 0; state_index < num_states; state_index++) {
    if (is_active_op(state_index))
      indices[write_index++] = state_index;
  }
  *num_indices = write_index;
}
#  endif /* WITH_ONEAPI_SYCL_HOST_ENABLED */

template<typename IsActiveOp>
void gpu_parallel_active_index_array_impl(const uint num_states,
                                          ccl_global int *ccl_restrict indices,
                                          ccl_global int *ccl_restrict num_indices,
                                          IsActiveOp is_active_op)
{
  const sycl::nd_item<1> &item_id = sycl::ext::oneapi::experimental::this_nd_item<1>();
  const uint blocksize = item_id.get_local_range(0);

  sycl::multi_ptr<int[GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE + 1],
                  sycl::access::address_space::local_space>
      ptr = sycl::ext::oneapi::group_local_memory<
          int[GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE + 1]>(item_id.get_group());
  int *warp_offset = *ptr;

  /* NOTE(@nsirgien): This computes the same values as the generic code below, but
   * faster for DPC++: CUDA already seems to optimize the "%", "/" and "*" based
   * calculations below, while DPC++ does not, so it is better to query the needed
   * parameters directly. Switching from this to the computation below causes a
   * 2.5x performance slowdown. */
  const uint thread_index = item_id.get_local_id(0);
  const uint thread_warp = item_id.get_sub_group().get_local_id();

  const uint warp_index = item_id.get_sub_group().get_group_id();
  const uint num_warps = item_id.get_sub_group().get_group_range()[0];

  const uint state_index = item_id.get_global_id(0);

  /* Test if state corresponding to this thread is active. */
  const uint is_active = (state_index < num_states) ? is_active_op(state_index) : 0;
#else /* !__KERNEL_ONEAPI__ */
#  ifndef __KERNEL_METAL__
template<uint blocksize, typename IsActiveOp>
__device__
#  endif
    void
    gpu_parallel_active_index_array_impl(const uint num_states,
                                         ccl_global int *indices,
                                         ccl_global int *num_indices,
#  ifdef __KERNEL_METAL__
                                         const uint is_active,
                                         const uint blocksize,
                                         const int thread_index,
                                         const uint state_index,
                                         const int ccl_gpu_warp_size,
                                         const int thread_warp,
                                         const int warp_index,
                                         const int num_warps,
                                         threadgroup int *warp_offset)
{
#  else
                                          IsActiveOp is_active_op)
{
  extern ccl_gpu_shared int warp_offset[];

  const uint thread_index = ccl_gpu_thread_idx_x;
  const uint thread_warp = thread_index % ccl_gpu_warp_size;

  const uint warp_index = thread_index / ccl_gpu_warp_size;
  const uint num_warps = blocksize / ccl_gpu_warp_size;

  const uint state_index = ccl_gpu_block_idx_x * blocksize + thread_index;

  /* Test if state corresponding to this thread is active. */
  const uint is_active = (state_index < num_states) ? is_active_op(state_index) : 0;
#  endif
#endif /* !__KERNEL_ONEAPI__ */
  /* For each thread within a warp, compute how many other active states precede it. */
#ifdef __KERNEL_ONEAPI__
  const uint thread_offset = sycl::exclusive_scan_over_group(
      item_id.get_sub_group(), is_active, std::plus<>());
#else
  const uint thread_offset = popcount(ccl_gpu_ballot(is_active) &
                                      ccl_gpu_thread_mask(thread_warp));
#endif
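
  /* For example, in a warp where only lanes 0, 1 and 3 are active the ballot mask
   * is 0b1011. Lane 3 keeps only the bits of lower lanes, 0b0011, so popcount
   * yields thread_offset = 2: two active states precede it within the warp. The
   * sub-group exclusive scan above computes the same per-lane offsets. */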

  /* Last thread in each warp stores the number of active states for its warp. */
#ifdef __KERNEL_ONEAPI__
  if (thread_warp == item_id.get_sub_group().get_local_range()[0] - 1) {
#else
  if (thread_warp == ccl_gpu_warp_size - 1) {
#endif
    warp_offset[warp_index] = thread_offset + is_active;
  }

#ifdef __KERNEL_ONEAPI__
  /* NOTE(@nsirgien): Only the local memory writes (warp_offset) matter here,
   * so the faster local barrier can be used. */
  ccl_gpu_local_syncthreads();
#else
  ccl_gpu_syncthreads();
#endif

  /* Last thread in block converts per-warp sizes to offsets, increments the global size
   * of the index array and gets the offset for this block to write to. */
  if (thread_index == blocksize - 1) {
    /* TODO: parallelize this. */
    int offset = 0;
    for (int i = 0; i < num_warps; i++) {
      int num_active = warp_offset[i];
      warp_offset[i] = offset;
      offset += num_active;
    }

    const uint block_num_active = warp_offset[warp_index] + thread_offset + is_active;
    warp_offset[num_warps] = atomic_fetch_and_add_uint32(num_indices, block_num_active);
  }
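
  /* For example, if the warps of a block found 3, 1 and 4 active states, warp_offset
   * becomes [0, 3, 4] and the block reserves 3 + 1 + 4 = 8 slots in the global array;
   * warp_offset[num_warps] then holds the previous *num_indices, which is the base
   * offset this block writes at below. */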

#ifdef __KERNEL_ONEAPI__
  /* NOTE(@nsirgien): Only the local memory writes (warp_offset) matter here,
   * so the faster local barrier can be used. */
  ccl_gpu_local_syncthreads();
#else
  ccl_gpu_syncthreads();
#endif

  /* Write to index array. */
  if (is_active) {
    const uint block_offset = warp_offset[num_warps];
    indices[block_offset + warp_offset[warp_index] + thread_offset] = state_index;
  }
}

#ifdef __KERNEL_METAL__

#  define gpu_parallel_active_index_array(dummy, num_states, indices, num_indices, is_active_op) \
    const uint is_active = (ccl_gpu_global_id_x() < num_states) ? \
                               is_active_op(ccl_gpu_global_id_x()) : \
                               0; \
    gpu_parallel_active_index_array_impl(num_states, \
                                         indices, \
                                         num_indices, \
                                         is_active, \
                                         metal_local_size, \
                                         metal_local_id, \
                                         metal_global_id, \
                                         simdgroup_size, \
                                         simd_lane_index, \
                                         simd_group_index, \
                                         num_simd_groups, \
                                         simdgroup_offset)
#elif defined(__KERNEL_ONEAPI__)
#  ifdef WITH_ONEAPI_SYCL_HOST_ENABLED
#    define gpu_parallel_active_index_array( \
        blocksize, num_states, indices, num_indices, is_active_op) \
      if (ccl_gpu_global_size_x() == 1) \
        cpu_serial_active_index_array_impl(num_states, indices, num_indices, is_active_op); \
      else \
        gpu_parallel_active_index_array_impl(num_states, indices, num_indices, is_active_op);
#  else
#    define gpu_parallel_active_index_array( \
        blocksize, num_states, indices, num_indices, is_active_op) \
      gpu_parallel_active_index_array_impl(num_states, indices, num_indices, is_active_op)
#  endif
#else

#  define gpu_parallel_active_index_array( \
      blocksize, num_states, indices, num_indices, is_active_op) \
    gpu_parallel_active_index_array_impl<blocksize>(num_states, indices, num_indices, is_active_op)

#endif
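
/* Example usage (sketch): a kernel passes the launch block size, the number of states,
 * the output arrays and a predicate returning non-zero for active states, e.g.
 *
 *   gpu_parallel_active_index_array(GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE,
 *                                   num_states, indices, num_indices, is_state_active);
 *
 * where `is_state_active` stands for whatever functor or lambda the calling kernel
 * provides; it is not defined in this header. On the CUDA/HIP path the `blocksize`
 * argument must match the block size the kernel is launched with. */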

CCL_NAMESPACE_END