Welcome to mirror list, hosted at ThFree Co, Russian Federation.

state_flow.h « integrator « kernel « cycles « intern - git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
blob: fed74d494341a1d6e6f20a783c465de17936ef6c (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2011-2022 Blender Foundation */

#pragma once

#include "kernel/types.h"
#include "util/atomic.h"

CCL_NAMESPACE_BEGIN

/* Control Flow
 *
 * Utilities for control flow between kernels. The implementation may differ per device,
 * or even be handled on the host side. To abstract such differences, to allow experimenting
 * with different implementations, and to aid debugging, this control flow is expressed with
 * macros.
 *
 * There is a main path for regular path tracing of camera rays. Shadow rays for next
 * event estimation branch off from this into their own path, which may be computed in
 * parallel while the main path continues.
 *
 * Each kernel on the main path must call one of these functions. These may not be called
 * multiple times from the same kernel.
 *
 * INTEGRATOR_PATH_INIT(next_kernel)
 * INTEGRATOR_PATH_NEXT(current_kernel, next_kernel)
 * INTEGRATOR_PATH_TERMINATE(current_kernel)
 *
 * For the shadow path similar functions are used, and again each shadow kernel must call
 * one of them, and only once.
 */

/* A (shadow) path is terminated when it has no kernel queued: queued_kernel == 0
 * is the sentinel written by the *_TERMINATE macros below. */
#define INTEGRATOR_PATH_IS_TERMINATED (INTEGRATOR_STATE(state, path, queued_kernel) == 0)
#define INTEGRATOR_SHADOW_PATH_IS_TERMINATED \
  (INTEGRATOR_STATE(state, shadow_path, queued_kernel) == 0)

#ifdef __KERNEL_GPU__

/* GPU variants: a per-kernel counter (queue_counter->num_queued) tracks how many
 * paths are queued for each kernel, and each path records its next kernel in
 * queued_kernel. Counters are updated with atomics since many paths run in
 * parallel. NOTE(review): these macros expand to multiple statements without a
 * surrounding do/while(0) — callers must not use them as the single statement
 * of an unbraced if/else. */

/* Queue a path for its first kernel: bump that kernel's queue count and record
 * it on the path state. */
#  define INTEGRATOR_PATH_INIT(next_kernel) \
    atomic_fetch_and_add_uint32(&kernel_integrator_state.queue_counter->num_queued[next_kernel], \
                                1); \
    INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = next_kernel;
/* Move a path from the current kernel's queue to the next kernel's queue. */
#  define INTEGRATOR_PATH_NEXT(current_kernel, next_kernel) \
    atomic_fetch_and_sub_uint32( \
        &kernel_integrator_state.queue_counter->num_queued[current_kernel], 1); \
    atomic_fetch_and_add_uint32(&kernel_integrator_state.queue_counter->num_queued[next_kernel], \
                                1); \
    INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = next_kernel;
/* Terminate a path: remove it from its kernel's queue and clear queued_kernel
 * (0 == terminated, see INTEGRATOR_PATH_IS_TERMINATED). */
#  define INTEGRATOR_PATH_TERMINATE(current_kernel) \
    atomic_fetch_and_sub_uint32( \
        &kernel_integrator_state.queue_counter->num_queued[current_kernel], 1); \
    INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = 0;

/* Branch a shadow path off the main path: atomically claim a fresh shadow path
 * index and queue it for next_kernel. Declares an IntegratorShadowState variable
 * named `shadow_state` that escapes into the calling scope (this is why the
 * macro cannot be wrapped in a block). NOTE(review): the state and shadow_type
 * arguments are unused here; they are needed by the CPU variant below. */
#  define INTEGRATOR_SHADOW_PATH_INIT(shadow_state, state, next_kernel, shadow_type) \
    IntegratorShadowState shadow_state = atomic_fetch_and_add_uint32( \
        &kernel_integrator_state.next_shadow_path_index[0], 1); \
    atomic_fetch_and_add_uint32(&kernel_integrator_state.queue_counter->num_queued[next_kernel], \
                                1); \
    INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, queued_kernel) = next_kernel;
/* Same as INTEGRATOR_PATH_NEXT, but for the shadow path state. */
#  define INTEGRATOR_SHADOW_PATH_NEXT(current_kernel, next_kernel) \
    atomic_fetch_and_sub_uint32( \
        &kernel_integrator_state.queue_counter->num_queued[current_kernel], 1); \
    atomic_fetch_and_add_uint32(&kernel_integrator_state.queue_counter->num_queued[next_kernel], \
                                1); \
    INTEGRATOR_STATE_WRITE(state, shadow_path, queued_kernel) = next_kernel;
/* Same as INTEGRATOR_PATH_TERMINATE, but for the shadow path state. */
#  define INTEGRATOR_SHADOW_PATH_TERMINATE(current_kernel) \
    atomic_fetch_and_sub_uint32( \
        &kernel_integrator_state.queue_counter->num_queued[current_kernel], 1); \
    INTEGRATOR_STATE_WRITE(state, shadow_path, queued_kernel) = 0;

/* Sorted variants: additionally record a shader sort key on the path and bump
 * the per-key counter (sort_key_counter[next_kernel][key]). The key argument is
 * captured in a local (key_) inside a block so it is evaluated exactly once. */
#  define INTEGRATOR_PATH_INIT_SORTED(next_kernel, key) \
    { \
      const int key_ = key; \
      atomic_fetch_and_add_uint32( \
          &kernel_integrator_state.queue_counter->num_queued[next_kernel], 1); \
      INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = next_kernel; \
      INTEGRATOR_STATE_WRITE(state, path, shader_sort_key) = key_; \
      atomic_fetch_and_add_uint32(&kernel_integrator_state.sort_key_counter[next_kernel][key_], \
                                  1); \
    }
#  define INTEGRATOR_PATH_NEXT_SORTED(current_kernel, next_kernel, key) \
    { \
      const int key_ = key; \
      atomic_fetch_and_sub_uint32( \
          &kernel_integrator_state.queue_counter->num_queued[current_kernel], 1); \
      atomic_fetch_and_add_uint32( \
          &kernel_integrator_state.queue_counter->num_queued[next_kernel], 1); \
      INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = next_kernel; \
      INTEGRATOR_STATE_WRITE(state, path, shader_sort_key) = key_; \
      atomic_fetch_and_add_uint32(&kernel_integrator_state.sort_key_counter[next_kernel][key_], \
                                  1); \
    }

#else

/* CPU (non-GPU) variants: no queue counters exist, so only queued_kernel is
 * written. The (void) casts consume arguments the CPU variant does not need,
 * silencing unused-argument warnings while keeping the call sites identical
 * to the GPU variants. */
#  define INTEGRATOR_PATH_INIT(next_kernel) \
    INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = next_kernel;
/* The sort key only matters for GPU wavefront scheduling; ignored here. */
#  define INTEGRATOR_PATH_INIT_SORTED(next_kernel, key) \
    { \
      INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = next_kernel; \
      (void)key; \
    }
#  define INTEGRATOR_PATH_NEXT(current_kernel, next_kernel) \
    { \
      INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = next_kernel; \
      (void)current_kernel; \
    }
/* Clear queued_kernel (0 == terminated, see INTEGRATOR_PATH_IS_TERMINATED). */
#  define INTEGRATOR_PATH_TERMINATE(current_kernel) \
    { \
      INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = 0; \
      (void)current_kernel; \
    }
#  define INTEGRATOR_PATH_NEXT_SORTED(current_kernel, next_kernel, key) \
    { \
      INTEGRATOR_STATE_WRITE(state, path, queued_kernel) = next_kernel; \
      (void)key; \
      (void)current_kernel; \
    }

/* On this path the shadow state is taken from the shadow_type member embedded
 * in the main path state. Declares an IntegratorShadowState variable named
 * `shadow_state` that escapes into the calling scope, matching the GPU variant. */
#  define INTEGRATOR_SHADOW_PATH_INIT(shadow_state, state, next_kernel, shadow_type) \
    IntegratorShadowState shadow_state = &state->shadow_type; \
    INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, queued_kernel) = next_kernel;
#  define INTEGRATOR_SHADOW_PATH_NEXT(current_kernel, next_kernel) \
    { \
      INTEGRATOR_STATE_WRITE(state, shadow_path, queued_kernel) = next_kernel; \
      (void)current_kernel; \
    }
#  define INTEGRATOR_SHADOW_PATH_TERMINATE(current_kernel) \
    { \
      INTEGRATOR_STATE_WRITE(state, shadow_path, queued_kernel) = 0; \
      (void)current_kernel; \
    }

#endif

CCL_NAMESPACE_END