Welcome to mirror list, hosted at ThFree Co, Russian Federation.

mikk_atomic_hash_set.hh « mikktspace « intern - git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
blob: 11d2a659966568562dde3355f0050b1abc8e4fc2 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
/* SPDX-License-Identifier: Apache-2.0
 *
 * Original code:
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * Modifications:
 * Copyright 2022 Blender Foundation. All rights reserved.
 */

/* Simplified version of Folly's AtomicHashArray
 * (https://github.com/facebook/folly/blob/main/folly/AtomicHashArray.h).
 *
 * Notable changes:
 * - Standalone and header-only.
 * - Behaves like a set, not like a map: There's no value type anymore, only keys.
 * - Capacity check logic have been removed, the code assumes you know the required size in
 * advance.
 * - Custom allocator support has been removed.
 * - Erase has been removed.
 * - Find has been removed.
 */

/** \file
 * \ingroup mikktspace
 */

#pragma once

#ifdef _MSC_VER
#  include <intrin.h>
#endif

#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <type_traits>
#include <utility>
#include <vector>
namespace mikk {

struct AtomicHashSetLinearProbeFcn {
  /* Advance one slot per probe (linear probing), wrapping back to the start
   * of the table when the end is reached. */
  inline size_t operator()(size_t idx, size_t /* numProbes */, size_t capacity) const
  {
    /* Step to the next slot. */
    const size_t next = idx + 1;

    /* Wrap around without paying for a modulus operation. */
    return LIKELY(next < capacity) ? next : next - capacity;
  }
};

struct AtomicHashSetQuadraticProbeFcn {
  /* Advance by the current probe count (quadratic probing: offsets grow as
   * 1, 2, 3, ... from the previous index), wrapping back into the table. */
  inline size_t operator()(size_t idx, size_t numProbes, size_t capacity) const
  {
    /* Step forward by the probe number. */
    const size_t next = idx + numProbes;

    /* Wrap around without paying for a modulus operation. */
    return LIKELY(next < capacity) ? next : next - capacity;
  }
};

template<class KeyT,
         bool isAtomic,
         class KeyHash = std::hash<KeyT>,
         class KeyEqual = std::equal_to<KeyT>,
         class ProbeFcn = AtomicHashSetLinearProbeFcn>
class AtomicHashSet {
  static_assert((std::is_convertible<KeyT, int32_t>::value ||
                 std::is_convertible<KeyT, int64_t>::value ||
                 std::is_convertible<KeyT, const void *>::value),
                "You are trying to use AtomicHashSet with disallowed key "
                "types.  You must use atomically compare-and-swappable integer "
                "keys, or a different container class.");

 public:
  const size_t capacity_;
  /* Sentinel key that marks an unused cell; it must never be inserted. */
  const KeyT kEmptyKey_;

  KeyHash hasher_;
  KeyEqual equalityChecker_;

 private:
  /* Power-of-two-minus-one mask used to fold a hash into a cell index cheaply. */
  size_t kAnchorMask_;
  /* When using a single thread, we can avoid overhead by not bothering with atomic cells. */
  typedef typename std::conditional<isAtomic, std::atomic<KeyT>, KeyT>::type cell_type;
  std::vector<cell_type> cells_;

 public:
  struct Config {
    KeyT emptyKey;
    double maxLoadFactor;
    double growthFactor;
    size_t capacity;  // if positive, overrides maxLoadFactor

    //  Cannot have constexpr ctor because some compilers rightly complain.
    Config() : emptyKey((KeyT)-1), maxLoadFactor(0.8), growthFactor(-1), capacity(0)
    {
    }
  };

  /* Instead of a mess of arguments, we take a max size and a Config struct to
   * simulate named ctor parameters.  The Config struct has sensible defaults
   * for everything, but is overloaded - if you specify a positive capacity,
   * that will be used directly instead of computing it based on maxLoadFactor.
   */
  AtomicHashSet(size_t maxSize,
                KeyHash hasher = KeyHash(),
                KeyEqual equalityChecker = KeyEqual(),
                const Config &c = Config())
      /* Honor an explicit capacity override, as documented on Config::capacity;
       * otherwise derive the capacity from the target load factor. */
      : capacity_(c.capacity > 0 ? c.capacity : size_t(double(maxSize) / c.maxLoadFactor) + 1),
        kEmptyKey_(c.emptyKey),
        hasher_(hasher),
        equalityChecker_(equalityChecker),
        cells_(capacity_)
  {
    /* Get next power of two. Could be done more efficiently with builtin_clz, but this is not
     * performance-critical. */
    kAnchorMask_ = 1;
    while (kAnchorMask_ < capacity_) {
      kAnchorMask_ *= 2;
    }
    /* Get mask for lower bits. */
    kAnchorMask_ -= 1;

    /* std::atomic has no copy constructor, so cells_(capacity_, kEmptyKey_) in the
     * initializer list won't work. However, both cell types are assignable from KeyT,
     * so a plain std::fill over the cells covers the atomic and the non-atomic case
     * alike — no need to cast away the atomic wrapper (which would be undefined
     * behavior). No other thread can touch the table during construction, so the
     * (sequentially consistent) atomic stores here are harmless. */
    std::fill(cells_.begin(), cells_.end(), kEmptyKey_);
  }

  AtomicHashSet(const AtomicHashSet &) = delete;
  AtomicHashSet &operator=(const AtomicHashSet &) = delete;

  ~AtomicHashSet() = default;

  /* Sequential specialization: plain read-compare-write, no synchronization.
   * Returns true if the cell held existingKey and was updated to newKey;
   * otherwise stores the cell's actual content into existingKey and returns false. */
  bool tryUpdateCell(KeyT *cell, KeyT &existingKey, KeyT newKey)
  {
    if (*cell == existingKey) {
      *cell = newKey;
      return true;
    }
    existingKey = *cell;
    return false;
  }

  /* Atomic specialization: single CAS. On failure, compare_exchange_strong
   * loads the cell's current value into existingKey, matching the sequential
   * specialization's contract. */
  bool tryUpdateCell(std::atomic<KeyT> *cell, KeyT &existingKey, KeyT newKey)
  {
    return cell->compare_exchange_strong(existingKey, newKey, std::memory_order_acq_rel);
  }

  /* Insert key into the set.
   * Returns {key, true} if the key was inserted,
   *         {existing, false} if an equal key was already present, and
   *         {kEmptyKey_, false} if every cell was probed without success
   *         (table full — asserts in debug builds). */
  std::pair<KeyT, bool> emplace(KeyT key)
  {
    size_t idx = keyToAnchorIdx(key);
    size_t numProbes = 0;
    for (;;) {
      cell_type *cell = &cells_[idx];
      KeyT existingKey = kEmptyKey_;
      /* Try to replace empty cell with our key. */
      if (tryUpdateCell(cell, existingKey, key)) {
        /* Cell was empty, we're done. */
        return std::make_pair(key, true);
      }

      /* Cell was not empty, check if the existing key is equal. */
      if (equalityChecker_(existingKey, key)) {
        /* Found equal element, we're done. */
        return std::make_pair(existingKey, false);
      }

      /* Continue to next cell according to probe strategy. */
      ++numProbes;
      if (UNLIKELY(numProbes >= capacity_)) {
        // probed every cell...fail
        assert(false);
        return std::make_pair(kEmptyKey_, false);
      }

      idx = ProbeFcn()(idx, numProbes, capacity_);
    }
  }

 private:
  /* Map a key's hash to its initial probe index in [0, capacity_).
   * The mask is cheap but may produce an index past the capacity (the mask
   * covers the next power of two), in which case we fall back to a modulus. */
  inline size_t keyToAnchorIdx(const KeyT k) const
  {
    const size_t hashVal = hasher_(k);
    const size_t probe = hashVal & kAnchorMask_;
    return LIKELY(probe < capacity_) ? probe : hashVal % capacity_;
  }

};  // AtomicHashSet

}  // namespace mikk