Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'source/blender/blenlib/intern/smallhash.c')
-rw-r--r--source/blender/blenlib/intern/smallhash.c266
1 file changed, 238 insertions, 28 deletions
diff --git a/source/blender/blenlib/intern/smallhash.c b/source/blender/blenlib/intern/smallhash.c
index 006a3798dcd..ddeeb5f69dc 100644
--- a/source/blender/blenlib/intern/smallhash.c
+++ b/source/blender/blenlib/intern/smallhash.c
@@ -55,20 +55,56 @@
#include "BLI_smallhash.h"
+#include "BLI_asan.h"
#include "BLI_strict_flags.h"
-#define SMHASH_KEY_UNUSED ((uintptr_t)(UINTPTR_MAX - 0))
-#define SMHASH_CELL_FREE ((void *)(UINTPTR_MAX - 1))
-#define SMHASH_CELL_UNUSED ((void *)(UINTPTR_MAX - 2))
+#ifdef BLI_asan_poison
+# undef BLI_asan_poison
+#endif
+#ifdef BLI_asan_unpoison
+# undef BLI_asan_unpoison
+#endif
+
+#define BLI_asan_poison(a, b)
+#define BLI_asan_unpoison(a, b)
+
+/* NOTE: copied from BLO_blend_defs.h, don't use here because we're in BLI. */
+#ifdef __BIG_ENDIAN__
+/* Big Endian */
+# define MAKE_ID(a, b, c, d) ((int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d))
+# define MAKE_ID_8(a, b, c, d, e, f, g, h) \
+ ((int64_t)(a) << 56 | (int64_t)(b) << 48 | (int64_t)(c) << 40 | (int64_t)(d) << 32 | \
+ (int64_t)(e) << 24 | (int64_t)(f) << 16 | (int64_t)(g) << 8 | (h))
+#else
+/* Little Endian */
+# define MAKE_ID(a, b, c, d) ((int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a))
+# define MAKE_ID_8(a, b, c, d, e, f, g, h) \
+ ((int64_t)(h) << 56 | (int64_t)(g) << 48 | (int64_t)(f) << 40 | (int64_t)(e) << 32 | \
+ (int64_t)(d) << 24 | (int64_t)(c) << 16 | (int64_t)(b) << 8 | (a))
+#endif
+
+#define SMHASH_KEY_UNUSED (uintptr_t)(MAKE_ID_8('s', 'm', 'h', 'k', 'u', 'n', 'u', 's'))
+#define SMHASH_CELL_FREE (uintptr_t)(MAKE_ID_8('s', 'm', 'h', 'c', 'f', 'r', 'e', 'e'))
+#define SMHASH_CELL_UNUSED (uintptr_t)(MAKE_ID_8('s', 'm', 'h', 'c', 'u', 'n', 'u', 's'))
+
+#define USE_REMOVE
/* typically this re-assigns 'h' */
#define SMHASH_NEXT(h, hoff) \
- (CHECK_TYPE_INLINE(&(h), uint *), \
- CHECK_TYPE_INLINE(&(hoff), uint *), \
+ (CHECK_TYPE_INLINE(&(h), uintptr_t *), \
+ CHECK_TYPE_INLINE(&(hoff), uintptr_t *), \
((h) + (((hoff) = ((hoff)*2) + 1), (hoff))))
-/* nothing uses BLI_smallhash_remove yet */
-// #define USE_REMOVE
/**
 * Restore `sh->buckets` to the embedded stack storage when the hash is
 * flagged as stack-backed but the pointer no longer matches it.
 *
 * NOTE(review): presumably this repairs the pointer after the #SmallHash
 * struct itself was copied/moved by value — confirm against callers.
 *
 * \return true if the buckets pointer had to be fixed up.
 */
BLI_INLINE bool check_stack_move(SmallHash *sh)
{
  if (sh->using_stack && sh->buckets != sh->buckets_stack) {
    /* Re-point at the in-struct bucket array. */
    sh->buckets = sh->buckets_stack;

    return true;
  }

  return false;
}
BLI_INLINE bool smallhash_val_is_used(const void *val)
{
@@ -82,28 +118,46 @@ BLI_INLINE bool smallhash_val_is_used(const void *val)
extern const uint BLI_ghash_hash_sizes[];
#define hashsizes BLI_ghash_hash_sizes
-BLI_INLINE uint smallhash_key(const uintptr_t key)
+BLI_INLINE uintptr_t smallhash_key(const uintptr_t key)
{
- return (uint)key;
+#if 1
+ return key;
+#else
+ uintptr_t y = (size_t)key;
+ /* bottom 3 or 4 bits are likely to be 0; rotate y by 4 to avoid
+ * excessive hash collisions for dicts and sets */
+
+ return (uintptr_t)(y >> 4) | ((uintptr_t)y << (sizeof(uintptr_t[8]) - 4));
+#endif
}
/**
* Check if the number of items in the smallhash is large enough to require more buckets.
*/
-BLI_INLINE bool smallhash_test_expand_buckets(const uint nentries, const uint nbuckets)
+BLI_INLINE bool smallhash_test_expand_buckets(const uint nentries,
+ const uint nbuckets,
+ const uint nfreecells)
{
+ if (nfreecells < 3) {
+ return true;
+ }
+
/* (approx * 1.5) */
- return (nentries + (nentries >> 1)) > nbuckets;
+ return (nentries + (nentries >> 1)) > nbuckets || nfreecells < 3;
}
BLI_INLINE void smallhash_init_empty(SmallHash *sh)
{
uint i;
+ BLI_asan_unpoison(&sh->buckets, sizeof(void *));
+
for (i = 0; i < sh->nbuckets; i++) {
sh->buckets[i].key = SMHASH_KEY_UNUSED;
sh->buckets[i].val = SMHASH_CELL_FREE;
}
+
+ BLI_asan_poison(&sh->buckets, sizeof(void *));
}
/**
@@ -111,19 +165,24 @@ BLI_INLINE void smallhash_init_empty(SmallHash *sh)
*/
BLI_INLINE void smallhash_buckets_reserve(SmallHash *sh, const uint nentries_reserve)
{
- while (smallhash_test_expand_buckets(nentries_reserve, sh->nbuckets)) {
+ while (smallhash_test_expand_buckets(nentries_reserve, sh->nbuckets, sh->nbuckets + 5)) {
sh->nbuckets = hashsizes[++sh->cursize];
+ sh->nfreecells = sh->nbuckets;
}
}
-BLI_INLINE SmallHashEntry *smallhash_lookup(const SmallHash *sh, const uintptr_t key)
+BLI_INLINE SmallHashEntry *smallhash_lookup(SmallHash *sh, const uintptr_t key)
{
+ check_stack_move(sh);
+
SmallHashEntry *e;
- uint h = smallhash_key(key);
- uint hoff = 1;
+ uintptr_t h = smallhash_key(key);
+ uintptr_t hoff = 1;
BLI_assert(key != SMHASH_KEY_UNUSED);
+ BLI_asan_unpoison(&sh->buckets, sizeof(void *));
+
/* NOTE: there are always more buckets than entries,
* so we know there will always be a free bucket if the key isn't found. */
for (e = &sh->buckets[h % sh->nbuckets]; e->val != SMHASH_CELL_FREE;
@@ -135,25 +194,37 @@ BLI_INLINE SmallHashEntry *smallhash_lookup(const SmallHash *sh, const uintptr_t
}
}
+ BLI_asan_poison(&sh->buckets, sizeof(void *));
+
return NULL;
}
BLI_INLINE SmallHashEntry *smallhash_lookup_first_free(SmallHash *sh, const uintptr_t key)
{
+ check_stack_move(sh);
+
SmallHashEntry *e;
- uint h = smallhash_key(key);
- uint hoff = 1;
+ uintptr_t h = smallhash_key(key);
+ uintptr_t hoff = 1;
+
+ BLI_asan_unpoison(&sh->buckets, sizeof(void *));
for (e = &sh->buckets[h % sh->nbuckets]; smallhash_val_is_used(e->val);
h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets]) {
/* pass */
}
+ BLI_asan_poison(&sh->buckets, sizeof(void *));
+
return e;
}
BLI_INLINE void smallhash_resize_buckets(SmallHash *sh, const uint nbuckets)
{
+ check_stack_move(sh);
+
+ BLI_asan_unpoison(&sh->buckets, sizeof(void *));
+
SmallHashEntry *buckets_old = sh->buckets;
const uint nbuckets_old = sh->nbuckets;
const bool was_alloc = (buckets_old != sh->buckets_stack);
@@ -169,21 +240,29 @@ BLI_INLINE void smallhash_resize_buckets(SmallHash *sh, const uint nbuckets)
}
else {
sh->buckets = MEM_mallocN(sizeof(*sh->buckets) * nbuckets, __func__);
+ sh->using_stack = false;
}
sh->nbuckets = nbuckets;
+ sh->nfreecells = nbuckets;
+ sh->nentries = 0;
+ BLI_asan_poison(&sh->buckets, sizeof(void *));
smallhash_init_empty(sh);
for (i = 0; i < nbuckets_old; i++) {
if (smallhash_val_is_used(buckets_old[i].val)) {
SmallHashEntry *e = smallhash_lookup_first_free(sh, buckets_old[i].key);
+
e->key = buckets_old[i].key;
e->val = buckets_old[i].val;
+
+ sh->nfreecells--;
+ sh->nentries++;
}
}
- if (was_alloc) {
+ if (was_alloc && buckets_old) {
MEM_freeN(buckets_old);
}
}
@@ -194,15 +273,23 @@ void BLI_smallhash_init_ex(SmallHash *sh, const uint nentries_reserve)
sh->nentries = 0;
sh->cursize = 2;
+ sh->using_stack = true;
sh->nbuckets = hashsizes[sh->cursize];
+ sh->nfreecells = sh->nbuckets;
sh->buckets = sh->buckets_stack;
+ BLI_asan_poison(&sh->buckets, sizeof(void *));
+
if (nentries_reserve) {
smallhash_buckets_reserve(sh, nentries_reserve);
if (sh->nbuckets > SMSTACKSIZE) {
+ BLI_asan_unpoison(&sh->buckets, sizeof(void *));
sh->buckets = MEM_mallocN(sizeof(*sh->buckets) * sh->nbuckets, __func__);
+ BLI_asan_poison(&sh->buckets, sizeof(void *));
+
+ sh->using_stack = false;
}
}
@@ -217,24 +304,85 @@ void BLI_smallhash_init(SmallHash *sh)
/* NOTE: does *not* free *sh itself! only the direct data! */
void BLI_smallhash_release(SmallHash *sh)
{
- if (sh->buckets != sh->buckets_stack) {
+ check_stack_move(sh);
+
+ BLI_asan_unpoison(&sh->buckets, sizeof(void *));
+
+ if (sh->buckets && sh->buckets != sh->buckets_stack) {
MEM_freeN(sh->buckets);
}
}
+bool BLI_smallhash_ensure_p(SmallHash *sh, uintptr_t key, void ***item)
+{
+ check_stack_move(sh);
+
+ SmallHashEntry *e = NULL;
+ uintptr_t h = smallhash_key(key);
+ uintptr_t hoff = 1;
+
+ if (UNLIKELY(smallhash_test_expand_buckets(sh->nentries, sh->nbuckets, sh->nfreecells))) {
+ smallhash_resize_buckets(sh, hashsizes[++sh->cursize]);
+ }
+
+ BLI_assert(key != SMHASH_KEY_UNUSED);
+
+ BLI_asan_unpoison(&sh->buckets, sizeof(void *));
+
+ /* NOTE: there are always more buckets than entries,
+ * so we know there will always be a free bucket if the key isn't found. */
+ for (e = &sh->buckets[h % sh->nbuckets]; e->val != SMHASH_CELL_FREE;
+ h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets]) {
+ if (e->key == key) {
+ /* should never happen because unused keys are zero'd */
+ BLI_assert(e->val != SMHASH_CELL_UNUSED);
+ break;
+ }
+ }
+
+ BLI_asan_poison(&sh->buckets, sizeof(void *));
+
+ bool ret;
+
+ if (e->val == SMHASH_CELL_FREE || e->val == SMHASH_CELL_UNUSED) {
+ sh->nentries++;
+ sh->nfreecells--;
+ ret = false;
+ }
+ else {
+ ret = true;
+ }
+
+ e->key = key;
+ *item = &e->val;
+
+ return ret;
+}
+
void BLI_smallhash_insert(SmallHash *sh, uintptr_t key, void *item)
{
+ check_stack_move(sh);
+
SmallHashEntry *e;
BLI_assert(key != SMHASH_KEY_UNUSED);
BLI_assert(smallhash_val_is_used(item));
BLI_assert(BLI_smallhash_haskey(sh, key) == false);
- if (UNLIKELY(smallhash_test_expand_buckets(++sh->nentries, sh->nbuckets))) {
+ if (UNLIKELY(smallhash_test_expand_buckets(sh->nentries, sh->nbuckets, sh->nfreecells))) {
smallhash_resize_buckets(sh, hashsizes[++sh->cursize]);
}
e = smallhash_lookup_first_free(sh, key);
+
+ if (e->val == SMHASH_CELL_FREE) {
+ sh->nentries++;
+ sh->nfreecells--;
+ }
+ else if (e->val == SMHASH_CELL_UNUSED) {
+ sh->nentries++;
+ }
+
e->key = key;
e->val = item;
}
@@ -261,9 +409,46 @@ bool BLI_smallhash_reinsert(SmallHash *sh, uintptr_t key, void *item)
#ifdef USE_REMOVE
bool BLI_smallhash_remove(SmallHash *sh, uintptr_t key)
{
  /* Remove `key` from the hash if present.
   *
   * The cell is marked #SMHASH_CELL_UNUSED (a tombstone) rather than
   * #SMHASH_CELL_FREE so probe chains that pass through it stay intact.
   * `nfreecells` is intentionally left untouched: tombstones are not free.
   *
   * Returns true if the key was found and removed. */
  check_stack_move(sh);

  BLI_assert(key != SMHASH_KEY_UNUSED);

  SmallHashEntry *e;
  uintptr_t h = smallhash_key(key);
  uintptr_t hoff = 1;

  for (e = &sh->buckets[h % sh->nbuckets]; e->val != SMHASH_CELL_FREE;
       h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets]) {
    if (e->key == key) {
      /* should never happen because unused keys are zero'd */
      BLI_assert(e->val != SMHASH_CELL_UNUSED);
      break;
    }
  }

  /* NOTE: the previous version computed an unused `e2` here via
   * `sh->buckets[h & sh->nbuckets]` — dead code, and `&` (instead of `%`)
   * could index one past the array if it were ever used. Removed. */
  if (e->key == key) {
    e->key = SMHASH_KEY_UNUSED;
    e->val = SMHASH_CELL_UNUSED;

    sh->nentries--;

    return true;
  }

  return false;
}
+
+bool BLI_smallhash_remove_p(SmallHash *sh, uintptr_t key, void **val)
+{
SmallHashEntry *e = smallhash_lookup(sh, key);
if (e) {
+ *val = e->val;
+
e->key = SMHASH_KEY_UNUSED;
e->val = SMHASH_CELL_UNUSED;
sh->nentries--;
@@ -276,34 +461,54 @@ bool BLI_smallhash_remove(SmallHash *sh, uintptr_t key)
}
#endif
-void *BLI_smallhash_lookup(const SmallHash *sh, uintptr_t key)
+void *BLI_smallhash_lookup(SmallHash *sh, uintptr_t key)
{
SmallHashEntry *e = smallhash_lookup(sh, key);
return e ? e->val : NULL;
}
-void **BLI_smallhash_lookup_p(const SmallHash *sh, uintptr_t key)
+void **BLI_smallhash_lookup_p(SmallHash *sh, uintptr_t key)
{
SmallHashEntry *e = smallhash_lookup(sh, key);
return e ? &e->val : NULL;
}
-bool BLI_smallhash_haskey(const SmallHash *sh, uintptr_t key)
+void BLI_smallhash_clear(SmallHash *sh, uintptr_t key)
+{
+ check_stack_move(sh);
+
+ BLI_asan_unpoison(&sh->buckets, sizeof(void *));
+
+ SmallHashEntry *e = sh->buckets;
+
+ for (uint i = 0; i < sh->nbuckets; i++, e++) {
+ e->key = SMHASH_KEY_UNUSED;
+ e->val = SMHASH_CELL_FREE;
+ }
+
+ sh->nentries = 0;
+
+ BLI_asan_poison(&sh->buckets, sizeof(void *));
+}
+
+bool BLI_smallhash_haskey(SmallHash *sh, uintptr_t key)
{
SmallHashEntry *e = smallhash_lookup(sh, key);
return (e != NULL);
}
-int BLI_smallhash_len(const SmallHash *sh)
+int BLI_smallhash_len(SmallHash *sh)
{
return (int)sh->nentries;
}
BLI_INLINE SmallHashEntry *smallhash_iternext(SmallHashIter *iter, uintptr_t *key)
{
+ BLI_asan_unpoison(&iter->sh->buckets, sizeof(void *));
+
while (iter->i < iter->sh->nbuckets) {
if (smallhash_val_is_used(iter->sh->buckets[iter->i].val)) {
if (key) {
@@ -316,6 +521,7 @@ BLI_INLINE SmallHashEntry *smallhash_iternext(SmallHashIter *iter, uintptr_t *ke
iter->i++;
}
+ BLI_asan_poison(&iter->sh->buckets, sizeof(void *));
return NULL;
}
@@ -333,16 +539,20 @@ void **BLI_smallhash_iternext_p(SmallHashIter *iter, uintptr_t *key)
return e ? &e->val : NULL;
}
-void *BLI_smallhash_iternew(const SmallHash *sh, SmallHashIter *iter, uintptr_t *key)
+void *BLI_smallhash_iternew(SmallHash *sh, SmallHashIter *iter, uintptr_t *key)
{
+ check_stack_move(sh);
+
iter->sh = sh;
iter->i = 0;
return BLI_smallhash_iternext(iter, key);
}
-void **BLI_smallhash_iternew_p(const SmallHash *sh, SmallHashIter *iter, uintptr_t *key)
+void **BLI_smallhash_iternew_p(SmallHash *sh, SmallHashIter *iter, uintptr_t *key)
{
+ check_stack_move(sh);
+
iter->sh = sh;
iter->i = 0;
@@ -408,8 +618,8 @@ double BLI_smallhash_calc_quality(SmallHash *sh)
if (sh->buckets[i].key != SMHASH_KEY_UNUSED) {
uint64_t count = 0;
SmallHashEntry *e, *e_final = &sh->buckets[i];
- uint h = smallhash_key(e_final->key);
- uint hoff = 1;
+ uintptr_t h = smallhash_key(e_final->key);
+ uintptr_t hoff = 1;
for (e = &sh->buckets[h % sh->nbuckets]; e != e_final;
h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets]) {