xref: /xnu-8019.80.24/bsd/skywalk/lib/cuckoo_hashtable.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2018-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <skywalk/os_skywalk_private.h>
29 
30 #include "cuckoo_hashtable.h"
31 
32 #define CUCKOO_TAG "com.apple.skywalk.libcuckoo"
33 kern_allocation_name_t cuckoo_tag;
34 
35 SYSCTL_NODE(_kern_skywalk, OID_AUTO, libcuckoo, CTLFLAG_RW | CTLFLAG_LOCKED,
36     0, "Skywalk Cuckoo Hashtable Library");
37 
38 uint32_t cuckoo_verbose = 0;
39 #if (DEVELOPMENT || DEBUG)
40 SYSCTL_UINT(_kern_skywalk_libcuckoo, OID_AUTO, verbose,
41     CTLFLAG_RW | CTLFLAG_LOCKED, &cuckoo_verbose, 0, "");
42 #endif /* DEVELOPMENT || DEBUG */
43 
44 typedef enum cht_verb {
45 	CHTV_ERR = 0,
46 	CHTV_WARN = 1,
47 	CHTV_INFO = 2,
48 	CHTV_DEBUG = 3,
49 } cht_verb_t;
50 
51 static LCK_GRP_DECLARE(cht_lock_group, "CHT_LOCK");
52 static LCK_ATTR_DECLARE(cht_lock_attr, 0, 0);
53 
54 #if SK_LOG
55 #define cht_log(level, _fmt, ...)       \
56 	do {    \
57 	        if (level <= cuckoo_verbose) {  \
58 	                kprintf("Cuckoo: thread %p %-30s " _fmt "\n",   \
59 	                    current_thread(), __FUNCTION__, ##__VA_ARGS__);     \
60 	        }       \
61 	} while (0)
62 #else  /* !SK_LOG */
63 #define cht_log(_flag, _fmt, ...) do { ((void)0); } while (0)
64 #endif /* !SK_LOG */
65 
66 #define cht_err(_fmt, ...) cht_log(CHTV_ERR, _fmt, ##__VA_ARGS__)
67 #define cht_warn(_fmt, ...) cht_log(CHTV_WARN, _fmt, ##__VA_ARGS__)
68 #define cht_info(_fmt, ...) cht_log(CHTV_INFO, _fmt, ##__VA_ARGS__)
69 #define cht_debug(_fmt, ...) cht_log(CHTV_DEBUG, _fmt, ##__VA_ARGS__)
70 
71 static inline int
72 cuckoo_node_chain(struct cuckoo_node *node,
73     struct cuckoo_node *new_node)
74 {
75 	struct cuckoo_node *prev_node = node;
76 
77 	/* new node must be zero initialized */
78 	ASSERT(new_node->next == NULL);
79 
80 	/* use tail insert to check for duplicates along the list */
81 	while (__improbable(node != NULL)) {
82 		if (node == new_node) {
83 			return EEXIST;
84 		}
85 		prev_node = node;
86 		node = node->next;
87 	}
88 
89 	prev_node->next = new_node;
90 
91 	return 0;
92 }
93 
94 static inline bool
95 cuckoo_node_del(struct cuckoo_node **pnode,
96     struct cuckoo_node *del_node)
97 {
98 	ASSERT(pnode != NULL);
99 
100 	struct cuckoo_node *node = *pnode;
101 	while (node != NULL && node != del_node) {
102 		pnode = &node->next;
103 		node = node->next;
104 	}
105 	if (__probable(node != NULL)) {
106 		*pnode = node->next;
107 		node->next = NULL;
108 		return true;
109 	}
110 
111 	return false;
112 }
113 
114 static inline void
115 cuckoo_node_set_next(struct cuckoo_node *node, struct cuckoo_node *next_node)
116 {
117 	node->next = next_node;
118 }
119 
120 /* We probably won't add RCU soon, so use a simple pointer reference for now */
121 static inline struct cuckoo_node *
122 cuckoo_node_next(struct cuckoo_node *node)
123 {
124 	return node->next;
125 }
126 
127 #define _CHT_MAX_LOAD_SHRINK 40       /* must be below 40% load to shrink */
128 #define _CHT_MIN_LOAD_EXPAND 85       /* cuckoo can run the table at 85% load before expanding */
129 
130 enum cuckoo_resize_ops {
131 	_CHT_RESIZE_EXPAND = 0,
132 	_CHT_RESIZE_SHRINK = 1,
133 };
134 
135 /*
136  * Following the classic Cuckoo hash table design, cuckoo_hashtable uses k
137  * hash functions to derive multiple candidate hash table bucket indexes.
138  * Here cuckoo_hashtable uses k=2.
139  *     prim_bkt_idx = bkt_idx[1] = hash[1](key) % N_BUCKETS
140  *     alt_bkt_idx  = bkt_idx[2] = hash[2](key) % N_BUCKETS
141  *
142  * Currently, we let the caller pass in the actual key's hash value, because
143  * in most use cases the caller has probably already calculated the hash
144  * value of the actual key (e.g. using hardware offloading or copy+hash). This
145  * also saves us from storing the key in the table (or any side data structure). So
146  *
147  *     hash[1] = hash    // hash value passed in from the caller
148  *     hash[2] = __alt_hash(hash[1])
149  *
150  * __alt_hash derives h2 using h1's high bits, since calculating the primary
151  * bucket index uses its low bits. So alt_hash is still a uniformly distributed
152  * random variable (not independent of h1, but that is fine for hashtable usage).
153  *
154  * There is the option of storing h2 in the table bucket as well, but
155  * cuckoo_hashtable does not do so, trading a few more CPU cycles during
156  * add/del operations for lower memory usage. Assuming that the hashtable is
157  * read-heavy rather than write-heavy, this is reasonable.
158  *
159  * In the rare case of a full hash value collision, where
160  *     hash[1] == hash[1]'
161  * there is no way for the hash table to differentiate the two objects, so we
162  * need to chain the fully collided objects under the same bucket slot.
163  * The caller needs to walk the chain and explicitly compare the full-length
164  * key to find the correct object.
165  *
166  * Reference Counting
167  * The hashtable assumes all objects are reference counted. It takes function
168  * pointers that retain and release the object.
169  * Adding to the table calls the retain function.
170  * Deleting from the table calls the release function.
171  *
172  */
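/*
 * A hypothetical caller-side sketch (not from the original source): the
 * names flow_entry, fe_* and the three callbacks are assumptions; the
 * cuckoo_hashtable_params fields and callback signatures are the ones
 * used in this file. The object embeds a struct cuckoo_node, and the cmp
 * callback resolves full hash collisions by comparing the full-length key.
 */
#if 0   /* sketch only */
struct flow_entry {
	struct cuckoo_node fe_cnode;    /* embedded linkage; first member */
	uint64_t           fe_key;      /* full-length key */
	uint32_t           fe_refcnt;
};

static int
flow_entry_cmp(struct cuckoo_node *node, void *key)
{
	/* fe_cnode is the first member, so the cast is safe */
	struct flow_entry *fe = (struct flow_entry *)(void *)node;
	return fe->fe_key == *(uint64_t *)key ? 0 : 1;
}

static void
flow_entry_retain(struct cuckoo_node *node)
{
	/* a real implementation would use a proper refcount primitive */
	((struct flow_entry *)(void *)node)->fe_refcnt++;
}

static void
flow_entry_release(struct cuckoo_node *node)
{
	((struct flow_entry *)(void *)node)->fe_refcnt--;
}

static struct cuckoo_hashtable_params p = {
	.cht_capacity = 1024,
	.cht_obj_cmp = flow_entry_cmp,
	.cht_obj_retain = flow_entry_retain,
	.cht_obj_release = flow_entry_release,
};
/* struct cuckoo_hashtable *ht = cuckoo_hashtable_create(&p); */
#endif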
173 
174 /* hash might be zero, so always use _node == NULL to test empty slot */
175 struct _slot {
176 	uint32_t                _hash;
177 	struct cuckoo_node      *_node;
178 };
179 
180 /*
181  * Cuckoo hashtable cache line awareness:
182  *   - ARM platform has 128B CPU cache line.
183  *   - Intel platform has a 64B CPU cache line. However, the hardware prefetcher
184  *     treats cache lines as 128B chunks and prefetches the adjacent 64B line.
185  *
186  * Thus cuckoo_hashtable uses a 128B bucket size to make the best use of CPU
187  * cache resources.
188  */
189 #define _CHT_CACHELINE_CHUNK 128
190 #define _CHT_SLOT_INVAL UINT8_MAX
191 static const uint8_t _CHT_BUCKET_SLOTS =
192     ((_CHT_CACHELINE_CHUNK - sizeof(lck_mtx_t) - sizeof(uint8_t)) /
193     sizeof(struct _slot));
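/*
 * Illustrative arithmetic (not from the original source; the 16-byte
 * lck_mtx_t is an assumption): on LP64, sizeof(struct _slot) is 16
 * (4-byte hash, 4 bytes of padding, 8-byte pointer), so the expression
 * above yields (128 - 16 - 1) / 16 == 6 slots, and the aligned(128)
 * attribute on struct _bucket below pads the remainder so every bucket
 * occupies exactly one 128B chunk. A userspace model of that layout:
 */
#if 0   /* sketch only */
struct example_slot { uint32_t hash; void *node; };     /* 16B on LP64 */
struct example_bucket {
	struct example_slot slots[6];
	uint64_t            mtx[2];     /* stand-in for a 16B lck_mtx_t */
	uint8_t             inuse;
} __attribute__((aligned(128)));
_Static_assert(sizeof(struct example_bucket) == 128,
    "one bucket per 128B cache-line chunk");
#endif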
194 
195 struct _bucket {
196 	struct _slot            _slots[_CHT_BUCKET_SLOTS];
197 	decl_lck_mtx_data(, _lock);
198 	uint8_t                 _inuse;
199 } __attribute__((aligned(_CHT_CACHELINE_CHUNK)));
200 
201 struct cuckoo_hashtable {
202 	uint32_t        _bitmask;       /* 1s' mask for quick MOD */
203 	uint32_t        _n_buckets;     /* number of buckets */
204 
205 	volatile uint32_t _n_entries;   /* number of entries in table */
206 	uint32_t          _capacity;    /* max number of entries */
207 	uint32_t          _rcapacity;   /* requested capacity */
208 
209 	bool            _busy;
210 	uint32_t        _resize_waiters;
211 	decl_lck_rw_data(, _resize_lock);
212 	decl_lck_mtx_data(, _lock);
213 
214 	struct _bucket  *_buckets;
215 
216 	int (*_obj_cmp)(struct cuckoo_node *node, void *key);
217 	void (*_obj_retain)(struct cuckoo_node *);
218 	void (*_obj_release)(struct cuckoo_node *);
219 } __attribute__((aligned(_CHT_CACHELINE_CHUNK)));
220 
221 static inline void
222 __slot_set(struct _slot *slt, uint32_t hash, struct cuckoo_node *node)
223 {
224 	slt->_hash = hash;
225 	slt->_node = node;
226 }
227 
228 static inline void
229 __slot_reset(struct _slot *slt)
230 {
231 	slt->_hash = 0;
232 	slt->_node = NULL;
233 }
234 
235 static inline uint32_t
236 __alt_hash(uint32_t hash)
237 {
238 #define _CHT_ALT_HASH_MIX       0x5bd1e995      /* Murmur hash mix */
239 	uint32_t tag = hash >> 16;
240 	uint32_t alt_hash = hash ^ ((tag + 1) * _CHT_ALT_HASH_MIX);
241 	return alt_hash;
242 }
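/*
 * Illustration (not from the original source; the sample values are made
 * up): with 1024 buckets the bitmask is 0x3ff. The primary index comes
 * straight from the hash's low bits, while __alt_hash folds the high 16
 * bits back in, so the alternate index usually lands elsewhere:
 *
 *     hash     = 0xdeadbeef
 *     prim_idx = hash & 0x3ff = 0x2ef
 *     alt_idx  = (hash ^ ((0xdead + 1) * 0x5bd1e995)) & 0x3ff
 */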
243 
244 static inline struct _bucket *
245 __get_bucket(struct cuckoo_hashtable *h, uint32_t b_i)
246 {
247 	return &h->_buckets[b_i];
248 }
249 
250 static inline struct _bucket *
251 __prim_bucket(struct cuckoo_hashtable *h, uint32_t hash)
252 {
253 	return __get_bucket(h, hash & h->_bitmask);
254 }
255 
256 static inline struct _bucket *
257 __alt_bucket(struct cuckoo_hashtable *h, uint32_t hash)
258 {
259 	return __get_bucket(h, __alt_hash(hash) & h->_bitmask);
260 }
261 
262 #if SK_LOG
263 static inline size_t
264 __bucket_idx(struct cuckoo_hashtable *h, struct _bucket *b)
265 {
266 	return ((uintptr_t)b - (uintptr_t)&h->_buckets[0]) / sizeof(struct _bucket);
267 }
268 #endif /* SK_LOG */
269 
270 static inline struct _slot *
271 __bucket_slot(struct _bucket *b, uint32_t slot_idx)
272 {
273 	return &b->_slots[slot_idx];
274 }
275 
276 static inline bool
277 __slot_empty(struct _slot *s)
278 {
279 	return s->_node == NULL;
280 }
281 
282 static inline uint32_t
283 __align32pow2(uint32_t v)
284 {
285 	v--;
286 	v |= v >> 1;
287 	v |= v >> 2;
288 	v |= v >> 4;
289 	v |= v >> 8;
290 	v |= v >> 16;
291 	v++;
292 
293 	return v;
294 }
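/*
 * Illustration (not from the original source): the decrement-then-smear
 * trick copies the leading one bit into every lower position, so the
 * final increment rounds up to the next power of two while exact powers
 * map to themselves:
 *
 *     __align32pow2(5): 5 - 1 = 4 (100b), smear -> 7 (111b), + 1 = 8
 *     __align32pow2(8): 8 - 1 = 7 (111b), smear -> 7 (111b), + 1 = 8
 *     __align32pow2(1): 1 - 1 = 0,        smear -> 0,        + 1 = 1
 */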
295 
296 uint32_t
297 cuckoo_hashtable_load_factor(struct cuckoo_hashtable *h)
298 {
299 	return (100 * h->_n_entries) / (h->_n_buckets * _CHT_BUCKET_SLOTS);
300 }
301 
302 /*
303  * Cuckoo hashtable uses regular mutexes.  Most operations (find/add) should
304  * finish faster than a context switch.  It avoids using spin locks since
305  * they might cause issues on certain platforms (e.g. x86_64) where the trap
306  * handler for dealing with FP/SIMD use would be invoked to perform thread-
307  * specific allocations; the use of FP/SIMD here is related to the memory
308  * compare-with-mask routines.  Even if another thread holds a bucket lock
309  * and has gone to sleep, the cuckoo path search tries to find another
310  * path without blockers.
311  *
312  * The only exception is table expansion, which could take a long time; we use
313  * a read/write lock to protect the whole table against any read/write in
314  * that case.
315  */
316 
317 /* find/add only acquire the table rlock, and serialize with the bucket lock */
318 #define __lock_bucket(b)        lck_mtx_lock(&b->_lock)
319 #define __unlock_bucket(b)      lck_mtx_unlock(&b->_lock)
320 
321 #define _CHT_DEADLOCK_THRESHOLD 20
322 static inline bool
323 __lock_bucket_with_backoff(struct _bucket *b)
324 {
325 	uint32_t try_counter = 0;
326 	while (!lck_mtx_try_lock(&b->_lock)) {
327 		if (try_counter++ > _CHT_DEADLOCK_THRESHOLD) {
328 			return false;
329 		}
330 	}
331 	return true;
332 }
333 
334 #define __rlock_table(h)        lck_rw_lock_shared(&h->_resize_lock)
335 #define __unrlock_table(h)      lck_rw_unlock_shared(&h->_resize_lock)
336 #define __r2wlock_table(h)      lck_rw_lock_shared_to_exclusive(&h->_resize_lock)
337 #define __wlock_table(h)        lck_rw_lock_exclusive(&h->_resize_lock)
338 #define __unwlock_table(h)      lck_rw_unlock_exclusive(&h->_resize_lock)
339 
340 static inline int
341 __resize_begin(struct cuckoo_hashtable *h)
342 {
343 	// takes care of concurrent resize
344 	lck_mtx_lock(&h->_lock);
345 	while (h->_busy) {
346 		if (++(h->_resize_waiters) == 0) {   /* wraparound */
347 			h->_resize_waiters++;
348 		}
349 		int error = msleep(&h->_resize_waiters, &h->_lock,
350 		    (PZERO + 1), __FUNCTION__, NULL);
351 		if (error == EINTR) {
352 			cht_warn("resize waiter was interrupted");
353 			ASSERT(h->_resize_waiters > 0);
354 			h->_resize_waiters--;
355 			lck_mtx_unlock(&h->_lock);
356 			return EINTR;
357 		}
358 		// resizer finished
359 		lck_mtx_unlock(&h->_lock);
360 		return EAGAIN;
361 	}
362 
363 	h->_busy = true;
364 	lck_mtx_unlock(&h->_lock);
365 
366 	// takes other readers offline
367 	__wlock_table(h);
368 	return 0;
369 }
370 
371 static inline void
372 __resize_end(struct cuckoo_hashtable *h)
373 {
374 	__unwlock_table(h);
375 	lck_mtx_lock(&h->_lock);
376 	h->_busy = false;
377 	if (__improbable(h->_resize_waiters > 0)) {
378 		h->_resize_waiters = 0;
379 		wakeup(&h->_resize_waiters);
380 	}
381 	lck_mtx_unlock(&h->_lock);
382 }
383 
384 void
385 cuckoo_hashtable_init(void)
386 {
387 	_CASSERT(sizeof(struct _bucket) <= _CHT_CACHELINE_CHUNK);
388 	ASSERT(cuckoo_tag == NULL);
389 	cuckoo_tag = kern_allocation_name_allocate(CUCKOO_TAG, 0);
390 	ASSERT(cuckoo_tag != NULL);
391 }
392 
393 struct cuckoo_hashtable *
394 cuckoo_hashtable_create(struct cuckoo_hashtable_params *p)
395 {
396 	struct cuckoo_hashtable *h = NULL;
397 	uint32_t n = 0;
398 	uint32_t n_buckets = 0;
399 	struct _bucket *buckets = NULL;
400 	uint32_t i;
401 
402 	if (p->cht_capacity > CUCKOO_HASHTABLE_ENTRIES_MAX ||
403 	    p->cht_capacity < _CHT_BUCKET_SLOTS) {
404 		return NULL;
405 	}
406 
407 	ASSERT(p->cht_capacity < UINT32_MAX);
408 	n = (uint32_t)p->cht_capacity;
409 	h = sk_alloc_type(struct cuckoo_hashtable, Z_WAITOK | Z_NOFAIL, cuckoo_tag);
410 
411 	n_buckets = __align32pow2(n / _CHT_BUCKET_SLOTS);
412 	buckets = sk_alloc_type_array(struct _bucket, n_buckets, Z_WAITOK, cuckoo_tag);
413 	if (buckets == NULL) {
414 		sk_free_type(struct cuckoo_hashtable, h);
415 		return NULL;
416 	}
417 
418 	for (i = 0; i < n_buckets; i++) {
419 		lck_mtx_init(&buckets[i]._lock, &cht_lock_group, &cht_lock_attr);
420 	}
421 
422 	lck_mtx_init(&h->_lock, &cht_lock_group, &cht_lock_attr);
423 
424 	h->_n_entries = 0;
425 	h->_n_buckets = n_buckets;
426 	h->_capacity = h->_rcapacity = h->_n_buckets * _CHT_BUCKET_SLOTS;
427 	h->_bitmask = n_buckets - 1;
428 	h->_buckets = buckets;
429 	lck_rw_init(&h->_resize_lock, &cht_lock_group, &cht_lock_attr);
430 	h->_busy = false;
431 	h->_resize_waiters = 0;
432 
433 	ASSERT(p->cht_obj_retain != NULL);
434 	ASSERT(p->cht_obj_release != NULL);
435 	ASSERT(p->cht_obj_cmp != NULL);
436 	h->_obj_cmp = p->cht_obj_cmp;
437 	h->_obj_retain = p->cht_obj_retain;
438 	h->_obj_release = p->cht_obj_release;
439 
440 	return h;
441 }
442 
443 void
444 cuckoo_hashtable_free(struct cuckoo_hashtable *h)
445 {
446 	uint32_t i;
447 
448 	if (h == NULL) {
449 		return;
450 	}
451 
452 	ASSERT(h->_n_entries == 0);
453 
454 	if (h->_buckets != NULL) {
455 		for (i = 0; i < h->_n_buckets; i++) {
456 			lck_mtx_destroy(&h->_buckets[i]._lock, &cht_lock_group);
457 		}
458 		sk_free_type_array(struct _bucket, h->_n_buckets, h->_buckets);
459 	}
460 	sk_free_type(struct cuckoo_hashtable, h);
461 }
462 
463 size_t
464 cuckoo_hashtable_entries(struct cuckoo_hashtable *h)
465 {
466 	return h->_n_entries;
467 }
468 
469 size_t
470 cuckoo_hashtable_capacity(struct cuckoo_hashtable *h)
471 {
472 	return h->_n_buckets * _CHT_BUCKET_SLOTS;
473 }
474 
475 size_t
476 cuckoo_hashtable_memory_footprint(struct cuckoo_hashtable *h)
477 {
478 	size_t total_meminuse = sizeof(struct cuckoo_hashtable) +
479 	    (h->_n_buckets * sizeof(struct _bucket));
480 	return total_meminuse;
481 }
482 
483 static inline struct cuckoo_node *
484 __find_in_bucket(struct cuckoo_hashtable *h, struct _bucket *b, void *key,
485     uint32_t hash)
486 {
487 	uint32_t i;
488 	struct cuckoo_node *node = NULL;
489 
490 	__lock_bucket(b);
491 	if (b->_inuse == 0) {
492 		goto done;
493 	}
494 	for (i = 0; i < _CHT_BUCKET_SLOTS; i++) {
495 		if (b->_slots[i]._hash == hash) {
496 			node = b->_slots[i]._node;
497 			while (node != NULL) {
498 				if (h->_obj_cmp(node, key) == 0) {
499 					h->_obj_retain(node);
500 					goto done;
501 				}
502 				node = cuckoo_node_next(node);
503 			}
504 		}
505 	}
506 
507 done:
508 	__unlock_bucket(b);
509 	return node;
510 }
511 
512 /* returns the node retained; the caller must drop that reference */
513 struct cuckoo_node *
514 cuckoo_hashtable_find_with_hash(struct cuckoo_hashtable *h, void *key,
515     uint32_t hash)
516 {
517 	struct _bucket *b1, *b2;
518 	struct cuckoo_node *node = NULL;
519 
520 	__rlock_table(h);
521 
522 	b1 = __prim_bucket(h, hash);
523 	if ((node = __find_in_bucket(h, b1, key, hash)) != NULL) {
524 		goto done;
525 	}
526 
527 	b2 = __alt_bucket(h, hash);
528 	if ((node = __find_in_bucket(h, b2, key, hash)) != NULL) {
529 		goto done;
530 	}
531 
532 done:
533 	__unrlock_table(h);
534 	return node;
535 }
536 
537 /*
538  * To add a key into cuckoo_hashtable:
539  *   1. First it searches the key's two candidate buckets b1, b2
540  *   2. If there are slots available in b1 or b2, we place the key there
541  *   3. Otherwise cuckoo_hashtable will have to probe and make space
542  *
543  * To move keys around (open addressing hash table), cuckoo_hashtable needs to
544  * first find an available slot via cuckoo search. Here it uses breadth-first
545  * search to find the shortest path towards an empty bucket slot.
546  *
547  */
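/*
 * A hedged usage sketch (not from the original source; example_hash, ht
 * and fe are hypothetical, while the return conventions are the ones
 * implemented below):
 *
 *     uint32_t hash = example_hash(&fe->fe_key, sizeof(fe->fe_key));
 *
 *     // 0: inserted (the table took a retain on the node)
 *     // EEXIST: this very node is already chained under that hash
 *     int err = cuckoo_hashtable_add_with_hash(ht, &fe->fe_cnode, hash);
 *
 *     // find returns the node retained; the caller drops that reference
 *     struct cuckoo_node *n =
 *         cuckoo_hashtable_find_with_hash(ht, &fe->fe_key, hash);
 *     if (n != NULL) {
 *             // ... use the object ...
 *             flow_entry_release(n);
 *     }
 *
 *     // del drops the table's reference; ENOENT if the node is absent
 *     err = cuckoo_hashtable_del(ht, &fe->fe_cnode, hash);
 */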
548 static inline int
549 __add_to_bucket(struct cuckoo_hashtable *h, struct _bucket *b,
550     struct cuckoo_node *node, uint32_t hash)
551 {
552 	int ret = -1;
553 	uint8_t avail_i = _CHT_SLOT_INVAL;
554 
555 	__lock_bucket(b);
556 	if (b->_inuse == _CHT_BUCKET_SLOTS) {
557 		goto done;
558 	}
559 	for (uint8_t i = 0; i < _CHT_BUCKET_SLOTS; i++) {
560 		struct _slot *s = __bucket_slot(b, i);
561 		if (__slot_empty(s)) {
562 			if (avail_i == _CHT_SLOT_INVAL) {
563 				avail_i = i;
564 			}
565 		} else {
566 			/* chain to existing slot with same hash */
567 			if (__improbable(s->_hash == hash)) {
568 				ASSERT(s->_node != NULL);
569 				ret = cuckoo_node_chain(s->_node, node);
570 				if (ret != 0) {
571 					goto done;
572 				}
573 				cht_debug("hash %x node %p inserted [%zu][%d]",
574 				    hash, node, __bucket_idx(h, b), i);
575 				OSAddAtomic(1, &h->_n_entries);
576 				h->_obj_retain(node);
577 				goto done;
578 			}
579 		}
580 	}
581 	if (avail_i != _CHT_SLOT_INVAL) {
582 		h->_obj_retain(node);
583 		b->_slots[avail_i]._hash = hash;
584 		b->_slots[avail_i]._node = node;
585 		b->_inuse++;
586 		cht_debug("hash %x node %p inserted [%zu][%d]", hash, node,
587 		    __bucket_idx(h, b), avail_i);
588 		OSAddAtomic(1, &h->_n_entries);
589 		ret = 0;
590 	}
591 done:
592 	__unlock_bucket(b);
593 	return ret;
594 }
595 
596 #define _CHT_BFS_QUEUE_LEN      UINT8_MAX
597 #define _CHT_BFS_QUEUE_END      (_CHT_BFS_QUEUE_LEN - _CHT_BUCKET_SLOTS)
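/*
 * Note (inferred from the code below): each visited bucket may enqueue up
 * to _CHT_BUCKET_SLOTS children, so the search stops expanding once tail
 * reaches _CHT_BFS_QUEUE_END to keep the queue from overflowing;
 * _CHT_BFS_QUEUE_LEN itself doubles as the "no parent" sentinel of the
 * root entry.
 */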
598 
599 struct _bfs_node {
600 	uint32_t        bkt_idx;
601 	uint8_t         prev_node_idx;
602 	uint8_t         prev_slot_idx;
603 };
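/*
 * Illustration (not from the original source; the bucket and slot numbers
 * are made up): every queue entry records which queue entry
 * (prev_node_idx) and which slot of that entry's bucket (prev_slot_idx)
 * it was reached through, so a discovered empty slot can be traced back
 * to the root:
 *
 *     queue[0] = { bkt 12, prev_node 255 (root), prev_slot inval }
 *     queue[3] = { bkt 57, prev_node 0, prev_slot 2 }  // alt of bkt12[2]
 *     queue[9] = { bkt 33, prev_node 3, prev_slot 5 }  // alt of bkt57[5]
 *
 * An empty slot found in bkt 33 means: move bkt57[5] into it, move
 * bkt12[2] into bkt57[5], and insert the new node at bkt12[2].
 * cuckoo_move below performs exactly this backward walk.
 */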
604 
605 /*
606  * Move slots backwards on cuckoo path
607  *
608  * cuckoo_move holds at most 2 locks at any time, moving from
609  * the end of the cuckoo path toward the bucket where the new key should
610  * be stored. There is a chance of deadlock when multiple writers have
611  * overlapping cuckoo paths. We could arrange the order of locking to
612  * avoid that, but then we would have to take all the locks upfront,
613  * which is not friendly to concurrent readers. So instead, we take the
614  * locks one by one (still holding at most 2 at any time), with backoff
615  * in mind.
616  */
617 static int
618 cuckoo_move(struct cuckoo_hashtable *h, struct cuckoo_node *node,
619     uint32_t hash, struct _bfs_node *queue, uint8_t leaf_node_idx,
620     uint8_t leaf_slot)
621 {
622 	struct _bfs_node *prev_node, *curr_node;
623 	struct _bucket *from_bkt, *to_bkt, *alt_bkt;
624 	uint8_t from_slot, to_slot;
625 
626 	curr_node = &queue[leaf_node_idx];
627 	to_bkt = __get_bucket(h, curr_node->bkt_idx);
628 	to_slot = leaf_slot;
629 
630 	__lock_bucket(to_bkt);
631 
632 	while (__probable(curr_node->prev_node_idx != _CHT_BFS_QUEUE_LEN)) {
633 		prev_node = &queue[curr_node->prev_node_idx];
634 		from_bkt = __get_bucket(h, prev_node->bkt_idx);
635 		from_slot = curr_node->prev_slot_idx;
636 
637 		if (!__lock_bucket_with_backoff(from_bkt)) {
638 			/* a deadlock or a sleeping thread holding the lock */
639 			__unlock_bucket(to_bkt);
640 			cht_warn("cuckoo move deadlock detected");
641 			return EINVAL;
642 		}
643 
644 		/*
645 		 * Verify cuckoo path by checking:
646 		 * 1. from_bkt[from_slot]'s alternative bucket is still to_bkt
647 		 * 2. to_bkt[to_slot] is still vacant
648 		 */
649 		alt_bkt = __alt_bucket(h, from_bkt->_slots[from_slot]._hash);
650 		if (alt_bkt != to_bkt ||
651 		    !__slot_empty(__bucket_slot(to_bkt, to_slot))) {
652 			__unlock_bucket(from_bkt);
653 			__unlock_bucket(to_bkt);
654 			cht_warn("cuckoo move path invalid: %s %s",
655 			    alt_bkt != to_bkt ? "alt_bkt != to_bkt" : "",
656 			    !__slot_empty(__bucket_slot(to_bkt, to_slot)) ?
657 			    "!slot_empty(to_bkt, to_slot)" : "");
658 			return EINVAL;
659 		}
660 
661 		cht_log(CHTV_DEBUG, "Move [0x%llx][%d] to [0x%llx][%d]",
662 		    from_bkt - h->_buckets, from_slot, to_bkt - h->_buckets,
663 		    to_slot);
664 
665 		ASSERT(to_bkt->_slots[to_slot]._node == NULL);
666 		ASSERT(to_bkt->_slots[to_slot]._hash == 0);
667 
668 		/* move entry backward */
669 		to_bkt->_slots[to_slot] = from_bkt->_slots[from_slot];
670 		to_bkt->_inuse++;
671 		__slot_reset(&from_bkt->_slots[from_slot]);
672 		from_bkt->_inuse--;
673 
674 		__unlock_bucket(to_bkt);
675 
676 		curr_node = prev_node;
677 		to_bkt = from_bkt;
678 		to_slot = from_slot;
679 	}
680 
681 	ASSERT(curr_node->prev_node_idx == _CHT_BFS_QUEUE_LEN);
682 	ASSERT(curr_node->prev_slot_idx == _CHT_SLOT_INVAL);
683 
684 	/* if root slot is no longer valid */
685 	if (to_bkt->_slots[to_slot]._node != NULL) {
686 		__unlock_bucket(to_bkt);
687 		return EINVAL;
688 	}
689 
690 	to_bkt->_inuse++;
691 	__slot_set(&to_bkt->_slots[to_slot], hash, node);
692 	h->_obj_retain(node);
693 	__unlock_bucket(to_bkt);
694 
695 	OSAddAtomic(1, &h->_n_entries);
696 
697 	cht_debug("hash %x node %p inserted at [%zu][%d]", hash, node,
698 	    __bucket_idx(h, to_bkt), to_slot);
699 
700 	return 0;
701 }
702 
703 static int
704 cuckoo_probe(struct cuckoo_hashtable *h, struct cuckoo_node *node,
705     uint32_t hash)
706 {
707 	struct _bfs_node queue[_CHT_BFS_QUEUE_LEN];
708 	uint8_t head, tail;
709 	struct _bucket *b;
710 	uint8_t avail_i;
711 	int ret = ENOMEM;
712 
713 	/* probe starts from its primary bucket */
714 	queue[0].bkt_idx = hash & h->_bitmask;
715 	queue[0].prev_node_idx = _CHT_BFS_QUEUE_LEN;
716 	queue[0].prev_slot_idx = _CHT_SLOT_INVAL;
717 
718 	head = 0;
719 	tail = 1;
720 
721 	while (__probable(tail != head && tail < _CHT_BFS_QUEUE_END)) {
722 		b = __get_bucket(h, queue[head].bkt_idx);
723 		avail_i = _CHT_SLOT_INVAL;
724 		for (uint8_t i = 0; i < _CHT_BUCKET_SLOTS; i++) {
725 			struct _slot *s = __bucket_slot(b, i);
726 			if (__slot_empty(s)) {
727 				if (avail_i == _CHT_SLOT_INVAL) {
728 					avail_i = i;
729 				}
730 				continue;
731 			}
732 
733 			/*
734 			 * Another node with the same hash could have been probed
735 			 * into this bucket; chain to it.
736 			 */
737 			if (__improbable(s->_hash == hash)) {
738 				ASSERT(s->_node != NULL);
739 				ret = cuckoo_node_chain(s->_node, node);
740 				if (ret != 0) {
741 					goto done;
742 				}
743 				cht_debug("hash %x node %p inserted [%zu][%d]",
744 				    hash, node, __bucket_idx(h, b), i);
745 				OSAddAtomic(1, &h->_n_entries);
746 				h->_obj_retain(node);
747 				goto done;
748 			}
749 
750 			queue[tail].bkt_idx = __alt_hash(s->_hash) & h->_bitmask;
751 			queue[tail].prev_node_idx = head;
752 			queue[tail].prev_slot_idx = i;
753 			tail++;
754 		}
755 
756 		if (avail_i != _CHT_SLOT_INVAL) {
757 			ret = cuckoo_move(h, node, hash, queue, head, avail_i);
758 			if (ret == 0) {
759 				goto done;
760 			} else if (ret == EINVAL) {
761 				cht_warn("cuckoo path invalidated");
762 				goto skip;
763 			} else {
764 				cht_err("failed: unknown err %d", ret);
765 				goto done;
766 			}
767 		}
768 skip:
769 		head++;
770 	}
771 
772 	if (tail == head || tail >= _CHT_BFS_QUEUE_END) {
773 		cht_warn("failed: cuckoo probe out of search space "
774 		    "head %d tail %d (%d/%d, load factor %d%%)", head, tail,
775 		    h->_n_entries, h->_capacity,
776 		    cuckoo_hashtable_load_factor(h));
777 		ret = ENOSPC;
778 	} else {
779 		cht_warn("failed: cuckoo probe path invalidated "
780 		    "(%d/%d, load factor %d%%)", h->_n_entries, h->_capacity,
781 		    cuckoo_hashtable_load_factor(h));
782 		ret = EAGAIN;
783 	}
784 done:
785 	return ret;
786 }
787 
788 static inline void
789 __foreach_node(struct cuckoo_hashtable *h, bool wlocked,
790     void (^node_handler)(struct cuckoo_node *, uint32_t hash))
791 {
792 	if (!wlocked) {
793 		__rlock_table(h);
794 	}
795 	for (uint32_t i = 0; i < h->_n_buckets; i++) {
796 		struct _bucket *b = &h->_buckets[i];
797 		if (b->_inuse == 0) {
798 			continue;
799 		}
800 		if (!wlocked) {
801 			__lock_bucket(b);
802 		}
803 		for (uint32_t j = 0; j < _CHT_BUCKET_SLOTS; j++) {
804 			struct _slot *s = __bucket_slot(b, j);
805 			struct cuckoo_node *node = NULL, *next_node = NULL;
806 			node = s->_node;
807 			while (node != NULL) {
808 				next_node = cuckoo_node_next(node);
809 				node_handler(node, s->_hash);
810 				node = next_node;
811 			}
812 		}
813 		if (!wlocked) {
814 			__unlock_bucket(b);
815 		}
816 	}
817 	if (!wlocked) {
818 		__unrlock_table(h);
819 	}
820 }
821 
822 void
823 cuckoo_hashtable_foreach(struct cuckoo_hashtable *h,
824     void (^node_handler)(struct cuckoo_node *, uint32_t hash))
825 {
826 	__foreach_node(h, false, node_handler);
827 }
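/*
 * A hedged usage sketch (not from the original source; the histogram is
 * hypothetical): the handler block runs once per node with the bucket
 * lock held, so it should not call back into the table:
 *
 *     cuckoo_hashtable_foreach(ht, ^(struct cuckoo_node *node, uint32_t hash) {
 *             histogram[hash & 0xff]++;
 *     });
 */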
828 
829 static void
830 cuckoo_dummy_retain(struct cuckoo_node *node)
831 {
832 #pragma unused(node)
833 }
834 
835 static void
836 cuckoo_dummy_release(struct cuckoo_node *node)
837 {
838 #pragma unused(node)
839 }
840 
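/*
 * Note (summarizing cuckoo_resize below): resize rehashes every node into
 * a temporary table using the dummy retain/release callbacks above, so
 * object reference counts are left untouched while nodes change buckets;
 * the rebuilt bucket array is then spliced into the live table under the
 * exclusive resize lock.
 */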
841 static int
842 cuckoo_resize(struct cuckoo_hashtable *h, enum cuckoo_resize_ops option)
843 {
844 	int ret = 0;
845 
846 	/* backoff from concurrent expansion */
847 	do {
848 		ret = __resize_begin(h);
849 		if (ret == EAGAIN) {
850 			cht_info("resize done by peer");
851 			return EAGAIN;
852 		}
853 	} while (ret == EINTR);
854 
855 	uint32_t curr_capacity = h->_n_buckets * _CHT_BUCKET_SLOTS;
856 	uint32_t curr_load = (100 * h->_n_entries) / curr_capacity;
857 	uint32_t curr_buckets = h->_n_buckets;
858 	uint32_t new_capacity;
859 	__block size_t add_called = 0;
860 
861 	/* check load factor to ensure the resize is actually warranted */
862 	if (option == _CHT_RESIZE_EXPAND) {
863 		if (curr_load < _CHT_MIN_LOAD_EXPAND) {
864 			cht_warn("Warning: early expand at %u load", curr_load);
865 		}
866 		new_capacity = curr_capacity * 2;
867 	} else {
868 		if (curr_load > _CHT_MAX_LOAD_SHRINK ||
869 		    curr_capacity == h->_rcapacity) {
870 			goto done;
871 		}
872 		new_capacity = curr_capacity / 2;
873 	}
874 
875 	cht_info("resize %d/(%d -> %d)", h->_n_entries,
876 	    curr_capacity, new_capacity);
877 
878 	struct cuckoo_hashtable_params new_p = {
879 		.cht_capacity = new_capacity,
880 		.cht_obj_cmp = h->_obj_cmp,
881 		.cht_obj_retain = cuckoo_dummy_retain,
882 		.cht_obj_release = cuckoo_dummy_release,
883 	};
884 	struct cuckoo_hashtable *tmp_h;
885 	tmp_h = cuckoo_hashtable_create(&new_p);
886 	if (tmp_h == NULL) {
887 		ret = ENOMEM;
888 		goto done;
889 	}
890 
891 	__foreach_node(h, true, ^(struct cuckoo_node *node, uint32_t hash) {
892 		int error = 0;
893 		cuckoo_node_set_next(node, NULL);
894 		error = cuckoo_hashtable_add_with_hash(tmp_h, node, hash);
895 		ASSERT(error == 0);
896 		add_called++;
897 	});
898 
899 	if (__improbable(cuckoo_hashtable_entries(h) !=
900 	    cuckoo_hashtable_entries(tmp_h))) {
901 		panic("h %zu add_called %zu tmp_h %zu",
902 		    cuckoo_hashtable_entries(h), add_called,
903 		    cuckoo_hashtable_entries(tmp_h));
904 	}
905 
906 	for (uint32_t i = 0; i < h->_n_buckets; i++) {
907 		lck_mtx_destroy(&h->_buckets[i]._lock, &cht_lock_group);
908 	}
909 	h->_n_buckets = tmp_h->_n_buckets;
910 	h->_capacity = h->_n_buckets * _CHT_BUCKET_SLOTS;
911 	h->_bitmask = tmp_h->_bitmask;
912 	sk_free_type_array(struct _bucket, curr_buckets, h->_buckets);
913 
914 	h->_buckets = tmp_h->_buckets;
915 	lck_rw_destroy(&tmp_h->_resize_lock, &cht_lock_group);
916 	lck_mtx_destroy(&tmp_h->_lock, &cht_lock_group);
917 	sk_free_type(struct cuckoo_hashtable, tmp_h);
918 
919 done:
920 	__resize_end(h);
921 
922 	return ret;
923 }
924 
925 static inline int
926 cuckoo_add_no_expand(struct cuckoo_hashtable *h,
927     struct cuckoo_node *node, uint32_t hash)
928 {
929 	struct _bucket *b1, *b2;
930 	int ret = -1;
931 
932 	__rlock_table(h);
933 
934 	b1 = __prim_bucket(h, hash);
935 	if ((ret = __add_to_bucket(h, b1, node, hash)) == 0) {
936 		goto done;
937 	}
938 
939 	b2 = __alt_bucket(h, hash);
940 	if ((ret = __add_to_bucket(h, b2, node, hash)) == 0) {
941 		goto done;
942 	}
943 
944 	ret = cuckoo_probe(h, node, hash);
945 done:
946 	__unrlock_table(h);
947 	return ret;
948 }
949 
950 int
951 cuckoo_hashtable_add_with_hash(struct cuckoo_hashtable *h,
952     struct cuckoo_node *node, uint32_t hash)
953 {
954 	int ret;
955 
956 	/* node must arrive unchained to avoid a non-terminating tail */
957 	ASSERT(cuckoo_node_next(node) == NULL);
958 
959 	ret = cuckoo_add_no_expand(h, node, hash);
960 	if (ret == ENOSPC) {
961 		do {
962 			ret = cuckoo_resize(h, _CHT_RESIZE_EXPAND);
963 			if (ret != 0 && ret != EAGAIN) {
964 				break;
965 			}
966 			// this could still fail, if other threads added
967 			// enough objects that another resize is needed
968 			ret = cuckoo_add_no_expand(h, node, hash);
969 		} while (ret == ENOSPC);
970 	}
971 
972 	return ret;
973 }
974 
975 static inline int
976 __del_from_bucket(struct cuckoo_hashtable *h, struct _bucket *b,
977     struct cuckoo_node *node, uint32_t hash)
978 {
979 	uint32_t i;
980 
981 	__lock_bucket(b);
982 	for (i = 0; i < _CHT_BUCKET_SLOTS; i++) {
983 		if (b->_slots[i]._hash == hash) {
984 			if (cuckoo_node_del(&b->_slots[i]._node, node)) {
985 				h->_obj_release(node);
986 				OSAddAtomic(-1, &h->_n_entries);
987 				if (__slot_empty(__bucket_slot(b, i))) {
988 					b->_slots[i]._hash = 0;
989 					b->_inuse--;
990 				}
991 				__unlock_bucket(b);
992 				return 0;
993 			}
994 		}
995 	}
996 	__unlock_bucket(b);
997 	return ENOENT;
998 }
999 
1000 int
1001 cuckoo_hashtable_del(struct cuckoo_hashtable *h,
1002     struct cuckoo_node *node, uint32_t hash)
1003 {
1004 	struct _bucket *b1, *b2;
1005 	int ret = -1;
1006 
1007 	__rlock_table(h);
1008 
1009 	b1 = __prim_bucket(h, hash);
1010 	if ((ret = __del_from_bucket(h, b1, node, hash)) == 0) {
1011 		goto done;
1012 	}
1013 
1014 	b2 = __alt_bucket(h, hash);
1015 	if ((ret = __del_from_bucket(h, b2, node, hash)) == 0) {
1016 		goto done;
1017 	}
1018 
1019 done:
1020 	if (ret == 0) {
1021 		cuckoo_node_set_next(node, NULL);
1022 	}
1023 	__unrlock_table(h);
1024 	return ret;
1025 }
1026 
1027 void
1028 cuckoo_hashtable_try_shrink(struct cuckoo_hashtable *h)
1029 {
1030 	cuckoo_resize(h, _CHT_RESIZE_SHRINK);
1031 }
1032 
1033 #if (DEVELOPMENT || DEBUG)
1034 
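/* Floyd's two-pointer (tortoise and hare) cycle detection over a chain */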
1035 static inline bool
1036 cuckoo_node_looped(struct cuckoo_node *node)
1037 {
1038 	struct cuckoo_node *runner = node;
1039 
1040 	if (node == NULL) {
1041 		return false;
1042 	}
1043 
1044 	while (runner->next && runner->next->next) {
1045 		runner = runner->next->next;
1046 		node = node->next;
1047 
1048 		if (runner == node) {
1049 			return true;
1050 		}
1051 	}
1052 	return false;
1053 }
1054 
1055 int
1056 cuckoo_hashtable_health_check(struct cuckoo_hashtable *h)
1057 {
1058 	uint32_t hash;
1059 	uint32_t i, j;
1060 	struct _bucket *b;
1061 	struct cuckoo_node *node;
1062 	bool healthy = true;
1063 	uint32_t seen = 0;
1064 
1065 	__wlock_table(h);
1066 
1067 	for (i = 0; i < h->_n_buckets; i++) {
1068 		b = &h->_buckets[i];
1069 		uint8_t inuse = 0;
1070 		for (j = 0; j < _CHT_BUCKET_SLOTS; j++) {
1071 			hash = b->_slots[j]._hash;
1072 			node = b->_slots[j]._node;
1073 			if (node != NULL) {
1074 				inuse++;
1075 			}
1076 			while (node != NULL) {
1077 				seen++;
1078 				if ((__prim_bucket(h, hash) != b) &&
1079 				    (__alt_bucket(h, hash) != b)) {
1080 					panic("[%d][%d] stray hash %x node %p",
1081 					    i, j, hash, node);
1082 					healthy = false;
1083 				}
1084 
1085 				if (cuckoo_node_looped(node)) {
1086 					panic("[%d][%d] looped hash %x node %p",
1087 					    i, j, hash, node);
1088 					healthy = false;
1089 				}
1090 				node = cuckoo_node_next(node);
1091 			}
1092 		}
1093 		ASSERT(inuse == b->_inuse);
1094 	}
1095 
1096 	if (seen != h->_n_entries) {
1097 		panic("seen %d != n_entries %d", seen, h->_n_entries);
1098 	}
1099 
1100 	__unwlock_table(h);
1101 
1102 	if (!healthy) {
1103 		cht_err("table unhealthy");
1104 		return -1;
1105 	} else {
1106 		return 0;
1107 	}
1108 }
1109 
1110 void
1111 cuckoo_hashtable_dump(struct cuckoo_hashtable *h)
1112 {
1113 	uint32_t hash;
1114 	struct cuckoo_node *node;
1115 	uint32_t i, j;
1116 	struct _bucket *b;
1117 
1118 	cuckoo_hashtable_health_check(h);
1119 
1120 	for (i = 0; i < h->_n_buckets; i++) {
1121 		printf("%d\t", i);
1122 		b = &h->_buckets[i];
1123 		for (j = 0; j < _CHT_BUCKET_SLOTS; j++) {
1124 			hash = b->_slots[j]._hash;
1125 			node = b->_slots[j]._node;
1126 			printf("0x%08x(%p) ", hash, node);
1127 		}
1128 		printf("\n");
1129 	}
1130 }
1131 #endif /* DEVELOPMENT || DEBUG */
1132