xref: /xnu-8020.101.4/bsd/skywalk/lib/cuckoo_hashtable_test.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2018-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #if (DEVELOPMENT || DEBUG)
30 
31 #pragma clang optimize off
32 
33 #include <libkern/OSAtomic.h>
34 #include <os/refcnt.h>
35 #include <skywalk/os_skywalk_private.h>
36 #include <skywalk/lib/cuckoo_hashtable.h>
37 
/* allocation tag used for all test allocations in this file */
#define CUCKOO_TEST_TAG "com.apple.skywalk.libcuckoo.test"
kern_allocation_name_t cuckoo_test_tag;

/* refcount group for the test objects' os_refcnt_t */
os_refgrp_decl(static, cht_obj_refgrp, "CuckooTestRefGroup", NULL);

static void cuckoo_test_start(void *, wait_result_t);
static void cuckoo_test_stop(void *, wait_result_t);

extern unsigned int ml_wait_max_cpus(void);

// threading related
static int cht_inited = 0;      // one-time init latch (see cht_test_init())
static int cht_enabled;         // sysctl-visible state toggled by start/stop
static int cht_busy;            // a test instance is currently active

decl_lck_mtx_data(static, cht_lock);

static struct cuckoo_hashtable *h = NULL;       // hashtable under test
56 
/* per-worker-thread configuration for the concurrent tests */
struct cht_thread_conf {
	thread_t        ctc_thread;     /* thread instance */
	uint32_t        ctc_nthreads;   /* total number of worker threads */
	uint32_t        ctc_id;         /* this thread's id, 0..nthreads-1 */
} __attribute__((aligned(CHANNEL_CACHE_ALIGN_MAX)));
62 
63 static struct cht_thread_conf *chth_confs;
64 static uint32_t chth_nthreads;
65 static uint32_t chth_cnt;
66 static boolean_t chth_run;
67 
/*
 * Per-object insertion state, stored in cht_obj.co_state and driven with
 * atomic compare-and-swap by the concurrent tests.
 *
 * Fixed: the original `enum { ... } co_state_t;` declared a tentative
 * global VARIABLE named co_state_t (never used), not a type; the
 * co_state field comment uses co_state_t as a type, so make it one.
 */
typedef enum {
	COS_NOT_ADDED = 0,      /* not inserted, available for insertion */
	COS_BUSY = -1,          /* being inserted/deleted */
	COS_ADDED = 1,          /* inserted, available for deletion */
} co_state_t;
73 
74 // Cuckoo hashtable key object
75 
/*
 * Test key object stored in the hashtable; embeds the cuckoo_node
 * linkage and carries its own key, hash, refcount and test state.
 */
struct cht_obj {
	struct cuckoo_node      co_cnode;       // cuckoo node
	int64_t                 co_key;         // unique key
	uint32_t                co_hash;        // dummy hash value (not collision-free)
	os_refcnt_t             co_refcnt;      // reference count
	volatile int32_t        co_state;       // co_state_t value, CAS-driven
	uint32_t                co_seen;        // number of times seen during table walks
};
84 
/* fewer test objects on watchOS, presumably to bound memory use */
#if PLATFORM_WatchOS
static const uint32_t CHT_OBJ_MAX = 16 * 1024;
#else /* PLATFORM_WatchOS */
static const uint32_t CHT_OBJ_MAX = 512 * 1024;
#endif /* !PLATFORM_WatchOS */
static struct cht_obj *cht_objs;        // array of CHT_OBJ_MAX test objects
91 
92 static int
cht_obj_cmp__(struct cuckoo_node * node,void * key)93 cht_obj_cmp__(struct cuckoo_node *node, void *key)
94 {
95 	struct cht_obj *co = container_of(node, struct cht_obj, co_cnode);
96 	int64_t key1 = *(int64_t *)key;
97 
98 	if (co->co_key < key1) {
99 		return -1;
100 	} else if (co->co_key > key1) {
101 		return 1;
102 	}
103 
104 	return 0;
105 }
106 
107 static void
cht_obj_retain(struct cht_obj * co)108 cht_obj_retain(struct cht_obj *co)
109 {
110 	(void)os_ref_retain(&co->co_refcnt);
111 }
112 
113 static void
cht_obj_retain__(struct cuckoo_node * node)114 cht_obj_retain__(struct cuckoo_node *node)
115 {
116 	struct cht_obj *co = container_of(node, struct cht_obj, co_cnode);
117 	return cht_obj_retain(co);
118 }
119 
/*
 * Drop a reference on a test object; the returned count is deliberately
 * ignored here (the final release is checked in cht_obj_fini()).
 */
static void
cht_obj_release(struct cht_obj *co)
{
	(void)os_ref_release(&co->co_refcnt);
}
125 
126 static void
cht_obj_release__(struct cuckoo_node * node)127 cht_obj_release__(struct cuckoo_node *node)
128 {
129 	struct cht_obj *co = container_of(node, struct cht_obj, co_cnode);
130 	cht_obj_release(co);
131 }
132 
133 static int
cht_obj_refcnt(struct cht_obj * co)134 cht_obj_refcnt(struct cht_obj *co)
135 {
136 	return os_ref_get_count(&co->co_refcnt);
137 }
138 
/*
 * Default table parameters shared by all tests; individual tests copy
 * and tweak as needed (see cht_concurrent_duo_init()).
 */
static struct cuckoo_hashtable_params params_template = {
	.cht_capacity = 1024,
	.cht_obj_cmp = cht_obj_cmp__,
	.cht_obj_retain = cht_obj_retain__,
	.cht_obj_release = cht_obj_release__,
};
145 
/*
 * One-time module init: the compare-and-swap latch on cht_inited
 * guarantees the mutex and allocation tag are set up exactly once even
 * if this is called repeatedly.
 */
void
cht_test_init(void)
{
	if (OSCompareAndSwap(0, 1, &cht_inited)) {
		lck_mtx_init(&cht_lock, &sk_lock_group, &sk_lock_attr);

		ASSERT(cuckoo_test_tag == NULL);
		cuckoo_test_tag = kern_allocation_name_allocate(CUCKOO_TEST_TAG, 0);
		ASSERT(cuckoo_test_tag != NULL);
	}
}
157 
/*
 * Module teardown: destroys the mutex created in cht_test_init().
 * NOTE(review): cuckoo_test_tag is not released and cht_inited is not
 * reset here — confirm whether the tag is intended to live for the
 * kernel's lifetime.
 */
void
cht_test_fini(void)
{
	lck_mtx_destroy(&cht_lock, &sk_lock_group);
}
163 
164 static void
cht_obj_init()165 cht_obj_init()
166 {
167 	// init testing objects
168 	cht_objs = sk_alloc_type_array(struct cht_obj, CHT_OBJ_MAX,
169 	    Z_WAITOK, cuckoo_test_tag);
170 	VERIFY(cht_objs != NULL);
171 
172 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
173 		cht_objs[i].co_key = i;
174 		do {
175 			read_random(&cht_objs[i].co_hash, sizeof(cht_objs[i].co_hash));
176 		} while (cht_objs[i].co_hash == 0);
177 		os_ref_init(&cht_objs[i].co_refcnt, &cht_obj_refgrp);
178 		cht_objs[i].co_state = COS_NOT_ADDED;
179 	}
180 }
181 
182 static void
cht_obj_fini()183 cht_obj_fini()
184 {
185 	VERIFY(cht_objs != NULL);
186 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
187 		ASSERT(os_ref_release(&cht_objs[i].co_refcnt) == 0);
188 		cht_objs[i].co_state = COS_NOT_ADDED;
189 		cht_objs[i].co_seen = 0;
190 	}
191 	// init testing objects
192 	sk_free_type_array(struct cht_obj, CHT_OBJ_MAX, cht_objs);
193 }
194 
195 static void
cht_basic_tests(void)196 cht_basic_tests(void)
197 {
198 	SK_ERR("start");
199 
200 	// Cuckoo hashtable creation
201 	h = cuckoo_hashtable_create(&params_template);
202 
203 	// basic add/del
204 	struct cht_obj co1 = {
205 		.co_cnode = {NULL},
206 		.co_key = -1,
207 		.co_hash = 1,
208 		.co_state = COS_NOT_ADDED,
209 		.co_seen = 0
210 	};
211 	struct cht_obj co2 = {
212 		.co_cnode = {NULL},
213 		.co_key = -2,
214 		.co_hash = 1,
215 		.co_state = COS_NOT_ADDED,
216 		.co_seen = 0
217 	};
218 	os_ref_init(&co1.co_refcnt, &cht_obj_refgrp);
219 	os_ref_init(&co2.co_refcnt, &cht_obj_refgrp);
220 
221 	struct cuckoo_node *node = NULL;
222 	__block struct cht_obj *co = NULL;
223 	int error = 0;
224 
225 	// add objs with duplicate hash
226 	error = cuckoo_hashtable_add_with_hash(h, &co1.co_cnode, co1.co_hash);
227 	ASSERT(error == 0);
228 
229 	error = cuckoo_hashtable_add_with_hash(h, &co2.co_cnode, co2.co_hash);
230 	ASSERT(error == 0);
231 
232 	ASSERT(cuckoo_hashtable_entries(h) == 2);
233 
234 	node = cuckoo_hashtable_find_with_hash(h, &co1.co_key, co1.co_hash);
235 	ASSERT(node != NULL);
236 	ASSERT(node == &co1.co_cnode);
237 
238 	node = cuckoo_hashtable_find_with_hash(h, &co2.co_key, co2.co_hash);
239 	ASSERT(node != NULL);
240 	ASSERT(node == &co2.co_cnode);
241 
242 	cuckoo_hashtable_del(h, &co1.co_cnode, co1.co_hash);
243 
244 	node = cuckoo_hashtable_find_with_hash(h, &co1.co_key, co1.co_hash);
245 	ASSERT(node == NULL);
246 
247 	node = cuckoo_hashtable_find_with_hash(h, &co2.co_key, co2.co_hash);
248 	ASSERT(node != NULL);
249 	ASSERT(node == &co2.co_cnode);
250 
251 	cuckoo_hashtable_del(h, &co2.co_cnode, co2.co_hash);
252 	node = cuckoo_hashtable_find_with_hash(h, &co2.co_key, co2.co_hash);
253 	ASSERT(node == NULL);
254 
255 	ASSERT(cuckoo_hashtable_entries(h) == 0);
256 
257 	// add all objs
258 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
259 		co = &cht_objs[i];
260 		error = cuckoo_hashtable_add_with_hash(h, &co->co_cnode, co->co_hash);
261 		ASSERT(error == 0);
262 		ASSERT(cuckoo_hashtable_entries(h) == i + 1);
263 		co->co_state = COS_ADDED;
264 	}
265 
266 	// find all objs
267 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
268 		co = &cht_objs[i];
269 		ASSERT(co->co_state = COS_ADDED);
270 		node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
271 		ASSERT(node != NULL);
272 		ASSERT(node == &co->co_cnode);
273 		ASSERT(cht_obj_refcnt(co) == 3);
274 		cht_obj_release(co);
275 	}
276 
277 	// walk all objs
278 	cuckoo_hashtable_foreach(h, ^(struct cuckoo_node *curr_node, uint32_t curr_hash) {
279 		co = container_of(curr_node, struct cht_obj, co_cnode);
280 		ASSERT(co->co_hash == curr_hash);
281 		ASSERT(cht_obj_refcnt(co) == 2);
282 		co->co_seen++;
283 	});
284 
285 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
286 		co = &cht_objs[i];
287 		ASSERT(co->co_seen == 1);
288 	}
289 
290 	size_t memory_use_before_shrink = cuckoo_hashtable_memory_footprint(h);
291 
292 	// del all objs
293 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
294 		co = &cht_objs[i];
295 		ASSERT(co->co_state = COS_ADDED);
296 		node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
297 		ASSERT(cht_obj_refcnt(co) == 3);
298 		cuckoo_hashtable_del(h, &co->co_cnode, co->co_hash);
299 		cht_obj_release(co);
300 		ASSERT(cht_obj_refcnt(co) == 1);
301 		ASSERT(cuckoo_hashtable_entries(h) == CHT_OBJ_MAX - i - 1);
302 		co->co_seen = 0;
303 	}
304 
305 	// shrink
306 	cuckoo_hashtable_try_shrink(h);
307 
308 	ASSERT(cuckoo_hashtable_memory_footprint(h) < memory_use_before_shrink);
309 
310 	// self healthy check
311 	cuckoo_hashtable_health_check(h);
312 
313 	cuckoo_hashtable_free(h);
314 
315 	SK_ERR("done");
316 }
317 
/*
 * Rendezvous entry for worker threads: announce readiness by bumping
 * chth_cnt, then block until the driver (cht_concurrent_tests()) sets
 * chth_run so all workers start at once.
 */
static void
cht_concurrent_ops_begin()
{
	/* let cht_concurrent_tests() know we're ready */
	lck_mtx_lock(&cht_lock);
	atomic_add_32(&chth_cnt, 1);
	wakeup((caddr_t)&chth_cnt);

	/* wait for the start signal; loop guards against spurious wakeups */
	do {
		(void) msleep(&chth_run, &cht_lock, (PZERO - 1),
		    "chthfuncw", NULL);
	} while (!chth_run);
	lck_mtx_unlock(&cht_lock);
}
332 
/*
 * Rendezvous exit for worker threads: drop our slot in chth_cnt and wake
 * the driver, which waits for the count to reach zero.
 */
static void
cht_concurrent_ops_done()
{
	/* let cht_concurrent_tests() know we're finished */
	lck_mtx_lock(&cht_lock);
	VERIFY(atomic_add_32_ov(&chth_cnt, -1) != 0);
	wakeup((caddr_t)&chth_cnt);
	lck_mtx_unlock(&cht_lock);
}
342 
/* concurrent-add setup: create an empty table with the default params */
static void
cht_concurrent_add_init(void)
{
	h = cuckoo_hashtable_create(&params_template);
}
348 
349 static void
cht_concurrent_add(void * v,wait_result_t w)350 cht_concurrent_add(void *v, wait_result_t w)
351 {
352 #pragma unused(v, w)
353 	cht_concurrent_ops_begin();
354 
355 	struct cht_thread_conf *conf = v;
356 	uint32_t objs_per_cpu = CHT_OBJ_MAX / conf->ctc_nthreads;
357 	uint32_t objs_start_idx = objs_per_cpu * conf->ctc_id;
358 	uint32_t objs_to_add = objs_per_cpu;
359 
360 	// last thread id add any tailing objs
361 	if (conf->ctc_id == conf->ctc_nthreads - 1) {
362 		objs_to_add += (CHT_OBJ_MAX % conf->ctc_nthreads);
363 	}
364 
365 	for (uint32_t i = 0; i < objs_to_add; i++) {
366 		struct cht_obj *co = &cht_objs[objs_start_idx + i];
367 		int error = cuckoo_hashtable_add_with_hash(h, &co->co_cnode, co->co_hash);
368 		ASSERT(error == 0);
369 		co->co_state = COS_ADDED;
370 
371 		struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
372 		ASSERT(node != NULL);
373 		ASSERT(node == &co->co_cnode);
374 		cht_obj_release(co);
375 	}
376 
377 	cht_concurrent_ops_done();
378 }
379 
380 static void
cht_concurrent_add_check()381 cht_concurrent_add_check()
382 {
383 	__block struct cht_obj *co = NULL;
384 	struct cuckoo_node *node = NULL;
385 
386 	// find all objs
387 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
388 		co = &cht_objs[i];
389 		ASSERT(co->co_state = COS_ADDED);
390 		node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
391 		ASSERT(node != NULL);
392 		ASSERT(node == &co->co_cnode);
393 		ASSERT(cht_obj_refcnt(co) == 3);
394 		cht_obj_release(co);
395 	}
396 
397 	// walk all objs
398 	cuckoo_hashtable_foreach(h, ^(struct cuckoo_node *curr_node, uint32_t curr_hash) {
399 		co = container_of(curr_node, struct cht_obj, co_cnode);
400 		ASSERT(co->co_hash == curr_hash);
401 		ASSERT(cht_obj_refcnt(co) == 2);
402 		co->co_seen++;
403 	});
404 
405 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
406 		co = &cht_objs[i];
407 		//ASSERT(co->co_seen == 1);
408 	}
409 }
410 
411 static void
cht_concurrent_add_fini(void)412 cht_concurrent_add_fini(void)
413 {
414 	struct cht_obj *co = NULL;
415 	struct cuckoo_node *node = NULL;
416 
417 	// del all objs
418 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
419 		co = &cht_objs[i];
420 		ASSERT(co->co_state = COS_ADDED);
421 		node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
422 		ASSERT(cht_obj_refcnt(co) == 3);
423 		cuckoo_hashtable_del(h, &co->co_cnode, co->co_hash);
424 		cht_obj_release(co);
425 		ASSERT(cht_obj_refcnt(co) == 1);
426 		ASSERT(cuckoo_hashtable_entries(h) == CHT_OBJ_MAX - i - 1);
427 	}
428 
429 	cuckoo_hashtable_free(h);
430 }
431 
432 
433 static void
cht_concurrent_del_init(void)434 cht_concurrent_del_init(void)
435 {
436 	h = cuckoo_hashtable_create(&params_template);
437 
438 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
439 		struct cht_obj *co = &cht_objs[i];
440 		int error = cuckoo_hashtable_add_with_hash(h, &co->co_cnode, co->co_hash);
441 		ASSERT(error == 0);
442 		ASSERT(cuckoo_hashtable_entries(h) == i + 1);
443 		co->co_state = COS_ADDED;
444 	}
445 }
446 
447 static void
cht_concurrent_del(void * v,wait_result_t w)448 cht_concurrent_del(void *v, wait_result_t w)
449 {
450 #pragma unused(v, w)
451 	cht_concurrent_ops_begin();
452 
453 	struct cht_thread_conf *conf = v;
454 	uint32_t objs_per_cpu = CHT_OBJ_MAX / conf->ctc_nthreads;
455 	uint32_t objs_start_idx = objs_per_cpu * conf->ctc_id;
456 	uint32_t objs_to_del = objs_per_cpu;
457 
458 	// last thread id add any tailing objs
459 	if (conf->ctc_id == conf->ctc_nthreads - 1) {
460 		objs_to_del += (CHT_OBJ_MAX % conf->ctc_nthreads);
461 	}
462 
463 	for (uint32_t i = 0; i < objs_to_del; i++) {
464 		struct cht_obj *co = &cht_objs[objs_start_idx + i];
465 		int error = cuckoo_hashtable_del(h, &co->co_cnode, co->co_hash);
466 		ASSERT(error == 0);
467 		co->co_state = COS_NOT_ADDED;
468 
469 		struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
470 		ASSERT(node == NULL);
471 		ASSERT(cht_obj_refcnt(co) == 1);
472 	}
473 
474 	cht_concurrent_ops_done();
475 }
476 
477 static void
cht_concurrent_del_check()478 cht_concurrent_del_check()
479 {
480 	ASSERT(cuckoo_hashtable_entries(h) == 0);
481 
482 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
483 		struct cht_obj *co = &cht_objs[i];
484 		struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
485 		ASSERT(node == NULL);
486 		ASSERT(cht_obj_refcnt(co) == 1);
487 	}
488 }
489 
/* concurrent-del teardown: the table is already empty, just free it */
static void
cht_concurrent_del_fini(void)
{
	cuckoo_hashtable_free(h);
}
495 
496 static void
cht_concurrent_duo_init(void)497 cht_concurrent_duo_init(void)
498 {
499 	struct cuckoo_hashtable_params p = params_template;
500 	p.cht_capacity = CHT_OBJ_MAX / 2;
501 	h = cuckoo_hashtable_create(&p);
502 
503 	// populate 1/3 of the objects
504 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i += 3) {
505 		struct cht_obj *co = &cht_objs[i];
506 		int error = cuckoo_hashtable_add_with_hash(h, &co->co_cnode, co->co_hash);
507 		ASSERT(error == 0);
508 		co->co_state = COS_ADDED;
509 	}
510 }
511 
/*
 * Worker thread body for the mixed test: for 2 * CHT_OBJ_MAX iterations,
 * pick a pseudo-random object and either insert it (if currently
 * COS_NOT_ADDED) or delete it (if currently COS_ADDED).  Ownership of an
 * object is claimed with an atomic state swap to COS_BUSY, so two
 * threads never operate on the same object at once.
 */
static void
cht_concurrent_duo(void *v, wait_result_t w)
{
#pragma unused(v, w)
#define DUO_ITERATIONS (2 * CHT_OBJ_MAX)

// low 4 bits of each random word select the operation; values below
// DUO_OPS_ADD (9 of 16) mean add, the rest mean delete
#define DUO_OPS_MASK   0x0000000f
#define DUO_OPS_ADD    0x9

// NOTE(review): DUO_IDX_MASK is never used, and DUO_IDX_SHIFT is 8 even
// though the mask suggests the index field starts at bit 4 — confirm
// whether the shift is intentional
#define DUO_IDX_MASK   0xfffffff0
#define DUO_IDX_SHIFT  0x8

	cht_concurrent_ops_begin();

	uint32_t *rands;
	rands = sk_alloc_data(sizeof(uint32_t) * DUO_ITERATIONS, Z_WAITOK, cuckoo_test_tag);
	VERIFY(rands != NULL);
	read_random(rands, sizeof(uint32_t) * DUO_ITERATIONS);

	for (uint32_t i = 0; i < DUO_ITERATIONS; i++) {
		uint32_t rand, ops, idx;
		rand = rands[i];
		ops = rand & DUO_OPS_MASK;
		idx = (rand >> DUO_IDX_SHIFT) % CHT_OBJ_MAX;

		// choose an op (add or del)
		if (ops < DUO_OPS_ADD) {
			struct cht_obj *co = &cht_objs[idx];
			// claim the object for insertion if nobody holds it
			if (atomic_test_set_32(&co->co_state, COS_NOT_ADDED, COS_BUSY)) {
				struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
				ASSERT(node == NULL);
				int error = cuckoo_hashtable_add_with_hash(h, &co->co_cnode, co->co_hash);
				ASSERT(error == 0);
				ASSERT(cht_obj_refcnt(co) == 2);

				co->co_state = COS_ADDED;
			}
		} else {
			struct cht_obj *co = &cht_objs[idx];
			// claim the object for deletion if it is in the table
			if (atomic_test_set_32(&co->co_state, COS_ADDED, COS_BUSY)) {
				struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
				ASSERT(node != NULL);
				ASSERT(node == &co->co_cnode);
				int error = cuckoo_hashtable_del(h, &co->co_cnode, co->co_hash);
				ASSERT(error == 0);
				// find's reference is still held here; drop it
				ASSERT(cht_obj_refcnt(co) == 2);
				cht_obj_release(co);

				co->co_state = COS_NOT_ADDED;
			}
		}
	}

	sk_free_data(rands, sizeof(uint32_t) * DUO_ITERATIONS);
	cht_concurrent_ops_done();
}
568 
569 static void
cht_concurrent_duo_check()570 cht_concurrent_duo_check()
571 {
572 	size_t added = 0;
573 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
574 		struct cht_obj *co = &cht_objs[i];
575 		if (co->co_state == COS_ADDED) {
576 			struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
577 			ASSERT(node != NULL);
578 			ASSERT(node == &co->co_cnode);
579 			added++;
580 			cht_obj_release(co);
581 		} else {
582 			struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
583 			ASSERT(node == NULL);
584 		}
585 	}
586 
587 	ASSERT(added == cuckoo_hashtable_entries(h));
588 }
589 
590 static void
cht_concurrent_duo_fini(void)591 cht_concurrent_duo_fini(void)
592 {
593 	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
594 		struct cht_obj *co = &cht_objs[i];
595 		if (co->co_state == COS_ADDED) {
596 			int error = cuckoo_hashtable_del(h, &co->co_cnode, co->co_hash);
597 			ASSERT(error == 0);
598 		}
599 	}
600 
601 	ASSERT(cuckoo_hashtable_entries(h) == 0);
602 
603 	cuckoo_hashtable_free(h);
604 }
605 
/*
 * Generic driver for the concurrent tests: spawns at least 2 (roughly
 * 3/4 of max CPUs) worker threads, waits for all of them to rendezvous,
 * releases them simultaneously, waits for completion, then verifies and
 * tears down.  The init/ops/check/fini callbacks are supplied by each
 * specific test (add-only, del-only, mixed).
 */
static void
cht_concurrent_tests(
	void (*cht_concurrent_init)(void),
	void (*cht_concurrent_ops)(void *v, wait_result_t w),
	void (*cht_concurrent_check)(void),
	void (*cht_concurrent_fini)(void))
{
	uint32_t nthreads = MAX(2, ml_wait_max_cpus() * 3 / 4);

	SK_ERR("start, nthreads %d", nthreads);

	cht_concurrent_init();

	// init multithread test config; reused across runs and freed in
	// cuckoo_test_stop()
	if (chth_confs == NULL) {
		chth_nthreads = nthreads;
		chth_confs = sk_alloc_type_array(struct cht_thread_conf, nthreads,
		    Z_WAITOK | Z_NOFAIL, cuckoo_test_tag);
	}

	for (uint32_t i = 0; i < nthreads; i++) {
		chth_confs[i].ctc_nthreads = nthreads;
		chth_confs[i].ctc_id = i;
		if (kernel_thread_start(cht_concurrent_ops, (void *)&chth_confs[i],
		    &chth_confs[i].ctc_thread) != KERN_SUCCESS) {
			panic("failed to create cuckoo test thread");
			__builtin_unreachable();
		}
	}

	// wait for threads to spawn; each bumps chth_cnt when ready
	lck_mtx_lock(&cht_lock);
	do {
		// 100ms timeout (tv_nsec field); re-check chth_cnt on wakeup
		struct timespec ts = { 0, 100 * USEC_PER_SEC };
		(void) msleep(&chth_cnt, &cht_lock, (PZERO - 1),
		    "skmtstartw", &ts);
	} while (chth_cnt < nthreads);
	VERIFY(chth_cnt == nthreads);
	lck_mtx_unlock(&cht_lock);

	// signal threads to run
	lck_mtx_lock(&cht_lock);
	VERIFY(!chth_run);
	chth_run = TRUE;
	wakeup((caddr_t)&chth_run);
	lck_mtx_unlock(&cht_lock);

	// wait until all threads are done (each decrements chth_cnt)
	lck_mtx_lock(&cht_lock);
	do {
		struct timespec ts = { 0, 100 * USEC_PER_SEC };
		(void) msleep(&chth_cnt, &cht_lock, (PZERO - 1),
		    "skmtstopw", &ts);
	} while (chth_cnt != 0);
	chth_run = FALSE;
	lck_mtx_unlock(&cht_lock);

	// check results
	cht_concurrent_check();

	cht_concurrent_fini();

	SK_ERR("done");
}
670 
/*
 * Test driver thread (spawned from the sysctl handler): runs the basic
 * tests followed by the three concurrent suites, then signals completion
 * to the waiting sysctl handler via cht_enabled.
 */
static void
cuckoo_test_start(void *v, wait_result_t w)
{
#pragma unused(v, w)
	lck_mtx_lock(&cht_lock);
	VERIFY(!cht_busy);
	cht_busy = 1;           // claim the single test instance
	lck_mtx_unlock(&cht_lock);

	cht_obj_init();

	cht_basic_tests();

	cht_concurrent_tests(cht_concurrent_add_init, cht_concurrent_add, cht_concurrent_add_check, cht_concurrent_add_fini);
	cht_concurrent_tests(cht_concurrent_del_init, cht_concurrent_del, cht_concurrent_del_check, cht_concurrent_del_fini);
	cht_concurrent_tests(cht_concurrent_duo_init, cht_concurrent_duo, cht_concurrent_duo_check, cht_concurrent_duo_fini);

	// wake sysctl_cuckoo_test(), which sleeps on &cht_enabled
	lck_mtx_lock(&cht_lock);
	cht_enabled = 1;
	wakeup((caddr_t)&cht_enabled);
	lck_mtx_unlock(&cht_lock);
}
693 
/*
 * Teardown thread (spawned from the sysctl handler): frees the worker
 * config array and the test objects, clears the busy/enabled state, and
 * wakes the waiting sysctl handler.
 */
static void
cuckoo_test_stop(void *v, wait_result_t w)
{
#pragma unused(v, w)

	if (chth_confs != NULL) {
		sk_free_type_array(struct cht_thread_conf, chth_nthreads, chth_confs);
		chth_confs = NULL;
		chth_nthreads = 0;
	}

	cht_obj_fini();

	lck_mtx_lock(&cht_lock);
	VERIFY(cht_busy);
	cht_busy = 0;
	cht_enabled = 0;
	wakeup((caddr_t)&cht_enabled);
	lck_mtx_unlock(&cht_lock);
}
714 
/*
 * Sysctl handler: writing a non-zero value spawns cuckoo_test_start(),
 * zero spawns cuckoo_test_stop(); the handler then sleeps on
 * &cht_enabled until the action thread reports completion (or the
 * sleep is interrupted).
 */
static int
sysctl_cuckoo_test(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, newvalue, changed;
	thread_t th;
	thread_continue_t func;

	lck_mtx_lock(&cht_lock);
	if ((error = sysctl_io_number(req, cht_enabled, sizeof(int),
	    &newvalue, &changed)) != 0) {
		SK_ERR("failed to get new sysctl value");
		goto done;
	}

	if (changed && cht_enabled != newvalue) {
		// refuse to start while a previous run is still active
		if (newvalue && cht_busy) {
			SK_ERR("previous cuckoo test instance is still active");
			error = EBUSY;
			goto done;
		}

		if (newvalue) {
			func = cuckoo_test_start;
		} else {
			func = cuckoo_test_stop;
		}

		if (kernel_thread_start(func, NULL, &th) != KERN_SUCCESS) {
			SK_ERR("failed to create cuckoo test action thread");
			error = EBUSY;
			goto done;
		}
		do {
			SK_ERR("waiting for %s to complete",
			    newvalue ? "startup" : "shutdown");
			error = msleep(&cht_enabled, &cht_lock,
			    PWAIT | PCATCH, "skmtw", NULL);
			/* BEGIN CSTYLED */
			/*
			 * Loop exit conditions:
			 *   - we were interrupted
			 *     OR
			 *   - we are starting up and are enabled
			 *     (Startup complete)
			 *     OR
			 *   - we are starting up and are not busy
			 *     (Failed startup)
			 *     OR
			 *   - we are shutting down and are not busy
			 *     (Shutdown complete)
			 */
			/* END CSTYLED */
		} while (!((error == EINTR) || (newvalue && cht_enabled) ||
		    (newvalue && !cht_busy) || (!newvalue && !cht_busy)));

		SK_ERR("exited from msleep");
		thread_deallocate(th);
	}

done:
	lck_mtx_unlock(&cht_lock);
	return error;
}
779 
/* kern.skywalk.libcuckoo.test: write 1 to run the tests, 0 to tear down */
SYSCTL_PROC(_kern_skywalk_libcuckoo, OID_AUTO, test,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
    sysctl_cuckoo_test, "I", "Start Cuckoo hashtable test");
783 
784 #endif /* DEVELOPMENT || DEBUG */
785