1 /*
2 * Copyright (c) 2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #ifndef _KERN_SMR_HASH_H_
31 #define _KERN_SMR_HASH_H_
32
33 #include <kern/lock_mtx.h>
34 #include <kern/smr.h>
35 #include <os/hash.h>
36 #include <vm/vm_memtag.h>
37 #if XNU_KERNEL_PRIVATE
38 #include <kern/lock_ptr.h>
39 #include <kern/counter.h>
40 #endif
41
42 __BEGIN_DECLS
43
44
45 /*!
46 * @typedef smrh_key_t
47 *
48 * @brief
49 * A union that can represent several kinds of keys for SMR Hash Tables.
50 *
51 * @discussion
52 * For strings or opaque structs, @c smrk_len must hold the correct key size.
53 * For scalars (using the smrk_u64 field), the length is more advisory.
54 */
55 typedef struct {
56 union {
57 const char *smrk_string;
58 const void *smrk_opaque;
59 uint64_t smrk_u64;
60 };
61 size_t smrk_len;
62 } smrh_key_t;
63
64
65 /*!
66 * @struct smrh_traits
67 *
68 * @brief
69 * This structure parametrizes the behavior of SMR hash tables.
70 *
71 * @discussion
72 * Such structures are typically made with @c SMRH_TRAITS_DEFINE_*.
73 *
74 * Traits must be static and const in the same translation unit that implements
75 * the hash table, and possibly export methods to other modules. This design is
76 * not unlike C++ traits structure used in template parametrization, and rely on
77 * the constness of all structures for the compiler to actually elide most
78 * function calls, while maintaining decently good ergonomics.
79 *
80 *
81 * @field key_hash (automatic) computes a hash for a given key.
82 * @field key_equ (automatic) returns if two keys are equal
83 * @field obj_hash (automatic) computes a hash for a given object.
84 * @field obj_equ (automatic) returns if an object has the specified key
85 *
86 * @field domain the SMR domain which protects elements.
87 *
88 * @field obj_try_get function which attempts to acquire a reference on an
89 * object, or returns failure. This function is used
90 * to verify objects are "live" for the smrh_*_get()
91 * verbs.
92 */
93 struct smrh_traits {
94 unsigned long link_offset;
95 smr_t domain;
96 uint32_t (*key_hash)(smrh_key_t, uint32_t);
97 bool (*key_equ)(smrh_key_t, smrh_key_t);
98 uint32_t (*obj_hash)(const struct smrq_slink *, uint32_t);
99 bool (*obj_equ)(const struct smrq_slink *, smrh_key_t);
100 bool (*obj_try_get)(void *);
101 };
102 typedef const struct smrh_traits *smrh_traits_t;
103
104
105 #pragma mark SMR hash keys
106
107 /*!
108 * @macro SMRH_SCALAR_KEY()
109 *
110 * @brief
111 * Generates a @c smrh_key_t value out of a scalar.
112 */
113 #define SMRH_SCALAR_KEY(e) \
114 (smrh_key_t){ .smrk_u64 = (e), .smrk_len = sizeof(e) }
115
116
117 /*!
118 * @function smrh_key_hash_u32
119 *
120 * @brief
121 * Hashing function to use as a @c key_hash trait for 32bit scalars.
122 */
123 __pure2
124 static inline uint32_t
smrh_key_hash_u32(smrh_key_t key,uint32_t seed)125 smrh_key_hash_u32(smrh_key_t key, uint32_t seed)
126 {
127 uint32_t x = (uint32_t)key.smrk_u64 + seed;
128
129 x ^= x >> 16;
130 x *= 0x7feb352dU;
131 x ^= x >> 15;
132 x *= 0x846ca68bU;
133 x ^= x >> 16;
134
135 return x;
136 }
137
138 /*!
139 * @function smrh_key_hash_u64
140 *
141 * @brief
142 * Hashing function to use as a @c key_hash trait for 64bit scalars.
143 */
144 __pure2
145 static inline uint32_t
smrh_key_hash_u64(smrh_key_t key,uint32_t seed)146 smrh_key_hash_u64(smrh_key_t key, uint32_t seed)
147 {
148 uint64_t x = key.smrk_u64 + seed;
149
150 x ^= x >> 30;
151 x *= 0xbf58476d1ce4e5b9U;
152 x ^= x >> 27;
153 x *= 0x94d049bb133111ebU;
154 x ^= x >> 31;
155
156 return (uint32_t)x;
157 }
158
159 /*!
160 * @function smrh_key_hash_mem
161 *
162 * @brief
163 * Hashing function to use as a @c key_hash trait for byte arrays.
164 */
165 __stateful_pure
166 static inline uint32_t
smrh_key_hash_mem(smrh_key_t key,uint32_t seed)167 smrh_key_hash_mem(smrh_key_t key, uint32_t seed)
168 {
169 return os_hash_jenkins(key.smrk_opaque, key.smrk_len, seed);
170 }
171
172 /*!
173 * @function smrh_key_hash_str
174 *
175 * @brief
176 * Hashing function to use as a @c key_hash trait for C strings.
177 */
178 __stateful_pure
179 static inline uint32_t
smrh_key_hash_str(smrh_key_t key,uint32_t seed)180 smrh_key_hash_str(smrh_key_t key, uint32_t seed)
181 {
182 return os_hash_jenkins(key.smrk_opaque, key.smrk_len, seed);
183 }
184
185
186 /*!
187 * @function smrh_key_equ_scalar
188 *
189 * @brief
190 * Equality function to use as @c key_equ for scalars.
191 */
192 static inline bool
smrh_key_equ_scalar(smrh_key_t k1,smrh_key_t k2)193 smrh_key_equ_scalar(smrh_key_t k1, smrh_key_t k2)
194 {
195 return k1.smrk_u64 == k2.smrk_u64;
196 }
197
198 /*!
199 * @function smrh_key_equ_mem
200 *
201 * @brief
202 * Equality function to use as @c key_equ for byte arrays.
203 */
204 static inline bool
smrh_key_equ_mem(smrh_key_t k1,smrh_key_t k2)205 smrh_key_equ_mem(smrh_key_t k1, smrh_key_t k2)
206 {
207 assert(k1.smrk_len == k2.smrk_len);
208 return memcmp(k1.smrk_opaque, k2.smrk_opaque, k1.smrk_len) == 0;
209 }
210
211 /*!
212 * @function smrh_key_equ_str
213 *
214 * @brief
215 * Equality function to use as @c key_equ for strings.
216 */
217 static inline bool
smrh_key_equ_str(smrh_key_t k1,smrh_key_t k2)218 smrh_key_equ_str(smrh_key_t k1, smrh_key_t k2)
219 {
220 return k1.smrk_len == k2.smrk_len &&
221 memcmp(k1.smrk_opaque, k2.smrk_opaque, k1.smrk_len) == 0;
222 }
223
224
225 #pragma mark SMR hash traits
226
/*
 * Storage class for trait globals: constexpr in C++ so trait function
 * pointers can be folded at compile time; const (plus __used to keep
 * the symbol despite being file-local) in C.
 */
#if __cplusplus
#define __smrh_traits_storage     static constexpr
#else
#define __smrh_traits_storage     static const __used
#endif
232
233 /*!
234 * @macro SMRH_TRAITS_DEFINE()
235 *
236 * @brief
237 * Defines a relatively naked typed SMR Hash traits structure.
238 *
239 * @discussion
240 * Clients must provide:
241 * - domain,
242 * - key_hash,
243 * - key_equ,
244 * - obj_hash,
245 * - obj_equ.
246 *
247 * Clients might provide:
248 * - obj_try_get.
249 *
250 * @param name the name of the global to create.
251 * @param type_t the type of objects that will be hashed
252 * @param link_field the linkage used to link elements
253 */
254 #define SMRH_TRAITS_DEFINE(name, type_t, link_field, ...) \
255 __smrh_traits_storage struct name { \
256 type_t *smrht_obj_type[0]; \
257 struct smrh_traits smrht; \
258 } name = { .smrht = { \
259 .link_offset = offsetof(type_t, link_field), \
260 __VA_ARGS__ \
261 } }
262
263 /*!
264 * @macro SMRH_TRAITS_DEFINE_SCALAR()
265 *
266 * @brief
267 * Defines a relatively typed SMR Hash traits structure for scalar keys.
268 *
269 * @discussion
270 * Clients must provide:
271 * - domain.
272 *
273 * Clients might provide:
274 * - obj_try_get.
275 *
276 * @param name the name of the global to create.
277 * @param type_t the type of objects that will be hashed
278 * @param key_field the field holding the key
279 * @param link_field the linkage used to link elements
280 */
281 #define SMRH_TRAITS_DEFINE_SCALAR(name, type_t, key_field, link_field, ...) \
282 static uint32_t \
283 name ## _obj_hash(const struct smrq_slink *link, uint32_t seed) \
284 { \
285 __auto_type o = __container_of(link, const type_t, link_field); \
286 smrh_key_t k = SMRH_SCALAR_KEY(o->key_field); \
287 \
288 if (k.smrk_len > sizeof(uint32_t)) { \
289 return smrh_key_hash_u64(k, seed); \
290 } else { \
291 return smrh_key_hash_u32(k, seed); \
292 } \
293 } \
294 \
295 static bool \
296 name ## _obj_equ(const struct smrq_slink *link, smrh_key_t key) \
297 { \
298 __auto_type o = __container_of(link, const type_t, link_field); \
299 \
300 return smrh_key_equ_scalar(SMRH_SCALAR_KEY(o->key_field), key); \
301 } \
302 \
303 SMRH_TRAITS_DEFINE(name, type_t, link_field, \
304 .key_hash = sizeof(((type_t *)NULL)->key_field) > 4 \
305 ? smrh_key_hash_u64 : smrh_key_hash_u32, \
306 .key_equ = smrh_key_equ_scalar, \
307 .obj_hash = name ## _obj_hash, \
308 .obj_equ = name ## _obj_equ, \
309 __VA_ARGS__ \
310 )
311
312 /*!
313 * @macro SMRH_TRAITS_DEFINE_STR()
314 *
315 * @brief
316 * Defines a basic typed SMR Hash traits structure for string keys.
317 *
318 * @discussion
319 * Clients must provide:
320 * - domain,
321 * - obj_hash,
322 * - obj_equ.
323 *
324 * Clients might provide:
325 * - obj_try_get.
326 *
327 * @param name the name of the global to create.
328 * @param type_t the type of objects that will be hashed
329 * @param link_field the linkage used to link elements
330 */
331 #define SMRH_TRAITS_DEFINE_STR(name, type_t, link_field, ...) \
332 SMRH_TRAITS_DEFINE(name, type_t, link_field, \
333 .key_hash = smrh_key_hash_str, \
334 .key_equ = smrh_key_equ_str, \
335 __VA_ARGS__ \
336 )
337
338 /*!
339 * @macro SMRH_TRAITS_DEFINE_MEM()
340 *
341 * @brief
342 * Defines a basic typed SMR Hash traits structure for byte array keys.
343 *
344 * @discussion
345 * Clients must provide:
346 * - domain,
347 * - obj_hash,
348 * - obj_equ.
349 *
350 * Clients might provide:
351 * - obj_try_get.
352 *
353 * @param name the name of the global to create.
354 * @param type_t the type of objects that will be hashed
355 * @param link_field the linkage used to link elements
356 */
357 #define SMRH_TRAITS_DEFINE_MEM(name, type_t, link_field, ...) \
358 SMRH_TRAITS_DEFINE(name, type_t, link_field, \
359 .key_hash = smrh_key_hash_mem, \
360 .key_equ = smrh_key_equ_mem, \
361 __VA_ARGS__ \
362 )
363
364 /*!
365 * @macro smrht_enter()
366 *
367 * @brief
368 * Conveniency macro to enter the domain associated
369 * with a specified hash table traits
370 */
371 #define smrht_enter(traits) \
372 smr_enter((traits)->smrht.domain)
373
374 /*!
375 * @macro smrht_leave()
376 *
377 * @brief
378 * Conveniency macro to leave the domain associated
379 * with a specified hash table traits
380 */
381 #define smrht_leave(traits) \
382 smr_leave((traits)->smrht.domain)
383
384
385 #pragma mark - SMR hash tables
386
387
388 /*!
389 * @struct smr_hash
390 *
391 * @brief
392 * This type implements simple closed addressing hash table.
393 *
394 * @discussion
395 * Using such a table allows for concurrent readers,
396 * but assumes external synchronization for mutations.
397 *
398 * In particular it means that insertions and deletions
399 * might block behind resizing the table.
400 *
401 * These hash tables aren't meant to be robust to attackers
402 * trying to cause hash collisions.
403 *
404 * Resizing is possible concurrently to readers implementing
405 * the relativistic hash table growth scheme.
406 * (https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf)
407 */
408 struct smr_hash {
409 #define SMRH_ARRAY_ORDER_SHIFT (48)
410 #define SMRH_ARRAY_ORDER_MASK (0x00fful << SMRH_ARRAY_ORDER_SHIFT)
411 uintptr_t smrh_array;
412 uint32_t smrh_count;
413 bool smrh_resizing;
414 uint8_t smrh_unused1;
415 uint16_t smrh_unused2;
416 };
417
418 #pragma mark SMR hash tables: initialization and accessors
419
420 /*!
421 * @function smr_hash_init_empty()
422 *
423 * @brief
424 * Initializes a hash to be empty.
425 *
426 * @discussion
427 * No entry can be added to this hash, but lookup functions
428 * will return sensible results.
429 *
430 * @c smr_hash_init() must be used again if the hash is to be used for
431 * insertions, which is safe with respect to concurrent readers.
432 *
433 * Calling @c smr_hash_destroy() on an empty-initialized hash is legal.
434 *
435 * Whether the hash table is empty-initialized can be tested with
436 * @c smr_hash_is_empty_initialized().
437 */
438 extern void smr_hash_init_empty(
439 struct smr_hash *smrh);
440
441 /*!
442 * @function smr_hash_init()
443 *
444 * @brief
445 * Initializes a hash with the specified size.
446 *
447 * @discussion
448 * This function never fails but requires for `size` to be
449 * smaller than KALLOC_SAFE_ALLOC_SIZE / sizeof(struct smrq_slist_head)
450 * (or to be called during early boot).
451 */
452 extern void smr_hash_init(
453 struct smr_hash *smrh,
454 size_t size);
455
456 /*!
457 * @function smr_hash_destroy()
458 *
459 * @brief
460 * Destroys a hash initiailized with smr_hash_init().
461 *
462 * @discussion
463 * This doesn't clean up the table from any elements it still contains.
464 * @c smr_hash_serialized_clear() must be called first if needed.
465 */
466 extern void smr_hash_destroy(
467 struct smr_hash *smrh);
468
469 /*!
470 * @function smr_hash_is_empty_initialized()
471 *
472 * @brief
473 * Returns whether the smr hash is empty as a result of calling
474 * smr_hash_init_empty().
475 */
476 extern bool smr_hash_is_empty_initialized(
477 struct smr_hash *smrh);
478
479 /*!
480 * @struct smr_array
481 *
482 * @brief
483 * The array pointer of a hash is packed with its size for atomicity reasons,
484 * this type is used for decoding / setting this pointer.
485 */
486 struct smr_hash_array {
487 struct smrq_slist_head *smrh_array;
488 uint16_t smrh_order;
489 };
490
491 /*!
492 * @function smr_hash_array_decode
493 *
494 * @brief
495 * Decodes the array pointer of a hash table.
496 */
497 static inline struct smr_hash_array
smr_hash_array_decode(const struct smr_hash * smrh)498 smr_hash_array_decode(const struct smr_hash *smrh)
499 {
500 struct smr_hash_array array;
501 uintptr_t ptr = os_atomic_load(&smrh->smrh_array, relaxed);
502
503 array.smrh_order = (uint8_t)(ptr >> SMRH_ARRAY_ORDER_SHIFT);
504 #ifndef __BUILDING_XNU_LIBRARY__
505 /* when running in kernel space, top bits are supposed to be 0xff*/
506 ptr |= SMRH_ARRAY_ORDER_MASK;
507 #else
508 /* in user-mode top bits need to be 00 */
509 ptr &= ~SMRH_ARRAY_ORDER_MASK;
510 #endif
511 array.smrh_array = (struct smrq_slist_head *)ptr;
512
513 return array;
514 }
515
516 /*!
517 * @function smr_hash_size()
518 *
519 * @brief
520 * Returns the number of buckets in the hash table.
521 */
522 __attribute__((overloadable, always_inline))
523 static inline unsigned long
smr_hash_size(struct smr_hash_array array)524 smr_hash_size(struct smr_hash_array array)
525 {
526 return 1ul << (64 - array.smrh_order);
527 }
/* Overload taking the hash table directly: decode the packed array first. */
__attribute__((overloadable, always_inline))
static inline unsigned long
smr_hash_size(const struct smr_hash *smrh)
{
	return smr_hash_size(smr_hash_array_decode(smrh));
}
534
535 /*!
536 * @function smr_hash_serialized_count()
537 *
538 * @brief
539 * Returns the number of elements in the hash table.
540 *
541 * @discussion
542 * It can be called without serialization held,
543 * but the value is then racy.
544 */
545 __attribute__((always_inline))
546 static inline unsigned long
smr_hash_serialized_count(const struct smr_hash * smrh)547 smr_hash_serialized_count(const struct smr_hash *smrh)
548 {
549 return smrh->smrh_count;
550 }
551
552
553 #pragma mark SMR hash tables: read operations
554
555 /*!
556 * @function smr_hash_get()
557 *
558 * @brief
559 * Conveniency function for simple lookups.
560 *
561 * @discussion
562 * The SMR domain for this table must not be entered.
563 *
564 * This function doesn't require any synchronization and will enter/leave
565 *
566 * the SMR domain protecting elements automatically, and call the @c obj_try_get
567 * trait to validate/retain the element.
568 *
569 * @param smrh the hash table
570 * @param key the key to lookup
571 * @param traits the traits for the hash table
572 */
573 #define smr_hash_get(smrh, key, traits) ({ \
574 (smrht_obj_t(traits))__smr_hash_get(smrh, key, &(traits)->smrht); \
575 })
576
577 /*!
578 * @function smr_hash_contains()
579 *
580 * @brief
581 * Conveniency function for simple contains checks.
582 *
583 * @discussion
584 * The SMR domain for this table must not be entered.
585 *
586 * This function doesn't require any synchronization and will enter/leave
587 *
588 * @param smrh the hash table
589 * @param key the key to lookup
590 * @param traits the traits for the hash table
591 */
592 #define smr_hash_contains(smrh, key, traits) ({ \
593 smrh_traits_t __smrht = &(traits)->smrht; \
594 struct smrq_slist_head *__hd; \
595 bool __contains; \
596 \
597 smr_enter(__smrht->domain); \
598 __hd = __smr_hash_bucket(smrh, key, __smrht); \
599 __contains = (__smr_hash_entered_find(__hd, key, __smrht) != NULL); \
600 smr_leave(__smrht->domain); \
601 \
602 __contains; \
603 })
604
605 /*!
606 * @function smr_hash_entered_find()
607 *
608 * @brief
609 * Lookups an element in the table by key.
610 *
611 * @discussion
612 * The SMR domain for this table must be entered.
613 *
614 * This function returns the first element found that matches the key.
615 * This element might be about to be deleted or stale, and it is up
616 * to the client to make that determination if required.
617 *
618 * There might be more elements that can be found for that key,
619 * but because elements are inserted at the head of buckets,
620 * other matches should all be staler entries than the one returned.
621 *
622 * @param smrh the hash table
623 * @param key the key to lookup
624 * @param traits the traits for the hash table
625 */
626 #define smr_hash_entered_find(smrh, key, traits) ({ \
627 smrh_traits_t __smrht = &(traits)->smrht; \
628 struct smrq_slist_head *__hd = __smr_hash_bucket(smrh, key, __smrht); \
629 \
630 (smrht_obj_t(traits))__smr_hash_entered_find(__hd, key, __smrht); \
631 })
632
633 /*!
634 * @function smr_hash_serialized_find()
635 *
636 * @brief
637 * Lookups an element in the table by key.
638 *
639 * @discussion
640 * The SMR domain for this table must NOT be entered.
641 * This function requires external serialization with other mutations.
642 *
643 * This function returns the first element found that matches the key.
644 * This element might be about to be deleted or stale, and it is up
645 * to the client to make that determination if required.
646 *
647 * There might be more elements that can be found for that key,
648 * but because elements are inserted at the head of buckets,
649 * other matches should all be staler entries than the one returned.
650 *
651 * @param smrh the hash table
652 * @param key the key to lookup
653 * @param traits the traits for the hash table
654 */
655 #define smr_hash_serialized_find(smrh, key, traits) ({ \
656 smrh_traits_t __smrht = &(traits)->smrht; \
657 struct smrq_slist_head *__hd = __smr_hash_bucket(smrh, key, __smrht); \
658 \
659 (smrht_obj_t(traits))__smr_hash_serialized_find(__hd, key, __smrht); \
660 })
661
662
663 #pragma mark SMR hash tables: mutations
664
665 /*!
666 * @function smr_hash_serialized_insert()
667 *
668 * @brief
669 * Insert an object in the hash table.
670 *
671 * @discussion
672 * The SMR domain for this table must NOT be entered.
673 * This function requires external serialization with other mutations.
674 *
675 * Clients of this call must have checked there is no previous entry
676 * for this key in the hash table.
677 *
678 * @param smrh the hash table
679 * @param link the pointer to the linkage to insert.
680 * @param traits the traits for the hash table
681 */
682 #define smr_hash_serialized_insert(smrh, link, traits) ({ \
683 smrh_traits_t __smrht = &(traits)->smrht; \
684 struct smr_hash *__h = (smrh); \
685 struct smrq_slink *__link = (link); \
686 struct smrq_slist_head *__hd; \
687 \
688 __hd = __smr_hash_bucket(__h, __link, __smrht); \
689 __h->smrh_count++; \
690 smrq_serialized_insert_head(__hd, __link); \
691 })
692
693 /*!
694 * @function smr_hash_serialized_get_or_insert()
695 *
696 * @brief
697 * Insert an object in the hash table, or get the conflicting element.
698 *
699 * @discussion
700 * The SMR domain for this table must NOT be entered.
701 * This function requires external serialization with other mutations.
702 *
703 * @param smrh the hash table
704 * @param link the pointer to the linkage to insert.
705 * @param traits the traits for the hash table
706 */
707 #define smr_hash_serialized_get_or_insert(smrh, key, link, traits) ({ \
708 (smrht_obj_t(traits))__smr_hash_serialized_get_or_insert(smrh, key, \
709 link, &(traits)->smrht); \
710 })
711
712 /*!
713 * @function smr_hash_serialized_remove()
714 *
715 * @brief
716 * Remove an object from the hash table.
717 *
718 * @discussion
719 * The SMR domain for this table must NOT be entered.
720 * This function requires external serialization with other mutations.
721 *
722 * Note that the object once removed must be retired via SMR,
723 * and not freed immediately.
724 *
725 * If the object isn't in the hash table, this function will crash with
726 * a NULL deref while walking the bucket where the element should belong.
727 *
728 * @param smrh the hash table
729 * @param link the pointer to the linkage to remove.
730 * @param traits the traits for the hash table
731 */
732 #define smr_hash_serialized_remove(smrh, link, traits) ({ \
733 smrh_traits_t __smrht = &(traits)->smrht; \
734 struct smr_hash *__h = (smrh); \
735 struct smrq_slink *__link = (link); \
736 struct smrq_slist_head *__hd; \
737 \
738 __hd = __smr_hash_bucket(__h, __link, __smrht); \
739 __h->smrh_count--; \
740 smrq_serialized_remove(__hd, __link); \
741 })
742
743 /*!
744 * @function smr_hash_serialized_replace()
745 *
746 * @brief
747 * Replaces an object in the hash
748 *
749 * @discussion
750 * The SMR domain for this table must NOT be entered.
751 * This function requires external serialization with other mutations.
752 *
753 * Note that the old object once removed must be retired via SMR,
754 * and not freed immediately.
755 *
756 * If the old object isn't in the hash table, this function will crash with
757 * a NULL deref while walking the bucket where the element should belong.
758 *
759 * The new object MUST have the same key as the object it replaces,
760 * otherwise behavior is undefined.
761 *
762 * @param smrh the hash table
763 * @param old_link the pointer to the linkage to remove.
764 * @param new_link the pointer to the linkage to insert.
765 * @param traits the traits for the hash table
766 */
767 #define smr_hash_serialized_replace(smrh, old_link, new_link, traits) ({ \
768 smrh_traits_t __smrht = &(traits)->smrht; \
769 struct smrq_slink *__link = (old_link); \
770 struct smrq_slist_head *__hd; \
771 \
772 __hd = __smr_hash_bucket(smrh, __link, __smrht); \
773 smrq_serialized_replace(__hd, __link, (new_link)); \
774 })
775
776 /*!
777 * @function smr_hash_serialized_clear()
778 *
779 * @brief
780 * Empties an SMR hash table.
781 *
782 * @discussion
783 * This function requires external serialization with other mutations.
784 *
785 * @param smrh the hash table to clear
786 * @param traits the traits for this hash table
787 * @param free a block to call on each element in the table.
788 */
789 #define smr_hash_serialized_clear(smrh, traits, free...) \
790 __smr_hash_serialized_clear(smrh, &(traits)->smrht, free)
791
792
793 #pragma mark SMR hash tables: resizing
794
795 /*
796 * Implementing the growth policy is not builtin because
797 * there is a LOT of ways to do it, with some variants
798 * (such as asynchronoulsy) require a lot of bookkeeping
799 * which would grow the structure and prevent lean clients
800 * to use it without any growth policy.
801 */
802
803 /*!
804 * @function smr_hash_serialized_should_shrink()
805 *
806 * @brief
807 * Allows implementing a typical policy for shrinking.
808 *
809 * @discussion
810 * Returns whether the table is at least @c min_size large,
811 * and whether the table has more than @c size_factor buckets
812 * per @c count_factor elements.
813 */
814 static inline bool
smr_hash_serialized_should_shrink(const struct smr_hash * smrh,uint32_t min_size,uint32_t size_factor,uint32_t count_factor)815 smr_hash_serialized_should_shrink(
816 const struct smr_hash *smrh,
817 uint32_t min_size,
818 uint32_t size_factor,
819 uint32_t count_factor)
820 {
821 size_t size = smr_hash_size(smrh);
822
823 if (size > min_size && !smrh->smrh_resizing) {
824 return size * count_factor > smrh->smrh_count * size_factor;
825 }
826 return false;
827 }
828
829 /*!
830 * @function smr_hash_serialized_should_grow()
831 *
832 * @brief
833 * Allows implementing a typical policy for shrinking.
834 *
835 * @discussion
836 * Returns whether the table has less than @c size_factor buckets
837 * per @c count_factor elements.
838 */
839 static inline bool
smr_hash_serialized_should_grow(const struct smr_hash * smrh,uint32_t size_factor,uint32_t count_factor)840 smr_hash_serialized_should_grow(
841 const struct smr_hash *smrh,
842 uint32_t size_factor,
843 uint32_t count_factor)
844 {
845 size_t size = smr_hash_size(smrh);
846
847 if (!smrh->smrh_resizing) {
848 return size * count_factor < smrh->smrh_count * size_factor;
849 }
850 return false;
851 }
852
853 /*!
854 * @function smr_hash_shrink_and_unlock()
855 *
856 * @brief
857 * Shrinks a hash table (halves the number of buckets).
858 *
859 * @discussion
860 * This function synchronizes with other mutations using
861 * the passed in mutex.
862 *
863 * Shrinking is a relatively fast operation (however it still
864 * is mostly linear in the number of elements in the hash).
865 *
866 * This function doesn't perform any policy checks such as
867 * "minimal size" being sane or density of buckets being good.
868 *
869 * This function assumes it is called with the lock held,
870 * and returns with it unlocked.
871 *
872 * @returns
873 * - KERN_SUCCESS: the resize was successful.
874 * - KERN_RESOURCE_SHORTAGE: the system was out of memory.
875 * - KERN_FAILURE: the hash table was already resizing.
876 */
877 #define smr_hash_shrink_and_unlock(smrh, mutex, traits) \
878 __smr_hash_shrink_and_unlock(smrh, mutex, &(traits)->smrht)
879
880
881 /*!
882 * @function smr_hash_grow_and_unlock()
883 *
884 * @brief
885 * Grows a hash table (doubles the number of buckets).
886 *
887 * @discussion
888 * This function synchronizes with other mutations using
889 * the passed in mutex.
890 *
891 * Growing is relatively expensive, as it will rehash all elements,
892 * and call smr_synchronize() several times over the course
893 * of the operation. And mutations are delayed while this growth is
894 * occurring.
895 *
896 * This function doesn't perform any policy checks such as
897 * "maximal size" being sane or density of buckets being good.
898 *
899 * This function assumes it is called with the lock held,
900 * and returns with it unlocked.
901 *
902 * @returns
903 * - KERN_SUCCESS: the resize was successful.
904 * - KERN_RESOURCE_SHORTAGE: the system was out of memory.
905 * - KERN_FAILURE: the hash table was already resizing.
906 */
907 #define smr_hash_grow_and_unlock(smrh, mutex, traits) \
908 __smr_hash_grow_and_unlock(smrh, mutex, &(traits)->smrht)
909
910
911 #pragma mark SMR hash tables: iteration
912
913 /*!
914 * @struct smr_hash_iterator
915 *
916 * @brief
917 * Structure used for SMR hash iterations.
918 *
919 * @discussion
920 * Do not manipulate internal fields directly, use the accessors instead.
921 *
922 * Using iteration can be done either under an entered SMR domain,
923 * or under serialization.
924 *
925 * However erasure is only supported under serialization.
926 *
927 * Note that entered enumeration is done with preemption disabled
928 * and should be used in a limited capacity. Such enumerations
929 * might observe elements already removed from the table (due
930 * to concurrent deletions) or elements twice (due to concurrent resizes).
931 */
932 struct smr_hash_iterator {
933 struct smr_hash *smrh;
934 struct smrq_slist_head *hd_next;
935 struct smrq_slist_head *hd_last;
936 __smrq_slink_t *prev;
937 struct smrq_slink *link;
938 };
939
940 /*!
941 * @function smr_hash_iter_begin()
942 *
943 * @brief
944 * Initialize an SMR iterator, and advance it to the first element.
945 *
946 * @discussion
947 * This function must be used in either serialized or entered context.
948 */
949 static inline struct smr_hash_iterator
smr_hash_iter_begin(struct smr_hash * smrh)950 smr_hash_iter_begin(struct smr_hash *smrh)
951 {
952 struct smr_hash_array array = smr_hash_array_decode(smrh);
953 struct smr_hash_iterator it = {
954 .smrh = smrh,
955 .hd_next = array.smrh_array,
956 .hd_last = array.smrh_array + smr_hash_size(array),
957 };
958
959 do {
960 it.prev = &it.hd_next->first;
961 it.link = smr_entered_load(it.prev);
962 it.hd_next++;
963 } while (it.link == NULL && it.hd_next < it.hd_last);
964
965 return it;
966 }
967
968 /*!
969 * @function smr_hash_iter_get()
970 *
971 * @brief
972 * Returns the current value of the iterator, or NULL.
973 *
974 * @discussion
975 * This function must be used in either serialized or entered context.
976 */
977 #define smr_hash_iter_get(it, traits) ({ \
978 struct smr_hash_iterator __smrh_it = (it); \
979 void *__obj = NULL; \
980 \
981 if (__smrh_it.link) { \
982 __obj = __smrht_link_to_obj(&(traits)->smrht, __smrh_it.link); \
983 } \
984 \
985 (smrht_obj_t(traits))__obj; \
986 })
987
988 /*!
989 * @function smr_hash_iter_advance()
990 *
991 * @brief
992 * Advance the iterator to the next element.
993 *
994 * @description
995 * This function must be used in either serialized or entered context.
996 */
997 static inline void
smr_hash_iter_advance(struct smr_hash_iterator * it)998 smr_hash_iter_advance(struct smr_hash_iterator *it)
999 {
1000 it->prev = &it->link->next;
1001
1002 while ((it->link = smr_entered_load(it->prev)) == NULL) {
1003 if (it->hd_next == it->hd_last) {
1004 break;
1005 }
1006 it->prev = &it->hd_next->first;
1007 it->hd_next++;
1008 }
1009 }
1010
1011 /*!
1012 * @function smr_hash_iter_serialized_erase()
1013 *
1014 * @brief
1015 * Erases the current item from the hash and advance the cursor.
1016 *
1017 * @description
1018 * This function requires external serialization with other mutations.
1019 *
1020 * The object once removed must be retired via SMR,
1021 * and not freed immediately.
1022 */
1023 static inline void
smr_hash_iter_serialized_erase(struct smr_hash_iterator * it)1024 smr_hash_iter_serialized_erase(struct smr_hash_iterator *it)
1025 {
1026 it->link = smr_serialized_load(&it->link->next);
1027 it->smrh->smrh_count--;
1028 smr_serialized_store_relaxed(it->prev, it->link);
1029
1030 while (it->link == NULL) {
1031 if (it->hd_next == it->hd_last) {
1032 break;
1033 }
1034 it->prev = &it->hd_next->first;
1035 it->link = smr_serialized_load(it->prev);
1036 it->hd_next++;
1037 }
1038 }
1039
1040 /*!
1041 * @function smr_hash_foreach()
1042 *
1043 * @brief
1044 * Enumerates all elements in a hash table.
1045 *
1046 * @discussion
1047 * This function must be used in either serialized or entered context.
1048 *
1049 * When used in entered context, the enumeration might observe stale objects
1050 * that haven't been removed yet, or elements twice (due to concurrent resizes),
1051 * and the disambiguation must be done by the client if it matters.
1052 *
1053 * It is not permitted to erase elements during this enumeration,
1054 * manual use of the iterator APIs must be used if this is desired.
1055 *
1056 * @param obj the enumerator variable
1057 * @param smrh the hash table to enumerate
1058 * @param traits the traits for the hash table
1059 */
1060 #define smr_hash_foreach(obj, smrh, traits) \
1061 for (struct smr_hash_iterator __it = smr_hash_iter_begin(smrh); \
1062 ((obj) = smr_hash_iter_get(__it, traits)); \
1063 smr_hash_iter_advance(&__it))
1064
1065
1066 #if XNU_KERNEL_PRIVATE
1067 #pragma mark - SMR scalable hash tables
1068
1069
1070 /*!
1071 * @typedef smrsh_state_t
1072 *
1073 * @brief
1074 * Atomic state for the scalable SMR hash table.
1075 *
1076 * @discussion
1077 * Scalable hash tables have 2 sets of seeds and bucket arrays,
1078 * which are used for concurrent rehashing.
1079 *
1080 * Each growth/shrink/re-seed operation will swap sizes
1081 * and set of seed/array atomically by changing the state.
1082 */
typedef struct {
	uint8_t curidx;    /* which of the two seed/array slots is current */
	uint8_t curshift;  /* bucket index shift for the current array */
	uint8_t newidx;    /* slot being rehashed into */
	uint8_t newshift;  /* bucket index shift for the new array */
} smrsh_state_t;
1089
1090
1091 /*!
1092 * @typedef smrsh_rehash_t
1093 *
1094 * @brief
1095 * Internal state management for various rehashing operations.
1096 */
__options_closed_decl(smrsh_rehash_t, uint8_t, {
	SMRSH_REHASH_NONE = 0x00,       /* no rehash requested */
	SMRSH_REHASH_RESEED = 0x01,     /* rehash with fresh seeds (high collision rate) */
	SMRSH_REHASH_SHRINK = 0x02,     /* rehash into a smaller array */
	SMRSH_REHASH_GROW = 0x04,       /* rehash into a larger array */
	SMRSH_REHASH_RUNNING = 0x08,    /* a rehash operation is in flight */
});
1104
1105
1106 /*!
1107 * @enum smrsh_policy_t
1108 *
1109 * @brief
1110 * Describes the growth/shrink policies for scalable SMR hash tables.
1111 *
1112 * @description
1113 * In general, singleton global hash tables that are central
1114 * to the performance of the system likely want to use
1115 * @c SMRSH_BALANCED_NOSHRINK or @c SMRSH_FASTEST.
1116 *
1117 * Hash tables that tend to be instantiated multiple times,
1118 * or have bursty behaviors should use more conservative policies.
1119 *
1120 * @const SMRSH_COMPACT
1121 * Choose a policy that is very memory conscious and will favour aggressive
1122 * shrinking and allow relatively long hash chains.
1123 *
1124 * @const SMRSH_BALANCED
1125 * Choose a balanced policy that allows for medium sized hash chains,
1126 * and shrinks less aggressively than @c SMRSH_COMPACT.
1127 *
1128 * @const SMRSH_BALANCED_NOSHRINK
1129 * This policy is the same as @c SMRSH_BALANCED, but the hash table
1130 * will not be allowed to shrink.
1131 *
1132 * @const SMRSH_FASTEST
1133 * This policy grows aggressively, only tolerating relatively short
1134 * hash chains, and will never shrink.
1135 */
1136 __enum_closed_decl(smrsh_policy_t, uint32_t, {
1137 SMRSH_COMPACT,
1138 SMRSH_BALANCED,
1139 SMRSH_BALANCED_NOSHRINK,
1140 SMRSH_FASTEST,
1141 });
1142
1143
1144 /*!
1145 * @struct smr_shash
1146 *
1147 * @brief
1148 * Type for scalable SMR hash table.
1149 *
1150 * @description
1151 * Unlike its little brother @c smr_hash, these kinds of hash tables
1152 * allow for concurrent mutations (with fined grained per-bucket locks)
1153 * of the hash table.
1154 *
1155 * It also observes high collision rates and tries to adjust the hash
1156 * seeds in order to rebalance the hash tables when this happens.
1157 *
1158 * All this goodness however comes at a cost:
1159 * - these hash tables can't be enumerated
1160 * - these hash tables are substantially bigger (@c smr_hash is 2 pointers big,
1161 * where @c smr_shash is bigger and allocates a thread_call and a scalable
1162 * counter).
1163 */
struct smr_shash {
	hw_lck_ptr_t *_Atomic smrsh_array[2];   /* the two bucket arrays (indexed by state idx) */
	uint32_t _Atomic smrsh_seed[2];         /* the two hash seeds matching the arrays */
	smrsh_state_t _Atomic smrsh_state;      /* cur/new indices and shifts (see smrsh_state_t) */
	smrsh_rehash_t _Atomic smrsh_rehashing; /* in-flight/pending rehash flags */
	smrsh_policy_t smrsh_policy;            /* growth/shrink policy, set at init */
	uint16_t smrsh_min_shift : 5;           /* derived from smr_shash_init()'s min_size */
	uint16_t __unused_bits : 11;
	scalable_counter_t smrsh_count;         /* element count (scalable counter) */
	struct thread_call *smrsh_callout;      /* thread call (async rehashing -- see smr_shash_init) */
};
1175
1176 #define SMRSH_BUCKET_STOP_BIT 0x1ul
1177
1178
1179
1180 #pragma mark SMR scalable hash tables: initialization and accessors
1181
1182 /*!
1183 * @function smr_shash_init()
1184 *
1185 * @brief
1186 * Initializes a scalable hash table.
1187 *
1188 * @param smrh the scalable hash table struct to initialize.
1189 * @param policy the growth policy to use (see @c smrsh_policy_t).
1190 * @param min_size the number of buckets the table should not shrink below.
1191 */
1192 extern void smr_shash_init(
1193 struct smr_shash *smrh,
1194 smrsh_policy_t policy,
1195 size_t min_size);
1196
1197 /*!
1198 * @function smr_shash_destroy()
1199 *
1200 * @brief
1201 * Releases the resources held by a table.
1202 *
1203 * @param smrh the scalable hash table struct to destroy.
1204 * @param traits the SMR hash traits for this table.
1205 * @param free an optional callback to call on each element
1206 * still in the hash table.
1207 */
1208 #define smr_shash_destroy(smrh, traits, free...) \
1209 __smr_shash_destroy(smrh, &(traits)->smrht, free)
1210
1211
1212 #pragma mark SMR scalable hash tables: read operations
1213
1214 /*!
1215 * @function smr_shash_entered_find()
1216 *
1217 * @brief
1218 * Looks up an element by key in the hash table.
1219 *
1220 * @discussion
1221 * The SMR domain protecting the hash table elements must have been entered
1222 * to call this function.
1223 *
1224 * This function returns an object for which the @c obj_try_get
1225 * callback hasn't been called, which means that accessing the element
1226 * is only valid inside the current SMR critical section, or until
1227 * further action to "retain" the element has been taken.
1228 *
1229 * @param smrh the scalable hash table.
1230 * @param key the key to lookup.
1231 * @param traits the SMR hash traits for this table.
1232 *
1233 * @returns the first found element or NULL.
1234 */
1235 #define smr_shash_entered_find(smrh, key, traits) ({ \
1236 void *__obj; \
1237 \
1238 __obj = __smr_shash_entered_find(smrh, key, &(traits)->smrht); \
1239 \
1240 (smrht_obj_t(traits))__obj; \
1241 })
1242
1243
1244 /*!
1245 * @function smr_shash_entered_get()
1246 *
1247 * @brief
1248 * Looks up an element by key in the hash table.
1249 *
1250 * @discussion
1251 * The SMR domain protecting the hash table elements must have been entered
1252 * to call this function.
1253 *
1254 * This function returns an object for which the @c obj_try_get
1255 * callback has been called, which ensures it is valid even
1256 * after the current SMR critical section ends.
1257 *
1258 * @param smrh the scalable hash table.
1259 * @param key the key to lookup.
1260 * @param traits the SMR hash traits for this table.
1261 *
1262 * @returns the first found element or NULL.
1263 */
1264 #define smr_shash_entered_get(smrh, key, traits) ({ \
1265 void *__obj; \
1266 \
1267 __obj = __smr_shash_entered_get(smrh, key, &(traits)->smrht); \
1268 \
1269 (smrht_obj_t(traits))__obj; \
1270 })
1271
1272 /*!
1273 * @function smr_shash_get()
1274 *
1275 * @brief
1276 * Looks up an element by key in the hash table.
1277 *
1278 * @discussion
1279 * Conveniency wrapper for @c smr_shash_entered_get()
1280 * which creates an SMR critical section around its call.
1281 *
1282 * The SMR domain protecting the hash table must NOT have been entered
1283 * to call this function.
1284 *
1285 * @param smrh the scalable hash table.
1286 * @param key the key to lookup.
1287 * @param traits the SMR hash traits for this table.
1288 *
1289 * @returns the first found element or NULL.
1290 */
1291 #define smr_shash_get(smrh, key, traits) ({ \
1292 void *__obj; \
1293 \
1294 smrht_enter(traits); \
1295 __obj = __smr_shash_entered_get(smrh, key, &(traits)->smrht); \
1296 smrht_leave(traits); \
1297 \
1298 (smrht_obj_t(traits))__obj; \
1299 })
1300
1301
1302 #pragma mark SMR scalable hash tables: mutations
1303
1304 /*!
1305 * @function smr_shash_entered_get_or_insert()
1306 *
1307 * @brief
1308 * Inserts an element in the hash table, or return a pre-existing element
1309 * in the hash table for that key.
1310 *
1311 * @discussion
1312 * The SMR domain protecting the hash table elements must have been entered
1313 * to call this function.
1314 *
1315 * This function either returns an object for which the @c obj_try_get
1316 * callback has been called, or inserts the passed in element.
1317 *
1318 * @param smrh the scalable hash table.
1319 * @param key the key to lookup.
1320 * @param link the element to insert (its "key" must be @c key).
1321 * @param traits the SMR hash traits for this table.
1322 *
1323 * @returns NULL if the insertion happened,
1324 * or the "retained" colliding element otherwise.
1325 */
/*
 * The local __smrht both type-checks the traits argument and is the
 * value actually passed down (it was previously declared but unused).
 */
#define smr_shash_entered_get_or_insert(smrh, key, link, traits) ({ \
	smrh_traits_t __smrht = &(traits)->smrht;                       \
	void *__obj;                                                    \
	                                                                \
	__obj = __smr_shash_entered_get_or_insert(smrh, key, link,      \
	    __smrht);                                                   \
	                                                                \
	(smrht_obj_t(traits))__obj;                                     \
})
1335
1336 /*!
1337 * @function smr_shash_get_or_insert()
1338 *
1339 * @brief
 * Inserts an element in the hash table, or returns a pre-existing
 * element in the hash table for that key.
1341 *
1342 * @discussion
1343 * Conveniency wrapper for @c smr_shash_entered_get_or_insert()
1344 * which creates an SMR critical section around its call.
1345 *
1346 * The SMR domain protecting the hash table must NOT have been entered
1347 * to call this function.
1348 *
1349 * @param smrh the scalable hash table.
1350 * @param key the key to lookup.
1351 * @param link the element to insert (its "key" must be @c key).
1352 * @param traits the SMR hash traits for this table.
1353 *
1354 * @returns NULL if the insertion happened,
1355 * or the "retained" colliding element otherwise.
1356 */
1357 #define smr_shash_get_or_insert(smrh, key, link, traits) ({ \
1358 void *__obj; \
1359 \
1360 smrht_enter(traits); \
1361 __obj = __smr_shash_entered_get_or_insert(smrh, key, link, \
1362 &(traits)->smrht); \
1363 smrht_leave(traits); \
1364 \
1365 (smrht_obj_t(traits))__obj; \
1366 })
1367
1368
1369 /*!
1370 * @function smr_shash_entered_remove()
1371 *
1372 * @brief
1373 * Removes an element from the hash table.
1374 *
1375 * @discussion
1376 * The SMR domain protecting the hash table must have been entered
1377 * to call this function.
1378 *
1379 * The removed element can't be added back to the hash table
1380 * and must be retired via SMR and not freed immediately.
1381 *
1382 * @param smrh the scalable hash table.
1383 * @param link the element to remove from the hash table.
1384 * @param traits the SMR hash traits for this table.
1385 */
1386 #define smr_shash_entered_remove(smrh, link, traits) ({ \
1387 smr_shash_mut_cursor_t __cursor; \
1388 struct smrq_slink *__link = (link); \
1389 struct smr_shash *__smrh = (smrh); \
1390 \
1391 __cursor = smr_shash_entered_mut_begin(__smrh, __link, traits); \
1392 smr_shash_entered_mut_erase(__smrh, __cursor, __link, traits); \
1393 })
1394
1395 /*!
1396 * @function smr_shash_remove()
1397 *
1398 * @brief
1399 * Removes an element from the hash table.
1400 *
1401 * @discussion
1402 * Conveniency wrapper for @c smr_shash_entered_remove()
1403 * which creates an SMR critical section around its call.
1404 *
1405 * The SMR domain protecting the hash table must NOT have been entered
1406 * to call this function.
1407 *
1408 * The removed element can't be added back to the hash table
1409 * and must be retired via SMR and not freed immediately.
1410 *
1411 * @param smrh the scalable hash table.
1412 * @param link the element to remove from the hash table.
1413 * @param traits the SMR hash traits for this table.
1414 */
1415 #define smr_shash_remove(smrh, link, traits) ({ \
1416 smrht_enter(traits); \
1417 smr_shash_entered_remove(smrh, link, traits); \
1418 smrht_leave(traits); \
1419 })
1420
1421
1422 /*!
1423 * @function smr_shash_entered_replace()
1424 *
1425 * @brief
1426 * Replaces an element in the hash table with another.
1427 *
1428 * @discussion
1429 * Elements must have the same key, otherwise the behavior is undefined.
1430 *
1431 * The SMR domain protecting the hash table must have been entered
1432 * to call this function.
1433 *
1434 * The removed element can't be added back to the hash table
1435 * and must be retired via SMR and not freed immediately.
1436 *
1437 * @param smrh the scalable hash table.
1438 * @param old_link the element to remove from the hash table.
1439 * @param new_link the element to insert in the hash table.
1440 * @param traits the SMR hash traits for this table.
1441 */
1442 #define smr_shash_entered_replace(smrh, old_link, new_link, traits) ({ \
1443 smr_shash_mut_cursor_t __cursor; \
1444 struct smrq_slink *__link = (old_link); \
1445 \
1446 __cursor = smr_shash_entered_mut_begin(smrh, __link, traits); \
1447 smr_shash_entered_mut_replace(__cursor, __link, new_link); \
1448 })
1449
1450 /*!
1451 * @function smr_shash_replace()
1452 *
1453 * @brief
1454 * Replaces an element in the hash table with another.
1455 *
1456 * @discussion
1457 * Conveniency wrapper for @c smr_shash_entered_replace()
1458 * which creates an SMR critical section around its call.
1459 *
1460 * Elements must have the same key, otherwise the behavior is undefined.
1461 *
1462 * The SMR domain protecting the hash table must NOT have been entered
1463 * to call this function.
1464 *
1465 * The removed element can't be added back to the hash table
1466 * and must be retired via SMR and not freed immediately.
1467 *
1468 * @param smrh the scalable hash table.
1469 * @param old_link the element to remove from the hash table.
1470 * @param new_link the element to insert in the hash table.
1471 * @param traits the SMR hash traits for this table.
1472 */
1473 #define smr_shash_replace(smrh, old_link, new_link, traits) ({ \
1474 smrht_enter(traits); \
1475 smr_shash_entered_replace(smrh, old_link, new_link, traits); \
1476 smrht_leave(traits); \
1477 })
1478
1479
1480 #pragma mark SMR scalable hash tables: advanced mutations
1481
1482 /*!
1483 * @typedef smr_shash_mut_cursor_t
1484 *
1485 * @brief
1486 * Cursor used for advanced mutations.
1487 */
typedef struct {
	hw_lck_ptr_t *head;     /* bucket head the element lives in (held locked) */
	__smrq_slink_t *prev;   /* pointer to the link referencing the element */
} smr_shash_mut_cursor_t;
1492
1493
1494 /*!
1495 * @macro smr_shash_entered_mut_begin()
1496 *
1497 * @brief
1498 * Creates a mutation cursor for the specified element.
1499 *
1500 * @discussion
1501 * A mutation cursor allows to erase or replace an element
1502 * in the hash table.
1503 *
1504 * The cursor returned by this function holds a lock,
1505 * and it is undefined to have two live cursors at a time
1506 * (it will typically deadlock, and unlike typical locks,
1507 * there's no a priori lock ordering that can be derived
1508 * to prevent it).
1509 *
1510 * The SMR domain protecting the hash table must have been entered
1511 * to call this function.
1512 *
1513 * One and exactly one of these three calls must be performed
1514 * on a cursor before the SMR transaction is ended:
1515 * - smr_shash_entered_mut_erase() to erase the element it was made for,
1516 * - smr_shash_entered_mut_replace() to replace the element it was made for,
1517 * - smr_shash_entered_mut_abort() to abandon the cursor without mutation.
1518 *
1519 * @param smrh the scalable hash table.
1520 * @param link the element to create a cursor for (must be in the hash).
1521 * @param traits the SMR hash traits for this table.
1522 */
1523 #define smr_shash_entered_mut_begin(smrh, link, traits) \
1524 __smr_shash_entered_mut_begin(smrh, link, &(traits)->smrht)
1525
1526
1527 /*!
1528 * @macro smr_shash_entered_mut_erase()
1529 *
1530 * @brief
1531 * Erases the element used to make the cursor.
1532 *
1533 * @discussion
1534 * The passed in element must be the same that was used to make the cursor.
1535 *
1536 * The call must be made in the same SMR transaction that was entered
1537 * to make the cursor.
1538 *
1539 * The cursor is invalidated once this call returns.
1540 *
1541 * The removed element can't be added back to the hash table
1542 * and must be retired via SMR and not freed immediately.
1543 *
1544 * @param smrh the scalable hash table.
1545 * @param cursor the cursor made for the element to remove.
1546 * @param link the element used to create @c cursor.
1547 * @param traits the SMR hash traits for this table.
1548 */
1549 #define smr_shash_entered_mut_erase(smrh, cursor, link, traits) \
1550 __smr_shash_entered_mut_erase(smrh, cursor, link, &(traits)->smrht)
1551
1552
1553 /*!
1554 * @macro smr_shash_entered_mut_replace()
1555 *
1556 * @brief
1557 * Replaces the element used to make the cursor.
1558 *
1559 * @discussion
1560 * The passed in element must be the same that was used to make the cursor.
1561 *
1562 * The call must be made in the same SMR transaction that was entered
1563 * to make the cursor.
1564 *
1565 * The cursor is invalidated once this call returns.
1566 *
1567 * The removed element can't be added back to the hash table
1568 * and must be retired via SMR and not freed immediately.
1569 *
1570 * The new object MUST have the same key as the object it replaces,
1571 * otherwise behavior is undefined.
1572 *
 * @param cursor the cursor made for the element to remove.
1575 * @param old_link the element used to create @c cursor.
1576 * @param new_link the element to replace @c old_link with.
1577 * @param traits the SMR hash traits for this table.
1578 */
1579 #define smr_shash_entered_mut_replace(cursor, old_link, new_link, traits) \
1580 __smr_shash_entered_mut_replace(cursor, old_link, new_link, &(traits)->smrht)
1581
1582
1583 /*!
1584 * @macro smr_shash_entered_mut_abort()
1585 *
1586 * @brief
1587 * Invalidates a cursor made with @c smr_shash_entered_mut_begin()
1588 *
1589 * @discussion
1590 * The call must be made in the same SMR transaction that was entered
1591 * to make the cursor.
1592 *
1593 * @param cursor the cursor to invalidate.
1594 */
1595 #define smr_shash_entered_mut_abort(cursor) \
1596 __smr_shash_entered_mut_abort(cursor)
1597
1598
1599 #endif /* XNU_KERNEL_PRIVATE */
1600 #pragma mark - implementation details
1601 #pragma mark SMR hash traits
1602
1603 #define smrht_obj_t(traits) \
1604 typeof((traits)->smrht_obj_type[0])
1605
1606 static inline void *
__smrht_link_to_obj(smrh_traits_t traits,const struct smrq_slink * link)1607 __smrht_link_to_obj(smrh_traits_t traits, const struct smrq_slink *link)
1608 {
1609 void *ptr = (void *)((uintptr_t)link - traits->link_offset);
1610
1611 __builtin_assume(ptr != NULL);
1612 return ptr;
1613 }
1614
1615
1616 #pragma mark SMR hash tables
1617
/*
 * Returns the bucket index mask for the decoded array
 * (smrh_order presumably encodes log2 of the unused high bits;
 * see smr_hash_array_decode -- TODO confirm encoding).
 */
static inline unsigned long
__smr_hash_mask(struct smr_hash_array array)
{
	return ~0ul >> array.smrh_order;
}
1623
1624
/*
 * Returns the bucket chain an object hashes into,
 * using the traits' obj_hash callback (seed 0).
 */
__attribute__((overloadable))
static inline struct smrq_slist_head *
__smr_hash_bucket(
	const struct smr_hash *smrh,
	struct smrq_slink *link,
	smrh_traits_t smrht)
{
	struct smr_hash_array array = smr_hash_array_decode(smrh);
	uint32_t index = __smr_hash_mask(array) & smrht->obj_hash(link, 0);

	return &array.smrh_array[index];
}
1637
/*
 * Returns the bucket chain a key hashes into,
 * using the traits' key_hash callback (seed 0).
 */
__attribute__((overloadable))
static inline struct smrq_slist_head *
__smr_hash_bucket(
	const struct smr_hash *smrh,
	smrh_key_t key,
	smrh_traits_t smrht)
{
	struct smr_hash_array array = smr_hash_array_decode(smrh);
	uint32_t index = __smr_hash_mask(array) & smrht->key_hash(key, 0);

	return &array.smrh_array[index];
}
1650
/*
 * Walks a bucket chain looking for a key match, in entered context
 * (SMR entered loads make the walk safe against concurrent mutations).
 * Returns the containing object or NULL.
 */
static inline void *
__smr_hash_entered_find(
	const struct smrq_slist_head *head,
	smrh_key_t key,
	smrh_traits_t smrht)
{
	for (struct smrq_slink *link = smr_entered_load(&head->first);
	    link; link = smr_entered_load(&link->next)) {
		if (smrht->obj_equ(link, key)) {
			return __smrht_link_to_obj(smrht, link);
		}
	}

	return NULL;
}
1666
1667 static inline void *
__smr_hash_serialized_find(const struct smrq_slist_head * head,smrh_key_t key,smrh_traits_t smrht)1668 __smr_hash_serialized_find(
1669 const struct smrq_slist_head *head,
1670 smrh_key_t key,
1671 smrh_traits_t smrht)
1672 {
1673 for (struct smrq_slink *link = smr_serialized_load(&head->first);
1674 link; link = smr_serialized_load(&link->next)) {
1675 if (smrht->obj_equ(link, key)) {
1676 return __smrht_link_to_obj(smrht, link);
1677 }
1678 }
1679
1680 return NULL;
1681 }
1682
/*
 * Full lookup wrapped in an SMR critical section of the traits'
 * domain; only returns objects that obj_try_get() agreed to retain,
 * so the result stays valid after the critical section ends.
 */
static inline void *
__smr_hash_get(
	const struct smr_hash *smrh,
	smrh_key_t key,
	smrh_traits_t smrht)
{
	struct smrq_slist_head *head;
	void *obj = NULL;

	smr_enter(smrht->domain);
	head = __smr_hash_bucket(smrh, key, smrht);
	obj = __smr_hash_entered_find(head, key, smrht);
	if (obj && !smrht->obj_try_get(obj)) {
		/* found, but the object is on its way out: hide it */
		obj = NULL;
	}
	smr_leave(smrht->domain);

	return obj;
}
1702
/*
 * Serialized-context get-or-insert: returns a retained pre-existing
 * object for @c key, or inserts @c link at the head of its bucket
 * and returns NULL.
 *
 * Note: if a matching object exists but obj_try_get() fails
 * (it is being torn down), the new element is inserted anyway
 * and NULL is returned, as if no collision existed.
 */
static inline void *
__smr_hash_serialized_get_or_insert(
	struct smr_hash *smrh,
	smrh_key_t key,
	struct smrq_slink *link,
	smrh_traits_t smrht)
{
	struct smrq_slist_head *head;
	void *obj = NULL;

	head = __smr_hash_bucket(smrh, key, smrht);
	obj = __smr_hash_serialized_find(head, key, smrht);
	if (!obj || !smrht->obj_try_get(obj)) {
		smrh->smrh_count++;
		smrq_serialized_insert_head(head, link);
		obj = NULL;
	}

	return obj;
}
1723
1724 extern void __smr_hash_serialized_clear(
1725 struct smr_hash *smrh,
1726 smrh_traits_t smrht,
1727 void (^free)(void *obj));
1728
1729 extern kern_return_t __smr_hash_shrink_and_unlock(
1730 struct smr_hash *smrh,
1731 lck_mtx_t *lock,
1732 smrh_traits_t smrht);
1733
1734 extern kern_return_t __smr_hash_grow_and_unlock(
1735 struct smr_hash *smrh,
1736 lck_mtx_t *lock,
1737 smrh_traits_t smrht);
1738
1739 #if XNU_KERNEL_PRIVATE
1740 #pragma GCC visibility push(hidden)
1741
1742 #pragma mark SMR scalable hash tables
1743
1744 __enum_closed_decl(smrsh_sel_t, uint8_t, {
1745 SMRSH_CUR,
1746 SMRSH_NEW,
1747 });
1748
/*
 * Loads smrsh_seed[idx] through a laundered address so the access
 * keeps its data dependency on idx (do not "simplify" this).
 */
__attribute__((always_inline))
static inline uint32_t
__smr_shash_load_seed(
	const struct smr_shash *smrh,
	size_t idx)
{
	uintptr_t addr = (uintptr_t)smrh->smrsh_seed;

	/*
	 * prevent the optimizer from thinking it knows _anything_
	 * about `smrsh_seed` to avoid codegen like this:
	 *
	 *     return idx ? smrh->smrsh_seed[1] : smrh->smrsh_seed[0]
	 *
	 * This only has a control dependency which doesn't provide
	 * the proper ordering. (control dependencies order
	 * writes-after-dependency and not loads).
	 */

	return os_atomic_load(&((const uint32_t _Atomic *)addr)[idx], relaxed);
}
1770
/*
 * Loads smrsh_array[idx] through a laundered address so the access
 * keeps its data dependency on idx (do not "simplify" this).
 */
__attribute__((always_inline))
static inline hw_lck_ptr_t *
__smr_shash_load_array(
	const struct smr_shash *smrh,
	size_t idx)
{
	uintptr_t addr = (uintptr_t)smrh->smrsh_array;

	/*
	 * prevent the optimizer from thinking it knows _anything_
	 * about `smrsh_array` to avoid codegen like this:
	 *
	 *     return idx ? smrh->smrsh_array[1] : smrh->smrsh_array[0]
	 *
	 * This only has a control dependency which doesn't provide
	 * the proper ordering. (control dependencies order
	 * writes-after-dependency and not loads).
	 */

	return os_atomic_load(&((hw_lck_ptr_t * _Atomic const *)addr)[idx], relaxed);
}
1792
1793 __attribute__((always_inline, overloadable))
1794 static inline uint32_t
__smr_shash_hash(const struct smr_shash * smrh,size_t idx,smrh_key_t key,smrh_traits_t traits)1795 __smr_shash_hash(
1796 const struct smr_shash *smrh,
1797 size_t idx,
1798 smrh_key_t key,
1799 smrh_traits_t traits)
1800 {
1801 return traits->key_hash(key, __smr_shash_load_seed(smrh, idx));
1802 }
1803
1804 __attribute__((always_inline, overloadable))
1805 static inline uint32_t
__smr_shash_hash(const struct smr_shash * smrh,size_t idx,const struct smrq_slink * link,smrh_traits_t traits)1806 __smr_shash_hash(
1807 const struct smr_shash *smrh,
1808 size_t idx,
1809 const struct smrq_slink *link,
1810 smrh_traits_t traits)
1811 {
1812 return traits->obj_hash(link, __smr_shash_load_seed(smrh, idx));
1813 }
1814
/*
 * Resolves the bucket (lock+head pointer) for @c hash in either the
 * current or the new array, according to a snapshotted state.
 */
static inline hw_lck_ptr_t *
__smr_shash_bucket(
	const struct smr_shash *smrh,
	smrsh_state_t state,
	smrsh_sel_t sel,
	uint32_t hash)
{
	hw_lck_ptr_t *array;
	uint8_t shift;

	/* closed enum: no default so -Wswitch catches new selectors */
	switch (sel) {
	case SMRSH_CUR:
		array = __smr_shash_load_array(smrh, state.curidx);
		shift = state.curshift;
		break;
	case SMRSH_NEW:
		array = __smr_shash_load_array(smrh, state.newidx);
		shift = state.newshift;
		break;
	}

	/* buckets are indexed by the hash's high bits */
	return &array[hash >> shift];
}
1838
1839 static inline bool
__smr_shash_is_stop(struct smrq_slink * link)1840 __smr_shash_is_stop(struct smrq_slink *link)
1841 {
1842 return (uintptr_t)link & SMRSH_BUCKET_STOP_BIT;
1843 }
1844
1845 static inline struct smrq_slink *
__smr_shash_bucket_stop(const hw_lck_ptr_t * head)1846 __smr_shash_bucket_stop(const hw_lck_ptr_t *head)
1847 {
1848 return (struct smrq_slink *)((uintptr_t)head | SMRSH_BUCKET_STOP_BIT);
1849 }
1850
1851 extern void *__smr_shash_entered_find_slow(
1852 const struct smr_shash *smrh,
1853 smrh_key_t key,
1854 hw_lck_ptr_t *head,
1855 smrh_traits_t traits);
1856
/*
 * Fast-path lookup for the scalable hash: walks the current bucket's
 * chain, which is terminated by a tagged "stop" marker identifying
 * the bucket it belongs to.
 *
 * If the walk ends on a stop marker that is not our own bucket's,
 * the chain was spliced by a concurrent resize and the slow path
 * must disambiguate.
 */
static inline void *
__smr_shash_entered_find(
	const struct smr_shash *smrh,
	smrh_key_t key,
	smrh_traits_t traits)
{
	struct smrq_slink *link;
	smrsh_state_t state;
	hw_lck_ptr_t *head;
	uint32_t hash;

	/* dependency-ordered load: seed/array reads below are consistent */
	state = os_atomic_load(&smrh->smrsh_state, dependency);
	hash = __smr_shash_hash(smrh, state.curidx, key, traits);
	head = __smr_shash_bucket(smrh, state, SMRSH_CUR, hash);

	link = (struct smrq_slink *)hw_lck_ptr_value(head);
	while (!__smr_shash_is_stop(link)) {
		if (traits->obj_equ(link, key)) {
			return __smrht_link_to_obj(traits, link);
		}
		link = smr_entered_load(&link->next);
	}

	if (__probable(link == __smr_shash_bucket_stop(head))) {
		return NULL;
	}
	/* raced with a concurrent resize: take the slow path */
	return __smr_shash_entered_find_slow(smrh, key, head, traits);
}
1885
1886 static inline void *
__smr_shash_entered_get(const struct smr_shash * smrh,smrh_key_t key,smrh_traits_t traits)1887 __smr_shash_entered_get(
1888 const struct smr_shash *smrh,
1889 smrh_key_t key,
1890 smrh_traits_t traits)
1891 {
1892 void *obj = __smr_shash_entered_find(smrh, key, traits);
1893
1894 return obj && traits->obj_try_get(obj) ? obj : NULL;
1895 }
1896
1897 extern void __smr_shash_destroy(
1898 struct smr_shash *smrh,
1899 smrh_traits_t traits,
1900 void (^free)(void *));
1901
1902 extern void *__smr_shash_entered_get_or_insert(
1903 struct smr_shash *smrh,
1904 smrh_key_t key,
1905 struct smrq_slink *link,
1906 smrh_traits_t traits);
1907
1908 extern smr_shash_mut_cursor_t __smr_shash_entered_mut_begin(
1909 struct smr_shash *smrh,
1910 struct smrq_slink *link,
1911 smrh_traits_t traits);
1912
1913 extern void __smr_shash_entered_mut_erase(
1914 struct smr_shash *smrh,
1915 smr_shash_mut_cursor_t cursor,
1916 struct smrq_slink *link,
1917 smrh_traits_t traits);
1918
1919 extern void __smr_shash_entered_mut_replace(
1920 smr_shash_mut_cursor_t cursor,
1921 struct smrq_slink *old_link,
1922 struct smrq_slink *new_link);
1923
1924 extern void __smr_shash_entered_mut_abort(
1925 smr_shash_mut_cursor_t cursor);
1926
1927 #pragma GCC visibility pop
1928 #endif /* XNU_KERNEL_PRIVATE */
1929
1930 __END_DECLS
1931
1932 #endif /* _KERN_SMR_HASH_H_ */
1933