1 /*
2 * Copyright (c) 2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #ifndef _KERN_SMR_HASH_H_
31 #define _KERN_SMR_HASH_H_
32
33 #include <kern/lock_mtx.h>
34 #include <kern/smr.h>
35 #include <os/hash.h>
36 #include <vm/vm_memtag.h>
37 #if XNU_KERNEL_PRIVATE
38 #include <kern/lock_ptr.h>
39 #include <kern/counter.h>
40 #endif
41
42 __BEGIN_DECLS
43
44
45 /*!
46 * @typedef smrh_key_t
47 *
48 * @brief
49 * A union that can represent several kinds of keys for SMR Hash Tables.
50 *
51 * @discussion
52 * For strings or opaque structs, @c smrk_len must hold the correct key size.
53 * For scalars (using the smrk_u64 field), the length is more advisory.
54 */
typedef struct {
	union {
		const char     *smrk_string;    /* C-string keys */
		const void     *smrk_opaque;    /* opaque byte-array keys */
		uint64_t        smrk_u64;       /* scalar keys (up to 64 bits) */
	};
	size_t                  smrk_len;       /* key size; advisory for scalars */
} smrh_key_t;
63
64
65 /*!
66 * @struct smrh_traits
67 *
68 * @brief
69 * This structure parametrizes the behavior of SMR hash tables.
70 *
71 * @discussion
72 * Such structures are typically made with @c SMRH_TRAITS_DEFINE_*.
73 *
74 * Traits must be static and const in the same translation unit that implements
75 * the hash table, and possibly export methods to other modules. This design is
76 * not unlike C++ traits structure used in template parametrization, and rely on
77 * the constness of all structures for the compiler to actually elide most
78 * function calls, while maintaining decently good ergonomics.
79 *
80 *
81 * @field key_hash (automatic) computes a hash for a given key.
82 * @field key_equ (automatic) returns if two keys are equal
83 * @field obj_hash (automatic) computes a hash for a given object.
84 * @field obj_equ (automatic) returns if an object has the specified key
85 *
86 * @field domain the SMR domain which protects elements.
87 *
88 * @field obj_try_get function which attempts to acquire a reference on an
89 * object, or returns failure. This function is used
90 * to verify objects are "live" for the smrh_*_get()
91 * verbs.
92 */
struct smrh_traits {
	/* byte offset of the smrq_slink linkage inside hashed objects */
	unsigned long           link_offset;
	/* SMR domain protecting the elements */
	smr_t                   domain;
	/* hash / equality over keys */
	uint32_t              (*key_hash)(smrh_key_t, uint32_t);
	bool                  (*key_equ)(smrh_key_t, smrh_key_t);
	/* hash of a linked object / match of a linked object against a key */
	uint32_t              (*obj_hash)(const struct smrq_slink *, uint32_t);
	bool                  (*obj_equ)(const struct smrq_slink *, smrh_key_t);
	/* optional: attempt to take a reference on a "live" object */
	bool                  (*obj_try_get)(void *);
};
typedef const struct smrh_traits *smrh_traits_t;
103
104
105 #pragma mark SMR hash keys
106
107 /*!
108 * @macro SMRH_SCALAR_KEY()
109 *
110 * @brief
111 * Generates a @c smrh_key_t value out of a scalar.
112 */
/* (e) must be an integer scalar; sizeof(e) selects the 32 vs 64 bit hash */
#define SMRH_SCALAR_KEY(e) \
	(smrh_key_t){ .smrk_u64 = (e), .smrk_len = sizeof(e) }
115
116
117 /*!
118 * @function smrh_key_hash_u32
119 *
120 * @brief
121 * Hashing function to use as a @c key_hash trait for 32bit scalars.
122 */
123 __pure2
124 static inline uint32_t
smrh_key_hash_u32(smrh_key_t key,uint32_t seed)125 smrh_key_hash_u32(smrh_key_t key, uint32_t seed)
126 {
127 uint32_t x = (uint32_t)key.smrk_u64 + seed;
128
129 x ^= x >> 16;
130 x *= 0x7feb352dU;
131 x ^= x >> 15;
132 x *= 0x846ca68bU;
133 x ^= x >> 16;
134
135 return x;
136 }
137
138 /*!
139 * @function smrh_key_hash_u64
140 *
141 * @brief
142 * Hashing function to use as a @c key_hash trait for 64bit scalars.
143 */
144 __pure2
145 static inline uint32_t
smrh_key_hash_u64(smrh_key_t key,uint32_t seed)146 smrh_key_hash_u64(smrh_key_t key, uint32_t seed)
147 {
148 uint64_t x = key.smrk_u64 + seed;
149
150 x ^= x >> 30;
151 x *= 0xbf58476d1ce4e5b9U;
152 x ^= x >> 27;
153 x *= 0x94d049bb133111ebU;
154 x ^= x >> 31;
155
156 return (uint32_t)x;
157 }
158
159 /*!
160 * @function smrh_key_hash_mem
161 *
162 * @brief
163 * Hashing function to use as a @c key_hash trait for byte arrays.
164 */
165 __stateful_pure
166 static inline uint32_t
smrh_key_hash_mem(smrh_key_t key,uint32_t seed)167 smrh_key_hash_mem(smrh_key_t key, uint32_t seed)
168 {
169 return os_hash_jenkins(key.smrk_opaque, key.smrk_len, seed);
170 }
171
172 /*!
173 * @function smrh_key_hash_str
174 *
175 * @brief
176 * Hashing function to use as a @c key_hash trait for C strings.
177 */
178 __stateful_pure
179 static inline uint32_t
smrh_key_hash_str(smrh_key_t key,uint32_t seed)180 smrh_key_hash_str(smrh_key_t key, uint32_t seed)
181 {
182 return os_hash_jenkins(key.smrk_opaque, key.smrk_len, seed);
183 }
184
185
186 /*!
187 * @function smrh_key_equ_scalar
188 *
189 * @brief
190 * Equality function to use as @c key_equ for scalars.
191 */
192 static inline bool
smrh_key_equ_scalar(smrh_key_t k1,smrh_key_t k2)193 smrh_key_equ_scalar(smrh_key_t k1, smrh_key_t k2)
194 {
195 return k1.smrk_u64 == k2.smrk_u64;
196 }
197
198 /*!
199 * @function smrh_key_equ_mem
200 *
201 * @brief
202 * Equality function to use as @c key_equ for byte arrays.
203 */
204 static inline bool
smrh_key_equ_mem(smrh_key_t k1,smrh_key_t k2)205 smrh_key_equ_mem(smrh_key_t k1, smrh_key_t k2)
206 {
207 assert(k1.smrk_len == k2.smrk_len);
208 return memcmp(k1.smrk_opaque, k2.smrk_opaque, k1.smrk_len) == 0;
209 }
210
211 /*!
212 * @function smrh_key_equ_str
213 *
214 * @brief
215 * Equality function to use as @c key_equ for strings.
216 */
217 static inline bool
smrh_key_equ_str(smrh_key_t k1,smrh_key_t k2)218 smrh_key_equ_str(smrh_key_t k1, smrh_key_t k2)
219 {
220 return k1.smrk_len == k2.smrk_len &&
221 memcmp(k1.smrk_opaque, k2.smrk_opaque, k1.smrk_len) == 0;
222 }
223
224
225 #pragma mark SMR hash traits
226
227 #if __cplusplus
228 #define __smrh_traits_storage static constexpr
229 #else
230 #define __smrh_traits_storage static const __used
231 #endif
232
233 /*!
234 * @macro SMRH_TRAITS_DEFINE()
235 *
236 * @brief
237 * Defines a relatively naked typed SMR Hash traits structure.
238 *
239 * @discussion
240 * Clients must provide:
241 * - domain,
242 * - key_hash,
243 * - key_equ,
244 * - obj_hash,
245 * - obj_equ.
246 *
247 * Clients might provide:
248 * - obj_try_get.
249 *
250 * @param name the name of the global to create.
251 * @param type_t the type of objects that will be hashed
252 * @param link_field the linkage used to link elements
253 */
/*
 * The zero-length smrht_obj_type array records the hashed object type in
 * the traits' type, presumably so the smrht_obj_t() casts used by the
 * lookup macros below can recover it; only .smrht exists at runtime.
 */
#define SMRH_TRAITS_DEFINE(name, type_t, link_field, ...) \
	__smrh_traits_storage struct name { \
	        type_t *smrht_obj_type[0]; \
	        struct smrh_traits smrht; \
	} name = { .smrht = { \
	        .link_offset = offsetof(type_t, link_field), \
	        __VA_ARGS__ \
	} }
262
263 /*!
264 * @macro SMRH_TRAITS_DEFINE_SCALAR()
265 *
266 * @brief
267 * Defines a relatively typed SMR Hash traits structure for scalar keys.
268 *
269 * @discussion
270 * Clients must provide:
271 * - domain.
272 *
273 * Clients might provide:
274 * - obj_try_get.
275 *
276 * @param name the name of the global to create.
277 * @param type_t the type of objects that will be hashed
278 * @param key_field the field holding the key
279 * @param link_field the linkage used to link elements
280 */
/*
 * Generates name##_obj_hash() / name##_obj_equ() helpers that extract the
 * scalar key from the linked object, then defines the traits with all four
 * key/obj hooks preset.  The hash width (32 vs 64 bit) is chosen from
 * sizeof(key_field), both at runtime in obj_hash and statically for key_hash.
 */
#define SMRH_TRAITS_DEFINE_SCALAR(name, type_t, key_field, link_field, ...) \
	static uint32_t \
	name ## _obj_hash(const struct smrq_slink *link, uint32_t seed) \
	{ \
	        __auto_type o = __container_of(link, const type_t, link_field); \
	        smrh_key_t k = SMRH_SCALAR_KEY(o->key_field); \
	        \
	        if (k.smrk_len > sizeof(uint32_t)) { \
	                return smrh_key_hash_u64(k, seed); \
	        } else { \
	                return smrh_key_hash_u32(k, seed); \
	        } \
	} \
	\
	static bool \
	name ## _obj_equ(const struct smrq_slink *link, smrh_key_t key) \
	{ \
	        __auto_type o = __container_of(link, const type_t, link_field); \
	        \
	        return smrh_key_equ_scalar(SMRH_SCALAR_KEY(o->key_field), key); \
	} \
	\
	SMRH_TRAITS_DEFINE(name, type_t, link_field, \
	    .key_hash = sizeof(((type_t *)NULL)->key_field) > 4 \
	    ? smrh_key_hash_u64 : smrh_key_hash_u32, \
	    .key_equ = smrh_key_equ_scalar, \
	    .obj_hash = name ## _obj_hash, \
	    .obj_equ = name ## _obj_equ, \
	    __VA_ARGS__ \
	)
311
312 /*!
313 * @macro SMRH_TRAITS_DEFINE_STR()
314 *
315 * @brief
316 * Defines a basic typed SMR Hash traits structure for string keys.
317 *
318 * @discussion
319 * Clients must provide:
320 * - domain,
321 * - obj_hash,
322 * - obj_equ.
323 *
324 * Clients might provide:
325 * - obj_try_get.
326 *
327 * @param name the name of the global to create.
328 * @param type_t the type of objects that will be hashed
329 * @param link_field the linkage used to link elements
330 */
/* Presets key_hash/key_equ for string keys (keys must carry smrk_len). */
#define SMRH_TRAITS_DEFINE_STR(name, type_t, link_field, ...) \
	SMRH_TRAITS_DEFINE(name, type_t, link_field, \
	    .key_hash = smrh_key_hash_str, \
	    .key_equ = smrh_key_equ_str, \
	    __VA_ARGS__ \
	)
337
338 /*!
339 * @macro SMRH_TRAITS_DEFINE_MEM()
340 *
341 * @brief
342 * Defines a basic typed SMR Hash traits structure for byte array keys.
343 *
344 * @discussion
345 * Clients must provide:
346 * - domain,
347 * - obj_hash,
348 * - obj_equ.
349 *
350 * Clients might provide:
351 * - obj_try_get.
352 *
353 * @param name the name of the global to create.
354 * @param type_t the type of objects that will be hashed
355 * @param link_field the linkage used to link elements
356 */
/* Presets key_hash/key_equ for byte-array keys of length smrk_len. */
#define SMRH_TRAITS_DEFINE_MEM(name, type_t, link_field, ...) \
	SMRH_TRAITS_DEFINE(name, type_t, link_field, \
	    .key_hash = smrh_key_hash_mem, \
	    .key_equ = smrh_key_equ_mem, \
	    __VA_ARGS__ \
	)
363
364 /*!
365 * @macro smrht_enter()
366 *
367 * @brief
368 * Conveniency macro to enter the domain associated
369 * with a specified hash table traits
370 */
371 #define smrht_enter(traits) \
372 smr_enter((traits)->smrht.domain)
373
374 /*!
375 * @macro smrht_leave()
376 *
377 * @brief
378 * Conveniency macro to leave the domain associated
379 * with a specified hash table traits
380 */
381 #define smrht_leave(traits) \
382 smr_leave((traits)->smrht.domain)
383
384
385 #pragma mark - SMR hash tables
386
387
388 /*!
389 * @struct smr_hash
390 *
391 * @brief
392 * This type implements simple closed addressing hash table.
393 *
394 * @discussion
395 * Using such a table allows for concurrent readers,
396 * but assumes external synchronization for mutations.
397 *
398 * In particular it means that insertions and deletions
399 * might block behind resizing the table.
400 *
401 * These hash tables aren't meant to be robust to attackers
402 * trying to cause hash collisions.
403 *
404 * Resizing is possible concurrently to readers implementing
405 * the relativistic hash table growth scheme.
406 * (https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf)
407 */
struct smr_hash {
#define SMRH_ARRAY_ORDER_SHIFT  (48)
#define SMRH_ARRAY_ORDER_MASK   (0x00fful << SMRH_ARRAY_ORDER_SHIFT)
	/* bucket array pointer with its order packed into bits 48..55 */
	uintptr_t               smrh_array;
	/* number of elements currently in the table */
	uint32_t                smrh_count;
	/* set while a resize operation is in flight */
	bool                    smrh_resizing;
	uint8_t                 smrh_unused1;
	uint16_t                smrh_unused2;
};
417
418 #pragma mark SMR hash tables: initialization and accessors
419
420 /*!
421 * @function smr_hash_init_empty()
422 *
423 * @brief
424 * Initializes a hash to be empty.
425 *
426 * @discussion
427 * No entry can be added to this hash, but lookup functions
428 * will return sensible results.
429 *
430 * @c smr_hash_init() must be used again if the hash is to be used for
431 * insertions, which is safe with respect to concurrent readers.
432 *
433 * Calling @c smr_hash_destroy() on an empty-initialized hash is legal.
434 *
435 * Whether the hash table is empty-initialized can be tested with
436 * @c smr_hash_is_empty_initialized().
437 */
438 extern void smr_hash_init_empty(
439 struct smr_hash *smrh);
440
441 /*!
442 * @function smr_hash_init()
443 *
444 * @brief
445 * Initializes a hash with the specified size.
446 *
447 * @discussion
448 * This function never fails but requires for `size` to be
449 * smaller than KALLOC_SAFE_ALLOC_SIZE / sizeof(struct smrq_slist_head)
450 * (or to be called during early boot).
451 */
452 extern void smr_hash_init(
453 struct smr_hash *smrh,
454 size_t size);
455
456 /*!
457 * @function smr_hash_destroy()
458 *
459 * @brief
460 * Destroys a hash initiailized with smr_hash_init().
461 *
462 * @discussion
463 * This doesn't clean up the table from any elements it still contains.
464 * @c smr_hash_serialized_clear() must be called first if needed.
465 */
466 extern void smr_hash_destroy(
467 struct smr_hash *smrh);
468
469 /*!
470 * @function smr_hash_is_empty_initialized()
471 *
472 * @brief
473 * Returns whether the smr hash is empty as a result of calling
474 * smr_hash_init_empty().
475 */
476 extern bool smr_hash_is_empty_initialized(
477 struct smr_hash *smrh);
478
479 /*!
480 * @struct smr_array
481 *
482 * @brief
483 * The array pointer of a hash is packed with its size for atomicity reasons,
484 * this type is used for decoding / setting this pointer.
485 */
486 struct smr_hash_array {
487 struct smrq_slist_head *smrh_array;
488 uint16_t smrh_order;
489 };
490
491 /*!
492 * @function smr_hash_array_decode
493 *
494 * @brief
495 * Decodes the array pointer of a hash table.
496 */
497 static inline struct smr_hash_array
smr_hash_array_decode(const struct smr_hash * smrh)498 smr_hash_array_decode(const struct smr_hash *smrh)
499 {
500 struct smr_hash_array array;
501 uintptr_t ptr = os_atomic_load(&smrh->smrh_array, relaxed);
502
503 array.smrh_order = (uint8_t)(ptr >> SMRH_ARRAY_ORDER_SHIFT);
504 ptr |= SMRH_ARRAY_ORDER_MASK;
505 array.smrh_array = (struct smrq_slist_head *)ptr;
506
507 return array;
508 }
509
510 /*!
511 * @function smr_hash_size()
512 *
513 * @brief
514 * Returns the number of buckets in the hash table.
515 */
516 __attribute__((overloadable, always_inline))
517 static inline unsigned long
smr_hash_size(struct smr_hash_array array)518 smr_hash_size(struct smr_hash_array array)
519 {
520 return 1ul << (64 - array.smrh_order);
521 }
522 __attribute__((overloadable, always_inline))
523 static inline unsigned long
smr_hash_size(const struct smr_hash * smrh)524 smr_hash_size(const struct smr_hash *smrh)
525 {
526 return smr_hash_size(smr_hash_array_decode(smrh));
527 }
528
529 /*!
530 * @function smr_hash_serialized_count()
531 *
532 * @brief
533 * Returns the number of elements in the hash table.
534 *
535 * @discussion
536 * It can be called without serialization held,
537 * but the value is then racy.
538 */
539 __attribute__((always_inline))
540 static inline unsigned long
smr_hash_serialized_count(const struct smr_hash * smrh)541 smr_hash_serialized_count(const struct smr_hash *smrh)
542 {
543 return smrh->smrh_count;
544 }
545
546
547 #pragma mark SMR hash tables: read operations
548
549 /*!
550 * @function smr_hash_get()
551 *
552 * @brief
553 * Conveniency function for simple lookups.
554 *
555 * @discussion
556 * The SMR domain for this table must not be entered.
557 *
558 * This function doesn't require any synchronization and will enter/leave
559 *
560 * the SMR domain protecting elements automatically, and call the @c obj_try_get
561 * trait to validate/retain the element.
562 *
563 * @param smrh the hash table
564 * @param key the key to lookup
565 * @param traits the traits for the hash table
566 */
567 #define smr_hash_get(smrh, key, traits) ({ \
568 (smrht_obj_t(traits))__smr_hash_get(smrh, key, &(traits)->smrht); \
569 })
570
571 /*!
572 * @function smr_hash_contains()
573 *
574 * @brief
575 * Conveniency function for simple contains checks.
576 *
577 * @discussion
578 * The SMR domain for this table must not be entered.
579 *
580 * This function doesn't require any synchronization and will enter/leave
581 *
582 * @param smrh the hash table
583 * @param key the key to lookup
584 * @param traits the traits for the hash table
585 */
586 #define smr_hash_contains(smrh, key, traits) ({ \
587 smrh_traits_t __smrht = &(traits)->smrht; \
588 struct smrq_slist_head *__hd; \
589 bool __contains; \
590 \
591 smr_enter(__smrht->domain); \
592 __hd = __smr_hash_bucket(smrh, key, __smrht); \
593 __contains = (__smr_hash_entered_find(__hd, key, __smrht) != NULL); \
594 smr_leave(__smrht->domain); \
595 \
596 __contains; \
597 })
598
599 /*!
600 * @function smr_hash_entered_find()
601 *
602 * @brief
603 * Lookups an element in the table by key.
604 *
605 * @discussion
606 * The SMR domain for this table must be entered.
607 *
608 * This function returns the first element found that matches the key.
609 * This element might be about to be deleted or stale, and it is up
610 * to the client to make that determination if required.
611 *
612 * There might be more elements that can be found for that key,
613 * but because elements are inserted at the head of buckets,
614 * other matches should all be staler entries than the one returned.
615 *
616 * @param smrh the hash table
617 * @param key the key to lookup
618 * @param traits the traits for the hash table
619 */
620 #define smr_hash_entered_find(smrh, key, traits) ({ \
621 smrh_traits_t __smrht = &(traits)->smrht; \
622 struct smrq_slist_head *__hd = __smr_hash_bucket(smrh, key, __smrht); \
623 \
624 (smrht_obj_t(traits))__smr_hash_entered_find(__hd, key, __smrht); \
625 })
626
627 /*!
628 * @function smr_hash_serialized_find()
629 *
630 * @brief
631 * Lookups an element in the table by key.
632 *
633 * @discussion
634 * The SMR domain for this table must NOT be entered.
635 * This function requires external serialization with other mutations.
636 *
637 * This function returns the first element found that matches the key.
638 * This element might be about to be deleted or stale, and it is up
639 * to the client to make that determination if required.
640 *
641 * There might be more elements that can be found for that key,
642 * but because elements are inserted at the head of buckets,
643 * other matches should all be staler entries than the one returned.
644 *
645 * @param smrh the hash table
646 * @param key the key to lookup
647 * @param traits the traits for the hash table
648 */
649 #define smr_hash_serialized_find(smrh, key, traits) ({ \
650 smrh_traits_t __smrht = &(traits)->smrht; \
651 struct smrq_slist_head *__hd = __smr_hash_bucket(smrh, key, __smrht); \
652 \
653 (smrht_obj_t(traits))__smr_hash_serialized_find(__hd, key, __smrht); \
654 })
655
656
657 #pragma mark SMR hash tables: mutations
658
659 /*!
660 * @function smr_hash_serialized_insert()
661 *
662 * @brief
663 * Insert an object in the hash table.
664 *
665 * @discussion
666 * The SMR domain for this table must NOT be entered.
667 * This function requires external serialization with other mutations.
668 *
669 * Clients of this call must have checked there is no previous entry
670 * for this key in the hash table.
671 *
672 * @param smrh the hash table
673 * @param link the pointer to the linkage to insert.
674 * @param traits the traits for the hash table
675 */
676 #define smr_hash_serialized_insert(smrh, link, traits) ({ \
677 smrh_traits_t __smrht = &(traits)->smrht; \
678 struct smr_hash *__h = (smrh); \
679 struct smrq_slink *__link = (link); \
680 struct smrq_slist_head *__hd; \
681 \
682 __hd = __smr_hash_bucket(__h, __link, __smrht); \
683 __h->smrh_count++; \
684 smrq_serialized_insert_head(__hd, __link); \
685 })
686
687 /*!
688 * @function smr_hash_serialized_get_or_insert()
689 *
690 * @brief
691 * Insert an object in the hash table, or get the conflicting element.
692 *
693 * @discussion
694 * The SMR domain for this table must NOT be entered.
695 * This function requires external serialization with other mutations.
696 *
697 * @param smrh the hash table
698 * @param link the pointer to the linkage to insert.
699 * @param traits the traits for the hash table
700 */
701 #define smr_hash_serialized_get_or_insert(smrh, key, link, traits) ({ \
702 (smrht_obj_t(traits))__smr_hash_serialized_get_or_insert(smrh, key, \
703 link, &(traits)->smrht); \
704 })
705
706 /*!
707 * @function smr_hash_serialized_remove()
708 *
709 * @brief
710 * Remove an object from the hash table.
711 *
712 * @discussion
713 * The SMR domain for this table must NOT be entered.
714 * This function requires external serialization with other mutations.
715 *
716 * Note that the object once removed must be retired via SMR,
717 * and not freed immediately.
718 *
719 * If the object isn't in the hash table, this function will crash with
720 * a NULL deref while walking the bucket where the element should belong.
721 *
722 * @param smrh the hash table
723 * @param link the pointer to the linkage to remove.
724 * @param traits the traits for the hash table
725 */
726 #define smr_hash_serialized_remove(smrh, link, traits) ({ \
727 smrh_traits_t __smrht = &(traits)->smrht; \
728 struct smr_hash *__h = (smrh); \
729 struct smrq_slink *__link = (link); \
730 struct smrq_slist_head *__hd; \
731 \
732 __hd = __smr_hash_bucket(__h, __link, __smrht); \
733 __h->smrh_count--; \
734 smrq_serialized_remove(__hd, __link); \
735 })
736
737 /*!
738 * @function smr_hash_serialized_replace()
739 *
740 * @brief
741 * Replaces an object in the hash
742 *
743 * @discussion
744 * The SMR domain for this table must NOT be entered.
745 * This function requires external serialization with other mutations.
746 *
747 * Note that the old object once removed must be retired via SMR,
748 * and not freed immediately.
749 *
750 * If the old object isn't in the hash table, this function will crash with
751 * a NULL deref while walking the bucket where the element should belong.
752 *
753 * The new object MUST have the same key as the object it replaces,
754 * otherwise behavior is undefined.
755 *
756 * @param smrh the hash table
757 * @param old_link the pointer to the linkage to remove.
758 * @param new_link the pointer to the linkage to insert.
759 * @param traits the traits for the hash table
760 */
761 #define smr_hash_serialized_replace(smrh, old_link, new_link, traits) ({ \
762 smrh_traits_t __smrht = &(traits)->smrht; \
763 struct smrq_slink *__link = (old_link); \
764 struct smrq_slist_head *__hd; \
765 \
766 __hd = __smr_hash_bucket(smrh, __link, __smrht); \
767 smrq_serialized_replace(__hd, __link, (new_link)); \
768 })
769
770 /*!
771 * @function smr_hash_serialized_clear()
772 *
773 * @brief
774 * Empties an SMR hash table.
775 *
776 * @discussion
777 * This function requires external serialization with other mutations.
778 *
779 * @param smrh the hash table to clear
780 * @param traits the traits for this hash table
781 * @param free a block to call on each element in the table.
782 */
783 #define smr_hash_serialized_clear(smrh, traits, free...) \
784 __smr_hash_serialized_clear(smrh, &(traits)->smrht, free)
785
786
787 #pragma mark SMR hash tables: resizing
788
789 /*
790 * Implementing the growth policy is not builtin because
791 * there is a LOT of ways to do it, with some variants
792 * (such as asynchronoulsy) require a lot of bookkeeping
793 * which would grow the structure and prevent lean clients
794 * to use it without any growth policy.
795 */
796
797 /*!
798 * @function smr_hash_serialized_should_shrink()
799 *
800 * @brief
801 * Allows implementing a typical policy for shrinking.
802 *
803 * @discussion
804 * Returns whether the table is at least @c min_size large,
805 * and whether the table has more than @c size_factor buckets
806 * per @c count_factor elements.
807 */
808 static inline bool
smr_hash_serialized_should_shrink(const struct smr_hash * smrh,uint32_t min_size,uint32_t size_factor,uint32_t count_factor)809 smr_hash_serialized_should_shrink(
810 const struct smr_hash *smrh,
811 uint32_t min_size,
812 uint32_t size_factor,
813 uint32_t count_factor)
814 {
815 size_t size = smr_hash_size(smrh);
816
817 if (size > min_size && !smrh->smrh_resizing) {
818 return size * count_factor > smrh->smrh_count * size_factor;
819 }
820 return false;
821 }
822
823 /*!
824 * @function smr_hash_serialized_should_grow()
825 *
826 * @brief
827 * Allows implementing a typical policy for shrinking.
828 *
829 * @discussion
830 * Returns whether the table has less than @c size_factor buckets
831 * per @c count_factor elements.
832 */
833 static inline bool
smr_hash_serialized_should_grow(const struct smr_hash * smrh,uint32_t size_factor,uint32_t count_factor)834 smr_hash_serialized_should_grow(
835 const struct smr_hash *smrh,
836 uint32_t size_factor,
837 uint32_t count_factor)
838 {
839 size_t size = smr_hash_size(smrh);
840
841 if (!smrh->smrh_resizing) {
842 return size * count_factor < smrh->smrh_count * size_factor;
843 }
844 return false;
845 }
846
847 /*!
848 * @function smr_hash_shrink_and_unlock()
849 *
850 * @brief
851 * Shrinks a hash table (halves the number of buckets).
852 *
853 * @discussion
854 * This function synchronizes with other mutations using
855 * the passed in mutex.
856 *
857 * Shrinking is a relatively fast operation (however it still
858 * is mostly linear in the number of elements in the hash).
859 *
860 * This function doesn't perform any policy checks such as
861 * "minimal size" being sane or density of buckets being good.
862 *
863 * This function assumes it is called with the lock held,
864 * and returns with it unlocked.
865 *
866 * @returns
867 * - KERN_SUCCESS: the resize was successful.
868 * - KERN_RESOURCE_SHORTAGE: the system was out of memory.
869 * - KERN_FAILURE: the hash table was already resizing.
870 */
871 #define smr_hash_shrink_and_unlock(smrh, mutex, traits) \
872 __smr_hash_shrink_and_unlock(smrh, mutex, &(traits)->smrht)
873
874
875 /*!
876 * @function smr_hash_grow_and_unlock()
877 *
878 * @brief
879 * Grows a hash table (doubles the number of buckets).
880 *
881 * @discussion
882 * This function synchronizes with other mutations using
883 * the passed in mutex.
884 *
885 * Growing is relatively expensive, as it will rehash all elements,
886 * and call smr_synchronize() several times over the course
887 * of the operation. And mutations are delayed while this growth is
888 * occurring.
889 *
890 * This function doesn't perform any policy checks such as
891 * "maximal size" being sane or density of buckets being good.
892 *
893 * This function assumes it is called with the lock held,
894 * and returns with it unlocked.
895 *
896 * @returns
897 * - KERN_SUCCESS: the resize was successful.
898 * - KERN_RESOURCE_SHORTAGE: the system was out of memory.
899 * - KERN_FAILURE: the hash table was already resizing.
900 */
901 #define smr_hash_grow_and_unlock(smrh, mutex, traits) \
902 __smr_hash_grow_and_unlock(smrh, mutex, &(traits)->smrht)
903
904
905 #pragma mark SMR hash tables: iteration
906
907 /*!
908 * @struct smr_hash_iterator
909 *
910 * @brief
911 * Structure used for SMR hash iterations.
912 *
913 * @discussion
914 * Do not manipulate internal fields directly, use the accessors instead.
915 *
916 * Using iteration can be done either under an entered SMR domain,
917 * or under serialization.
918 *
919 * However erasure is only supported under serialization.
920 *
921 * Note that entered enumeration is done with preemption disabled
922 * and should be used in a limited capacity. Such enumerations
923 * might observe elements already removed from the table (due
924 * to concurrent deletions) or elements twice (due to concurrent resizes).
925 */
926 struct smr_hash_iterator {
927 struct smr_hash *smrh;
928 struct smrq_slist_head *hd_next;
929 struct smrq_slist_head *hd_last;
930 __smrq_slink_t *prev;
931 struct smrq_slink *link;
932 };
933
934 /*!
935 * @function smr_hash_iter_begin()
936 *
937 * @brief
938 * Initialize an SMR iterator, and advance it to the first element.
939 *
940 * @discussion
941 * This function must be used in either serialized or entered context.
942 */
943 static inline struct smr_hash_iterator
smr_hash_iter_begin(struct smr_hash * smrh)944 smr_hash_iter_begin(struct smr_hash *smrh)
945 {
946 struct smr_hash_array array = smr_hash_array_decode(smrh);
947 struct smr_hash_iterator it = {
948 .smrh = smrh,
949 .hd_next = array.smrh_array,
950 .hd_last = array.smrh_array + smr_hash_size(array),
951 };
952
953 do {
954 it.prev = &it.hd_next->first;
955 it.link = smr_entered_load(it.prev);
956 it.hd_next++;
957 } while (it.link == NULL && it.hd_next < it.hd_last);
958
959 return it;
960 }
961
962 /*!
963 * @function smr_hash_iter_get()
964 *
965 * @brief
966 * Returns the current value of the iterator, or NULL.
967 *
968 * @discussion
969 * This function must be used in either serialized or entered context.
970 */
971 #define smr_hash_iter_get(it, traits) ({ \
972 struct smr_hash_iterator __smrh_it = (it); \
973 void *__obj = NULL; \
974 \
975 if (__smrh_it.link) { \
976 __obj = __smrht_link_to_obj(&(traits)->smrht, __smrh_it.link); \
977 } \
978 \
979 (smrht_obj_t(traits))__obj; \
980 })
981
982 /*!
983 * @function smr_hash_iter_advance()
984 *
985 * @brief
986 * Advance the iterator to the next element.
987 *
988 * @description
989 * This function must be used in either serialized or entered context.
990 */
991 static inline void
smr_hash_iter_advance(struct smr_hash_iterator * it)992 smr_hash_iter_advance(struct smr_hash_iterator *it)
993 {
994 it->prev = &it->link->next;
995
996 while ((it->link = smr_entered_load(it->prev)) == NULL) {
997 if (it->hd_next == it->hd_last) {
998 break;
999 }
1000 it->prev = &it->hd_next->first;
1001 it->hd_next++;
1002 }
1003 }
1004
1005 /*!
1006 * @function smr_hash_iter_serialized_erase()
1007 *
1008 * @brief
1009 * Erases the current item from the hash and advance the cursor.
1010 *
1011 * @description
1012 * This function requires external serialization with other mutations.
1013 *
1014 * The object once removed must be retired via SMR,
1015 * and not freed immediately.
1016 */
1017 static inline void
smr_hash_iter_serialized_erase(struct smr_hash_iterator * it)1018 smr_hash_iter_serialized_erase(struct smr_hash_iterator *it)
1019 {
1020 it->link = smr_serialized_load(&it->link->next);
1021 it->smrh->smrh_count--;
1022 smr_serialized_store_relaxed(it->prev, it->link);
1023
1024 while (it->link == NULL) {
1025 if (it->hd_next == it->hd_last) {
1026 break;
1027 }
1028 it->prev = &it->hd_next->first;
1029 it->link = smr_serialized_load(it->prev);
1030 it->hd_next++;
1031 }
1032 }
1033
1034 /*!
1035 * @function smr_hash_foreach()
1036 *
1037 * @brief
1038 * Enumerates all elements in a hash table.
1039 *
1040 * @discussion
1041 * This function must be used in either serialized or entered context.
1042 *
1043 * When used in entered context, the enumeration might observe stale objects
1044 * that haven't been removed yet, or elements twice (due to concurrent resizes),
1045 * and the disambiguation must be done by the client if it matters.
1046 *
1047 * It is not permitted to erase elements during this enumeration,
1048 * manual use of the iterator APIs must be used if this is desired.
1049 *
1050 * @param obj the enumerator variable
1051 * @param smrh the hash table to enumerate
1052 * @param traits the traits for the hash table
1053 */
1054 #define smr_hash_foreach(obj, smrh, traits) \
1055 for (struct smr_hash_iterator __it = smr_hash_iter_begin(smrh); \
1056 ((obj) = smr_hash_iter_get(__it, traits)); \
1057 smr_hash_iter_advance(&__it))
1058
1059
1060 #if XNU_KERNEL_PRIVATE
1061 #pragma mark - SMR scalable hash tables
1062
1063
1064 /*!
1065 * @typedef smrsh_state_t
1066 *
1067 * @brief
1068 * Atomic state for the scalable SMR hash table.
1069 *
1070 * @discussion
1071 * Scalable hash tables have 2 sets of seeds and bucket arrays,
1072 * which are used for concurrent rehashing.
1073 *
1074 * Each growth/shrink/re-seed operation will swap sizes
1075 * and set of seed/array atomically by changing the state.
1076 */
typedef struct {
	uint8_t curidx;         /* index (0 or 1) of the current seed/array pair */
	uint8_t curshift;       /* hash shift selecting a bucket in the current array */
	uint8_t newidx;         /* index of the pair being rehashed into */
	uint8_t newshift;       /* hash shift for the new array */
} smrsh_state_t;
1083
1084
1085 /*!
1086 * @typedef smrsh_rehash_t
1087 *
1088 * @brief
1089 * Internal state management for various rehashing operations.
1090 */
__options_closed_decl(smrsh_rehash_t, uint8_t, {
	SMRSH_REHASH_NONE    = 0x00,    /* no rehash requested */
	SMRSH_REHASH_RESEED  = 0x01,    /* re-seed (rebalance) operation */
	SMRSH_REHASH_SHRINK  = 0x02,    /* shrink operation */
	SMRSH_REHASH_GROW    = 0x04,    /* grow operation */
	SMRSH_REHASH_RUNNING = 0x08,    /* a rehash is currently in flight */
});
1098
1099
1100 /*!
1101 * @enum smrsh_policy_t
1102 *
1103 * @brief
1104 * Describes the growth/shrink policies for scalable SMR hash tables.
1105 *
1106 * @description
1107 * In general, singleton global hash tables that are central
1108 * to the performance of the system likely want to use
1109 * @c SMRSH_BALANCED_NOSHRINK or @c SMRSH_FASTEST.
1110 *
1111 * Hash tables that tend to be instantiated multiple times,
1112 * or have bursty behaviors should use more conservative policies.
1113 *
1114 * @const SMRSH_COMPACT
1115 * Choose a policy that is very memory conscious and will favour aggressive
1116 * shrinking and allow relatively long hash chains.
1117 *
1118 * @const SMRSH_BALANCED
1119 * Choose a balanced policy that allows for medium sized hash chains,
1120 * and shrinks less aggressively than @c SMRSH_COMPACT.
1121 *
1122 * @const SMRSH_BALANCED_NOSHRINK
1123 * This policy is the same as @c SMRSH_BALANCED, but the hash table
1124 * will not be allowed to shrink.
1125 *
1126 * @const SMRSH_FASTEST
1127 * This policy grows aggressively, only tolerating relatively short
1128 * hash chains, and will never shrink.
1129 */
__enum_closed_decl(smrsh_policy_t, uint32_t, {
	SMRSH_COMPACT,             /* aggressive shrinking, long chains tolerated */
	SMRSH_BALANCED,            /* medium chains, less aggressive shrinking */
	SMRSH_BALANCED_NOSHRINK,   /* like SMRSH_BALANCED, but never shrinks */
	SMRSH_FASTEST,             /* short chains, grows eagerly, never shrinks */
});
1136
1137
1138 /*!
1139 * @struct smr_shash
1140 *
1141 * @brief
1142 * Type for scalable SMR hash table.
1143 *
1144 * @description
1145 * Unlike its little brother @c smr_hash, these kinds of hash tables
1146 * allow for concurrent mutations (with fined grained per-bucket locks)
1147 * of the hash table.
1148 *
1149 * It also observes high collision rates and tries to adjust the hash
1150 * seeds in order to rebalance the hash tables when this happens.
1151 *
1152 * All this goodness however comes at a cost:
1153 * - these hash tables can't be enumerated
1154 * - these hash tables are substantially bigger (@c smr_hash is 2 pointers big,
1155 * where @c smr_shash is bigger and allocates a thread_call and a scalable
1156 * counter).
1157 */
struct smr_shash {
	hw_lck_ptr_t *_Atomic smrsh_array[2];   /* the two bucket arrays (see smrsh_state_t) */
	uint32_t _Atomic smrsh_seed[2];         /* the two hash seeds, one per array */
	smrsh_state_t _Atomic smrsh_state;      /* current/new pair selection */
	smrsh_rehash_t _Atomic smrsh_rehashing; /* rehash operation in flight, if any */
	smrsh_policy_t smrsh_policy;            /* growth/shrink policy */
	uint16_t smrsh_min_shift : 5;           /* floor on the array shift (from min_size) */
	uint16_t __unused_bits : 11;
	scalable_counter_t smrsh_count;         /* number of elements in the table */
	struct thread_call *smrsh_callout;      /* NOTE(review): presumably drives async rehash — confirm */
};

/* Tag bit marking the sentinel pointer terminating each bucket chain. */
#define SMRSH_BUCKET_STOP_BIT 0x1ul
1171
1172
1173
1174 #pragma mark SMR scalable hash tables: initialization and accessors
1175
1176 /*!
1177 * @function smr_shash_init()
1178 *
1179 * @brief
1180 * Initializes a scalable hash table.
1181 *
1182 * @param smrh the scalable hash table struct to initialize.
1183 * @param policy the growth policy to use (see @c smrsh_policy_t).
1184 * @param min_size the number of buckets the table should not shrink below.
1185 */
1186 extern void smr_shash_init(
1187 struct smr_shash *smrh,
1188 smrsh_policy_t policy,
1189 size_t min_size);
1190
1191 /*!
1192 * @function smr_shash_destroy()
1193 *
1194 * @brief
1195 * Releases the resources held by a table.
1196 *
1197 * @param smrh the scalable hash table struct to destroy.
1198 * @param traits the SMR hash traits for this table.
1199 * @param free an optional callback to call on each element
1200 * still in the hash table.
1201 */
1202 #define smr_shash_destroy(smrh, traits, free...) \
1203 __smr_shash_destroy(smrh, &(traits)->smrht, free)
1204
1205
1206 #pragma mark SMR scalable hash tables: read operations
1207
1208 /*!
1209 * @function smr_shash_entered_find()
1210 *
1211 * @brief
1212 * Looks up an element by key in the hash table.
1213 *
1214 * @discussion
1215 * The SMR domain protecting the hash table elements must have been entered
1216 * to call this function.
1217 *
1218 * This function returns an object for which the @c obj_try_get
1219 * callback hasn't been called, which means that accessing the element
1220 * is only valid inside the current SMR critical section, or until
1221 * further action to "retain" the element has been taken.
1222 *
1223 * @param smrh the scalable hash table.
1224 * @param key the key to lookup.
1225 * @param traits the SMR hash traits for this table.
1226 *
1227 * @returns the first found element or NULL.
1228 */
1229 #define smr_shash_entered_find(smrh, key, traits) ({ \
1230 void *__obj; \
1231 \
1232 __obj = __smr_shash_entered_find(smrh, key, &(traits)->smrht); \
1233 \
1234 (smrht_obj_t(traits))__obj; \
1235 })
1236
1237
1238 /*!
1239 * @function smr_shash_entered_get()
1240 *
1241 * @brief
1242 * Looks up an element by key in the hash table.
1243 *
1244 * @discussion
1245 * The SMR domain protecting the hash table elements must have been entered
1246 * to call this function.
1247 *
1248 * This function returns an object for which the @c obj_try_get
1249 * callback has been called, which ensures it is valid even
1250 * after the current SMR critical section ends.
1251 *
1252 * @param smrh the scalable hash table.
1253 * @param key the key to lookup.
1254 * @param traits the SMR hash traits for this table.
1255 *
1256 * @returns the first found element or NULL.
1257 */
1258 #define smr_shash_entered_get(smrh, key, traits) ({ \
1259 void *__obj; \
1260 \
1261 __obj = __smr_shash_entered_get(smrh, key, &(traits)->smrht); \
1262 \
1263 (smrht_obj_t(traits))__obj; \
1264 })
1265
1266 /*!
1267 * @function smr_shash_get()
1268 *
1269 * @brief
1270 * Looks up an element by key in the hash table.
1271 *
1272 * @discussion
1273 * Conveniency wrapper for @c smr_shash_entered_get()
1274 * which creates an SMR critical section around its call.
1275 *
1276 * The SMR domain protecting the hash table must NOT have been entered
1277 * to call this function.
1278 *
1279 * @param smrh the scalable hash table.
1280 * @param key the key to lookup.
1281 * @param traits the SMR hash traits for this table.
1282 *
1283 * @returns the first found element or NULL.
1284 */
1285 #define smr_shash_get(smrh, key, traits) ({ \
1286 void *__obj; \
1287 \
1288 smrht_enter(traits); \
1289 __obj = __smr_shash_entered_get(smrh, key, &(traits)->smrht); \
1290 smrht_leave(traits); \
1291 \
1292 (smrht_obj_t(traits))__obj; \
1293 })
1294
1295
1296 #pragma mark SMR scalable hash tables: mutations
1297
1298 /*!
1299 * @function smr_shash_entered_get_or_insert()
1300 *
1301 * @brief
1302 * Inserts an element in the hash table, or return a pre-existing element
1303 * in the hash table for that key.
1304 *
1305 * @discussion
1306 * The SMR domain protecting the hash table elements must have been entered
1307 * to call this function.
1308 *
1309 * This function either returns an object for which the @c obj_try_get
1310 * callback has been called, or inserts the passed in element.
1311 *
1312 * @param smrh the scalable hash table.
1313 * @param key the key to lookup.
1314 * @param link the element to insert (its "key" must be @c key).
1315 * @param traits the SMR hash traits for this table.
1316 *
1317 * @returns NULL if the insertion happened,
1318 * or the "retained" colliding element otherwise.
1319 */
#define smr_shash_entered_get_or_insert(smrh, key, link, traits) ({ \
	smrh_traits_t __smrht = &(traits)->smrht;                       \
	void *__obj;                                                    \
	\
	/* pass the type-erased traits captured above */                \
	__obj = __smr_shash_entered_get_or_insert(smrh, key, link,      \
	    __smrht);                                                   \
	\
	(smrht_obj_t(traits))__obj;                                     \
})
1329
1330 /*!
1331 * @function smr_shash_get_or_insert()
1332 *
1333 * @brief
1334 * Looks up an element by key in the hash table.
1335 *
1336 * @discussion
1337 * Conveniency wrapper for @c smr_shash_entered_get_or_insert()
1338 * which creates an SMR critical section around its call.
1339 *
1340 * The SMR domain protecting the hash table must NOT have been entered
1341 * to call this function.
1342 *
1343 * @param smrh the scalable hash table.
1344 * @param key the key to lookup.
1345 * @param link the element to insert (its "key" must be @c key).
1346 * @param traits the SMR hash traits for this table.
1347 *
1348 * @returns NULL if the insertion happened,
1349 * or the "retained" colliding element otherwise.
1350 */
1351 #define smr_shash_get_or_insert(smrh, key, link, traits) ({ \
1352 void *__obj; \
1353 \
1354 smrht_enter(traits); \
1355 __obj = __smr_shash_entered_get_or_insert(smrh, key, link, \
1356 &(traits)->smrht); \
1357 smrht_leave(traits); \
1358 \
1359 (smrht_obj_t(traits))__obj; \
1360 })
1361
1362
1363 /*!
1364 * @function smr_shash_entered_remove()
1365 *
1366 * @brief
1367 * Removes an element from the hash table.
1368 *
1369 * @discussion
1370 * The SMR domain protecting the hash table must have been entered
1371 * to call this function.
1372 *
1373 * The removed element can't be added back to the hash table
1374 * and must be retired via SMR and not freed immediately.
1375 *
1376 * @param smrh the scalable hash table.
1377 * @param link the element to remove from the hash table.
1378 * @param traits the SMR hash traits for this table.
1379 */
#define smr_shash_entered_remove(smrh, link, traits) ({ \
	smr_shash_mut_cursor_t __cursor; \
	struct smrq_slink *__link = (link); \
	struct smr_shash *__smrh = (smrh); \
	\
	/* lock the element's bucket, then erase it under that lock */ \
	__cursor = smr_shash_entered_mut_begin(__smrh, __link, traits); \
	smr_shash_entered_mut_erase(__smrh, __cursor, __link, traits); \
})
1388
1389 /*!
1390 * @function smr_shash_remove()
1391 *
1392 * @brief
1393 * Removes an element from the hash table.
1394 *
1395 * @discussion
1396 * Conveniency wrapper for @c smr_shash_entered_remove()
1397 * which creates an SMR critical section around its call.
1398 *
1399 * The SMR domain protecting the hash table must NOT have been entered
1400 * to call this function.
1401 *
1402 * The removed element can't be added back to the hash table
1403 * and must be retired via SMR and not freed immediately.
1404 *
1405 * @param smrh the scalable hash table.
1406 * @param link the element to remove from the hash table.
1407 * @param traits the SMR hash traits for this table.
1408 */
1409 #define smr_shash_remove(smrh, link, traits) ({ \
1410 smrht_enter(traits); \
1411 smr_shash_entered_remove(smrh, link, traits); \
1412 smrht_leave(traits); \
1413 })
1414
1415
1416 /*!
1417 * @function smr_shash_entered_replace()
1418 *
1419 * @brief
1420 * Replaces an element in the hash table with another.
1421 *
1422 * @discussion
1423 * Elements must have the same key, otherwise the behavior is undefined.
1424 *
1425 * The SMR domain protecting the hash table must have been entered
1426 * to call this function.
1427 *
1428 * The removed element can't be added back to the hash table
1429 * and must be retired via SMR and not freed immediately.
1430 *
1431 * @param smrh the scalable hash table.
1432 * @param old_link the element to remove from the hash table.
1433 * @param new_link the element to insert in the hash table.
1434 * @param traits the SMR hash traits for this table.
1435 */
1436 #define smr_shash_entered_replace(smrh, old_link, new_link, traits) ({ \
1437 smr_shash_mut_cursor_t __cursor; \
1438 struct smrq_slink *__link = (old_link); \
1439 \
1440 __cursor = smr_shash_entered_mut_begin(smrh, __link, traits); \
1441 smr_shash_entered_mut_replace(__cursor, __link, new_link); \
1442 })
1443
1444 /*!
1445 * @function smr_shash_replace()
1446 *
1447 * @brief
1448 * Replaces an element in the hash table with another.
1449 *
1450 * @discussion
1451 * Conveniency wrapper for @c smr_shash_entered_replace()
1452 * which creates an SMR critical section around its call.
1453 *
1454 * Elements must have the same key, otherwise the behavior is undefined.
1455 *
1456 * The SMR domain protecting the hash table must NOT have been entered
1457 * to call this function.
1458 *
1459 * The removed element can't be added back to the hash table
1460 * and must be retired via SMR and not freed immediately.
1461 *
1462 * @param smrh the scalable hash table.
1463 * @param old_link the element to remove from the hash table.
1464 * @param new_link the element to insert in the hash table.
1465 * @param traits the SMR hash traits for this table.
1466 */
1467 #define smr_shash_replace(smrh, old_link, new_link, traits) ({ \
1468 smrht_enter(traits); \
1469 smr_shash_entered_replace(smrh, old_link, new_link, traits); \
1470 smrht_leave(traits); \
1471 })
1472
1473
1474 #pragma mark SMR scalable hash tables: advanced mutations
1475
1476 /*!
1477 * @typedef smr_shash_mut_cursor_t
1478 *
1479 * @brief
1480 * Cursor used for advanced mutations.
1481 */
typedef struct {
	hw_lck_ptr_t *head;     /* bucket head the element lives in (held locked) */
	__smrq_slink_t *prev;   /* link pointing at the element being mutated */
} smr_shash_mut_cursor_t;
1486
1487
1488 /*!
1489 * @macro smr_shash_entered_mut_begin()
1490 *
1491 * @brief
1492 * Creates a mutation cursor for the specified element.
1493 *
1494 * @discussion
1495 * A mutation cursor allows to erase or replace an element
1496 * in the hash table.
1497 *
1498 * The cursor returned by this function holds a lock,
1499 * and it is undefined to have two live cursors at a time
1500 * (it will typically deadlock, and unlike typical locks,
1501 * there's no a priori lock ordering that can be derived
1502 * to prevent it).
1503 *
1504 * The SMR domain protecting the hash table must have been entered
1505 * to call this function.
1506 *
1507 * One and exactly one of these three calls must be performed
1508 * on a cursor before the SMR transaction is ended:
1509 * - smr_shash_entered_mut_erase() to erase the element it was made for,
1510 * - smr_shash_entered_mut_replace() to replace the element it was made for,
1511 * - smr_shash_entered_mut_abort() to abandon the cursor without mutation.
1512 *
1513 * @param smrh the scalable hash table.
1514 * @param link the element to create a cursor for (must be in the hash).
1515 * @param traits the SMR hash traits for this table.
1516 */
1517 #define smr_shash_entered_mut_begin(smrh, link, traits) \
1518 __smr_shash_entered_mut_begin(smrh, link, &(traits)->smrht)
1519
1520
1521 /*!
1522 * @macro smr_shash_entered_mut_erase()
1523 *
1524 * @brief
1525 * Erases the element used to make the cursor.
1526 *
1527 * @discussion
1528 * The passed in element must be the same that was used to make the cursor.
1529 *
1530 * The call must be made in the same SMR transaction that was entered
1531 * to make the cursor.
1532 *
1533 * The cursor is invalidated once this call returns.
1534 *
1535 * The removed element can't be added back to the hash table
1536 * and must be retired via SMR and not freed immediately.
1537 *
1538 * @param smrh the scalable hash table.
1539 * @param cursor the cursor made for the element to remove.
1540 * @param link the element used to create @c cursor.
1541 * @param traits the SMR hash traits for this table.
1542 */
1543 #define smr_shash_entered_mut_erase(smrh, cursor, link, traits) \
1544 __smr_shash_entered_mut_erase(smrh, cursor, link, &(traits)->smrht)
1545
1546
/*!
 * @macro smr_shash_entered_mut_replace()
 *
 * @brief
 * Replaces the element used to make the cursor.
 *
 * @discussion
 * The passed in element must be the same that was used to make the cursor.
 *
 * The call must be made in the same SMR transaction that was entered
 * to make the cursor.
 *
 * The cursor is invalidated once this call returns.
 *
 * The removed element can't be added back to the hash table
 * and must be retired via SMR and not freed immediately.
 *
 * The new object MUST have the same key as the object it replaces,
 * otherwise behavior is undefined.
 *
 * @param cursor the cursor made for the element to remove.
 * @param old_link the element used to create @c cursor.
 * @param new_link the element to replace @c old_link with.
 */
#define smr_shash_entered_mut_replace(cursor, old_link, new_link) \
	__smr_shash_entered_mut_replace(cursor, old_link, new_link)
1575
1576
1577 /*!
1578 * @macro smr_shash_entered_mut_abort()
1579 *
1580 * @brief
1581 * Invalidates a cursor made with @c smr_shash_entered_mut_begin()
1582 *
1583 * @discussion
1584 * The call must be made in the same SMR transaction that was entered
1585 * to make the cursor.
1586 *
1587 * @param cursor the cursor to invalidate.
1588 */
1589 #define smr_shash_entered_mut_abort(cursor) \
1590 __smr_shash_entered_mut_abort(cursor)
1591
1592
1593 #endif /* XNU_KERNEL_PRIVATE */
1594 #pragma mark - implementation details
1595 #pragma mark SMR hash traits
1596
/*
 * Names the object type associated with a traits struct, recovered with
 * typeof() from its smrht_obj_type member (assumed to be declared as an
 * array of the object type — confirm against the traits definition).
 */
#define smrht_obj_t(traits) \
	typeof((traits)->smrht_obj_type[0])
1599
1600 static inline void *
__smrht_link_to_obj(smrh_traits_t traits,const struct smrq_slink * link)1601 __smrht_link_to_obj(smrh_traits_t traits, const struct smrq_slink *link)
1602 {
1603 void *ptr = (void *)((uintptr_t)link - traits->link_offset);
1604
1605 __builtin_assume(ptr != NULL);
1606 return ptr;
1607 }
1608
1609
1610 #pragma mark SMR hash tables
1611
/*
 * Returns the index mask for the table's bucket array:
 * smrh_order encodes the array size, and shifting an all-ones word
 * right by it leaves exactly the bits used for bucket indexing.
 */
static inline unsigned long
__smr_hash_mask(struct smr_hash_array array)
{
	return ~0ul >> array.smrh_order;
}
1617
1618
1619 __attribute__((overloadable))
1620 static inline struct smrq_slist_head *
__smr_hash_bucket(const struct smr_hash * smrh,struct smrq_slink * link,smrh_traits_t smrht)1621 __smr_hash_bucket(
1622 const struct smr_hash *smrh,
1623 struct smrq_slink *link,
1624 smrh_traits_t smrht)
1625 {
1626 struct smr_hash_array array = smr_hash_array_decode(smrh);
1627 uint32_t index = __smr_hash_mask(array) & smrht->obj_hash(link, 0);
1628
1629 return &array.smrh_array[index];
1630 }
1631
1632 __attribute__((overloadable))
1633 static inline struct smrq_slist_head *
__smr_hash_bucket(const struct smr_hash * smrh,smrh_key_t key,smrh_traits_t smrht)1634 __smr_hash_bucket(
1635 const struct smr_hash *smrh,
1636 smrh_key_t key,
1637 smrh_traits_t smrht)
1638 {
1639 struct smr_hash_array array = smr_hash_array_decode(smrh);
1640 uint32_t index = __smr_hash_mask(array) & smrht->key_hash(key, 0);
1641
1642 return &array.smrh_array[index];
1643 }
1644
1645 static inline void *
__smr_hash_entered_find(const struct smrq_slist_head * head,smrh_key_t key,smrh_traits_t smrht)1646 __smr_hash_entered_find(
1647 const struct smrq_slist_head *head,
1648 smrh_key_t key,
1649 smrh_traits_t smrht)
1650 {
1651 for (struct smrq_slink *link = smr_entered_load(&head->first);
1652 link; link = smr_entered_load(&link->next)) {
1653 if (smrht->obj_equ(link, key)) {
1654 return __smrht_link_to_obj(smrht, link);
1655 }
1656 }
1657
1658 return NULL;
1659 }
1660
1661 static inline void *
__smr_hash_serialized_find(const struct smrq_slist_head * head,smrh_key_t key,smrh_traits_t smrht)1662 __smr_hash_serialized_find(
1663 const struct smrq_slist_head *head,
1664 smrh_key_t key,
1665 smrh_traits_t smrht)
1666 {
1667 for (struct smrq_slink *link = smr_serialized_load(&head->first);
1668 link; link = smr_serialized_load(&link->next)) {
1669 if (smrht->obj_equ(link, key)) {
1670 return __smrht_link_to_obj(smrht, link);
1671 }
1672 }
1673
1674 return NULL;
1675 }
1676
1677 static inline void *
__smr_hash_get(const struct smr_hash * smrh,smrh_key_t key,smrh_traits_t smrht)1678 __smr_hash_get(
1679 const struct smr_hash *smrh,
1680 smrh_key_t key,
1681 smrh_traits_t smrht)
1682 {
1683 struct smrq_slist_head *head;
1684 void *obj = NULL;
1685
1686 smr_enter(smrht->domain);
1687 head = __smr_hash_bucket(smrh, key, smrht);
1688 obj = __smr_hash_entered_find(head, key, smrht);
1689 if (obj && !smrht->obj_try_get(obj)) {
1690 obj = NULL;
1691 }
1692 smr_leave(smrht->domain);
1693
1694 return obj;
1695 }
1696
/*
 * Serialized get-or-insert: returns the retained pre-existing element,
 * or inserts @c link and returns NULL.
 */
static inline void *
__smr_hash_serialized_get_or_insert(
	struct smr_hash *smrh,
	smrh_key_t key,
	struct smrq_slink *link,
	smrh_traits_t smrht)
{
	struct smrq_slist_head *head;
	void *obj = NULL;

	head = __smr_hash_bucket(smrh, key, smrht);
	obj = __smr_hash_serialized_find(head, key, smrht);
	if (!obj || !smrht->obj_try_get(obj)) {
		/*
		 * Insert when no element matches the key, or when the match
		 * refused obj_try_get() (it is on its way out): both cases
		 * report an insertion by returning NULL.
		 */
		smrh->smrh_count++;
		smrq_serialized_insert_head(head, link);
		obj = NULL;
	}

	return obj;
}
1717
1718 extern void __smr_hash_serialized_clear(
1719 struct smr_hash *smrh,
1720 smrh_traits_t smrht,
1721 void (^free)(void *obj));
1722
1723 extern kern_return_t __smr_hash_shrink_and_unlock(
1724 struct smr_hash *smrh,
1725 lck_mtx_t *lock,
1726 smrh_traits_t smrht);
1727
1728 extern kern_return_t __smr_hash_grow_and_unlock(
1729 struct smr_hash *smrh,
1730 lck_mtx_t *lock,
1731 smrh_traits_t smrht);
1732
1733 #if XNU_KERNEL_PRIVATE
1734 #pragma GCC visibility push(hidden)
1735
1736 #pragma mark SMR scalable hash tables
1737
/* Selects which of the two seed/array pairs a bucket lookup uses. */
__enum_closed_decl(smrsh_sel_t, uint8_t, {
	SMRSH_CUR,      /* the current (published) pair */
	SMRSH_NEW,      /* the pair being rehashed into */
});
1742
/* Dependency-ordered load of smrsh_seed[idx] (see comment below). */
__attribute__((always_inline))
static inline uint32_t
__smr_shash_load_seed(
	const struct smr_shash *smrh,
	size_t idx)
{
	/* launder the base address through uintptr_t on purpose */
	uintptr_t addr = (uintptr_t)smrh->smrsh_seed;

	/*
	 * prevent the optimizer from thinking it knows _anything_
	 * about `smrsh_seed` to avoid codegen like this:
	 *
	 *     return idx ? smrh->smrsh_seed[1] : smrh->smrsh_seed[0]
	 *
	 * This only has a control dependency which doesn't provide
	 * the proper ordering. (control dependencies order
	 * writes-after-dependency and not loads).
	 */

	return os_atomic_load(&((const uint32_t _Atomic *)addr)[idx], relaxed);
}
1764
/* Dependency-ordered load of smrsh_array[idx] (see comment below). */
__attribute__((always_inline))
static inline hw_lck_ptr_t *
__smr_shash_load_array(
	const struct smr_shash *smrh,
	size_t idx)
{
	/* launder the base address through uintptr_t on purpose */
	uintptr_t addr = (uintptr_t)smrh->smrsh_array;

	/*
	 * prevent the optimizer from thinking it knows _anything_
	 * about `smrsh_array` to avoid codegen like this:
	 *
	 *     return idx ? smrh->smrsh_array[1] : smrh->smrsh_array[0]
	 *
	 * This only has a control dependency which doesn't provide
	 * the proper ordering. (control dependencies order
	 * writes-after-dependency and not loads).
	 */

	return os_atomic_load(&((hw_lck_ptr_t * _Atomic const *)addr)[idx], relaxed);
}
1786
1787 __attribute__((always_inline, overloadable))
1788 static inline uint32_t
__smr_shash_hash(const struct smr_shash * smrh,size_t idx,smrh_key_t key,smrh_traits_t traits)1789 __smr_shash_hash(
1790 const struct smr_shash *smrh,
1791 size_t idx,
1792 smrh_key_t key,
1793 smrh_traits_t traits)
1794 {
1795 return traits->key_hash(key, __smr_shash_load_seed(smrh, idx));
1796 }
1797
1798 __attribute__((always_inline, overloadable))
1799 static inline uint32_t
__smr_shash_hash(const struct smr_shash * smrh,size_t idx,const struct smrq_slink * link,smrh_traits_t traits)1800 __smr_shash_hash(
1801 const struct smr_shash *smrh,
1802 size_t idx,
1803 const struct smrq_slink *link,
1804 smrh_traits_t traits)
1805 {
1806 return traits->obj_hash(link, __smr_shash_load_seed(smrh, idx));
1807 }
1808
/*
 * Returns the bucket for @c hash in either the current or the new
 * array, as selected by @c sel from the given snapshot of the state.
 */
static inline hw_lck_ptr_t *
__smr_shash_bucket(
	const struct smr_shash *smrh,
	smrsh_state_t state,
	smrsh_sel_t sel,
	uint32_t hash)
{
	hw_lck_ptr_t *array;
	uint8_t shift;

	switch (sel) {
	case SMRSH_CUR:
		array = __smr_shash_load_array(smrh, state.curidx);
		shift = state.curshift;
		break;
	case SMRSH_NEW:
		array = __smr_shash_load_array(smrh, state.newidx);
		shift = state.newshift;
		break;
	}

	/* the high bits of the hash select the bucket */
	return &array[hash >> shift];
}
1832
1833 static inline bool
__smr_shash_is_stop(struct smrq_slink * link)1834 __smr_shash_is_stop(struct smrq_slink *link)
1835 {
1836 return (uintptr_t)link & SMRSH_BUCKET_STOP_BIT;
1837 }
1838
1839 static inline struct smrq_slink *
__smr_shash_bucket_stop(const hw_lck_ptr_t * head)1840 __smr_shash_bucket_stop(const hw_lck_ptr_t *head)
1841 {
1842 return (struct smrq_slink *)((uintptr_t)head | SMRSH_BUCKET_STOP_BIT);
1843 }
1844
1845 extern void *__smr_shash_entered_find_slow(
1846 const struct smr_shash *smrh,
1847 smrh_key_t key,
1848 hw_lck_ptr_t *head,
1849 smrh_traits_t traits);
1850
/*
 * Lock-free lookup in the current bucket array.
 *
 * Chains end on a tagged stop pointer rather than NULL: when the walk
 * terminates on the stop marker of the bucket it started from, the miss
 * is authoritative; otherwise the element may have migrated during a
 * concurrent rehash, and the slow path re-checks.
 */
static inline void *
__smr_shash_entered_find(
	const struct smr_shash *smrh,
	smrh_key_t key,
	smrh_traits_t traits)
{
	struct smrq_slink *link;
	smrsh_state_t state;
	hw_lck_ptr_t *head;
	uint32_t hash;

	/* dependency-ordered state load pairs with the seed/array loads */
	state = os_atomic_load(&smrh->smrsh_state, dependency);
	hash = __smr_shash_hash(smrh, state.curidx, key, traits);
	head = __smr_shash_bucket(smrh, state, SMRSH_CUR, hash);

	link = (struct smrq_slink *)hw_lck_ptr_value(head);
	while (!__smr_shash_is_stop(link)) {
		if (traits->obj_equ(link, key)) {
			return __smrht_link_to_obj(traits, link);
		}
		link = smr_entered_load(&link->next);
	}

	/* ended on our own bucket's stop marker: definitive miss */
	if (__probable(link == __smr_shash_bucket_stop(head))) {
		return NULL;
	}
	return __smr_shash_entered_find_slow(smrh, key, head, traits);
}
1879
1880 static inline void *
__smr_shash_entered_get(const struct smr_shash * smrh,smrh_key_t key,smrh_traits_t traits)1881 __smr_shash_entered_get(
1882 const struct smr_shash *smrh,
1883 smrh_key_t key,
1884 smrh_traits_t traits)
1885 {
1886 void *obj = __smr_shash_entered_find(smrh, key, traits);
1887
1888 return obj && traits->obj_try_get(obj) ? obj : NULL;
1889 }
1890
1891 extern void __smr_shash_destroy(
1892 struct smr_shash *smrh,
1893 smrh_traits_t traits,
1894 void (^free)(void *));
1895
1896 extern void *__smr_shash_entered_get_or_insert(
1897 struct smr_shash *smrh,
1898 smrh_key_t key,
1899 struct smrq_slink *link,
1900 smrh_traits_t traits);
1901
1902 extern smr_shash_mut_cursor_t __smr_shash_entered_mut_begin(
1903 struct smr_shash *smrh,
1904 struct smrq_slink *link,
1905 smrh_traits_t traits);
1906
1907 extern void __smr_shash_entered_mut_erase(
1908 struct smr_shash *smrh,
1909 smr_shash_mut_cursor_t cursor,
1910 struct smrq_slink *link,
1911 smrh_traits_t traits);
1912
1913 extern void __smr_shash_entered_mut_replace(
1914 smr_shash_mut_cursor_t cursor,
1915 struct smrq_slink *old_link,
1916 struct smrq_slink *new_link);
1917
1918 extern void __smr_shash_entered_mut_abort(
1919 smr_shash_mut_cursor_t cursor);
1920
1921 #pragma GCC visibility pop
1922 #endif /* XNU_KERNEL_PRIVATE */
1923
1924 __END_DECLS
1925
1926 #endif /* _KERN_SMR_HASH_H_ */
1927