1 /*
2 * Copyright (c) 2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #ifndef _KERN_SMR_HASH_H_
31 #define _KERN_SMR_HASH_H_
32
33 #include <kern/lock_mtx.h>
34 #include <kern/smr.h>
35 #include <os/hash.h>
36 #include <vm/vm_memtag.h>
37 #if XNU_KERNEL_PRIVATE
38 #include <kern/lock_ptr.h>
39 #include <kern/counter.h>
40 #endif
41
42 __BEGIN_DECLS
43
44
45 /*!
46 * @typedef smrh_key_t
47 *
48 * @brief
49 * A union that can represent several kinds of keys for SMR Hash Tables.
50 *
51 * @discussion
52 * For strings or opaque structs, @c smrk_len must hold the correct key size.
53 * For scalars (using the smrk_u64 field), the length is more advisory.
54 */
typedef struct {
	union {
		const char *smrk_string;   /* C string keys */
		const void *smrk_opaque;   /* byte array / opaque struct keys */
		uint64_t smrk_u64;         /* scalar keys */
	};
	size_t smrk_len;                   /* key length (advisory for scalars) */
} smrh_key_t;
63
64
65 /*!
66 * @struct smrh_traits
67 *
68 * @brief
69 * This structure parametrizes the behavior of SMR hash tables.
70 *
71 * @discussion
72 * Such structures are typically made with @c SMRH_TRAITS_DEFINE_*.
73 *
74 * Traits must be static and const in the same translation unit that implements
75 * the hash table, and possibly export methods to other modules. This design is
76 * not unlike C++ traits structure used in template parametrization, and rely on
77 * the constness of all structures for the compiler to actually elide most
78 * function calls, while maintaining decently good ergonomics.
79 *
80 *
81 * @field key_hash (automatic) computes a hash for a given key.
82 * @field key_equ (automatic) returns if two keys are equal
83 * @field obj_hash (automatic) computes a hash for a given object.
84 * @field obj_equ (automatic) returns if an object has the specified key
85 *
86 * @field domain the SMR domain which protects elements.
87 *
88 * @field obj_try_get function which attempts to acquire a reference on an
89 * object, or returns failure. This function is used
90 * to verify objects are "live" for the smrh_*_get()
91 * verbs.
92 */
struct smrh_traits {
	/* byte offset of the smrq_slink linkage inside the hashed object */
	unsigned long link_offset;
	/* the SMR domain which protects elements */
	smr_t domain;
	uint32_t (*key_hash)(smrh_key_t, uint32_t);                 /* (key, seed) -> hash */
	bool (*key_equ)(smrh_key_t, smrh_key_t);                    /* key equality */
	uint32_t (*obj_hash)(const struct smrq_slink *, uint32_t);  /* (object, seed) -> hash */
	bool (*obj_equ)(const struct smrq_slink *, smrh_key_t);     /* object-vs-key equality */
	/* optional: acquire a reference, returns false for dying objects */
	bool (*obj_try_get)(void *);
};
typedef const struct smrh_traits *smrh_traits_t;
103
104
105 #pragma mark SMR hash keys
106
107 /*!
108 * @macro SMRH_SCALAR_KEY()
109 *
110 * @brief
111 * Generates a @c smrh_key_t value out of a scalar.
112 */
113 #define SMRH_SCALAR_KEY(e) \
114 (smrh_key_t){ .smrk_u64 = (e), .smrk_len = sizeof(e) }
115
116
117 /*!
118 * @function smrh_key_hash_u32
119 *
120 * @brief
121 * Hashing function to use as a @c key_hash trait for 32bit scalars.
122 */
123 __pure2
124 static inline uint32_t
smrh_key_hash_u32(smrh_key_t key,uint32_t seed)125 smrh_key_hash_u32(smrh_key_t key, uint32_t seed)
126 {
127 uint32_t x = (uint32_t)key.smrk_u64 + seed;
128
129 x ^= x >> 16;
130 x *= 0x7feb352dU;
131 x ^= x >> 15;
132 x *= 0x846ca68bU;
133 x ^= x >> 16;
134
135 return x;
136 }
137
138 /*!
139 * @function smrh_key_hash_u64
140 *
141 * @brief
142 * Hashing function to use as a @c key_hash trait for 64bit scalars.
143 */
144 __pure2
145 static inline uint32_t
smrh_key_hash_u64(smrh_key_t key,uint32_t seed)146 smrh_key_hash_u64(smrh_key_t key, uint32_t seed)
147 {
148 uint64_t x = key.smrk_u64 + seed;
149
150 x ^= x >> 30;
151 x *= 0xbf58476d1ce4e5b9U;
152 x ^= x >> 27;
153 x *= 0x94d049bb133111ebU;
154 x ^= x >> 31;
155
156 return (uint32_t)x;
157 }
158
159 /*!
160 * @function smrh_key_hash_mem
161 *
162 * @brief
163 * Hashing function to use as a @c key_hash trait for byte arrays.
164 */
__stateful_pure
static inline uint32_t
smrh_key_hash_mem(smrh_key_t key, uint32_t seed)
{
	/* Jenkins hash over the @c smrk_len bytes at @c smrk_opaque. */
	return os_hash_jenkins(key.smrk_opaque, key.smrk_len, seed);
}
171
172 /*!
173 * @function smrh_key_hash_str
174 *
175 * @brief
176 * Hashing function to use as a @c key_hash trait for C strings.
177 */
__stateful_pure
static inline uint32_t
smrh_key_hash_str(smrh_key_t key, uint32_t seed)
{
	/*
	 * Identical to smrh_key_hash_mem(): @c smrk_len must hold the
	 * string's length, the NUL terminator is not consulted.
	 */
	return os_hash_jenkins(key.smrk_opaque, key.smrk_len, seed);
}
184
185
186 /*!
187 * @function smrh_key_equ_scalar
188 *
189 * @brief
190 * Equality function to use as @c key_equ for scalars.
191 */
static inline bool
smrh_key_equ_scalar(smrh_key_t k1, smrh_key_t k2)
{
	/* for scalars the length is advisory: only values are compared */
	return k1.smrk_u64 == k2.smrk_u64;
}
197
198 /*!
199 * @function smrh_key_equ_mem
200 *
201 * @brief
202 * Equality function to use as @c key_equ for byte arrays.
203 */
204 static inline bool
smrh_key_equ_mem(smrh_key_t k1,smrh_key_t k2)205 smrh_key_equ_mem(smrh_key_t k1, smrh_key_t k2)
206 {
207 assert(k1.smrk_len == k2.smrk_len);
208 return memcmp(k1.smrk_opaque, k2.smrk_opaque, k1.smrk_len) == 0;
209 }
210
211 /*!
212 * @function smrh_key_equ_str
213 *
214 * @brief
215 * Equality function to use as @c key_equ for strings.
216 */
217 static inline bool
smrh_key_equ_str(smrh_key_t k1,smrh_key_t k2)218 smrh_key_equ_str(smrh_key_t k1, smrh_key_t k2)
219 {
220 return k1.smrk_len == k2.smrk_len &&
221 memcmp(k1.smrk_opaque, k2.smrk_opaque, k1.smrk_len) == 0;
222 }
223
224
225 #pragma mark SMR hash traits
226
227 #if __cplusplus
228 #define __smrh_traits_storage static constexpr
229 #else
230 #define __smrh_traits_storage static const __used
231 #endif
232
233 /*!
234 * @macro SMRH_TRAITS_DEFINE()
235 *
236 * @brief
237 * Defines a relatively naked typed SMR Hash traits structure.
238 *
239 * @discussion
240 * Clients must provide:
241 * - domain,
242 * - key_hash,
243 * - key_equ,
244 * - obj_hash,
245 * - obj_equ.
246 *
247 * Clients might provide:
248 * - obj_try_get.
249 *
250 * @param name the name of the global to create.
251 * @param type_t the type of objects that will be hashed
252 * @param link_field the linkage used to link elements
253 */
/*
 * NOTE: the zero-sized @c smrht_obj_type array carries @c type_t in the
 * traits type without affecting its layout; accessor macros presumably
 * use it (via smrht_obj_t()) to cast results back to `type_t *`.
 */
#define SMRH_TRAITS_DEFINE(name, type_t, link_field, ...) \
	__smrh_traits_storage struct name { \
	        type_t *smrht_obj_type[0]; \
	        struct smrh_traits smrht; \
	} name = { .smrht = { \
	        .link_offset = offsetof(type_t, link_field), \
	        __VA_ARGS__ \
	} }
262
263 /*!
264 * @macro SMRH_TRAITS_DEFINE_SCALAR()
265 *
266 * @brief
267 * Defines a relatively typed SMR Hash traits structure for scalar keys.
268 *
269 * @discussion
270 * Clients must provide:
271 * - domain.
272 *
273 * Clients might provide:
274 * - obj_try_get.
275 *
276 * @param name the name of the global to create.
277 * @param type_t the type of objects that will be hashed
278 * @param key_field the field holding the key
279 * @param link_field the linkage used to link elements
280 */
281 #define SMRH_TRAITS_DEFINE_SCALAR(name, type_t, key_field, link_field, ...) \
282 static uint32_t \
283 name ## _obj_hash(const struct smrq_slink *link, uint32_t seed) \
284 { \
285 __auto_type o = __container_of(link, const type_t, link_field); \
286 smrh_key_t k = SMRH_SCALAR_KEY(o->key_field); \
287 \
288 if (k.smrk_len > sizeof(uint32_t)) { \
289 return smrh_key_hash_u64(k, seed); \
290 } else { \
291 return smrh_key_hash_u32(k, seed); \
292 } \
293 } \
294 \
295 static bool \
296 name ## _obj_equ(const struct smrq_slink *link, smrh_key_t key) \
297 { \
298 __auto_type o = __container_of(link, const type_t, link_field); \
299 \
300 return smrh_key_equ_scalar(SMRH_SCALAR_KEY(o->key_field), key); \
301 } \
302 \
303 SMRH_TRAITS_DEFINE(name, type_t, link_field, \
304 .key_hash = sizeof(((type_t *)NULL)->key_field) > 4 \
305 ? smrh_key_hash_u64 : smrh_key_hash_u32, \
306 .key_equ = smrh_key_equ_scalar, \
307 .obj_hash = name ## _obj_hash, \
308 .obj_equ = name ## _obj_equ, \
309 __VA_ARGS__ \
310 )
311
312 /*!
313 * @macro SMRH_TRAITS_DEFINE_STR()
314 *
315 * @brief
316 * Defines a basic typed SMR Hash traits structure for string keys.
317 *
318 * @discussion
319 * Clients must provide:
320 * - domain,
321 * - obj_hash,
322 * - obj_equ.
323 *
324 * Clients might provide:
325 * - obj_try_get.
326 *
327 * @param name the name of the global to create.
328 * @param type_t the type of objects that will be hashed
329 * @param link_field the linkage used to link elements
330 */
331 #define SMRH_TRAITS_DEFINE_STR(name, type_t, link_field, ...) \
332 SMRH_TRAITS_DEFINE(name, type_t, link_field, \
333 .key_hash = smrh_key_hash_str, \
334 .key_equ = smrh_key_equ_str, \
335 __VA_ARGS__ \
336 )
337
338 /*!
339 * @macro SMRH_TRAITS_DEFINE_MEM()
340 *
341 * @brief
342 * Defines a basic typed SMR Hash traits structure for byte array keys.
343 *
344 * @discussion
345 * Clients must provide:
346 * - domain,
347 * - obj_hash,
348 * - obj_equ.
349 *
350 * Clients might provide:
351 * - obj_try_get.
352 *
353 * @param name the name of the global to create.
354 * @param type_t the type of objects that will be hashed
355 * @param link_field the linkage used to link elements
356 */
357 #define SMRH_TRAITS_DEFINE_MEM(name, type_t, link_field, ...) \
358 SMRH_TRAITS_DEFINE(name, type_t, link_field, \
359 .key_hash = smrh_key_hash_mem, \
360 .key_equ = smrh_key_equ_mem, \
361 __VA_ARGS__ \
362 )
363
364 /*!
365 * @macro smrht_enter()
366 *
367 * @brief
368 * Conveniency macro to enter the domain associated
369 * with a specified hash table traits
370 */
371 #define smrht_enter(traits) \
372 smr_enter((traits)->smrht.domain)
373
374 /*!
375 * @macro smrht_leave()
376 *
377 * @brief
378 * Conveniency macro to leave the domain associated
379 * with a specified hash table traits
380 */
381 #define smrht_leave(traits) \
382 smr_leave((traits)->smrht.domain)
383
384
385 #pragma mark - SMR hash tables
386
387
388 /*!
389 * @struct smr_hash
390 *
391 * @brief
392 * This type implements simple closed addressing hash table.
393 *
394 * @discussion
395 * Using such a table allows for concurrent readers,
396 * but assumes external synchronization for mutations.
397 *
398 * In particular it means that insertions and deletions
399 * might block behind resizing the table.
400 *
401 * These hash tables aren't meant to be robust to attackers
402 * trying to cause hash collisions.
403 *
 * Resizing is possible concurrently with readers, implementing
 * the relativistic hash table growth scheme.
406 * (https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf)
407 */
struct smr_hash {
#define SMRH_ARRAY_ORDER_SHIFT  (48)
#define SMRH_ARRAY_ORDER_MASK   (0xfffful << SMRH_ARRAY_ORDER_SHIFT)
	/* bucket array pointer, packed with its order in the top 16 bits */
	uintptr_t smrh_array;
	/* number of elements currently in the table */
	uint32_t smrh_count;
	/* set while a resize operation is in flight */
	bool smrh_resizing;
	uint8_t smrh_unused1;
	uint16_t smrh_unused2;
};
417
418 #pragma mark SMR hash tables: initialization and accessors
419
420 /*!
421 * @function smr_hash_init_empty()
422 *
423 * @brief
424 * Initializes a hash to be empty.
425 *
426 * @discussion
427 * No entry can be added to this hash, but lookup functions
428 * will return sensible results.
429 *
430 * @c smr_hash_init() must be used again if the hash is to be used for
431 * insertions, which is safe with respect to concurrent readers.
432 *
433 * Calling @c smr_hash_destroy() on an empty-initialized hash is legal.
434 *
435 * Whether the hash table is empty-initialized can be tested with
436 * @c smr_hash_is_empty_initialized().
437 */
438 extern void smr_hash_init_empty(
439 struct smr_hash *smrh);
440
441 /*!
442 * @function smr_hash_init()
443 *
444 * @brief
445 * Initializes a hash with the specified size.
446 *
447 * @discussion
448 * This function never fails but requires for `size` to be
449 * smaller than KALLOC_SAFE_ALLOC_SIZE / sizeof(struct smrq_slist_head)
450 * (or to be called during early boot).
451 */
452 extern void smr_hash_init(
453 struct smr_hash *smrh,
454 size_t size);
455
456 /*!
457 * @function smr_hash_destroy()
458 *
459 * @brief
 * Destroys a hash initialized with smr_hash_init().
461 *
462 * @discussion
463 * This doesn't clean up the table from any elements it still contains.
464 * @c smr_hash_serialized_clear() must be called first if needed.
465 */
466 extern void smr_hash_destroy(
467 struct smr_hash *smrh);
468
469 /*!
470 * @function smr_hash_is_empty_initialized()
471 *
472 * @brief
473 * Returns whether the smr hash is empty as a result of calling
474 * smr_hash_init_empty().
475 */
476 extern bool smr_hash_is_empty_initialized(
477 struct smr_hash *smrh);
478
479 /*!
 * @struct smr_hash_array
481 *
482 * @brief
483 * The array pointer of a hash is packed with its size for atomicity reasons,
484 * this type is used for decoding / setting this pointer.
485 */
struct smr_hash_array {
	struct smrq_slist_head *smrh_array; /* decoded bucket array pointer */
	uint16_t smrh_order;                /* 64 - log2(bucket count), see smr_hash_size() */
};
490
491 /*!
492 * @function smr_hash_array_decode
493 *
494 * @brief
495 * Decodes the array pointer of a hash table.
496 */
static inline struct smr_hash_array
smr_hash_array_decode(const struct smr_hash *smrh)
{
	struct smr_hash_array array;
	uintptr_t ptr = os_atomic_load(&smrh->smrh_array, relaxed);

	/* the array order is packed in the top 16 bits of the pointer */
	array.smrh_order = (uint16_t)(ptr >> SMRH_ARRAY_ORDER_SHIFT);
	/*
	 * restore the top 16 bits to all ones to recover a dereferenceable
	 * pointer (kernel pointers live in the high canonical range)
	 */
	ptr |= SMRH_ARRAY_ORDER_MASK;
#if CONFIG_KERNEL_TAGGING
	/* re-derive the memory tag clobbered by the packing above */
	ptr = vm_memtag_fixup_ptr(ptr);
#endif /* CONFIG_KERNEL_TAGGING */
	array.smrh_array = (struct smrq_slist_head *)ptr;

	return array;
}
512
513 /*!
514 * @function smr_hash_size()
515 *
516 * @brief
517 * Returns the number of buckets in the hash table.
518 */
__attribute__((overloadable, always_inline))
static inline unsigned long
smr_hash_size(struct smr_hash_array array)
{
	/* smrh_order stores (64 - log2(size)), hence this decoding */
	return 1ul << (64 - array.smrh_order);
}
__attribute__((overloadable, always_inline))
static inline unsigned long
smr_hash_size(const struct smr_hash *smrh)
{
	return smr_hash_size(smr_hash_array_decode(smrh));
}
531
532 /*!
533 * @function smr_hash_serialized_count()
534 *
535 * @brief
536 * Returns the number of elements in the hash table.
537 *
538 * @discussion
539 * It can be called without serialization held,
540 * but the value is then racy.
541 */
__attribute__((always_inline))
static inline unsigned long
smr_hash_serialized_count(const struct smr_hash *smrh)
{
	/* plain load: racy unless serialized with mutations */
	return smrh->smrh_count;
}
548
549
550 #pragma mark SMR hash tables: read operations
551
552 /*!
553 * @function smr_hash_get()
554 *
555 * @brief
556 * Conveniency function for simple lookups.
557 *
558 * @discussion
559 * The SMR domain for this table must not be entered.
560 *
561 * This function doesn't require any synchronization and will enter/leave
562 *
563 * the SMR domain protecting elements automatically, and call the @c obj_try_get
564 * trait to validate/retain the element.
565 *
566 * @param smrh the hash table
567 * @param key the key to lookup
568 * @param traits the traits for the hash table
569 */
570 #define smr_hash_get(smrh, key, traits) ({ \
571 (smrht_obj_t(traits))__smr_hash_get(smrh, key, &(traits)->smrht); \
572 })
573
574 /*!
575 * @function smr_hash_contains()
576 *
577 * @brief
578 * Conveniency function for simple contains checks.
579 *
580 * @discussion
581 * The SMR domain for this table must not be entered.
582 *
583 * This function doesn't require any synchronization and will enter/leave
584 *
585 * @param smrh the hash table
586 * @param key the key to lookup
587 * @param traits the traits for the hash table
588 */
589 #define smr_hash_contains(smrh, key, traits) ({ \
590 smrh_traits_t __smrht = &(traits)->smrht; \
591 struct smrq_slist_head *__hd; \
592 bool __contains; \
593 \
594 smr_enter(__smrht->domain); \
595 __hd = __smr_hash_bucket(smrh, key, __smrht); \
596 __contains = (__smr_hash_entered_find(__hd, key, __smrht) != NULL); \
597 smr_leave(__smrht->domain); \
598 \
599 __contains; \
600 })
601
602 /*!
603 * @function smr_hash_entered_find()
604 *
605 * @brief
606 * Lookups an element in the table by key.
607 *
608 * @discussion
609 * The SMR domain for this table must be entered.
610 *
611 * This function returns the first element found that matches the key.
612 * This element might be about to be deleted or stale, and it is up
613 * to the client to make that determination if required.
614 *
615 * There might be more elements that can be found for that key,
616 * but because elements are inserted at the head of buckets,
617 * other matches should all be staler entries than the one returned.
618 *
619 * @param smrh the hash table
620 * @param key the key to lookup
621 * @param traits the traits for the hash table
622 */
623 #define smr_hash_entered_find(smrh, key, traits) ({ \
624 smrh_traits_t __smrht = &(traits)->smrht; \
625 struct smrq_slist_head *__hd = __smr_hash_bucket(smrh, key, __smrht); \
626 \
627 (smrht_obj_t(traits))__smr_hash_entered_find(__hd, key, __smrht); \
628 })
629
630 /*!
631 * @function smr_hash_serialized_find()
632 *
633 * @brief
634 * Lookups an element in the table by key.
635 *
636 * @discussion
637 * The SMR domain for this table must NOT be entered.
638 * This function requires external serialization with other mutations.
639 *
640 * This function returns the first element found that matches the key.
641 * This element might be about to be deleted or stale, and it is up
642 * to the client to make that determination if required.
643 *
644 * There might be more elements that can be found for that key,
645 * but because elements are inserted at the head of buckets,
646 * other matches should all be staler entries than the one returned.
647 *
648 * @param smrh the hash table
649 * @param key the key to lookup
650 * @param traits the traits for the hash table
651 */
652 #define smr_hash_serialized_find(smrh, key, traits) ({ \
653 smrh_traits_t __smrht = &(traits)->smrht; \
654 struct smrq_slist_head *__hd = __smr_hash_bucket(smrh, key, __smrht); \
655 \
656 (smrht_obj_t(traits))__smr_hash_serialized_find(__hd, key, __smrht); \
657 })
658
659
660 #pragma mark SMR hash tables: mutations
661
662 /*!
663 * @function smr_hash_serialized_insert()
664 *
665 * @brief
666 * Insert an object in the hash table.
667 *
668 * @discussion
669 * The SMR domain for this table must NOT be entered.
670 * This function requires external serialization with other mutations.
671 *
672 * Clients of this call must have checked there is no previous entry
673 * for this key in the hash table.
674 *
675 * @param smrh the hash table
676 * @param link the pointer to the linkage to insert.
677 * @param traits the traits for the hash table
678 */
679 #define smr_hash_serialized_insert(smrh, link, traits) ({ \
680 smrh_traits_t __smrht = &(traits)->smrht; \
681 struct smr_hash *__h = (smrh); \
682 struct smrq_slink *__link = (link); \
683 struct smrq_slist_head *__hd; \
684 \
685 __hd = __smr_hash_bucket(__h, __link, __smrht); \
686 __h->smrh_count++; \
687 smrq_serialized_insert_head(__hd, __link); \
688 })
689
690 /*!
691 * @function smr_hash_serialized_get_or_insert()
692 *
693 * @brief
694 * Insert an object in the hash table, or get the conflicting element.
695 *
696 * @discussion
697 * The SMR domain for this table must NOT be entered.
698 * This function requires external serialization with other mutations.
699 *
700 * @param smrh the hash table
701 * @param link the pointer to the linkage to insert.
702 * @param traits the traits for the hash table
703 */
704 #define smr_hash_serialized_get_or_insert(smrh, key, link, traits) ({ \
705 (smrht_obj_t(traits))__smr_hash_serialized_get_or_insert(smrh, key, \
706 link, &(traits)->smrht); \
707 })
708
709 /*!
710 * @function smr_hash_serialized_remove()
711 *
712 * @brief
713 * Remove an object from the hash table.
714 *
715 * @discussion
716 * The SMR domain for this table must NOT be entered.
717 * This function requires external serialization with other mutations.
718 *
719 * Note that the object once removed must be retired via SMR,
720 * and not freed immediately.
721 *
722 * If the object isn't in the hash table, this function will crash with
723 * a NULL deref while walking the bucket where the element should belong.
724 *
725 * @param smrh the hash table
726 * @param link the pointer to the linkage to remove.
727 * @param traits the traits for the hash table
728 */
729 #define smr_hash_serialized_remove(smrh, link, traits) ({ \
730 smrh_traits_t __smrht = &(traits)->smrht; \
731 struct smr_hash *__h = (smrh); \
732 struct smrq_slink *__link = (link); \
733 struct smrq_slist_head *__hd; \
734 \
735 __hd = __smr_hash_bucket(__h, __link, __smrht); \
736 __h->smrh_count--; \
737 smrq_serialized_remove(__hd, __link); \
738 })
739
740 /*!
741 * @function smr_hash_serialized_replace()
742 *
743 * @brief
744 * Replaces an object in the hash
745 *
746 * @discussion
747 * The SMR domain for this table must NOT be entered.
748 * This function requires external serialization with other mutations.
749 *
750 * Note that the old object once removed must be retired via SMR,
751 * and not freed immediately.
752 *
753 * If the old object isn't in the hash table, this function will crash with
754 * a NULL deref while walking the bucket where the element should belong.
755 *
756 * The new object MUST have the same key as the object it replaces,
757 * otherwise behavior is undefined.
758 *
759 * @param smrh the hash table
760 * @param old_link the pointer to the linkage to remove.
761 * @param new_link the pointer to the linkage to insert.
762 * @param traits the traits for the hash table
763 */
764 #define smr_hash_serialized_replace(smrh, old_link, new_link, traits) ({ \
765 smrh_traits_t __smrht = &(traits)->smrht; \
766 struct smrq_slink *__link = (old_link); \
767 struct smrq_slist_head *__hd; \
768 \
769 __hd = __smr_hash_bucket(smrh, __link, __smrht); \
770 smrq_serialized_replace(__hd, __link, (new_link)); \
771 })
772
773 /*!
774 * @function smr_hash_serialized_clear()
775 *
776 * @brief
777 * Empties an SMR hash table.
778 *
779 * @discussion
780 * This function requires external serialization with other mutations.
781 *
782 * @param smrh the hash table to clear
783 * @param traits the traits for this hash table
784 * @param free a block to call on each element in the table.
785 */
786 #define smr_hash_serialized_clear(smrh, traits, free...) \
787 __smr_hash_serialized_clear(smrh, &(traits)->smrht, free)
788
789
790 #pragma mark SMR hash tables: resizing
791
792 /*
793 * Implementing the growth policy is not builtin because
794 * there is a LOT of ways to do it, with some variants
795 * (such as asynchronoulsy) require a lot of bookkeeping
796 * which would grow the structure and prevent lean clients
797 * to use it without any growth policy.
798 */
799
800 /*!
801 * @function smr_hash_serialized_should_shrink()
802 *
803 * @brief
804 * Allows implementing a typical policy for shrinking.
805 *
806 * @discussion
807 * Returns whether the table is at least @c min_size large,
808 * and whether the table has more than @c size_factor buckets
809 * per @c count_factor elements.
810 */
811 static inline bool
smr_hash_serialized_should_shrink(const struct smr_hash * smrh,uint32_t min_size,uint32_t size_factor,uint32_t count_factor)812 smr_hash_serialized_should_shrink(
813 const struct smr_hash *smrh,
814 uint32_t min_size,
815 uint32_t size_factor,
816 uint32_t count_factor)
817 {
818 size_t size = smr_hash_size(smrh);
819
820 if (size > min_size && !smrh->smrh_resizing) {
821 return size * count_factor > smrh->smrh_count * size_factor;
822 }
823 return false;
824 }
825
826 /*!
827 * @function smr_hash_serialized_should_grow()
828 *
829 * @brief
 * Allows implementing a typical policy for growing.
831 *
832 * @discussion
833 * Returns whether the table has less than @c size_factor buckets
834 * per @c count_factor elements.
835 */
836 static inline bool
smr_hash_serialized_should_grow(const struct smr_hash * smrh,uint32_t size_factor,uint32_t count_factor)837 smr_hash_serialized_should_grow(
838 const struct smr_hash *smrh,
839 uint32_t size_factor,
840 uint32_t count_factor)
841 {
842 size_t size = smr_hash_size(smrh);
843
844 if (!smrh->smrh_resizing) {
845 return size * count_factor < smrh->smrh_count * size_factor;
846 }
847 return false;
848 }
849
850 /*!
851 * @function smr_hash_shrink_and_unlock()
852 *
853 * @brief
854 * Shrinks a hash table (halves the number of buckets).
855 *
856 * @discussion
857 * This function synchronizes with other mutations using
858 * the passed in mutex.
859 *
860 * Shrinking is a relatively fast operation (however it still
861 * is mostly linear in the number of elements in the hash).
862 *
863 * This function doesn't perform any policy checks such as
864 * "minimal size" being sane or density of buckets being good.
865 *
866 * This function assumes it is called with the lock held,
867 * and returns with it unlocked.
868 *
869 * @returns
870 * - KERN_SUCCESS: the resize was successful.
871 * - KERN_RESOURCE_SHORTAGE: the system was out of memory.
872 * - KERN_FAILURE: the hash table was already resizing.
873 */
874 #define smr_hash_shrink_and_unlock(smrh, mutex, traits) \
875 __smr_hash_shrink_and_unlock(smrh, mutex, &(traits)->smrht)
876
877
878 /*!
879 * @function smr_hash_grow_and_unlock()
880 *
881 * @brief
882 * Grows a hash table (doubles the number of buckets).
883 *
884 * @discussion
885 * This function synchronizes with other mutations using
886 * the passed in mutex.
887 *
888 * Growing is relatively expensive, as it will rehash all elements,
889 * and call smr_synchronize() several times over the course
890 * of the operation. And mutations are delayed while this growth is
891 * occurring.
892 *
893 * This function doesn't perform any policy checks such as
894 * "maximal size" being sane or density of buckets being good.
895 *
896 * This function assumes it is called with the lock held,
897 * and returns with it unlocked.
898 *
899 * @returns
900 * - KERN_SUCCESS: the resize was successful.
901 * - KERN_RESOURCE_SHORTAGE: the system was out of memory.
902 * - KERN_FAILURE: the hash table was already resizing.
903 */
904 #define smr_hash_grow_and_unlock(smrh, mutex, traits) \
905 __smr_hash_grow_and_unlock(smrh, mutex, &(traits)->smrht)
906
907
908 #pragma mark SMR hash tables: iteration
909
910 /*!
911 * @struct smr_hash_iterator
912 *
913 * @brief
914 * Structure used for SMR hash iterations.
915 *
916 * @discussion
917 * Do not manipulate internal fields directly, use the accessors instead.
918 *
919 * Using iteration can be done either under an entered SMR domain,
920 * or under serialization.
921 *
922 * However erasure is only supported under serialization.
923 *
924 * Note that entered enumeration is done with preemption disabled
925 * and should be used in a limited capacity. Such enumerations
926 * might observe elements already removed from the table (due
927 * to concurrent deletions) or elements twice (due to concurrent resizes).
928 */
struct smr_hash_iterator {
	struct smr_hash *smrh;           /* the table being iterated */
	struct smrq_slist_head *hd_next; /* next bucket to visit */
	struct smrq_slist_head *hd_last; /* one past the last bucket */
	__smrq_slink_t *prev;            /* pointer to the link referencing `link` */
	struct smrq_slink *link;         /* current element, NULL at end */
};
936
937 /*!
938 * @function smr_hash_iter_begin()
939 *
940 * @brief
941 * Initialize an SMR iterator, and advance it to the first element.
942 *
943 * @discussion
944 * This function must be used in either serialized or entered context.
945 */
static inline struct smr_hash_iterator
smr_hash_iter_begin(struct smr_hash *smrh)
{
	struct smr_hash_array array = smr_hash_array_decode(smrh);
	struct smr_hash_iterator it = {
		.smrh = smrh,
		.hd_next = array.smrh_array,
		.hd_last = array.smrh_array + smr_hash_size(array),
	};

	/* skip over empty buckets until the first element, or the end */
	do {
		it.prev = &it.hd_next->first;
		it.link = smr_entered_load(it.prev);
		it.hd_next++;
	} while (it.link == NULL && it.hd_next < it.hd_last);

	return it;
}
964
965 /*!
966 * @function smr_hash_iter_get()
967 *
968 * @brief
969 * Returns the current value of the iterator, or NULL.
970 *
971 * @discussion
972 * This function must be used in either serialized or entered context.
973 */
974 #define smr_hash_iter_get(it, traits) ({ \
975 struct smr_hash_iterator __smrh_it = (it); \
976 void *__obj = NULL; \
977 \
978 if (__smrh_it.link) { \
979 __obj = __smrht_link_to_obj(&(traits)->smrht, __smrh_it.link); \
980 } \
981 \
982 (smrht_obj_t(traits))__obj; \
983 })
984
985 /*!
986 * @function smr_hash_iter_advance()
987 *
988 * @brief
989 * Advance the iterator to the next element.
990 *
991 * @description
992 * This function must be used in either serialized or entered context.
993 */
static inline void
smr_hash_iter_advance(struct smr_hash_iterator *it)
{
	/* step past the current element within its bucket */
	it->prev = &it->link->next;

	/* then skip over empty buckets until the next element, or the end */
	while ((it->link = smr_entered_load(it->prev)) == NULL) {
		if (it->hd_next == it->hd_last) {
			break;
		}
		it->prev = &it->hd_next->first;
		it->hd_next++;
	}
}
1007
1008 /*!
1009 * @function smr_hash_iter_serialized_erase()
1010 *
1011 * @brief
1012 * Erases the current item from the hash and advance the cursor.
1013 *
1014 * @description
1015 * This function requires external serialization with other mutations.
1016 *
1017 * The object once removed must be retired via SMR,
1018 * and not freed immediately.
1019 */
static inline void
smr_hash_iter_serialized_erase(struct smr_hash_iterator *it)
{
	/* unlink the current element: point *prev at its successor */
	it->link = smr_serialized_load(&it->link->next);
	it->smrh->smrh_count--;
	smr_serialized_store_relaxed(it->prev, it->link);

	/* if the bucket is now exhausted, skip to the next non-empty one */
	while (it->link == NULL) {
		if (it->hd_next == it->hd_last) {
			break;
		}
		it->prev = &it->hd_next->first;
		it->link = smr_serialized_load(it->prev);
		it->hd_next++;
	}
}
1036
1037 /*!
1038 * @function smr_hash_foreach()
1039 *
1040 * @brief
1041 * Enumerates all elements in a hash table.
1042 *
1043 * @discussion
1044 * This function must be used in either serialized or entered context.
1045 *
1046 * When used in entered context, the enumeration might observe stale objects
1047 * that haven't been removed yet, or elements twice (due to concurrent resizes),
1048 * and the disambiguation must be done by the client if it matters.
1049 *
1050 * It is not permitted to erase elements during this enumeration,
1051 * manual use of the iterator APIs must be used if this is desired.
1052 *
1053 * @param obj the enumerator variable
1054 * @param smrh the hash table to enumerate
1055 * @param traits the traits for the hash table
1056 */
/* Terminates when smr_hash_iter_get() yields NULL past the last bucket. */
#define smr_hash_foreach(obj, smrh, traits) \
	for (struct smr_hash_iterator __it = smr_hash_iter_begin(smrh); \
	    ((obj) = smr_hash_iter_get(__it, traits)); \
	    smr_hash_iter_advance(&__it))
1061
1062
1063 #if XNU_KERNEL_PRIVATE
1064 #pragma mark - SMR scalable hash tables
1065
1066
1067 /*!
1068 * @typedef smrsh_state_t
1069 *
1070 * @brief
1071 * Atomic state for the scalable SMR hash table.
1072 *
1073 * @discussion
1074 * Scalable hash tables have 2 sets of seeds and bucket arrays,
1075 * which are used for concurrent rehashing.
1076 *
1077 * Each growth/shrink/re-seed operation will swap sizes
1078 * and set of seed/array atomically by changing the state.
1079 */
typedef struct {
	uint8_t curidx;    /* index (0/1) of the current seed/array set */
	uint8_t curshift;  /* hash shift selecting a bucket in the current array */
	uint8_t newidx;    /* index (0/1) of the set being migrated to */
	uint8_t newshift;  /* hash shift for the new array */
} smrsh_state_t;
1086
1087
1088 /*!
1089 * @typedef smrsh_rehash_t
1090 *
1091 * @brief
1092 * Internal state management for various rehashing operations.
1093 */
__options_closed_decl(smrsh_rehash_t, uint8_t, {
	SMRSH_REHASH_NONE    = 0x00, /* no rehash requested */
	SMRSH_REHASH_RESEED  = 0x01, /* rebalance by changing the hash seeds */
	SMRSH_REHASH_SHRINK  = 0x02, /* shrink the bucket array */
	SMRSH_REHASH_GROW    = 0x04, /* grow the bucket array */
	SMRSH_REHASH_RUNNING = 0x08, /* a rehash operation is in flight */
});
1101
1102
1103 /*!
1104 * @enum smrsh_policy_t
1105 *
1106 * @brief
1107 * Describes the growth/shrink policies for scalable SMR hash tables.
1108 *
1109 * @description
1110 * In general, singleton global hash tables that are central
1111 * to the performance of the system likely want to use
1112 * @c SMRSH_BALANCED_NOSHRINK or @c SMRSH_FASTEST.
1113 *
1114 * Hash tables that tend to be instantiated multiple times,
1115 * or have bursty behaviors should use more conservative policies.
1116 *
1117 * @const SMRSH_COMPACT
1118 * Choose a policy that is very memory conscious and will favour aggressive
1119 * shrinking and allow relatively long hash chains.
1120 *
1121 * @const SMRSH_BALANCED
1122 * Choose a balanced policy that allows for medium sized hash chains,
1123 * and shrinks less aggressively than @c SMRSH_COMPACT.
1124 *
1125 * @const SMRSH_BALANCED_NOSHRINK
1126 * This policy is the same as @c SMRSH_BALANCED, but the hash table
1127 * will not be allowed to shrink.
1128 *
1129 * @const SMRSH_FASTEST
1130 * This policy grows aggressively, only tolerating relatively short
1131 * hash chains, and will never shrink.
1132 */
1133 __enum_closed_decl(smrsh_policy_t, uint32_t, {
1134 SMRSH_COMPACT,
1135 SMRSH_BALANCED,
1136 SMRSH_BALANCED_NOSHRINK,
1137 SMRSH_FASTEST,
1138 });
1139
1140
1141 /*!
1142 * @struct smr_shash
1143 *
1144 * @brief
1145 * Type for scalable SMR hash table.
1146 *
1147 * @description
1148 * Unlike its little brother @c smr_hash, these kinds of hash tables
 * allow for concurrent mutations (with fine-grained per-bucket locks)
1150 * of the hash table.
1151 *
1152 * It also observes high collision rates and tries to adjust the hash
1153 * seeds in order to rebalance the hash tables when this happens.
1154 *
1155 * All this goodness however comes at a cost:
1156 * - these hash tables can't be enumerated
1157 * - these hash tables are substantially bigger (@c smr_hash is 2 pointers big,
1158 * where @c smr_shash is bigger and allocates a thread_call and a scalable
1159 * counter).
1160 */
struct smr_shash {
	/* the two bucket arrays used for concurrent rehashing */
	hw_lck_ptr_t *_Atomic smrsh_array[2];
	/* the two hash seeds, one per bucket array */
	uint32_t _Atomic      smrsh_seed[2];
	/* which seed/array pair is current vs being migrated to */
	smrsh_state_t _Atomic smrsh_state;
	/* flags describing the rehash operation in flight, if any */
	smrsh_rehash_t _Atomic smrsh_rehashing;
	/* growth/shrink policy (see smrsh_policy_t) */
	smrsh_policy_t        smrsh_policy;
	/* size floor the table won't shrink below (shift form; see smr_shash_init()) */
	uint16_t              smrsh_min_shift : 5;
	uint16_t              __unused_bits : 11;
	/* number of elements in the table */
	scalable_counter_t    smrsh_count;
	/* thread_call used to drive deferred resize/reseed work */
	struct thread_call   *smrsh_callout;
};
1172
1173 #define SMRSH_BUCKET_STOP_BIT 0x1ul
1174
1175
1176
1177 #pragma mark SMR scalable hash tables: initialization and accessors
1178
1179 /*!
1180 * @function smr_shash_init()
1181 *
1182 * @brief
1183 * Initializes a scalable hash table.
1184 *
1185 * @param smrh the scalable hash table struct to initialize.
1186 * @param policy the growth policy to use (see @c smrsh_policy_t).
1187 * @param min_size the number of buckets the table should not shrink below.
1188 */
1189 extern void smr_shash_init(
1190 struct smr_shash *smrh,
1191 smrsh_policy_t policy,
1192 size_t min_size);
1193
1194 /*!
1195 * @function smr_shash_destroy()
1196 *
1197 * @brief
1198 * Releases the resources held by a table.
1199 *
1200 * @param smrh the scalable hash table struct to destroy.
1201 * @param traits the SMR hash traits for this table.
1202 * @param free an optional callback to call on each element
1203 * still in the hash table.
1204 */
1205 #define smr_shash_destroy(smrh, traits, free...) \
1206 __smr_shash_destroy(smrh, &(traits)->smrht, free)
1207
1208
1209 #pragma mark SMR scalable hash tables: read operations
1210
1211 /*!
1212 * @function smr_shash_entered_find()
1213 *
1214 * @brief
1215 * Looks up an element by key in the hash table.
1216 *
1217 * @discussion
1218 * The SMR domain protecting the hash table elements must have been entered
1219 * to call this function.
1220 *
1221 * This function returns an object for which the @c obj_try_get
1222 * callback hasn't been called, which means that accessing the element
1223 * is only valid inside the current SMR critical section, or until
1224 * further action to "retain" the element has been taken.
1225 *
1226 * @param smrh the scalable hash table.
1227 * @param key the key to lookup.
1228 * @param traits the SMR hash traits for this table.
1229 *
1230 * @returns the first found element or NULL.
1231 */
#define smr_shash_entered_find(smrh, key, traits) ({ \
	void *__obj; \
	\
	__obj = __smr_shash_entered_find(smrh, key, &(traits)->smrht); \
	\
	(smrht_obj_t(traits))__obj; \
})


/*!
 * @function smr_shash_entered_get()
 *
 * @brief
 * Looks up an element by key in the hash table.
 *
 * @discussion
 * The SMR domain protecting the hash table elements must have been entered
 * to call this function.
 *
 * This function returns an object for which the @c obj_try_get
 * callback has been called, which ensures it is valid even
 * after the current SMR critical section ends.
 *
 * @param smrh the scalable hash table.
 * @param key the key to lookup.
 * @param traits the SMR hash traits for this table.
 *
 * @returns the first found element or NULL.
 */
#define smr_shash_entered_get(smrh, key, traits) ({ \
	void *__obj; \
	\
	__obj = __smr_shash_entered_get(smrh, key, &(traits)->smrht); \
	\
	(smrht_obj_t(traits))__obj; \
})

/*!
 * @function smr_shash_get()
 *
 * @brief
 * Looks up an element by key in the hash table.
 *
 * @discussion
 * Convenience wrapper for @c smr_shash_entered_get()
 * which creates an SMR critical section around its call.
 *
 * The SMR domain protecting the hash table must NOT have been entered
 * to call this function.
 *
 * @param smrh the scalable hash table.
 * @param key the key to lookup.
 * @param traits the SMR hash traits for this table.
 *
 * @returns the first found element or NULL.
 */
#define smr_shash_get(smrh, key, traits) ({ \
	void *__obj; \
	\
	smrht_enter(traits); \
	__obj = __smr_shash_entered_get(smrh, key, &(traits)->smrht); \
	smrht_leave(traits); \
	\
	(smrht_obj_t(traits))__obj; \
})
1297
1298
1299 #pragma mark SMR scalable hash tables: mutations
1300
1301 /*!
1302 * @function smr_shash_entered_get_or_insert()
1303 *
1304 * @brief
1305 * Inserts an element in the hash table, or return a pre-existing element
1306 * in the hash table for that key.
1307 *
1308 * @discussion
1309 * The SMR domain protecting the hash table elements must have been entered
1310 * to call this function.
1311 *
1312 * This function either returns an object for which the @c obj_try_get
1313 * callback has been called, or inserts the passed in element.
1314 *
1315 * @param smrh the scalable hash table.
1316 * @param key the key to lookup.
1317 * @param link the element to insert (its "key" must be @c key).
1318 * @param traits the SMR hash traits for this table.
1319 *
1320 * @returns NULL if the insertion happened,
1321 * or the "retained" colliding element otherwise.
1322 */
/* Evaluate the traits pointer once and reuse it for the call below. */
#define smr_shash_entered_get_or_insert(smrh, key, link, traits) ({ \
	smrh_traits_t __smrht = &(traits)->smrht; \
	void *__obj; \
	\
	__obj = __smr_shash_entered_get_or_insert(smrh, key, link, \
	    __smrht); \
	\
	(smrht_obj_t(traits))__obj; \
})
1332
/*!
 * @function smr_shash_get_or_insert()
 *
 * @brief
 * Inserts an element in the hash table, or returns a pre-existing element
 * in the hash table for that key.
 *
 * @discussion
 * Convenience wrapper for @c smr_shash_entered_get_or_insert()
 * which creates an SMR critical section around its call.
 *
 * The SMR domain protecting the hash table must NOT have been entered
 * to call this function.
 *
 * @param smrh the scalable hash table.
 * @param key the key to lookup.
 * @param link the element to insert (its "key" must be @c key).
 * @param traits the SMR hash traits for this table.
 *
 * @returns NULL if the insertion happened,
 * or the "retained" colliding element otherwise.
 */
#define smr_shash_get_or_insert(smrh, key, link, traits) ({ \
	void *__obj; \
	\
	smrht_enter(traits); \
	__obj = __smr_shash_entered_get_or_insert(smrh, key, link, \
	    &(traits)->smrht); \
	smrht_leave(traits); \
	\
	(smrht_obj_t(traits))__obj; \
})
1364
1365
/*!
 * @function smr_shash_entered_remove()
 *
 * @brief
 * Removes an element from the hash table.
 *
 * @discussion
 * The SMR domain protecting the hash table must have been entered
 * to call this function.
 *
 * The removed element can't be added back to the hash table
 * and must be retired via SMR and not freed immediately.
 *
 * @param smrh the scalable hash table.
 * @param link the element to remove from the hash table.
 * @param traits the SMR hash traits for this table.
 */
#define smr_shash_entered_remove(smrh, link, traits) ({ \
	smr_shash_mut_cursor_t __cursor; \
	struct smrq_slink *__link = (link); \
	struct smr_shash *__smrh = (smrh); \
	\
	__cursor = smr_shash_entered_mut_begin(__smrh, __link, traits); \
	smr_shash_entered_mut_erase(__smrh, __cursor, __link, traits); \
})

/*!
 * @function smr_shash_remove()
 *
 * @brief
 * Removes an element from the hash table.
 *
 * @discussion
 * Convenience wrapper for @c smr_shash_entered_remove()
 * which creates an SMR critical section around its call.
 *
 * The SMR domain protecting the hash table must NOT have been entered
 * to call this function.
 *
 * The removed element can't be added back to the hash table
 * and must be retired via SMR and not freed immediately.
 *
 * @param smrh the scalable hash table.
 * @param link the element to remove from the hash table.
 * @param traits the SMR hash traits for this table.
 */
#define smr_shash_remove(smrh, link, traits) ({ \
	smrht_enter(traits); \
	smr_shash_entered_remove(smrh, link, traits); \
	smrht_leave(traits); \
})
1417
1418
/*!
 * @function smr_shash_entered_replace()
 *
 * @brief
 * Replaces an element in the hash table with another.
 *
 * @discussion
 * Elements must have the same key, otherwise the behavior is undefined.
 *
 * The SMR domain protecting the hash table must have been entered
 * to call this function.
 *
 * The removed element can't be added back to the hash table
 * and must be retired via SMR and not freed immediately.
 *
 * @param smrh the scalable hash table.
 * @param old_link the element to remove from the hash table.
 * @param new_link the element to insert in the hash table.
 * @param traits the SMR hash traits for this table.
 */
#define smr_shash_entered_replace(smrh, old_link, new_link, traits) ({ \
	smr_shash_mut_cursor_t __cursor; \
	struct smrq_slink *__link = (old_link); \
	\
	__cursor = smr_shash_entered_mut_begin(smrh, __link, traits); \
	smr_shash_entered_mut_replace(__cursor, __link, new_link); \
})

/*!
 * @function smr_shash_replace()
 *
 * @brief
 * Replaces an element in the hash table with another.
 *
 * @discussion
 * Convenience wrapper for @c smr_shash_entered_replace()
 * which creates an SMR critical section around its call.
 *
 * Elements must have the same key, otherwise the behavior is undefined.
 *
 * The SMR domain protecting the hash table must NOT have been entered
 * to call this function.
 *
 * The removed element can't be added back to the hash table
 * and must be retired via SMR and not freed immediately.
 *
 * @param smrh the scalable hash table.
 * @param old_link the element to remove from the hash table.
 * @param new_link the element to insert in the hash table.
 * @param traits the SMR hash traits for this table.
 */
#define smr_shash_replace(smrh, old_link, new_link, traits) ({ \
	smrht_enter(traits); \
	smr_shash_entered_replace(smrh, old_link, new_link, traits); \
	smrht_leave(traits); \
})
1475
1476
1477 #pragma mark SMR scalable hash tables: advanced mutations
1478
1479 /*!
1480 * @typedef smr_shash_mut_cursor_t
1481 *
1482 * @brief
1483 * Cursor used for advanced mutations.
1484 */
typedef struct {
	hw_lck_ptr_t   *head;  /* the (locked) bucket head the element hangs off */
	__smrq_slink_t *prev;  /* the link pointer to update on erase/replace */
} smr_shash_mut_cursor_t;
1489
1490
1491 /*!
1492 * @macro smr_shash_entered_mut_begin()
1493 *
1494 * @brief
1495 * Creates a mutation cursor for the specified element.
1496 *
1497 * @discussion
1498 * A mutation cursor allows to erase or replace an element
1499 * in the hash table.
1500 *
1501 * The cursor returned by this function holds a lock,
1502 * and it is undefined to have two live cursors at a time
1503 * (it will typically deadlock, and unlike typical locks,
1504 * there's no a priori lock ordering that can be derived
1505 * to prevent it).
1506 *
1507 * The SMR domain protecting the hash table must have been entered
1508 * to call this function.
1509 *
1510 * One and exactly one of these three calls must be performed
1511 * on a cursor before the SMR transaction is ended:
1512 * - smr_shash_entered_mut_erase() to erase the element it was made for,
1513 * - smr_shash_entered_mut_replace() to replace the element it was made for,
1514 * - smr_shash_entered_mut_abort() to abandon the cursor without mutation.
1515 *
1516 * @param smrh the scalable hash table.
1517 * @param link the element to create a cursor for (must be in the hash).
1518 * @param traits the SMR hash traits for this table.
1519 */
1520 #define smr_shash_entered_mut_begin(smrh, link, traits) \
1521 __smr_shash_entered_mut_begin(smrh, link, &(traits)->smrht)
1522
1523
1524 /*!
1525 * @macro smr_shash_entered_mut_erase()
1526 *
1527 * @brief
1528 * Erases the element used to make the cursor.
1529 *
1530 * @discussion
1531 * The passed in element must be the same that was used to make the cursor.
1532 *
1533 * The call must be made in the same SMR transaction that was entered
1534 * to make the cursor.
1535 *
1536 * The cursor is invalidated once this call returns.
1537 *
1538 * The removed element can't be added back to the hash table
1539 * and must be retired via SMR and not freed immediately.
1540 *
1541 * @param smrh the scalable hash table.
1542 * @param cursor the cursor made for the element to remove.
1543 * @param link the element used to create @c cursor.
1544 * @param traits the SMR hash traits for this table.
1545 */
1546 #define smr_shash_entered_mut_erase(smrh, cursor, link, traits) \
1547 __smr_shash_entered_mut_erase(smrh, cursor, link, &(traits)->smrht)
1548
1549
/*!
 * @macro smr_shash_entered_mut_replace()
 *
 * @brief
 * Replaces the element used to make the cursor.
 *
 * @discussion
 * The passed in element must be the same that was used to make the cursor.
 *
 * The call must be made in the same SMR transaction that was entered
 * to make the cursor.
 *
 * The cursor is invalidated once this call returns.
 *
 * The removed element can't be added back to the hash table
 * and must be retired via SMR and not freed immediately.
 *
 * The new object MUST have the same key as the object it replaces,
 * otherwise behavior is undefined.
 *
 * @param cursor the cursor made for the element to remove.
 * @param old_link the element used to create @c cursor.
 * @param new_link the element to replace @c old_link with.
 * @param traits the SMR hash traits for this table.
 */
#define smr_shash_entered_mut_replace(cursor, old_link, new_link, traits) \
	__smr_shash_entered_mut_replace(cursor, old_link, new_link, &(traits)->smrht)
1578
1579
1580 /*!
1581 * @macro smr_shash_entered_mut_abort()
1582 *
1583 * @brief
1584 * Invalidates a cursor made with @c smr_shash_entered_mut_begin()
1585 *
1586 * @discussion
1587 * The call must be made in the same SMR transaction that was entered
1588 * to make the cursor.
1589 *
1590 * @param cursor the cursor to invalidate.
1591 */
1592 #define smr_shash_entered_mut_abort(cursor) \
1593 __smr_shash_entered_mut_abort(cursor)
1594
1595
1596 #endif /* XNU_KERNEL_PRIVATE */
1597 #pragma mark - implementation details
1598 #pragma mark SMR hash traits
1599
/* Expands to the object pointer type recorded in the traits' type marker. */
#define smrht_obj_t(traits) \
	typeof((traits)->smrht_obj_type[0])
1602
1603 static inline void *
__smrht_link_to_obj(smrh_traits_t traits,const struct smrq_slink * link)1604 __smrht_link_to_obj(smrh_traits_t traits, const struct smrq_slink *link)
1605 {
1606 void *ptr = (void *)((uintptr_t)link - traits->link_offset);
1607
1608 __builtin_assume(ptr != NULL);
1609 return ptr;
1610 }
1611
1612
1613 #pragma mark SMR hash tables
1614
1615 static inline unsigned long
__smr_hash_mask(struct smr_hash_array array)1616 __smr_hash_mask(struct smr_hash_array array)
1617 {
1618 return ~0ul >> array.smrh_order;
1619 }
1620
1621
1622 __attribute__((overloadable))
1623 static inline struct smrq_slist_head *
__smr_hash_bucket(const struct smr_hash * smrh,struct smrq_slink * link,smrh_traits_t smrht)1624 __smr_hash_bucket(
1625 const struct smr_hash *smrh,
1626 struct smrq_slink *link,
1627 smrh_traits_t smrht)
1628 {
1629 struct smr_hash_array array = smr_hash_array_decode(smrh);
1630 uint32_t index = __smr_hash_mask(array) & smrht->obj_hash(link, 0);
1631
1632 return &array.smrh_array[index];
1633 }
1634
1635 __attribute__((overloadable))
1636 static inline struct smrq_slist_head *
__smr_hash_bucket(const struct smr_hash * smrh,smrh_key_t key,smrh_traits_t smrht)1637 __smr_hash_bucket(
1638 const struct smr_hash *smrh,
1639 smrh_key_t key,
1640 smrh_traits_t smrht)
1641 {
1642 struct smr_hash_array array = smr_hash_array_decode(smrh);
1643 uint32_t index = __smr_hash_mask(array) & smrht->key_hash(key, 0);
1644
1645 return &array.smrh_array[index];
1646 }
1647
1648 static inline void *
__smr_hash_entered_find(const struct smrq_slist_head * head,smrh_key_t key,smrh_traits_t smrht)1649 __smr_hash_entered_find(
1650 const struct smrq_slist_head *head,
1651 smrh_key_t key,
1652 smrh_traits_t smrht)
1653 {
1654 for (struct smrq_slink *link = smr_entered_load(&head->first);
1655 link; link = smr_entered_load(&link->next)) {
1656 if (smrht->obj_equ(link, key)) {
1657 return __smrht_link_to_obj(smrht, link);
1658 }
1659 }
1660
1661 return NULL;
1662 }
1663
1664 static inline void *
__smr_hash_serialized_find(const struct smrq_slist_head * head,smrh_key_t key,smrh_traits_t smrht)1665 __smr_hash_serialized_find(
1666 const struct smrq_slist_head *head,
1667 smrh_key_t key,
1668 smrh_traits_t smrht)
1669 {
1670 for (struct smrq_slink *link = smr_serialized_load(&head->first);
1671 link; link = smr_serialized_load(&link->next)) {
1672 if (smrht->obj_equ(link, key)) {
1673 return __smrht_link_to_obj(smrht, link);
1674 }
1675 }
1676
1677 return NULL;
1678 }
1679
static inline void *
__smr_hash_get(
	const struct smr_hash  *smrh,
	smrh_key_t              key,
	smrh_traits_t           smrht)
{
	struct smrq_slist_head *head;
	void *obj = NULL;

	/* Both the lookup and obj_try_get must run inside the SMR section. */
	smr_enter(smrht->domain);
	head = __smr_hash_bucket(smrh, key, smrht);
	obj = __smr_hash_entered_find(head, key, smrht);
	/* Only return the element if a reference could be taken on it. */
	if (obj && !smrht->obj_try_get(obj)) {
		obj = NULL;
	}
	smr_leave(smrht->domain);

	return obj;
}
1699
static inline void *
__smr_hash_serialized_get_or_insert(
	struct smr_hash        *smrh,
	smrh_key_t              key,
	struct smrq_slink      *link,
	smrh_traits_t           smrht)
{
	struct smrq_slist_head *head;
	void *obj = NULL;

	head = __smr_hash_bucket(smrh, key, smrht);
	obj = __smr_hash_serialized_find(head, key, smrht);
	/*
	 * Insert when no element matches the key, or when the matching
	 * element is already dying (obj_try_get failed): in both cases
	 * the caller's element becomes the live one and NULL is returned.
	 */
	if (!obj || !smrht->obj_try_get(obj)) {
		smrh->smrh_count++;
		smrq_serialized_insert_head(head, link);
		obj = NULL;
	}

	return obj;
}
1720
1721 extern void __smr_hash_serialized_clear(
1722 struct smr_hash *smrh,
1723 smrh_traits_t smrht,
1724 void (^free)(void *obj));
1725
1726 extern kern_return_t __smr_hash_shrink_and_unlock(
1727 struct smr_hash *smrh,
1728 lck_mtx_t *lock,
1729 smrh_traits_t smrht);
1730
1731 extern kern_return_t __smr_hash_grow_and_unlock(
1732 struct smr_hash *smrh,
1733 lck_mtx_t *lock,
1734 smrh_traits_t smrht);
1735
1736 #if XNU_KERNEL_PRIVATE
1737 #pragma GCC visibility push(hidden)
1738
1739 #pragma mark SMR scalable hash tables
1740
/* Selects which of the two seed/array sets __smr_shash_bucket() uses. */
__enum_closed_decl(smrsh_sel_t, uint8_t, {
	SMRSH_CUR, /* the current set (state.curidx / state.curshift) */
	SMRSH_NEW, /* the set being migrated to (state.newidx / state.newshift) */
});
1745
__attribute__((always_inline))
static inline uint32_t
__smr_shash_load_seed(
	const struct smr_shash *smrh,
	size_t                  idx)
{
	/* Launder the base address so the indexed load keeps its address dependency. */
	uintptr_t addr = (uintptr_t)smrh->smrsh_seed;

	/*
	 * prevent the optimizer from thinking it knows _anything_
	 * about `smrsh_seed` to avoid codegen like this:
	 *
	 *     return idx ? smrh->smrsh_seed[1] : smrh->smrsh_seed[0]
	 *
	 * This only has a control dependency which doesn't provide
	 * the proper ordering.  (control dependencies order
	 * writes-after-dependency and not loads).
	 */

	return os_atomic_load(&((const uint32_t _Atomic *)addr)[idx], relaxed);
}
1767
__attribute__((always_inline))
static inline hw_lck_ptr_t *
__smr_shash_load_array(
	const struct smr_shash *smrh,
	size_t                  idx)
{
	/* Launder the base address so the indexed load keeps its address dependency. */
	uintptr_t addr = (uintptr_t)smrh->smrsh_array;

	/*
	 * prevent the optimizer from thinking it knows _anything_
	 * about `smrsh_array` to avoid codegen like this:
	 *
	 *     return idx ? smrh->smrsh_array[1] : smrh->smrsh_array[0]
	 *
	 * This only has a control dependency which doesn't provide
	 * the proper ordering.  (control dependencies order
	 * writes-after-dependency and not loads).
	 */

	return os_atomic_load(&((hw_lck_ptr_t * _Atomic const *)addr)[idx], relaxed);
}
1789
1790 __attribute__((always_inline, overloadable))
1791 static inline uint32_t
__smr_shash_hash(const struct smr_shash * smrh,size_t idx,smrh_key_t key,smrh_traits_t traits)1792 __smr_shash_hash(
1793 const struct smr_shash *smrh,
1794 size_t idx,
1795 smrh_key_t key,
1796 smrh_traits_t traits)
1797 {
1798 return traits->key_hash(key, __smr_shash_load_seed(smrh, idx));
1799 }
1800
1801 __attribute__((always_inline, overloadable))
1802 static inline uint32_t
__smr_shash_hash(const struct smr_shash * smrh,size_t idx,const struct smrq_slink * link,smrh_traits_t traits)1803 __smr_shash_hash(
1804 const struct smr_shash *smrh,
1805 size_t idx,
1806 const struct smrq_slink *link,
1807 smrh_traits_t traits)
1808 {
1809 return traits->obj_hash(link, __smr_shash_load_seed(smrh, idx));
1810 }
1811
1812 static inline hw_lck_ptr_t *
__smr_shash_bucket(const struct smr_shash * smrh,smrsh_state_t state,smrsh_sel_t sel,uint32_t hash)1813 __smr_shash_bucket(
1814 const struct smr_shash *smrh,
1815 smrsh_state_t state,
1816 smrsh_sel_t sel,
1817 uint32_t hash)
1818 {
1819 hw_lck_ptr_t *array;
1820 uint8_t shift;
1821
1822 switch (sel) {
1823 case SMRSH_CUR:
1824 array = __smr_shash_load_array(smrh, state.curidx);
1825 shift = state.curshift;
1826 break;
1827 case SMRSH_NEW:
1828 array = __smr_shash_load_array(smrh, state.newidx);
1829 shift = state.newshift;
1830 break;
1831 }
1832
1833 return &array[hash >> shift];
1834 }
1835
1836 static inline bool
__smr_shash_is_stop(struct smrq_slink * link)1837 __smr_shash_is_stop(struct smrq_slink *link)
1838 {
1839 return (uintptr_t)link & SMRSH_BUCKET_STOP_BIT;
1840 }
1841
1842 static inline struct smrq_slink *
__smr_shash_bucket_stop(const hw_lck_ptr_t * head)1843 __smr_shash_bucket_stop(const hw_lck_ptr_t *head)
1844 {
1845 return (struct smrq_slink *)((uintptr_t)head | SMRSH_BUCKET_STOP_BIT);
1846 }
1847
1848 extern void *__smr_shash_entered_find_slow(
1849 const struct smr_shash *smrh,
1850 smrh_key_t key,
1851 hw_lck_ptr_t *head,
1852 smrh_traits_t traits);
1853
static inline void *
__smr_shash_entered_find(
	const struct smr_shash *smrh,
	smrh_key_t              key,
	smrh_traits_t           traits)
{
	struct smrq_slink *link;
	smrsh_state_t state;
	hw_lck_ptr_t *head;
	uint32_t hash;

	/*
	 * Load the state with `dependency` ordering so that the seed and
	 * array loads below are ordered against it (see the comments in
	 * __smr_shash_load_seed()/__smr_shash_load_array()).
	 */
	state = os_atomic_load(&smrh->smrsh_state, dependency);
	hash = __smr_shash_hash(smrh, state.curidx, key, traits);
	head = __smr_shash_bucket(smrh, state, SMRSH_CUR, hash);

	/* Walk the chain until a match or a stop-bit-tagged terminator. */
	link = (struct smrq_slink *)hw_lck_ptr_value(head);
	while (!__smr_shash_is_stop(link)) {
		if (traits->obj_equ(link, key)) {
			return __smrht_link_to_obj(traits, link);
		}
		link = smr_entered_load(&link->next);
	}

	/*
	 * A chain normally terminates on its own bucket's stop marker.
	 * Ending on a different bucket's marker indicates a concurrent
	 * resize/rehash moved us, so retry via the slow path.
	 */
	if (__probable(link == __smr_shash_bucket_stop(head))) {
		return NULL;
	}
	return __smr_shash_entered_find_slow(smrh, key, head, traits);
}
1882
1883 static inline void *
__smr_shash_entered_get(const struct smr_shash * smrh,smrh_key_t key,smrh_traits_t traits)1884 __smr_shash_entered_get(
1885 const struct smr_shash *smrh,
1886 smrh_key_t key,
1887 smrh_traits_t traits)
1888 {
1889 void *obj = __smr_shash_entered_find(smrh, key, traits);
1890
1891 return obj && traits->obj_try_get(obj) ? obj : NULL;
1892 }
1893
1894 extern void __smr_shash_destroy(
1895 struct smr_shash *smrh,
1896 smrh_traits_t traits,
1897 void (^free)(void *));
1898
1899 extern void *__smr_shash_entered_get_or_insert(
1900 struct smr_shash *smrh,
1901 smrh_key_t key,
1902 struct smrq_slink *link,
1903 smrh_traits_t traits);
1904
1905 extern smr_shash_mut_cursor_t __smr_shash_entered_mut_begin(
1906 struct smr_shash *smrh,
1907 struct smrq_slink *link,
1908 smrh_traits_t traits);
1909
1910 extern void __smr_shash_entered_mut_erase(
1911 struct smr_shash *smrh,
1912 smr_shash_mut_cursor_t cursor,
1913 struct smrq_slink *link,
1914 smrh_traits_t traits);
1915
1916 extern void __smr_shash_entered_mut_replace(
1917 smr_shash_mut_cursor_t cursor,
1918 struct smrq_slink *old_link,
1919 struct smrq_slink *new_link);
1920
1921 extern void __smr_shash_entered_mut_abort(
1922 smr_shash_mut_cursor_t cursor);
1923
1924 #pragma GCC visibility pop
1925 #endif /* XNU_KERNEL_PRIVATE */
1926
1927 __END_DECLS
1928
1929 #endif /* _KERN_SMR_HASH_H_ */
1930