/*
 * Copyright (c) 2021-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KERN_SMR_H_
#define _KERN_SMR_H_

#include <sys/cdefs.h>
#include <stdbool.h>
#include <stdint.h>
#include <machine/trap.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/smr_types.h>
#include <os/atomic_private.h>
#if XNU_KERNEL_PRIVATE
#include <kern/startup.h>
#endif

__BEGIN_DECLS

#pragma mark SMR pointers

/*
 * SMR Accessors are meant to provide safe access to SMR protected
 * pointers and prevent misuse and accidental access.
 *
 * Accessors are grouped by type:
 * entered      - Use while in a read section (between smr_enter()/smr_leave())
 * serialized   - Use while holding a lock that serializes writers.
 *                Updates are synchronized with readers via included barriers.
 * unserialized - Use after the memory is out of scope and not visible to
 *                readers.
 *
 * All accesses include a parameter for an assert to verify the required
 * synchronization.
 */

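/*
 * Example (an illustrative sketch, not from this header: @c struct foo,
 * @c f_bar and the writer lock are hypothetical; SMR_POINTER() is
 * assumed to be provided by <kern/smr_types.h>):
 *
 *     struct foo {
 *             SMR_POINTER(struct bar *) f_bar;
 *     };
 *
 *     // reader, between smr_enter()/smr_leave()
 *     struct bar *b = smr_entered_load(&foo->f_bar);
 *
 *     // writer, while holding the lock that serializes f_bar updates
 *     smr_serialized_store(&foo->f_bar, new_bar);
 */
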
/*!
 * @macro smr_unsafe_load()
 *
 * @brief
 * Read from an SMR protected pointer without any synchronization.
 *
 * @discussion
 * This returns an integer on purpose as dereferencing it is generally unsafe.
 */
#define smr_unsafe_load(ptr) \
	({ (uintptr_t)((ptr)->__smr_ptr); })

/*!
 * @macro smr_entered_load()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section.
 */
#define smr_entered_load(ptr) \
	({ (ptr)->__smr_ptr; })

/*!
 * @macro smr_entered_load_assert()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section,
 * asserting that the @c smr domain is entered.
 */
#define smr_entered_load_assert(ptr, smr)  ({ \
	assert(smr_entered(smr)); \
	(ptr)->__smr_ptr; \
})

/*!
 * @macro smr_entered_load_acquire()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section (with acquire
 * fence).
 */
#define smr_entered_load_acquire(ptr) \
	os_atomic_load(&(ptr)->__smr_ptr, acquire)

/*!
 * @macro smr_entered_load_acquire_assert()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section (with acquire
 * fence), asserting that the @c smr domain is entered.
 */
#define smr_entered_load_acquire_assert(ptr, smr)  ({ \
	assert(smr_entered(smr)); \
	os_atomic_load(&(ptr)->__smr_ptr, acquire); \
})

/*!
 * @macro smr_serialized_load_assert()
 *
 * @brief
 * Read from an SMR protected pointer while serialized by an
 * external mechanism.
 */
#define smr_serialized_load_assert(ptr, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_load: lock not held"); \
	(ptr)->__smr_ptr; \
})

/*!
 * @macro smr_serialized_load()
 *
 * @brief
 * Read from an SMR protected pointer while serialized by an
 * external mechanism.
 */
#define smr_serialized_load(ptr) \
	smr_serialized_load_assert(ptr, true)

/*!
 * @macro smr_init_store()
 *
 * @brief
 * Store @c value to an SMR protected pointer during initialization.
 */
#define smr_init_store(ptr, value) \
	({ (ptr)->__smr_ptr = value; })

/*!
 * @macro smr_clear_store()
 *
 * @brief
 * Clears (sets to 0) an SMR protected pointer (this is always allowed).
 */
#define smr_clear_store(ptr) \
	smr_init_store(ptr, 0)

/*!
 * @macro smr_serialized_store_assert()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 */
#define smr_serialized_store_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_store: lock not held"); \
	os_atomic_thread_fence(release); \
	(ptr)->__smr_ptr = value; \
})

/*!
 * @macro smr_serialized_store()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 */
#define smr_serialized_store(ptr, value) \
	smr_serialized_store_assert(ptr, value, true)

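/*
 * Publication sketch (hypothetical @c node/@c head types; the writer
 * lock is assumed held):
 *
 *     node->n_value = value;                 // initialize payload first
 *     smr_init_store(&node->n_next, NULL);
 *     // the release fence in smr_serialized_store() orders the payload
 *     // initialization above before the pointer becomes reader-visible
 *     smr_serialized_store(&head->n_first, node);
 */
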
/*!
 * @macro smr_serialized_store_relaxed_assert()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * This function can be used when storing a value that was already
 * previously stored with smr_serialized_store() (for example during
 * a linked list removal).
 */
#define smr_serialized_store_relaxed_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_store_relaxed: lock not held"); \
	(ptr)->__smr_ptr = value; \
})

/*!
 * @macro smr_serialized_store_relaxed()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * This function can be used when storing a value that was already
 * previously stored with smr_serialized_store() (for example during
 * a linked list removal).
 */
#define smr_serialized_store_relaxed(ptr, value) \
	smr_serialized_store_relaxed_assert(ptr, value, true)

/*!
 * @macro smr_serialized_swap_assert()
 *
 * @brief
 * Swap @c value with an SMR protected pointer and return the old value,
 * while serialized by an external mechanism.
 *
 * @discussion
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define smr_serialized_swap_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_swap: lock not held"); \
	os_atomic_xchg(&(ptr)->__smr_ptr, value, release); \
})

/*!
 * @macro smr_serialized_swap()
 *
 * @brief
 * Swap @c value with an SMR protected pointer and return the old value,
 * while serialized by an external mechanism.
 *
 * @discussion
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define smr_serialized_swap(ptr, value) \
	smr_serialized_swap_assert(ptr, value, true)

/*!
 * @macro smr_unserialized_load()
 *
 * @brief
 * Read from an SMR protected pointer when no serialization is required,
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define smr_unserialized_load(ptr) \
	({ (ptr)->__smr_ptr; })

/*!
 * @macro smr_unserialized_store()
 *
 * @brief
 * Store to an SMR protected pointer when no serialization is required,
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define smr_unserialized_store(ptr, value) \
	({ (ptr)->__smr_ptr = value; })


#pragma mark SMR queues

/*
 * SMR queues are queues that are meant to be read under SMR critical sections
 * concurrently with possible updates to the queue.
 *
 * /!\ Such read operations CAN ONLY BE PERFORMED IN FORWARD DIRECTION. /!\
 *
 * Queues can be either:
 * - lists where the head is a single pointer,
 *   and insertions can only be at the head;
 * - tail queues where the head is two pointers,
 *   and insertions can be either at the head or the tail.
 *
 * Queue linkages can either be single forward pointer linkages or double
 * forward/backward linkages. The latter supports O(1) deletion.
 *
 * The entire API surface uses type inference for the implementations,
 * which makes it relatively easy to switch between the 4 types of queues
 * with minimal API changes (mostly the types of list heads and fields).
 */

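/*
 * Example declarations (a sketch; @c struct item and @c struct bucket
 * are hypothetical, the head and linkage types come from
 * <kern/smr_types.h>):
 *
 *     struct item {
 *             struct smrq_slink       i_link;  // single forward linkage
 *             uint64_t                i_key;
 *     };
 *
 *     struct bucket {
 *             struct smrq_slist_head  b_head;  // head is a single pointer
 *     };
 */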

/*!
 * @macro smrq_init
 *
 * @brief
 * Initializes an SMR queue head.
 */
#define smrq_init(head)  ({ \
	__auto_type __head = (head);                                            \
                                                                                \
	smr_init_store(&__head->first, NULL);                                   \
	if (__smrq_lastp(__head)) {                                             \
	    *__smrq_lastp(__head) = &__head->first;                             \
	}                                                                       \
})


/*!
 * @macro smrq_empty
 *
 * @brief
 * Returns whether an SMR queue is empty; can be called from any context.
 */
#define smrq_empty(head) \
	(smr_unsafe_load(&(head)->first) == 0)


/*!
 * @macro smrq_entered_first
 *
 * @brief
 * Returns the first element of an SMR queue, while in a read section.
 */
#define smrq_entered_first(head, type_t, field) \
	__container_of_safe(smr_entered_load(&(head)->first), type_t, field)


/*!
 * @macro smrq_entered_next
 *
 * @brief
 * Returns the next element of an SMR queue element, while in a read section.
 */
#define smrq_entered_next(elem, field) \
	__container_of_safe(smr_entered_load(&(elem)->field.next), \
	    typeof(*(elem)), field)


/*!
 * @macro smrq_entered_foreach
 *
 * @brief
 * Enumerates an SMR queue, while in a read section.
 */
#define smrq_entered_foreach(it, head, field) \
	for (__auto_type __it = smr_entered_load(&(head)->first);               \
	    ((it) = __container_of_safe(__it, typeof(*(it)), field));           \
	    __it = smr_entered_load(&__it->next))

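/*
 * Reader-side lookup sketch (hypothetical types from above, inside
 * an smr_enter()/smr_leave() section):
 *
 *     struct item *it;
 *
 *     smrq_entered_foreach(it, &bucket->b_head, i_link) {
 *             if (it->i_key == key) {
 *                     break;
 *             }
 *     }
 */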

/*!
 * @macro smrq_serialized_first
 *
 * @brief
 * Returns the first element of an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_first(head, type_t, link) \
	__container_of_safe(smr_serialized_load(&(head)->first), type_t, link)

/*!
 * @macro smrq_serialized_next
 *
 * @brief
 * Returns the next element of an SMR queue element, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_next(elem, field) \
	__container_of_safe(smr_serialized_load(&(elem)->field.next), \
	    typeof(*(elem)), field)

/*!
 * @macro smrq_serialized_foreach
 *
 * @brief
 * Enumerates an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_foreach(it, head, field) \
	for (__auto_type __it = smr_serialized_load(&(head)->first);            \
	    ((it) = __container_of_safe(__it, typeof(*(it)), field));           \
	    __it = smr_serialized_load(&__it->next))

/*!
 * @macro smrq_serialized_foreach_safe
 *
 * @brief
 * Enumerates an SMR queue, while being serialized
 * by an external mechanism.
 *
 * @discussion
 * This variant supports removing the current element from the queue.
 */
#define smrq_serialized_foreach_safe(it, head, field) \
	for (__auto_type __it = smr_serialized_load(&(head)->first),            \
	    __next_it = __it;                                                   \
	    ((it) = __container_of_safe(__it, typeof(*(it)), field)) &&         \
	    ((__next_it = smr_serialized_load(&__it->next)), 1);                \
	    __it = __next_it)

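/*
 * Removal sketch with the _safe variant (writer lock assumed held;
 * hypothetical types from above):
 *
 *     struct item *it;
 *
 *     smrq_serialized_foreach_safe(it, &bucket->b_head, i_link) {
 *             if (it->i_key == key) {
 *                     smrq_serialized_remove(&bucket->b_head, &it->i_link);
 *             }
 *     }
 */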

/*!
 * @macro smrq_serialized_insert_head
 *
 * @brief
 * Inserts an element at the head of an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_insert_head(head, elem)  ({ \
	__auto_type __head = (head);                                            \
                                                                                \
	__smrq_serialized_insert(&__head->first, (elem),                        \
	   smr_serialized_load(&__head->first), __smrq_lastp(__head));          \
})


/*!
 * @macro smrq_serialized_insert_tail
 *
 * @brief
 * Inserts an element at the tail of an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_insert_tail(head, elem)  ({ \
	__auto_type __head = (head);                                            \
                                                                                \
	__smrq_serialized_insert(__head->last, (elem),                          \
	   NULL, &__head->last);                                                \
})


/*!
 * @macro smrq_serialized_insert_head_relaxed
 *
 * @brief
 * Inserts an element at the head of an SMR queue, while being serialized
 * by an external mechanism, without any barrier.
 */
#define smrq_serialized_insert_head_relaxed(head, elem)  ({ \
	__auto_type __head = (head);                                            \
                                                                                \
	__smrq_serialized_insert_relaxed(&__head->first, (elem),                \
	   smr_serialized_load(&__head->first), __smrq_lastp(__head));          \
})


/*!
 * @macro smrq_serialized_insert_tail_relaxed
 *
 * @brief
 * Inserts an element at the tail of an SMR queue, while being serialized
 * by an external mechanism, without any barrier.
 */
#define smrq_serialized_insert_tail_relaxed(head, elem)  ({ \
	__auto_type __head = (head);                                            \
                                                                                \
	__smrq_serialized_insert_relaxed(__head->last, (elem),                  \
	   NULL, &__head->last);                                                \
})


/*!
 * @macro smrq_serialized_remove
 *
 * @brief
 * Removes an element from an SMR queue, while being serialized
 * by an external mechanism.
 *
 * @discussion
 * The @c head argument is actually unused for the @c smrq_list queue type.
 * It is still advised to pass it; the compiler should be able to optimize
 * the code away, as computing a list head ought to have no side effects.
 */
#define smrq_serialized_remove(head, elem)  ({ \
	__auto_type __head = (head);                                            \
                                                                                \
	__smrq_serialized_remove(&__head->first, (elem), __smrq_lastp(__head)); \
})


/*!
 * @macro smrq_serialized_replace
 *
 * @brief
 * Replaces an element on an SMR queue with another at the same spot,
 * while being serialized by an external mechanism.
 */
#define smrq_serialized_replace(head, old_elem, new_elem)  ({ \
	__auto_type __head = (head);                                            \
                                                                                \
	__smrq_serialized_replace(&__head->first,                               \
	    (old_elem), (new_elem), __smrq_lastp(__head));                      \
})


/*!
 * @macro smrq_serialized_iter
 *
 * @brief
 * Enumerates an SMR singly linked queue, while being serialized
 * by an external mechanism.
 *
 * @discussion
 * This is for manual loops that typically perform erasures.
 *
 * The body of the loop must move the cursor using (once):
 * - smrq_serialized_iter_next() to go to the next element,
 * - smrq_serialized_iter_erase() to erase the current element.
 *
 * The iterator variable will _not_ be updated until the next
 * loop iteration.
 *
 * This form is preferred to smrq_serialized_foreach_safe()
 * for singly linked lists as smrq_serialized_iter_erase()
 * is O(1) as opposed to smrq_serialized_remove().
 */
#define smrq_serialized_iter(it, head, field) \
	for (__smrq_slink_t *__prev_##it = &(head)->first,                      \
	    *__chk_##it = __prev_##it;                                          \
	    ((it) = __container_of_safe(smr_serialized_load(__prev_##it),       \
	    typeof(*(it)), field));                                             \
	    assert(__chk_##it), __chk_##it = __prev_##it)

/*!
 * @macro smrq_serialized_iter_next
 *
 * @brief
 * Goes to the next element inside an smrq_serialized_iter() loop.
 */
#define smrq_serialized_iter_next(it, field)  ({ \
	assert(__chk_##it == __prev_##it);                                      \
	__chk_##it = NULL;                                                      \
	__prev_##it = &(it)->field.next;                                        \
})

/*!
 * @macro smrq_serialized_iter_erase
 *
 * @brief
 * Erases the element pointed at by the cursor.
 */
#define smrq_serialized_iter_erase(it, field)  ({ \
	assert(__chk_##it == __prev_##it);                                      \
	__chk_##it = NULL;                                                      \
	__smrq_serialized_remove_one(__prev_##it, &(it)->field, NULL);          \
})

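/*
 * Manual erasure loop sketch (writer lock assumed held; hypothetical
 * types from above):
 *
 *     struct item *it;
 *
 *     smrq_serialized_iter(it, &bucket->b_head, i_link) {
 *             if (it->i_key == key) {
 *                     smrq_serialized_iter_erase(it, i_link);
 *             } else {
 *                     smrq_serialized_iter_next(it, i_link);
 *             }
 *     }
 */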

/*!
 * @macro smrq_serialized_append
 *
 * @brief
 * Appends the @c src queue at the end of the @c dst queue.
 *
 * @discussion
 * /!\ WARNING /!\: this doesn't "move" the "source" queue like *_CONCAT
 * for <sys/queue.h>, as it is useful to merge/split hash queues concurrently
 * with readers while allowing readers to still read via the "source" queue.
 *
 * However, the "source" queue needs to be reset to a valid state
 * if it is to be used again.
 */
#define smrq_serialized_append(dst, src)  ({ \
	__auto_type __src = (src);                                              \
	__auto_type __dst = (dst);                                              \
                                                                                \
	__smrq_serialized_append(&__dst->first, __smrq_lastp(__dst),            \
	    &__src->first, __smrq_lastp(__src));                                \
})


#pragma mark SMR domains

/*!
 * @enum smr_flags_t
 *
 * @brief
 * Options to pass to smr_domain_create()
 *
 * @const SMR_NONE
 * Default values for the flags.
 #if XNU_KERNEL_PRIVATE
 *
 * @const SMR_SLEEPABLE
 * Create a sleepable SMR domain.
 #endif
 */
__options_closed_decl(smr_flags_t, unsigned long, {
	SMR_NONE              = 0x00000000,
#if XNU_KERNEL_PRIVATE
	SMR_SLEEPABLE         = 0x00000001,
#endif
});

/*!
 * @function smr_domain_create()
 *
 * @brief
 * Create an SMR domain.
 *
 * @discussion
 * Be mindful when creating SMR domains, and consider carefully
 * whether to add one or consolidate an existing one.
 *
 *
 * Memory usage
 * ~~~~~~~~~~~~
 *
 * SMR domains are fairly large structures that scale with the number
 * of cores of the machine. They are meant to be used in a coarse-grained
 * manner.
 *
 * In addition to that, when @c smr_call() is used with the domain,
 * the queues of callbacks are drained based on memory pressure within
 * the domain. The more domains, the more dormant memory might exist.
 *
 * In general, memory considerations drive toward fewer domains.
 *
 *
 * Scalability
 * ~~~~~~~~~~~
 *
 * An SMR domain is built on top of an atomic state that is used
 * to perform grace period detection. The more "write" activity
 * there is on the domain (@c smr_call(), @c smr_advance(), etc...),
 * the more this atomic might become contended. In particular,
 * certain usage patterns might scale extremely well independently,
 * but cause higher contention when sharing a domain.
 *
 * Another thing to consider is that when @c smr_call() is being used,
 * if the callbacks act on vastly different data structures, then as
 * the callbacks are being drained, cache misses will be higher.
 *
 * However, the more domains are in use, the more likely it is
 * that touching any one of them will cause a cache miss.
 *
 * Generally, scalability considerations drive toward balanced
 * coarse-grained domains.
 *
 *
 * Invariants
 * ~~~~~~~~~~
 *
 * The last aspect leading to the decision of creating versus reusing
 * an SMR domain is about the invariants that these domains protect.
 *
 * Object graphs that are protected with SMR and are used together
 * in many workloads will likely need to share an SMR domain
 * in order to provide the required guarantees. If @c smr_call()
 * callbacks in a given domain regularly cause downstream @c smr_call()s
 * into a different domain, it is probably a sign that these
 * domains should be shared.
 *
 * Another aspect to consider is that using @c smr_synchronize()
 * or @c smr_barrier() can lead to two classes of problems:
 *
 * - these operations are extremely heavy, and if some subsystem needs to
 *   perform them on several domains, performance will be disappointing.
 *
 * - these operations are akin to taking a "write lock" on the domain,
 *   and as such can cause deadlocks when used improperly.
 *   Using a coarser grained unique domain is a good way to simplify
 *   reasoning about the locking dependencies between SMR domains
 *   and other regular locks.
 *
 *
 * Guidance
 * ~~~~~~~~
 *
 * In general, the entire kernel should have relatively few SMR domains,
 * at the scale of the big subsystems of the kernel (think: Mach IPC, Mach VM,
 * VFS, Networking, ...).
 *
 * When write operations (@c smr_call(), @c smr_synchronize(), ...) are used
 * rarely, consider using the system wide default domains.
 */
extern smr_t smr_domain_create(smr_flags_t flags, const char *name);

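/*
 * Creation sketch (the variable and subsystem name are hypothetical):
 *
 *     static smr_t my_smr;
 *
 *     my_smr = smr_domain_create(SMR_NONE, "my_subsystem");
 *     ...
 *     smr_domain_free(my_smr);
 */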

/*!
 * @function smr_domain_free()
 *
 * @brief
 * Destroys an SMR domain previously created with @c smr_domain_create().
 */
extern void smr_domain_free(smr_t smr);


/*!
 * @function smr_entered()
 *
 * @brief
 * Returns whether an SMR critical section is entered.
 */
extern bool smr_entered(smr_t smr) __result_use_check;

/*!
 * @function smr_enter()
 *
 * @brief
 * Enter a non preemptible SMR critical section.
 *
 * @discussion
 * Entering an SMR critical section is not reentrant
 * (entering it recursively is undefined and will panic on development kernels).
 *
 * @c smr_leave() must be called to end this section.
 *
 * This function cannot be used in interrupt context.
 */
extern void smr_enter(smr_t smr);

/*!
 * @function smr_leave()
 *
 * @brief
 * Leave a non preemptible SMR critical section.
 */
extern void smr_leave(smr_t smr);

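/*
 * Read-section sketch (@c my_smr, @c table and @c t_slot are
 * hypothetical):
 *
 *     smr_enter(my_smr);
 *     obj = smr_entered_load(&table->t_slot);
 *     if (obj) {
 *             // use obj; must not block, the section is non preemptible
 *     }
 *     smr_leave(my_smr);
 */
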
/*!
 * @function smr_call()
 *
 * @brief
 * Defer making a call until it is safe to assume all readers
 * will observe any update prior to this call.
 *
 * @discussion
 * The target SMR domain must NOT be entered when making this call.
 *
 * The passed @c size doesn't have to be precise; it should be a rough
 * estimate of the memory that will be reclaimed when the call is made.
 *
 * This function gives no guarantee of forward progress,
 * unless the magic SMR_CALL_EXPEDITE size is passed to @c smr_call().
 *
 * This function cannot be used in interrupt context.
 */
extern void smr_call(smr_t smr, smr_node_t node, vm_size_t size, smr_cb_t cb);

#define SMR_CALL_EXPEDITE       ((vm_size_t)~0)

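/*
 * Deferred-free sketch (assumes the @c smr_node is embedded in the
 * object and that the callback receives the enqueued node; @c my_obj,
 * @c o_node and @c my_smr are hypothetical):
 *
 *     static void
 *     my_obj_free_cb(smr_node_t node)
 *     {
 *             struct my_obj *o = __container_of(node, struct my_obj, o_node);
 *
 *             kfree_type(struct my_obj, o);
 *     }
 *
 *     // after unlinking `o` from all SMR-visible structures:
 *     smr_call(my_smr, &o->o_node, sizeof(*o), my_obj_free_cb);
 */
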
/*!
 * @function smr_synchronize()
 *
 * @brief
 * Wait until all readers have observed any updates made prior to this call.
 *
 * @discussion
 * The target SMR domain must NOT be entered when making this call.
 *
 * This function is quite expensive, and asynchronous deferred processing
 * using @c smr_call() should be used instead when possible.
 *
 * Reserve using this call for events that are extremely rare (like system
 * configuration events such as configuring networking interfaces, changing
 * system wide security policies, or loading/unloading a kernel extension).
 *
 * This function should typically be called with preemption enabled,
 * and no locks held.
 */
extern void smr_synchronize(smr_t smr);

/*!
 * @function smr_barrier()
 *
 * @brief
 * Wait until all readers have observed any updates made prior to this call,
 * and all @c smr_call() callbacks dispatched prior to this call on any core
 * have completed.
 *
 * @discussion
 * The target SMR domain must NOT be entered when making this call.
 *
 * This function is typically used when some data structure is being
 * accessed by @c smr_call() callbacks and that data structure needs
 * to be retired.
 *
 * Reserve using this call for events that are extremely rare (like system
 * configuration events such as configuring networking interfaces, changing
 * system wide security policies, or loading/unloading a kernel extension).
 *
 * This function should typically be called with preemption enabled,
 * and no locks held.
 */
extern void smr_barrier(smr_t smr);


#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)
#pragma mark - XNU only
#pragma mark XNU only: SMR domains advanced

#define SMR_SEQ_INVALID         ((smr_seq_t)0)
#define SMR_SEQ_SLEEPABLE       ((smr_seq_t)1) /* only on smr_pcpu::rd_seq */
#define SMR_SEQ_INIT            ((smr_seq_t)2)
#define SMR_SEQ_INC             ((smr_seq_t)4)

typedef long                    smr_delta_t;

#define SMR_SEQ_DELTA(a, b)     ((smr_delta_t)((a) - (b)))
#define SMR_SEQ_CMP(a, op, b)   (SMR_SEQ_DELTA(a, b) op 0)

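/*
 * Sequence numbers compare through a signed delta so that wraparound
 * is handled, e.g. (a sketch):
 *
 *     if (SMR_SEQ_CMP(rd_seq, >=, goal)) {
 *             // the goal sequence has been observed
 *     }
 */
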
/*!
 * @typedef smr_clock_t
 *
 * @brief
 * Represents an SMR domain clock, an internal type not manipulated by clients.
 */
typedef struct {
	smr_seq_t               s_rd_seq;
	smr_seq_t               s_wr_seq;
} smr_clock_t;

#define SMR_NAME_MAX            24

/*!
 * @typedef smr_t
 *
 * @brief
 * Declares an SMR domain of synchronization.
 */
struct smr {
	smr_clock_t             smr_clock;
	struct smr_pcpu        *smr_pcpu;
	unsigned long           smr_flags;
	unsigned long           smr_early;
	char                    smr_name[SMR_NAME_MAX];
} __attribute__((aligned(64)));

/*!
 * @macro SMR_DEFINE_FLAGS
 *
 * @brief
 * Define an SMR domain with specific create flags.
 */
#define SMR_DEFINE_FLAGS(var, name, flags) \
	struct smr var = { \
	        .smr_clock.s_rd_seq = SMR_SEQ_INIT, \
	        .smr_clock.s_wr_seq = SMR_SEQ_INIT, \
	        .smr_flags = (flags), \
	        .smr_name  = "" name, \
	}; \
	STARTUP_ARG(TUNABLES, STARTUP_RANK_LAST, __smr_domain_init, &(var)); \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __smr_domain_init, &(var))

/*!
 * @macro SMR_DEFINE
 *
 * @brief
 * Define an SMR domain.
 */
#define SMR_DEFINE(var, name) \
	SMR_DEFINE_FLAGS(var, name, SMR_NONE)


/*!
 * @macro SMR_DEFINE_SLEEPABLE
 *
 * @brief
 * Define a sleepable SMR domain.
 */
#define SMR_DEFINE_SLEEPABLE(var, name) \
	SMR_DEFINE_FLAGS(var, name, SMR_SLEEPABLE)

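/*
 * Static definition sketch (hypothetical subsystem name):
 *
 *     SMR_DEFINE(my_subsystem_smr, "my_subsystem");
 */
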
/*!
 * @function smr_advance()
 *
 * @brief
 * Advance the write sequence and return the value
 * for use as a wait goal.
 *
 * @discussion
 * This guarantees that any changes made by the calling thread
 * prior to this call will be visible to all threads after
 * the read sequence meets or exceeds the return value.
 */
extern smr_seq_t smr_advance(smr_t smr) __result_use_check;

/*!
 * @function smr_deferred_advance()
 *
 * @brief
 * Pretend-advance the write sequence and return the value
 * for use as a wait goal.
 *
 * @discussion
 * This guarantees that any changes made by the calling thread
 * prior to this call will be visible to all threads after
 * the read sequence meets or exceeds the return value.
 *
 * Unlike smr_advance(), the global clock isn't really advanced;
 * this only sets a goal in the future. This can be used to control
 * the pace of updating the global clock and avoid global atomics.
 *
 * In order for the clock to advance, clients of this API must call
 * @c smr_deferred_advance_commit() with the goal returned by this call.
 *
 * Note that calls to @c smr_advance() or @c smr_wait() when passed
 * the goal returned by this function would also allow the clock
 * to make progress and are legal (yet less efficient) calls to make.
 */
extern smr_seq_t smr_deferred_advance(smr_t smr) __result_use_check;

/*!
 * @function smr_deferred_advance_commit()
 *
 * @brief
 * Actually advance the write sequence to the goal returned by a previous
 * call to @c smr_deferred_advance().
 */
extern void smr_deferred_advance_commit(smr_t smr, smr_seq_t seq);


/*!
 * @function smr_poll
 *
 * @brief
 * Poll to determine whether all readers have observed the @c goal
 * write sequence number.
 *
 * @discussion
 * This function is safe to call from preemption disabled context,
 * and its worst-case complexity is O(ncpu).
 *
 * @returns true if the goal is met and false if not.
 */
extern bool smr_poll(smr_t smr, smr_seq_t goal) __result_use_check;

/*!
 * @function smr_wait
 *
 * @brief
 * Wait until all readers have observed
 * the @c goal write sequence number.
 *
 * @discussion
 * This function is safe to call from preemption disabled context
 * as it never explicitly blocks; however, this is not recommended.
 */
extern void smr_wait(smr_t smr, smr_seq_t goal);

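/*
 * Explicit grace-period sketch (@c my_smr, @c table and @c t_slot are
 * hypothetical):
 *
 *     smr_clear_store(&table->t_slot);        // unpublish first
 *     smr_seq_t goal = smr_advance(my_smr);
 *     smr_wait(my_smr, goal);                 // or check with smr_poll()
 */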

#pragma mark XNU only: major sleepable SMR domains
/*
 * Note: this is private for now because sleepable sections that do "bad" things
 *       (such as doing an upcall to userspace, or doing VM allocations) have
 *       the danger that they can stall the reclamation worker threads,
 *       which are a singleton resource.
 *
 *       Until this can be mitigated or designed better, this stays private.
 */

/*!
 * @typedef smr_tracker_t
 *
 * @brief
 * Structure used to track active sleepable SMR sections.
 *
 * @field smrt_domain  the entered SMR domain.
 * @field smrt_seq     the SMR sequence at the time of smr_enter_sleepable().
 * @field smrt_link    linkage used to track stalled sections.
 * @field smrt_stack   linkage used to track entered sections.
 * @field smrt_ctid    (if stalled) the ctid of the thread in this section.
 * @field smrt_cpu     (if stalled) the cpu the thread was on when stalled.
 */
typedef struct smr_tracker {
	smr_t                   smrt_domain;
	smr_seq_t               smrt_seq;
	struct smrq_link        smrt_link;
	struct smrq_slink       smrt_stack;
	uint32_t                smrt_ctid;
	int                     smrt_cpu;
} *smr_tracker_t;

/*!
 * @function smr_enter_sleepable()
 *
 * @brief
 * Enter a sleepable SMR critical section.
 *
 * @discussion
 * Entering an SMR critical section is not recursive
 * (entering it recursively is undefined and will panic on development kernels).
 *
 * @c smr_leave_sleepable() must be called to end this section,
 * passing the same tracker pointer.
 *
 * The SMR domain must have been created with the @c SMR_SLEEPABLE flag.
 *
 * It is permitted to perform operations that might block inside such a
 * section, such as acquiring a lock or freeing memory.
 *
 * It is forbidden to perform operations that wait for an unbounded amount of
 * time such as waiting for networking packets or even a hardware driver event,
 * as these could cause grace periods (and memory reclamation) to stall for
 * a very long time.
 */
extern void smr_enter_sleepable(smr_t smr, smr_tracker_t tracker);

/*!
 * @function smr_leave_sleepable()
 *
 * @brief
 * Leave a sleepable SMR critical section entered with @c smr_enter_sleepable().
 */
extern void smr_leave_sleepable(smr_t smr, smr_tracker_t tracker);

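/*
 * Sleepable section sketch (the tracker lives on the caller's stack;
 * @c my_sleepable_smr is hypothetical and must have been created with
 * SMR_SLEEPABLE):
 *
 *     struct smr_tracker t;
 *
 *     smr_enter_sleepable(my_sleepable_smr, &t);
 *     ...                     // may block (locks, allocations, ...)
 *     smr_leave_sleepable(my_sleepable_smr, &t);
 */
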
#pragma mark XNU only: major subsystems SMR domains

/*!
 * @brief
 * A global system wide non preemptible domain.
 *
 * @discussion
 * This is provided as a fallback for when a specific SMR domain
 * would be overkill.
 *
 * Try not to use the @c smr_system name directly; instead define
 * a subsystem domain that happens to be defined to it, so that
 * understanding the invariants being provided is easier.
 */
extern struct smr smr_system;

/*!
 * @brief
 * A global system wide sleepable domain.
 *
 * @discussion
 * This is provided as a fallback for when a specific SMR domain
 * would be overkill.
 *
 * Try not to use the @c smr_system_sleepable name directly;
 * instead define a subsystem domain that happens to be defined to it,
 * so that understanding the invariants being provided is easier.
 */
extern struct smr smr_system_sleepable;

/*!
 * @macro smr_ipc
 *
 * @brief
 * The SMR domain for the Mach IPC subsystem.
 */
#define smr_ipc                         smr_system
#define smr_ipc_entered()               smr_entered(&smr_ipc)
#define smr_ipc_enter()                 smr_enter(&smr_ipc)
#define smr_ipc_leave()                 smr_leave(&smr_ipc)

#define smr_ipc_call(n, sz, cb)         smr_call(&smr_ipc, n, sz, cb)
#define smr_ipc_synchronize()           smr_synchronize(&smr_ipc)
#define smr_ipc_barrier()               smr_barrier(&smr_ipc)


/*!
 * @macro smr_proc_task
 *
 * @brief
 * The SMR domain for the proc/task and adjacent objects.
 */
#define smr_proc_task                   smr_system
#define smr_proc_task_entered()         smr_entered(&smr_proc_task)
#define smr_proc_task_enter()           smr_enter(&smr_proc_task)
#define smr_proc_task_leave()           smr_leave(&smr_proc_task)

#define smr_proc_task_call(n, sz, cb)   smr_call(&smr_proc_task, n, sz, cb)
#define smr_proc_task_synchronize()     smr_synchronize(&smr_proc_task)
#define smr_proc_task_barrier()         smr_barrier(&smr_proc_task)


/*!
 * @macro smr_iokit
 *
 * @brief
 * The SMR domain for IOKit.
 */
#define smr_iokit                       smr_system
#define smr_iokit_entered()             smr_entered(&smr_iokit)
#define smr_iokit_enter()               smr_enter(&smr_iokit)
#define smr_iokit_leave()               smr_leave(&smr_iokit)

#define smr_iokit_call(n, sz, cb)       smr_call(&smr_iokit, n, sz, cb)
#define smr_iokit_synchronize()         smr_synchronize(&smr_iokit)
#define smr_iokit_barrier()             smr_barrier(&smr_iokit)


/*!
 * @macro smr_oslog
 *
 * @brief
 * The SMR domain for kernel OSLog handles.
 */
#define smr_oslog                       smr_system
#define smr_oslog_entered()             smr_entered(&smr_oslog)
#define smr_oslog_enter()               smr_enter(&smr_oslog)
#define smr_oslog_leave()               smr_leave(&smr_oslog)

#define smr_oslog_call(n, sz, cb)       smr_call(&smr_oslog, n, sz, cb)
#define smr_oslog_synchronize()         smr_synchronize(&smr_oslog)
#define smr_oslog_barrier()             smr_barrier(&smr_oslog)


#pragma mark XNU only: implementation details

extern void __smr_domain_init(smr_t);

#ifdef MACH_KERNEL_PRIVATE
struct processor;

extern bool smr_entered_cpu_noblock(smr_t smr, int cpu) __result_use_check;

extern void smr_ack_ipi(void);

extern void smr_mark_active_trackers_stalled(struct thread *self);

__options_closed_decl(smr_cpu_reason_t, uint8_t, {
	SMR_CPU_REASON_NONE        = 0x00,
	SMR_CPU_REASON_OFFLINE     = 0x01,
	SMR_CPU_REASON_IGNORED     = 0x02,
	SMR_CPU_REASON_ALL         = 0x03,
});

extern void smr_cpu_init(struct processor *);
extern void smr_cpu_up(struct processor *, smr_cpu_reason_t);
extern void smr_cpu_down(struct processor *, smr_cpu_reason_t);

extern void smr_cpu_join(struct processor *, uint64_t ctime);
extern void smr_cpu_tick(uint64_t ctime, bool safe_point);
extern void smr_cpu_leave(struct processor *, uint64_t ctime);

extern void smr_maintenance(uint64_t ctime);

#if CONFIG_QUIESCE_COUNTER
extern void cpu_quiescent_set_storage(uint64_t _Atomic *ptr);
#endif
#endif /* MACH_KERNEL_PRIVATE */

extern uint32_t smr_cpu_checkin_get_min_interval_us(void);

extern void smr_cpu_checkin_set_min_interval_us(uint32_t new_value);

#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */
#pragma mark - implementation details
#pragma mark implementation details: SMR queues

__dead2
static inline void
__smr_linkage_invalid(__smrq_link_t *link)
{
	struct smrq_link *elem = __container_of(link, struct smrq_link, next);

	ml_fatal_trap_invalid_list_linkage((unsigned long)elem);
}

__dead2
static inline void
__smr_stail_invalid(__smrq_slink_t *link, __smrq_slink_t *last __unused)
{
	struct smrq_slink *elem = __container_of(link, struct smrq_slink, next);

	ml_fatal_trap_invalid_list_linkage((unsigned long)elem);
}

__dead2
static inline void
__smr_tail_invalid(__smrq_link_t *link, __smrq_link_t *last __unused)
{
	struct smrq_link *elem = __container_of(link, struct smrq_link, next);

	ml_fatal_trap_invalid_list_linkage((unsigned long)elem);
}

__attribute__((always_inline, overloadable))
static inline __smrq_slink_t **
__smrq_lastp(struct smrq_slist_head *head __unused)
{
	return NULL;
}

__attribute__((always_inline, overloadable))
static inline __smrq_link_t **
__smrq_lastp(struct smrq_list_head *head __unused)
{
	return NULL;
}

__attribute__((always_inline, overloadable))
static inline __smrq_slink_t **
__smrq_lastp(struct smrq_stailq_head *head)
{
	__smrq_slink_t **last = &head->last;

	__builtin_assume(last != NULL);
	return last;
}

__attribute__((always_inline, overloadable))
static inline __smrq_link_t **
__smrq_lastp(struct smrq_tailq_head *head)
{
	__smrq_link_t **last = &head->last;

	__builtin_assume(last != NULL);
	return last;
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_insert(
	__smrq_slink_t         *prev,
	struct smrq_slink      *elem,
	struct smrq_slink      *next,
	__smrq_slink_t        **lastp)
{
	if (next == NULL && lastp) {
		if (*lastp != prev || smr_serialized_load(prev)) {
			__smr_stail_invalid(prev, *lastp);
		}
	}

	smr_serialized_store_relaxed(&elem->next, next);
	smr_serialized_store(prev, elem);
	if (next == NULL && lastp) {
		*lastp = &elem->next;
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_insert(
	__smrq_link_t          *prev,
	struct smrq_link       *elem,
	struct smrq_link       *next,
	__smrq_link_t         **lastp)
{
	if (next != NULL && next->prev != prev) {
		__smr_linkage_invalid(prev);
	}
	if (next == NULL && lastp) {
		if (*lastp != prev || smr_serialized_load(prev)) {
			__smr_tail_invalid(prev, *lastp);
		}
	}

	smr_serialized_store_relaxed(&elem->next, next);
	elem->prev = prev;
	smr_serialized_store(prev, elem);

	if (next != NULL) {
		next->prev = &elem->next;
	} else if (lastp) {
		*lastp = &elem->next;
	}
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_insert_relaxed(
	__smrq_slink_t         *prev,
	struct smrq_slink      *elem,
	struct smrq_slink      *next,
	__smrq_slink_t        **lastp)
{
	if (next == NULL && lastp) {
		if (*lastp != prev || smr_serialized_load(prev)) {
			__smr_stail_invalid(prev, *lastp);
		}
	}

	smr_serialized_store_relaxed(&elem->next, next);
	smr_serialized_store_relaxed(prev, elem);
	if (next == NULL && lastp) {
		*lastp = &elem->next;
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_insert_relaxed(
	__smrq_link_t          *prev,
	struct smrq_link       *elem,
	struct smrq_link       *next,
	__smrq_link_t         **lastp)
{
	if (next != NULL && next->prev != prev) {
		__smr_linkage_invalid(prev);
	}
	if (next == NULL && lastp) {
		if (*lastp != prev || smr_serialized_load(prev)) {
			__smr_tail_invalid(prev, *lastp);
		}
	}

	smr_serialized_store_relaxed(&elem->next, next);
	elem->prev = prev;
	smr_serialized_store_relaxed(prev, elem);

	if (next != NULL) {
		next->prev = &elem->next;
	} else if (lastp) {
		*lastp = &elem->next;
	}
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove_one(
	__smrq_slink_t         *prev,
	struct smrq_slink      *elem,
	__smrq_slink_t        **lastp)
{
	struct smrq_slink *next;

	/*
	 * Removal "skips" a link this way:
	 *
	 *     e1 ---> e2 ---> e3  becomes e1 -----------> e3
	 *
	 * When e3 was inserted, a release barrier was issued
	 * by smr_serialized_store().  We do not need to issue
	 * a release barrier upon removal because `next` carries
	 * a dependency on that smr_serialized_store()d value.
	 */
	next = smr_serialized_load(&elem->next);
	smr_serialized_store_relaxed(prev, next);
	if (next == NULL && lastp) {
		*lastp = prev;
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove_one(
	__smrq_link_t          *prev,
	struct smrq_link       *elem,
	__smrq_link_t         **lastp)
{
	struct smrq_link *next;

	next = smr_serialized_load(&elem->next);

	if (smr_serialized_load(prev) != elem) {
		__smr_linkage_invalid(prev);
	}
	if (next && next->prev != &elem->next) {
		__smr_linkage_invalid(&elem->next);
	}

	/*
	 * Removal "skips" a link this way:
	 *
	 *     e1 ---> e2 ---> e3  becomes e1 -----------> e3
	 *
	 * When e3 was inserted, a release barrier was issued
	 * by smr_serialized_store().  We do not need to issue
	 * a release barrier upon removal because `next` carries
	 * a dependency on that smr_serialized_store()d value.
	 */
	smr_serialized_store_relaxed(prev, next);

	if (next != NULL) {
		next->prev = prev;
	} else if (lastp) {
		*lastp = prev;
	}
	elem->prev = NULL;
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove(
	__smrq_slink_t         *first,
	struct smrq_slink      *elem,
	__smrq_slink_t        **lastp)
{
	__smrq_slink_t *prev = first;
	struct smrq_slink *cur;

	while ((cur = smr_serialized_load(prev)) != elem) {
		prev = &cur->next;
	}

	__smrq_serialized_remove_one(prev, elem, lastp);
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove(
	__smrq_link_t          *first __unused,
	struct smrq_link       *elem,
	__smrq_link_t         **lastp)
{
	__smrq_serialized_remove_one(elem->prev, elem, lastp);
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_replace(
	__smrq_slink_t         *first,
	struct smrq_slink      *old_elem,
	struct smrq_slink      *new_elem,
	__smrq_slink_t        **lastp)
{
	__smrq_slink_t *prev = first;
	struct smrq_slink *cur;
	struct smrq_slink *next;

	while ((cur = smr_serialized_load(prev)) != old_elem) {
		prev = &cur->next;
	}

	next = smr_serialized_load(&old_elem->next);
	smr_serialized_store_relaxed(&new_elem->next, next);
	smr_serialized_store(prev, new_elem);

	if (next == NULL && lastp) {
		*lastp = &new_elem->next;
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_replace(
	__smrq_link_t          *first __unused,
	struct smrq_link       *old_elem,
	struct smrq_link       *new_elem,
	__smrq_link_t         **lastp)
{
	__smrq_link_t *prev;
	struct smrq_link *next;

	prev = old_elem->prev;
	next = smr_serialized_load(&old_elem->next);

	if (smr_serialized_load(prev) != old_elem) {
		__smr_linkage_invalid(prev);
	}
	if (next && next->prev != &old_elem->next) {
		__smr_linkage_invalid(&old_elem->next);
	}

	smr_serialized_store_relaxed(&new_elem->next, next);
	new_elem->prev = prev;
	smr_serialized_store(prev, new_elem);

	if (next != NULL) {
		next->prev = &new_elem->next;
	} else if (lastp) {
		*lastp = &new_elem->next;
	}
	old_elem->prev = NULL;
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_append(
	__smrq_slink_t         *dst_first,
	__smrq_slink_t        **dst_lastp,
	__smrq_slink_t         *src_first,
	__smrq_slink_t        **src_lastp)
{
	struct smrq_slink *src = smr_serialized_load(src_first);
	struct smrq_slink *dst;

	if (dst_lastp) {
		if (src) {
			smr_serialized_store_relaxed(*dst_lastp, src);
			*dst_lastp = *src_lastp;
		}
	} else {
		while ((dst = smr_serialized_load(dst_first))) {
			dst_first = &dst->next;
		}
		smr_serialized_store_relaxed(dst_first, src);
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_append(
	__smrq_link_t          *dst_first,
	__smrq_link_t         **dst_lastp,
	__smrq_link_t          *src_first,
	__smrq_link_t         **src_lastp)
{
	struct smrq_link *src = smr_serialized_load(src_first);
	struct smrq_link *dst;

	if (dst_lastp) {
		if (src) {
			smr_serialized_store_relaxed(*dst_lastp, src);
			src->prev = *dst_lastp;
			*dst_lastp = *src_lastp;
		}
	} else {
		while ((dst = smr_serialized_load(dst_first))) {
			dst_first = &dst->next;
		}
		smr_serialized_store_relaxed(dst_first, src);
		if (src) {
			/*
			 * `dst_first` points at the last forward linkage
			 * after the walk; `&dst->next` would be computed
			 * from the NULL loop terminator.
			 */
			src->prev = dst_first;
		}
	}
}


__END_DECLS

#endif /* _KERN_SMR_H_ */