/*
 * Copyright (c) 2021-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KERN_SMR_H_
#define _KERN_SMR_H_

#include <sys/cdefs.h>
#include <stdbool.h>
#include <stdint.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/smr_types.h>
#include <kern/startup.h>
#include <os/atomic_private.h>

__BEGIN_DECLS

#pragma mark SMR pointers

/*
 * SMR Accessors are meant to provide safe access to SMR protected
 * pointers and prevent misuse and accidental access.
 *
 * Accessors are grouped by type:
 *   entered      - Use while in a read section (between smr_enter()
 *                  and smr_leave()).
 *   serialized   - Use while holding a lock that serializes writers.
 *                  Updates are synchronized with readers via included barriers.
 *   unserialized - Use after the memory is out of scope and no longer
 *                  visible to readers.
 *
 * All accesses include a parameter for an assert to verify the required
 * synchronization.
 */
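
/*
 * Example: typical accessor usage (illustrative sketch, not part of
 * the API; `struct conf`, `gconf` and the writer-side `conf_lock` are
 * hypothetical, and the SMR pointer is assumed to be declared with
 * SMR_POINTER() from <kern/smr_types.h>):
 *
 *	static SMR_POINTER(struct conf *) gconf;
 *
 *	// reader, between smr_enter() and smr_leave()
 *	struct conf *c = smr_entered_load(&gconf);
 *
 *	// writer, while holding conf_lock
 *	smr_serialized_store(&gconf, new_conf);
 *
 *	// teardown, once the pointer is no longer visible to readers
 *	struct conf *old = smr_unserialized_load(&gconf);
 */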


/*!
 * @macro smr_unsafe_load()
 *
 * @brief
 * Read from an SMR protected pointer without any synchronization.
 *
 * @discussion
 * This returns an integer on purpose, as dereferencing the result is
 * generally unsafe.
 */
#define smr_unsafe_load(ptr) \
	({ (uintptr_t)((ptr)->__smr_ptr); })

/*!
 * @macro smr_entered_load()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section.
 */
#define smr_entered_load(ptr) \
	({ (ptr)->__smr_ptr; })

/*!
 * @macro smr_entered_load_assert()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section.
 */
#define smr_entered_load_assert(ptr, smr)  ({ \
	assert(smr_entered(smr)); \
	(ptr)->__smr_ptr; \
})

/*!
 * @macro smr_entered_load_acquire()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section (with acquire
 * fence).
 */
#define smr_entered_load_acquire(ptr) \
	os_atomic_load(&(ptr)->__smr_ptr, acquire)

/*!
 * @macro smr_entered_load_acquire_assert()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section (with acquire
 * fence).
 */
#define smr_entered_load_acquire_assert(ptr, smr)  ({ \
	assert(smr_entered(smr)); \
	os_atomic_load(&(ptr)->__smr_ptr, acquire); \
})

/*!
 * @macro smr_serialized_load_assert()
 *
 * @brief
 * Read from an SMR protected pointer while serialized by an
 * external mechanism.
 */
#define smr_serialized_load_assert(ptr, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_load: lock not held"); \
	(ptr)->__smr_ptr; \
})

/*!
 * @macro smr_serialized_load()
 *
 * @brief
 * Read from an SMR protected pointer while serialized by an
 * external mechanism.
 */
#define smr_serialized_load(ptr) \
	smr_serialized_load_assert(ptr, true)

/*!
 * @macro smr_init_store()
 *
 * @brief
 * Store @c value to an SMR protected pointer during initialization.
 */
#define smr_init_store(ptr, value) \
	({ (ptr)->__smr_ptr = value; })

/*!
 * @macro smr_clear_store()
 *
 * @brief
 * Clears (sets to 0) an SMR protected pointer (this is always allowed).
 */
#define smr_clear_store(ptr) \
	smr_init_store(ptr, 0)

/*!
 * @macro smr_serialized_store_assert()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 */
#define smr_serialized_store_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_store: lock not held"); \
	os_atomic_thread_fence(release); \
	(ptr)->__smr_ptr = value; \
})

/*!
 * @macro smr_serialized_store()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 */
#define smr_serialized_store(ptr, value) \
	smr_serialized_store_assert(ptr, value, true)

/*!
 * @macro smr_serialized_store_relaxed_assert()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * This function can be used when storing a value that was already
 * previously stored with smr_serialized_store() (for example during
 * a linked list removal).
 */
#define smr_serialized_store_relaxed_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_store_relaxed: lock not held"); \
	(ptr)->__smr_ptr = value; \
})

/*!
 * @macro smr_serialized_store_relaxed()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * This function can be used when storing a value that was already
 * previously stored with smr_serialized_store() (for example during
 * a linked list removal).
 */
#define smr_serialized_store_relaxed(ptr, value) \
	smr_serialized_store_relaxed_assert(ptr, value, true)

/*!
 * @macro smr_serialized_swap_assert()
 *
 * @brief
 * Swap @c value with an SMR protected pointer and return the old value
 * while serialized by an external mechanism.
 *
 * @discussion
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define smr_serialized_swap_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_swap: lock not held"); \
	os_atomic_xchg(&(ptr)->__smr_ptr, value, release); \
})

/*!
 * @macro smr_serialized_swap()
 *
 * @brief
 * Swap @c value with an SMR protected pointer and return the old value
 * while serialized by an external mechanism.
 *
 * @discussion
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define smr_serialized_swap(ptr, value) \
	smr_serialized_swap_assert(ptr, value, true)

/*!
 * @macro smr_unserialized_load()
 *
 * @brief
 * Read from an SMR protected pointer when no serialization is required,
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define smr_unserialized_load(ptr) \
	({ (ptr)->__smr_ptr; })

/*!
 * @macro smr_unserialized_store()
 *
 * @brief
 * Store to an SMR protected pointer when no serialization is required,
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define smr_unserialized_store(ptr, value) \
	({ (ptr)->__smr_ptr = value; })


#pragma mark SMR queues

/*
 * SMR queues are queues that are meant to be read under SMR critical sections
 * concurrently with possible updates to the queue.
 *
 * /!\ Such read operations CAN ONLY BE PERFORMED IN FORWARD DIRECTION. /!\
 *
 * Queues can be either:
 * - lists where the head is a single pointer,
 *   and insertions can only be at the head;
 * - tail queues where the head is two pointers,
 *   and insertions can be either at the head or the tail.
 *
 * Queue linkages can either be single forward pointer linkages or double
 * forward/backward linkages. The latter supports O(1) deletion.
 *
 * The entire API surface uses type inference for the implementations,
 * which makes it relatively easy to switch between the four queue types
 * with very minimal API changes (mostly the types of list heads and fields).
 */

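
/*
 * Example: a singly linked SMR list searched under a non preemptible
 * read section (illustrative sketch; `struct item`, `all_items`,
 * `needle` and the `items_smr` domain are hypothetical):
 *
 *	struct item {
 *		struct smrq_slink    link;
 *		uint64_t             key;
 *	};
 *	static struct smrq_slist_head all_items;
 *
 *	struct item *it;
 *
 *	smr_enter(&items_smr);
 *	smrq_entered_foreach(it, &all_items, link) {
 *		if (it->key == needle) {
 *			break;
 *		}
 *	}
 *	smr_leave(&items_smr);
 */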

/*!
 * @macro smrq_init
 *
 * @brief
 * Initializes an SMR queue head.
 */
#define smrq_init(head)  ({ \
	__auto_type __head = (head); \
	\
	smr_init_store(&__head->first, NULL); \
	if (__smrq_lastp(__head)) { \
		*__smrq_lastp(__head) = &__head->first; \
	} \
})


/*!
 * @macro smrq_empty
 *
 * @brief
 * Returns whether an SMR queue is empty; can be called from any context.
 */
#define smrq_empty(head) \
	(smr_unsafe_load(&(head)->first) == 0)


/*!
 * @macro smrq_entered_first
 *
 * @brief
 * Returns the first element of an SMR queue, while in a read section.
 */
#define smrq_entered_first(head, type_t, field) \
	__container_of_safe(smr_entered_load(&(head)->first), type_t, field)


/*!
 * @macro smrq_entered_next
 *
 * @brief
 * Returns the next element of an SMR queue element, while in a read section.
 */
#define smrq_entered_next(elem, field) \
	__container_of_safe(smr_entered_load(&(elem)->field.next), \
	    typeof(*(elem)), field)


/*!
 * @macro smrq_entered_foreach
 *
 * @brief
 * Enumerates an SMR queue, while in a read section.
 */
#define smrq_entered_foreach(it, head, field) \
	for (__auto_type __it = smr_entered_load(&(head)->first); \
	    ((it) = __container_of_safe(__it, typeof(*(it)), field)); \
	    __it = smr_entered_load(&__it->next))


/*!
 * @macro smrq_serialized_first
 *
 * @brief
 * Returns the first element of an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_first(head, type_t, link) \
	__container_of_safe(smr_serialized_load(&(head)->first), type_t, link)

/*!
 * @macro smrq_serialized_next
 *
 * @brief
 * Returns the next element of an SMR queue element, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_next(elem, field) \
	__container_of_safe(smr_serialized_load(&(elem)->field.next), \
	    typeof(*(elem)), field)

/*!
 * @macro smrq_serialized_foreach
 *
 * @brief
 * Enumerates an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_foreach(it, head, field) \
	for (__auto_type __it = smr_serialized_load(&(head)->first); \
	    ((it) = __container_of_safe(__it, typeof(*(it)), field)); \
	    __it = smr_serialized_load(&__it->next))

/*!
 * @macro smrq_serialized_foreach_safe
 *
 * @brief
 * Enumerates an SMR queue, while being serialized
 * by an external mechanism.
 *
 * @discussion
 * This variant supports removing the current element from the queue.
 */
#define smrq_serialized_foreach_safe(it, head, field) \
	for (__auto_type __it = smr_serialized_load(&(head)->first), \
	    __next_it = __it; \
	    ((it) = __container_of_safe(__it, typeof(*(it)), field)) && \
	    ((__next_it = smr_serialized_load(&__it->next)), 1); \
	    __it = __next_it)


/*!
 * @macro smrq_serialized_insert_head
 *
 * @brief
 * Inserts an element at the head of an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_insert_head(head, elem)  ({ \
	__auto_type __head = (head); \
	\
	__smrq_serialized_insert(&__head->first, (elem), \
	    smr_serialized_load(&__head->first), __smrq_lastp(__head)); \
})


/*!
 * @macro smrq_serialized_insert_tail
 *
 * @brief
 * Inserts an element at the tail of an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_insert_tail(head, elem)  ({ \
	__auto_type __head = (head); \
	\
	__smrq_serialized_insert(__head->last, (elem), \
	    NULL, &__head->last); \
})


/*!
 * @macro smrq_serialized_insert_head_relaxed
 *
 * @brief
 * Inserts an element at the head of an SMR queue, while being serialized
 * by an external mechanism, without any barrier.
 */
#define smrq_serialized_insert_head_relaxed(head, elem)  ({ \
	__auto_type __head = (head); \
	\
	__smrq_serialized_insert_relaxed(&__head->first, (elem), \
	    smr_serialized_load(&__head->first), __smrq_lastp(__head)); \
})


/*!
 * @macro smrq_serialized_insert_tail_relaxed
 *
 * @brief
 * Inserts an element at the tail of an SMR queue, while being serialized
 * by an external mechanism, without any barrier.
 */
#define smrq_serialized_insert_tail_relaxed(head, elem)  ({ \
	__auto_type __head = (head); \
	\
	__smrq_serialized_insert_relaxed(__head->last, (elem), \
	    NULL, &__head->last); \
})


/*!
 * @macro smrq_serialized_remove
 *
 * @brief
 * Removes an element from an SMR queue, while being serialized
 * by an external mechanism.
 *
 * @discussion
 * The @c head argument is actually unused for the @c smrq_list queue type.
 * It is still advised to pass it; the compiler should be able to optimize
 * the code away, as computing a list head ought to have no side effects.
 */
#define smrq_serialized_remove(head, elem)  ({ \
	__auto_type __head = (head); \
	\
	__smrq_serialized_remove(&__head->first, (elem), __smrq_lastp(__head)); \
})


/*!
 * @macro smrq_serialized_replace
 *
 * @brief
 * Replaces an element on an SMR queue with another at the same spot,
 * while being serialized by an external mechanism.
 */
#define smrq_serialized_replace(head, old_elem, new_elem)  ({ \
	__auto_type __head = (head); \
	\
	__smrq_serialized_replace(&__head->first, \
	    (old_elem), (new_elem), __smrq_lastp(__head)); \
})


/*!
 * @macro smrq_serialized_iter
 *
 * @brief
 * Enumerates an SMR singly linked queue, while being serialized
 * by an external mechanism.
 *
 * @discussion
 * This is for manual loops that typically perform erasures.
 *
 * The body of the loop must move the cursor (exactly once) using:
 * - smrq_serialized_iter_next() to go to the next element,
 * - smrq_serialized_iter_erase() to erase the current element.
 *
 * The iterator variable will _not_ be updated until the next
 * loop iteration.
 *
 * This form is preferred to smrq_serialized_foreach_safe()
 * for singly linked lists, as smrq_serialized_iter_erase()
 * is O(1) as opposed to smrq_serialized_remove().
 */
#define smrq_serialized_iter(it, head, field) \
	for (__smrq_slink_t *__prev_##it = &(head)->first, \
	    *__chk_##it = __prev_##it; \
	    ((it) = __container_of_safe(smr_serialized_load(__prev_##it), \
	    typeof(*(it)), field)); \
	    assert(__chk_##it), __chk_##it = __prev_##it)

/*!
 * @macro smrq_serialized_iter_next
 *
 * @brief
 * Goes to the next element inside an smrq_serialized_iter() loop.
 */
#define smrq_serialized_iter_next(it, field)  ({ \
	assert(__chk_##it == __prev_##it); \
	__chk_##it = NULL; \
	__prev_##it = &(it)->field.next; \
})

/*!
 * @macro smrq_serialized_iter_erase
 *
 * @brief
 * Erases the element pointed at by the cursor.
 */
#define smrq_serialized_iter_erase(it, field)  ({ \
	assert(__chk_##it == __prev_##it); \
	__chk_##it = NULL; \
	__smrq_serialized_remove_one(__prev_##it, &(it)->field, NULL); \
})
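
/*
 * Example: an erasure loop using the iterator cursor (illustrative
 * sketch; `struct item` and `all_items` as in the example above,
 * `item_is_dead()` and `item_retire()` are hypothetical, and writers
 * are serialized by an external lock):
 *
 *	struct item *it;
 *
 *	smrq_serialized_iter(it, &all_items, link) {
 *		if (item_is_dead(it)) {
 *			smrq_serialized_iter_erase(it, link);
 *			item_retire(it); // e.g. defers freeing via smr_call()
 *		} else {
 *			smrq_serialized_iter_next(it, link);
 *		}
 *	}
 */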


/*!
 * @macro smrq_serialized_append
 *
 * @brief
 * Appends a given list at the end of the previous one.
 *
 * @discussion
 * /!\ WARNING /!\: this doesn't "move" the "source" queue like the *_CONCAT
 * macros from <sys/queue.h> do, as it is useful to merge/split hash queues
 * concurrently with readers, while allowing readers to still read via the
 * "source" queue.
 *
 * However, the "source" queue needs to be reset to a valid state
 * if it is to be used again.
 */
#define smrq_serialized_append(dst, src)  ({ \
	__auto_type __src = (src); \
	__auto_type __dst = (dst); \
	\
	__smrq_serialized_append(&__dst->first, __smrq_lastp(__dst), \
	    &__src->first, __smrq_lastp(__src)); \
})
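
/*
 * Example: merging one hash bucket into another (illustrative sketch;
 * both buckets are hypothetical `struct smrq_slist_head` chains).
 * Readers may keep traversing through `src_bucket` into the merged
 * chain; re-initialize it only once no reader can still use it:
 *
 *	smrq_serialized_append(&dst_bucket, &src_bucket);
 *	// ... grace period (e.g. smr_synchronize()) ...
 *	smrq_init(&src_bucket);
 */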


#pragma mark SMR domains

/*!
 * @enum smr_flags_t
 *
 * @brief
 * Options to pass to smr_domain_create().
 *
 * @const SMR_NONE
 * Default values for the flags.
#if XNU_KERNEL_PRIVATE
 *
 * @const SMR_SLEEPABLE
 * Create a sleepable SMR domain.
#endif
 */
__options_closed_decl(smr_flags_t, unsigned long, {
	SMR_NONE        = 0x00000000,
#if XNU_KERNEL_PRIVATE
	SMR_SLEEPABLE   = 0x00000001,
#endif
});

/*!
 * @function smr_domain_create()
 *
 * @brief
 * Create an SMR domain.
 *
 * @discussion
 * Be mindful when creating SMR domains, and consider carefully
 * whether to add a new one or consolidate into an existing one.
 *
 *
 * Memory usage
 * ~~~~~~~~~~~~
 *
 * SMR domains are fairly large structures that scale with the number
 * of cores of the machine. They are meant to be used in a coarse
 * grained manner.
 *
 * In addition to that, when @c smr_call() is used with the domain,
 * the queues of callbacks are drained based on memory pressure within
 * the domain. The more domains, the more dormant memory might exist.
 *
 * In general, memory considerations drive toward fewer domains.
 *
 *
 * Scalability
 * ~~~~~~~~~~~
 *
 * An SMR domain is built on top of an atomic state that is used
 * to perform grace period detection. The more "write" activity
 * there is on the domain (@c smr_call(), @c smr_advance(), etc...),
 * the more this atomic might become contended. In particular,
 * certain usage patterns might scale extremely well independently,
 * but cause higher contention when sharing a domain.
 *
 * Another thing to consider is that when @c smr_call() is being used,
 * if the callbacks act on vastly different data structures, then as
 * the callbacks are being drained, cache misses will be higher.
 *
 * However, the more domains are in use, the more likely it is that
 * touching any given domain's state will cause a cache miss.
 *
 * Generally, scalability considerations drive toward balanced
 * coarse-grained domains.
 *
 *
 * Invariants
 * ~~~~~~~~~~
 *
 * The last aspect leading to the decision of creating versus reusing
 * an SMR domain is about the invariants that these domains protect.
 *
 * Object graphs that are protected with SMR and are used together
 * in many workloads will likely need to share an SMR domain
 * in order to provide the required guarantees. If @c smr_call()
 * callbacks in a given domain regularly cause downstream @c smr_call()
 * into a different domain, that is probably a sign that these
 * domains should be shared.
 *
 * Another aspect to consider is that using @c smr_synchronize()
 * or @c smr_barrier() can lead to two classes of problems:
 *
 * - these operations are extremely heavy, and if some subsystem needs to
 *   perform them on several domains, performance will be disappointing.
 *
 * - these operations are akin to taking a "write lock" on the domain,
 *   and as such can cause deadlocks when used improperly.
 *   Using a coarser grained unique domain is a good way to simplify
 *   reasoning about the locking dependencies between SMR domains
 *   and other regular locks.
 *
 *
 * Guidance
 * ~~~~~~~~
 *
 * In general, the entire kernel should have relatively few SMR domains,
 * at the scale of the big subsystems of the kernel (think: Mach IPC, Mach VM,
 * VFS, Networking, ...).
 *
 * When write operations (@c smr_call(), @c smr_synchronize(), ...) are used
 * rarely, consider using the system wide default domains.
 */
extern smr_t smr_domain_create(smr_flags_t flags, const char *name);

/*!
 * @function smr_domain_free()
 *
 * @brief
 * Destroys an SMR domain previously created with @c smr_domain_create().
 */
extern void smr_domain_free(smr_t smr);
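
/*
 * Example: dynamic domain lifecycle (illustrative sketch; the "net"
 * name and the read-side contents are hypothetical):
 *
 *	smr_t net_smr = smr_domain_create(SMR_NONE, "net");
 *
 *	smr_enter(net_smr);
 *	// ... read SMR protected state ...
 *	smr_leave(net_smr);
 *
 *	smr_domain_free(net_smr);
 */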


/*!
 * @function smr_entered()
 *
 * @brief
 * Returns whether an SMR critical section is entered.
 */
extern bool smr_entered(smr_t smr) __result_use_check;

/*!
 * @function smr_enter()
 *
 * @brief
 * Enter a non preemptible SMR critical section.
 *
 * @discussion
 * Entering an SMR critical section is non reentrant
 * (entering it recursively is undefined and will panic on development
 * kernels).
 *
 * @c smr_leave() must be called to end this section.
 */
extern void smr_enter(smr_t smr);

/*!
 * @function smr_leave()
 *
 * @brief
 * Leave a non preemptible SMR critical section.
 */
extern void smr_leave(smr_t smr);


/*!
 * @function smr_call()
 *
 * @brief
 * Defer making a call until it is safe to assume all readers
 * will observe any update prior to this call.
 *
 * @discussion
 * The target SMR domain must NOT be entered when making this call.
 *
 * The passed @c size doesn't have to be precise; it should be a rough
 * estimate of the memory that will be reclaimed when the call is made.
 *
 * This function gives no guarantee of forward progress,
 * unless the magic SMR_CALL_EXPEDITE size is passed to @c smr_call().
 */
extern void smr_call(smr_t smr, smr_node_t node, vm_size_t size, smr_cb_t cb);

#define SMR_CALL_EXPEDITE  ((vm_size_t)~0)
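
/*
 * Example: deferred reclamation with an embedded node (illustrative
 * sketch; `struct item` and `items_smr` are hypothetical, and the
 * callback is assumed to receive the queued node, recovering the
 * element the way __container_of() is used elsewhere in this file):
 *
 *	struct item {
 *		struct smr_node    it_node;
 *		// ...
 *	};
 *
 *	static void
 *	item_free_cb(smr_node_t node)
 *	{
 *		struct item *it = __container_of(node, struct item, it_node);
 *
 *		kfree_type(struct item, it);
 *	}
 *
 *	// after unlinking `it` from all SMR visible structures:
 *	smr_call(&items_smr, &it->it_node, sizeof(*it), item_free_cb);
 */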

/*!
 * @function smr_synchronize()
 *
 * @brief
 * Wait until all readers have observed any updates made prior to this call.
 *
 * @discussion
 * The target SMR domain must NOT be entered when making this call.
 *
 * This function is quite expensive, and asynchronous deferred processing
 * using @c smr_call() should be used instead when possible.
 *
 * Reserve using this call for events that are extremely rare (like system
 * configuration events such as configuring networking interfaces, changing
 * system wide security policies, or loading/unloading a kernel extension).
 *
 * This function should typically be called with preemption enabled,
 * and no locks held.
 */
extern void smr_synchronize(smr_t smr);

/*!
 * @function smr_barrier()
 *
 * @brief
 * Wait until all readers have observed any updates made prior to this call,
 * and all @c smr_call() callbacks dispatched prior to this call on any core
 * have completed.
 *
 * @discussion
 * The target SMR domain must NOT be entered when making this call.
 *
 * This function is typically used when some data structure is being
 * accessed by @c smr_call() callbacks and that data structure needs
 * to be retired.
 *
 * Reserve using this call for events that are extremely rare (like system
 * configuration events such as configuring networking interfaces, changing
 * system wide security policies, or loading/unloading a kernel extension).
 *
 * This function should typically be called with preemption enabled,
 * and no locks held.
 */
extern void smr_barrier(smr_t smr);


#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)
#pragma mark - XNU only
#pragma mark XNU only: SMR domains advanced

#define SMR_SEQ_INVALID    ((smr_seq_t)0)
#define SMR_SEQ_SLEEPABLE  ((smr_seq_t)1) /* only on smr_pcpu::rd_seq */
#define SMR_SEQ_INIT       ((smr_seq_t)2)
#define SMR_SEQ_INC        ((smr_seq_t)4)

typedef long smr_delta_t;

#define SMR_SEQ_DELTA(a, b)    ((smr_delta_t)((a) - (b)))
#define SMR_SEQ_CMP(a, op, b)  (SMR_SEQ_DELTA(a, b) op 0)
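
/*
 * Example: sequence comparisons are wrap-safe because they are made
 * on the signed delta, e.g.:
 *
 *	if (SMR_SEQ_CMP(rd_seq, >=, goal)) {
 *		// all readers have observed `goal`
 *	}
 */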

/*!
 * @typedef smr_clock_t
 *
 * @brief
 * Represents an SMR domain clock, internal type not manipulated by clients.
 */
typedef struct {
	smr_seq_t       s_rd_seq;
	smr_seq_t       s_wr_seq;
} smr_clock_t;

#define SMR_NAME_MAX 24

/*!
 * @typedef smr_t
 *
 * @brief
 * Declares an SMR domain of synchronization.
 */
struct smr {
	smr_clock_t     smr_clock;
	struct smr_pcpu *smr_pcpu;
	unsigned long   smr_flags;
	unsigned long   smr_early;
	char            smr_name[SMR_NAME_MAX];
} __attribute__((aligned(64)));

/*!
 * @macro SMR_DEFINE_FLAGS
 *
 * @brief
 * Define an SMR domain with specific create flags.
 */
#define SMR_DEFINE_FLAGS(var, name, flags) \
	struct smr var = { \
		.smr_clock.s_rd_seq = SMR_SEQ_INIT, \
		.smr_clock.s_wr_seq = SMR_SEQ_INIT, \
		.smr_flags = (flags), \
		.smr_name = "" name, \
	}; \
	STARTUP_ARG(TUNABLES, STARTUP_RANK_LAST, __smr_domain_init, &(var)); \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __smr_domain_init, &(var))

/*!
 * @macro SMR_DEFINE
 *
 * @brief
 * Define an SMR domain.
 */
#define SMR_DEFINE(var, name) \
	SMR_DEFINE_FLAGS(var, name, SMR_NONE)


/*!
 * @macro SMR_DEFINE_SLEEPABLE
 *
 * @brief
 * Define a sleepable SMR domain.
 */
#define SMR_DEFINE_SLEEPABLE(var, name) \
	SMR_DEFINE_FLAGS(var, name, SMR_SLEEPABLE)
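
/*
 * Example: a statically defined domain (illustrative sketch; the
 * `net_smr` name is hypothetical):
 *
 *	SMR_DEFINE(net_smr, "net");
 *
 *	smr_enter(&net_smr);
 *	// ...
 *	smr_leave(&net_smr);
 */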


/*!
 * @function smr_advance()
 *
 * @brief
 * Advance the write sequence and return the value
 * for use as a wait goal.
 *
 * @discussion
 * This guarantees that any changes made by the calling thread
 * prior to this call will be visible to all threads after
 * the read sequence meets or exceeds the return value.
 */
extern smr_seq_t smr_advance(smr_t smr) __result_use_check;

/*!
 * @function smr_deferred_advance()
 *
 * @brief
 * Pretend-advance the write sequence and return the value
 * for use as a wait goal.
 *
 * @discussion
 * This guarantees that any changes made by the calling thread
 * prior to this call will be visible to all threads after
 * the read sequence meets or exceeds the return value.
 *
 * Unlike smr_advance(), the global clock isn't really advanced;
 * this only sets a goal in the future. It can be used to control
 * the pace of updating the global clock and avoid global atomics.
 *
 * In order for the clock to advance, clients of this API must call
 * @c smr_deferred_advance_commit() with the goal returned by this call.
 *
 * Note that calls to @c smr_advance() or @c smr_wait() when passed
 * the goal returned by this function would also allow the clock
 * to make progress and are legal (yet less efficient) calls to make.
 */
extern smr_seq_t smr_deferred_advance(smr_t smr) __result_use_check;

/*!
 * @function smr_deferred_advance_commit()
 *
 * @brief
 * Actually advance the write sequence to the goal returned by a previous
 * call to @c smr_deferred_advance().
 */
extern void smr_deferred_advance_commit(smr_t smr, smr_seq_t seq);
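
/*
 * Example: batching clock updates with a deferred advance
 * (illustrative sketch):
 *
 *	smr_seq_t goal = smr_deferred_advance(smr);
 *
 *	// ... record `goal` alongside the pending work; later, when
 *	// flushing the batch:
 *	smr_deferred_advance_commit(smr, goal);
 *	smr_wait(smr, goal);
 */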


/*!
 * @function smr_poll
 *
 * @brief
 * Poll to determine whether all readers have observed the @c goal
 * write sequence number.
 *
 * @discussion
 * This function is safe to be called from preemption disabled context,
 * and its worst case complexity is O(ncpu).
 *
 * @returns true if the goal is met and false if not.
 */
extern bool smr_poll(smr_t smr, smr_seq_t goal) __result_use_check;

/*!
 * @function smr_wait
 *
 * @brief
 * Wait until all readers have observed
 * the @c goal write sequence number.
 *
 * @discussion
 * This function is safe to be called from preemption disabled context
 * as it never explicitly blocks; however, doing so is not recommended.
 */
extern void smr_wait(smr_t smr, smr_seq_t goal);
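
/*
 * Example: a classical retire pattern combining an advance with
 * polling or waiting (illustrative sketch; `item_free()` is
 * hypothetical):
 *
 *	// unpublish `it`, then:
 *	smr_seq_t goal = smr_advance(smr);
 *
 *	if (!smr_poll(smr, goal)) {
 *		smr_wait(smr, goal); // or requeue and re-poll later
 *	}
 *	item_free(it);
 */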


#pragma mark XNU only: major sleepable SMR domains
/*
 * Note: this is private for now because sleepable sections that do "bad"
 * things (such as doing an upcall to userspace, or doing VM allocations)
 * risk stalling the reclamation worker threads, which are a singleton
 * resource.
 *
 * Until this can be mitigated or designed better, this stays private.
 */

/*!
 * @typedef smr_tracker_t
 *
 * @brief
 * Structure used to track active sleepable SMR sections.
 *
 * @field smrt_domain  the entered SMR domain.
 * @field smrt_seq     the SMR sequence at the time of smr_enter_sleepable().
 * @field smrt_link    linkage used to track stalled sections.
 * @field smrt_stack   linkage used to track entered sections.
 * @field smrt_ctid    (if stalled) the ctid of the thread in this section.
 * @field smrt_cpu     (if stalled) the cpu the thread was on when stalled.
 */
typedef struct smr_tracker {
	smr_t               smrt_domain;
	smr_seq_t           smrt_seq;
	struct smrq_link    smrt_link;
	struct smrq_slink   smrt_stack;
	uint32_t            smrt_ctid;
	int                 smrt_cpu;
} *smr_tracker_t;

/*!
 * @function smr_enter_sleepable()
 *
 * @brief
 * Enter a sleepable SMR critical section.
 *
 * @discussion
 * Entering an SMR critical section is non recursive
 * (entering it recursively is undefined and will panic on development
 * kernels).
 *
 * @c smr_leave_sleepable() must be called to end this section,
 * passing the same tracker pointer.
 *
 * The SMR domain must have been created with the @c SMR_SLEEPABLE flag.
 *
 * It is permitted to do operations that might block within such a section,
 * such as acquiring a lock or freeing memory.
 *
 * It is forbidden to perform operations that wait for an unbounded amount of
 * time such as waiting for networking packets or even a hardware driver event,
 * as these could cause grace periods (and memory reclamation) to stall for
 * a very long time.
 */
extern void smr_enter_sleepable(smr_t smr, smr_tracker_t tracker);

/*!
 * @function smr_leave_sleepable()
 *
 * @brief
 * Leave a sleepable SMR critical section entered with @c smr_enter_sleepable().
 */
extern void smr_leave_sleepable(smr_t smr, smr_tracker_t tracker);
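
/*
 * Example: a sleepable read section (illustrative sketch; the domain
 * must have been defined with SMR_DEFINE_SLEEPABLE() or created with
 * SMR_SLEEPABLE):
 *
 *	struct smr_tracker t;
 *
 *	smr_enter_sleepable(&net_smr, &t);
 *	// ... may block for a bounded time, e.g. take a lock ...
 *	smr_leave_sleepable(&net_smr, &t);
 */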


#pragma mark XNU only: major subsystems SMR domains

/*!
 * @brief
 * A global system wide non preemptible domain.
 *
 * @discussion
 * This is provided as a fallback for when a specific SMR domain
 * would be overkill.
 *
 * Try not to use the @c smr_system name directly; instead define
 * a subsystem domain that happens to be defined to it, so that
 * understanding the invariants being provided is easier.
 */
extern struct smr smr_system;

/*!
 * @brief
 * A global system wide sleepable domain.
 *
 * @discussion
 * This is provided as a fallback for when a specific SMR domain
 * would be overkill.
 *
 * Try not to use the @c smr_system_sleepable name directly;
 * instead define a subsystem domain that happens to be defined to it,
 * so that understanding the invariants being provided is easier.
 */
extern struct smr smr_system_sleepable;


/*!
 * @macro smr_ipc
 *
 * @brief
 * The SMR domain for the Mach IPC subsystem.
 */
#define smr_ipc                 smr_system
#define smr_ipc_entered()       smr_entered(&smr_ipc)
#define smr_ipc_enter()         smr_enter(&smr_ipc)
#define smr_ipc_leave()         smr_leave(&smr_ipc)

#define smr_ipc_call(n, sz, cb) smr_call(&smr_ipc, n, sz, cb)
#define smr_ipc_synchronize()   smr_synchronize(&smr_ipc)
#define smr_ipc_barrier()       smr_barrier(&smr_ipc)


/*!
 * @macro smr_proc_task
 *
 * @brief
 * The SMR domain for the proc/task and adjacent objects.
 */
#define smr_proc_task           smr_system
#define smr_proc_task_entered() smr_entered(&smr_proc_task)
#define smr_proc_task_enter()   smr_enter(&smr_proc_task)
#define smr_proc_task_leave()   smr_leave(&smr_proc_task)

#define smr_proc_task_call(n, sz, cb) smr_call(&smr_proc_task, n, sz, cb)
#define smr_proc_task_synchronize()   smr_synchronize(&smr_proc_task)
#define smr_proc_task_barrier()       smr_barrier(&smr_proc_task)


/*!
 * @macro smr_iokit
 *
 * @brief
 * The SMR domain for IOKit.
 */
#define smr_iokit               smr_system
#define smr_iokit_entered()     smr_entered(&smr_iokit)
#define smr_iokit_enter()       smr_enter(&smr_iokit)
#define smr_iokit_leave()       smr_leave(&smr_iokit)

#define smr_iokit_call(n, sz, cb) smr_call(&smr_iokit, n, sz, cb)
#define smr_iokit_synchronize()   smr_synchronize(&smr_iokit)
#define smr_iokit_barrier()       smr_barrier(&smr_iokit)


#pragma mark XNU only: implementation details

extern void __smr_domain_init(smr_t);

#ifdef MACH_KERNEL_PRIVATE
struct processor;

extern bool smr_entered_cpu_noblock(smr_t smr, int cpu) __result_use_check;

extern void smr_ack_ipi(void);

extern void smr_mark_active_trackers_stalled(struct thread *self);

__options_closed_decl(smr_cpu_reason_t, uint8_t, {
	SMR_CPU_REASON_NONE      = 0x00,
	SMR_CPU_REASON_OFFLINE   = 0x01,
	SMR_CPU_REASON_IGNORED   = 0x02,
	SMR_CPU_REASON_ALL       = 0x03,
});

extern void smr_cpu_init(struct processor *);
extern void smr_cpu_up(struct processor *, smr_cpu_reason_t);
extern void smr_cpu_down(struct processor *, smr_cpu_reason_t);

extern void smr_cpu_join(struct processor *, uint64_t ctime);
extern void smr_cpu_tick(uint64_t ctime, bool safe_point);
extern void smr_cpu_leave(struct processor *, uint64_t ctime);

extern void smr_maintenance(uint64_t ctime);

#if CONFIG_QUIESCE_COUNTER
extern void cpu_quiescent_set_storage(uint64_t _Atomic *ptr);
#endif
#endif /* MACH_KERNEL_PRIVATE */

extern uint32_t smr_cpu_checkin_get_min_interval_us(void);

extern void smr_cpu_checkin_set_min_interval_us(uint32_t new_value);

#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */
#pragma mark - implementation details
#pragma mark implementation details: SMR queues

extern void __smr_linkage_invalid(__smrq_link_t *link) __abortlike;
extern void __smr_stail_invalid(__smrq_slink_t *link, __smrq_slink_t *last) __abortlike;
extern void __smr_tail_invalid(__smrq_link_t *link, __smrq_link_t *last) __abortlike;

__attribute__((always_inline, overloadable))
static inline __smrq_slink_t **
__smrq_lastp(struct smrq_slist_head *head __unused)
{
	return NULL;
}

__attribute__((always_inline, overloadable))
static inline __smrq_link_t **
__smrq_lastp(struct smrq_list_head *head __unused)
{
	return NULL;
}

__attribute__((always_inline, overloadable))
static inline __smrq_slink_t **
__smrq_lastp(struct smrq_stailq_head *head)
{
	__smrq_slink_t **last = &head->last;

	__builtin_assume(last != NULL);
	return last;
}

__attribute__((always_inline, overloadable))
static inline __smrq_link_t **
__smrq_lastp(struct smrq_tailq_head *head)
{
	__smrq_link_t **last = &head->last;

	__builtin_assume(last != NULL);
	return last;
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_insert(
	__smrq_slink_t         *prev,
	struct smrq_slink      *elem,
	struct smrq_slink      *next,
	__smrq_slink_t        **lastp)
{
	if (next == NULL && lastp) {
		if (*lastp != prev || smr_serialized_load(prev)) {
			__smr_stail_invalid(prev, *lastp);
		}
	}

	smr_serialized_store_relaxed(&elem->next, next);
	smr_serialized_store(prev, elem);
	if (next == NULL && lastp) {
		*lastp = &elem->next;
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_insert(
	__smrq_link_t          *prev,
	struct smrq_link       *elem,
	struct smrq_link       *next,
	__smrq_link_t         **lastp)
{
	if (next != NULL && next->prev != prev) {
		__smr_linkage_invalid(prev);
	}
	if (next == NULL && lastp) {
		if (*lastp != prev || smr_serialized_load(prev)) {
			__smr_tail_invalid(prev, *lastp);
		}
	}

	smr_serialized_store_relaxed(&elem->next, next);
	elem->prev = prev;
	smr_serialized_store(prev, elem);

	if (next != NULL) {
		next->prev = &elem->next;
	} else if (lastp) {
		*lastp = &elem->next;
	}
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_insert_relaxed(
	__smrq_slink_t         *prev,
	struct smrq_slink      *elem,
	struct smrq_slink      *next,
	__smrq_slink_t        **lastp)
{
	if (next == NULL && lastp) {
		if (*lastp != prev || smr_serialized_load(prev)) {
			__smr_stail_invalid(prev, *lastp);
		}
	}

	smr_serialized_store_relaxed(&elem->next, next);
	smr_serialized_store_relaxed(prev, elem);
	if (next == NULL && lastp) {
		*lastp = &elem->next;
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_insert_relaxed(
	__smrq_link_t          *prev,
	struct smrq_link       *elem,
	struct smrq_link       *next,
	__smrq_link_t         **lastp)
{
	if (next != NULL && next->prev != prev) {
		__smr_linkage_invalid(prev);
	}
	if (next == NULL && lastp) {
		if (*lastp != prev || smr_serialized_load(prev)) {
			__smr_tail_invalid(prev, *lastp);
		}
	}

	smr_serialized_store_relaxed(&elem->next, next);
	elem->prev = prev;
	smr_serialized_store_relaxed(prev, elem);

	if (next != NULL) {
		next->prev = &elem->next;
	} else if (lastp) {
		*lastp = &elem->next;
	}
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove_one(
	__smrq_slink_t         *prev,
	struct smrq_slink      *elem,
	__smrq_slink_t        **lastp)
{
	struct smrq_slink *next;

	/*
	 * Removal "skips" a link this way:
	 *
	 * e1 ---> e2 ---> e3    becomes    e1 -----------> e3
	 *
	 * When e3 was inserted, a release barrier was issued
	 * by smr_serialized_store(). We do not need to issue
	 * a release barrier upon removal because `next` carries
	 * a dependency on that smr_serialized_store()d value.
	 */
	next = smr_serialized_load(&elem->next);
	smr_serialized_store_relaxed(prev, next);
	if (next == NULL && lastp) {
		*lastp = prev;
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove_one(
	__smrq_link_t          *prev,
	struct smrq_link       *elem,
	__smrq_link_t         **lastp)
{
	struct smrq_link *next;

	next = smr_serialized_load(&elem->next);

	if (smr_serialized_load(prev) != elem) {
		__smr_linkage_invalid(prev);
	}
	if (next && next->prev != &elem->next) {
		__smr_linkage_invalid(&elem->next);
	}

	/*
	 * Removal "skips" a link this way:
	 *
	 * e1 ---> e2 ---> e3    becomes    e1 -----------> e3
	 *
	 * When e3 was inserted, a release barrier was issued
	 * by smr_serialized_store(). We do not need to issue
	 * a release barrier upon removal because `next` carries
	 * a dependency on that smr_serialized_store()d value.
	 */
	smr_serialized_store_relaxed(prev, next);

	if (next != NULL) {
		next->prev = prev;
	} else if (lastp) {
		*lastp = prev;
	}
	elem->prev = NULL;
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove(
	__smrq_slink_t         *first,
	struct smrq_slink      *elem,
	__smrq_slink_t        **lastp)
{
	__smrq_slink_t *prev = first;
	struct smrq_slink *cur;

	while ((cur = smr_serialized_load(prev)) != elem) {
		prev = &cur->next;
	}

	__smrq_serialized_remove_one(prev, elem, lastp);
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove(
	__smrq_link_t          *first __unused,
	struct smrq_link       *elem,
	__smrq_link_t         **lastp)
{
	__smrq_serialized_remove_one(elem->prev, elem, lastp);
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_replace(
	__smrq_slink_t         *first,
	struct smrq_slink      *old_elem,
	struct smrq_slink      *new_elem,
	__smrq_slink_t        **lastp)
{
	__smrq_slink_t *prev = first;
	struct smrq_slink *cur;
	struct smrq_slink *next;

	while ((cur = smr_serialized_load(prev)) != old_elem) {
		prev = &cur->next;
	}

	next = smr_serialized_load(&old_elem->next);
	smr_serialized_store_relaxed(&new_elem->next, next);
	smr_serialized_store(prev, new_elem);

	if (next == NULL && lastp) {
		*lastp = &new_elem->next;
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_replace(
	__smrq_link_t          *first __unused,
	struct smrq_link       *old_elem,
	struct smrq_link       *new_elem,
	__smrq_link_t         **lastp)
{
	__smrq_link_t *prev;
	struct smrq_link *next;

	prev = old_elem->prev;
	next = smr_serialized_load(&old_elem->next);

	if (smr_serialized_load(prev) != old_elem) {
		__smr_linkage_invalid(prev);
	}
	if (next && next->prev != &old_elem->next) {
		__smr_linkage_invalid(&old_elem->next);
	}

	smr_serialized_store_relaxed(&new_elem->next, next);
	new_elem->prev = prev;
	smr_serialized_store(prev, new_elem);

	if (next != NULL) {
		next->prev = &new_elem->next;
	} else if (lastp) {
		*lastp = &new_elem->next;
	}
	old_elem->prev = NULL;
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_append(
	__smrq_slink_t         *dst_first,
	__smrq_slink_t        **dst_lastp,
	__smrq_slink_t         *src_first,
	__smrq_slink_t        **src_lastp)
{
	struct smrq_slink *src = smr_serialized_load(src_first);
	struct smrq_slink *dst;

	if (dst_lastp) {
		if (src) {
			smr_serialized_store_relaxed(*dst_lastp, src);
			*dst_lastp = *src_lastp;
		}
	} else {
		while ((dst = smr_serialized_load(dst_first))) {
			dst_first = &dst->next;
		}
		smr_serialized_store_relaxed(dst_first, src);
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_append(
	__smrq_link_t          *dst_first,
	__smrq_link_t         **dst_lastp,
	__smrq_link_t          *src_first,
	__smrq_link_t         **src_lastp)
{
	struct smrq_link *src = smr_serialized_load(src_first);
	struct smrq_link *dst;

	if (dst_lastp) {
		if (src) {
			smr_serialized_store_relaxed(*dst_lastp, src);
			src->prev = *dst_lastp;
			*dst_lastp = *src_lastp;
		}
	} else {
		while ((dst = smr_serialized_load(dst_first))) {
			dst_first = &dst->next;
		}
		smr_serialized_store_relaxed(dst_first, src);
		if (src) {
			/*
			 * `dst_first` now designates the link that points
			 * at `src`; `dst` is NULL once the loop exits, so
			 * it cannot be used to compute the back pointer.
			 */
			src->prev = dst_first;
		}
	}
}

__END_DECLS

#endif /* _KERN_SMR_H_ */