/*
 * Copyright (c) 2021-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KERN_SMR_H_
#define _KERN_SMR_H_

#include <sys/cdefs.h>
#include <stdbool.h>
#include <stdint.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/smr_types.h>
#include <kern/startup.h>
#include <os/atomic_private.h>

__BEGIN_DECLS

#pragma mark SMR pointers

/*
 * SMR Accessors are meant to provide safe access to SMR protected
 * pointers and prevent misuse and accidental access.
 *
 * Accessors are grouped by type:
 * entered      - Use while in a read section (between smr_enter/smr_leave())
 * serialized   - Use while holding a lock that serializes writers.
 *                Updates are synchronized with readers via included barriers.
 * unserialized - Use after the memory is out of scope and not visible to
 *                readers.
 *
 * All accesses include a parameter for an assert to verify the required
 * synchronization.
 */
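
/*
 * Illustrative sketch (not part of this API): `my_smr`, `foo_lock` and the
 * structures are hypothetical, and assume an SMR pointer field declared
 * with the types from <kern/smr_types.h>:
 *
 *	// reader: the pointer is only stable between smr_enter()/smr_leave()
 *	smr_enter(&my_smr);
 *	bar = smr_entered_load(&foo->bar_ptr);
 *	...
 *	smr_leave(&my_smr);
 *
 *	// writer: serialized by a lock, publishes with a release barrier
 *	lck_mtx_lock(&foo_lock);
 *	smr_serialized_store(&foo->bar_ptr, new_bar);
 *	lck_mtx_unlock(&foo_lock);
 */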


/*!
 * @macro smr_unsafe_load()
 *
 * @brief
 * Read from an SMR protected pointer without any synchronization.
 *
 * @discussion
 * This returns an integer on purpose, as dereferencing the value
 * is generally unsafe.
 */
#define smr_unsafe_load(ptr) \
	({ (uintptr_t)((ptr)->__smr_ptr); })
/*!
 * @macro smr_entered_load()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section.
 */
#define smr_entered_load(ptr) \
	({ (ptr)->__smr_ptr; })

/*!
 * @macro smr_entered_load_assert()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section.
 */
#define smr_entered_load_assert(ptr, smr)  ({ \
	assert(smr_entered(smr)); \
	(ptr)->__smr_ptr; \
})

/*!
 * @macro smr_entered_load_acquire()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section (with acquire
 * fence).
 */
#define smr_entered_load_acquire(ptr) \
	os_atomic_load(&(ptr)->__smr_ptr, acquire)

/*!
 * @macro smr_entered_load_acquire_assert()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section (with acquire
 * fence).
 */
#define smr_entered_load_acquire_assert(ptr, smr)  ({ \
	assert(smr_entered(smr)); \
	os_atomic_load(&(ptr)->__smr_ptr, acquire); \
})

/*!
 * @macro smr_serialized_load_assert()
 *
 * @brief
 * Read from an SMR protected pointer while serialized by an
 * external mechanism.
 */
#define smr_serialized_load_assert(ptr, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_load: lock not held"); \
	(ptr)->__smr_ptr; \
})

/*!
 * @macro smr_serialized_load()
 *
 * @brief
 * Read from an SMR protected pointer while serialized by an
 * external mechanism.
 */
#define smr_serialized_load(ptr) \
	smr_serialized_load_assert(ptr, true)

/*!
 * @macro smr_init_store()
 *
 * @brief
 * Store @c value to an SMR protected pointer during initialization.
 */
#define smr_init_store(ptr, value) \
	({ (ptr)->__smr_ptr = value; })

/*!
 * @macro smr_clear_store()
 *
 * @brief
 * Clear (set to 0) an SMR protected pointer (this is always allowed).
 */
#define smr_clear_store(ptr) \
	smr_init_store(ptr, 0)

/*!
 * @macro smr_serialized_store_assert()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 */
#define smr_serialized_store_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_store: lock not held"); \
	os_atomic_thread_fence(release); \
	(ptr)->__smr_ptr = value; \
})

/*!
 * @macro smr_serialized_store()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 */
#define smr_serialized_store(ptr, value) \
	smr_serialized_store_assert(ptr, value, true)

/*!
 * @macro smr_serialized_store_relaxed_assert()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * This function can be used when storing a value that was already
 * previously stored with smr_serialized_store() (for example during
 * a linked list removal).
 */
#define smr_serialized_store_relaxed_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_store_relaxed: lock not held"); \
	(ptr)->__smr_ptr = value; \
})

/*!
 * @macro smr_serialized_store_relaxed()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * This function can be used when storing a value that was already
 * previously stored with smr_serialized_store() (for example during
 * a linked list removal).
 */
#define smr_serialized_store_relaxed(ptr, value) \
	smr_serialized_store_relaxed_assert(ptr, value, true)

/*!
 * @macro smr_serialized_swap_assert()
 *
 * @brief
 * Swap @c value with an SMR protected pointer and return the old value
 * while serialized by an external mechanism.
 *
 * @discussion
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define smr_serialized_swap_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_swap: lock not held"); \
	os_atomic_xchg(&(ptr)->__smr_ptr, value, release); \
})

/*!
 * @macro smr_serialized_swap()
 *
 * @brief
 * Swap @c value with an SMR protected pointer and return the old value
 * while serialized by an external mechanism.
 *
 * @discussion
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define smr_serialized_swap(ptr, value) \
	smr_serialized_swap_assert(ptr, value, true)

/*!
 * @macro smr_unserialized_load()
 *
 * @brief
 * Read from an SMR protected pointer when no serialization is required,
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define smr_unserialized_load(ptr) \
	({ (ptr)->__smr_ptr; })

/*!
 * @macro smr_unserialized_store()
 *
 * @brief
 * Store to an SMR protected pointer when no serialization is required,
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define smr_unserialized_store(ptr, value) \
	({ (ptr)->__smr_ptr = value; })


#pragma mark SMR queues

/*
 * SMR queues are queues that are meant to be read under SMR critical sections
 * concurrently with possible updates to the queue.
 *
 * /!\ Such read operations CAN ONLY BE PERFORMED IN FORWARD DIRECTION. /!\
 *
 * Queues can be either:
 * - lists where the head is a single pointer,
 *   and insertions can only be at the head;
 * - tail queues where the head is two pointers,
 *   and insertions can be either at the head or the tail.
 *
 * Queue linkages can either be single forward pointer linkages or double
 * forward/backward linkages. The latter supports O(1) deletion.
 *
 *
 * The entire API surface uses type inference for the implementations,
 * which makes it relatively easy to switch between the 4 types of queues
 * with very minimal API changes (mostly the types of list heads and fields).
 */
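
/*
 * Illustrative sketch (hypothetical element type): a singly linked,
 * head-insert-only list uses the smrq_slist types from <kern/smr_types.h>;
 * the tail queue and doubly linked variants only change the head and
 * link types:
 *
 *	struct elem {
 *		struct smrq_slink  link;
 *		int                value;
 *	};
 *
 *	struct smrq_slist_head head;
 *
 *	smrq_init(&head);
 *	// writers serialize with a lock; readers use a read section
 */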


/*!
 * @macro smrq_init
 *
 * @brief
 * Initializes an SMR queue head.
 */
#define smrq_init(head)  ({ \
	__auto_type __head = (head); \
 \
	smr_init_store(&__head->first, NULL); \
	if (__smrq_lastp(__head)) { \
		*__smrq_lastp(__head) = &__head->first; \
	} \
})


/*!
 * @macro smrq_empty
 *
 * @brief
 * Returns whether an SMR queue is empty; can be called from any context.
 */
#define smrq_empty(head) \
	(smr_unsafe_load(&(head)->first) == 0)


/*!
 * @macro smrq_entered_first
 *
 * @brief
 * Returns the first element of an SMR queue, while in a read section.
 */
#define smrq_entered_first(head, type_t, field) \
	__container_of_safe(smr_entered_load(&(head)->first), type_t, field)


/*!
 * @macro smrq_entered_next
 *
 * @brief
 * Returns the next element of an SMR queue element, while in a read section.
 */
#define smrq_entered_next(elem, field) \
	__container_of_safe(smr_entered_load(&(elem)->field.next), \
	    typeof(*(elem)), field)


/*!
 * @macro smrq_entered_foreach
 *
 * @brief
 * Enumerates an SMR queue, while in a read section.
 */
#define smrq_entered_foreach(it, head, field) \
	for (__auto_type __it = smr_entered_load(&(head)->first); \
	    ((it) = __container_of_safe(__it, typeof(*(it)), field)); \
	    __it = smr_entered_load(&__it->next))
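
/*
 * Illustrative sketch of a read-side lookup, assuming the hypothetical
 * `struct elem`/`head` declarations above and a domain `my_smr`:
 *
 *	struct elem *it;
 *
 *	smr_enter(&my_smr);
 *	smrq_entered_foreach(it, &head, link) {
 *		if (it->value == key) {
 *			break;	// `it` is only stable until smr_leave()
 *		}
 *	}
 *	smr_leave(&my_smr);
 */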


/*!
 * @macro smrq_serialized_first
 *
 * @brief
 * Returns the first element of an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_first(head, type_t, link) \
	__container_of_safe(smr_serialized_load(&(head)->first), type_t, link)

/*!
 * @macro smrq_serialized_next
 *
 * @brief
 * Returns the next element of an SMR queue element, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_next(elem, field) \
	__container_of_safe(smr_serialized_load(&(elem)->field.next), \
	    typeof(*(elem)), field)

/*!
 * @macro smrq_serialized_foreach
 *
 * @brief
 * Enumerates an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_foreach(it, head, field) \
	for (__auto_type __it = smr_serialized_load(&(head)->first); \
	    ((it) = __container_of_safe(__it, typeof(*(it)), field)); \
	    __it = smr_serialized_load(&__it->next))

/*!
 * @macro smrq_serialized_foreach_safe
 *
 * @brief
 * Enumerates an SMR queue, while being serialized
 * by an external mechanism.
 *
 * @discussion
 * This variant supports removing the current element from the queue.
 */
#define smrq_serialized_foreach_safe(it, head, field) \
	for (__auto_type __it = smr_serialized_load(&(head)->first), \
	    __next_it = __it; \
	    ((it) = __container_of_safe(__it, typeof(*(it)), field)) && \
	    ((__next_it = smr_serialized_load(&__it->next)), 1); \
	    __it = __next_it)
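
/*
 * Illustrative sketch: pruning while holding the serializing lock; the
 * safe variant caches the next pointer so the current element may be
 * unlinked (reclamation must still wait for a grace period):
 *
 *	struct elem *it;
 *
 *	smrq_serialized_foreach_safe(it, &head, link) {
 *		if (elem_is_stale(it)) {	// hypothetical predicate
 *			smrq_serialized_remove(&head, &it->link);
 *			// free `it` only after readers are done,
 *			// e.g. via smr_synchronize() or a retire callback
 *		}
 *	}
 */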


/*!
 * @macro smrq_serialized_insert_head
 *
 * @brief
 * Inserts an element at the head of an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_insert_head(head, elem)  ({ \
	__auto_type __head = (head); \
 \
	__smrq_serialized_insert(&__head->first, (elem), \
	    smr_serialized_load(&__head->first), __smrq_lastp(__head)); \
})


/*!
 * @macro smrq_serialized_insert_tail
 *
 * @brief
 * Inserts an element at the tail of an SMR queue, while being serialized
 * by an external mechanism.
 */
#define smrq_serialized_insert_tail(head, elem)  ({ \
	__auto_type __head = (head); \
 \
	__smrq_serialized_insert(__head->last, (elem), \
	    NULL, &__head->last); \
})


/*!
 * @macro smrq_serialized_remove
 *
 * @brief
 * Removes an element from an SMR queue, while being serialized
 * by an external mechanism.
 *
 * @discussion
 * The @c head argument is actually unused for the @c smrq_list queue type.
 * It is still advised to pass it; the compiler should be able to optimize
 * the computation away, as computing a list head ought to have no side
 * effects.
 */
#define smrq_serialized_remove(head, elem)  ({ \
	__auto_type __head = (head); \
 \
	__smrq_serialized_remove(&__head->first, (elem), __smrq_lastp(__head)); \
})


/*!
 * @macro smrq_serialized_replace
 *
 * @brief
 * Replaces an element on an SMR queue with another at the same spot,
 * while being serialized by an external mechanism.
 */
#define smrq_serialized_replace(head, old_elem, new_elem)  ({ \
	__auto_type __head = (head); \
 \
	__smrq_serialized_replace(&__head->first, \
	    (old_elem), (new_elem), __smrq_lastp(__head)); \
})

/*!
 * @macro smrq_serialized_iter
 *
 * @brief
 * Enumerates an SMR singly linked queue, while being serialized
 * by an external mechanism.
 *
 * @discussion
 * This is for manual loops that typically perform erasures.
 *
 * The body of the loop must move the cursor using (once):
 * - smrq_serialized_iter_next() to go to the next element,
 * - smrq_serialized_iter_erase() to erase the current element.
 *
 * The iterator variable will _not_ be updated until the next
 * loop iteration.
 *
 * This form is preferred to smrq_serialized_foreach_safe()
 * for singly linked lists as smrq_serialized_iter_erase()
 * is O(1) as opposed to smrq_serialized_remove().
 */
#define smrq_serialized_iter(it, head, field) \
	for (__smrq_slink_t *__prev_##it = &(head)->first, \
	    *__chk_##it = __prev_##it; \
	    ((it) = __container_of_safe(smr_serialized_load(__prev_##it), \
	    typeof(*(it)), field)); \
	    assert(__chk_##it == NULL), __chk_##it = __prev_##it)

/*!
 * @macro smrq_serialized_iter_next
 *
 * @brief
 * Goes to the next element inside an smrq_serialized_iter() loop.
 */
#define smrq_serialized_iter_next(it, field)  ({ \
	assert(__chk_##it == __prev_##it); \
	__chk_##it = NULL; \
	__prev_##it = &(it)->field.next; \
})

/*!
 * @macro smrq_serialized_iter_erase
 *
 * @brief
 * Erases the element pointed at by the cursor.
 */
#define smrq_serialized_iter_erase(it, field)  ({ \
	assert(__chk_##it == __prev_##it); \
	__chk_##it = NULL; \
	__smrq_serialized_remove_one(__prev_##it, &(it)->field, NULL); \
})
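
/*
 * Illustrative sketch of the manual cursor protocol: each loop body
 * must move the cursor exactly once, either over or past `it`:
 *
 *	struct elem *it;
 *
 *	smrq_serialized_iter(it, &head, link) {
 *		if (elem_is_stale(it)) {	// hypothetical predicate
 *			smrq_serialized_iter_erase(it, link);	// O(1)
 *		} else {
 *			smrq_serialized_iter_next(it, link);
 *		}
 *	}
 */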


/*!
 * @macro smrq_serialized_append
 *
 * @brief
 * Appends a given list at the end of the previous one.
 *
 * @discussion
 * /!\ WARNING /!\: this doesn't "move" the "source" queue like *_CONCAT
 * for <sys/queue.h> does, as it is useful to merge/split hash queues
 * concurrently with readers while allowing readers to still read via
 * the "source" queue.
 *
 * However, the "source" queue needs to be reset to a valid state
 * if it is to be used again.
 */
#define smrq_serialized_append(dst, src)  ({ \
	__auto_type __src = (src); \
	__auto_type __dst = (dst); \
 \
	__smrq_serialized_append(&__dst->first, __smrq_lastp(__dst), \
	    &__src->first, __smrq_lastp(__src)); \
})
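
/*
 * Illustrative sketch: merging one hash bucket into another while
 * readers may still be traversing through `src_head`; the source head
 * is stale afterwards and must be reinitialized before being reused:
 *
 *	smrq_serialized_append(&dst_head, &src_head);
 *	smrq_init(&src_head);	// new readers now see an empty source
 */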


#pragma mark SMR domains

/*!
 * @enum smr_flags_t
 *
 * @brief
 * Options to pass to smr_domain_create().
 *
 * @const SMR_NONE
 * Default values for the flags.
 */
__options_closed_decl(smr_flags_t, unsigned long, {
	SMR_NONE = 0x00000000,
});

/*!
 * @function smr_domain_create()
 *
 * @brief
 * Creates an SMR domain.
 */
extern smr_t smr_domain_create(smr_flags_t flags);

/*!
 * @function smr_domain_free()
 *
 * @brief
 * Destroys an SMR domain previously created with @c smr_domain_create().
 */
extern void smr_domain_free(smr_t smr);



/*!
 * @function smr_entered()
 *
 * @brief
 * Returns whether an SMR critical section is entered.
 */
extern bool smr_entered(smr_t smr) __result_use_check;

/*!
 * @function smr_enter()
 *
 * @brief
 * Enter a non-preemptible SMR critical section.
 *
 * @discussion
 * Entering an SMR critical section is not reentrant
 * (entering it recursively is undefined and will panic on development
 * kernels).
 *
 * @c smr_leave() must be called to end this section.
 */
extern void smr_enter(smr_t smr);

/*!
 * @function smr_leave()
 *
 * @brief
 * Leave a non-preemptible SMR critical section.
 */
extern void smr_leave(smr_t smr);
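
/*
 * Illustrative sketch (hypothetical domain `my_smr`):
 *
 *	smr_enter(&my_smr);
 *	// preemption is disabled here: keep the section short,
 *	// never block, and never re-enter the same domain
 *	smr_leave(&my_smr);
 */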


/*!
 * @function smr_synchronize()
 *
 * @brief
 * Advances the write sequence and returns once all readers
 * have observed it.
 *
 * @discussion
 * This is roughly equivalent to @c smr_wait(smr, smr_advance(smr)).
 *
 * It is however better to cache a sequence number returned
 * from @c smr_advance(), and poll or wait for it at a later time,
 * as there will be less chance of spinning while waiting for readers.
 */
extern void smr_synchronize(smr_t smr);
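
/*
 * Illustrative sketch of synchronous reclamation: unpublish under the
 * writer lock, wait for readers, then free (names are hypothetical):
 *
 *	lck_mtx_lock(&foo_lock);
 *	old_bar = smr_serialized_load(&foo->bar_ptr);
 *	smr_clear_store(&foo->bar_ptr);
 *	lck_mtx_unlock(&foo_lock);
 *
 *	smr_synchronize(&my_smr);	// all readers observed the clear
 *	kfree_type(struct bar, old_bar);
 */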


#ifdef XNU_KERNEL_PRIVATE
#pragma GCC visibility push(hidden)
#pragma mark - XNU only
#pragma mark XNU only: SMR domains advanced

#define SMR_SEQ_INVALID         ((smr_seq_t)0)
#define SMR_SEQ_INIT            ((smr_seq_t)1)
#define SMR_SEQ_INC             ((smr_seq_t)2)

typedef long smr_delta_t;

#define SMR_SEQ_DELTA(a, b)     ((smr_delta_t)((a) - (b)))
#define SMR_SEQ_CMP(a, op, b)   (SMR_SEQ_DELTA(a, b) op 0)

/*!
 * @typedef smr_clock_t
 *
 * @brief
 * Represents an SMR domain clock, internal type not manipulated by clients.
 */
typedef struct {
	smr_seq_t       s_rd_seq;
	smr_seq_t       s_wr_seq;
} smr_clock_t;

/*!
 * @typedef smr_t
 *
 * @brief
 * Declares an SMR domain of synchronization.
 */
struct smr {
	smr_clock_t     smr_clock;
	struct smr_pcpu *smr_pcpu;
	unsigned long   smr_flags;
	unsigned long   smr_early;
};

/*!
 * @macro SMR_DEFINE_FLAGS
 *
 * @brief
 * Define an SMR domain with specific create flags.
 */
#define SMR_DEFINE_FLAGS(var, flags) \
	struct smr var = { \
		.smr_clock.s_rd_seq = SMR_SEQ_INIT, \
		.smr_clock.s_wr_seq = SMR_SEQ_INIT, \
		.smr_flags = (flags), \
	}; \
	STARTUP_ARG(TUNABLES, STARTUP_RANK_LAST, __smr_domain_init, &(var)); \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __smr_domain_init, &(var))

/*!
 * @macro SMR_DEFINE
 *
 * @brief
 * Define an SMR domain.
 */
#define SMR_DEFINE(var) \
	SMR_DEFINE_FLAGS(var, SMR_NONE)
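
/*
 * Illustrative sketch: a domain can be defined globally at compile
 * time, or created and destroyed dynamically:
 *
 *	SMR_DEFINE(my_smr);
 *
 *	smr_t smr = smr_domain_create(SMR_NONE);
 *	...
 *	smr_domain_free(smr);
 */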


/*!
 * @function smr_advance()
 *
 * @brief
 * Advance the write sequence and return the value
 * for use as a wait goal.
 *
 * @discussion
 * This guarantees that any changes made by the calling thread
 * prior to this call will be visible to all threads after
 * the read sequence meets or exceeds the return value.
 */
extern smr_seq_t smr_advance(smr_t smr) __result_use_check;

/*!
 * @function smr_deferred_advance()
 *
 * @brief
 * Pretend-advance the write sequence and return the value
 * for use as a wait goal.
 *
 * @discussion
 * This guarantees that any changes made by the calling thread
 * prior to this call will be visible to all threads after
 * the read sequence meets or exceeds the return value.
 *
 * Unlike smr_advance(), the global clock isn't really advanced;
 * this only sets a goal in the future. This can be used to control
 * the pace of updating the global clock and avoid global atomics.
 *
 * In order for the clock to advance, clients of this API must call
 * @c smr_deferred_advance_commit() with the goal returned by this call.
 *
 * Note that calls to @c smr_advance() or @c smr_wait() when passed
 * the goal returned by this function would also allow the clock
 * to make progress and are legal (yet less efficient) calls to make.
 */
extern smr_seq_t smr_deferred_advance(smr_t smr) __result_use_check;

/*!
 * @function smr_deferred_advance_commit()
 *
 * @brief
 * Actually advance the write sequence to the goal returned by a previous
 * call to @c smr_deferred_advance().
 */
extern void smr_deferred_advance_commit(smr_t smr, smr_seq_t seq);
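
/*
 * Illustrative sketch of the deferred pattern: take a goal cheaply on
 * the hot path, and only pay for the global atomic when reclaiming:
 *
 *	seq = smr_deferred_advance(&my_smr);	// no global atomic yet
 *	...
 *	smr_deferred_advance_commit(&my_smr, seq);
 *	smr_wait(&my_smr, seq);
 */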


/*!
 * @function smr_poll
 *
 * @brief
 * Poll to determine whether all readers have observed the @c goal
 * write sequence number.
 *
 * @discussion
 * This function is safe to call from preemption-disabled context,
 * and its worst-case complexity is O(ncpu).
 *
 * @returns true if the goal is met and false if not.
 */
extern bool smr_poll(smr_t smr, smr_seq_t goal) __result_use_check;

/*!
 * @function smr_wait
 *
 * @brief
 * Wait until all readers have observed
 * the @c goal write sequence number.
 *
 * @discussion
 * This function is safe to call from preemption-disabled context,
 * as it never explicitly blocks; however, this is not recommended.
 */
extern void smr_wait(smr_t smr, smr_seq_t goal);
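
/*
 * Illustrative sketch of batched reclamation: stamp retired elements
 * with a goal from smr_advance(), and free them once smr_poll() says
 * the goal was observed (the `deferred` queue helpers and the
 * `elem_seq` field are hypothetical):
 *
 *	elem->elem_seq = smr_advance(&my_smr);
 *	deferred_enqueue(elem);
 *	...
 *	while ((elem = deferred_first()) &&
 *	    smr_poll(&my_smr, elem->elem_seq)) {
 *		deferred_dequeue(elem);
 *		kfree_type(struct elem, elem);
 *	}
 */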


#pragma mark XNU only: system global SMR

/*!
 * @brief
 * The SMR domain behind the smr_global_*() KPI.
 *
 * @discussion
 * This is provided as a fallback for when a specific SMR domain
 * would be overkill.
 */
extern struct smr smr_system;

#define smr_global_entered()      smr_entered(&smr_system)
#define smr_global_enter()        smr_enter(&smr_system)
#define smr_global_leave()        smr_leave(&smr_system)

#define smr_global_advance()      smr_advance(&smr_system)
#define smr_global_poll(goal)     smr_poll(&smr_system, goal)
#define smr_global_wait(goal)     smr_wait(&smr_system, goal)
#define smr_global_synchronize()  smr_synchronize(&smr_system)

/*!
 * @function smr_global_retire()
 *
 * @brief
 * Schedule a callback to free some memory once it is safe to collect it.
 *
 * @discussion
 * The default system-wide global SMR domain provides a way
 * for elements protected by it (using @c smr_global_enter()
 * and @c smr_global_leave() to protect access) to be reclaimed
 * when it is safe to do so.
 *
 * This function can't be called with preemption disabled as it may block.
 * In particular, it can't be called from within an SMR critical section.
 *
 * @param value        the address of the element to reclaim.
 * @param size         an estimate of the size of the memory that will be freed.
 * @param destructor   the callback to run to actually destroy the element.
 */
extern void smr_global_retire(
	void                   *value,
	size_t                  size,
	void                  (*destructor)(void *));
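
/*
 * Illustrative sketch (hypothetical `struct bar` and destructor):
 *
 *	static void
 *	bar_destroy(void *ptr)
 *	{
 *		kfree_type(struct bar, ptr);
 *	}
 *
 *	...
 *	smr_global_retire(bar, sizeof(*bar), bar_destroy);
 */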


#pragma mark XNU only: implementation details

extern void __smr_domain_init(smr_t);

#ifdef MACH_KERNEL_PRIVATE

extern bool smr_entered_cpu(smr_t smr, int cpu) __result_use_check;

extern void smr_register_mpsc_queue(void);

#endif /* MACH_KERNEL_PRIVATE */

#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */
#pragma mark - implementation details
#pragma mark implementation details: SMR queues

extern void __smr_linkage_invalid(__smrq_link_t *link) __abortlike;
extern void __smr_stail_invalid(__smrq_slink_t *link, __smrq_slink_t *last) __abortlike;
extern void __smr_tail_invalid(__smrq_link_t *link, __smrq_link_t *last) __abortlike;

__attribute__((always_inline, overloadable))
static inline __smrq_slink_t **
__smrq_lastp(struct smrq_slist_head *head __unused)
{
	return NULL;
}

__attribute__((always_inline, overloadable))
static inline __smrq_link_t **
__smrq_lastp(struct smrq_list_head *head __unused)
{
	return NULL;
}

__attribute__((always_inline, overloadable))
static inline __smrq_slink_t **
__smrq_lastp(struct smrq_stailq_head *head)
{
	__smrq_slink_t **last = &head->last;

	__builtin_assume(last != NULL);
	return last;
}

__attribute__((always_inline, overloadable))
static inline __smrq_link_t **
__smrq_lastp(struct smrq_tailq_head *head)
{
	__smrq_link_t **last = &head->last;

	__builtin_assume(last != NULL);
	return last;
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_insert(
	__smrq_slink_t         *prev,
	struct smrq_slink      *elem,
	struct smrq_slink      *next,
	__smrq_slink_t        **lastp)
{
	if (next == NULL && lastp) {
		if (*lastp != prev || smr_serialized_load(prev)) {
			__smr_stail_invalid(prev, *lastp);
		}
	}

	smr_serialized_store_relaxed(&elem->next, next);
	smr_serialized_store(prev, elem);
	if (next == NULL && lastp) {
		*lastp = &elem->next;
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_insert(
	__smrq_link_t          *prev,
	struct smrq_link       *elem,
	struct smrq_link       *next,
	__smrq_link_t         **lastp)
{
	if (next != NULL && next->prev != prev) {
		__smr_linkage_invalid(prev);
	}
	if (next == NULL && lastp) {
		if (*lastp != prev || smr_serialized_load(prev)) {
			__smr_tail_invalid(prev, *lastp);
		}
	}

	smr_serialized_store_relaxed(&elem->next, next);
	elem->prev = prev;
	smr_serialized_store(prev, elem);

	if (next != NULL) {
		next->prev = &elem->next;
	} else if (lastp) {
		*lastp = &elem->next;
	}
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove_one(
	__smrq_slink_t         *prev,
	struct smrq_slink      *elem,
	__smrq_slink_t        **lastp)
{
	struct smrq_slink *next;

	/*
	 * Removal "skips" a link this way:
	 *
	 *     e1 ---> e2 ---> e3    becomes    e1 -----------> e3
	 *
	 * When e3 was inserted, a release barrier was issued
	 * by smr_serialized_store(). We do not need to issue
	 * a release barrier upon removal because `next` carries
	 * a dependency on that smr_serialized_store()d value.
	 */
	next = smr_serialized_load(&elem->next);
	smr_serialized_store_relaxed(prev, next);
	if (next == NULL && lastp) {
		*lastp = prev;
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove_one(
	__smrq_link_t          *prev,
	struct smrq_link       *elem,
	__smrq_link_t         **lastp)
{
	struct smrq_link *next;

	next = smr_serialized_load(&elem->next);

	if (smr_serialized_load(prev) != elem) {
		__smr_linkage_invalid(prev);
	}
	if (next && next->prev != &elem->next) {
		__smr_linkage_invalid(&elem->next);
	}

	/*
	 * Removal "skips" a link this way:
	 *
	 *     e1 ---> e2 ---> e3    becomes    e1 -----------> e3
	 *
	 * When e3 was inserted, a release barrier was issued
	 * by smr_serialized_store(). We do not need to issue
	 * a release barrier upon removal because `next` carries
	 * a dependency on that smr_serialized_store()d value.
	 */
	smr_serialized_store_relaxed(prev, next);

	if (next != NULL) {
		next->prev = prev;
	} else if (lastp) {
		*lastp = prev;
	}
	elem->prev = NULL;
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove(
	__smrq_slink_t         *first,
	struct smrq_slink      *elem,
	__smrq_slink_t        **lastp)
{
	__smrq_slink_t *prev = first;
	struct smrq_slink *cur;

	while ((cur = smr_serialized_load(prev)) != elem) {
		prev = &cur->next;
	}

	__smrq_serialized_remove_one(prev, elem, lastp);
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_remove(
	__smrq_link_t          *first __unused,
	struct smrq_link       *elem,
	__smrq_link_t         **lastp)
{
	__smrq_serialized_remove_one(elem->prev, elem, lastp);
}


__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_replace(
	__smrq_slink_t         *first,
	struct smrq_slink      *old_elem,
	struct smrq_slink      *new_elem,
	__smrq_slink_t        **lastp)
{
	__smrq_slink_t *prev = first;
	struct smrq_slink *cur;
	struct smrq_slink *next;

	while ((cur = smr_serialized_load(prev)) != old_elem) {
		prev = &cur->next;
	}

	next = smr_serialized_load(&old_elem->next);
	smr_serialized_store_relaxed(&new_elem->next, next);
	smr_serialized_store(prev, new_elem);

	if (next == NULL && lastp) {
		*lastp = &new_elem->next;
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_replace(
	__smrq_link_t          *first __unused,
	struct smrq_link       *old_elem,
	struct smrq_link       *new_elem,
	__smrq_link_t         **lastp)
{
	__smrq_link_t *prev;
	struct smrq_link *next;

	prev = old_elem->prev;
	next = smr_serialized_load(&old_elem->next);

	if (smr_serialized_load(prev) != old_elem) {
		__smr_linkage_invalid(prev);
	}
	if (next && next->prev != &old_elem->next) {
		__smr_linkage_invalid(&old_elem->next);
	}

	smr_serialized_store_relaxed(&new_elem->next, next);
	new_elem->prev = prev;
	smr_serialized_store(prev, new_elem);

	if (next != NULL) {
		next->prev = &new_elem->next;
	} else if (lastp) {
		*lastp = &new_elem->next;
	}
	old_elem->prev = NULL;
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_append(
	__smrq_slink_t         *dst_first,
	__smrq_slink_t        **dst_lastp,
	__smrq_slink_t         *src_first,
	__smrq_slink_t        **src_lastp)
{
	struct smrq_slink *src = smr_serialized_load(src_first);
	struct smrq_slink *dst;

	if (dst_lastp) {
		if (src) {
			smr_serialized_store_relaxed(*dst_lastp, src);
			*dst_lastp = *src_lastp;
		}
	} else {
		while ((dst = smr_serialized_load(dst_first))) {
			dst_first = &dst->next;
		}
		smr_serialized_store_relaxed(dst_first, src);
	}
}

__attribute__((always_inline, overloadable))
static inline void
__smrq_serialized_append(
	__smrq_link_t          *dst_first,
	__smrq_link_t         **dst_lastp,
	__smrq_link_t          *src_first,
	__smrq_link_t         **src_lastp)
{
	struct smrq_link *src = smr_serialized_load(src_first);
	struct smrq_link *dst;

	if (dst_lastp) {
		if (src) {
			smr_serialized_store_relaxed(*dst_lastp, src);
			src->prev = *dst_lastp;
			*dst_lastp = *src_lastp;
		}
	} else {
		while ((dst = smr_serialized_load(dst_first))) {
			dst_first = &dst->next;
		}
		smr_serialized_store_relaxed(dst_first, src);
		if (src) {
			/* `dst_first` is the link that now points at `src` */
			src->prev = dst_first;
		}
	}
}

__END_DECLS

#endif /* _KERN_SMR_H_ */