/* xref: /xnu-8792.41.9/osfmk/kern/smr.h (revision 5c2921b07a2480ab43ec66f5b9e41cb872bc554f) */
/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 
29 #ifndef _KERN_SMR_H_
30 #define _KERN_SMR_H_
31 
32 #include <sys/cdefs.h>
33 #include <stdbool.h>
34 #include <stdint.h>
35 #include <kern/startup.h>
36 #include <os/atomic_private.h>
37 
38 __BEGIN_DECLS
39 
40 #ifdef XNU_KERNEL_PRIVATE
41 #pragma GCC visibility push(hidden)
42 
/*!
 * @typedef smr_seq_t
 *
 * @brief
 * Represents an opaque SMR sequence number.
 *
 * @discussion
 * SMR_SEQ_INVALID (0) is never a valid sequence; clocks start at
 * SMR_SEQ_INIT (1).
 */
typedef unsigned long           smr_seq_t;
#define SMR_SEQ_INVALID         ((smr_seq_t)0)
#define SMR_SEQ_INIT            ((smr_seq_t)1)
52 
53 /*!
54  * @typedef smr_clock_t
55  *
56  * @brief
57  * Represents an SMR domain clock, internal type not manipulated by clients.
58  */
59 typedef union {
60 	struct {
61 #ifdef __LITTLE_ENDIAN__
62 		smr_seq_t       s_rd_seq;
63 		smr_seq_t       s_wr_seq;
64 #else
65 		smr_seq_t       s_wr_seq;
66 		smr_seq_t       s_rd_seq;
67 #endif
68 	};
69 	__uint128_t             s_combined;
70 } smr_clock_t;
71 
72 /*!
73  * @typedef smr_t
74  *
75  * @brief
76  * Declares an SMR domain of synchronization.
77  */
78 typedef struct smr {
79 	smr_clock_t             smr_clock;
80 	unsigned long           smr_pcpu;
81 	unsigned long           smr_budget;
82 } *smr_t;
83 
84 
#pragma mark - pointers allowing hazardous access

/*
 * SMR Accessors are meant to provide safe access to SMR protected
 * pointers and prevent misuse and accidental access.
 *
 * Accessors are grouped by type:
 * entered      - Use while in a read section (between smr_enter/smr_leave())
 * serialized   - Use while holding a lock that serializes writers.
 *                Updates are synchronized with readers via included barriers.
 * unserialized - Use after the memory is out of scope and not visible to
 *                readers.
 *
 * All accesses include a parameter for an assert to verify the required
 * synchronization.
 */
101 
/*!
 * @macro SMR_POINTER_DECL
 *
 * @brief
 * Macro to declare a pointer type that uses SMR for access.
 *
 * @param name          the struct tag to declare (may be empty).
 * @param type_t        the pointee pointer type being protected.
 */
#define SMR_POINTER_DECL(name, type_t) \
	struct name { type_t volatile __smr_ptr; }

/*!
 * @macro SMR_POINTER
 *
 * @brief
 * Macro to declare an anonymous pointer type that uses SMR for access.
 */
#define SMR_POINTER(type_t) \
	SMR_POINTER_DECL(, type_t)
119 
120 
/*!
 * @macro smr_unsafe_load()
 *
 * @brief
 * Read from an SMR protected pointer without any synchronization.
 *
 * @discussion
 * This returns an integer on purpose as dereference is generally unsafe.
 */
#define smr_unsafe_load(ptr) \
	({ (uintptr_t)((ptr)->__smr_ptr); })
132 
/*!
 * @macro smr_entered_load()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section.
 */
#define smr_entered_load(ptr) \
	({ (ptr)->__smr_ptr; })
141 
/*!
 * @macro smr_entered_load_assert()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section,
 * asserting that the given SMR domain's read section is entered.
 */
#define smr_entered_load_assert(ptr, smr)  ({ \
	assert(smr_entered(smr)); \
	(ptr)->__smr_ptr; \
})
152 
/*!
 * @macro smr_entered_load_acquire()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section (with acquire
 * fence).
 */
#define smr_entered_load_acquire(ptr) \
	os_atomic_load(&(ptr)->__smr_ptr, acquire)
162 
/*!
 * @macro smr_entered_load_acquire_assert()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section (with acquire
 * fence), asserting that the given SMR domain's read section is entered.
 */
#define smr_entered_load_acquire_assert(ptr, smr)  ({ \
	assert(smr_entered(smr)); \
	os_atomic_load(&(ptr)->__smr_ptr, acquire); \
})
173 
/*!
 * @macro smr_serialized_load_assert()
 *
 * @brief
 * Read from an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @param held_cond     an expression asserting that the serializing
 *                      mechanism (typically a lock) is held.
 */
#define smr_serialized_load_assert(ptr, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_load: lock not held"); \
	(ptr)->__smr_ptr; \
})
185 
/*!
 * @macro smr_serialized_load()
 *
 * @brief
 * Read from an SMR protected pointer while serialized by an
 * external mechanism (no held-lock assertion).
 */
#define smr_serialized_load(ptr) \
	smr_serialized_load_assert(ptr, true)
195 
/*!
 * @macro smr_init_store()
 *
 * @brief
 * Store @c value to an SMR protected pointer during initialization,
 * before the pointer is visible to any reader (no barrier).
 */
#define smr_init_store(ptr, value) \
	({ (ptr)->__smr_ptr = value; })

/*!
 * @macro smr_clear_store()
 *
 * @brief
 * Clear (sets to 0) an SMR protected pointer (this is always "allowed" to do).
 */
#define smr_clear_store(ptr) \
	smr_init_store(ptr, 0)
213 
/*!
 * @macro smr_serialized_store_assert()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism.
 *
 * @discussion
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 *
 * The release fence orders prior initialization of the pointee
 * before readers can observe the new pointer.
 */
#define smr_serialized_store_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_store: lock not held"); \
	os_atomic_thread_fence(release); \
	(ptr)->__smr_ptr = value; \
})
230 
/*!
 * @macro smr_serialized_store()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism (no held-lock assertion).
 *
 * @discussion
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 */
#define smr_serialized_store(ptr, value) \
	smr_serialized_store_assert(ptr, value, true)
244 
/*!
 * @macro smr_serialized_store_relaxed_assert()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism, without a release fence.
 *
 * @discussion
 * This function can be used when storing a value that was already
 * previously stored with smr_serialized_store() (for example during
 * a linked list removal), so the pointee is already published.
 */
#define smr_serialized_store_relaxed_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_store_relaxed: lock not held"); \
	(ptr)->__smr_ptr = value; \
})
261 
/*!
 * @macro smr_serialized_store_relaxed()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism, without a release fence (no held-lock assertion).
 *
 * @discussion
 * This function can be used when storing a value that was already
 * previously stored with smr_serialized_store() (for example during
 * a linked list removal).
 */
#define smr_serialized_store_relaxed(ptr, value) \
	smr_serialized_store_relaxed_assert(ptr, value, true)
276 
/*!
 * @macro smr_serialized_swap_assert()
 *
 * @brief
 * Swap @c value with an SMR protected pointer and return the old value
 * while serialized by an external mechanism.
 *
 * @discussion
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define smr_serialized_swap_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_swap: lock not held"); \
	os_atomic_xchg(&(ptr)->__smr_ptr, value, release); \
})
291 
/*!
 * @macro smr_serialized_swap()
 *
 * @brief
 * Swap @c value with an SMR protected pointer and return the old value
 * while serialized by an external mechanism (no held-lock assertion).
 *
 * @discussion
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define smr_serialized_swap(ptr, value) \
	smr_serialized_swap_assert(ptr, value, true)
304 
/*!
 * @macro smr_unserialized_load()
 *
 * @brief
 * Read from an SMR protected pointer when no serialization is required
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define smr_unserialized_load(ptr) \
	({ (ptr)->__smr_ptr; })
315 
/*!
 * @macro smr_unserialized_store()
 *
 * @brief
 * Store to an SMR protected pointer when no serialization is required
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define smr_unserialized_store(ptr, value) \
	({ (ptr)->__smr_ptr = value; })
326 
327 
/*!
 * @macro SMR_DEFINE
 *
 * @brief
 * Define a global SMR domain, which will be available when zalloc is available.
 *
 * @discussion
 * Registers __smr_init() to run at the ZALLOC startup phase to finish
 * initializing the domain's per-CPU state.
 */
#define SMR_DEFINE(var) \
	struct smr var = { \
	        .smr_clock.s_rd_seq = SMR_SEQ_INIT, \
	        .smr_clock.s_wr_seq = SMR_SEQ_INIT, \
	}; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __smr_init, &var)
340 
/*!
 * @macro SMR_DEFINE_EARLY
 *
 * @brief
 * Define an SMR domain that needs to be functional immediately at boot.
 *
 * @discussion
 * Additionally registers __smr_init() at the TUNABLES phase so the domain
 * works before zalloc is up; the later ZALLOC registration from
 * SMR_DEFINE() re-runs it once full per-CPU state can be allocated.
 */
#define SMR_DEFINE_EARLY(var) \
	SMR_DEFINE(var); \
	STARTUP_ARG(TUNABLES, STARTUP_RANK_LAST, __smr_init, &var)
350 
351 
352 #pragma mark - manipulating an SMR clock
353 
354 /*!
355  * @function smr_init()
356  *
357  * @brief
358  * Initialize an smr struct.
359  */
360 extern void smr_init(smr_t);
361 
362 /*!
363  * @function smr_set_deferred_budget()
364  *
365  * @brief
366  * Configures an SMR domain with a budget for smr_deferred_advance().
367  */
368 extern void smr_set_deferred_budget(smr_t, unsigned long);
369 
370 /*!
371  * @function smr_destroy()
372  *
373  * @brief
374  * Destroys an smr struct previously initialized with @c smr_init().
375  */
376 extern void smr_destroy(smr_t);
377 
378 /*!
379  * @function smr_entered()
380  *
381  * @brief
382  * Returns whether an SMR critical section is entered.
383  */
384 extern bool smr_entered(smr_t) __result_use_check;
385 
386 /*!
387  * @function smr_enter()
388  *
389  * @brief
390  * Enter an SMR critical section.
391  */
392 extern void smr_enter(smr_t);
393 
394 /*!
395  * @function smr_leave()
396  *
397  * @brief
398  * Leave an SMR critical section.
399  */
400 extern void smr_leave(smr_t);
401 
402 
403 /*!
404  * @function smr_advance()
405  *
406  * @brief
407  * Advance the write sequence and return the value
408  * for use as a wait goal.
409  *
410  * @discussion
411  * This guarantees that any changes made by the calling thread
412  * prior to this call will be visible to all threads after
413  * the read sequence meets or exceeds the return value.
414  */
415 extern smr_seq_t smr_advance(smr_t) __result_use_check;
416 
417 /*!
418  * @function smr_deferred_advance()
419  *
420  * @brief
421  * Advance the write sequence and return the value
422  * for use as a wait goal.
423  *
424  * @discussion
425  * This guarantees that any changes made by the calling thread
426  * prior to this call will be visible to all threads after
427  * the read sequence meets or exceeds the return value.
428  */
429 extern smr_seq_t smr_deferred_advance(smr_t, unsigned long) __result_use_check;
430 
431 /*!
432  * @function smr_deferred_advance()
433  *
434  * @brief
435  * Advance the write sequence and return the value
436  * for use as a wait goal.
437  *
438  * @discussion
439  * This guarantees that any changes made by the calling thread
440  * prior to this call will be visible to all threads after
441  * the read sequence meets or exceeds the return value.
442  *
443  * Preemption must be disabled.
444  */
445 extern smr_seq_t smr_deferred_advance_nopreempt(smr_t, unsigned long) __result_use_check;
446 
447 /*!
448  * @function smr_poll
449  *
450  * @brief
451  * Poll to determine whether all readers have observed the @c goal
452  * write sequence number.
453  *
454  * @discussion
455  * This function is safe to be called from preemption disabled context
456  * and its worst complexity is O(ncpu).
457  *
458  * @returns true if the goal is met and false if not.
459  */
460 extern bool smr_poll(smr_t smr, smr_seq_t goal) __result_use_check;
461 
462 /*!
463  * @function smr_wait
464  *
465  * @brief
466  * Wait until all readers have observed
467  * the @c goal write sequence number.
468  *
469  * @discussion
470  * This function is safe to be called from preemption disabled context
471  * as it never explicitly blocks, however this is not recommended.
472  */
473 extern void smr_wait(smr_t smr, smr_seq_t goal);
474 
475 /*!
476  * @function smr_synchronize()
477  *
478  * @brief
479  * Synchronize advances the write sequence
480  * and returns when all readers have observed it.
481  *
482  * @discussion
483  * This is roughly equivalent to @c smr_wait(smr, smr_advance(smr))
484  *
485  * It is however better to cache a sequence number returned
486  * from @c smr_advance(), and poll or wait for it at a latter time,
487  * as there will be less chance of spinning while waiting for readers.
488  */
489 extern void smr_synchronize(smr_t);
490 
491 
492 #pragma mark - system global SMR
493 
494 /*!
495  * @function smr_global_entered()
496  *
497  * @brief
498  * Returns whether the system wide global SMR critical section is entered.
499  */
500 extern bool smr_global_entered(void) __result_use_check;
501 
502 /*!
503  * @function smr_global_entered()
504  *
505  * @brief
506  * Enter the system wide global SMR critical section.
507  */
508 extern void smr_global_enter(void);
509 
510 /*!
511  * @function smr_global_leave()
512  *
513  * @brief
514  * Leave the system wide global SMR critical section.
515  */
516 extern void smr_global_leave(void);
517 
518 /*!
519  * @function smr_global_retire()
520  *
521  * @brief
522  * Schedule a callback to free some memory once it is safe to collect it.
523  *
524  * @discussion
525  * The default system wide global SMR system provides a way
526  * for elements protected by it (using @c smr_global_enter()
527  * and @c smr_global_leave() to protect access) to be reclaimed
528  * when this is safe to.
529  *
530  * This function can't be called with preemption disabled as it may block.
531  * In particular it can't be called from within an SMR critical section.
532  *
533  * @param value         the address of the element to reclaim.
534  * @param size          an estimate of the size of the memory that will be freed.
535  * @param destructor    the callback to run to actually destroy the element.
536  */
537 extern void smr_global_retire(
538 	void                   *value,
539 	size_t                  size,
540 	void                  (*destructor)(void *));
541 
542 
543 #pragma mark - implementation details
544 
545 extern void __smr_init(smr_t);
546 
547 #if MACH_KERNEL_PRIVATE
548 extern void
549 smr_register_mpsc_queue(void);
550 #endif
551 
552 #pragma GCC visibility pop
553 #endif // XNU_KERNEL_PRIVATE
554 
555 __END_DECLS
556 
557 #endif /* _KERN_SMR_H_ */
558