xref: /xnu-8020.101.4/osfmk/kern/smr.h (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KERN_SMR_H_
30 #define _KERN_SMR_H_
31 
32 #include <sys/cdefs.h>
33 #include <sys/_endian.h>
34 #include <stdbool.h>
35 #include <stdint.h>
36 #include <kern/startup.h>
37 #include <os/atomic_private.h>
38 
39 __BEGIN_DECLS
40 
41 #ifdef XNU_KERNEL_PRIVATE
42 #pragma GCC visibility push(hidden)
43 
/*!
 * @typedef smr_seq_t
 *
 * @brief
 * Represents an opaque SMR sequence number.
 *
 * @discussion
 * SMR_SEQ_INVALID (0) is never a valid sequence number and can be used
 * as a sentinel; SMR_SEQ_INIT (1) is the value both domain clocks start at.
 */
typedef unsigned long           smr_seq_t;
#define SMR_SEQ_INVALID         ((smr_seq_t)0)
#define SMR_SEQ_INIT            ((smr_seq_t)1)
53 
54 /*!
55  * @typedef smr_clock_t
56  *
57  * @brief
58  * Represents an SMR domain clock, internal type not manipulated by clients.
59  */
60 typedef union {
61 	struct {
62 #if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN
63 		smr_seq_t       s_rd_seq;
64 		smr_seq_t       s_wr_seq;
65 #else
66 		smr_seq_t       s_wr_seq;
67 		smr_seq_t       s_rd_seq;
68 #endif
69 	};
70 #if __LP64__
71 	__uint128_t             s_combined;
72 #else
73 	uint64_t                s_combined;
74 #endif
75 } smr_clock_t;
76 
77 /*!
78  * @typedef smr_t
79  *
80  * @brief
81  * Declares an SMR domain of synchronization.
82  */
83 typedef struct smr {
84 	smr_clock_t             smr_clock;
85 	unsigned long           smr_pcpu;
86 } *smr_t;
87 
88 
89 /*
90  * SMR Accessors are meant to provide safe access to SMR protected
91  * pointers and prevent misuse and accidental access.
92  *
93  * Accessors are grouped by type:
94  * entered      - Use while in a read section (between smr_enter/smr_leave())
95  * serialized   - Use while holding a lock that serializes writers.
96  *                Updates are synchronized with readers via included barriers.
97  * unserialized - Use after the memory is out of scope and not visible to
98  *                readers.
99  *
 100  * All accesses include a parameter for an assert to verify the required
101  * synchronization.
102  */
103 
/*!
 * @macro SMR_POINTER_DECL
 *
 * @brief
 * Macro to declare a named struct type wrapping a pointer that uses SMR
 * for access.
 *
 * @discussion
 * The wrapped field is volatile-qualified so that every access goes to
 * memory; the SMR accessor macros below are the intended way to touch it.
 */
#define SMR_POINTER_DECL(name, type_t) \
	struct name { type_t volatile __smr_ptr; }

/*!
 * @macro SMR_POINTER
 *
 * @brief
 * Macro to declare an anonymous struct type wrapping a pointer that uses
 * SMR for access.
 */
#define SMR_POINTER(type_t) \
	SMR_POINTER_DECL(, type_t)
121 
122 
/*!
 * @macro smr_entered_load()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section.
 *
 * @discussion
 * This is a plain (relaxed) volatile read; use smr_entered_load_acquire()
 * when an acquire fence is required.
 */
#define smr_entered_load(ptr) \
	({ (ptr)->__smr_ptr; })
131 
/*!
 * @macro smr_entered_load_assert()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section,
 * asserting that the given SMR domain is actually entered.
 */
#define smr_entered_load_assert(ptr, smr)  ({ \
	assert(smr_entered(smr)); \
	(ptr)->__smr_ptr; \
})
142 
/*!
 * @macro smr_entered_load_acquire()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section (with acquire
 * fence).
 */
#define smr_entered_load_acquire(ptr) \
	os_atomic_load(&(ptr)->__smr_ptr, acquire)
152 
/*!
 * @macro smr_entered_load_acquire_assert()
 *
 * @brief
 * Read from an SMR protected pointer while in a read section (with acquire
 * fence), asserting that the given SMR domain is actually entered.
 */
#define smr_entered_load_acquire_assert(ptr, smr)  ({ \
	assert(smr_entered(smr)); \
	os_atomic_load(&(ptr)->__smr_ptr, acquire); \
})
163 
/*!
 * @macro smr_serialized_load_assert()
 *
 * @brief
 * Read from an SMR protected pointer while serialized by an
 * external mechanism, asserting that the mechanism is held.
 */
#define smr_serialized_load_assert(ptr, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_load: lock not held"); \
	(ptr)->__smr_ptr; \
})
175 
/*!
 * @macro smr_serialized_load()
 *
 * @brief
 * Read from an SMR protected pointer while serialized by an
 * external mechanism (no assertion on the mechanism).
 */
#define smr_serialized_load(ptr) \
	smr_serialized_load_assert(ptr, true)
185 
/*!
 * @macro smr_init_store()
 *
 * @brief
 * Store @c value to an SMR protected pointer during initialization,
 * before the pointer is visible to any reader (no barriers implied).
 */
#define smr_init_store(ptr, value) \
	({ (ptr)->__smr_ptr = value; })
194 
/*!
 * @macro smr_clear_store()
 *
 * @brief
 * Clear (sets to 0) an SMR protected pointer (this is always "allowed" to do,
 * as hiding a value from readers never requires a barrier).
 */
#define smr_clear_store(ptr) \
	smr_init_store(ptr, (typeof((ptr)->__smr_ptr))0)
203 
/*!
 * @macro smr_serialized_store_assert()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism, asserting that the mechanism is held.
 *
 * @discussion
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 *
 * The release fence preceding the store orders the initialization of
 * the pointed-to object before its publication.
 */
#define smr_serialized_store_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_store: lock not held"); \
	os_atomic_thread_fence(release); \
	(ptr)->__smr_ptr = value; \
})
220 
/*!
 * @macro smr_serialized_store()
 *
 * @brief
 * Store @c value to an SMR protected pointer while serialized by an
 * external mechanism (no assertion on the mechanism).
 *
 * @discussion
 * Writers that are serialized with mutual exclusion or on a single
 * thread should use smr_serialized_store() rather than swap.
 */
#define smr_serialized_store(ptr, value) \
	smr_serialized_store_assert(ptr, value, true)
234 
/*!
 * @macro smr_serialized_swap_assert()
 *
 * @brief
 * Swap @c value with an SMR protected pointer and return the old value
 * while serialized by an external mechanism, asserting that the
 * mechanism is held.
 *
 * @discussion
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define smr_serialized_swap_assert(ptr, value, held_cond)  ({ \
	assertf(held_cond, "smr_serialized_swap: lock not held"); \
	os_atomic_xchg(&(ptr)->__smr_ptr, value, release); \
})
249 
/*!
 * @macro smr_serialized_swap()
 *
 * @brief
 * Swap @c value with an SMR protected pointer and return the old value
 * while serialized by an external mechanism (no assertion on the
 * mechanism).
 *
 * @discussion
 * Swap permits multiple writers to update a pointer concurrently.
 */
#define smr_serialized_swap(ptr, value) \
	smr_serialized_swap_assert(ptr, value, true)
262 
/*!
 * @macro smr_unserialized_load()
 *
 * @brief
 * Read from an SMR protected pointer when no serialization is required
 * such as in the destructor callback or when the caller guarantees other
 * synchronization.
 */
#define smr_unserialized_load(ptr) \
	({ (ptr)->__smr_ptr; })
273 
/*!
 * @macro smr_unserialized_store()
 *
 * @brief
 * Store to an SMR protected pointer when no serialization is required
 * such as in the destructor callback or when the caller guarantees other
 * synchronization (no barriers implied).
 */
#define smr_unserialized_store(ptr, value) \
	({ (ptr)->__smr_ptr = value; })
284 
285 
/*!
 * @macro SMR_DEFINE
 *
 * @brief
 * Define a global SMR domain, which will be available when zalloc is available.
 *
 * @discussion
 * Both clocks start at SMR_SEQ_INIT; @c __smr_init() is registered to run
 * at the ZALLOC startup phase to finish setting the domain up.
 */
#define SMR_DEFINE(var) \
	struct smr var = { \
	        .smr_clock.s_rd_seq = SMR_SEQ_INIT, \
	        .smr_clock.s_wr_seq = SMR_SEQ_INIT, \
	}; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __smr_init, &var)
298 
/*!
 * @macro SMR_DEFINE_EARLY
 *
 * @brief
 * Define an SMR domain that needs to be functional immediately at boot.
 *
 * @discussion
 * Expands SMR_DEFINE() and additionally registers @c __smr_init() at the
 * earlier TUNABLES startup phase (so init runs at both phases).
 */
#define SMR_DEFINE_EARLY(var) \
	SMR_DEFINE(var); \
	STARTUP_ARG(TUNABLES, STARTUP_RANK_LAST, __smr_init, &var)
308 
309 extern void __smr_init(smr_t);
310 
311 /*!
312  * @function smr_init()
313  *
314  * @brief
315  * Initialize an smr struct.
316  */
317 extern void smr_init(smr_t);
318 
319 /*!
320  * @function smr_destroy()
321  *
322  * @brief
323  * Destroys an smr struct previously initialized with @c smr_init().
324  */
325 extern void smr_destroy(smr_t);
326 
327 /*!
328  * @function smr_entered()
329  *
330  * @brief
331  * Returns whether an SMR critical section is entered.
332  */
333 extern bool smr_entered(smr_t) __result_use_check;
334 
335 /*!
336  * @function smr_enter()
337  *
338  * @brief
339  * Enter an SMR critical section.
340  */
341 extern void smr_enter(smr_t);
342 
343 /*!
344  * @function smr_leave()
345  *
346  * @brief
347  * Leave an SMR critical section.
348  */
349 extern void smr_leave(smr_t);
350 
351 
352 /*!
353  * @function smr_advance()
354  *
355  * @brief
356  * Advance the write sequence and return the value
357  * for use as a wait goal.
358  *
359  * @discussion
360  * This guarantees that any changes made by the calling thread
361  * prior to this call will be visible to all threads after
362  * the read sequence meets or exceeds the return value.
363  *
364  * This function may busy loop if the readers are roughly 1 billion
365  * sequence numbers behind the writers.
366  */
367 extern smr_seq_t smr_advance(smr_t) __result_use_check;
368 
369 /*!
370  * @function smr_poll
371  *
372  * @brief
373  * Poll to determine whether all readers have observed the @c goal
374  * write sequence number.
375  *
376  * @discussion
377  * This function is safe to be called from preemption disabled context
378  * and its worst complexity is O(ncpu).
379  *
380  * @returns true if the goal is met and false if not.
381  */
382 extern bool smr_poll(smr_t smr, smr_seq_t goal) __result_use_check;
383 
384 /*!
385  * @function smr_wait
386  *
387  * @brief
388  * Wait until all readers have observed
389  * the @c goal write sequence number.
390  *
391  * @discussion
392  * This function is safe to be called from preemption disabled context
393  * as it never explicitly blocks, however this is not recommended.
394  */
395 extern void smr_wait(smr_t smr, smr_seq_t goal);
396 
397 /*!
398  * @function smr_synchronize()
399  *
400  * @brief
401  * Synchronize advances the write sequence
402  * and returns when all readers have observed it.
403  *
404  * @discussion
405  * This is roughly equivalent to @c smr_wait(smr, smr_advance(smr))
406  *
407  * It is however better to cache a sequence number returned
408  * from @c smr_advance(), and poll or wait for it at a latter time,
409  * as there will be less chance of spinning while waiting for readers.
410  */
411 extern void smr_synchronize(smr_t);
412 
413 #pragma GCC visibility pop
414 #endif // XNU_KERNEL_PRIVATE
415 
416 __END_DECLS
417 
418 #endif /* _KERN_SMR_H_ */
419