xref: /xnu-11417.140.69/libkern/os/refcnt_internal.h (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 #ifndef _OS_REFCNT_INTERNAL_H
2 #define _OS_REFCNT_INTERNAL_H
3 
/*
 * A reference count object: the atomic count itself plus, in debug
 * builds, the group it is accounted against.
 */
struct os_refcnt {
	os_ref_atomic_t ref_count;     /* the reference count proper */
#if OS_REFCNT_DEBUG
	struct os_refgrp *ref_group;   /* debug-only accounting group (set by os_ref_init_count) */
#endif
};
10 
11 #if OS_REFCNT_DEBUG
12 
/* Flags stored in struct os_refgrp::grp_flags. */
__options_closed_decl(os_refgrp_flags_t, uint64_t, {
	OS_REFGRP_F_NONE           = 0x0,
	OS_REFGRP_F_ALWAYS_ENABLED = 0x1,
});
17 
/*
 * Debug-only accounting group shared by a set of refcount objects.
 * Instances are normally declared via os_refgrp_decl() below.
 */
struct os_refgrp {
	const char *grp_name;
	os_ref_atomic_t grp_children;  /* number of refcount objects in group */
	os_ref_atomic_t grp_count;     /* current reference count of group */
	_Atomic uint64_t grp_retain_total;   /* cumulative retains (maintained by the implementation) */
	_Atomic uint64_t grp_release_total;  /* cumulative releases (maintained by the implementation) */
	struct os_refgrp *grp_parent;  /* optional parent group */
	void *grp_log;                 /* refcount logging context */
	uint64_t grp_flags;            /* os_refgrp_flags_t; set from os_refgrp_initializer() */
};
28 
29 #endif
30 
/* Static initializers for a raw count and for a struct os_refcnt. */
# define OS_REF_ATOMIC_INITIALIZER 0
#if OS_REFCNT_DEBUG
# define OS_REF_INITIALIZER { .ref_count = OS_REF_ATOMIC_INITIALIZER, .ref_group = NULL }
#else
# define OS_REF_INITIALIZER { .ref_count = OS_REF_ATOMIC_INITIALIZER }
#endif
37 
38 __BEGIN_DECLS
39 
/* Expands to x when refcount debugging is compiled in, y otherwise. */
#if OS_REFCNT_DEBUG
# define os_ref_if_debug(x, y) x
#else
# define os_ref_if_debug(x, y) y
#endif
45 
/*
 * Entry points exported to code outside XNU proper (kexts).
 * Implemented in refcnt.c; not visible in this header.
 */
void os_ref_init_count_external(os_ref_atomic_t *, struct os_refgrp *, os_ref_count_t);
void os_ref_retain_external(os_ref_atomic_t *, struct os_refgrp *);
void os_ref_retain_locked_external(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_external(os_ref_atomic_t *, struct os_refgrp *,
    memory_order release_order, memory_order dealloc_order);
os_ref_count_t os_ref_release_relaxed_external(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_barrier_external(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_locked_external(os_ref_atomic_t *, struct os_refgrp *);
bool os_ref_retain_try_external(os_ref_atomic_t *, struct os_refgrp *);
55 
#if XNU_KERNEL_PRIVATE
/*
 * Kernel-internal implementations; the os_ref_* inlines below forward to
 * these.  The _floor_ variants additionally take a minimum count argument.
 */
void os_ref_init_count_internal(os_ref_atomic_t *, struct os_refgrp *, os_ref_count_t);
void os_ref_retain_internal(os_ref_atomic_t *, struct os_refgrp *);
void os_ref_retain_floor_internal(os_ref_atomic_t *, os_ref_count_t, struct os_refgrp *);
os_ref_count_t os_ref_release_relaxed_internal(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_barrier_internal(os_ref_atomic_t *, struct os_refgrp *);
os_ref_count_t os_ref_release_internal(os_ref_atomic_t *, struct os_refgrp *,
    memory_order release_order, memory_order dealloc_order);
bool os_ref_retain_try_internal(os_ref_atomic_t *, struct os_refgrp *);
bool os_ref_retain_floor_try_internal(os_ref_atomic_t *, os_ref_count_t, struct os_refgrp *);
void os_ref_retain_locked_internal(os_ref_atomic_t *, struct os_refgrp *);
void os_ref_retain_floor_locked_internal(os_ref_atomic_t *, os_ref_count_t, struct os_refgrp *);
os_ref_count_t os_ref_release_locked_internal(os_ref_atomic_t *, struct os_refgrp *);
#else
/* For now, the internal and external variants are identical */
#define os_ref_init_count_internal      os_ref_init_count_external
#define os_ref_retain_internal          os_ref_retain_external
#define os_ref_retain_locked_internal   os_ref_retain_locked_external
#define os_ref_release_internal         os_ref_release_external
#define os_ref_release_barrier_internal os_ref_release_barrier_external
#define os_ref_release_relaxed_internal os_ref_release_relaxed_external
#define os_ref_release_locked_internal  os_ref_release_locked_external
#define os_ref_retain_try_internal      os_ref_retain_try_external
#endif
80 
/*
 * Initialize *rc with an initial reference count of `count`; in debug
 * builds the object is bound to `grp` (ignored otherwise).
 */
static inline void
os_ref_init_count(struct os_refcnt *rc, struct os_refgrp * __unused grp, os_ref_count_t count)
{
#if OS_REFCNT_DEBUG
	rc->ref_group = grp;
#endif
	os_ref_init_count_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL), count);
}
89 
90 static inline void
os_ref_retain(struct os_refcnt * rc)91 os_ref_retain(struct os_refcnt *rc)
92 {
93 	os_ref_retain_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL));
94 }
95 
96 static inline os_ref_count_t
os_ref_release_locked(struct os_refcnt * rc)97 os_ref_release_locked(struct os_refcnt *rc)
98 {
99 	return os_ref_release_locked_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL));
100 }
101 
102 static inline void
os_ref_retain_locked(struct os_refcnt * rc)103 os_ref_retain_locked(struct os_refcnt *rc)
104 {
105 	os_ref_retain_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL));
106 }
107 
108 static inline bool
os_ref_retain_try(struct os_refcnt * rc)109 os_ref_retain_try(struct os_refcnt *rc)
110 {
111 	return os_ref_retain_try_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL));
112 }
113 
/*
 * Drop a reference with caller-supplied memory orderings for the release
 * and the final (deallocating) release.  Deprecated; see message below.
 */
__deprecated_msg("inefficient codegen, prefer os_ref_release / os_ref_release_relaxed")
static inline os_ref_count_t OS_WARN_RESULT
os_ref_release_explicit(struct os_refcnt *rc, memory_order release_order, memory_order dealloc_order)
{
	return os_ref_release_internal(&rc->ref_count, os_ref_if_debug(rc->ref_group, NULL),
	           release_order, dealloc_order);
}
121 
#if OS_REFCNT_DEBUG
/* Static initializer for a struct os_refgrp. */
# define os_refgrp_initializer(name, parent, flags) \
	 { \
	        .grp_name =          (name), \
	        .grp_children =      (0u), \
	        .grp_count =         (0u), \
	        .grp_retain_total =  (0u), \
	        .grp_release_total = (0u), \
	        .grp_parent =        (parent), \
	        .grp_log =           NULL, \
	        .grp_flags =         flags, \
	}

/* Define a group, placed in the __DATA,__refgrps section so tooling can find it. */
# define os_refgrp_decl_flags(qual, var, name, parent, flags) \
	qual struct os_refgrp __attribute__((section("__DATA,__refgrps"))) var =  \
	    os_refgrp_initializer(name, parent, flags)

/* Define a group with no flags. */
# define os_refgrp_decl(qual, var, name, parent) \
	os_refgrp_decl_flags(qual, var, name, parent, OS_REFGRP_F_NONE)

# define os_refgrp_decl_extern(var) \
	extern struct os_refgrp var

/* Create a default group based on the init() callsite if no explicit group
 * is provided. */
# define os_ref_init_count(rc, grp, count) ({ \
	        os_refgrp_decl(static, __grp, __func__, NULL); \
	        (os_ref_init_count)((rc), (grp) ? (grp) : &__grp, (count)); \
	})

#else /* OS_REFCNT_DEBUG */

/* Non-debug builds: groups don't exist; drop the group argument entirely. */
# define os_refgrp_decl(qual, var, name, parent) extern struct os_refgrp var __attribute__((unused))
# define os_refgrp_decl_extern(var) os_refgrp_decl(, var, ,)
# define os_ref_init_count(rc, grp, count) (os_ref_init_count)((rc), NULL, (count))

#endif /* OS_REFCNT_DEBUG */
159 
#if XNU_KERNEL_PRIVATE
/* Out-of-line panic helper (implemented in refcnt.c). */
void os_ref_panic_live(void *rc) __abortlike;
#else
/* Panic: a release on a supposedly-live object dropped the final reference. */
__abortlike
static inline void
os_ref_panic_live(void *rc)
{
	panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc);
	__builtin_unreachable();
}
#endif

#if XNU_KERNEL_PRIVATE
/* Out-of-line panic helper (implemented in refcnt.c). */
void os_ref_panic_last(void *rc) __abortlike;
#else
/* Panic: a release expected to drop the final reference left a non-zero count. */
__abortlike
static inline void
os_ref_panic_last(void *rc)
{
	panic("os_refcnt: expected release of final reference but rc %p!=0\n", rc);
	__builtin_unreachable();
}
#endif
183 
184 static inline os_ref_count_t OS_WARN_RESULT
os_ref_release(struct os_refcnt * rc)185 os_ref_release(struct os_refcnt *rc)
186 {
187 	return os_ref_release_barrier_internal(&rc->ref_count,
188 	           os_ref_if_debug(rc->ref_group, NULL));
189 }
190 
191 static inline void
os_ref_release_last(struct os_refcnt * rc)192 os_ref_release_last(struct os_refcnt *rc)
193 {
194 	if (__improbable(os_ref_release(rc) != 0)) {
195 		os_ref_panic_last(rc);
196 	}
197 }
198 
199 static inline os_ref_count_t OS_WARN_RESULT
os_ref_release_relaxed(struct os_refcnt * rc)200 os_ref_release_relaxed(struct os_refcnt *rc)
201 {
202 	return os_ref_release_relaxed_internal(&rc->ref_count,
203 	           os_ref_if_debug(rc->ref_group, NULL));
204 }
205 
206 static inline void
os_ref_release_live(struct os_refcnt * rc)207 os_ref_release_live(struct os_refcnt *rc)
208 {
209 	if (__improbable(os_ref_release(rc) == 0)) {
210 		os_ref_panic_live(rc);
211 	}
212 }
213 
214 static inline os_ref_count_t
os_ref_get_count_internal(os_ref_atomic_t * rc)215 os_ref_get_count_internal(os_ref_atomic_t *rc)
216 {
217 	return atomic_load_explicit(rc, memory_order_relaxed);
218 }
219 
220 static inline os_ref_count_t
os_ref_get_count(struct os_refcnt * rc)221 os_ref_get_count(struct os_refcnt *rc)
222 {
223 	return os_ref_get_count_internal(&rc->ref_count);
224 }
225 
#if !OS_REFCNT_DEBUG
/* Non-debug builds: strip the group argument from the per-CPU ref API. */
#define os_pcpu_ref_init(ref, grp)              (os_pcpu_ref_init)(ref, NULL)
#define os_pcpu_ref_destroy(ref, grp)           (os_pcpu_ref_destroy)(ref, NULL)
#define os_pcpu_ref_kill(ref, grp)              (os_pcpu_ref_kill)(ref, NULL)
#define os_pcpu_ref_retain(ref, grp)            (os_pcpu_ref_retain)(ref, NULL)
#define os_pcpu_ref_retain_try(ref, grp)        (os_pcpu_ref_retain_try)(ref, NULL)
#define os_pcpu_ref_release(ref, grp)           (os_pcpu_ref_release)(ref, NULL)
#define os_pcpu_ref_release_live(ref, grp)      (os_pcpu_ref_release_live)(ref, NULL)
#endif
235 
236 #if XNU_KERNEL_PRIVATE
237 #pragma GCC visibility push(hidden)
238 
239 /*
240  * Raw API
241  */
242 
/* Initialize a raw (group-explicit) count with an initial value. */
static inline void
os_ref_init_count_raw(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t count)
{
	os_ref_init_count_internal(rc, grp, count);
}

/* Retain, asserting the count is at least `f` beforehand (see _floor_ internal). */
static inline void
os_ref_retain_floor(struct os_refcnt *rc, os_ref_count_t f)
{
	os_ref_retain_floor_internal(&rc->ref_count, f, os_ref_if_debug(rc->ref_group, NULL));
}

/* Raw variant of os_ref_retain(): caller supplies the group. */
static inline void
os_ref_retain_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	os_ref_retain_internal(rc, grp);
}

/* Raw retain with a count floor of `f`. */
static inline void
os_ref_retain_floor_raw(os_ref_atomic_t *rc, os_ref_count_t f, struct os_refgrp *grp)
{
	os_ref_retain_floor_internal(rc, f, grp);
}

/* Raw release with barrier semantics; returns the new count. */
static inline os_ref_count_t
os_ref_release_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	return os_ref_release_barrier_internal(rc, grp);
}

/* Raw release with relaxed ordering; returns the new count. */
static inline os_ref_count_t
os_ref_release_raw_relaxed(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	return os_ref_release_relaxed_internal(rc, grp);
}
278 
279 static inline void
os_ref_release_live_raw(os_ref_atomic_t * rc,struct os_refgrp * grp)280 os_ref_release_live_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
281 {
282 	if (__improbable(os_ref_release_barrier_internal(rc, grp) == 0)) {
283 		os_ref_panic_live(rc);
284 	}
285 }
286 
/* Raw try-retain; returns false if the object is already dead. */
static inline bool
os_ref_retain_try_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	return os_ref_retain_try_internal(rc, grp);
}

/* Raw try-retain with a count floor of `f`. */
static inline bool
os_ref_retain_floor_try_raw(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	return os_ref_retain_floor_try_internal(rc, f, grp);
}
299 
/* Raw retain for counts protected by the caller's lock. */
static inline void
os_ref_retain_locked_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	os_ref_retain_locked_internal(rc, grp);
}

/* Raw locked retain with a count floor of `f`. */
static inline void
os_ref_retain_floor_locked_raw(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	os_ref_retain_floor_locked_internal(rc, f, grp);
}

/* Raw locked release; returns the new count. */
static inline os_ref_count_t
os_ref_release_locked_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	return os_ref_release_locked_internal(rc, grp);
}
318 
319 static inline void
os_ref_release_live_locked_raw(os_ref_atomic_t * rc,struct os_refgrp * grp)320 os_ref_release_live_locked_raw(os_ref_atomic_t *rc, struct os_refgrp *grp)
321 {
322 	if (__improbable(os_ref_release_locked_internal(rc, grp) == 0)) {
323 		os_ref_panic_live(rc);
324 	}
325 }
326 
/* Snapshot a raw count (relaxed ordering). */
static inline os_ref_count_t
os_ref_get_count_raw(os_ref_atomic_t *rc)
{
	return os_ref_get_count_internal(rc);
}
332 
#if !OS_REFCNT_DEBUG
/* remove the group argument for non-debug */
#define os_ref_init_count_raw(rc, grp, count) (os_ref_init_count_raw)((rc), NULL, (count))
#define os_ref_retain_raw(rc, grp) (os_ref_retain_raw)((rc), NULL)
#define os_ref_retain_floor_raw(rc, f, grp) (os_ref_retain_floor_raw)((rc), f, NULL)
#define os_ref_release_raw(rc, grp) (os_ref_release_raw)((rc), NULL)
#define os_ref_release_raw_relaxed(rc, grp) (os_ref_release_raw_relaxed)((rc), NULL)
#define os_ref_release_live_raw(rc, grp) (os_ref_release_live_raw)((rc), NULL)
#define os_ref_retain_try_raw(rc, grp) (os_ref_retain_try_raw)((rc), NULL)
#define os_ref_retain_floor_try_raw(rc, f, grp) (os_ref_retain_floor_try_raw)((rc), f, NULL)
#define os_ref_retain_locked_raw(rc, grp) (os_ref_retain_locked_raw)((rc), NULL)
#define os_ref_retain_floor_locked_raw(rc, f, grp) (os_ref_retain_floor_locked_raw)((rc), f, NULL)
#define os_ref_release_locked_raw(rc, grp) (os_ref_release_locked_raw)((rc), NULL)
#define os_ref_release_live_locked_raw(rc, grp) (os_ref_release_live_locked_raw)((rc), NULL)
#endif
348 
/* Tear down / set up logging state for a group (implemented in refcnt.c). */
extern void
os_ref_log_fini(struct os_refgrp *grp);

extern void
os_ref_log_init(struct os_refgrp *grp);

/*
 * Masked-count implementations: `n` is the raw increment/decrement
 * (already shifted by the bit offset by the inline wrappers below).
 */
extern void
os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);
extern void
os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);
extern uint32_t
os_ref_retain_try_mask_internal(os_ref_atomic_t *, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *grp) OS_WARN_RESULT;
extern bool
os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *grp) OS_WARN_RESULT;

extern uint32_t
os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);
extern uint32_t
os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp);
370 
371 static inline uint32_t
os_ref_get_raw_mask(os_ref_atomic_t * rc)372 os_ref_get_raw_mask(os_ref_atomic_t *rc)
373 {
374 	return os_ref_get_count_internal(rc);
375 }
376 
377 static inline uint32_t
os_ref_get_bits_mask(os_ref_atomic_t * rc,uint32_t b)378 os_ref_get_bits_mask(os_ref_atomic_t *rc, uint32_t b)
379 {
380 	return os_ref_get_raw_mask(rc) & ((1u << b) - 1);
381 }
382 
383 static inline os_ref_count_t
os_ref_get_count_mask(os_ref_atomic_t * rc,uint32_t b)384 os_ref_get_count_mask(os_ref_atomic_t *rc, uint32_t b)
385 {
386 	return os_ref_get_raw_mask(rc) >> b;
387 }
388 
389 static inline void
os_ref_retain_mask(os_ref_atomic_t * rc,uint32_t b,struct os_refgrp * grp)390 os_ref_retain_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
391 {
392 	os_ref_retain_mask_internal(rc, 1u << b, grp);
393 }
394 
395 static inline void
os_ref_retain_acquire_mask(os_ref_atomic_t * rc,uint32_t b,struct os_refgrp * grp)396 os_ref_retain_acquire_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
397 {
398 	os_ref_retain_acquire_mask_internal(rc, 1u << b, grp);
399 }
400 
401 static inline uint32_t
os_ref_retain_try_mask(os_ref_atomic_t * rc,uint32_t b,uint32_t reject_mask,struct os_refgrp * grp)402 os_ref_retain_try_mask(os_ref_atomic_t *rc, uint32_t b,
403     uint32_t reject_mask, struct os_refgrp *grp)
404 {
405 	return os_ref_retain_try_mask_internal(rc, 1u << b, reject_mask, grp);
406 }
407 
408 static inline bool
os_ref_retain_try_acquire_mask(os_ref_atomic_t * rc,uint32_t b,uint32_t reject_mask,struct os_refgrp * grp)409 os_ref_retain_try_acquire_mask(os_ref_atomic_t *rc, uint32_t b,
410     uint32_t reject_mask, struct os_refgrp *grp)
411 {
412 	return os_ref_retain_try_acquire_mask_internal(rc, 1u << b, reject_mask, grp);
413 }
414 
415 static inline uint32_t
os_ref_release_raw_mask(os_ref_atomic_t * rc,uint32_t b,struct os_refgrp * grp)416 os_ref_release_raw_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
417 {
418 	return os_ref_release_barrier_mask_internal(rc, 1u << b, grp);
419 }
420 
421 static inline uint32_t
os_ref_release_raw_relaxed_mask(os_ref_atomic_t * rc,uint32_t b,struct os_refgrp * grp)422 os_ref_release_raw_relaxed_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
423 {
424 	return os_ref_release_relaxed_mask_internal(rc, 1u << b, grp);
425 }
426 
427 static inline os_ref_count_t
os_ref_release_mask(os_ref_atomic_t * rc,uint32_t b,struct os_refgrp * grp)428 os_ref_release_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
429 {
430 	return os_ref_release_barrier_mask_internal(rc, 1u << b, grp) >> b;
431 }
432 
433 static inline os_ref_count_t
os_ref_release_relaxed_mask(os_ref_atomic_t * rc,uint32_t b,struct os_refgrp * grp)434 os_ref_release_relaxed_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
435 {
436 	return os_ref_release_relaxed_mask_internal(rc, 1u << b, grp) >> b;
437 }
438 
439 static inline uint32_t
os_ref_release_live_raw_mask(os_ref_atomic_t * rc,uint32_t b,struct os_refgrp * grp)440 os_ref_release_live_raw_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
441 {
442 	uint32_t val = os_ref_release_barrier_mask_internal(rc, 1u << b, grp);
443 	if (__improbable(val < 1u << b)) {
444 		os_ref_panic_live(rc);
445 	}
446 	return val;
447 }
448 
449 static inline void
os_ref_release_live_mask(os_ref_atomic_t * rc,uint32_t b,struct os_refgrp * grp)450 os_ref_release_live_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp)
451 {
452 	os_ref_release_live_raw_mask(rc, b, grp);
453 }
454 
#if !OS_REFCNT_DEBUG
/*
 * remove the group argument for non-debug
 *
 * Fixes versus the previous revision:
 *  - os_ref_retain_try_acquire_mask() dropped the reject-mask argument:
 *    the inline takes (rc, b, reject_mask, grp), so the macro must take
 *    and forward `m` as well (mirroring os_ref_retain_try_mask()).
 *  - the relaxed raw-mask macro named a non-existent function; the inline
 *    defined above is os_ref_release_raw_relaxed_mask().
 */
#define os_ref_init_count_mask(rc, b, grp, init_c, init_b) (os_ref_init_count_mask)(rc, b, NULL, init_c, init_b)
#define os_ref_retain_mask(rc, b, grp) (os_ref_retain_mask)((rc), (b), NULL)
#define os_ref_retain_acquire_mask(rc, b, grp) (os_ref_retain_acquire_mask)((rc), (b), NULL)
#define os_ref_retain_try_mask(rc, b, m, grp) (os_ref_retain_try_mask)((rc), (b), (m), NULL)
#define os_ref_retain_try_acquire_mask(rc, b, m, grp) (os_ref_retain_try_acquire_mask)((rc), (b), (m), NULL)
#define os_ref_release_mask(rc, b, grp) (os_ref_release_mask)((rc), (b), NULL)
#define os_ref_release_relaxed_mask(rc, b, grp) (os_ref_release_relaxed_mask)((rc), (b), NULL)
#define os_ref_release_raw_mask(rc, b, grp) (os_ref_release_raw_mask)((rc), (b), NULL)
#define os_ref_release_raw_relaxed_mask(rc, b, grp) (os_ref_release_raw_relaxed_mask)((rc), (b), NULL)
#define os_ref_release_live_raw_mask(rc, b, grp) (os_ref_release_live_raw_mask)((rc), (b), NULL)
#define os_ref_release_live_mask(rc, b, grp) (os_ref_release_live_mask)((rc), (b), NULL)
#endif
469 
470 #pragma GCC visibility pop
471 #endif
472 
473 __END_DECLS
474 
475 #endif /* _OS_REFCNT_INTERNAL_H */
476