xref: /xnu-8020.101.4/libkern/os/refcnt.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 #if KERNEL
2 #include <kern/assert.h>
3 #include <kern/debug.h>
4 #include <pexpert/pexpert.h>
5 #include <kern/btlog.h>
6 #include <kern/backtrace.h>
7 #include <libkern/libkern.h>
8 #endif
9 #include <os/atomic_private.h>
10 
11 #include "refcnt.h"
12 
/* Hard ceiling on a reference count; reaching it triggers an overflow panic. */
#define OS_REFCNT_MAX_COUNT     ((os_ref_count_t)0x0FFFFFFFUL)

#if OS_REFCNT_DEBUG
/* Root group: every refgrp without an explicit parent is attached under it. */
extern struct os_refgrp global_ref_group;
os_refgrp_decl(, global_ref_group, "all", NULL);

/* Global switch for refcount statistics; set when the "rlog" boot-arg exists. */
extern bool ref_debug_enable;
bool ref_debug_enable = false;

/*
 * XXX
 * A hack to allow task refgrps to be implicitly enabled. Once all upstream has
 * been recompiled grp_flags can be used to indicate whether or not a group is
 * enabled by default.
 */
#define REFLOG_GRP_DEBUG_ENABLED(grp) \
    __improbable(grp != NULL && (ref_debug_enable || \
	(grp->grp_name != NULL && strncmp("task", grp->grp_name, 4) == 0)))

/* Capacity (in records) of each per-group backtrace log. */
static const size_t ref_log_nrecords = 1000000;

/* Event tags stored in the btlog for each logged operation. */
__enum_closed_decl(reflog_op_t, uint8_t, {
	REFLOG_RETAIN  = 1,
	REFLOG_RELEASE = 2
});

#define __debug_only
#else
# define __debug_only __unused
#endif /* OS_REFCNT_DEBUG */
43 
/*
 * Panic: a reference the caller asserted would stay live was released down
 * to its final reference. Exported so header fast paths can call it.
 */
void
os_ref_panic_live(void *rc)
{
	panic("os_refcnt: unexpected release of final reference (rc=%p)", rc);
	__builtin_unreachable();
}
50 
/* Panic: more references were released than were ever taken. */
__abortlike
static void
os_ref_panic_underflow(void *rc)
{
	panic("os_refcnt: underflow (rc=%p)", rc);
	__builtin_unreachable();
}
58 
/* Panic: a retain was attempted on an object whose count already hit zero. */
__abortlike
static void
os_ref_panic_resurrection(void *rc)
{
	panic("os_refcnt: attempted resurrection (rc=%p)", rc);
	__builtin_unreachable();
}
66 
/* Panic: the count reached OS_REFCNT_MAX_COUNT (likely a retain leak). */
__abortlike
static void
os_ref_panic_overflow(void *rc)
{
	panic("os_refcnt: overflow (rc=%p)", rc);
	__builtin_unreachable();
}
74 
75 static inline void
os_ref_check_underflow(void * rc,os_ref_count_t count,os_ref_count_t n)76 os_ref_check_underflow(void *rc, os_ref_count_t count, os_ref_count_t n)
77 {
78 	if (__improbable(count < n)) {
79 		os_ref_panic_underflow(rc);
80 	}
81 }
82 
83 static inline void
os_ref_check_overflow(os_ref_atomic_t * rc,os_ref_count_t count)84 os_ref_check_overflow(os_ref_atomic_t *rc, os_ref_count_t count)
85 {
86 	if (__improbable(count >= OS_REFCNT_MAX_COUNT)) {
87 		os_ref_panic_overflow(rc);
88 	}
89 }
90 
91 static inline void
os_ref_check_retain(os_ref_atomic_t * rc,os_ref_count_t count,os_ref_count_t n)92 os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count, os_ref_count_t n)
93 {
94 	if (__improbable(count < n)) {
95 		os_ref_panic_resurrection(rc);
96 	}
97 	os_ref_check_overflow(rc, count);
98 }
99 
100 #if OS_REFCNT_DEBUG
101 #if KERNEL
102 __attribute__((cold, noinline))
103 static void
ref_log_op(struct os_refgrp * grp,void * elem,reflog_op_t op)104 ref_log_op(struct os_refgrp *grp, void *elem, reflog_op_t op)
105 {
106 	if (grp == NULL) {
107 		return;
108 	}
109 
110 	if (grp->grp_log == NULL) {
111 		ref_log_op(grp->grp_parent, elem, op);
112 		return;
113 	}
114 
115 	btlog_record((btlog_t)grp->grp_log, elem, op,
116 	    btref_get(__builtin_frame_address(0), BTREF_GET_NOWAIT));
117 }
118 
119 __attribute__((cold, noinline))
120 static void
ref_log_drop(struct os_refgrp * grp,void * elem)121 ref_log_drop(struct os_refgrp *grp, void *elem)
122 {
123 	if (!REFLOG_GRP_DEBUG_ENABLED(grp)) {
124 		return;
125 	}
126 
127 	if (grp->grp_log == NULL) {
128 		ref_log_drop(grp->grp_parent, elem);
129 		return;
130 	}
131 
132 	btlog_erase(grp->grp_log, elem);
133 }
134 
135 __attribute__((cold, noinline))
136 void
os_ref_log_init(struct os_refgrp * grp)137 os_ref_log_init(struct os_refgrp *grp)
138 {
139 	if (grp->grp_log != NULL) {
140 		return;
141 	}
142 
143 	char grpbuf[128];
144 	char *refgrp = grpbuf;
145 	if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
146 		return;
147 	}
148 
149 	/*
150 	 * Enable refcount statistics if the rlog boot-arg is present,
151 	 * even when no specific group is logged.
152 	 */
153 	ref_debug_enable = true;
154 
155 	const char *g;
156 	while ((g = strsep(&refgrp, ",")) != NULL) {
157 		if (strcmp(g, grp->grp_name) == 0) {
158 			/* enable logging on this refgrp */
159 			grp->grp_log = btlog_create(BTLOG_HASH,
160 			    ref_log_nrecords, 0);
161 			return;
162 		}
163 	}
164 }
165 
166 
167 __attribute__((cold, noinline))
168 void
os_ref_log_fini(struct os_refgrp * grp)169 os_ref_log_fini(struct os_refgrp *grp)
170 {
171 	if (grp->grp_log == NULL) {
172 		return;
173 	}
174 
175 	btlog_destroy(grp->grp_log);
176 	grp->grp_log = NULL;
177 }
178 
179 #else
180 
#ifndef os_ref_log_fini
/* Stub: per-group logging is only implemented for kernel builds. */
inline void
os_ref_log_fini(struct os_refgrp *grp __unused)
{
}
#endif

#ifndef os_ref_log_init
/* Stub: per-group logging is only implemented for kernel builds. */
inline void
os_ref_log_init(struct os_refgrp *grp __unused)
{
}
#endif
#ifndef ref_log_op
/* Stub: retain/release events are not recorded outside the kernel. */
static inline void
ref_log_op(struct os_refgrp *grp __unused, void *rc __unused, reflog_op_t op __unused)
{
}
#endif
#ifndef ref_log_drop
/* Stub: nothing to erase when logging is compiled out. */
static inline void
ref_log_drop(struct os_refgrp *grp __unused, void *rc __unused)
{
}
#endif
206 
207 #endif /* KERNEL */
208 
/*
 * attach a new refcnt to a group
 *
 * Bumps the group's child/count/retain statistics by init_count and
 * propagates the attach up the parent chain. Groups created without an
 * explicit parent are lazily reparented under global_ref_group, which
 * terminates the recursion.
 */
__attribute__((cold, noinline))
static void
ref_attach_to_group(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count)
{
	if (grp == NULL) {
		return;
	}

	if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
		/* First reference count object in this group. Check if we should enable
		 * refcount logging. */
		os_ref_log_init(grp);
	}

	atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
	atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);

	if (grp == &global_ref_group) {
		/* The global group is the root of the hierarchy: stop here. */
		return;
	}

	if (grp->grp_parent == NULL) {
		grp->grp_parent = &global_ref_group;
	}

	ref_attach_to_group(rc, grp->grp_parent, init_count);
}
239 
240 static void
ref_retain_group(struct os_refgrp * grp)241 ref_retain_group(struct os_refgrp *grp)
242 {
243 	if (grp) {
244 		atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
245 		atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
246 		ref_retain_group(grp->grp_parent);
247 	}
248 }
249 
250 __attribute__((cold, noinline))
251 static void
ref_release_group(struct os_refgrp * grp)252 ref_release_group(struct os_refgrp *grp)
253 {
254 	if (grp) {
255 		atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
256 		atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);
257 
258 		ref_release_group(grp->grp_parent);
259 	}
260 }
261 
262 __attribute__((cold, noinline))
263 static void
ref_drop_group(struct os_refgrp * grp)264 ref_drop_group(struct os_refgrp *grp)
265 {
266 	if (grp) {
267 		atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
268 		ref_drop_group(grp->grp_parent);
269 	}
270 }
271 
272 __attribute__((cold, noinline))
273 static void
ref_init_debug(os_ref_atomic_t * rc,struct os_refgrp * __debug_only grp,os_ref_count_t count)274 ref_init_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
275 {
276 	ref_attach_to_group(rc, grp, count);
277 
278 	for (os_ref_count_t i = 0; i < count; i++) {
279 		ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
280 	}
281 }
282 
/*
 * Debug-path retain: update group statistics, then record the retain
 * (with a backtrace) in the group's log chain.
 */
__attribute__((cold, noinline))
static void
ref_retain_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	ref_retain_group(grp);
	ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
}
290 #endif
291 
/*
 * Initialize a refcount to `count`. A zero initial count panics as an
 * underflow — a freshly initialized refcount must hold at least one
 * reference.
 */
void
os_ref_init_count_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
	os_ref_check_underflow(rc, count, 1);
	atomic_init(rc, count);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_init_debug(rc, grp, count);
	}
#endif
}
304 
305 static inline void
__os_ref_retain(os_ref_atomic_t * rc,os_ref_count_t f,struct os_refgrp * __debug_only grp)306 __os_ref_retain(os_ref_atomic_t *rc, os_ref_count_t f,
307     struct os_refgrp * __debug_only grp)
308 {
309 	os_ref_count_t old = atomic_fetch_add_explicit(rc, 1, memory_order_relaxed);
310 	os_ref_check_retain(rc, old, f);
311 
312 #if OS_REFCNT_DEBUG
313 	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
314 		ref_retain_debug(rc, grp);
315 	}
316 #endif
317 }
318 
/* Retain with the default floor of 1: retaining a dead object panics. */
void
os_ref_retain_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	__os_ref_retain(rc, 1, grp);
}
324 
/* Retain with a caller-supplied floor `f`: panics if the count was below f. */
void
os_ref_retain_floor_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	__os_ref_retain(rc, f, grp);
}
331 
/*
 * Conditional retain: atomically increment only if the current count is at
 * least `f`; returns false (count untouched) when below the floor. Unlike
 * __os_ref_retain, failure is reported to the caller instead of panicking.
 */
static inline bool
__os_ref_retain_try(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp * __debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur < f)) {
		        os_atomic_rmw_loop_give_up(return false);
		}

		next = cur + 1;
	});

	/* `cur` is the pre-increment value the CAS succeeded against. */
	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}
356 
/* Try-retain with floor 1: fails (returns false) on a dead object. */
bool
os_ref_retain_try_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	return __os_ref_retain_try(rc, 1, grp);
}
362 
/* Try-retain with caller-supplied floor `f`: fails below the floor. */
bool
os_ref_retain_floor_try_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	return __os_ref_retain_try(rc, f, grp);
}
369 
/*
 * Common release path: subtract `n` with `release_order`, panic on
 * underflow, and return the new count. When the drop leaves fewer than `n`
 * references (i.e. the last full reference went away) an extra load with
 * `dealloc_order` is issued so the caller can safely deallocate after a
 * zero return. always_inline so constant memory orders fold away.
 */
__attribute__((always_inline))
static inline os_ref_count_t
_os_ref_release_inline(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	os_ref_count_t val;

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/*
		 * Care not to use 'rc' after the decrement because it might be deallocated
		 * under us.
		 */
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
		ref_release_group(grp);
	}
#endif

	val = atomic_fetch_sub_explicit(rc, n, release_order);
	os_ref_check_underflow(rc, val, n);
	val -= n;
	if (__improbable(val < n)) {
		/* Last full reference dropped: synchronize for deallocation. */
		atomic_load_explicit(rc, dealloc_order);
	}

#if OS_REFCNT_DEBUG
	/*
	 * The only way to safely access the ref count or group after
	 * decrementing the count is when the count is zero (as the caller won't
	 * see the zero until the function returns).
	 */
	if (val == 0 && (REFLOG_GRP_DEBUG_ENABLED(grp))) {
		ref_drop_group(grp);
		ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
	}
#endif

	return val;
}
410 
#if OS_REFCNT_DEBUG
/*
 * Out-of-line release of `n` references; taking the memory orders as
 * runtime arguments prevents them from being immediate barriers.
 */
__attribute__((noinline))
static os_ref_count_t
os_ref_release_n_internal(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, n, grp, release_order, dealloc_order);
}
#endif
425 
/*
 * Out-of-line release of a single reference (see note below about the
 * runtime memory-order arguments); also used as the debug slow path.
 */
__attribute__((noinline))
os_ref_count_t
os_ref_release_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, 1, grp, release_order, dealloc_order);
}
437 
/*
 * Release one reference with release/acquire ordering: a zero return value
 * is safe to follow with deallocation of the object.
 */
os_ref_count_t
os_ref_release_barrier_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/* Debug path: route through the noinline variant for accounting. */
		return os_ref_release_internal(rc, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_release, memory_order_acquire);
}
451 
/*
 * Release one reference with relaxed ordering: no synchronization is
 * implied, so callers must provide their own before deallocating.
 */
os_ref_count_t
os_ref_release_relaxed_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/* Debug path: route through the noinline variant for accounting. */
		return os_ref_release_internal(rc, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}
465 
466 static inline void
__os_ref_retain_locked(os_ref_atomic_t * rc,os_ref_count_t f,struct os_refgrp * __debug_only grp)467 __os_ref_retain_locked(os_ref_atomic_t *rc, os_ref_count_t f,
468     struct os_refgrp * __debug_only grp)
469 {
470 	os_ref_count_t val = os_ref_get_count_internal(rc);
471 	os_ref_check_retain(rc, val, f);
472 	atomic_store_explicit(rc, ++val, memory_order_relaxed);
473 
474 #if OS_REFCNT_DEBUG
475 	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
476 		ref_retain_debug(rc, grp);
477 	}
478 #endif
479 }
480 
/* Locked retain with the default floor of 1. */
void
os_ref_retain_locked_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	__os_ref_retain_locked(rc, 1, grp);
}
486 
/* Locked retain with a caller-supplied floor `f`. */
void
os_ref_retain_floor_locked_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	__os_ref_retain_locked(rc, f, grp);
}
493 
/*
 * Release for externally serialized counts: plain read-check-store, so the
 * caller's lock must serialize all updates of this refcount. Returns the
 * new count; panics on underflow.
 */
os_ref_count_t
os_ref_release_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/* Log before the store: rc may be freed once the count drops. */
		ref_release_group(grp);
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
	}
#endif

	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_underflow(rc, val, 1);
	atomic_store_explicit(rc, --val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (val == 0 && (REFLOG_GRP_DEBUG_ENABLED(grp))) {
		ref_drop_group(grp);
		ref_log_drop(grp, (void *)rc);
	}
#endif

	return val;
}
517 
518 /*
519  * Bitwise API
520  */
521 
#undef os_ref_init_count_mask
/*
 * Initialize a masked refcount: the low `b` bits hold the caller's flag
 * bits (`init_bits`, which must fit in b bits) and the actual reference
 * count lives in the bits above them, starting at `init_count`.
 */
void
os_ref_init_count_mask(os_ref_atomic_t *rc, uint32_t b,
    struct os_refgrp *__debug_only grp,
    os_ref_count_t init_count, uint32_t init_bits)
{
	assert(init_bits < (1U << b));
	atomic_init(rc, (init_count << b) | init_bits);
	/* Underflow check on the shifted count: at least one full reference. */
	os_ref_check_underflow(rc, (init_count << b), 1u << b);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_init_debug(rc, grp, init_count);
	}
#endif
}
538 
/*
 * Masked retain: adds `n` to the raw value (callers pass n already scaled
 * past the flag bits, e.g. 1 << b for one reference) with memory order
 * `mo`; panics if the previous raw value was below n or at the ceiling.
 */
__attribute__((always_inline))
static inline void
os_ref_retain_mask_inline(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp, memory_order mo)
{
	os_ref_count_t old = atomic_fetch_add_explicit(rc, n, mo);
	os_ref_check_retain(rc, old, n);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif
}
553 
/* Masked retain with relaxed ordering. */
void
os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_relaxed);
}
560 
/* Masked retain with acquire ordering. */
void
os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_acquire);
}
567 
/*
 * Masked release of `n` (raw units) with release/acquire ordering; returns
 * the new raw value so callers can inspect remaining count and flag bits.
 */
uint32_t
os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/* Debug path: route through the noinline variant for accounting. */
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_release, memory_order_acquire);
}
582 
/*
 * Masked release of `n` (raw units) with relaxed ordering; callers must
 * provide their own synchronization before deallocating.
 */
uint32_t
os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/* Debug path: route through the noinline variant for accounting. */
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}
597 
/*
 * Masked try-retain (relaxed): add `n` only if the raw value is at least n
 * and none of the `reject_mask` bits are set. Returns the new raw value on
 * success, 0 on failure (the count is left untouched).
 */
uint32_t
os_ref_retain_try_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur < n || (cur & reject_mask))) {
		        os_atomic_rmw_loop_give_up(return 0);
		}
		next = cur + n;
	});

	/* `cur` is the pre-increment value the CAS succeeded against. */
	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return next;
}
621 
/*
 * Masked try-retain with acquire ordering: same admission rule as the
 * relaxed variant (raw value >= n and no reject_mask bit set) but returns
 * only success/failure instead of the new value.
 */
bool
os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, acquire, {
		if (__improbable(cur < n || (cur & reject_mask))) {
		        os_atomic_rmw_loop_give_up(return false);
		}
		next = cur + n;
	});

	/* `cur` is the pre-increment value the CAS succeeded against. */
	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}
645