xref: /xnu-8796.101.5/libkern/os/refcnt.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 #if KERNEL
2 #include <kern/assert.h>
3 #include <kern/debug.h>
4 #include <pexpert/pexpert.h>
5 #include <kern/btlog.h>
6 #include <kern/backtrace.h>
7 #include <libkern/libkern.h>
8 #endif
9 #include <os/atomic_private.h>
10 
11 #include "refcnt.h"
12 
13 #define OS_REFCNT_MAX_COUNT     ((os_ref_count_t)0x0FFFFFFFUL)
14 
15 #if OS_REFCNT_DEBUG
16 extern struct os_refgrp global_ref_group;
17 os_refgrp_decl(, global_ref_group, "all", NULL);
18 
19 extern bool ref_debug_enable;
20 bool ref_debug_enable = false;
21 
22 #define REFLOG_GRP_DEBUG_ENABLED(grp) \
23     __improbable(grp != NULL && (ref_debug_enable || \
24 	(grp->grp_flags & OS_REFGRP_F_ALWAYS_ENABLED) != 0))
25 
26 static const size_t ref_log_nrecords = 1000000;
27 
28 __enum_closed_decl(reflog_op_t, uint8_t, {
29 	REFLOG_RETAIN  = 1,
30 	REFLOG_RELEASE = 2
31 });
32 
33 #define __debug_only
34 #else
35 # define __debug_only __unused
36 #endif /* OS_REFCNT_DEBUG */
37 
/*
 * Panic path for an unexpected drop of the final reference
 * (e.g. a release on a count that should still be live).
 * Public so inline fast paths in the header can branch to it.
 */
void
os_ref_panic_live(void *rc)
{
	panic("os_refcnt: unexpected release of final reference (rc=%p)", rc);
	__builtin_unreachable();
}
44 
/* Panic path for a reference count that would go below zero. */
__abortlike
static void
os_ref_panic_underflow(void *rc)
{
	panic("os_refcnt: underflow (rc=%p)", rc);
	__builtin_unreachable();
}
52 
/* Panic path for a reference count that reached OS_REFCNT_MAX_COUNT. */
__abortlike
static void
os_ref_panic_overflow(void *rc)
{
	panic("os_refcnt: overflow (rc=%p)", rc);
	__builtin_unreachable();
}
60 
61 __abortlike
62 static void
os_ref_panic_retain(os_ref_atomic_t * rc)63 os_ref_panic_retain(os_ref_atomic_t *rc)
64 {
65 	if (os_atomic_load(rc, relaxed) >= OS_REFCNT_MAX_COUNT) {
66 		panic("os_refcnt: overflow (rc=%p)", rc);
67 	} else {
68 		panic("os_refcnt: attempted resurrection (rc=%p)", rc);
69 	}
70 }
71 
/*
 * Panic unless 'count' (a pre-decrement snapshot) holds at least 'n'
 * references.  Marked improbable so the fast path stays branch-free.
 */
static inline void
os_ref_check_underflow(void *rc, os_ref_count_t count, os_ref_count_t n)
{
	if (__improbable(count < n)) {
		os_ref_panic_underflow(rc);
	}
}
79 
/* Panic if 'count' has reached the saturation ceiling. */
static inline void
os_ref_check_overflow(os_ref_atomic_t *rc, os_ref_count_t count)
{
	if (__improbable(count >= OS_REFCNT_MAX_COUNT)) {
		os_ref_panic_overflow(rc);
	}
}
87 
/*
 * Combined retain sanity check: 'count' (the pre-increment value) must
 * be at least the floor 'n' (else resurrection) and below the ceiling
 * (else overflow).  os_ref_panic_retain() re-reads *rc to decide which.
 */
static inline void
os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count, os_ref_count_t n)
{
	if (__improbable(count < n || count >= OS_REFCNT_MAX_COUNT)) {
		os_ref_panic_retain(rc);
	}
}
95 
96 #if OS_REFCNT_DEBUG
97 #if KERNEL
98 __attribute__((cold, noinline))
99 static void
ref_log_op(struct os_refgrp * grp,void * elem,reflog_op_t op)100 ref_log_op(struct os_refgrp *grp, void *elem, reflog_op_t op)
101 {
102 	if (grp == NULL) {
103 		return;
104 	}
105 
106 	if (grp->grp_log == NULL) {
107 		ref_log_op(grp->grp_parent, elem, op);
108 		return;
109 	}
110 
111 	btlog_record((btlog_t)grp->grp_log, elem, op,
112 	    btref_get(__builtin_frame_address(0), BTREF_GET_NOWAIT));
113 }
114 
115 __attribute__((cold, noinline))
116 static void
ref_log_drop(struct os_refgrp * grp,void * elem)117 ref_log_drop(struct os_refgrp *grp, void *elem)
118 {
119 	if (!REFLOG_GRP_DEBUG_ENABLED(grp)) {
120 		return;
121 	}
122 
123 	if (grp->grp_log == NULL) {
124 		ref_log_drop(grp->grp_parent, elem);
125 		return;
126 	}
127 
128 	btlog_erase(grp->grp_log, elem);
129 }
130 
131 __attribute__((cold, noinline))
132 void
os_ref_log_init(struct os_refgrp * grp)133 os_ref_log_init(struct os_refgrp *grp)
134 {
135 	if (grp->grp_log != NULL) {
136 		return;
137 	}
138 
139 	char grpbuf[128];
140 	char *refgrp = grpbuf;
141 	if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
142 		return;
143 	}
144 
145 	/*
146 	 * Enable refcount statistics if the rlog boot-arg is present,
147 	 * even when no specific group is logged.
148 	 */
149 	ref_debug_enable = true;
150 
151 	const char *g;
152 	while ((g = strsep(&refgrp, ",")) != NULL) {
153 		if (strcmp(g, grp->grp_name) == 0) {
154 			/* enable logging on this refgrp */
155 			grp->grp_log = btlog_create(BTLOG_HASH,
156 			    ref_log_nrecords, 0);
157 			return;
158 		}
159 	}
160 }
161 
162 
163 __attribute__((cold, noinline))
164 void
os_ref_log_fini(struct os_refgrp * grp)165 os_ref_log_fini(struct os_refgrp *grp)
166 {
167 	if (grp->grp_log == NULL) {
168 		return;
169 	}
170 
171 	btlog_destroy(grp->grp_log);
172 	grp->grp_log = NULL;
173 }
174 
175 #else
176 
177 #ifndef os_ref_log_fini
/* No-op stand-in when building outside the kernel (no btlog support). */
inline void
os_ref_log_fini(struct os_refgrp *grp __unused)
{
}
182 #endif
183 
184 #ifndef os_ref_log_init
/* No-op stand-in when building outside the kernel (no boot-args/btlog). */
inline void
os_ref_log_init(struct os_refgrp *grp __unused)
{
}
189 #endif
190 #ifndef ref_log_op
/* No-op stand-in when building outside the kernel. */
static inline void
ref_log_op(struct os_refgrp *grp __unused, void *rc __unused, reflog_op_t op __unused)
{
}
195 #endif
196 #ifndef ref_log_drop
/* No-op stand-in when building outside the kernel. */
static inline void
ref_log_drop(struct os_refgrp *grp __unused, void *rc __unused)
{
}
201 #endif
202 
203 #endif /* KERNEL */
204 
205 /*
206  * attach a new refcnt to a group
207  */
208 __attribute__((cold, noinline))
209 static void
ref_attach_to_group(os_ref_atomic_t * rc,struct os_refgrp * grp,os_ref_count_t init_count)210 ref_attach_to_group(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count)
211 {
212 	if (grp == NULL) {
213 		return;
214 	}
215 
216 	if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
217 		/* First reference count object in this group. Check if we should enable
218 		 * refcount logging. */
219 		os_ref_log_init(grp);
220 	}
221 
222 	atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
223 	atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);
224 
225 	if (grp == &global_ref_group) {
226 		return;
227 	}
228 
229 	if (grp->grp_parent == NULL) {
230 		grp->grp_parent = &global_ref_group;
231 	}
232 
233 	ref_attach_to_group(rc, grp->grp_parent, init_count);
234 }
235 
236 static void
ref_retain_group(struct os_refgrp * grp)237 ref_retain_group(struct os_refgrp *grp)
238 {
239 	if (grp) {
240 		atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
241 		atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
242 		ref_retain_group(grp->grp_parent);
243 	}
244 }
245 
246 __attribute__((cold, noinline))
247 static void
ref_release_group(struct os_refgrp * grp)248 ref_release_group(struct os_refgrp *grp)
249 {
250 	if (grp) {
251 		atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
252 		atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);
253 
254 		ref_release_group(grp->grp_parent);
255 	}
256 }
257 
258 __attribute__((cold, noinline))
259 static void
ref_drop_group(struct os_refgrp * grp)260 ref_drop_group(struct os_refgrp *grp)
261 {
262 	if (grp) {
263 		atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
264 		ref_drop_group(grp->grp_parent);
265 	}
266 }
267 
268 __attribute__((cold, noinline))
269 static void
ref_init_debug(os_ref_atomic_t * rc,struct os_refgrp * __debug_only grp,os_ref_count_t count)270 ref_init_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
271 {
272 	ref_attach_to_group(rc, grp, count);
273 
274 	for (os_ref_count_t i = 0; i < count; i++) {
275 		ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
276 	}
277 }
278 
/*
 * Debug bookkeeping for a single retain: bump the group statistics and
 * append a RETAIN record to the log (rc is only used as an identifier).
 */
__attribute__((cold, noinline))
static void
ref_retain_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	ref_retain_group(grp);
	ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
}
286 #endif
287 
/*
 * Initialize *rc to 'count' and, when debugging is enabled for 'grp',
 * attach the refcount to the group and log its initial retains.
 */
void
os_ref_init_count_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
	/* a refcount must be born with at least one reference */
	os_ref_check_underflow(rc, count, 1);
	atomic_init(rc, count);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_init_debug(rc, grp, count);
	}
#endif
}
300 
301 static inline void
__os_ref_retain(os_ref_atomic_t * rc,os_ref_count_t f,struct os_refgrp * __debug_only grp)302 __os_ref_retain(os_ref_atomic_t *rc, os_ref_count_t f,
303     struct os_refgrp * __debug_only grp)
304 {
305 	os_ref_count_t old = atomic_fetch_add_explicit(rc, 1, memory_order_relaxed);
306 	os_ref_check_retain(rc, old, f);
307 
308 #if OS_REFCNT_DEBUG
309 	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
310 		ref_retain_debug(rc, grp);
311 	}
312 #endif
313 }
314 
/* Standard retain: floor of 1, i.e. the object must still be live. */
void
os_ref_retain_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	__os_ref_retain(rc, 1, grp);
}
320 
/* Retain with a caller-supplied floor 'f' the prior count must meet. */
void
os_ref_retain_floor_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	__os_ref_retain(rc, f, grp);
}
327 
/*
 * Conditional retain: atomically increment *rc unless the current count
 * is below the floor 'f' (for f == 1 that means the object is already
 * dead).  Returns false, leaving the count untouched, in that case.
 */
static inline bool
__os_ref_retain_try(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp * __debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur < f)) {
		        os_atomic_rmw_loop_give_up(return false);
		}

		next = cur + 1;
	});

	/* 'cur' is the pre-increment value: panic if it was at the ceiling */
	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}
352 
/* Try-retain with floor 1: fails only if the object already died. */
bool
os_ref_retain_try_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	return __os_ref_retain_try(rc, 1, grp);
}
358 
/* Try-retain with a caller-supplied floor 'f'. */
bool
os_ref_retain_floor_try_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	return __os_ref_retain_try(rc, f, grp);
}
365 
/*
 * Common release path: subtract 'n' from *rc with 'release_order' and
 * return the new value.  When the count drops below 'n' (for n == 1,
 * reaches zero), an extra load with 'dealloc_order' is issued so the
 * caller can safely deallocate the object afterwards.
 */
__attribute__((always_inline))
static inline os_ref_count_t
_os_ref_release_inline(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	os_ref_count_t val;

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/*
		 * Care not to use 'rc' after the decrement because it might be deallocated
		 * under us.
		 */
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
		ref_release_group(grp);
	}
#endif

	val = atomic_fetch_sub_explicit(rc, n, release_order);
	/* 'val' is the pre-decrement value: it must have held >= n refs */
	os_ref_check_underflow(rc, val, n);
	val -= n;
	if (__improbable(val < n)) {
		/*
		 * Last reference went away: issue the dealloc-side fence
		 * before the caller tears the object down.  (For masked
		 * counts, val < n presumably means only flag bits remain
		 * — see the mask variants below.)
		 */
		atomic_load_explicit(rc, dealloc_order);
	}

#if OS_REFCNT_DEBUG
	/*
	 * The only way to safely access the ref count or group after
	 * decrementing the count is when the count is zero (as the caller won't
	 * see the zero until the function returns).
	 */
	if (val == 0 && (REFLOG_GRP_DEBUG_ENABLED(grp))) {
		ref_drop_group(grp);
		ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
	}
#endif

	return val;
}
406 
407 #if OS_REFCNT_DEBUG
/*
 * Out-of-line release of 'n' references; used by the masked variants
 * when group debugging is enabled.
 */
__attribute__((noinline))
static os_ref_count_t
os_ref_release_n_internal(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, n, grp, release_order, dealloc_order);
}
420 #endif
421 
/*
 * Out-of-line single-reference release with caller-chosen memory
 * orders; also the slow path taken when group debugging is enabled.
 */
__attribute__((noinline))
os_ref_count_t
os_ref_release_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, 1, grp, release_order, dealloc_order);
}
433 
/*
 * Release one reference with release ordering, plus an acquire on the
 * final release so the deallocating caller synchronizes with all prior
 * releasers.  Returns the new count.
 */
os_ref_count_t
os_ref_release_barrier_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/* out-of-line path keeps the debug bookkeeping */
		return os_ref_release_internal(rc, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_release, memory_order_acquire);
}
447 
/*
 * Release one reference with no ordering guarantees; for callers that
 * provide their own synchronization.  Returns the new count.
 */
os_ref_count_t
os_ref_release_relaxed_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/* out-of-line path keeps the debug bookkeeping */
		return os_ref_release_internal(rc, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}
461 
462 static inline void
__os_ref_retain_locked(os_ref_atomic_t * rc,os_ref_count_t f,struct os_refgrp * __debug_only grp)463 __os_ref_retain_locked(os_ref_atomic_t *rc, os_ref_count_t f,
464     struct os_refgrp * __debug_only grp)
465 {
466 	os_ref_count_t val = os_ref_get_count_internal(rc);
467 	os_ref_check_retain(rc, val, f);
468 	atomic_store_explicit(rc, ++val, memory_order_relaxed);
469 
470 #if OS_REFCNT_DEBUG
471 	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
472 		ref_retain_debug(rc, grp);
473 	}
474 #endif
475 }
476 
/* Locked retain with floor 1: the object must still be live. */
void
os_ref_retain_locked_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	__os_ref_retain_locked(rc, 1, grp);
}
482 
/* Locked retain with a caller-supplied floor 'f'. */
void
os_ref_retain_floor_locked_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	__os_ref_retain_locked(rc, f, grp);
}
489 
/*
 * Release while the caller serializes access to 'rc' externally:
 * plain load / check / store, relaxed ordering.  Returns the new count.
 */
os_ref_count_t
os_ref_release_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	/* log/account before the decrement, mirroring the atomic path */
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_release_group(grp);
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
	}
#endif

	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_underflow(rc, val, 1);
	atomic_store_explicit(rc, --val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	/* only on the final release is it safe to touch grp/rc afterwards */
	if (val == 0 && (REFLOG_GRP_DEBUG_ENABLED(grp))) {
		ref_drop_group(grp);
		ref_log_drop(grp, (void *)rc);
	}
#endif

	return val;
}
513 
514 /*
515  * Bitwise API
516  */
517 
#undef os_ref_init_count_mask
/*
 * Bitwise variant: the low 'b' bits of *rc are caller-owned flag bits,
 * the remaining bits hold the reference count.  Initializes *rc to
 * (init_count << b) | init_bits.
 */
void
os_ref_init_count_mask(os_ref_atomic_t *rc, uint32_t b,
    struct os_refgrp *__debug_only grp,
    os_ref_count_t init_count, uint32_t init_bits)
{
	assert(init_bits < (1U << b));
	atomic_init(rc, (init_count << b) | init_bits);
	/* the scaled count must represent at least one reference */
	os_ref_check_underflow(rc, (init_count << b), 1u << b);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_init_debug(rc, grp, init_count);
	}
#endif
}
534 
535 __attribute__((always_inline))
536 static inline void
os_ref_retain_mask_inline(os_ref_atomic_t * rc,uint32_t n,struct os_refgrp * __debug_only grp,memory_order mo)537 os_ref_retain_mask_inline(os_ref_atomic_t *rc, uint32_t n,
538     struct os_refgrp *__debug_only grp, memory_order mo)
539 {
540 	os_ref_count_t old = atomic_fetch_add_explicit(rc, n, mo);
541 	os_ref_check_retain(rc, old, n);
542 
543 #if OS_REFCNT_DEBUG
544 	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
545 		ref_retain_debug(rc, grp);
546 	}
547 #endif
548 }
549 
/* Masked retain with relaxed ordering. */
void
os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_relaxed);
}
556 
/* Masked retain with acquire ordering. */
void
os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_acquire);
}
563 
/*
 * Masked release with release/acquire ordering.  Returns the new
 * combined count/bits word.
 */
uint32_t
os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/* out-of-line path keeps the debug bookkeeping */
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_release, memory_order_acquire);
}
578 
/*
 * Masked release with relaxed ordering; for callers that synchronize
 * externally.  Returns the new combined count/bits word.
 */
uint32_t
os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/* out-of-line path keeps the debug bookkeeping */
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}
593 
/*
 * Masked try-retain: add 'n' unless the word is below 'n' (object dead)
 * or any bit in 'reject_mask' is set.  Returns the new word on success,
 * 0 on failure (the word is left untouched).
 */
uint32_t
os_ref_retain_try_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur < n || (cur & reject_mask))) {
		        os_atomic_rmw_loop_give_up(return 0);
		}
		next = cur + n;
	});

	/* 'cur' is the pre-increment value: panic if it hit the ceiling */
	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return next;
}
617 
/*
 * Same as os_ref_retain_try_mask_internal but with acquire ordering on
 * success, returning a plain success/failure flag instead of the word.
 */
bool
os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, acquire, {
		if (__improbable(cur < n || (cur & reject_mask))) {
		        os_atomic_rmw_loop_give_up(return false);
		}
		next = cur + n;
	});

	/* 'cur' is the pre-increment value: panic if it hit the ceiling */
	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}
641