1 #if KERNEL
2 #include <kern/assert.h>
3 #include <kern/debug.h>
4 #include <pexpert/pexpert.h>
5 #include <kern/btlog.h>
6 #include <kern/backtrace.h>
7 #include <libkern/libkern.h>
8 #endif
9 #include <os/atomic_private.h>
10
11 #include "refcnt.h"
12
13 #define OS_REFCNT_MAX_COUNT ((os_ref_count_t)0x0FFFFFFFUL)
14
15 #if OS_REFCNT_DEBUG
16 extern struct os_refgrp global_ref_group;
17 os_refgrp_decl(, global_ref_group, "all", NULL);
18
19 extern bool ref_debug_enable;
20 bool ref_debug_enable = false;
21
22 /*
23 * XXX
24 * A hack to allow task refgrps to be implicitly enabled. Once all upstream has
25 * been recompiled grp_flags can be used to indicate whether or not a group is
26 * enabled by default.
27 */
/*
 * True when refcount logging/statistics should be applied to 'grp'.
 * Macro hygiene: parenthesize every use of the argument so that any
 * expression-valued argument expands safely.
 */
#define REFLOG_GRP_DEBUG_ENABLED(grp) \
	__improbable((grp) != NULL && (ref_debug_enable || \
	((grp)->grp_name != NULL && strncmp("task", (grp)->grp_name, 4) == 0)))
31
32 static const size_t ref_log_nrecords = 1000000;
33
34 __enum_closed_decl(reflog_op_t, uint8_t, {
35 REFLOG_RETAIN = 1,
36 REFLOG_RELEASE = 2
37 });
38
39 #define __debug_only
40 #else
41 # define __debug_only __unused
42 #endif /* OS_REFCNT_DEBUG */
43
/*
 * Panic path for os_ref_release_live(): the caller asserted the reference
 * was not the last one, but the count dropped to zero.
 */
void
os_ref_panic_live(void *rc)
{
	panic("os_refcnt: unexpected release of final reference (rc=%p)", rc);
	__builtin_unreachable();
}
50
/* Panic path: a release observed a count already below the legal floor. */
__abortlike
static void
os_ref_panic_underflow(void *rc)
{
	panic("os_refcnt: underflow (rc=%p)", rc);
	__builtin_unreachable();
}
58
/* Panic path: the count reached OS_REFCNT_MAX_COUNT. */
__abortlike
static void
os_ref_panic_overflow(void *rc)
{
	panic("os_refcnt: overflow (rc=%p)", rc);
	__builtin_unreachable();
}
66
/*
 * Panic path for a failed retain: distinguish overflow (count pegged at the
 * maximum) from a retain of an already-dead object (attempted resurrection)
 * by re-reading the current count.
 */
__abortlike
static void
os_ref_panic_retain(os_ref_atomic_t *rc)
{
	if (os_atomic_load(rc, relaxed) >= OS_REFCNT_MAX_COUNT) {
		panic("os_refcnt: overflow (rc=%p)", rc);
	} else {
		panic("os_refcnt: attempted resurrection (rc=%p)", rc);
	}
}
77
/* Panic if 'count' fell below the required floor 'n'. */
static inline void
os_ref_check_underflow(void *rc, os_ref_count_t count, os_ref_count_t n)
{
	if (__improbable(count < n)) {
		os_ref_panic_underflow(rc);
	}
}
85
/* Panic if 'count' reached the saturation limit. */
static inline void
os_ref_check_overflow(os_ref_atomic_t *rc, os_ref_count_t count)
{
	if (__improbable(count >= OS_REFCNT_MAX_COUNT)) {
		os_ref_panic_overflow(rc);
	}
}
93
/*
 * Validate the pre-retain count: it must be at least the floor 'n'
 * (otherwise the object was already dead) and below the overflow limit.
 */
static inline void
os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count, os_ref_count_t n)
{
	if (__improbable(count < n || count >= OS_REFCNT_MAX_COUNT)) {
		os_ref_panic_retain(rc);
	}
}
101
102 #if OS_REFCNT_DEBUG
103 #if KERNEL
/*
 * Record a retain/release event for 'elem' in the nearest ancestor group
 * that has a btlog attached; silently does nothing if no group in the
 * parent chain is logging.
 */
__attribute__((cold, noinline))
static void
ref_log_op(struct os_refgrp *grp, void *elem, reflog_op_t op)
{
	if (grp == NULL) {
		return;
	}

	if (grp->grp_log == NULL) {
		/* Walk up to the parent group looking for an attached log. */
		ref_log_op(grp->grp_parent, elem, op);
		return;
	}

	btlog_record((btlog_t)grp->grp_log, elem, op,
	    btref_get(__builtin_frame_address(0), BTREF_GET_NOWAIT));
}
120
/*
 * Erase all log records for 'elem' once its count hits zero, from the
 * nearest logging ancestor group (mirrors the lookup in ref_log_op).
 */
__attribute__((cold, noinline))
static void
ref_log_drop(struct os_refgrp *grp, void *elem)
{
	if (!REFLOG_GRP_DEBUG_ENABLED(grp)) {
		return;
	}

	if (grp->grp_log == NULL) {
		ref_log_drop(grp->grp_parent, elem);
		return;
	}

	btlog_erase(grp->grp_log, elem);
}
136
/*
 * Lazily create a btlog for 'grp' if its name appears in the comma-separated
 * "rlog" boot-arg. Called on the first refcount attached to the group.
 */
__attribute__((cold, noinline))
void
os_ref_log_init(struct os_refgrp *grp)
{
	if (grp->grp_log != NULL) {
		/* Already initialized. */
		return;
	}

	char grpbuf[128];
	char *refgrp = grpbuf;
	if (!PE_parse_boot_argn("rlog", refgrp, sizeof(grpbuf))) {
		return;
	}

	/*
	 * Enable refcount statistics if the rlog boot-arg is present,
	 * even when no specific group is logged.
	 */
	ref_debug_enable = true;

	const char *g;
	while ((g = strsep(&refgrp, ",")) != NULL) {
		if (strcmp(g, grp->grp_name) == 0) {
			/* enable logging on this refgrp */
			grp->grp_log = btlog_create(BTLOG_HASH,
			    ref_log_nrecords, 0);
			return;
		}
	}
}
167
168
169 __attribute__((cold, noinline))
170 void
os_ref_log_fini(struct os_refgrp * grp)171 os_ref_log_fini(struct os_refgrp *grp)
172 {
173 if (grp->grp_log == NULL) {
174 return;
175 }
176
177 btlog_destroy(grp->grp_log);
178 grp->grp_log = NULL;
179 }
180
181 #else
182
/*
 * Non-KERNEL build: refcount logging is compiled out. Provide empty
 * stand-ins (unless the header already defined macro replacements) so the
 * common paths below can call them unconditionally.
 */
#ifndef os_ref_log_fini
inline void
os_ref_log_fini(struct os_refgrp *grp __unused)
{
}
#endif

#ifndef os_ref_log_init
inline void
os_ref_log_init(struct os_refgrp *grp __unused)
{
}
#endif
#ifndef ref_log_op
static inline void
ref_log_op(struct os_refgrp *grp __unused, void *rc __unused, reflog_op_t op __unused)
{
}
#endif
#ifndef ref_log_drop
static inline void
ref_log_drop(struct os_refgrp *grp __unused, void *rc __unused)
{
}
#endif
208
209 #endif /* KERNEL */
210
211 /*
212 * attach a new refcnt to a group
213 */
/*
 * attach a new refcnt to a group
 *
 * Credits 'init_count' retains to 'grp' and every ancestor, bumping each
 * group's child count. The first child attached to a group triggers
 * os_ref_log_init(). Groups with no explicit parent are reparented to
 * global_ref_group, which terminates the recursion.
 */
__attribute__((cold, noinline))
static void
ref_attach_to_group(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count)
{
	if (grp == NULL) {
		return;
	}

	if (atomic_fetch_add_explicit(&grp->grp_children, 1, memory_order_relaxed) == 0) {
		/* First reference count object in this group. Check if we should enable
		 * refcount logging. */
		os_ref_log_init(grp);
	}

	atomic_fetch_add_explicit(&grp->grp_count, init_count, memory_order_relaxed);
	atomic_fetch_add_explicit(&grp->grp_retain_total, init_count, memory_order_relaxed);

	if (grp == &global_ref_group) {
		/* The root group: nothing further to attach to. */
		return;
	}

	if (grp->grp_parent == NULL) {
		grp->grp_parent = &global_ref_group;
	}

	ref_attach_to_group(rc, grp->grp_parent, init_count);
}
241
242 static void
ref_retain_group(struct os_refgrp * grp)243 ref_retain_group(struct os_refgrp *grp)
244 {
245 if (grp) {
246 atomic_fetch_add_explicit(&grp->grp_count, 1, memory_order_relaxed);
247 atomic_fetch_add_explicit(&grp->grp_retain_total, 1, memory_order_relaxed);
248 ref_retain_group(grp->grp_parent);
249 }
250 }
251
252 __attribute__((cold, noinline))
253 static void
ref_release_group(struct os_refgrp * grp)254 ref_release_group(struct os_refgrp *grp)
255 {
256 if (grp) {
257 atomic_fetch_sub_explicit(&grp->grp_count, 1, memory_order_relaxed);
258 atomic_fetch_add_explicit(&grp->grp_release_total, 1, memory_order_relaxed);
259
260 ref_release_group(grp->grp_parent);
261 }
262 }
263
264 __attribute__((cold, noinline))
265 static void
ref_drop_group(struct os_refgrp * grp)266 ref_drop_group(struct os_refgrp *grp)
267 {
268 if (grp) {
269 atomic_fetch_sub_explicit(&grp->grp_children, 1, memory_order_relaxed);
270 ref_drop_group(grp->grp_parent);
271 }
272 }
273
274 __attribute__((cold, noinline))
275 static void
ref_init_debug(os_ref_atomic_t * rc,struct os_refgrp * __debug_only grp,os_ref_count_t count)276 ref_init_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
277 {
278 ref_attach_to_group(rc, grp, count);
279
280 for (os_ref_count_t i = 0; i < count; i++) {
281 ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
282 }
283 }
284
/* Debug-side retain: bump group statistics and log a RETAIN event. */
__attribute__((cold, noinline))
static void
ref_retain_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
	ref_retain_group(grp);
	ref_log_op(grp, (void *)rc, REFLOG_RETAIN);
}
292 #endif
293
/*
 * Initialize a refcount to 'count' (must be >= 1; panics otherwise).
 * If the group has debugging enabled, attach and log the initial retains.
 */
void
os_ref_init_count_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count)
{
	os_ref_check_underflow(rc, count, 1);
	atomic_init(rc, count);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_init_debug(rc, grp, count);
	}
#endif
}
306
/*
 * Unconditionally take one reference. Panics if the pre-increment count was
 * below the floor 'f' (resurrection) or at the overflow limit.
 */
static inline void
__os_ref_retain(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp * __debug_only grp)
{
	os_ref_count_t old = atomic_fetch_add_explicit(rc, 1, memory_order_relaxed);
	os_ref_check_retain(rc, old, f);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif
}
320
/* Exported retain: floor of 1 (object must be live). */
void
os_ref_retain_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	__os_ref_retain(rc, 1, grp);
}

/* Exported retain with a caller-supplied floor 'f'. */
void
os_ref_retain_floor_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	__os_ref_retain(rc, f, grp);
}
333
/*
 * Conditionally take one reference: fails (returns false) when the current
 * count is below the floor 'f', so a dying object is never resurrected.
 * Panics on overflow.
 */
static inline bool
__os_ref_retain_try(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp * __debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur < f)) {
		        os_atomic_rmw_loop_give_up(return false);
		}

		next = cur + 1;
	});

	/* 'cur' is the pre-increment value observed by the successful CAS. */
	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}
358
/* Exported try-retain: floor of 1. */
bool
os_ref_retain_try_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	return __os_ref_retain_try(rc, 1, grp);
}

/* Exported try-retain with a caller-supplied floor 'f'. */
bool
os_ref_retain_floor_try_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	return __os_ref_retain_try(rc, f, grp);
}
371
/*
 * Common release path: drop 'n' references with 'release_order'; when the
 * count reaches zero (strictly: drops below 'n'), issue a fence-like load
 * with 'dealloc_order' so the caller may safely deallocate. Returns the
 * post-decrement count. Panics on underflow.
 */
__attribute__((always_inline))
static inline os_ref_count_t
_os_ref_release_inline(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	os_ref_count_t val;

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		/*
		 * Care not to use 'rc' after the decrement because it might be deallocated
		 * under us.
		 */
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
		ref_release_group(grp);
	}
#endif

	val = atomic_fetch_sub_explicit(rc, n, release_order);
	os_ref_check_underflow(rc, val, n);
	val -= n;
	if (__improbable(val < n)) {
		/* Last reference: acquire-side pairing for the final release. */
		atomic_load_explicit(rc, dealloc_order);
	}

#if OS_REFCNT_DEBUG
	/*
	 * The only way to safely access the ref count or group after
	 * decrementing the count is when the count is zero (as the caller won't
	 * see the zero until the function returns).
	 */
	if (val == 0 && (REFLOG_GRP_DEBUG_ENABLED(grp))) {
		ref_drop_group(grp);
		ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */
	}
#endif

	return val;
}
412
#if OS_REFCNT_DEBUG
/* Out-of-line multi-reference release, used for debug-enabled groups. */
__attribute__((noinline))
static os_ref_count_t
os_ref_release_n_internal(os_ref_atomic_t *rc, os_ref_count_t n,
    struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, n, grp, release_order, dealloc_order);
}
#endif
427
/* Out-of-line single-reference release with caller-chosen memory orders. */
__attribute__((noinline))
os_ref_count_t
os_ref_release_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp,
    memory_order release_order, memory_order dealloc_order)
{
	// Legacy exported interface with bad codegen due to the barriers
	// not being immediate
	//
	// Also serves as the debug function
	return _os_ref_release_inline(rc, 1, grp, release_order, dealloc_order);
}
439
/*
 * Release one reference with release/acquire ordering (the common,
 * fully-synchronized flavor). Debug-enabled groups take the out-of-line
 * path so the hot path stays branch-light.
 */
os_ref_count_t
os_ref_release_barrier_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		return os_ref_release_internal(rc, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_release, memory_order_acquire);
}
453
/*
 * Release one reference with relaxed ordering; the caller is responsible
 * for any synchronization before deallocation.
 */
os_ref_count_t
os_ref_release_relaxed_internal(os_ref_atomic_t *rc,
    struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		return os_ref_release_internal(rc, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif
	return _os_ref_release_inline(rc, 1, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}
467
/*
 * Retain variant for counts protected by an external lock: plain
 * load/store instead of an atomic RMW. Caller must hold the lock.
 */
static inline void
__os_ref_retain_locked(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp * __debug_only grp)
{
	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_retain(rc, val, f);
	atomic_store_explicit(rc, ++val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif
}
482
/* Exported locked retain: floor of 1. Caller must hold the external lock. */
void
os_ref_retain_locked_internal(os_ref_atomic_t *rc, struct os_refgrp *grp)
{
	__os_ref_retain_locked(rc, 1, grp);
}

/* Exported locked retain with a caller-supplied floor 'f'. */
void
os_ref_retain_floor_locked_internal(os_ref_atomic_t *rc, os_ref_count_t f,
    struct os_refgrp *grp)
{
	__os_ref_retain_locked(rc, f, grp);
}
495
/*
 * Release variant for counts protected by an external lock: plain
 * load/store instead of an atomic RMW. Returns the new count; panics on
 * underflow. Caller must hold the lock.
 */
os_ref_count_t
os_ref_release_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_release_group(grp);
		ref_log_op(grp, (void *)rc, REFLOG_RELEASE);
	}
#endif

	os_ref_count_t val = os_ref_get_count_internal(rc);
	os_ref_check_underflow(rc, val, 1);
	atomic_store_explicit(rc, --val, memory_order_relaxed);

#if OS_REFCNT_DEBUG
	if (val == 0 && (REFLOG_GRP_DEBUG_ENABLED(grp))) {
		ref_drop_group(grp);
		ref_log_drop(grp, (void *)rc);
	}
#endif

	return val;
}
519
520 /*
521 * Bitwise API
522 */
523
#undef os_ref_init_count_mask
/*
 * Bitwise API: the low 'b' bits of the word hold caller flags ('init_bits'
 * must fit in them) and the remaining high bits hold the count, stored as
 * (init_count << b). Underflow is checked against a floor of one whole
 * reference (1 << b).
 */
void
os_ref_init_count_mask(os_ref_atomic_t *rc, uint32_t b,
    struct os_refgrp *__debug_only grp,
    os_ref_count_t init_count, uint32_t init_bits)
{
	assert(init_bits < (1U << b));
	atomic_init(rc, (init_count << b) | init_bits);
	os_ref_check_underflow(rc, (init_count << b), 1u << b);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_init_debug(rc, grp, init_count);
	}
#endif
}
540
/*
 * Bitwise retain: add 'n' (a pre-shifted count increment, i.e. 1 << b for
 * one reference) with memory order 'mo'. Panics on resurrection/overflow.
 */
__attribute__((always_inline))
static inline void
os_ref_retain_mask_inline(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp, memory_order mo)
{
	os_ref_count_t old = atomic_fetch_add_explicit(rc, n, mo);
	os_ref_check_retain(rc, old, n);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif
}
555
/* Exported bitwise retain, relaxed ordering. */
void
os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_relaxed);
}

/* Exported bitwise retain, acquire ordering. */
void
os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
	os_ref_retain_mask_inline(rc, n, grp, memory_order_acquire);
}
569
/*
 * Bitwise release with release/acquire ordering; 'n' is the pre-shifted
 * count decrement. Returns the new raw word value (count bits + flags).
 */
uint32_t
os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_release, memory_order_acquire);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_release, memory_order_acquire);
}
584
/* Bitwise release with relaxed ordering; see the barrier variant above. */
uint32_t
os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    struct os_refgrp *__debug_only grp)
{
#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		return os_ref_release_n_internal(rc, n, grp,
		           memory_order_relaxed, memory_order_relaxed);
	}
#endif

	return _os_ref_release_inline(rc, n, NULL,
	           memory_order_relaxed, memory_order_relaxed);
}
599
/*
 * Bitwise try-retain, relaxed ordering: fails (returns 0) when the current
 * word is below 'n' or any bit in 'reject_mask' is set; otherwise returns
 * the new word value. Panics on overflow.
 */
uint32_t
os_ref_retain_try_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, relaxed, {
		if (__improbable(cur < n || (cur & reject_mask))) {
		        os_atomic_rmw_loop_give_up(return 0);
		}
		next = cur + n;
	});

	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return next;
}
623
/*
 * Bitwise try-retain with acquire ordering: same rejection rules as the
 * relaxed variant, but returns a bool instead of the new word value.
 */
bool
os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n,
    uint32_t reject_mask, struct os_refgrp *__debug_only grp)
{
	os_ref_count_t cur, next;

	os_atomic_rmw_loop(rc, cur, next, acquire, {
		if (__improbable(cur < n || (cur & reject_mask))) {
		        os_atomic_rmw_loop_give_up(return false);
		}
		next = cur + n;
	});

	os_ref_check_overflow(rc, cur);

#if OS_REFCNT_DEBUG
	if (REFLOG_GRP_DEBUG_ENABLED(grp)) {
		ref_retain_debug(rc, grp);
	}
#endif

	return true;
}
647