xref: /xnu-8020.121.3/osfmk/kern/btlog.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2012-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/assert.h>
30 #include <kern/backtrace.h>
31 #include <kern/btlog.h>
32 #include <kern/smr.h>
33 #include <kern/startup.h>
34 #include <kern/thread_call.h>
35 #include <os/hash.h>
36 #include <mach/vm_map.h>
37 #include <mach/vm_param.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_map.h>
40 #include <vm/pmap.h>
41 
42 #pragma mark btref & helpers
43 
44 static LCK_GRP_DECLARE(bt_library_lck_grp, "bt_library");
45 static SMR_DEFINE_EARLY(bt_library_smr);
46 
47 #define BTS_FRAMES_MAX          13
48 #define BTS_FRAMES_REF_MASK     0xfffffff0
49 #define BTS_FRAMES_REF_INC      0x00000010
50 #define BTS_FRAMES_LEN_MASK     0x0000000f
51 
52 typedef SMR_POINTER(btref_t)    btref_smr_t;
53 
/*
 * One backtrace entry in the library.
 *
 * Live entries use the first layout; freed entries overlay the tail
 * of the frame storage with a free-list link and the SMR sequence
 * after which the memory may be reused (bts_next is left intact so
 * that concurrent SMR readers can still traverse the chain).
 */
typedef union bt_stack {
	struct {
		btref_smr_t     bts_next;       /* hash chain link (SMR) */
		uint32_t        bts_ref_len;    /* refcount (BTS_FRAMES_REF_MASK) + frame count (BTS_FRAMES_LEN_MASK) */
		uint32_t        bts_hash;       /* jenkins hash of the packed frames */
		uint32_t        bts_frames[BTS_FRAMES_MAX]; /* packed backtrace (BTP_KERN_OFFSET_32) */
	};
	struct {
		/* pads over everything above except the last two fields */
		uint32_t        bts_padding[3 + BTS_FRAMES_MAX - 1 - sizeof(long) / 4];
		uint32_t        bts_free_next;  /* next free stack (btref) */
		smr_seq_t       bts_free_seq;   /* SMR seq after which reuse is safe */
	};
} *bt_stack_t;

static_assert(sizeof(union bt_stack) == 64); /* allocation scheme needs it */
69 
70 #define BTREF_PERMANENT_BIT     0x80000000u
71 #define BTREF_OP_MASK           0x0000003fu
72 #define BTREF_VALID_MASK        0xc000003fu
73 
74 #define BTL_SIZE_INIT           (1u << 20)
75 #define BTL_SIZE_MAX            (1u << 30)
76 #define BTL_SLABS               9
77 
78 #define BTL_PARAM_INIT          0x00000020u
79 #define BTL_PARAM_PARITY(p)     ((p) >> 31)
80 #define BTL_PARAM_SHIFT(p)      (32 - ((p) & 0x3f))
81 #define BTL_PARAM_IDX(p, h)     ((uint64_t)(h) >> ((p) & 0x3f))
82 #define BTL_PARAM_NEXT(p)       ((p) - 0x80000001u)
83 
84 #define BTL_HASH_SHIFT          8
85 #define BTL_HASH_COUNT          (1u << BTL_HASH_SHIFT)
86 #define BTL_HASH_MASK           (BTL_HASH_COUNT - 1)
87 
88 static_assert((BTL_SIZE_INIT << BTL_SLABS) == BTL_SIZE_MAX / 2);
89 
90 typedef struct bt_hash {
91 	btref_smr_t             bth_array[BTL_HASH_COUNT];
92 } *bt_hash_t;
93 
94 #if DEBUG || DEVELOPMENT
95 #define BTLIB_VALIDATE          1
96 #else
97 #define BTLIB_VALIDATE          0
98 #endif
99 
100 /*!
101  * @typedef bt_library_t
102  *
103  * @brief
104  * Describes a backtrace library.
105  *
106  * @discussion
107  * A backtrace library is a scalable hash table of backtraces
108  * used for debugging purposes.
109  *
110  * By default there is a single singleton one, but the code
111  * is amenable to have several instances.
112  *
113  *
114  * <h2>Data structure design</h2>
115  *
116  * Its hash table is structured like this:
117  *
118  *     par = BTL_PARAM_PARITY(btl->btl_param);
119  *     sz  = 1u << BTL_PARAM_SHIFT(btl->btl_param);
120  *
121  *     btl->btl_hash[par]
122  *           │
123  *           │     ╭─────── array of size "sz" buckets ───────╮
124  *           ╰───> │                                          │
125  *                 ╰──────────────────────────────────┼───────╯
126  *                                                    │
127  *               ╭─────── struct bt_hash ───────╮     │
128  *               │                              │ <───╯
129  *               ╰──┼───────────────────────────╯
130  *                  │
131  *                  ╰──> Stack ──> Stack ──> Stack ──> X
132  *
133  *
134  * The "btl_hash" two entries are used with the "btl_param" switch in order
135  * to swap the outer array while growing the hash without perturbating
136  * readers.
137  *
138  * The lists of stacks are also maintained in "hash" order which allows
139  * for the rehashing to be a clean split of the lists.
140  *
141  * All stack pointers are "references" which are a smaller 32bit offset
142  * within the library backing store (slabs).
143  *
144  */
typedef struct bt_library {
	lck_ticket_t            btl_lock;       /* protects all mutable state below */
	SMR_POINTER(uint32_t)   btl_param;      /* packed parity/shift, see BTL_PARAM_* */

	bt_hash_t              *btl_hash[2];    /* outer arrays, selected by param parity */
	thread_call_t           btl_call;       /* maintenance callout (grow/rehash) */
	thread_t                btl_grower;     /* thread currently growing the store */

	btref_t                *btl_free_tail;  /* tail of the freed-stacks queue */
	btref_t                 btl_free_head;  /* head of the freed-stacks queue */

	btref_t                 btl_deferred_head; /* removals deferred during rehash */

	bool                    btl_waiters;    /* sleepers on btl_grower exist */
	bool                    btl_in_callout; /* __btlib_callout() is running */
	bool                    btl_rehashing;  /* rehash in progress, defer removals */
	uint8_t                 btl_slab_cur;   /* slab currently being carved */
	uint32_t                btl_alloc_pos;  /* allocation watermark (bytes) */
	uint32_t                btl_faulted_pos; /* populated memory watermark (bytes) */
	uint32_t                btl_max_pos;    /* total reserved VA (bytes) */
	vm_address_t            btl_slabs[BTL_SLABS]; /* slab bases, biased (see __btlib_grow) */
} *bt_library_t;

/* the singleton backtrace library */
static struct bt_library        bt_library;
169 
170 static size_t
__btstack_len(bt_stack_t bts)171 __btstack_len(bt_stack_t bts)
172 {
173 	return bts->bts_ref_len & BTS_FRAMES_LEN_MASK;
174 }
175 
176 static size_t
__btstack_size(bt_stack_t bts)177 __btstack_size(bt_stack_t bts)
178 {
179 	return sizeof(uint32_t) * __btstack_len(bts);
180 }
181 
182 static bool
__btstack_same(bt_stack_t a,bt_stack_t b)183 __btstack_same(bt_stack_t a, bt_stack_t b)
184 {
185 	return a->bts_hash == b->bts_hash &&
186 	       __btstack_len(a) == __btstack_len(b) &&
187 	       memcmp(a->bts_frames, b->bts_frames, __btstack_size(a)) == 0;
188 }
189 
/*
 * Capture the current kernel backtrace, starting at frame pointer
 * @c fp, into @c bts in packed 32-bit form, and initialize its
 * refcount: saturated for permanent stacks (never freed), one
 * reference otherwise.
 *
 * Returns (and records in bts_hash) the jenkins hash of the frames.
 */
static uint32_t
__btstack_capture(bt_stack_t bts, void *fp, bool permanent)
{
	struct backtrace_control ctl = {
		.btc_frame_addr = (vm_offset_t)fp,
	};
	size_t size;

	size = backtrace_packed(BTP_KERN_OFFSET_32, (uint8_t *)bts->bts_frames,
	    sizeof(bts->bts_frames), &ctl, NULL);
	/* low nibble: frame count; high bits: refcount */
	bts->bts_ref_len = (size / sizeof(uint32_t)) +
	    (permanent ? BTS_FRAMES_REF_MASK : BTS_FRAMES_REF_INC);
	return bts->bts_hash = os_hash_jenkins(bts->bts_frames, size);
}
204 
/*
 * Try to take a reference on @c bts (designated by @c btref).
 *
 * Returns 0 if the stack's refcount already dropped to zero (it is
 * being freed and must not be resurrected), otherwise the reference,
 * with BTREF_PERMANENT_BIT set when the stack is (or just became,
 * via BTREF_GET_PERMANENT) permanent.
 */
static btref_t
__btstack_try_retain(btref_t btref, bt_stack_t bts, btref_get_flags_t flags)
{
	uint32_t oref, nref;

	oref = bts->bts_ref_len;

	do {
		switch (oref & BTS_FRAMES_REF_MASK) {
		case 0:
			/* stack died: caller must insert a fresh copy */
			return 0;
		case BTS_FRAMES_REF_MASK:
			/* saturated refcount: permanent, no count taken */
			return btref | BTREF_PERMANENT_BIT;
		}

		nref = oref + BTS_FRAMES_REF_INC;
		if (flags & BTREF_GET_PERMANENT) {
			/* saturate so the stack can never be freed */
			nref |= BTS_FRAMES_REF_MASK;
		}
	} while (!os_atomic_cmpxchgv(&bts->bts_ref_len,
	    oref, nref, &oref, relaxed));

	if ((nref & BTS_FRAMES_REF_MASK) == BTS_FRAMES_REF_MASK) {
		btref |= BTREF_PERMANENT_BIT;
	}

	return btref;
}
233 
/* Panic: a guaranteed retain found a stack whose refcount hit zero. */
__abortlike
static void
__btstack_resurrect_panic(bt_stack_t bts)
{
	panic("trying to resurrect bt stack %p", bts);
}
240 
/*
 * Take a reference on @c bts, like __btstack_try_retain(), but the
 * caller is known to already own a reference: finding a dead stack
 * here is a refcounting bug and panics instead of failing.
 */
static btref_t
__btstack_retain(btref_t btref, bt_stack_t bts, btref_get_flags_t flags)
{
	uint32_t oref, nref;

	oref = bts->bts_ref_len;

	do {
		switch (oref & BTS_FRAMES_REF_MASK) {
		case 0:
			__btstack_resurrect_panic(bts);
		case BTS_FRAMES_REF_MASK:
			/* permanent stacks take no counts */
			return btref | BTREF_PERMANENT_BIT;
		}

		nref = oref + BTS_FRAMES_REF_INC;
		if (flags & BTREF_GET_PERMANENT) {
			nref |= BTS_FRAMES_REF_MASK;
		}
	} while (!os_atomic_cmpxchgv(&bts->bts_ref_len,
	    oref, nref, &oref, relaxed));

	if ((nref & BTS_FRAMES_REF_MASK) == BTS_FRAMES_REF_MASK) {
		btref |= BTREF_PERMANENT_BIT;
	}

	return btref;
}
269 
/* Panic: a release found a stack whose refcount was already zero. */
__abortlike
static void
__btstack_over_release_panic(bt_stack_t bts)
{
	panic("trying to over-release bt stack %p", bts);
}
276 
/*
 * Drop a reference on @c bts.
 *
 * Returns true when this was the last reference, in which case the
 * caller must unlink the stack from its library.  Permanent
 * (saturated) stacks are never released and always return false.
 */
static bool
__btstack_release(bt_stack_t bts)
{
	uint32_t oref, nref;

	oref = bts->bts_ref_len;

	do {
		switch (oref & BTS_FRAMES_REF_MASK) {
		case 0:
			__btstack_over_release_panic(bts);
		case BTS_FRAMES_REF_MASK:
			/* permanent: nothing to do */
			return false;
		}

		nref = oref - BTS_FRAMES_REF_INC;
	} while (!os_atomic_cmpxchgv(&bts->bts_ref_len,
	    oref, nref, &oref, relaxed));

	/* only the length nibble left: refcount dropped to zero */
	return nref < BTS_FRAMES_REF_INC;
}
298 
/*
 * Translate a stack reference into a pointer.
 *
 * Slab 0 covers refs in [0, BTL_SIZE_INIT); every subsequent slab
 * doubles the covered range, so the slab index falls out of the
 * position of the ref's top bit.  Slab bases are pre-biased by
 * __btlib_grow() so that adding the raw ref yields the address.
 */
static bt_stack_t
__btlib_deref(bt_library_t btl, btref_t ref)
{
	uint32_t slab = 0;

	if (ref >= BTL_SIZE_INIT) {
		slab = __builtin_clz(BTL_SIZE_INIT) - __builtin_clz(ref) + 1;
	}
	return (bt_stack_t)(btl->btl_slabs[slab] + ref);
}
309 
/* Acquire the library's ticket lock. */
static void
__btlib_lock(bt_library_t btl)
{
	lck_ticket_lock(&btl->btl_lock, &bt_library_lck_grp);
}
315 
/* Release the library's ticket lock. */
static void
__btlib_unlock(bt_library_t btl)
{
	lck_ticket_unlock(&btl->btl_lock);
}
321 
/*
 * Return the chain head for @c hash under hash parameters @c param:
 * the parity bit picks the outer array, the hash's high bits pick
 * the bt_hash structure, its low BTL_HASH_SHIFT bits the bucket.
 */
static inline btref_smr_t *
__btlib_head(bt_library_t btl, uint32_t param, uint32_t hash)
{
	uint32_t par = BTL_PARAM_PARITY(param);
	uint32_t idx = BTL_PARAM_IDX(param, hash);

	return &btl->btl_hash[par][idx]->bth_array[hash & BTL_HASH_MASK];
}
330 
331 #pragma mark btref growth & rehashing
332 
333 static void __btlib_remove_deferred_locked(bt_library_t btl);
334 
335 static bool
__btlib_growth_needed(bt_library_t btl)336 __btlib_growth_needed(bt_library_t btl)
337 {
338 	if (btl->btl_faulted_pos >= btl->btl_alloc_pos + PAGE_SIZE / 2) {
339 		return false;
340 	}
341 
342 	if (btl->btl_faulted_pos == btl->btl_max_pos &&
343 	    btl->btl_slab_cur + 1 == BTL_SLABS) {
344 		return false;
345 	}
346 
347 	return true;
348 }
349 
/*
 * Whether the hash table is overloaded for the amount of allocated
 * stacks and should double in size (average chain depth heuristic).
 */
static bool
__btlib_rehash_needed(bt_library_t btl)
{
	uint32_t param = smr_serialized_load(&btl->btl_param);
	uint32_t shift = BTL_HASH_SHIFT + BTL_PARAM_SHIFT(param);

	return (btl->btl_faulted_pos >> (3 + shift)) >= sizeof(union bt_stack);
}
358 
/*
 * Schedule the maintenance callout (growth/rehash), unless thread
 * calls aren't up yet, or the callout itself is the caller.
 */
static void
__btlib_callout_wakeup(bt_library_t btl)
{
	if (startup_phase >= STARTUP_SUB_THREAD_CALL &&
	    !btl->btl_in_callout) {
		thread_call_enter(btl->btl_call);
	}
}
367 
/*
 * Grow the library's backing store: reserve a new slab of VA when
 * the current one is exhausted, then fault in one more page.
 *
 * Called and returns with the library lock held; the lock is
 * dropped around the VM calls, with btl_grower single-threading
 * concurrent growers.
 */
__attribute__((noinline))
static void
__btlib_grow(bt_library_t btl)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_address_t addr;

	/* wait for any in-flight grower, re-check need after sleeping */
	while (btl->btl_grower) {
		btl->btl_waiters = true;
		lck_ticket_sleep_with_inheritor(&btl->btl_lock,
		    &bt_library_lck_grp, LCK_SLEEP_DEFAULT,
		    &btl->btl_grower, btl->btl_grower,
		    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
		if (!__btlib_growth_needed(btl)) {
			return;
		}
	}
	btl->btl_grower = current_thread();

	__btlib_unlock(btl);

	if (btl->btl_faulted_pos == btl->btl_max_pos) {
		/* slab exhausted: reserve a new VA range, twice as large */
		uint8_t slab = btl->btl_slab_cur + 1;
		vm_size_t size = btl->btl_max_pos;

		kr = kmem_alloc(kernel_map, &addr, size,
		    KMA_KOBJECT | KMA_ZERO | KMA_VAONLY | KMA_DATA,
		    VM_KERN_MEMORY_DIAG);
		if (kr != KERN_SUCCESS) {
			goto done;
		}

		/*
		 * Bias the slab base so that slab N covers refs
		 * [max_pos, 2 * max_pos) (see __btlib_deref()).
		 */
		btl->btl_slab_cur = slab;
		btl->btl_slabs[slab] = addr - size;
		btl->btl_max_pos += size;
	}

	if (btl->btl_faulted_pos < btl->btl_alloc_pos + PAGE_SIZE / 2) {
		/* fault in one more page worth of stacks */
		uint8_t slab = btl->btl_slab_cur;

		addr = btl->btl_slabs[slab] + btl->btl_faulted_pos;

		kr = kernel_memory_populate(addr, PAGE_SIZE,
		    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_DIAG);
	}

done:
	__btlib_lock(btl);

	if (kr == KERN_SUCCESS) {
		btl->btl_faulted_pos += PAGE_SIZE;
	}

	btl->btl_grower = NULL;

	if (btl->btl_waiters) {
		btl->btl_waiters = false;
		wakeup_all_with_inheritor(&btl->btl_grower, THREAD_AWAKENED);
	}

	if (__btlib_rehash_needed(btl)) {
		__btlib_callout_wakeup(btl);
	}
}
432 
/*
 * One rehash step for the bucket at @c idx: walk its chain to the
 * first stack whose hash has the new bit (@c mask) set.
 *
 * For an odd bucket, publish that stack as the bucket's head; for
 * an even bucket, cut the chain just before it.  See the diagrams
 * in __btlib_rehash_and_lock() for the overall scheme.
 */
static void
__btlib_split_step(
	bt_library_t            btl,
	bt_hash_t              *bthp,
	uint32_t                idx,
	uint32_t                mask)
{
	btref_smr_t *head, *prev;
	bt_stack_t   bts;
	btref_t      ref;

	__btlib_lock(btl);

	/* opportunistically grow while we hold the lock anyway */
	if (__btlib_growth_needed(btl)) {
		__btlib_grow(btl);
	}

	for (uint32_t i = 0; i < BTL_HASH_COUNT; i++) {
		prev = head = &bthp[idx]->bth_array[i];

		/* chains are hash-ordered: stop at the first set bit */
		while ((ref = smr_serialized_load(prev)) != BTREF_NULL) {
			bts = __btlib_deref(btl, ref);
			if (bts->bts_hash & mask) {
				break;
			}
			prev = &bts->bts_next;
		}

		if (idx & 1) {
			smr_init_store(head, ref);
		} else {
			smr_clear_store(prev);
		}
	}

	__btlib_unlock(btl);
}
470 
471 #if BTLIB_VALIDATE
/*
 * Debug-only check: every stack in the table must live in exactly
 * the bucket its hash maps to under @c param.
 */
static void
__btlib_validate(
	bt_library_t            btl,
	bt_hash_t              *bthp,
	uint32_t                size,
	uint32_t                param)
{
	bt_stack_t bts;
	btref_t ref;

	for (uint32_t i = 0; i < size; i++) {
		for (uint32_t j = 0; j < BTL_HASH_COUNT; j++) {
			ref = smr_serialized_load(&bthp[i]->bth_array[j]);
			if (ref == 0) {
				continue;
			}
			bts = __btlib_deref(btl, ref);
			assert3u(BTL_PARAM_IDX(param, bts->bts_hash), ==, i);
			assert3u(bts->bts_hash & BTL_HASH_MASK, ==, j);
		}
	}
}
494 #endif /* BTLIB_VALIDATE */
495 
496 __attribute__((noinline))
497 static void
__btlib_rehash_and_lock(bt_library_t btl)498 __btlib_rehash_and_lock(bt_library_t btl)
499 {
500 	uint32_t   param_old, size_old, mask;
501 	bt_hash_t *bthp_old;
502 	bt_hash_t *bthp;
503 	smr_seq_t  s1, s2;
504 
505 	/*
506 	 * Step 1: compute all the right sizes and parameters
507 	 *         and allocate the new hash table elements.
508 	 */
509 	param_old = smr_serialized_load(&btl->btl_param);
510 	bthp_old  = btl->btl_hash[BTL_PARAM_PARITY(param_old)];
511 	size_old  = 1u << BTL_PARAM_SHIFT(param_old);
512 	bthp      = kalloc_type(bt_hash_t, 2 * size_old, Z_WAITOK_ZERO);
513 	mask      = 1u << (BTL_PARAM_NEXT(param_old) & 0x1f);
514 
515 	if (bthp == NULL) {
516 		return;
517 	}
518 
519 	for (uint32_t i = 0; i < size_old; i++) {
520 		bthp[2 * i] = bthp_old[i];
521 		bthp[2 * i + 1] = kalloc_type(struct bt_hash,
522 		    Z_WAITOK_ZERO_NOFAIL);
523 	}
524 
525 	/*
526 	 * Step 2: Copy all the hash table buckets in one go.
527 	 *         And publish the new array.
528 	 *
529 	 * TODO: consider if we want to let go of the lock sometimes.
530 	 */
531 	__btlib_lock(btl);
532 
533 	btl->btl_rehashing = true;
534 
535 	for (uint32_t i = 0; i < size_old; i++) {
536 		memcpy(bthp[2 * i + 1], bthp[2 * i], sizeof(struct bt_hash));
537 	}
538 
539 	btl->btl_hash[!BTL_PARAM_PARITY(param_old)] = bthp;
540 
541 	smr_serialized_store(&btl->btl_param, BTL_PARAM_NEXT(param_old));
542 
543 	__btlib_unlock(btl);
544 
545 	smr_synchronize(&bt_library_smr);
546 
547 	/*
548 	 * Step 3: Compute the "odd" lists
549 	 *
550 	 * When we arrive here, we have 2 buckets per list working this way,
551 	 * assumnig the hash bit that we are interested in changes on "C -> D":
552 	 *
553 	 * [ even ] -> A -> B -> C -> D -> E -> 0
554 	 * [ odd  ] ---^
555 	 *
556 	 * We will now build:
557 	 *
558 	 * [ even ] -> A -> B -> C -> D -> E -> 0
559 	 * [ odd  ] ------------------^
560 	 *
561 	 * Note: we try to advance the SMR clock twice,
562 	 *       in the hope that for larger hashes it will
563 	 *       help smr_wait() not to spin.
564 	 */
565 
566 	for (uint32_t i = 0; i < size_old; i += 2) {
567 		__btlib_split_step(btl, bthp, i + 1, mask);
568 	}
569 	s1 = smr_advance(&bt_library_smr);
570 
571 	if (size_old >= 2) {
572 		for (uint32_t i = size_old; i < 2 * size_old; i += 2) {
573 			__btlib_split_step(btl, bthp, i + 1, mask);
574 		}
575 		s2 = smr_advance(&bt_library_smr);
576 	}
577 
578 	/*
579 	 * It's now possible to free the old array, do it,
580 	 * in a feeble attempt to give SMR readers more time before
581 	 * the next smr_wait().
582 	 */
583 	btl->btl_hash[BTL_PARAM_PARITY(param_old)] = NULL;
584 	kfree_type(bt_hash_t, size_old, bthp_old);
585 
586 	/*
587 	 * Step 4: Split the "even" lists
588 	 *
589 	 * We will now cut the "C -> D" link in the even bucket, ending up with:
590 	 *
591 	 * [ even ] -> A -> B -> C -> 0
592 	 * [ odd  ] ----------------> D -> E -> 0
593 	 */
594 	smr_wait(&bt_library_smr, s1);
595 	for (uint32_t i = 0; i < size_old; i += 2) {
596 		__btlib_split_step(btl, bthp, i, mask);
597 	}
598 
599 	if (size_old >= 2) {
600 		smr_wait(&bt_library_smr, s2);
601 		for (uint32_t i = size_old; i < 2 * size_old; i += 2) {
602 			__btlib_split_step(btl, bthp, i, mask);
603 		}
604 	}
605 
606 	/*
607 	 * Help readers see the cuts.
608 	 */
609 	(void)smr_advance(&bt_library_smr);
610 
611 	__btlib_lock(btl);
612 
613 	btl->btl_rehashing = false;
614 
615 #if BTLIB_VALIDATE
616 	__btlib_validate(btl, bthp, size_old * 2, BTL_PARAM_NEXT(param_old));
617 #endif /* BTLIB_VALIDATE */
618 
619 	__btlib_remove_deferred_locked(btl);
620 }
621 
/*
 * Maintenance callout: grow the backing store and/or rehash until
 * the table is appropriately sized.  btl_in_callout prevents
 * __btlib_callout_wakeup() from re-entering us.
 */
static void
__btlib_callout(thread_call_param_t arg0, thread_call_param_t __unused arg1)
{
	bt_library_t btl = arg0;

	__btlib_lock(btl);
	btl->btl_in_callout = true;

	if (__btlib_growth_needed(btl)) {
		__btlib_grow(btl);
	}

	/* __btlib_rehash_and_lock() drops and retakes the lock */
	while (__btlib_rehash_needed(btl)) {
		__btlib_unlock(btl);
		__btlib_rehash_and_lock(btl);
	}

	btl->btl_in_callout = false;
	__btlib_unlock(btl);
}
642 
/*
 * Initialize a library: reserve the first slab of VA, allocate the
 * initial (single bt_hash) table, and set the hash parameters.
 *
 * btl_alloc_pos starts past the first stack slot, so that ref 0
 * (BTREF_NULL) is never handed out; __btlib_get() also uses
 * btl_alloc_pos != 0 as "initialized".
 */
static void
__btlib_init(bt_library_t btl)
{
	kern_return_t kr;
	vm_address_t  addr;
	bt_hash_t    *bthp;

	lck_ticket_init(&btl->btl_lock, &bt_library_lck_grp);
	btl->btl_free_tail = &btl->btl_free_head;
	btl->btl_call = thread_call_allocate_with_options(__btlib_callout, btl,
	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);

	kr = kmem_alloc(kernel_map, &addr, BTL_SIZE_INIT,
	    KMA_KOBJECT | KMA_ZERO | KMA_VAONLY | KMA_DATA,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		panic("unable to allocate initial VA: %d", kr);
	}

	bthp = kalloc_type(bt_hash_t, 1, Z_WAITOK_ZERO_NOFAIL);
	bthp[0] = kalloc_type(struct bt_hash, Z_WAITOK_ZERO_NOFAIL);

	btl->btl_slabs[0]  = addr;
	btl->btl_max_pos   = BTL_SIZE_INIT;
	btl->btl_alloc_pos = sizeof(union bt_stack);
	btl->btl_hash[0]   = bthp;
	smr_init_store(&btl->btl_param, BTL_PARAM_INIT);
}
/* initialize the singleton library once zalloc is up */
STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __btlib_init, &bt_library);
672 
673 #pragma mark btref insertion/removal fastpaths
674 
675 __attribute__((noinline))
676 static btref_t
__btlib_insert(bt_library_t btl,bt_stack_t needle,btref_get_flags_t flags,uint32_t hash)677 __btlib_insert(
678 	bt_library_t            btl,
679 	bt_stack_t              needle,
680 	btref_get_flags_t       flags,
681 	uint32_t                hash)
682 {
683 	bt_stack_t bts;
684 	btref_smr_t *prev;
685 	btref_t ref;
686 
687 	__btlib_lock(btl);
688 
689 	if (__btlib_growth_needed(btl)) {
690 		/*
691 		 * Do this first so that we keep the lock held
692 		 * while we insert.
693 		 */
694 		if ((flags & BTREF_GET_NOWAIT) == 0) {
695 			__btlib_grow(btl);
696 		} else {
697 			__btlib_callout_wakeup(btl);
698 		}
699 	}
700 
701 	prev = __btlib_head(btl, smr_serialized_load(&btl->btl_param), hash);
702 	while ((ref = smr_serialized_load(prev)) != BTREF_NULL) {
703 		bts = __btlib_deref(btl, ref);
704 
705 #if BTLIB_VALIDATE
706 		assert3u(bts->bts_hash & BTL_HASH_MASK, ==,
707 		    hash & BTL_HASH_MASK);
708 #endif /* BTLIB_VALIDATE */
709 
710 		if (needle->bts_hash < bts->bts_hash) {
711 			break;
712 		}
713 		if (__btstack_same(needle, bts)) {
714 			ref = __btstack_try_retain(ref, bts, flags);
715 			if (ref) {
716 				__btlib_unlock(btl);
717 				return ref;
718 			}
719 			break;
720 		}
721 		prev = &bts->bts_next;
722 	}
723 
724 	if (btl->btl_free_head) {
725 		ref = btl->btl_free_head;
726 		bts = __btlib_deref(btl, btl->btl_free_head);
727 		if (smr_poll(&bt_library_smr, bts->bts_free_seq)) {
728 			if ((btl->btl_free_head = bts->bts_free_next) == 0) {
729 				btl->btl_free_tail = &btl->btl_free_head;
730 			}
731 			goto allocated;
732 		}
733 	}
734 
735 	if (__improbable(btl->btl_alloc_pos + sizeof(union bt_stack) >
736 	    btl->btl_faulted_pos)) {
737 		__btlib_unlock(btl);
738 		return BTREF_NULL;
739 	}
740 
741 	ref = btl->btl_alloc_pos;
742 	btl->btl_alloc_pos = ref + sizeof(union bt_stack);
743 	bts = __btlib_deref(btl, ref);
744 
745 allocated:
746 	*bts = *needle;
747 	smr_serialized_store(&bts->bts_next, smr_serialized_load(prev));
748 	smr_serialized_store(prev, ref);
749 
750 	__btlib_unlock(btl);
751 
752 	return ref | ((flags & BTREF_GET_PERMANENT) != 0);
753 }
754 
/* Panic: a dying stack wasn't found in its expected hash chain. */
__abortlike
static void
__btlib_remove_notfound_panic(bt_library_t btl, bt_stack_t bts)
{
	panic("couldn't find stack %p in library %p", bts, btl);
}
761 
/*
 * Unlink the dead stack @c ref/@c bts from its hash chain and queue
 * it on the free list, stamped with the SMR sequence after which
 * its memory may be reused (see __btlib_insert()).
 *
 * Called with the library lock held.
 */
static void
__btlib_remove_locked(bt_library_t btl, btref_t ref, bt_stack_t bts)
{
	uint32_t hash = bts->bts_hash;
	uint32_t param = smr_serialized_load(&btl->btl_param);
	btref_smr_t *prev;

	if (btl->btl_rehashing) {
		/*
		 * We can't really delete things during rehash.
		 * put them on the deferred list.
		 */
		bts->bts_free_next = btl->btl_deferred_head;
		btl->btl_deferred_head = ref;
		return;
	}

	prev = __btlib_head(btl, param, hash);
	for (;;) {
		btref_t tmp = smr_serialized_load(prev);

		if (tmp == ref) {
			break;
		}
		if (tmp == 0) {
			__btlib_remove_notfound_panic(btl, bts);
		}
		prev = &__btlib_deref(btl, tmp)->bts_next;
	}

	smr_serialized_store(prev, smr_serialized_load(&bts->bts_next));
	bts->bts_free_next = 0;
	*btl->btl_free_tail = ref;
	btl->btl_free_tail = &bts->bts_free_next;
	bts->bts_free_seq = smr_advance(&bt_library_smr);
}
798 
/*
 * Drain the removals that were deferred while a rehash was in
 * flight.  Called with the library lock held, rehash finished.
 */
static void
__btlib_remove_deferred_locked(bt_library_t btl)
{
	btref_t ref, next;
	bt_stack_t bts;

	next = btl->btl_deferred_head;
	btl->btl_deferred_head = 0;
	while ((ref = next)) {
		bts = __btlib_deref(btl, ref);
		next = bts->bts_free_next;
		__btlib_remove_locked(btl, ref, bts);
	}
}
813 
/* Locked wrapper around __btlib_remove_locked(). */
__attribute__((noinline))
static void
__btlib_remove(bt_library_t btl, btref_t ref, bt_stack_t bts)
{
	__btlib_lock(btl);
	__btlib_remove_locked(btl, ref, bts);
	__btlib_unlock(btl);
}
822 
823 static btref_t
__btlib_get(bt_library_t btl,void * fp,btref_get_flags_t flags)824 __btlib_get(bt_library_t btl, void *fp, btref_get_flags_t flags)
825 {
826 	union bt_stack needle;
827 	btref_smr_t *head;
828 	uint32_t hash, param;
829 	btref_t ref;
830 
831 	if (bt_library.btl_alloc_pos == 0) {
832 		return BTREF_NULL;
833 	}
834 
835 	hash  = __btstack_capture(&needle, fp, (flags & BTREF_GET_PERMANENT));
836 
837 	smr_enter(&bt_library_smr);
838 
839 	/*
840 	 * The hash "params" have a single bit to select the btl_hash[]
841 	 * pointer that is used.
842 	 *
843 	 * The compiler knows enough about this code to break
844 	 * the dependency chains that we would like, generating code like this:
845 	 *
846 	 *     bthp = btl->btl_hash[0];
847 	 *     if (BTL_PARAM_PARITY(param)) {
848 	 *             bthp = btl->btl_hash[1];
849 	 *     }
850 	 *
851 	 * We could try to play tricks but this would be brittle, so instead,
852 	 * use a proper acquire barrier on param, which pairs with
853 	 * smr_serialized_store(&btl->btl_param, ...)
854 	 * in __btlib_rehash_and_lock().
855 	 *
856 	 *
857 	 * Similarly, because the `bts_next` fields are not dereferenced
858 	 * right away but used as part of complicated arithmetics,
859 	 * trusting the compiler's maintaining of dependencies
860 	 * is a tall order, sometimes, an acquire barrier is best.
861 	 */
862 	param = smr_entered_load_acquire(&btl->btl_param);
863 	head  = __btlib_head(btl, param, hash);
864 	ref   = smr_entered_load_acquire(head);
865 
866 	while (ref) {
867 		bt_stack_t bts = __btlib_deref(btl, ref);
868 
869 #if BTLIB_VALIDATE
870 		assert3u(bts->bts_hash & BTL_HASH_MASK, ==,
871 		    hash & BTL_HASH_MASK);
872 #endif /* BTLIB_VALIDATE */
873 
874 		if (needle.bts_hash < bts->bts_hash) {
875 			break;
876 		}
877 		if (__btstack_same(&needle, bts) &&
878 		    (ref = __btstack_try_retain(ref, bts, flags))) {
879 			smr_leave(&bt_library_smr);
880 			return ref;
881 		}
882 
883 		ref = smr_entered_load_acquire(&bts->bts_next);
884 	}
885 
886 	smr_leave(&bt_library_smr);
887 
888 	return __btlib_insert(btl, &needle, flags, hash);
889 }
890 
/*
 * Public entry point: get a backtrace reference for the current
 * call stack (starting at @c fp) from the singleton library.
 */
btref_t
btref_get(void *fp, btref_get_flags_t flags)
{
	return __btlib_get(&bt_library, fp, flags);
}
896 
/* Panic: caller passed a reference with invalid bits set. */
__abortlike
static void
__btref_invalid(btref_t btref)
{
	panic("trying to manipulate invalid backtrace ref: 0x%08x", btref);
}
903 
904 static inline bool
__btref_isvalid(btref_t btref)905 __btref_isvalid(btref_t btref)
906 {
907 	return ((btref & BTREF_VALID_MASK) & ~BTREF_GET_PERMANENT) == 0;
908 }
909 
/*
 * Take an extra reference on @c btref.
 *
 * BTREF_NULL and permanent references are returned unchanged;
 * references with other BTREF_VALID_MASK bits set cause a panic.
 */
btref_t
btref_retain(btref_t btref)
{
	uint32_t sig  = btref & BTREF_VALID_MASK;

	if (btref && sig == 0) {
		bt_stack_t bts = __btlib_deref(&bt_library, btref);

		btref = __btstack_retain(btref, bts, 0);
	} else if (sig & ~BTREF_PERMANENT_BIT) {
		__btref_invalid(btref);
	}

	return btref;
}
925 
/*
 * Drop a reference on @c btref, removing the stack from the library
 * when the last reference goes away.
 *
 * BTREF_NULL and permanent references are no-ops; references with
 * other BTREF_VALID_MASK bits set cause a panic.
 */
void
btref_put(btref_t btref)
{
	uint32_t sig = btref & BTREF_VALID_MASK;

	if (btref && sig == 0) {
		bt_library_t btl = &bt_library;
		bt_stack_t bts = __btlib_deref(btl, btref);

		if (__improbable(__btstack_release(bts))) {
			__btlib_remove(btl, btref, bts);
		}
	} else if (sig & ~BTREF_PERMANENT_BIT) {
		__btref_invalid(btref);
	}
}
942 
/*
 * Unpack @c btref into @c bt_out as unslid kernel addresses and
 * return the number of frames.  Panics on an invalid reference
 * (__btref_invalid does not return, so the missing tail return
 * is intentional).
 */
uint32_t
btref_decode_unslide(btref_t btref, mach_vm_address_t bt_out[])
{
#if __LP64__
	/* unpack in place: mach_vm_address_t and uintptr_t match */
	uintptr_t *bt = (uintptr_t *)bt_out;
#else
	uintptr_t  bt_buf[BTLOG_MAX_DEPTH];
	uintptr_t *bt = bt_buf;
#endif

	if (__btref_isvalid(btref)) {
		bt_stack_t bts = __btlib_deref(&bt_library, btref);
		uint32_t   len = __btstack_len(bts);
		backtrace_unpack(BTP_KERN_OFFSET_32, bt, BTLOG_MAX_DEPTH,
		    (uint8_t *)bts->bts_frames, sizeof(uint32_t) * len);

		for (uint32_t i = 0; i < len; i++) {
			bt_out[i] = VM_KERNEL_UNSLIDE(bt[i]);
		}

		return len;
	}

	__btref_invalid(btref);
}
968 
969 #pragma mark btlog types and helpers
970 
/*
 * Common header shared by all btlog flavors (btlog_log, btlog_hash).
 */
struct btlog {
	btlog_type_t            btl_type;       /* BTLOG_LOG or BTLOG_HASH */
	uint32_t                btl_disabled : 1;
	uint32_t                btl_sample_max : 23; /* NOTE(review): sampling bound, usage not visible in this chunk -- confirm */
#define BTL_SAMPLE_LIMIT        0x007fffffu
	uint32_t                btl_count;      /* entry capacity (see btll_count/btlh_count) */
	lck_ticket_t            btl_lock;
	uint32_t     *__zpercpu btl_sample;     /* per-cpu sampling state -- semantics not visible here */
};
980 
981 #if __LP64__
982 #define BTLOG_ENTRY_ADDR_BITS   48
983 #else
984 #define BTLOG_ENTRY_ADDR_BITS   32
985 #endif
986 
/* One record of a plain (ring) log: element address + backtrace ref. */
struct bt_log_entry {
	vm_address_t            btle_addr;
	btref_t                 btle_where;     /* backtrace ref + op bits */
} __attribute__((packed, aligned(4)));

/* Ring-buffer flavored btlog: btll_pos wraps over btll_entries. */
struct btlog_log {
	struct btlog            btll_hdr;
#define btll_count              btll_hdr.btl_count
	uint32_t                btll_pos;       /* next slot to overwrite */
	struct bt_log_entry     btll_entries[__counted_by(btll_count)];
};
998 
999 
1000 #define BT_HASH_END_MARKER      UINT32_MAX
1001 
/* One record of a hash log, chained by entry index. */
struct bt_hash_entry {
	vm_address_t            bthe_addr;      /* encoded address (see __btlog_elem_encode) */
	uint32_t                bthe_next;      /* index of next entry, BT_HASH_END_MARKER terminates */
	btref_t                 bthe_where;     /* backtrace ref + op bits */
};

/* Chain head: first/last entry indices (last enables O(1) append). */
struct bt_hash_head {
	uint32_t                bthh_first;
	uint32_t                bthh_last;
};

/*
 * Hash flavored btlog: entries, followed in memory by one
 * bt_hash_head per 4 entries (see __btlog_hash_hash()).
 */
struct btlog_hash {
	struct btlog            btlh_hdr;
#define btlh_count              btlh_hdr.btl_count
	uint32_t                btlh_pos;       /* next entry index to recycle */
	struct bt_hash_head     btlh_free;      /* list of free entries */
	struct bt_hash_entry    btlh_entries[__counted_by(btlh_count)];
};
1020 
/* Transparent union: pass any btlog flavor without casts. */
typedef union {
	vm_address_t            bta;
	struct btlog           *btl;
	struct btlog_log       *btll;
	struct btlog_hash      *btlh;
} __attribute__((transparent_union)) btlogu_t;
1027 
1028 static LCK_GRP_DECLARE(btlog_lck_grp, "btlog");
1029 
/* Acquire a btlog's ticket lock. */
static void
__btlog_lock(btlogu_t btlu)
{
	lck_ticket_lock(&btlu.btl->btl_lock, &btlog_lck_grp);
}
1035 
/* Release a btlog's ticket lock. */
static void
__btlog_unlock(btlogu_t btlu)
{
	lck_ticket_unlock(&btlu.btl->btl_lock);
}
1041 
/*
 * Canonicalize an element address: with kernel TBI, fill the tag
 * byte so differently-tagged pointers to the same object compare
 * and hash identically.
 */
static void *
__btlog_elem_normalize(void *addr)
{
#if CONFIG_KERNEL_TBI
	addr = (void *)VM_KERNEL_TBI_FILL((vm_offset_t)addr);
#endif /* CONFIG_KERNEL_TBI */
	return addr;
}
1050 
/*
 * Encode an element address for storage (bitwise complement of the
 * normalized pointer; presumably so a zeroed slot never matches a
 * real kernel address -- TODO confirm against lookup code).
 */
static long
__btlog_elem_encode(void *addr)
{
	return ~(long)__btlog_elem_normalize(addr);
}
1056 
/* Inverse of __btlog_elem_encode(): undo the complement. */
static void *
__btlog_elem_decode(long addr)
{
	long raw = ~addr;

	return (void *)raw;
}
1062 
/*
 * The bucket-head array lives immediately after the entry array
 * (see the BTLOG_HASH sizing in __btlog_size()).
 */
static struct bt_hash_head *
__btlog_hash_hash(struct btlog_hash *btlh)
{
	return (struct bt_hash_head *)(btlh->btlh_entries + btlh->btlh_count);
}
1068 
1069 static uint32_t
__btlog_hash_count(struct btlog_hash * btlh)1070 __btlog_hash_count(struct btlog_hash *btlh)
1071 {
1072 	return btlh->btlh_count >> 2;
1073 }
1074 
/*
 * Bucket head for a given element address.
 *
 * btlh_count is a power of 2 (see __btlog_size()), hence so is the
 * bucket count, making the mask below a valid modulo.
 */
static struct bt_hash_head *
__btlog_hash_head(struct btlog_hash *btlh, void *addr)
{
	uint32_t h = os_hash_kernel_pointer(__btlog_elem_normalize(addr));
	h &= (__btlog_hash_count(btlh) - 1);
	return &__btlog_hash_hash(btlh)[h];
}
1082 
1083 __attribute__((overloadable))
1084 static struct btlog_size_pair {
1085 	vm_size_t btsp_size;
1086 	uint32_t  btsp_count;
1087 }
__btlog_size(btlog_type_t type,uint32_t count)1088 __btlog_size(btlog_type_t type, uint32_t count)
1089 {
1090 	struct btlog_size_pair pair = {0};
1091 
1092 	switch (type) {
1093 	case BTLOG_LOG:
1094 		pair.btsp_size = round_page(sizeof(struct btlog_log) +
1095 		    count * sizeof(struct bt_log_entry));
1096 		pair.btsp_count = (pair.btsp_size - sizeof(struct btlog_log)) /
1097 		    sizeof(struct bt_log_entry);
1098 		break;
1099 
1100 	case BTLOG_HASH:
1101 		pair.btsp_count = MAX(1u << fls(count - 1), 128u);
1102 		pair.btsp_size = round_page(sizeof(struct btlog_hash) +
1103 		    pair.btsp_count * sizeof(struct bt_log_entry) +
1104 		    (pair.btsp_count >> 2) * sizeof(struct btlog_hash));
1105 		break;
1106 	}
1107 
1108 	return pair;
1109 }
1110 
/*
 * Sizing for an existing btlog, recomputed from its header fields.
 */
__attribute__((overloadable))
static struct btlog_size_pair
__btlog_size(btlogu_t btlu)
{
	return __btlog_size(btlu.btl->btl_type, btlu.btl->btl_count);
}
1117 
/* Strip the operation bits from a stored word, keeping the btref. */
static inline btref_t
__bt_ref(uint32_t stack_and_op)
{
	return stack_and_op & ~BTREF_OP_MASK;
}
1123 
/* Extract the operation bits from a stored word, dropping the btref. */
static inline btref_t
__bt_op(uint32_t stack_and_op)
{
	return stack_and_op & BTREF_OP_MASK;
}
1129 
1130 #pragma mark btlog_log
1131 
/*
 * Drop the backtrace reference held by every entry of a log-type btlog.
 * (Empty slots store BTREF_NULL; btref_put() on it is harmless.)
 */
static void
__btlog_log_destroy(struct btlog_log *btll)
{
	for (uint32_t i = 0; i < btll->btll_count; i++) {
		btref_put(__bt_ref(btll->btll_entries[i].btle_where));
	}
}
1139 
/*
 * Record an (address, operation, backtrace) tuple in a log-type btlog.
 *
 * Entries live in a circular buffer: the oldest entry is overwritten
 * and its backtrace reference dropped (outside the lock).  If the log
 * was disabled after the caller's check, the record is silently lost.
 */
static void
__btlog_log_record(struct btlog_log *btll, void *addr, uint8_t op, btref_t btref)
{
	struct bt_log_entry *btle;
	btref_t old = BTREF_NULL;
	uint32_t pos;

	__btlog_lock(btll);

	if (__improbable(btll->btll_hdr.btl_disabled)) {
		goto disabled;
	}

	/* claim the current slot and advance the ring cursor (with wrap) */
	pos = btll->btll_pos;
	if (pos + 1 == btll->btll_count) {
		btll->btll_pos = 0;
	} else {
		btll->btll_pos = pos + 1;
	}

	btle  = &btll->btll_entries[pos];
	old   = __bt_ref(btle->btle_where);
	*btle = (struct bt_log_entry){
		.btle_addr  = __btlog_elem_encode(addr),
		.btle_where = btref | (op & BTREF_OP_MASK),
	};

disabled:
	__btlog_unlock(btll);

	/* drop the overwritten entry's reference outside of the lock */
	btref_put(old);
}
1172 
1173 #pragma mark btlog_hash
1174 
1175 static void
__btlog_hash_init(struct btlog_hash * btlh)1176 __btlog_hash_init(struct btlog_hash *btlh)
1177 {
1178 	struct bt_hash_head *hash = __btlog_hash_hash(btlh);
1179 
1180 	btlh->btlh_free.bthh_first = BT_HASH_END_MARKER;
1181 	btlh->btlh_free.bthh_last = BT_HASH_END_MARKER;
1182 
1183 	for (size_t i = 0; i < __btlog_hash_count(btlh); i++) {
1184 		hash[i].bthh_first = BT_HASH_END_MARKER;
1185 		hash[i].bthh_last = BT_HASH_END_MARKER;
1186 	}
1187 }
1188 
/*
 * Drop the backtrace reference held by every entry of a hash-type btlog.
 */
static void
__btlog_hash_destroy(struct btlog_hash *btlh)
{
	for (uint32_t i = 0; i < btlh->btlh_count; i++) {
		btref_put(__bt_ref(btlh->btlh_entries[i].bthe_where));
	}
}
1196 
1197 static uint32_t
__btlog_hash_stailq_pop_first(struct btlog_hash * btlh,struct bt_hash_head * head)1198 __btlog_hash_stailq_pop_first(
1199 	struct btlog_hash      *btlh,
1200 	struct bt_hash_head    *head)
1201 {
1202 	struct bt_hash_entry *bthe;
1203 	uint32_t pos = head->bthh_first;
1204 
1205 	bthe = &btlh->btlh_entries[pos];
1206 	btlh->btlh_free.bthh_first = bthe->bthe_next;
1207 	if (bthe->bthe_next == BT_HASH_END_MARKER) {
1208 		btlh->btlh_free.bthh_last = BT_HASH_END_MARKER;
1209 	} else {
1210 		bthe->bthe_next = BT_HASH_END_MARKER;
1211 	}
1212 
1213 	return pos;
1214 }
1215 
/*
 * Unlink the entry @bthe from the chain headed by @head.
 *
 * @prev points at the link word (bthh_first, or a predecessor's
 * bthe_next) that currently references @bthe, and @ppos is the index
 * of the entry holding that link (BT_HASH_END_MARKER when the link is
 * the head itself), which becomes the new tail if @bthe was last.
 */
static void
__btlog_hash_stailq_remove(
	struct bt_hash_head    *head,
	struct bt_hash_entry   *bthe,
	uint32_t               *prev,
	uint32_t                ppos)
{
	*prev = bthe->bthe_next;
	if (bthe->bthe_next == BT_HASH_END_MARKER) {
		head->bthh_last = ppos;
	} else {
		bthe->bthe_next = BT_HASH_END_MARKER;
	}
}
1230 
1231 static void
__btlog_hash_stailq_append(struct btlog_hash * btlh,struct bt_hash_head * head,uint32_t pos)1232 __btlog_hash_stailq_append(
1233 	struct btlog_hash      *btlh,
1234 	struct bt_hash_head    *head,
1235 	uint32_t                pos)
1236 {
1237 	if (head->bthh_last == BT_HASH_END_MARKER) {
1238 		head->bthh_first = head->bthh_last = pos;
1239 	} else {
1240 		btlh->btlh_entries[head->bthh_last].bthe_next = pos;
1241 		head->bthh_last = pos;
1242 	}
1243 }
1244 
/*
 * Remove @bthe from its hash bucket.
 *
 * Walks the bucket chain to find the link referencing @bthe; the
 * entry is assumed to be present (the walk does not check for the
 * end marker), which holds because live entries are always linked.
 */
static void
__btlog_hash_remove(
	struct btlog_hash      *btlh,
	struct bt_hash_entry   *bthe)
{
	struct bt_hash_head *head;
	uint32_t *prev;
	uint32_t ppos;

	head = __btlog_hash_head(btlh, __btlog_elem_decode(bthe->bthe_addr));
	prev = &head->bthh_first;
	ppos = BT_HASH_END_MARKER;

	while (bthe != &btlh->btlh_entries[*prev]) {
		ppos = *prev;
		prev = &btlh->btlh_entries[ppos].bthe_next;
	}

	__btlog_hash_stailq_remove(head, bthe, prev, ppos);
}
1265 
/*
 * Record an (address, operation, backtrace) tuple in a hash-type btlog.
 *
 * A slot is taken from the free list when possible, otherwise the
 * oldest ring slot is recycled (unlinking it from its bucket first).
 * The new entry is appended to the address's bucket chain; the
 * recycled entry's backtrace reference is dropped outside the lock.
 */
static void
__btlog_hash_record(struct btlog_hash *btlh, void *addr, uint8_t op, btref_t btref)
{
	struct bt_hash_head *head;
	struct bt_hash_entry *bthe;
	btref_t old = BTREF_NULL;
	uint32_t pos;

	head = __btlog_hash_head(btlh, __btlog_elem_normalize(addr));

	__btlog_lock(btlh);

	if (__improbable(btlh->btlh_hdr.btl_disabled)) {
		goto disabled;
	}

	if (btlh->btlh_free.bthh_first != BT_HASH_END_MARKER) {
		/* reuse a previously erased slot */
		pos  = __btlog_hash_stailq_pop_first(btlh, &btlh->btlh_free);
		bthe = &btlh->btlh_entries[pos];
	} else {
		/* recycle the oldest ring slot (with wrap) */
		pos  = btlh->btlh_pos;
		if (pos + 1 == btlh->btlh_count) {
			btlh->btlh_pos = 0;
		} else {
			btlh->btlh_pos = pos + 1;
		}
		bthe = &btlh->btlh_entries[pos];
		if (bthe->bthe_addr) {
			/* slot is live: unlink it from its bucket first */
			__btlog_hash_remove(btlh, bthe);
		}
	}

	old   = __bt_ref(bthe->bthe_where);
	*bthe = (struct bt_hash_entry){
		.bthe_addr  = __btlog_elem_encode(addr),
		.bthe_where = btref | (op & BTREF_OP_MASK),
		.bthe_next  = BT_HASH_END_MARKER,
	};

	if (btref & BTREF_VALID_MASK) {
		/* sanity: the referenced library stack must still hold refs */
		assert(__btlib_deref(&bt_library,
		    btref & BTREF_VALID_MASK)->bts_ref_len >= BTS_FRAMES_REF_INC);
	}

	__btlog_hash_stailq_append(btlh, head, pos);

disabled:
	__btlog_unlock(btlh);

	/* drop the recycled entry's reference outside of the lock */
	btref_put(old);
}
1317 
/*
 * Erase every record for @addr from a hash-type btlog.
 *
 * Matching entries are unlinked from their bucket and pushed onto the
 * free list; the scan continues past a match since an address may have
 * been recorded more than once.  The backtrace reference is kept (the
 * slot's bthe_where still holds it) and dropped when the slot is
 * recycled or the log destroyed.
 */
static void
__btlog_hash_erase(struct btlog_hash *btlh, void *addr)
{
	struct bt_hash_head *head;
	struct bt_hash_entry *bthe;
	uint32_t *prev;
	uint32_t pos, ppos;

	addr = __btlog_elem_normalize(addr);
	head = __btlog_hash_head(btlh, addr);
	prev = &head->bthh_first;
	ppos = BT_HASH_END_MARKER;

	__btlog_lock(btlh);

	if (__improbable(btlh->btlh_hdr.btl_disabled)) {
		goto disabled;
	}

	while ((pos = *prev) != BT_HASH_END_MARKER) {
		bthe = &btlh->btlh_entries[pos];
		if (__btlog_elem_decode(bthe->bthe_addr) == addr) {
			/* removal updates *prev, so do not advance here */
			bthe->bthe_addr = 0;
			__btlog_hash_stailq_remove(head, bthe, prev, ppos);
			__btlog_hash_stailq_append(btlh, &btlh->btlh_free, pos);
		} else {
			ppos = *prev;
			prev = &btlh->btlh_entries[ppos].bthe_next;
		}
	}

disabled:
	__btlog_unlock(btlh);
}
1352 
1353 #pragma mark btlog APIs
1354 
1355 static void
__btlog_init(btlogu_t btlu)1356 __btlog_init(btlogu_t btlu)
1357 {
1358 	switch (btlu.btl->btl_type) {
1359 	case BTLOG_HASH:
1360 		__btlog_hash_init(btlu.btlh);
1361 		break;
1362 
1363 	case BTLOG_LOG:
1364 		break;
1365 	}
1366 }
1367 
/*
 * Create a btlog of the given @type, sized for roughly @count records.
 *
 * @sample > 1 enables statistical sampling: btlog_sample() then only
 * reports true about once every @sample calls per CPU (clamped to
 * BTL_SAMPLE_LIMIT).  Returns NULL if the backing memory cannot be
 * allocated.
 */
btlog_t
btlog_create(btlog_type_t type, uint32_t count, uint32_t sample)
{
	struct btlog_size_pair pair = __btlog_size(type, count);
	kern_return_t kr;
	btlogu_t btlu;

	kr = kmem_alloc(kernel_map, &btlu.bta, pair.btsp_size,
	    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_DIAG);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	if (sample > BTL_SAMPLE_LIMIT) {
		sample = BTL_SAMPLE_LIMIT;
	}

	btlu.btl->btl_type = type;
	btlu.btl->btl_sample_max = sample;
	btlu.btl->btl_count = pair.btsp_count;
	lck_ticket_init(&btlu.btl->btl_lock, &btlog_lck_grp);
	assert3u(btlu.btl->btl_count, !=, 0);

	if (sample > 1) {
		btlu.btl->btl_sample = zalloc_percpu(percpu_u64_zone,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		/* stagger the initial countdowns so CPUs don't sample in sync */
		zpercpu_foreach_cpu(cpu) {
			uint32_t *counter;

			counter = zpercpu_get_cpu(btlu.btl->btl_sample, cpu);
			*counter = (cpu + 1) * sample / zpercpu_count();
		}
	}

	__btlog_init(btlu);

	return btlu.btl;
}
1407 
1408 static void
__btlog_destroy(btlogu_t btlu)1409 __btlog_destroy(btlogu_t btlu)
1410 {
1411 	switch (btlu.btl->btl_type) {
1412 	case BTLOG_LOG:
1413 		__btlog_log_destroy(btlu.btll);
1414 		break;
1415 
1416 	case BTLOG_HASH:
1417 		__btlog_hash_destroy(btlu.btlh);
1418 		break;
1419 	}
1420 }
1421 
/*
 * Tear down a btlog created by btlog_create(): drop the entry
 * references (unless already done by btlog_disable()), free the
 * sampling counters, and release the lock and backing memory.
 */
void
btlog_destroy(btlogu_t btlu)
{
	if (!btlu.btl->btl_disabled) {
		__btlog_destroy(btlu);
	}
	if (btlu.btl->btl_sample) {
		zfree_percpu(percpu_u64_zone, btlu.btl->btl_sample);
	}
	lck_ticket_destroy(&btlu.btl->btl_lock, &btlog_lck_grp);
	kmem_free(kernel_map, btlu.bta, __btlog_size(btlu).btsp_size);
}
1434 
/*
 * Re-enable a disabled btlog: repopulate the tail pages that
 * btlog_disable() released (the first page stays resident),
 * reinitialize the type-specific state, and clear the disabled flag.
 */
kern_return_t
btlog_enable(btlogu_t btlu)
{
	vm_size_t size;
	kern_return_t kr = KERN_SUCCESS;

	size = __btlog_size(btlu).btsp_size;
	if (size > PAGE_SIZE) {
		kr = kernel_memory_populate(btlu.bta + PAGE_SIZE,
		    size - PAGE_SIZE, KMA_KOBJECT | KMA_ZERO,
		    VM_KERN_MEMORY_DIAG);
	}

	if (kr == KERN_SUCCESS) {
		__btlog_init(btlu);

		__btlog_lock(btlu);
		assert(btlu.btl->btl_disabled);
		btlu.btl->btl_disabled = false;
		__btlog_unlock(btlu);
	}

	return kr;
}
1459 
/*
 * Disable a btlog and release most of its memory: mark it disabled
 * (recorders bail out under the lock), drop the entry references,
 * zero the first page past the header, and depopulate the tail pages.
 * The header page is kept so btlog_enable() can bring the log back.
 */
void
btlog_disable(btlogu_t btlu)
{
	vm_size_t size;

	__btlog_lock(btlu);
	assert(!btlu.btl->btl_disabled);
	btlu.btl->btl_disabled = true;
	__btlog_unlock(btlu);

	__btlog_destroy(btlu);

	size = __btlog_size(btlu).btsp_size;
	bzero((char *)btlu.bta + sizeof(*btlu.btl),
	    PAGE_SIZE - sizeof(*btlu.btl));
	if (size > PAGE_SIZE) {
		kernel_memory_depopulate(btlu.bta + PAGE_SIZE,
		    size - PAGE_SIZE, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
	}
}
1480 
/* Return the flavor (BTLOG_LOG or BTLOG_HASH) of @btlog. */
btlog_type_t
btlog_get_type(btlog_t btlog)
{
	return btlog->btl_type;
}
1486 
/* Return the effective entry capacity of @btlog. */
uint32_t
btlog_get_count(btlog_t btlog)
{
	return btlog->btl_count;
}
1492 
/*
 * Statistical sampling filter: returns true when this event should be
 * recorded.  Always true when sampling is off; otherwise true roughly
 * once every btl_sample_max calls, via a per-CPU relaxed countdown.
 */
bool
btlog_sample(btlog_t btlog)
{
	uint32_t *counter;

	if (btlog->btl_sample == NULL) {
		return true;
	}

	counter = zpercpu_get(btlog->btl_sample);
	if (os_atomic_dec_orig(counter, relaxed) != 0) {
		return false;
	}

	/* the countdown hit zero: rearm it and record this event */
	os_atomic_store(counter, btlog->btl_sample_max - 1, relaxed);
	return true;
}
1510 
1511 void
btlog_record(btlogu_t btlu,void * addr,uint8_t op,btref_t btref)1512 btlog_record(btlogu_t btlu, void *addr, uint8_t op, btref_t btref)
1513 {
1514 	if (btlu.btl->btl_disabled) {
1515 		return;
1516 	}
1517 	switch (btlu.btl->btl_type) {
1518 	case BTLOG_LOG:
1519 		__btlog_log_record(btlu.btll, addr, op, btref);
1520 		break;
1521 
1522 	case BTLOG_HASH:
1523 		__btlog_hash_record(btlu.btlh, addr, op, btref);
1524 		break;
1525 	}
1526 }
1527 
1528 void
btlog_erase(btlogu_t btlu,void * addr)1529 btlog_erase(btlogu_t btlu, void *addr)
1530 {
1531 	if (btlu.btl->btl_disabled) {
1532 		return;
1533 	}
1534 	switch (btlu.btl->btl_type) {
1535 	case BTLOG_HASH:
1536 		__btlog_hash_erase(btlu.btlh, addr);
1537 		break;
1538 
1539 	case BTLOG_LOG:
1540 		break;
1541 	}
1542 }
1543 
1544 extern void
1545 qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
1546 
/*
 * Scratch record used when summarizing a btlog: a backtrace word
 * (btref plus op bits, see __bt_ref()/__bt_op()) and how many log
 * entries reference it.
 */
struct btlog_record {
	uint32_t btr_where;
	uint32_t btr_count;
};
1551 
1552 static int
btlog_record_cmp_where(const void * e1,const void * e2)1553 btlog_record_cmp_where(const void *e1, const void *e2)
1554 {
1555 	const struct btlog_record *a = e1;
1556 	const struct btlog_record *b = e2;
1557 
1558 	if (a->btr_where == b->btr_where) {
1559 		return 0;
1560 	}
1561 	return a->btr_where > b->btr_where ? 1 : -1;
1562 }
1563 
1564 static bool
btlog_records_pack(struct btlog_record * array,uint32_t * countp)1565 btlog_records_pack(struct btlog_record *array, uint32_t *countp)
1566 {
1567 	uint32_t r, w, count = *countp;
1568 
1569 	qsort(array, count, sizeof(struct btlog_record), btlog_record_cmp_where);
1570 
1571 	for (r = 1, w = 1; r < count; r++) {
1572 		if (array[w - 1].btr_where == array[r].btr_where) {
1573 			array[w - 1].btr_count += array[r].btr_count;
1574 		} else {
1575 			array[w++] = array[r];
1576 		}
1577 	}
1578 
1579 	if (w == count) {
1580 		return false;
1581 	}
1582 
1583 	*countp = w;
1584 	return true;
1585 }
1586 
1587 static int
btlog_record_cmp_rev_count(const void * e1,const void * e2)1588 btlog_record_cmp_rev_count(const void *e1, const void *e2)
1589 {
1590 	const struct btlog_record *a = e1;
1591 	const struct btlog_record *b = e2;
1592 
1593 	if (a->btr_count == b->btr_count) {
1594 		return 0;
1595 	}
1596 	return a->btr_count > b->btr_count ? -1 : 1;
1597 }
1598 
/*
 * Snapshot the unique backtraces of @btl as zone_btrecord_t entries in
 * a wired ipc_kernel_map buffer that the caller owns on success.
 *
 * Returns KERN_NOT_FOUND for an empty or disabled log, KERN_NO_SPACE
 * when the deduplicated set still cannot fit, or an allocation error.
 */
kern_return_t
btlog_get_records(
	btlogu_t                btl,
	zone_btrecord_t       **records,
	unsigned int           *numrecs)
{
	struct btlog_record *btr_array;
	struct btlog_record  btr;
	zone_btrecord_t     *rec_array;
	vm_offset_t          addr, end, size, ipc_map_size;
	kern_return_t        kr;
	uint32_t             count = 0;

	/*
	 * Step 1: collect all the backtraces in the logs in wired memory
	 *
	 *         note that the ipc_kernel_map is small, and we might have
	 *         too little space.
	 *
	 *         In order to accommodate, we will deduplicate as we go.
	 *         If we still overflow space, we return KERN_NO_SPACE.
	 */

	ipc_map_size = (vm_offset_t)(vm_map_max(ipc_kernel_map) -
	    vm_map_min(ipc_kernel_map));
	size = round_page(btlog_get_count(btl.btl) * sizeof(struct btlog_record));
	if (size > ipc_map_size) {
		size = ipc_map_size / 4;
	}

	/* retry with halved sizes until the allocation fits (512k floor) */
	for (;;) {
		kr = kmem_alloc(ipc_kernel_map, &addr, size,
		    KMA_DATA, VM_KERN_MEMORY_IPC);
		if (kr == KERN_SUCCESS) {
			break;
		}
		if (size < (1U << 19)) {
			return kr;
		}
		size /= 2;
	}

	/* both views alias the same buffer; see the in-place expansion below */
	btr_array = (struct btlog_record *)addr;
	rec_array = (zone_btrecord_t *)addr;
	kr = KERN_NOT_FOUND;

	__btlog_lock(btl);

	if (btl.btl->btl_disabled) {
		goto disabled;
	}

	switch (btl.btl->btl_type) {
	case BTLOG_LOG:
		for (uint32_t i = 0; i < btl.btl->btl_count; i++) {
			struct bt_log_entry *btle = &btl.btll->btll_entries[i];

			/* the ring fills in order: first empty slot ends the scan */
			if (!btle->btle_addr) {
				break;
			}
			if ((count + 1) * sizeof(struct btlog_record) > size) {
				if (!btlog_records_pack(btr_array, &count)) {
					kr = KERN_NO_SPACE;
					count = 0;
					break;
				}
			}
			btr_array[count].btr_where = btle->btle_where;
			btr_array[count].btr_count = 1;
			count++;
		}
		break;

	case BTLOG_HASH:
		for (uint32_t i = 0; i < btl.btl->btl_count; i++) {
			struct bt_hash_entry *bthe = &btl.btlh->btlh_entries[i];

			/* hash slots can be sparse: skip empty ones */
			if (!bthe->bthe_addr) {
				continue;
			}
			if ((count + 1) * sizeof(struct btlog_record) > size) {
				if (!btlog_records_pack(btr_array, &count)) {
					kr = KERN_NO_SPACE;
					count = 0;
					break;
				}
			}
			btr_array[count].btr_where = bthe->bthe_where;
			btr_array[count].btr_count = 1;
			count++;
		}
		break;
	}

	/*
	 * Step 2: unique all the records, and retain them
	 */

	if (count) {
		btlog_records_pack(btr_array, &count);
		/*
		 * If the backtraces won't fit,
		 * sort them in reverse popularity order and clip.
		 */
		if (count > size / sizeof(zone_btrecord_t)) {
			qsort(btr_array, count, sizeof(struct btlog_record),
			    btlog_record_cmp_rev_count);
			count = size / sizeof(zone_btrecord_t);
		}
		/* hold a ref on each stack so it survives dropping the lock */
		for (uint32_t i = 0; i < count; i++) {
			btref_retain(__bt_ref(btr_array[i].btr_where));
		}
	}

disabled:
	__btlog_unlock(btl);

	if (count == 0) {
		kmem_free(ipc_kernel_map, addr, size);
		return kr;
	}

	/*
	 * Step 3: Expand the backtraces in place, in reverse order.
	 */

	for (uint32_t i = count; i-- > 0;) {
		/*
		 * rec_array aliases btr_array and each zone_btrecord_t is
		 * larger than a btlog_record: walk backwards and copy the
		 * small record out (volatile forces the read before the
		 * overlapping write) so unread records aren't clobbered.
		 */
		btr = *(volatile struct btlog_record *)&btr_array[i];

		rec_array[i] = (zone_btrecord_t){
			.ref_count      = btr.btr_count,
			.operation_type = __bt_op(btr.btr_where),
		};
		btref_decode_unslide(__bt_ref(btr.btr_where), rec_array[i].bt);
		btref_put(__bt_ref(btr.btr_where));
	}

	/*
	 * Step 4: Free the excess memory, zero padding, and unwire the buffer.
	 */

	end = round_page((vm_offset_t)(rec_array + count));
	bzero(rec_array + count, end - (vm_address_t)(rec_array + count));
	if (end < addr + size) {
		kmem_free(ipc_kernel_map, end, addr + size - end);
	}

	kr = vm_map_unwire(ipc_kernel_map, addr, end, FALSE);
	assert(kr == KERN_SUCCESS);

	*records = rec_array;
	*numrecs = count;
	return KERN_SUCCESS;
}
1753 
1754 uint32_t
btlog_guess_top(btlogu_t btlu,vm_address_t bt[],uint32_t * len)1755 btlog_guess_top(btlogu_t btlu, vm_address_t bt[], uint32_t *len)
1756 {
1757 	struct btlog_hash *btlh = btlu.btlh;
1758 	const unsigned RECS = 8;
1759 	struct btlog_record recs[RECS] = {0};
1760 	bt_stack_t bts;
1761 
1762 	if (btlu.btl->btl_type != BTLOG_HASH) {
1763 		return 0;
1764 	}
1765 
1766 	if (!lck_ticket_lock_try(&btlu.btl->btl_lock, &btlog_lck_grp)) {
1767 		return 0;
1768 	}
1769 
1770 	if (btlu.btl->btl_disabled || btlh->btlh_count == 0) {
1771 		goto disabled;
1772 	}
1773 
1774 	/*
1775 	 * This is called from panic context, and can't really
1776 	 * do what btlog_get_records() do and allocate memory.
1777 	 *
1778 	 * Instead, we use the refcounts in the bt library
1779 	 * as a proxy for counts (of course those backtraces
1780 	 * can be inflated due to being shared with other logs,
1781 	 * which is why we use `RECS` slots in the array to find
1782 	 * the RECS more popular stacks at all).
1783 	 *
1784 	 * Note: this will break down if permanent backtraces get used.
1785 	 *       if we ever go there for performance reasons,
1786 	 *       then we'll want to find another way to do this.
1787 	 */
1788 	for (uint32_t i = 0; i < btlh->btlh_count; i++) {
1789 		struct bt_hash_entry *bthe = &btlh->btlh_entries[i];
1790 		btref_t ref;
1791 
1792 		if (!bthe->bthe_addr) {
1793 			continue;
1794 		}
1795 
1796 		ref = __bt_ref(bthe->bthe_where);
1797 		bts = __btlib_deref(&bt_library, ref);
1798 
1799 		for (uint32_t j = 0; j < RECS; j++) {
1800 			if (ref == recs[j].btr_where) {
1801 				break;
1802 			}
1803 			if (bts->bts_ref_len > recs[j].btr_count) {
1804 				for (uint32_t k = j + 1; k < RECS; k++) {
1805 					recs[k] = recs[k - 1];
1806 				}
1807 				recs[j].btr_count = bts->bts_ref_len;
1808 				recs[j].btr_where = ref;
1809 				break;
1810 			}
1811 		}
1812 	}
1813 
1814 	/*
1815 	 * Then correct what we sampled by counting how many times
1816 	 * the backtrace _actually_ exists in that one log.
1817 	 */
1818 	for (uint32_t j = 0; j < RECS; j++) {
1819 		recs[j].btr_count = 0;
1820 	}
1821 
1822 	for (uint32_t i = 0; i < btlh->btlh_count; i++) {
1823 		struct bt_hash_entry *bthe = &btlh->btlh_entries[i];
1824 		btref_t ref;
1825 
1826 		if (!bthe->bthe_addr) {
1827 			continue;
1828 		}
1829 
1830 		ref = __bt_ref(bthe->bthe_where);
1831 
1832 		for (uint32_t j = 0; j < RECS; j++) {
1833 			if (recs[j].btr_where == ref) {
1834 				recs[j].btr_count++;
1835 				break;
1836 			}
1837 		}
1838 	}
1839 
1840 	for (uint32_t j = 1; j < RECS; j++) {
1841 		if (recs[0].btr_count < recs[j].btr_count) {
1842 			recs[0] = recs[j];
1843 		}
1844 	}
1845 	bts = __btlib_deref(&bt_library, recs[0].btr_where);
1846 	*len = __btstack_len(bts);
1847 
1848 	backtrace_unpack(BTP_KERN_OFFSET_32, (uintptr_t *)bt, BTLOG_MAX_DEPTH,
1849 	    (uint8_t *)bts->bts_frames, sizeof(uint32_t) * *len);
1850 
1851 disabled:
1852 	__btlog_unlock(btlu);
1853 
1854 	return recs[0].btr_count;
1855 }
1856 
1857 #if DEBUG || DEVELOPMENT
1858 
/*
 * For each element address in @instances, look up its recorded
 * backtrace ref in the hash log and collapse duplicates, invoking
 * @proc once per unique site with the number of elements sharing it.
 * On return, *countp holds the number of surviving slots.
 *
 * NOTE(review): slots are rewritten in place using the
 * kInstanceFlagReferenced/INSTANCE_PUT encoding shared with the
 * zone-leak caller — presumably zone leak detection; keep in sync.
 */
void
btlog_copy_backtraces_for_elements(
	btlogu_t                btlu,
	vm_address_t           *instances,
	uint32_t               *countp,
	uint32_t                elem_size,
	leak_site_proc          proc)
{
	struct btlog_hash *btlh = btlu.btlh;
	struct bt_hash_head *head;
	uint32_t count = *countp;
	uint32_t num_sites = 0;

	if (btlu.btl->btl_type != BTLOG_HASH) {
		return;
	}

	__btlog_lock(btlh);

	if (btlu.btl->btl_disabled) {
		goto disabled;
	}

	/* pass 1: replace each element with its backtrace ref, if found */
	for (uint32_t i = 0; i < count; i++) {
		vm_offset_t element = instances[i];
		void *addr = __btlog_elem_normalize((void *)element);
		btref_t ref = BTREF_NULL;
		uint32_t pos;

		if (kInstanceFlagReferenced & element) {
			continue;
		}

		element = INSTANCE_PUT(element) & ~kInstanceFlags;
		head = __btlog_hash_head(btlh, addr);
		pos  = head->bthh_first;
		while (pos != BT_HASH_END_MARKER) {
			struct bt_hash_entry *bthe = &btlh->btlh_entries[pos];

			if (__btlog_elem_decode(bthe->bthe_addr) == addr) {
				ref = __bt_ref(bthe->bthe_where);
				break;
			}

			pos = bthe->bthe_next;
		}

		if (ref != BTREF_NULL) {
			element = (ref | kInstanceFlagReferenced);
		}
		instances[num_sites++] = INSTANCE_PUT(element);
	}

	/* pass 2: count duplicates of each ref and report each site once */
	for (uint32_t i = 0; i < num_sites; i++) {
		vm_offset_t btref = instances[i];
		uint32_t site_count, dups;

		if (!(btref & kInstanceFlagReferenced)) {
			continue;
		}

		for (site_count = 1, dups = i + 1; dups < num_sites; dups++) {
			if (instances[dups] == btref) {
				site_count++;
				instances[dups] = 0;
			}
		}

		btref = INSTANCE_PUT(btref) & ~kInstanceFlags;
		proc(site_count, elem_size, (btref_t)btref);
	}

disabled:
	__btlog_unlock(btlh);

	*countp = num_sites;
}
1936 
1937 #endif /* DEBUG || DEVELOPMENT */
1938