xref: /xnu-8020.101.4/osfmk/kern/btlog.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2012-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/assert.h>
30 #include <kern/backtrace.h>
31 #include <kern/btlog.h>
32 #include <kern/smr.h>
33 #include <kern/startup.h>
34 #include <kern/thread_call.h>
35 #include <os/hash.h>
36 #include <mach/vm_map.h>
37 #include <mach/vm_param.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_map.h>
40 #include <vm/pmap.h>
41 
42 #pragma mark btref & helpers
43 
44 static LCK_GRP_DECLARE(bt_library_lck_grp, "bt_library");
45 static SMR_DEFINE_EARLY(bt_library_smr);
46 
47 #define BTS_FRAMES_MAX          13
48 #define BTS_FRAMES_REF_MASK     0xfffffff0
49 #define BTS_FRAMES_REF_INC      0x00000010
50 #define BTS_FRAMES_LEN_MASK     0x0000000f
51 
52 typedef SMR_POINTER(btref_t)    btref_smr_t;
53 
54 typedef union bt_stack {
55 	struct {
56 		btref_smr_t     bts_next;
57 		uint32_t        bts_ref_len;
58 		uint32_t        bts_hash;
59 		uint32_t        bts_frames[BTS_FRAMES_MAX];
60 	};
61 	struct {
62 		uint32_t        bts_padding[3 + BTS_FRAMES_MAX - 1 - sizeof(long) / 4];
63 		uint32_t        bts_free_next;
64 		smr_seq_t       bts_free_seq;
65 	};
66 } *bt_stack_t;
67 
68 static_assert(sizeof(union bt_stack) == 64); /* allocation scheme needs it */
69 
70 #define BTREF_PERMANENT_BIT     0x80000000u
71 #define BTREF_OP_MASK           0x0000003fu
72 #define BTREF_VALID_MASK        0xc000003fu
73 
74 #define BTL_SIZE_INIT           (1u << 20)
75 #define BTL_SIZE_MAX            (1u << 30)
76 #define BTL_SLABS               9
77 
78 #define BTL_PARAM_INIT          0x00000020u
79 #define BTL_PARAM_PARITY(p)     ((p) >> 31)
80 #define BTL_PARAM_SHIFT(p)      (32 - ((p) & 0x3f))
81 #define BTL_PARAM_IDX(p, h)     ((uint64_t)(h) >> ((p) & 0x3f))
82 #define BTL_PARAM_NEXT(p)       ((p) - 0x80000001u)
83 
84 #define BTL_HASH_SHIFT          8
85 #define BTL_HASH_COUNT          (1u << BTL_HASH_SHIFT)
86 #define BTL_HASH_MASK           (BTL_HASH_COUNT - 1)
87 
88 static_assert((BTL_SIZE_INIT << BTL_SLABS) == BTL_SIZE_MAX / 2);
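
/*
 * Editor's illustrative sketch (not part of the original source): how the
 * BTL_PARAM_* macros decode "btl_param".  The low 6 bits hold the hash
 * shift, bit 31 holds the parity selecting btl_hash[0] or btl_hash[1],
 * and BTL_PARAM_NEXT() flips the parity while doubling the outer array.
 * With "h" a 32-bit jenkins hash:
 *
 *     uint32_t p0 = BTL_PARAM_INIT;      // 0x00000020
 *     BTL_PARAM_PARITY(p0);              // 0 -> btl_hash[0]
 *     1u << BTL_PARAM_SHIFT(p0);         // 1u << (32 - 32) -> 1 outer slot
 *     BTL_PARAM_IDX(p0, h);              // (uint64_t)h >> 32 == 0
 *
 *     uint32_t p1 = BTL_PARAM_NEXT(p0);  // 0x8000001f
 *     BTL_PARAM_PARITY(p1);              // 1 -> btl_hash[1]
 *     1u << BTL_PARAM_SHIFT(p1);         // 1u << (32 - 31) -> 2 outer slots
 *     BTL_PARAM_IDX(p1, h);              // h >> 31, in {0, 1}
 */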
89 
90 typedef struct bt_hash {
91 	btref_smr_t             bth_array[BTL_HASH_COUNT];
92 } *bt_hash_t;
93 
94 #if DEBUG || DEVELOPMENT
95 #define BTLIB_VALIDATE          1
96 #else
97 #define BTLIB_VALIDATE          0
98 #endif
99 
100 /*!
101  * @typedef bt_library_t
102  *
103  * @brief
104  * Describes a backtrace library.
105  *
106  * @discussion
107  * A backtrace library is a scalable hash table of backtraces
108  * used for debugging purposes.
109  *
110  * By default there is a single global instance, but the code
111  * is amenable to having several instances.
112  *
113  *
114  * <h2>Data structure design</h2>
115  *
116  * Its hash table is structured like this:
117  *
118  *     par = BTL_PARAM_PARITY(btl->btl_param);
119  *     sz  = 1u << BTL_PARAM_SHIFT(btl->btl_param);
120  *
121  *     btl->btl_hash[par]
122  *           │
123  *           │     ╭─────── array of size "sz" buckets ───────╮
124  *           ╰───> │                                          │
125  *                 ╰──────────────────────────────────┼───────╯
126  *                                                    │
127  *               ╭─────── struct bt_hash ───────╮     │
128  *               │                              │ <───╯
129  *               ╰──┼───────────────────────────╯
130  *                  │
131  *                  ╰──> Stack ──> Stack ──> Stack ──> X
132  *
133  *
134  * The "btl_hash" two entries are used with the "btl_param" switch in order
135  * to swap the outer array while growing the hash without perturbating
136  * readers.
137  *
138  * The lists of stacks are also maintained in "hash" order which allows
139  * for the rehashing to be a clean split of the lists.
140  *
141  * All stack pointers are "references", which are smaller 32-bit offsets
142  * within the library backing store (slabs).
143  *
144  */
145 typedef struct bt_library {
146 	lck_ticket_t            btl_lock;
147 	SMR_POINTER(uint32_t)   btl_param;
148 
149 	bt_hash_t              *btl_hash[2];
150 	thread_call_t           btl_call;
151 	thread_t                btl_grower;
152 
153 	btref_t                *btl_free_tail;
154 	btref_t                 btl_free_head;
155 
156 	btref_t                 btl_deferred_head;
157 
158 	bool                    btl_waiters;
159 	bool                    btl_in_callout;
160 	bool                    btl_rehashing;
161 	uint8_t                 btl_slab_cur;
162 	uint32_t                btl_alloc_pos;
163 	uint32_t                btl_faulted_pos;
164 	uint32_t                btl_max_pos;
165 	vm_address_t            btl_slabs[BTL_SLABS];
166 } *bt_library_t;
167 
168 static struct bt_library        bt_library;
169 
170 static size_t
171 __btstack_len(bt_stack_t bts)
172 {
173 	return bts->bts_ref_len & BTS_FRAMES_LEN_MASK;
174 }
175 
176 static size_t
177 __btstack_size(bt_stack_t bts)
178 {
179 	return sizeof(uint32_t) * __btstack_len(bts);
180 }
181 
182 static bool
183 __btstack_same(bt_stack_t a, bt_stack_t b)
184 {
185 	return a->bts_hash == b->bts_hash &&
186 	       __btstack_len(a) == __btstack_len(b) &&
187 	       memcmp(a->bts_frames, b->bts_frames, __btstack_size(a)) == 0;
188 }
189 
190 static uint32_t
191 __btstack_capture(bt_stack_t bts, void *fp, bool permanent)
192 {
193 	struct backtrace_control ctl = {
194 		.btc_frame_addr = (vm_offset_t)fp,
195 	};
196 	size_t size;
197 
198 	size = backtrace_packed(BTP_KERN_OFFSET_32, (uint8_t *)bts->bts_frames,
199 	    sizeof(bts->bts_frames), &ctl, NULL);
200 	bts->bts_ref_len = (size / sizeof(uint32_t)) +
201 	    (permanent ? BTS_FRAMES_REF_MASK : BTS_FRAMES_REF_INC);
202 	return bts->bts_hash = os_hash_jenkins(bts->bts_frames, size);
203 }
204 
205 static btref_t
206 __btstack_try_retain(btref_t btref, bt_stack_t bts, btref_get_flags_t flags)
207 {
208 	uint32_t oref, nref;
209 
210 	oref = bts->bts_ref_len;
211 
212 	do {
213 		switch (oref & BTS_FRAMES_REF_MASK) {
214 		case 0:
215 			return 0;
216 		case BTS_FRAMES_REF_MASK:
217 			return btref | BTREF_PERMANENT_BIT;
218 		}
219 
220 		nref = oref + BTS_FRAMES_REF_INC;
221 		if (flags & BTREF_GET_PERMANENT) {
222 			nref |= BTS_FRAMES_REF_MASK;
223 		}
224 	} while (!os_atomic_cmpxchgv(&bts->bts_ref_len,
225 	    oref, nref, &oref, relaxed));
226 
227 	if ((nref & BTS_FRAMES_REF_MASK) == BTS_FRAMES_REF_MASK) {
228 		btref |= BTREF_PERMANENT_BIT;
229 	}
230 
231 	return btref;
232 }
233 
234 __abortlike
235 static void
236 __btstack_resurrect_panic(bt_stack_t bts)
237 {
238 	panic("trying to resurrect bt stack %p", bts);
239 }
240 
241 static btref_t
242 __btstack_retain(btref_t btref, bt_stack_t bts, btref_get_flags_t flags)
243 {
244 	uint32_t oref, nref;
245 
246 	oref = bts->bts_ref_len;
247 
248 	do {
249 		switch (oref & BTS_FRAMES_REF_MASK) {
250 		case 0:
251 			__btstack_resurrect_panic(bts);
252 		case BTS_FRAMES_REF_MASK:
253 			return btref | BTREF_PERMANENT_BIT;
254 		}
255 
256 		nref = oref + BTS_FRAMES_REF_INC;
257 		if (flags & BTREF_GET_PERMANENT) {
258 			nref |= BTS_FRAMES_REF_MASK;
259 		}
260 	} while (!os_atomic_cmpxchgv(&bts->bts_ref_len,
261 	    oref, nref, &oref, relaxed));
262 
263 	if ((nref & BTS_FRAMES_REF_MASK) == BTS_FRAMES_REF_MASK) {
264 		btref |= BTREF_PERMANENT_BIT;
265 	}
266 
267 	return btref;
268 }
269 
270 __abortlike
271 static void
272 __btstack_over_release_panic(bt_stack_t bts)
273 {
274 	panic("trying to over-release bt stack %p", bts);
275 }
276 
277 static bool
278 __btstack_release(bt_stack_t bts)
279 {
280 	uint32_t oref, nref;
281 
282 	oref = bts->bts_ref_len;
283 
284 	do {
285 		switch (oref & BTS_FRAMES_REF_MASK) {
286 		case 0:
287 			__btstack_over_release_panic(bts);
288 		case BTS_FRAMES_REF_MASK:
289 			return false;
290 		}
291 
292 		nref = oref - BTS_FRAMES_REF_INC;
293 	} while (!os_atomic_cmpxchgv(&bts->bts_ref_len,
294 	    oref, nref, &oref, relaxed));
295 
296 	return nref < BTS_FRAMES_REF_INC;
297 }
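
/*
 * Editor's illustrative sketch (not part of the original source): how
 * "bts_ref_len" packs two values into one word.  The low 4 bits
 * (BTS_FRAMES_LEN_MASK) hold the packed frame count, and the high 28 bits
 * (BTS_FRAMES_REF_MASK) hold a refcount in units of BTS_FRAMES_REF_INC;
 * an all-ones refcount marks a permanent stack:
 *
 *     0x00000015: 5 frames, refcount 1
 *     0x00000025: 5 frames, refcount 2 (after one __btstack_retain())
 *     0x00000005: 5 frames, refcount 0 (freed; __btstack_try_retain()
 *                 returns 0 and __btstack_retain() panics)
 *     0xfffffff5: 5 frames, permanent (retains/releases become no-ops)
 */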
298 
299 static bt_stack_t
300 __btlib_deref(bt_library_t btl, btref_t ref)
301 {
302 	uint32_t slab = 0;
303 
304 	if (ref >= BTL_SIZE_INIT) {
305 		slab = __builtin_clz(BTL_SIZE_INIT) - __builtin_clz(ref) + 1;
306 	}
307 	return (bt_stack_t)(btl->btl_slabs[slab] + ref);
308 }
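
/*
 * Editor's worked example (not part of the original source): slabs double
 * in size and refs are global offsets, so the slab index falls out of the
 * position of the ref's top bit:
 *
 *     ref = 0x00080000 (512K): ref < BTL_SIZE_INIT -> slab 0;
 *           address = btl_slabs[0] + ref.
 *     ref = 0x00180000 (1.5M): clz(1M) - clz(ref) + 1 = 11 - 11 + 1 = 1;
 *           __btlib_grow() recorded btl_slabs[1] as (allocation base - 1M),
 *           so this resolves 512K into the 1M slab 1 allocation.
 */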
309 
310 static void
311 __btlib_lock(bt_library_t btl)
312 {
313 	lck_ticket_lock(&btl->btl_lock, &bt_library_lck_grp);
314 }
315 
316 static void
317 __btlib_unlock(bt_library_t btl)
318 {
319 	lck_ticket_unlock(&btl->btl_lock);
320 }
321 
322 static inline btref_smr_t *
323 __btlib_head(bt_library_t btl, uint32_t param, uint32_t hash)
324 {
325 	uint32_t par = BTL_PARAM_PARITY(param);
326 	uint32_t idx = BTL_PARAM_IDX(param, hash);
327 
328 	return &btl->btl_hash[par][idx]->bth_array[hash & BTL_HASH_MASK];
329 }
330 
331 #pragma mark btref growth & rehashing
332 
333 static void __btlib_remove_deferred_locked(bt_library_t btl);
334 
335 static bool
336 __btlib_growth_needed(bt_library_t btl)
337 {
338 	if (btl->btl_faulted_pos >= btl->btl_alloc_pos + PAGE_SIZE / 2) {
339 		return false;
340 	}
341 
342 	if (btl->btl_faulted_pos == btl->btl_max_pos &&
343 	    btl->btl_slab_cur + 1 == BTL_SLABS) {
344 		return false;
345 	}
346 
347 	return true;
348 }
349 
350 static bool
351 __btlib_rehash_needed(bt_library_t btl)
352 {
353 	uint32_t param = smr_serialized_load(&btl->btl_param);
354 	uint32_t shift = BTL_HASH_SHIFT + BTL_PARAM_SHIFT(param);
355 
356 	return (btl->btl_faulted_pos >> (3 + shift)) >= sizeof(union bt_stack);
357 }
358 
359 static void
360 __btlib_callout_wakeup(bt_library_t btl)
361 {
362 	if (startup_phase >= STARTUP_SUB_THREAD_CALL &&
363 	    !btl->btl_in_callout) {
364 		thread_call_enter(btl->btl_call);
365 	}
366 }
367 
368 __attribute__((noinline))
369 static void
370 __btlib_grow(bt_library_t btl)
371 {
372 	vm_map_t map = kernel_data_map_get() ?: kernel_map;
373 	kern_return_t kr = KERN_SUCCESS;
374 	vm_address_t addr;
375 
376 	while (btl->btl_grower) {
377 		btl->btl_waiters = true;
378 		lck_ticket_sleep_with_inheritor(&btl->btl_lock,
379 		    &bt_library_lck_grp, LCK_SLEEP_DEFAULT,
380 		    &btl->btl_grower, btl->btl_grower,
381 		    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
382 		if (!__btlib_growth_needed(btl)) {
383 			return;
384 		}
385 	}
386 	btl->btl_grower = current_thread();
387 
388 	__btlib_unlock(btl);
389 
390 	if (btl->btl_faulted_pos == btl->btl_max_pos) {
391 		uint8_t slab = btl->btl_slab_cur + 1;
392 		vm_size_t size = btl->btl_max_pos;
393 
394 		kr = kernel_memory_allocate(map, &addr,
395 		    size, PAGE_MASK, KMA_ZERO | KMA_VAONLY,
396 		    VM_KERN_MEMORY_DIAG);
397 		if (kr != KERN_SUCCESS) {
398 			goto done;
399 		}
400 
401 		btl->btl_slab_cur = slab;
402 		btl->btl_slabs[slab] = addr - size;
403 		btl->btl_max_pos += size;
404 	}
405 
406 	if (btl->btl_faulted_pos < btl->btl_alloc_pos + PAGE_SIZE / 2) {
407 		uint8_t slab = btl->btl_slab_cur;
408 
409 		addr = btl->btl_slabs[slab] + btl->btl_faulted_pos;
410 
411 		kr = kernel_memory_populate(map, addr, PAGE_SIZE,
412 		    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_DIAG);
413 	}
414 
415 done:
416 	__btlib_lock(btl);
417 
418 	if (kr == KERN_SUCCESS) {
419 		btl->btl_faulted_pos += PAGE_SIZE;
420 	}
421 
422 	btl->btl_grower = NULL;
423 
424 	if (btl->btl_waiters) {
425 		btl->btl_waiters = false;
426 		wakeup_all_with_inheritor(&btl->btl_grower, THREAD_AWAKENED);
427 	}
428 
429 	if (__btlib_rehash_needed(btl)) {
430 		__btlib_callout_wakeup(btl);
431 	}
432 }
433 
434 static void
435 __btlib_split_step(
436 	bt_library_t            btl,
437 	bt_hash_t              *bthp,
438 	uint32_t                idx,
439 	uint32_t                mask)
440 {
441 	btref_smr_t *head, *prev;
442 	bt_stack_t   bts;
443 	btref_t      ref;
444 
445 	__btlib_lock(btl);
446 
447 	if (__btlib_growth_needed(btl)) {
448 		__btlib_grow(btl);
449 	}
450 
451 	for (uint32_t i = 0; i < BTL_HASH_COUNT; i++) {
452 		prev = head = &bthp[idx]->bth_array[i];
453 
454 		while ((ref = smr_serialized_load(prev)) != BTREF_NULL) {
455 			bts = __btlib_deref(btl, ref);
456 			if (bts->bts_hash & mask) {
457 				break;
458 			}
459 			prev = &bts->bts_next;
460 		}
461 
462 		if (idx & 1) {
463 			smr_init_store(head, ref);
464 		} else {
465 			smr_clear_store(prev);
466 		}
467 	}
468 
469 	__btlib_unlock(btl);
470 }
471 
472 #if BTLIB_VALIDATE
473 static void
474 __btlib_validate(
475 	bt_library_t            btl,
476 	bt_hash_t              *bthp,
477 	uint32_t                size,
478 	uint32_t                param)
479 {
480 	bt_stack_t bts;
481 	btref_t ref;
482 
483 	for (uint32_t i = 0; i < size; i++) {
484 		for (uint32_t j = 0; j < BTL_HASH_COUNT; j++) {
485 			ref = smr_serialized_load(&bthp[i]->bth_array[j]);
486 			if (ref == 0) {
487 				continue;
488 			}
489 			bts = __btlib_deref(btl, ref);
490 			assert3u(BTL_PARAM_IDX(param, bts->bts_hash), ==, i);
491 			assert3u(bts->bts_hash & BTL_HASH_MASK, ==, j);
492 		}
493 	}
494 }
495 #endif /* BTLIB_VALIDATE */
496 
497 __attribute__((noinline))
498 static void
499 __btlib_rehash_and_lock(bt_library_t btl)
500 {
501 	uint32_t   param_old, size_old, mask;
502 	bt_hash_t *bthp_old;
503 	bt_hash_t *bthp;
504 	smr_seq_t  s1, s2;
505 
506 	/*
507 	 * Step 1: compute all the right sizes and parameters
508 	 *         and allocate the new hash table elements.
509 	 */
510 	param_old = smr_serialized_load(&btl->btl_param);
511 	bthp_old  = btl->btl_hash[BTL_PARAM_PARITY(param_old)];
512 	size_old  = 1u << BTL_PARAM_SHIFT(param_old);
513 	bthp      = kalloc_type(bt_hash_t, 2 * size_old, Z_WAITOK_ZERO);
514 	mask      = 1u << (BTL_PARAM_NEXT(param_old) & 0x1f);
515 
516 	if (bthp == NULL) {
517 		return;
518 	}
519 
520 	for (uint32_t i = 0; i < size_old; i++) {
521 		bthp[2 * i] = bthp_old[i];
522 		bthp[2 * i + 1] = kalloc_type(struct bt_hash,
523 		    Z_WAITOK_ZERO_NOFAIL);
524 	}
525 
526 	/*
527 	 * Step 2: Copy all the hash table buckets in one go.
528 	 *         And publish the new array.
529 	 *
530 	 * TODO: consider if we want to let go of the lock sometimes.
531 	 */
532 	__btlib_lock(btl);
533 
534 	btl->btl_rehashing = true;
535 
536 	for (uint32_t i = 0; i < size_old; i++) {
537 		memcpy(bthp[2 * i + 1], bthp[2 * i], sizeof(struct bt_hash));
538 	}
539 
540 	btl->btl_hash[!BTL_PARAM_PARITY(param_old)] = bthp;
541 
542 	smr_serialized_store(&btl->btl_param, BTL_PARAM_NEXT(param_old));
543 
544 	__btlib_unlock(btl);
545 
546 	smr_synchronize(&bt_library_smr);
547 
548 	/*
549 	 * Step 3: Compute the "odd" lists
550 	 *
551 	 * When we arrive here, we have 2 buckets per list working this way,
552 	 * assuming the hash bit that we are interested in changes on "C -> D":
553 	 *
554 	 * [ even ] -> A -> B -> C -> D -> E -> 0
555 	 * [ odd  ] ---^
556 	 *
557 	 * We will now build:
558 	 *
559 	 * [ even ] -> A -> B -> C -> D -> E -> 0
560 	 * [ odd  ] ------------------^
561 	 *
562 	 * Note: we try to advance the SMR clock twice,
563 	 *       in the hope that for larger hashes it will
564 	 *       help smr_wait() not to spin.
565 	 */
566 
567 	for (uint32_t i = 0; i < size_old; i += 2) {
568 		__btlib_split_step(btl, bthp, i + 1, mask);
569 	}
570 	s1 = smr_advance(&bt_library_smr);
571 
572 	if (size_old >= 2) {
573 		for (uint32_t i = size_old; i < 2 * size_old; i += 2) {
574 			__btlib_split_step(btl, bthp, i + 1, mask);
575 		}
576 		s2 = smr_advance(&bt_library_smr);
577 	}
578 
579 	/*
580 	 * It's now possible to free the old array; do it now,
581 	 * in a feeble attempt to give SMR readers more time before
582 	 * the next smr_wait().
583 	 */
584 	btl->btl_hash[BTL_PARAM_PARITY(param_old)] = NULL;
585 	kfree_type(bt_hash_t, size_old, bthp_old);
586 
587 	/*
588 	 * Step 4: Split the "even" lists
589 	 *
590 	 * We will now cut the "C -> D" link in the even bucket, ending up with:
591 	 *
592 	 * [ even ] -> A -> B -> C -> 0
593 	 * [ odd  ] ----------------> D -> E -> 0
594 	 */
595 	smr_wait(&bt_library_smr, s1);
596 	for (uint32_t i = 0; i < size_old; i += 2) {
597 		__btlib_split_step(btl, bthp, i, mask);
598 	}
599 
600 	if (size_old >= 2) {
601 		smr_wait(&bt_library_smr, s2);
602 		for (uint32_t i = size_old; i < 2 * size_old; i += 2) {
603 			__btlib_split_step(btl, bthp, i, mask);
604 		}
605 	}
606 
607 	/*
608 	 * Help readers see the cuts.
609 	 */
610 	(void)smr_advance(&bt_library_smr);
611 
612 	__btlib_lock(btl);
613 
614 	btl->btl_rehashing = false;
615 
616 #if BTLIB_VALIDATE
617 	__btlib_validate(btl, bthp, size_old * 2, BTL_PARAM_NEXT(param_old));
618 #endif /* BTLIB_VALIDATE */
619 
620 	__btlib_remove_deferred_locked(btl);
621 }
622 
623 static void
624 __btlib_callout(thread_call_param_t arg0, thread_call_param_t __unused arg1)
625 {
626 	bt_library_t btl = arg0;
627 
628 	__btlib_lock(btl);
629 	btl->btl_in_callout = true;
630 
631 	if (__btlib_growth_needed(btl)) {
632 		__btlib_grow(btl);
633 	}
634 
635 	while (__btlib_rehash_needed(btl)) {
636 		__btlib_unlock(btl);
637 		__btlib_rehash_and_lock(btl);
638 	}
639 
640 	btl->btl_in_callout = false;
641 	__btlib_unlock(btl);
642 }
643 
644 static void
645 __btlib_init(bt_library_t btl)
646 {
647 	vm_map_t      map = kernel_data_map_get() ?: kernel_map;
648 	kern_return_t kr;
649 	vm_address_t  addr;
650 	bt_hash_t    *bthp;
651 
652 	lck_ticket_init(&btl->btl_lock, &bt_library_lck_grp);
653 	btl->btl_free_tail = &btl->btl_free_head;
654 	btl->btl_call = thread_call_allocate_with_options(__btlib_callout, btl,
655 	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
656 
657 	kr = kernel_memory_allocate(map, &addr, BTL_SIZE_INIT,
658 	    PAGE_MASK, KMA_ZERO | KMA_VAONLY, VM_KERN_MEMORY_DIAG);
659 	if (kr != KERN_SUCCESS) {
660 		panic("unable to allocate initial VA: %d", kr);
661 	}
662 
663 	bthp = kalloc_type(bt_hash_t, 1, Z_WAITOK_ZERO_NOFAIL);
664 	bthp[0] = kalloc_type(struct bt_hash, Z_WAITOK_ZERO_NOFAIL);
665 
666 	btl->btl_slabs[0]  = addr;
667 	btl->btl_max_pos   = BTL_SIZE_INIT;
668 	btl->btl_alloc_pos = sizeof(union bt_stack);
669 	btl->btl_hash[0]   = bthp;
670 	smr_init_store(&btl->btl_param, BTL_PARAM_INIT);
671 }
672 STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __btlib_init, &bt_library);
673 
674 #pragma mark btref insertion/removal fastpaths
675 
676 __attribute__((noinline))
677 static btref_t
678 __btlib_insert(
679 	bt_library_t            btl,
680 	bt_stack_t              needle,
681 	btref_get_flags_t       flags,
682 	uint32_t                hash)
683 {
684 	bt_stack_t bts;
685 	btref_smr_t *prev;
686 	btref_t ref;
687 
688 	__btlib_lock(btl);
689 
690 	if (__btlib_growth_needed(btl)) {
691 		/*
692 		 * Do this first so that we keep the lock held
693 		 * while we insert.
694 		 */
695 		if ((flags & BTREF_GET_NOWAIT) == 0) {
696 			__btlib_grow(btl);
697 		} else {
698 			__btlib_callout_wakeup(btl);
699 		}
700 	}
701 
702 	prev = __btlib_head(btl, smr_serialized_load(&btl->btl_param), hash);
703 	while ((ref = smr_serialized_load(prev)) != BTREF_NULL) {
704 		bts = __btlib_deref(btl, ref);
705 
706 #if BTLIB_VALIDATE
707 		assert3u(bts->bts_hash & BTL_HASH_MASK, ==,
708 		    hash & BTL_HASH_MASK);
709 #endif /* BTLIB_VALIDATE */
710 
711 		if (needle->bts_hash < bts->bts_hash) {
712 			break;
713 		}
714 		if (__btstack_same(needle, bts)) {
715 			ref = __btstack_try_retain(ref, bts, flags);
716 			if (ref) {
717 				__btlib_unlock(btl);
718 				return ref;
719 			}
720 			break;
721 		}
722 		prev = &bts->bts_next;
723 	}
724 
725 	if (btl->btl_free_head) {
726 		ref = btl->btl_free_head;
727 		bts = __btlib_deref(btl, btl->btl_free_head);
728 		if (smr_poll(&bt_library_smr, bts->bts_free_seq)) {
729 			if ((btl->btl_free_head = bts->bts_free_next) == 0) {
730 				btl->btl_free_tail = &btl->btl_free_head;
731 			}
732 			goto allocated;
733 		}
734 	}
735 
736 	if (__improbable(btl->btl_alloc_pos + sizeof(union bt_stack) >
737 	    btl->btl_faulted_pos)) {
738 		__btlib_unlock(btl);
739 		return BTREF_NULL;
740 	}
741 
742 	ref = btl->btl_alloc_pos;
743 	btl->btl_alloc_pos = ref + sizeof(union bt_stack);
744 	bts = __btlib_deref(btl, ref);
745 
746 allocated:
747 	*bts = *needle;
748 	smr_serialized_store(&bts->bts_next, smr_serialized_load(prev));
749 	smr_serialized_store(prev, ref);
750 
751 	__btlib_unlock(btl);
752 
753 	return ref | (flags & BTREF_GET_PERMANENT);
754 }
755 
756 __abortlike
757 static void
758 __btlib_remove_notfound_panic(bt_library_t btl, bt_stack_t bts)
759 {
760 	panic("couldn't find stack %p in library %p", bts, btl);
761 }
762 
763 static void
764 __btlib_remove_locked(bt_library_t btl, btref_t ref, bt_stack_t bts)
765 {
766 	uint32_t hash = bts->bts_hash;
767 	uint32_t param = smr_serialized_load(&btl->btl_param);
768 	btref_smr_t *prev;
769 
770 	if (btl->btl_rehashing) {
771 		/*
772 		 * We can't really delete things during rehash.
773 		 * Put them on the deferred list.
774 		 */
775 		bts->bts_free_next = btl->btl_deferred_head;
776 		btl->btl_deferred_head = ref;
777 		return;
778 	}
779 
780 	prev = __btlib_head(btl, param, hash);
781 	for (;;) {
782 		btref_t tmp = smr_serialized_load(prev);
783 
784 		if (tmp == ref) {
785 			break;
786 		}
787 		if (tmp == 0) {
788 			__btlib_remove_notfound_panic(btl, bts);
789 		}
790 		prev = &__btlib_deref(btl, tmp)->bts_next;
791 	}
792 
793 	smr_serialized_store(prev, smr_serialized_load(&bts->bts_next));
794 	bts->bts_free_next = 0;
795 	*btl->btl_free_tail = ref;
796 	btl->btl_free_tail = &bts->bts_free_next;
797 	bts->bts_free_seq = smr_advance(&bt_library_smr);
798 }
799 
800 static void
801 __btlib_remove_deferred_locked(bt_library_t btl)
802 {
803 	btref_t ref, next;
804 	bt_stack_t bts;
805 
806 	next = btl->btl_deferred_head;
807 	btl->btl_deferred_head = 0;
808 	while ((ref = next)) {
809 		bts = __btlib_deref(btl, ref);
810 		next = bts->bts_free_next;
811 		__btlib_remove_locked(btl, ref, bts);
812 	}
813 }
814 
815 __attribute__((noinline))
816 static void
817 __btlib_remove(bt_library_t btl, btref_t ref, bt_stack_t bts)
818 {
819 	__btlib_lock(btl);
820 	__btlib_remove_locked(btl, ref, bts);
821 	__btlib_unlock(btl);
822 }
823 
824 static btref_t
825 __btlib_get(bt_library_t btl, void *fp, btref_get_flags_t flags)
826 {
827 	union bt_stack needle;
828 	btref_smr_t *head;
829 	uint32_t hash, param;
830 	btref_t ref;
831 
832 	if (bt_library.btl_alloc_pos == 0) {
833 		return BTREF_NULL;
834 	}
835 
836 	hash  = __btstack_capture(&needle, fp, (flags & BTREF_GET_PERMANENT));
837 
838 	smr_enter(&bt_library_smr);
839 
840 	/*
841 	 * The hash "params" have a single bit to select the btl_hash[]
842 	 * pointer that is used.
843 	 *
844 	 * The compiler knows enough about this code to break
845 	 * the dependency chains that we would like, generating code like this:
846 	 *
847 	 *     bthp = btl->btl_hash[0];
848 	 *     if (BTL_PARAM_PARITY(param)) {
849 	 *             bthp = btl->btl_hash[1];
850 	 *     }
851 	 *
852 	 * We could try to play tricks but this would be brittle, so instead,
853 	 * use a proper acquire barrier on param, which pairs with
854 	 * smr_serialized_store(&btl->btl_param, ...)
855 	 * in __btlib_rehash_and_lock().
856 	 *
857 	 *
858 	 * Similarly, because the `bts_next` fields are not dereferenced
859 	 * right away but used as part of complicated arithmetic,
860 	 * trusting the compiler to maintain dependencies
861 	 * is a tall order; sometimes an acquire barrier is best.
862 	 */
863 	param = smr_entered_load_acquire(&btl->btl_param);
864 	head  = __btlib_head(btl, param, hash);
865 	ref   = smr_entered_load_acquire(head);
866 
867 	while (ref) {
868 		bt_stack_t bts = __btlib_deref(btl, ref);
869 
870 #if BTLIB_VALIDATE
871 		assert3u(bts->bts_hash & BTL_HASH_MASK, ==,
872 		    hash & BTL_HASH_MASK);
873 #endif /* BTLIB_VALIDATE */
874 
875 		if (needle.bts_hash < bts->bts_hash) {
876 			break;
877 		}
878 		if (__btstack_same(&needle, bts) &&
879 		    (ref = __btstack_try_retain(ref, bts, flags))) {
880 			smr_leave(&bt_library_smr);
881 			return ref;
882 		}
883 
884 		ref = smr_entered_load_acquire(&bts->bts_next);
885 	}
886 
887 	smr_leave(&bt_library_smr);
888 
889 	return __btlib_insert(btl, &needle, flags, hash);
890 }
891 
892 btref_t
893 btref_get(void *fp, btref_get_flags_t flags)
894 {
895 	return __btlib_get(&bt_library, fp, flags);
896 }
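
/*
 * Editor's usage sketch (not part of the original source; error handling
 * elided).  A client typically captures the backtrace leading up to its
 * caller, keeps the compact 32-bit ref, and drops it when done:
 *
 *     btref_t ref = btref_get(__builtin_frame_address(0), 0);
 *     ...
 *     btref_put(ref);   // stack returns to the free list on last release
 *
 * BTREF_GET_PERMANENT pins the stack forever (its ref then carries
 * BTREF_PERMANENT_BIT), and BTREF_GET_NOWAIT skips growing the backing
 * store, so the call may return BTREF_NULL under memory pressure.
 */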
897 
898 __abortlike
899 static void
900 __btref_invalid(btref_t btref)
901 {
902 	panic("trying to manipulate invalid backtrace ref: 0x%08x", btref);
903 }
904 
905 static inline bool
906 __btref_isvalid(btref_t btref)
907 {
908 	return ((btref & BTREF_VALID_MASK) & ~BTREF_GET_PERMANENT) == 0;
909 }
910 
911 btref_t
912 btref_retain(btref_t btref)
913 {
914 	uint32_t sig  = btref & BTREF_VALID_MASK;
915 
916 	if (btref && sig == 0) {
917 		bt_stack_t bts = __btlib_deref(&bt_library, btref);
918 
919 		btref = __btstack_retain(btref, bts, 0);
920 	} else if (sig & ~BTREF_PERMANENT_BIT) {
921 		__btref_invalid(btref);
922 	}
923 
924 	return btref;
925 }
926 
927 void
928 btref_put(btref_t btref)
929 {
930 	uint32_t sig = btref & BTREF_VALID_MASK;
931 
932 	if (btref && sig == 0) {
933 		bt_library_t btl = &bt_library;
934 		bt_stack_t bts = __btlib_deref(btl, btref);
935 
936 		if (__improbable(__btstack_release(bts))) {
937 			__btlib_remove(btl, btref, bts);
938 		}
939 	} else if (sig & ~BTREF_PERMANENT_BIT) {
940 		__btref_invalid(btref);
941 	}
942 }
943 
944 uint32_t
945 btref_decode_unslide(btref_t btref, mach_vm_address_t bt_out[])
946 {
947 #if __LP64__
948 	uintptr_t *bt = (uintptr_t *)bt_out;
949 #else
950 	uintptr_t  bt_buf[BTLOG_MAX_DEPTH];
951 	uintptr_t *bt = bt_buf;
952 #endif
953 
954 	if (__btref_isvalid(btref)) {
955 		bt_stack_t bts = __btlib_deref(&bt_library, btref);
956 		uint32_t   len = __btstack_len(bts);
957 		backtrace_unpack(BTP_KERN_OFFSET_32, bt, BTLOG_MAX_DEPTH,
958 		    (uint8_t *)bts->bts_frames, sizeof(uint32_t) * len);
959 
960 		for (uint32_t i = 0; i < len; i++) {
961 			bt_out[i] = VM_KERNEL_UNSLIDE(bt[i]);
962 		}
963 
964 		return len;
965 	}
966 
967 	__btref_invalid(btref);
968 }
969 
970 #pragma mark btlog types and helpers
971 
972 struct btlog {
973 	btlog_type_t            btl_type;
974 	uint32_t                btl_disabled : 1;
975 	uint32_t                btl_sample_max : 23;
976 #define BTL_SAMPLE_LIMIT        0x007fffffu
977 	uint32_t                btl_count;
978 	lck_ticket_t            btl_lock;
979 	uint32_t     *__zpercpu btl_sample;
980 };
981 
982 #if __LP64__
983 #define BTLOG_ENTRY_ADDR_BITS   48
984 #else
985 #define BTLOG_ENTRY_ADDR_BITS   32
986 #endif
987 
988 struct bt_log_entry {
989 	vm_address_t            btle_addr;
990 	btref_t                 btle_where;
991 } __attribute__((packed, aligned(4)));
992 
993 struct btlog_log {
994 	struct btlog            btll_hdr;
995 #define btll_count              btll_hdr.btl_count
996 	uint32_t                btll_pos;
997 	struct bt_log_entry     btll_entries[__counted_by(btll_count)];
998 };
999 
1000 
1001 #define BT_HASH_END_MARKER      UINT32_MAX
1002 
1003 struct bt_hash_entry {
1004 	vm_address_t            bthe_addr;
1005 	uint32_t                bthe_next;
1006 	btref_t                 bthe_where;
1007 };
1008 
1009 struct bt_hash_head {
1010 	uint32_t                bthh_first;
1011 	uint32_t                bthh_last;
1012 };
1013 
1014 struct btlog_hash {
1015 	struct btlog            btlh_hdr;
1016 #define btlh_count              btlh_hdr.btl_count
1017 	uint32_t                btlh_pos;
1018 	struct bt_hash_head     btlh_free;
1019 	struct bt_hash_entry    btlh_entries[__counted_by(btlh_count)];
1020 };
1021 
1022 typedef union {
1023 	vm_address_t            bta;
1024 	struct btlog           *btl;
1025 	struct btlog_log       *btll;
1026 	struct btlog_hash      *btlh;
1027 } __attribute__((transparent_union)) btlogu_t;
1028 
1029 static LCK_GRP_DECLARE(btlog_lck_grp, "btlog");
1030 
1031 static void
1032 __btlog_lock(btlogu_t btlu)
1033 {
1034 	lck_ticket_lock(&btlu.btl->btl_lock, &btlog_lck_grp);
1035 }
1036 
1037 static void
1038 __btlog_unlock(btlogu_t btlu)
1039 {
1040 	lck_ticket_unlock(&btlu.btl->btl_lock);
1041 }
1042 
1043 static void *
1044 __btlog_elem_normalize(void *addr)
1045 {
1046 #if CONFIG_KERNEL_TBI
1047 	addr = (void *)VM_KERNEL_TBI_FILL((vm_offset_t)addr);
1048 #endif /* CONFIG_KERNEL_TBI */
1049 	return addr;
1050 }
1051 
1052 static long
1053 __btlog_elem_encode(void *addr)
1054 {
1055 	return ~(long)__btlog_elem_normalize(addr);
1056 }
1057 
1058 static void *
1059 __btlog_elem_decode(long addr)
1060 {
1061 	return (void *)~addr;
1062 }
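
/*
 * Editor's illustrative sketch (not part of the original source): tracked
 * addresses are stored bit-flipped, so a live kernel pointer never appears
 * verbatim in log memory and an empty slot (bthe_addr == 0) cannot be
 * mistaken for a real element.  With "elem" standing for whatever address
 * the client records:
 *
 *     void *p   = __btlog_elem_normalize(elem);  // canonicalize TBI bits
 *     long  enc = __btlog_elem_encode(elem);     // == ~(long)p
 *     assert(__btlog_elem_decode(enc) == p);     // round-trips
 */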
1063 
1064 static struct bt_hash_head *
1065 __btlog_hash_hash(struct btlog_hash *btlh)
1066 {
1067 	return (struct bt_hash_head *)(btlh->btlh_entries + btlh->btlh_count);
1068 }
1069 
1070 static uint32_t
1071 __btlog_hash_count(struct btlog_hash *btlh)
1072 {
1073 	return btlh->btlh_count >> 2;
1074 }
1075 
1076 static struct bt_hash_head *
1077 __btlog_hash_head(struct btlog_hash *btlh, void *addr)
1078 {
1079 	uint32_t h = os_hash_kernel_pointer(__btlog_elem_normalize(addr));
1080 	h &= (__btlog_hash_count(btlh) - 1);
1081 	return &__btlog_hash_hash(btlh)[h];
1082 }
1083 
1084 __attribute__((overloadable))
1085 static struct btlog_size_pair {
1086 	vm_size_t btsp_size;
1087 	uint32_t  btsp_count;
1088 }
1089 __btlog_size(btlog_type_t type, uint32_t count)
1090 {
1091 	struct btlog_size_pair pair = {0};
1092 
1093 	switch (type) {
1094 	case BTLOG_LOG:
1095 		pair.btsp_size = round_page(sizeof(struct btlog_log) +
1096 		    count * sizeof(struct bt_log_entry));
1097 		pair.btsp_count = (pair.btsp_size - sizeof(struct btlog_log)) /
1098 		    sizeof(struct bt_log_entry);
1099 		break;
1100 
1101 	case BTLOG_HASH:
1102 		pair.btsp_count = MAX(1u << fls(count - 1), 128u);
1103 		pair.btsp_size = round_page(sizeof(struct btlog_hash) +
1104 		    pair.btsp_count * sizeof(struct bt_hash_entry) +
1105 		    (pair.btsp_count >> 2) * sizeof(struct bt_hash_head));
1106 		break;
1107 	}
1108 
1109 	return pair;
1110 }
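
/*
 * Editor's worked example (not part of the original source), assuming
 * LP64: __btlog_size(BTLOG_HASH, 1000) rounds the entry count up to a
 * power of two, MAX(1u << fls(999), 128u) == 1024, then sizes the
 * allocation as the btlog_hash header, 1024 hash entries, and 256
 * (count >> 2) bucket heads, rounded up to a whole number of pages.
 * Callers see the grown count: btl_count becomes 1024, not 1000.
 */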
1111 
1112 __attribute__((overloadable))
1113 static struct btlog_size_pair
1114 __btlog_size(btlogu_t btlu)
1115 {
1116 	return __btlog_size(btlu.btl->btl_type, btlu.btl->btl_count);
1117 }
1118 
1119 static inline btref_t
1120 __bt_ref(uint32_t stack_and_op)
1121 {
1122 	return stack_and_op & ~BTREF_OP_MASK;
1123 }
1124 
1125 static inline btref_t
1126 __bt_op(uint32_t stack_and_op)
1127 {
1128 	return stack_and_op & BTREF_OP_MASK;
1129 }
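
/*
 * Editor's illustrative sketch (not part of the original source): stack
 * refs are offsets of 64-byte aligned stacks, so the low 6 bits of a
 * stored "where" word are free to carry the client's operation tag:
 *
 *     uint32_t where = btref | (op & BTREF_OP_MASK);
 *     __bt_ref(where);   // btref (offset, plus BTREF_PERMANENT_BIT if set)
 *     __bt_op(where);    // op, e.g. an alloc/free tag chosen by the client
 */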
1130 
1131 #pragma mark btlog_log
1132 
1133 static void
1134 __btlog_log_destroy(struct btlog_log *btll)
1135 {
1136 	for (uint32_t i = 0; i < btll->btll_count; i++) {
1137 		btref_put(__bt_ref(btll->btll_entries[i].btle_where));
1138 	}
1139 }
1140 
1141 static void
1142 __btlog_log_record(struct btlog_log *btll, void *addr, uint8_t op, btref_t btref)
1143 {
1144 	struct bt_log_entry *btle;
1145 	btref_t old = BTREF_NULL;
1146 	uint32_t pos;
1147 
1148 	__btlog_lock(btll);
1149 
1150 	if (__improbable(btll->btll_hdr.btl_disabled)) {
1151 		goto disabled;
1152 	}
1153 
1154 	pos = btll->btll_pos;
1155 	if (pos + 1 == btll->btll_count) {
1156 		btll->btll_pos = 0;
1157 	} else {
1158 		btll->btll_pos = pos + 1;
1159 	}
1160 
1161 	btle  = &btll->btll_entries[pos];
1162 	old   = __bt_ref(btle->btle_where);
1163 	*btle = (struct bt_log_entry){
1164 		.btle_addr  = __btlog_elem_encode(addr),
1165 		.btle_where = btref | (op & BTREF_OP_MASK),
1166 	};
1167 
1168 disabled:
1169 	__btlog_unlock(btll);
1170 
1171 	btref_put(old);
1172 }
1173 
1174 #pragma mark btlog_hash
1175 
1176 static void
1177 __btlog_hash_init(struct btlog_hash *btlh)
1178 {
1179 	struct bt_hash_head *hash = __btlog_hash_hash(btlh);
1180 
1181 	btlh->btlh_free.bthh_first = BT_HASH_END_MARKER;
1182 	btlh->btlh_free.bthh_last = BT_HASH_END_MARKER;
1183 
1184 	for (size_t i = 0; i < __btlog_hash_count(btlh); i++) {
1185 		hash[i].bthh_first = BT_HASH_END_MARKER;
1186 		hash[i].bthh_last = BT_HASH_END_MARKER;
1187 	}
1188 }
1189 
1190 static void
1191 __btlog_hash_destroy(struct btlog_hash *btlh)
1192 {
1193 	for (uint32_t i = 0; i < btlh->btlh_count; i++) {
1194 		btref_put(__bt_ref(btlh->btlh_entries[i].bthe_where));
1195 	}
1196 }
1197 
1198 static uint32_t
1199 __btlog_hash_stailq_pop_first(
1200 	struct btlog_hash      *btlh,
1201 	struct bt_hash_head    *head)
1202 {
1203 	struct bt_hash_entry *bthe;
1204 	uint32_t pos = head->bthh_first;
1205 
1206 	bthe = &btlh->btlh_entries[pos];
1207 	head->bthh_first = bthe->bthe_next;
1208 	if (bthe->bthe_next == BT_HASH_END_MARKER) {
1209 		head->bthh_last = BT_HASH_END_MARKER;
1210 	} else {
1211 		bthe->bthe_next = BT_HASH_END_MARKER;
1212 	}
1213 
1214 	return pos;
1215 }
1216 
1217 static void
1218 __btlog_hash_stailq_remove(
1219 	struct bt_hash_head    *head,
1220 	struct bt_hash_entry   *bthe,
1221 	uint32_t               *prev,
1222 	uint32_t                ppos)
1223 {
1224 	*prev = bthe->bthe_next;
1225 	if (bthe->bthe_next == BT_HASH_END_MARKER) {
1226 		head->bthh_last = ppos;
1227 	} else {
1228 		bthe->bthe_next = BT_HASH_END_MARKER;
1229 	}
1230 }
1231 
1232 static void
1233 __btlog_hash_stailq_append(
1234 	struct btlog_hash      *btlh,
1235 	struct bt_hash_head    *head,
1236 	uint32_t                pos)
1237 {
1238 	if (head->bthh_last == BT_HASH_END_MARKER) {
1239 		head->bthh_first = head->bthh_last = pos;
1240 	} else {
1241 		btlh->btlh_entries[head->bthh_last].bthe_next = pos;
1242 		head->bthh_last = pos;
1243 	}
1244 }
1245 
1246 static void
1247 __btlog_hash_remove(
1248 	struct btlog_hash      *btlh,
1249 	struct bt_hash_entry   *bthe)
1250 {
1251 	struct bt_hash_head *head;
1252 	uint32_t *prev;
1253 	uint32_t ppos;
1254 
1255 	head = __btlog_hash_head(btlh, __btlog_elem_decode(bthe->bthe_addr));
1256 	prev = &head->bthh_first;
1257 	ppos = BT_HASH_END_MARKER;
1258 
1259 	while (bthe != &btlh->btlh_entries[*prev]) {
1260 		ppos = *prev;
1261 		prev = &btlh->btlh_entries[ppos].bthe_next;
1262 	}
1263 
1264 	__btlog_hash_stailq_remove(head, bthe, prev, ppos);
1265 }
1266 
1267 static void
1268 __btlog_hash_record(struct btlog_hash *btlh, void *addr, uint8_t op, btref_t btref)
1269 {
1270 	struct bt_hash_head *head;
1271 	struct bt_hash_entry *bthe;
1272 	btref_t old = BTREF_NULL;
1273 	uint32_t pos;
1274 
1275 	head = __btlog_hash_head(btlh, __btlog_elem_normalize(addr));
1276 
1277 	__btlog_lock(btlh);
1278 
1279 	if (__improbable(btlh->btlh_hdr.btl_disabled)) {
1280 		goto disabled;
1281 	}
1282 
1283 	if (btlh->btlh_free.bthh_first != BT_HASH_END_MARKER) {
1284 		pos  = __btlog_hash_stailq_pop_first(btlh, &btlh->btlh_free);
1285 		bthe = &btlh->btlh_entries[pos];
1286 	} else {
1287 		pos  = btlh->btlh_pos;
1288 		if (pos + 1 == btlh->btlh_count) {
1289 			btlh->btlh_pos = 0;
1290 		} else {
1291 			btlh->btlh_pos = pos + 1;
1292 		}
1293 		bthe = &btlh->btlh_entries[pos];
1294 		if (bthe->bthe_addr) {
1295 			__btlog_hash_remove(btlh, bthe);
1296 		}
1297 	}
1298 
1299 	old   = __bt_ref(bthe->bthe_where);
1300 	*bthe = (struct bt_hash_entry){
1301 		.bthe_addr  = __btlog_elem_encode(addr),
1302 		.bthe_where = btref | (op & BTREF_OP_MASK),
1303 		.bthe_next  = BT_HASH_END_MARKER,
1304 	};
1305 
1306 	if (btref && (btref & BTREF_VALID_MASK) == 0) {
1307 		assert(__btlib_deref(&bt_library,
1308 		    btref)->bts_ref_len >= BTS_FRAMES_REF_INC);
1309 	}
1310 
1311 	__btlog_hash_stailq_append(btlh, head, pos);
1312 
1313 disabled:
1314 	__btlog_unlock(btlh);
1315 
1316 	btref_put(old);
1317 }
1318 
1319 static void
1320 __btlog_hash_erase(struct btlog_hash *btlh, void *addr)
1321 {
1322 	struct bt_hash_head *head;
1323 	struct bt_hash_entry *bthe;
1324 	uint32_t *prev;
1325 	uint32_t pos, ppos;
1326 
1327 	addr = __btlog_elem_normalize(addr);
1328 	head = __btlog_hash_head(btlh, addr);
1329 	prev = &head->bthh_first;
1330 	ppos = BT_HASH_END_MARKER;
1331 
1332 	__btlog_lock(btlh);
1333 
1334 	if (__improbable(btlh->btlh_hdr.btl_disabled)) {
1335 		goto disabled;
1336 	}
1337 
1338 	while ((pos = *prev) != BT_HASH_END_MARKER) {
1339 		bthe = &btlh->btlh_entries[pos];
1340 		if (__btlog_elem_decode(bthe->bthe_addr) == addr) {
1341 			bthe->bthe_addr = 0;
1342 			__btlog_hash_stailq_remove(head, bthe, prev, ppos);
1343 			__btlog_hash_stailq_append(btlh, &btlh->btlh_free, pos);
1344 		} else {
1345 			ppos = *prev;
1346 			prev = &btlh->btlh_entries[ppos].bthe_next;
1347 		}
1348 	}
1349 
1350 disabled:
1351 	__btlog_unlock(btlh);
1352 }
1353 
1354 #pragma mark btlog APIs
1355 
1356 static void
1357 __btlog_init(btlogu_t btlu)
1358 {
1359 	switch (btlu.btl->btl_type) {
1360 	case BTLOG_HASH:
1361 		__btlog_hash_init(btlu.btlh);
1362 		break;
1363 
1364 	case BTLOG_LOG:
1365 		break;
1366 	}
1367 }
1368 
1369 btlog_t
1370 btlog_create(btlog_type_t type, uint32_t count, uint32_t sample)
1371 {
1372 	struct btlog_size_pair pair = __btlog_size(type, count);
1373 	kern_return_t kr;
1374 	btlogu_t btlu;
1375 
1376 	kr = kernel_memory_allocate(kernel_map, &btlu.bta,
1377 	    pair.btsp_size, 0, KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_DIAG);
1378 
1379 	if (kr != KERN_SUCCESS) {
1380 		return NULL;
1381 	}
1382 
1383 	if (sample > BTL_SAMPLE_LIMIT) {
1384 		sample = BTL_SAMPLE_LIMIT;
1385 	}
1386 
1387 	btlu.btl->btl_type = type;
1388 	btlu.btl->btl_sample_max = sample;
1389 	btlu.btl->btl_count = pair.btsp_count;
1390 	lck_ticket_init(&btlu.btl->btl_lock, &btlog_lck_grp);
1391 	assert3u(btlu.btl->btl_count, !=, 0);
1392 
1393 	if (sample > 1) {
1394 		btlu.btl->btl_sample = zalloc_percpu(percpu_u64_zone,
1395 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1396 		zpercpu_foreach_cpu(cpu) {
1397 			uint32_t *counter;
1398 
1399 			counter = zpercpu_get_cpu(btlu.btl->btl_sample, cpu);
1400 			*counter = (cpu + 1) * sample / zpercpu_count();
1401 		}
1402 	}
1403 
1404 	__btlog_init(btlu);
1405 
1406 	return btlu.btl;
1407 }
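
/*
 * Editor's usage sketch (not part of the original source).  A BTLOG_HASH
 * log keyed by element address, sampling roughly 1 in 64 events; "elem"
 * and "op" are placeholders for the client's address and operation tag:
 *
 *     btlog_t log = btlog_create(BTLOG_HASH, 4096, 64);
 *
 *     if (log != NULL && btlog_sample(log)) {
 *             btlog_record(log, elem, op,
 *                 btref_get(__builtin_frame_address(0), BTREF_GET_NOWAIT));
 *     }
 *     ...
 *     btlog_erase(log, elem);   // element freed: drop its record
 *     btlog_destroy(log);
 */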
1408 
1409 static void
1410 __btlog_destroy(btlogu_t btlu)
1411 {
1412 	switch (btlu.btl->btl_type) {
1413 	case BTLOG_LOG:
1414 		__btlog_log_destroy(btlu.btll);
1415 		break;
1416 
1417 	case BTLOG_HASH:
1418 		__btlog_hash_destroy(btlu.btlh);
1419 		break;
1420 	}
1421 }
1422 
1423 void
1424 btlog_destroy(btlogu_t btlu)
1425 {
1426 	if (!btlu.btl->btl_disabled) {
1427 		__btlog_destroy(btlu);
1428 	}
1429 	if (btlu.btl->btl_sample) {
1430 		zfree_percpu(percpu_u64_zone, btlu.btl->btl_sample);
1431 	}
1432 	lck_ticket_destroy(&btlu.btl->btl_lock, &btlog_lck_grp);
1433 	kmem_free(kernel_map, btlu.bta, __btlog_size(btlu).btsp_size);
1434 }
1435 
1436 void
1437 btlog_enable(btlogu_t btlu)
1438 {
1439 	vm_size_t size;
1440 
1441 	size = __btlog_size(btlu).btsp_size;
1442 	if (size > PAGE_SIZE) {
1443 		kernel_memory_populate(kernel_map, btlu.bta + PAGE_SIZE,
1444 		    size - PAGE_SIZE, KMA_KOBJECT | KMA_ZERO,
1445 		    VM_KERN_MEMORY_DIAG);
1446 	}
1447 
1448 	__btlog_init(btlu);
1449 
1450 	__btlog_lock(btlu);
1451 	assert(btlu.btl->btl_disabled);
1452 	btlu.btl->btl_disabled = false;
1453 	__btlog_unlock(btlu);
1454 }
1455 
1456 void
1457 btlog_disable(btlogu_t btlu)
1458 {
1459 	vm_size_t size;
1460 
1461 	__btlog_lock(btlu);
1462 	assert(!btlu.btl->btl_disabled);
1463 	btlu.btl->btl_disabled = true;
1464 	__btlog_unlock(btlu);
1465 
1466 	__btlog_destroy(btlu);
1467 
1468 	size = __btlog_size(btlu).btsp_size;
1469 	bzero((char *)btlu.bta + sizeof(*btlu.btl),
1470 	    PAGE_SIZE - sizeof(*btlu.btl));
1471 	if (size > PAGE_SIZE) {
1472 		kernel_memory_depopulate(kernel_map, btlu.bta + PAGE_SIZE,
1473 		    size - PAGE_SIZE, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
1474 	}
1475 }
1476 
1477 btlog_type_t
1478 btlog_get_type(btlog_t btlog)
1479 {
1480 	return btlog->btl_type;
1481 }
1482 
1483 uint32_t
1484 btlog_get_count(btlog_t btlog)
1485 {
1486 	return btlog->btl_count;
1487 }
1488 
1489 bool
1490 btlog_sample(btlog_t btlog)
1491 {
1492 	uint32_t *counter;
1493 
1494 	if (btlog->btl_sample == NULL) {
1495 		return true;
1496 	}
1497 
1498 	counter = zpercpu_get(btlog->btl_sample);
1499 	if (os_atomic_dec_orig(counter, relaxed) != 0) {
1500 		return false;
1501 	}
1502 
1503 	os_atomic_store(counter, btlog->btl_sample_max - 1, relaxed);
1504 	return true;
1505 }
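
/*
 * Editor's note (not part of the original source): the per-CPU counters
 * are seeded staggered in btlog_create(), (cpu + 1) * sample / ncpus, so
 * that all CPUs do not fire in lockstep; afterwards each counter fires
 * once every btl_sample_max decrements.  For example, with sample == 64
 * and 4 CPUs, the initial counters are 16, 32, 48 and 64.
 */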
1506 
1507 void
1508 btlog_record(btlogu_t btlu, void *addr, uint8_t op, btref_t btref)
1509 {
1510 	if (btlu.btl->btl_disabled) {
1511 		return;
1512 	}
1513 	switch (btlu.btl->btl_type) {
1514 	case BTLOG_LOG:
1515 		__btlog_log_record(btlu.btll, addr, op, btref);
1516 		break;
1517 
1518 	case BTLOG_HASH:
1519 		__btlog_hash_record(btlu.btlh, addr, op, btref);
1520 		break;
1521 	}
1522 }
1523 
1524 void
1525 btlog_erase(btlogu_t btlu, void *addr)
1526 {
1527 	if (btlu.btl->btl_disabled) {
1528 		return;
1529 	}
1530 	switch (btlu.btl->btl_type) {
1531 	case BTLOG_HASH:
1532 		__btlog_hash_erase(btlu.btlh, addr);
1533 		break;
1534 
1535 	case BTLOG_LOG:
1536 		break;
1537 	}
1538 }
1539 
1540 extern void
1541 qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
1542 
1543 struct btlog_record {
1544 	uint32_t btr_where;
1545 	uint32_t btr_count;
1546 };
1547 
1548 static int
1549 btlog_record_cmp_where(const void *e1, const void *e2)
1550 {
1551 	const struct btlog_record *a = e1;
1552 	const struct btlog_record *b = e2;
1553 
1554 	if (a->btr_where == b->btr_where) {
1555 		return 0;
1556 	}
1557 	return a->btr_where > b->btr_where ? 1 : -1;
1558 }
1559 
1560 static bool
1561 btlog_records_pack(struct btlog_record *array, uint32_t *countp)
1562 {
1563 	uint32_t r, w, count = *countp;
1564 
1565 	qsort(array, count, sizeof(struct btlog_record), btlog_record_cmp_where);
1566 
1567 	for (r = 1, w = 1; r < count; r++) {
1568 		if (array[w - 1].btr_where == array[r].btr_where) {
1569 			array[w - 1].btr_count += array[r].btr_count;
1570 		} else {
1571 			array[w++] = array[r];
1572 		}
1573 	}
1574 
1575 	if (w == count) {
1576 		return false;
1577 	}
1578 
1579 	*countp = w;
1580 	return true;
1581 }
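
/*
 * Editor's worked example (not part of the original source): given
 * btr_where values [A, B, A, C, B], each with btr_count 1, the qsort
 * yields [A, A, B, B, C] and packing rewrites it to [A:2, B:2, C:1],
 * setting *countp to 3 and returning true.  A false return means every
 * backtrace was already unique, so packing reclaimed no space and the
 * caller has to give up (KERN_NO_SPACE).
 */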
1582 
1583 static int
1584 btlog_record_cmp_rev_count(const void *e1, const void *e2)
1585 {
1586 	const struct btlog_record *a = e1;
1587 	const struct btlog_record *b = e2;
1588 
1589 	if (a->btr_count == b->btr_count) {
1590 		return 0;
1591 	}
1592 	return a->btr_count > b->btr_count ? -1 : 1;
1593 }
1594 
1595 kern_return_t
1596 btlog_get_records(
1597 	btlogu_t                btl,
1598 	zone_btrecord_t       **records,
1599 	unsigned int           *numrecs)
1600 {
1601 	struct btlog_record *btr_array;
1602 	struct btlog_record  btr;
1603 	zone_btrecord_t     *rec_array;
1604 	vm_offset_t          addr, end, size, ipc_map_size;
1605 	kern_return_t        kr;
1606 	uint32_t             count = 0;
1607 
1608 	/*
1609 	 * Step 1: collect all the backtraces in the logs in wired memory
1610 	 *
1611 	 *         note that the ipc_kernel_map is small, and we might have
1612 	 *         too little space.
1613 	 *
1614 	 *         In order to accommodate, we will deduplicate as we go.
1615 	 *         If we still overflow space, we return KERN_NO_SPACE.
1616 	 */
1617 
1618 	ipc_map_size = (vm_offset_t)(vm_map_max(ipc_kernel_map) -
1619 	    vm_map_min(ipc_kernel_map));
1620 	size = round_page(btlog_get_count(btl.btl) * sizeof(struct btlog_record));
1621 	if (size > ipc_map_size) {
1622 		size = ipc_map_size / 4;
1623 	}
1624 
1625 	for (;;) {
1626 		kr = kernel_memory_allocate(ipc_kernel_map, &addr, size, 0,
1627 		    KMA_NONE, VM_KERN_MEMORY_IPC);
1628 		if (kr == KERN_SUCCESS) {
1629 			break;
1630 		}
1631 		if (size < (1U << 19)) {
1632 			return kr;
1633 		}
1634 		size /= 2;
1635 	}
1636 
1637 	btr_array = (struct btlog_record *)addr;
1638 	rec_array = (zone_btrecord_t *)addr;
1639 	kr = KERN_NOT_FOUND;
1640 
1641 	__btlog_lock(btl);
1642 
1643 	if (btl.btl->btl_disabled) {
1644 		goto disabled;
1645 	}
1646 
1647 	switch (btl.btl->btl_type) {
1648 	case BTLOG_LOG:
1649 		for (uint32_t i = 0; i < btl.btl->btl_count; i++) {
1650 			struct bt_log_entry *btle = &btl.btll->btll_entries[i];
1651 
1652 			if (!btle->btle_addr) {
1653 				break;
1654 			}
1655 			if ((count + 1) * sizeof(struct btlog_record) > size) {
1656 				if (!btlog_records_pack(btr_array, &count)) {
1657 					kr = KERN_NO_SPACE;
1658 					count = 0;
1659 					break;
1660 				}
1661 			}
1662 			btr_array[count].btr_where = btle->btle_where;
1663 			btr_array[count].btr_count = 1;
1664 			count++;
1665 		}
1666 		break;
1667 
1668 	case BTLOG_HASH:
1669 		for (uint32_t i = 0; i < btl.btl->btl_count; i++) {
1670 			struct bt_hash_entry *bthe = &btl.btlh->btlh_entries[i];
1671 
1672 			if (!bthe->bthe_addr) {
1673 				continue;
1674 			}
1675 			if ((count + 1) * sizeof(struct btlog_record) > size) {
1676 				if (!btlog_records_pack(btr_array, &count)) {
1677 					kr = KERN_NO_SPACE;
1678 					count = 0;
1679 					break;
1680 				}
1681 			}
1682 			btr_array[count].btr_where = bthe->bthe_where;
1683 			btr_array[count].btr_count = 1;
1684 			count++;
1685 		}
1686 		break;
1687 	}
1688 
1689 	/*
1690 	 * Step 2: unique all the records, and retain them
1691 	 */
1692 
1693 	if (count) {
1694 		btlog_records_pack(btr_array, &count);
1695 		/*
1696 		 * If the backtraces won't fit,
1697 		 * sort them in reverse popularity order and clip.
1698 		 */
1699 		if (count > size / sizeof(zone_btrecord_t)) {
1700 			qsort(btr_array, count, sizeof(struct btlog_record),
1701 			    btlog_record_cmp_rev_count);
1702 			count = size / sizeof(zone_btrecord_t);
1703 		}
1704 		for (uint32_t i = 0; i < count; i++) {
1705 			btref_retain(__bt_ref(btr_array[i].btr_where));
1706 		}
1707 	}
1708 
1709 disabled:
1710 	__btlog_unlock(btl);
1711 
1712 	if (count == 0) {
1713 		kmem_free(ipc_kernel_map, addr, size);
1714 		return kr;
1715 	}
1716 
1717 	/*
1718 	 * Step 3: Expand the backtraces in place, in reverse order.
1719 	 */
1720 
1721 	for (uint32_t i = count; i-- > 0;) {
1722 		btr = *(volatile struct btlog_record *)&btr_array[i];
1723 
1724 		rec_array[i] = (zone_btrecord_t){
1725 			.ref_count      = btr.btr_count,
1726 			.operation_type = __bt_op(btr.btr_where),
1727 		};
1728 		btref_decode_unslide(__bt_ref(btr.btr_where), rec_array[i].bt);
1729 		btref_put(__bt_ref(btr.btr_where));
1730 	}
1731 
1732 	/*
1733 	 * Step 4: Free the excess memory, zero padding, and unwire the buffer.
1734 	 */
1735 
1736 	end = round_page((vm_offset_t)(rec_array + count));
1737 	bzero(rec_array + count, end - (vm_address_t)(rec_array + count));
1738 	if (end < addr + size) {
1739 		kmem_free(ipc_kernel_map, end, addr + size - end);
1740 	}
1741 
1742 	kr = vm_map_unwire(ipc_kernel_map, addr, end, FALSE);
1743 	assert(kr == KERN_SUCCESS);
1744 
1745 	*records = rec_array;
1746 	*numrecs = count;
1747 	return KERN_SUCCESS;
1748 }
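
/*
 * Editor's usage sketch (not part of the original source): the records
 * come back in ipc_kernel_map memory, unwired and sized to "numrecs",
 * which suits handing them out to user space; a kernel-internal consumer
 * would walk the array and then free it, e.g.:
 *
 *     zone_btrecord_t *recs;
 *     unsigned int     nrecs;
 *
 *     if (btlog_get_records(log, &recs, &nrecs) == KERN_SUCCESS) {
 *             ...
 *             kmem_free(ipc_kernel_map, (vm_offset_t)recs,
 *                 round_page(nrecs * sizeof(*recs)));
 *     }
 */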
1749 
1750 uint32_t
1751 btlog_guess_top(btlogu_t btlu, vm_address_t bt[], uint32_t *len)
1752 {
1753 	struct btlog_hash *btlh = btlu.btlh;
1754 	enum { RECS = 8 };
1755 	struct btlog_record recs[RECS] = {0};
1756 	bt_stack_t bts;
1757 
1758 	if (btlu.btl->btl_type != BTLOG_HASH) {
1759 		return 0;
1760 	}
1761 
1762 	if (!lck_ticket_lock_try(&btlu.btl->btl_lock, &btlog_lck_grp)) {
1763 		return 0;
1764 	}
1765 
1766 	if (btlu.btl->btl_disabled || btlh->btlh_count == 0) {
1767 		goto disabled;
1768 	}
1769 
1770 	/*
1771 	 * This is called from panic context, and can't really
1772 	 * do what btlog_get_records() does and allocate memory.
1773 	 *
1774 	 * Instead, we use the refcounts in the bt library
1775 	 * as a proxy for counts (of course those backtraces
1776 	 * can be inflated due to being shared with other logs,
1777 	 * which is why we use `RECS` slots in the array to find
1778 	 * the RECS most popular stacks).
1779 	 *
1780 	 * Note: this will break down if permanent backtraces get used.
1781 	 *       if we ever go there for performance reasons,
1782 	 *       then we'll want to find another way to do this.
1783 	 */
1784 	for (uint32_t i = 0; i < btlh->btlh_count; i++) {
1785 		struct bt_hash_entry *bthe = &btlh->btlh_entries[i];
1786 		btref_t ref;
1787 
1788 		if (!bthe->bthe_addr) {
1789 			continue;
1790 		}
1791 
1792 		ref = __bt_ref(bthe->bthe_where);
1793 		bts = __btlib_deref(&bt_library, ref);
1794 
1795 		for (uint32_t j = 0; j < RECS; j++) {
1796 			if (ref == recs[j].btr_where) {
1797 				break;
1798 			}
1799 			if (bts->bts_ref_len > recs[j].btr_count) {
1800 				for (uint32_t k = RECS - 1; k > j; k--) {
1801 					recs[k] = recs[k - 1];
1802 				}
1803 				recs[j].btr_count = bts->bts_ref_len;
1804 				recs[j].btr_where = ref;
1805 				break;
1806 			}
1807 		}
1808 	}
1809 
1810 	/*
1811 	 * Then correct what we sampled by counting how many times
1812 	 * the backtrace _actually_ exists in that one log.
1813 	 */
1814 	for (uint32_t j = 0; j < RECS; j++) {
1815 		recs[j].btr_count = 0;
1816 	}
1817 
1818 	for (uint32_t i = 0; i < btlh->btlh_count; i++) {
1819 		struct bt_hash_entry *bthe = &btlh->btlh_entries[i];
1820 		btref_t ref;
1821 
1822 		if (!bthe->bthe_addr) {
1823 			continue;
1824 		}
1825 
1826 		ref = __bt_ref(bthe->bthe_where);
1827 
1828 		for (uint32_t j = 0; j < RECS; j++) {
1829 			if (recs[j].btr_where == ref) {
1830 				recs[j].btr_count++;
1831 				break;
1832 			}
1833 		}
1834 	}
1835 
1836 	for (uint32_t j = 1; j < RECS; j++) {
1837 		if (recs[0].btr_count < recs[j].btr_count) {
1838 			recs[0] = recs[j];
1839 		}
1840 	}
1841 	bts = __btlib_deref(&bt_library, recs[0].btr_where);
1842 	*len = __btstack_len(bts);
1843 
1844 	backtrace_unpack(BTP_KERN_OFFSET_32, (uintptr_t *)bt, BTLOG_MAX_DEPTH,
1845 	    (uint8_t *)bts->bts_frames, sizeof(uint32_t) * *len);
1846 
1847 disabled:
1848 	__btlog_unlock(btlu);
1849 
1850 	return recs[0].btr_count;
1851 }
1852 
1853 #if DEBUG || DEVELOPMENT
1854 
1855 void
1856 btlog_copy_backtraces_for_elements(
1857 	btlogu_t                btlu,
1858 	vm_address_t           *instances,
1859 	uint32_t               *countp,
1860 	uint32_t                elem_size,
1861 	leak_site_proc          proc)
1862 {
1863 	struct btlog_hash *btlh = btlu.btlh;
1864 	struct bt_hash_head *head;
1865 	uint32_t count = *countp;
1866 	uint32_t num_sites = 0;
1867 
1868 	if (btlu.btl->btl_type != BTLOG_HASH) {
1869 		return;
1870 	}
1871 
1872 	__btlog_lock(btlh);
1873 
1874 	if (btlu.btl->btl_disabled) {
1875 		goto disabled;
1876 	}
1877 
1878 	for (uint32_t i = 0; i < count; i++) {
1879 		vm_offset_t element = instances[i];
1880 		void *addr = __btlog_elem_normalize((void *)element);
1881 		btref_t ref = BTREF_NULL;
1882 		uint32_t pos;
1883 
1884 		if (kInstanceFlagReferenced & element) {
1885 			continue;
1886 		}
1887 
1888 		element = INSTANCE_PUT(element) & ~kInstanceFlags;
1889 		head = __btlog_hash_head(btlh, addr);
1890 		pos  = head->bthh_first;
1891 		while (pos != BT_HASH_END_MARKER) {
1892 			struct bt_hash_entry *bthe = &btlh->btlh_entries[pos];
1893 
1894 			if (__btlog_elem_decode(bthe->bthe_addr) == addr) {
1895 				ref = __bt_ref(bthe->bthe_where);
1896 				break;
1897 			}
1898 
1899 			pos = bthe->bthe_next;
1900 		}
1901 
1902 		if (ref != BTREF_NULL) {
1903 			element = (ref | kInstanceFlagReferenced);
1904 		}
1905 		instances[num_sites++] = INSTANCE_PUT(element);
1906 	}
1907 
1908 	for (uint32_t i = 0; i < num_sites; i++) {
1909 		vm_offset_t btref = instances[i];
1910 		uint32_t site_count, dups;
1911 
1912 		if (!(btref & kInstanceFlagReferenced)) {
1913 			continue;
1914 		}
1915 
1916 		for (site_count = 1, dups = i + 1; dups < num_sites; dups++) {
1917 			if (instances[dups] == btref) {
1918 				site_count++;
1919 				instances[dups] = 0;
1920 			}
1921 		}
1922 
1923 		btref = INSTANCE_PUT(btref) & ~kInstanceFlags;
1924 		proc(site_count, elem_size, (btref_t)btref);
1925 	}
1926 
1927 disabled:
1928 	__btlog_unlock(btlh);
1929 
1930 	*countp = num_sites;
1931 }
1932 
1933 #endif /* DEBUG || DEVELOPMENT */
1934