/*
 * Copyright (c) 2012-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/btlog.h>
#include <kern/smr.h>
#include <kern/startup.h>
#include <kern/thread_call.h>
#include <os/hash.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_memtag.h>
#include <vm/pmap.h>

#pragma mark btref & helpers

static LCK_GRP_DECLARE(bt_library_lck_grp, "bt_library");
static SMR_DEFINE(bt_library_smr, "bt library");

#define BTS_FRAMES_MAX          13
#define BTS_FRAMES_REF_MASK     0xfffffff0
#define BTS_FRAMES_REF_INC      0x00000010
#define BTS_FRAMES_LEN_MASK     0x0000000f

typedef SMR_POINTER(btref_t)    btref_smr_t;

typedef union bt_stack {
	struct {
		btref_smr_t     bts_next;
		uint32_t        bts_ref_len;
		uint32_t        bts_hash;
		uint32_t        bts_frames[BTS_FRAMES_MAX];
	};
	struct {
		uint32_t        bts_padding[3 + BTS_FRAMES_MAX - 1 - sizeof(long) / 4];
		uint32_t        bts_free_next;
		smr_seq_t       bts_free_seq;
	};
} *bt_stack_t;

static_assert(sizeof(union bt_stack) == 64); /* allocation scheme needs it */

#define BTREF_PERMANENT_BIT     0x80000000u
#define BTREF_OP_MASK           0x0000003fu
#define BTREF_VALID_MASK        0xc000003fu

#define BTL_SIZE_INIT           (1u << 20)
#define BTL_SIZE_MAX            (1u << 30)
#define BTL_SLABS               9

#define BTL_PARAM_INIT          0x00000020u
#define BTL_PARAM_PARITY(p)     ((p) >> 31)
#define BTL_PARAM_SHIFT(p)      (32 - ((p) & 0x3f))
#define BTL_PARAM_IDX(p, h)     ((uint64_t)(h) >> ((p) & 0x3f))
#define BTL_PARAM_NEXT(p)       ((p) - 0x80000001u)

#define BTL_HASH_SHIFT          8
#define BTL_HASH_COUNT          (1u << BTL_HASH_SHIFT)
#define BTL_HASH_MASK           (BTL_HASH_COUNT - 1)

static_assert((BTL_SIZE_INIT << BTL_SLABS) == BTL_SIZE_MAX / 2);
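
/*
 * Illustrative walk-through of the BTL_PARAM_* encoding (a worked example,
 * not used by the code): the low 6 bits of a param hold
 * "32 - log2(number of outer buckets)", and the top bit holds the parity
 * used to index btl_hash[].
 *
 *     p0 = BTL_PARAM_INIT;     // 0x00000020: parity 0, idx = h >> 32 = 0,
 *                              //   i.e. a single outer bucket
 *     p1 = BTL_PARAM_NEXT(p0); // 0x8000001f: parity 1, idx = h >> 31,
 *                              //   i.e. 2 outer buckets
 *     p2 = BTL_PARAM_NEXT(p1); // 0x0000001e: parity 0, idx = h >> 30,
 *                              //   i.e. 4 outer buckets
 *
 * Each rehash doubles the outer array and flips the parity, so readers
 * derive a consistent (array, size) pair from a single load of btl_param.
 */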

typedef struct bt_hash {
	btref_smr_t             bth_array[BTL_HASH_COUNT];
} *bt_hash_t;

#if DEBUG || DEVELOPMENT
#define BTLIB_VALIDATE          1
#else
#define BTLIB_VALIDATE          0
#endif

/*!
 * @typedef bt_library_t
 *
 * @brief
 * Describes a backtrace library.
 *
 * @discussion
 * A backtrace library is a scalable hash table of backtraces
 * used for debugging purposes.
 *
 * By default there is a single global instance, but the code
 * is amenable to having several instances.
 *
 *
 * <h2>Data structure design</h2>
 *
 * Its hash table is structured like this:
 *
 *     par = BTL_PARAM_PARITY(btl->btl_param);
 *     sz  = 1u << BTL_PARAM_SHIFT(btl->btl_param);
 *
 *     btl->btl_hash[par]
 *           │
 *           │     ╭─────── array of size "sz" buckets ───────╮
 *           ╰───> │                                          │
 *                 ╰──────────────────────────────────┼───────╯
 *                                                    │
 *               ╭─────── struct bt_hash ───────╮     │
 *               │                              │ <───╯
 *               ╰──┼───────────────────────────╯
 *                  │
 *                  ╰──> Stack ──> Stack ──> Stack ──> X
 *
 *
 * The two "btl_hash" entries are used with the "btl_param" switch in order
 * to swap the outer array while growing the hash without perturbing
 * readers.
 *
 * The lists of stacks are also maintained in "hash" order, which allows
 * the rehashing to be a clean split of the lists.
 *
 * All stack pointers are "references", which are small 32-bit offsets
 * within the library backing store (slabs).
 */
typedef struct bt_library {
	lck_ticket_t            btl_lock;
	SMR_POINTER(uint32_t)   btl_param;

	bt_hash_t              *btl_hash[2];
	thread_call_t           btl_call;
	thread_t                btl_grower;

	btref_t                *btl_free_tail;
	btref_t                 btl_free_head;

	btref_t                 btl_deferred_head;

	bool                    btl_waiters;
	bool                    btl_in_callout;
	bool                    btl_rehashing;
	uint8_t                 btl_slab_cur;
	uint32_t                btl_alloc_pos;
	uint32_t                btl_faulted_pos;
	uint32_t                btl_max_pos;
	vm_address_t            btl_slabs[BTL_SLABS];
} *bt_library_t;

static struct bt_library        bt_library;

static size_t
__btstack_len(bt_stack_t bts)
{
	return bts->bts_ref_len & BTS_FRAMES_LEN_MASK;
}

static size_t
__btstack_size(bt_stack_t bts)
{
	return sizeof(uint32_t) * __btstack_len(bts);
}

static bool
__btstack_same(bt_stack_t a, bt_stack_t b)
{
	return a->bts_hash == b->bts_hash &&
	       __btstack_len(a) == __btstack_len(b) &&
	       memcmp(a->bts_frames, b->bts_frames, __btstack_size(a)) == 0;
}

static uint32_t
__btstack_capture(bt_stack_t bts, void *fp, bool permanent)
{
	struct backtrace_control ctl = {
		.btc_frame_addr = (vm_offset_t)fp,
	};
	size_t size;

	size = backtrace_packed(BTP_KERN_OFFSET_32, (uint8_t *)bts->bts_frames,
	    sizeof(bts->bts_frames), &ctl, NULL);
	bts->bts_ref_len = (size / sizeof(uint32_t)) +
	    (permanent ? BTS_FRAMES_REF_MASK : BTS_FRAMES_REF_INC);
	return bts->bts_hash = os_hash_jenkins(bts->bts_frames, size);
}

static btref_t
__btstack_try_retain(btref_t btref, bt_stack_t bts, btref_get_flags_t flags)
{
	uint32_t oref, nref;

	oref = bts->bts_ref_len;

	do {
		switch (oref & BTS_FRAMES_REF_MASK) {
		case 0:
			return 0;
		case BTS_FRAMES_REF_MASK:
			return btref | BTREF_PERMANENT_BIT;
		}

		nref = oref + BTS_FRAMES_REF_INC;
		if (flags & BTREF_GET_PERMANENT) {
			nref |= BTS_FRAMES_REF_MASK;
		}
	} while (!os_atomic_cmpxchgv(&bts->bts_ref_len,
	    oref, nref, &oref, relaxed));

	if ((nref & BTS_FRAMES_REF_MASK) == BTS_FRAMES_REF_MASK) {
		btref |= BTREF_PERMANENT_BIT;
	}

	return btref;
}

__abortlike
static void
__btstack_resurrect_panic(bt_stack_t bts)
{
	panic("trying to resurrect bt stack %p", bts);
}

static btref_t
__btstack_retain(btref_t btref, bt_stack_t bts, btref_get_flags_t flags)
{
	uint32_t oref, nref;

	oref = bts->bts_ref_len;

	do {
		switch (oref & BTS_FRAMES_REF_MASK) {
		case 0:
			__btstack_resurrect_panic(bts);
		case BTS_FRAMES_REF_MASK:
			return btref | BTREF_PERMANENT_BIT;
		}

		nref = oref + BTS_FRAMES_REF_INC;
		if (flags & BTREF_GET_PERMANENT) {
			nref |= BTS_FRAMES_REF_MASK;
		}
	} while (!os_atomic_cmpxchgv(&bts->bts_ref_len,
	    oref, nref, &oref, relaxed));

	if ((nref & BTS_FRAMES_REF_MASK) == BTS_FRAMES_REF_MASK) {
		btref |= BTREF_PERMANENT_BIT;
	}

	return btref;
}

__abortlike
static void
__btstack_over_release_panic(bt_stack_t bts)
{
	panic("trying to over-release bt stack %p", bts);
}

static bool
__btstack_release(bt_stack_t bts)
{
	uint32_t oref, nref;

	oref = bts->bts_ref_len;

	do {
		switch (oref & BTS_FRAMES_REF_MASK) {
		case 0:
			__btstack_over_release_panic(bts);
		case BTS_FRAMES_REF_MASK:
			return false;
		}

		nref = oref - BTS_FRAMES_REF_INC;
	} while (!os_atomic_cmpxchgv(&bts->bts_ref_len,
	    oref, nref, &oref, relaxed));

	return nref < BTS_FRAMES_REF_INC;
}
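
/*
 * Worked example of the bts_ref_len encoding (illustrative only):
 * the low 4 bits hold the frame count, the upper 28 bits hold the
 * reference count in units of BTS_FRAMES_REF_INC.
 *
 *     0x00000025: 5 frames, refcount 2
 *     0x00000015: 5 frames, refcount 1 (the next release frees the stack)
 *     0xfffffff5: 5 frames, permanent; retains saturate, and
 *                 __btstack_release() never returns true for it
 */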

static bt_stack_t
__btlib_deref(bt_library_t btl, btref_t ref)
{
	uint32_t slab = 0;

	if (ref >= BTL_SIZE_INIT) {
		slab = __builtin_clz(BTL_SIZE_INIT) - __builtin_clz(ref) + 1;
	}
	return (bt_stack_t)(btl->btl_slabs[slab] + ref);
}
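
/*
 * Illustrative example: slab 0 covers refs [0, BTL_SIZE_INIT) and each
 * subsequent slab doubles the backing store, so the position of a
 * reference's top bit picks the slab directly:
 *
 *     ref = 0x00080000 (512K): slab 0, addr = btl_slabs[0] + ref
 *     ref = 0x00180000 (1.5M): slab 1, addr = btl_slabs[1] + ref
 *     ref = 0x00300000 (3M):   slab 2, addr = btl_slabs[2] + ref
 *
 * btl_slabs[n] is biased by the slab's starting offset when it is
 * allocated ("addr - size" in __btlib_grow()), which is why a plain
 * "+ ref" works for every slab.
 */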

static void
__btlib_lock(bt_library_t btl)
{
	lck_ticket_lock(&btl->btl_lock, &bt_library_lck_grp);
}

static void
__btlib_unlock(bt_library_t btl)
{
	lck_ticket_unlock(&btl->btl_lock);
}

static inline btref_smr_t *
__btlib_head(bt_library_t btl, uint32_t param, uint32_t hash)
{
	uint32_t par = BTL_PARAM_PARITY(param);
	uint32_t idx = BTL_PARAM_IDX(param, hash);

	return &btl->btl_hash[par][idx]->bth_array[hash & BTL_HASH_MASK];
}

#pragma mark btref growth & rehashing

static void __btlib_remove_deferred_locked(bt_library_t btl);

static bool
__btlib_growth_needed(bt_library_t btl)
{
	if (btl->btl_faulted_pos >= btl->btl_alloc_pos + PAGE_SIZE / 2) {
		return false;
	}

	if (btl->btl_faulted_pos == btl->btl_max_pos &&
	    btl->btl_slab_cur + 1 == BTL_SLABS) {
		return false;
	}

	return true;
}

static bool
__btlib_rehash_needed(bt_library_t btl)
{
	uint32_t param = smr_serialized_load(&btl->btl_param);
	uint32_t shift = BTL_HASH_SHIFT + BTL_PARAM_SHIFT(param);

	return (btl->btl_faulted_pos >> (3 + shift)) >= sizeof(union bt_stack);
}
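
/*
 * Back of the envelope (illustrative): with shift as above the table has
 * 2^shift buckets in total, and btl_faulted_pos / 64 bounds the number of
 * allocated stacks.  The test is equivalent to:
 *
 *     (btl_faulted_pos / 64) >= 8 * (1 << shift)
 *
 * i.e. a rehash is requested once the table could average more than
 * 8 stacks per bucket.
 */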

static void
__btlib_callout_wakeup(bt_library_t btl)
{
	if (startup_phase >= STARTUP_SUB_THREAD_CALL &&
	    !btl->btl_in_callout) {
		thread_call_enter(btl->btl_call);
	}
}

__attribute__((noinline))
static void
__btlib_grow(bt_library_t btl)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_address_t addr;

	while (btl->btl_grower) {
		btl->btl_waiters = true;
		lck_ticket_sleep_with_inheritor(&btl->btl_lock,
		    &bt_library_lck_grp, LCK_SLEEP_DEFAULT,
		    &btl->btl_grower, btl->btl_grower,
		    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
		if (!__btlib_growth_needed(btl)) {
			return;
		}
	}
	btl->btl_grower = current_thread();

	__btlib_unlock(btl);

	if (btl->btl_faulted_pos == btl->btl_max_pos) {
		uint8_t slab = btl->btl_slab_cur + 1;
		vm_size_t size = btl->btl_max_pos;

		kr = kmem_alloc(kernel_map, &addr, size,
		    KMA_KOBJECT | KMA_ZERO | KMA_VAONLY | KMA_DATA,
		    VM_KERN_MEMORY_DIAG);
		if (kr != KERN_SUCCESS) {
			goto done;
		}

		btl->btl_slab_cur = slab;
		btl->btl_slabs[slab] = addr - size;
		btl->btl_max_pos += size;
	}

	if (btl->btl_faulted_pos < btl->btl_alloc_pos + PAGE_SIZE / 2) {
		uint8_t slab = btl->btl_slab_cur;

		addr = btl->btl_slabs[slab] + btl->btl_faulted_pos;

		kr = kernel_memory_populate(addr, PAGE_SIZE,
		    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_DIAG);
	}

done:
	__btlib_lock(btl);

	if (kr == KERN_SUCCESS) {
		btl->btl_faulted_pos += PAGE_SIZE;
	}

	btl->btl_grower = NULL;

	if (btl->btl_waiters) {
		btl->btl_waiters = false;
		wakeup_all_with_inheritor(&btl->btl_grower, THREAD_AWAKENED);
	}

	if (__btlib_rehash_needed(btl)) {
		__btlib_callout_wakeup(btl);
	}
}

static void
__btlib_split_step(
	bt_library_t            btl,
	bt_hash_t              *bthp,
	uint32_t                idx,
	uint32_t                mask)
{
	btref_smr_t *head, *prev;
	bt_stack_t   bts;
	btref_t      ref;

	__btlib_lock(btl);

	if (__btlib_growth_needed(btl)) {
		__btlib_grow(btl);
	}

	for (uint32_t i = 0; i < BTL_HASH_COUNT; i++) {
		prev = head = &bthp[idx]->bth_array[i];

		while ((ref = smr_serialized_load(prev)) != BTREF_NULL) {
			bts = __btlib_deref(btl, ref);
			if (bts->bts_hash & mask) {
				break;
			}
			prev = &bts->bts_next;
		}

		if (idx & 1) {
			smr_init_store(head, ref);
		} else {
			smr_clear_store(prev);
		}
	}

	__btlib_unlock(btl);
}

#if BTLIB_VALIDATE
static void
__btlib_validate(
	bt_library_t            btl,
	bt_hash_t              *bthp,
	uint32_t                size,
	uint32_t                param)
{
	bt_stack_t bts;
	btref_t ref;

	for (uint32_t i = 0; i < size; i++) {
		for (uint32_t j = 0; j < BTL_HASH_COUNT; j++) {
			ref = smr_serialized_load(&bthp[i]->bth_array[j]);
			if (ref == 0) {
				continue;
			}
			bts = __btlib_deref(btl, ref);
			assert3u(BTL_PARAM_IDX(param, bts->bts_hash), ==, i);
			assert3u(bts->bts_hash & BTL_HASH_MASK, ==, j);
		}
	}
}
#endif /* BTLIB_VALIDATE */

__attribute__((noinline))
static void
__btlib_rehash_and_lock(bt_library_t btl)
{
	uint32_t   param_old, size_old, mask;
	bt_hash_t *bthp_old;
	bt_hash_t *bthp;
	smr_seq_t  s1, s2;

	/*
	 * Step 1: compute all the right sizes and parameters
	 *         and allocate the new hash table elements.
	 */
	param_old = smr_serialized_load(&btl->btl_param);
	bthp_old  = btl->btl_hash[BTL_PARAM_PARITY(param_old)];
	size_old  = 1u << BTL_PARAM_SHIFT(param_old);
	bthp      = kalloc_type(bt_hash_t, 2 * size_old, Z_WAITOK_ZERO);
	mask      = 1u << (BTL_PARAM_NEXT(param_old) & 0x1f);

	if (bthp == NULL) {
		return;
	}

	for (uint32_t i = 0; i < size_old; i++) {
		bthp[2 * i] = bthp_old[i];
		bthp[2 * i + 1] = kalloc_type(struct bt_hash,
		    Z_WAITOK_ZERO_NOFAIL);
	}

	/*
	 * Step 2: Copy all the hash table buckets in one go.
	 *         And publish the new array.
	 *
	 * TODO: consider if we want to let go of the lock sometimes.
	 */
	__btlib_lock(btl);

	btl->btl_rehashing = true;

	for (uint32_t i = 0; i < size_old; i++) {
		memcpy(bthp[2 * i + 1], bthp[2 * i], sizeof(struct bt_hash));
	}

	btl->btl_hash[!BTL_PARAM_PARITY(param_old)] = bthp;

	smr_serialized_store(&btl->btl_param, BTL_PARAM_NEXT(param_old));

	__btlib_unlock(btl);

	smr_synchronize(&bt_library_smr);

	/*
	 * Step 3: Compute the "odd" lists
	 *
	 * When we arrive here, we have 2 buckets per list working this way,
	 * assuming the hash bit that we are interested in changes on "C -> D":
	 *
	 * [ even ] -> A -> B -> C -> D -> E -> 0
	 * [ odd  ] ---^
	 *
	 * We will now build:
	 *
	 * [ even ] -> A -> B -> C -> D -> E -> 0
	 * [ odd  ] ------------------^
	 *
	 * Note: we try to advance the SMR clock twice,
	 *       in the hope that for larger hashes it will
	 *       help smr_wait() not to spin.
	 */

	for (uint32_t i = 0; i < size_old; i += 2) {
		__btlib_split_step(btl, bthp, i + 1, mask);
	}
	s1 = smr_advance(&bt_library_smr);

	if (size_old >= 2) {
		for (uint32_t i = size_old; i < 2 * size_old; i += 2) {
			__btlib_split_step(btl, bthp, i + 1, mask);
		}
		s2 = smr_advance(&bt_library_smr);
	}

	/*
	 * It's now possible to free the old array; do it,
	 * in a feeble attempt to give SMR readers more time before
	 * the next smr_wait().
	 */
	btl->btl_hash[BTL_PARAM_PARITY(param_old)] = NULL;
	kfree_type(bt_hash_t, size_old, bthp_old);

	/*
	 * Step 4: Split the "even" lists
	 *
	 * We will now cut the "C -> D" link in the even bucket, ending up with:
	 *
	 * [ even ] -> A -> B -> C -> 0
	 * [ odd  ] ----------------> D -> E -> 0
	 */
	smr_wait(&bt_library_smr, s1);
	for (uint32_t i = 0; i < size_old; i += 2) {
		__btlib_split_step(btl, bthp, i, mask);
	}

	if (size_old >= 2) {
		smr_wait(&bt_library_smr, s2);
		for (uint32_t i = size_old; i < 2 * size_old; i += 2) {
			__btlib_split_step(btl, bthp, i, mask);
		}
	}

	/*
	 * Help readers see the cuts.
	 */
	(void)smr_advance(&bt_library_smr);

	__btlib_lock(btl);

	btl->btl_rehashing = false;

#if BTLIB_VALIDATE
	__btlib_validate(btl, bthp, size_old * 2, BTL_PARAM_NEXT(param_old));
#endif /* BTLIB_VALIDATE */

	__btlib_remove_deferred_locked(btl);
}

static void
__btlib_callout(thread_call_param_t arg0, thread_call_param_t __unused arg1)
{
	bt_library_t btl = arg0;

	__btlib_lock(btl);
	btl->btl_in_callout = true;

	if (__btlib_growth_needed(btl)) {
		__btlib_grow(btl);
	}

	while (__btlib_rehash_needed(btl)) {
		__btlib_unlock(btl);
		__btlib_rehash_and_lock(btl);
	}

	btl->btl_in_callout = false;
	__btlib_unlock(btl);
}

static void
__btlib_init(bt_library_t btl)
{
	kern_return_t kr;
	vm_address_t  addr;
	bt_hash_t    *bthp;

	lck_ticket_init(&btl->btl_lock, &bt_library_lck_grp);
	btl->btl_free_tail = &btl->btl_free_head;
	btl->btl_call = thread_call_allocate_with_options(__btlib_callout, btl,
	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);

	kr = kmem_alloc(kernel_map, &addr, BTL_SIZE_INIT,
	    KMA_KOBJECT | KMA_ZERO | KMA_VAONLY | KMA_DATA,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		panic("unable to allocate initial VA: %d", kr);
	}

	bthp = kalloc_type(bt_hash_t, 1, Z_WAITOK_ZERO_NOFAIL);
	bthp[0] = kalloc_type(struct bt_hash, Z_WAITOK_ZERO_NOFAIL);

	btl->btl_slabs[0]  = addr;
	btl->btl_max_pos   = BTL_SIZE_INIT;
	btl->btl_alloc_pos = sizeof(union bt_stack);
	btl->btl_hash[0]   = bthp;
	smr_init_store(&btl->btl_param, BTL_PARAM_INIT);
}
STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, __btlib_init, &bt_library);

#pragma mark btref insertion/removal fastpaths

__attribute__((noinline))
static btref_t
__btlib_insert(
	bt_library_t            btl,
	bt_stack_t              needle,
	btref_get_flags_t       flags,
	uint32_t                hash)
{
	bt_stack_t bts;
	btref_smr_t *prev;
	btref_t ref;

	__btlib_lock(btl);

	if (__btlib_growth_needed(btl)) {
		/*
		 * Do this first so that we keep the lock held
		 * while we insert.
		 */
		if ((flags & BTREF_GET_NOWAIT) == 0) {
			__btlib_grow(btl);
		} else {
			__btlib_callout_wakeup(btl);
		}
	}

	prev = __btlib_head(btl, smr_serialized_load(&btl->btl_param), hash);
	while ((ref = smr_serialized_load(prev)) != BTREF_NULL) {
		bts = __btlib_deref(btl, ref);

#if BTLIB_VALIDATE
		assert3u(bts->bts_hash & BTL_HASH_MASK, ==,
		    hash & BTL_HASH_MASK);
#endif /* BTLIB_VALIDATE */

		if (needle->bts_hash < bts->bts_hash) {
			break;
		}
		if (__btstack_same(needle, bts)) {
			ref = __btstack_try_retain(ref, bts, flags);
			if (ref) {
				__btlib_unlock(btl);
				return ref;
			}
			break;
		}
		prev = &bts->bts_next;
	}

	if (btl->btl_free_head) {
		ref = btl->btl_free_head;
		bts = __btlib_deref(btl, btl->btl_free_head);
		if (smr_poll(&bt_library_smr, bts->bts_free_seq)) {
			if ((btl->btl_free_head = bts->bts_free_next) == 0) {
				btl->btl_free_tail = &btl->btl_free_head;
			}
			goto allocated;
		}
	}

	if (__improbable(btl->btl_alloc_pos + sizeof(union bt_stack) >
	    btl->btl_faulted_pos)) {
		__btlib_unlock(btl);
		return BTREF_NULL;
	}

	ref = btl->btl_alloc_pos;
	btl->btl_alloc_pos = ref + sizeof(union bt_stack);
	bts = __btlib_deref(btl, ref);

allocated:
	*bts = *needle;
	smr_serialized_store(&bts->bts_next, smr_serialized_load(prev));
	smr_serialized_store(prev, ref);

	__btlib_unlock(btl);

	return ref | ((flags & BTREF_GET_PERMANENT) ? BTREF_PERMANENT_BIT : 0);
}

__abortlike
static void
__btlib_remove_notfound_panic(bt_library_t btl, bt_stack_t bts)
{
	panic("couldn't find stack %p in library %p", bts, btl);
}

static void
__btlib_remove_locked(bt_library_t btl, btref_t ref, bt_stack_t bts)
{
	uint32_t hash = bts->bts_hash;
	uint32_t param = smr_serialized_load(&btl->btl_param);
	btref_smr_t *prev;

	if (btl->btl_rehashing) {
		/*
		 * We can't really delete things during a rehash;
		 * put them on the deferred list instead.
		 */
		bts->bts_free_next = btl->btl_deferred_head;
		btl->btl_deferred_head = ref;
		return;
	}

	prev = __btlib_head(btl, param, hash);
	for (;;) {
		btref_t tmp = smr_serialized_load(prev);

		if (tmp == ref) {
			break;
		}
		if (tmp == 0) {
			__btlib_remove_notfound_panic(btl, bts);
		}
		prev = &__btlib_deref(btl, tmp)->bts_next;
	}

	smr_serialized_store(prev, smr_serialized_load(&bts->bts_next));
	bts->bts_free_next = 0;
	*btl->btl_free_tail = ref;
	btl->btl_free_tail = &bts->bts_free_next;
	bts->bts_free_seq = smr_advance(&bt_library_smr);
}

static void
__btlib_remove_deferred_locked(bt_library_t btl)
{
	btref_t ref, next;
	bt_stack_t bts;

	next = btl->btl_deferred_head;
	btl->btl_deferred_head = 0;
	while ((ref = next)) {
		bts = __btlib_deref(btl, ref);
		next = bts->bts_free_next;
		__btlib_remove_locked(btl, ref, bts);
	}
}

__attribute__((noinline))
static void
__btlib_remove(bt_library_t btl, btref_t ref, bt_stack_t bts)
{
	__btlib_lock(btl);
	__btlib_remove_locked(btl, ref, bts);
	__btlib_unlock(btl);
}

static btref_t
__btlib_get(bt_library_t btl, void *fp, btref_get_flags_t flags)
{
	union bt_stack needle;
	btref_smr_t *head;
	uint32_t hash, param;
	btref_t ref;

	if (btl->btl_alloc_pos == 0) {
		return BTREF_NULL;
	}

	hash  = __btstack_capture(&needle, fp, (flags & BTREF_GET_PERMANENT));

	smr_enter(&bt_library_smr);

	/*
	 * The hash "params" have a single bit to select the btl_hash[]
	 * pointer that is used.
	 *
	 * The compiler knows enough about this code to break
	 * the dependency chains that we would like, generating code like this:
	 *
	 *     bthp = btl->btl_hash[0];
	 *     if (BTL_PARAM_PARITY(param)) {
	 *             bthp = btl->btl_hash[1];
	 *     }
	 *
	 * We could try to play tricks but this would be brittle, so instead,
	 * use a proper acquire barrier on param, which pairs with
	 * smr_serialized_store(&btl->btl_param, ...)
	 * in __btlib_rehash_and_lock().
	 *
	 *
	 * Similarly, because the `bts_next` fields are not dereferenced
	 * right away but used as part of more complicated arithmetic,
	 * trusting the compiler to maintain dependencies is a tall order;
	 * sometimes an acquire barrier is best.
	 */
	param = smr_entered_load_acquire(&btl->btl_param);
	head  = __btlib_head(btl, param, hash);
	ref   = smr_entered_load(head);

	while (ref) {
		bt_stack_t bts = __btlib_deref(btl, ref);

#if BTLIB_VALIDATE
		assert3u(bts->bts_hash & BTL_HASH_MASK, ==,
		    hash & BTL_HASH_MASK);
#endif /* BTLIB_VALIDATE */

		if (needle.bts_hash < bts->bts_hash) {
			break;
		}
		if (__btstack_same(&needle, bts) &&
		    (ref = __btstack_try_retain(ref, bts, flags))) {
			smr_leave(&bt_library_smr);
			return ref;
		}

		ref = smr_entered_load(&bts->bts_next);
	}

	smr_leave(&bt_library_smr);

	return __btlib_insert(btl, &needle, flags, hash);
}

btref_t
btref_get(void *fp, btref_get_flags_t flags)
{
	return __btlib_get(&bt_library, fp, flags);
}
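
/*
 * Typical usage sketch (illustrative only; names other than the
 * btref/btlog APIs are made up here):
 *
 *     btref_t ref = btref_get(__builtin_frame_address(0), 0);
 *
 *     if (ref != BTREF_NULL) {
 *             btlog_record(log, elem, op, ref);  // the log now owns "ref"
 *     }
 *
 * A ref obtained without BTREF_GET_PERMANENT must eventually be handed to
 * a btlog or released with btref_put(); refs with BTREF_PERMANENT_BIT set
 * are never freed, so retain/put degrade to no-ops for them.
 */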

__abortlike
static void
__btref_invalid(btref_t btref)
{
	panic("trying to manipulate invalid backtrace ref: 0x%08x", btref);
}

static inline bool
__btref_isvalid(btref_t btref)
{
	return ((btref & BTREF_VALID_MASK) & ~BTREF_PERMANENT_BIT) == 0;
}

btref_t
btref_retain(btref_t btref)
{
	uint32_t sig  = btref & BTREF_VALID_MASK;

	if (btref && sig == 0) {
		bt_stack_t bts = __btlib_deref(&bt_library, btref);

		btref = __btstack_retain(btref, bts, 0);
	} else if (sig & ~BTREF_PERMANENT_BIT) {
		__btref_invalid(btref);
	}

	return btref;
}

void
btref_put(btref_t btref)
{
	uint32_t sig = btref & BTREF_VALID_MASK;

	if (btref && sig == 0) {
		bt_library_t btl = &bt_library;
		bt_stack_t bts = __btlib_deref(btl, btref);

		if (__improbable(__btstack_release(bts))) {
			__btlib_remove(btl, btref, bts);
		}
	} else if (sig & ~BTREF_PERMANENT_BIT) {
		__btref_invalid(btref);
	}
}

uint32_t
btref_decode_unslide(btref_t btref, mach_vm_address_t bt_out[])
{
	static_assert(sizeof(mach_vm_address_t) == sizeof(uintptr_t));

	if (__btref_isvalid(btref)) {
		bt_stack_t bts = __btlib_deref(&bt_library,
		    btref & ~BTREF_PERMANENT_BIT);
		uint32_t   len = __btstack_len(bts);

		backtrace_unpack(BTP_KERN_OFFSET_32, (uintptr_t *)bt_out,
		    BTLOG_MAX_DEPTH, (uint8_t *)bts->bts_frames,
		    sizeof(uint32_t) * len);

		for (uint32_t i = 0; i < len; i++) {
			bt_out[i] = VM_KERNEL_UNSLIDE(bt_out[i]);
		}

		return len;
	}

	__btref_invalid(btref);
}

#pragma mark btlog types and helpers

struct btlog {
	btlog_type_t            btl_type;
	uint32_t                btl_disabled : 1;
	uint32_t                btl_sample_max : 23;
#define BTL_SAMPLE_LIMIT        0x007fffffu
	uint32_t                btl_count;
	lck_ticket_t            btl_lock;
	uint32_t     *__zpercpu btl_sample;
};

struct bt_log_entry {
	vm_address_t            btle_addr;
	btref_t                 btle_where;
} __attribute__((packed, aligned(4)));

struct btlog_log {
	struct btlog            btll_hdr;
#define btll_count              btll_hdr.btl_count
	uint32_t                btll_pos;
	struct bt_log_entry     btll_entries[__counted_by(btll_count)];
};


#define BT_HASH_END_MARKER      UINT32_MAX

struct bt_hash_entry {
	vm_address_t            bthe_addr;
	uint32_t                bthe_next;
	btref_t                 bthe_where;
};

struct bt_hash_head {
	uint32_t                bthh_first;
	uint32_t                bthh_last;
};

struct btlog_hash {
	struct btlog            btlh_hdr;
#define btlh_count              btlh_hdr.btl_count
	uint32_t                btlh_pos;
	struct bt_hash_head     btlh_free;
	struct bt_hash_entry    btlh_entries[__counted_by(btlh_count)];
};

typedef union {
	vm_address_t            bta;
	struct btlog           *btl;
	struct btlog_log       *btll;
	struct btlog_hash      *btlh;
} __attribute__((transparent_union)) btlogu_t;

static LCK_GRP_DECLARE(btlog_lck_grp, "btlog");

static void
__btlog_lock(btlogu_t btlu)
{
	lck_ticket_lock(&btlu.btl->btl_lock, &btlog_lck_grp);
}

static void
__btlog_unlock(btlogu_t btlu)
{
	lck_ticket_unlock(&btlu.btl->btl_lock);
}

static void *
__btlog_elem_normalize(void *addr)
{
	addr = (void *)vm_memtag_canonicalize_address((vm_offset_t)addr);
	return addr;
}

static long
__btlog_elem_encode(void *addr)
{
	return ~(long)__btlog_elem_normalize(addr);
}

static void *
__btlog_elem_decode(long addr)
{
	return (void *)~addr;
}
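
/*
 * Why the bitwise-not (illustrative): a zero-filled entry must mean
 * "never written", so live entries may not encode to 0.  Storing ~addr
 * guarantees that, e.g.:
 *
 *     __btlog_elem_encode((void *)0xffffff80001c8000) == 0x0000007fffe37fff
 *
 * which lets the scanners below use bthe_addr/btle_addr == 0 as the
 * empty-slot sentinel.
 */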

static struct bt_hash_head *
__btlog_hash_hash(struct btlog_hash *btlh)
{
	return (struct bt_hash_head *)(btlh->btlh_entries + btlh->btlh_count);
}

static uint32_t
__btlog_hash_count(struct btlog_hash *btlh)
{
	return btlh->btlh_count >> 2;
}

static struct bt_hash_head *
__btlog_hash_head(struct btlog_hash *btlh, void *addr)
{
	uint32_t h = os_hash_kernel_pointer(__btlog_elem_normalize(addr));
	h &= (__btlog_hash_count(btlh) - 1);
	return &__btlog_hash_hash(btlh)[h];
}

__attribute__((overloadable))
static struct btlog_size_pair {
	vm_size_t btsp_size;
	uint32_t  btsp_count;
}
__btlog_size(btlog_type_t type, uint32_t count)
{
	struct btlog_size_pair pair = {0};

	switch (type) {
	case BTLOG_LOG:
		pair.btsp_size = round_page(sizeof(struct btlog_log) +
		    count * sizeof(struct bt_log_entry));
		pair.btsp_count = (pair.btsp_size - sizeof(struct btlog_log)) /
		    sizeof(struct bt_log_entry);
		break;

	case BTLOG_HASH:
		pair.btsp_count = MAX(1u << fls(count - 1), 128u);
		pair.btsp_size = round_page(sizeof(struct btlog_hash) +
		    pair.btsp_count * sizeof(struct bt_hash_entry) +
		    (pair.btsp_count >> 2) * sizeof(struct bt_hash_head));
		break;
	}

	return pair;
}
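
/*
 * Worked example (illustrative): a BTLOG_HASH log asked to hold 1000
 * elements first rounds the element count up to a power of two, then the
 * whole allocation up to a page boundary:
 *
 *     btsp_count = MAX(1u << fls(999), 128u) = 1024
 *     btsp_size  = round_page(header + 1024 entries + 256 hash heads)
 *
 * btlog_create() populates the whole thing; btlog_disable() keeps only
 * the first page resident and btlog_enable() populates the rest again.
 */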

__attribute__((overloadable))
static struct btlog_size_pair
__btlog_size(btlogu_t btlu)
{
	return __btlog_size(btlu.btl->btl_type, btlu.btl->btl_count);
}

static inline btref_t
__bt_ref(uint32_t stack_and_op)
{
	return stack_and_op & ~BTREF_OP_MASK;
}

static inline btref_t
__bt_op(uint32_t stack_and_op)
{
	return stack_and_op & BTREF_OP_MASK;
}

#pragma mark btlog_log

static void
__btlog_log_destroy(struct btlog_log *btll)
{
	for (uint32_t i = 0; i < btll->btll_count; i++) {
		btref_put(__bt_ref(btll->btll_entries[i].btle_where));
	}
}

static void
__btlog_log_record(struct btlog_log *btll, void *addr, uint8_t op, btref_t btref)
{
	struct bt_log_entry *btle;
	btref_t old = BTREF_NULL;
	uint32_t pos;

	__btlog_lock(btll);

	if (__improbable(btll->btll_hdr.btl_disabled)) {
		goto disabled;
	}

	pos = btll->btll_pos;
	if (pos + 1 == btll->btll_count) {
		btll->btll_pos = 0;
	} else {
		btll->btll_pos = pos + 1;
	}

	btle  = &btll->btll_entries[pos];
	old   = __bt_ref(btle->btle_where);
	*btle = (struct bt_log_entry){
		.btle_addr  = __btlog_elem_encode(addr),
		.btle_where = btref | (op & BTREF_OP_MASK),
	};

disabled:
	__btlog_unlock(btll);

	btref_put(old);
}

#pragma mark btlog_hash

static void
__btlog_hash_init(struct btlog_hash *btlh)
{
	struct bt_hash_head *hash = __btlog_hash_hash(btlh);

	btlh->btlh_free.bthh_first = BT_HASH_END_MARKER;
	btlh->btlh_free.bthh_last = BT_HASH_END_MARKER;

	for (size_t i = 0; i < __btlog_hash_count(btlh); i++) {
		hash[i].bthh_first = BT_HASH_END_MARKER;
		hash[i].bthh_last = BT_HASH_END_MARKER;
	}
}

static void
__btlog_hash_destroy(struct btlog_hash *btlh)
{
	for (uint32_t i = 0; i < btlh->btlh_count; i++) {
		btref_put(__bt_ref(btlh->btlh_entries[i].bthe_where));
	}
}

static uint32_t
__btlog_hash_stailq_pop_first(
	struct btlog_hash      *btlh,
	struct bt_hash_head    *head)
{
	struct bt_hash_entry *bthe;
	uint32_t pos = head->bthh_first;

	bthe = &btlh->btlh_entries[pos];
	head->bthh_first = bthe->bthe_next;
	if (bthe->bthe_next == BT_HASH_END_MARKER) {
		head->bthh_last = BT_HASH_END_MARKER;
	} else {
		bthe->bthe_next = BT_HASH_END_MARKER;
	}

	return pos;
}

static void
__btlog_hash_stailq_remove(
	struct bt_hash_head    *head,
	struct bt_hash_entry   *bthe,
	uint32_t               *prev,
	uint32_t                ppos)
{
	*prev = bthe->bthe_next;
	if (bthe->bthe_next == BT_HASH_END_MARKER) {
		head->bthh_last = ppos;
	} else {
		bthe->bthe_next = BT_HASH_END_MARKER;
	}
}

static void
__btlog_hash_stailq_append(
	struct btlog_hash      *btlh,
	struct bt_hash_head    *head,
	uint32_t                pos)
{
	if (head->bthh_last == BT_HASH_END_MARKER) {
		head->bthh_first = head->bthh_last = pos;
	} else {
		btlh->btlh_entries[head->bthh_last].bthe_next = pos;
		head->bthh_last = pos;
	}
}

static void
__btlog_hash_remove(
	struct btlog_hash      *btlh,
	struct bt_hash_entry   *bthe)
{
	struct bt_hash_head *head;
	uint32_t *prev;
	uint32_t ppos;

	head = __btlog_hash_head(btlh, __btlog_elem_decode(bthe->bthe_addr));
	prev = &head->bthh_first;
	ppos = BT_HASH_END_MARKER;

	while (bthe != &btlh->btlh_entries[*prev]) {
		ppos = *prev;
		prev = &btlh->btlh_entries[ppos].bthe_next;
	}

	__btlog_hash_stailq_remove(head, bthe, prev, ppos);
}

static void
__btlog_hash_record(struct btlog_hash *btlh, void *addr, uint8_t op, btref_t btref)
{
	struct bt_hash_head *head;
	struct bt_hash_entry *bthe;
	btref_t old = BTREF_NULL;
	uint32_t pos;

	head = __btlog_hash_head(btlh, __btlog_elem_normalize(addr));

	__btlog_lock(btlh);

	if (__improbable(btlh->btlh_hdr.btl_disabled)) {
		goto disabled;
	}

	if (btlh->btlh_free.bthh_first != BT_HASH_END_MARKER) {
		pos  = __btlog_hash_stailq_pop_first(btlh, &btlh->btlh_free);
		bthe = &btlh->btlh_entries[pos];
	} else {
		pos  = btlh->btlh_pos;
		if (pos + 1 == btlh->btlh_count) {
			btlh->btlh_pos = 0;
		} else {
			btlh->btlh_pos = pos + 1;
		}
		bthe = &btlh->btlh_entries[pos];
		if (bthe->bthe_addr) {
			__btlog_hash_remove(btlh, bthe);
		}
	}

	old   = __bt_ref(bthe->bthe_where);
	*bthe = (struct bt_hash_entry){
		.bthe_addr  = __btlog_elem_encode(addr),
		.bthe_where = btref | (op & BTREF_OP_MASK),
		.bthe_next  = BT_HASH_END_MARKER,
	};

	if (btref & ~BTREF_VALID_MASK) {
		assert(__btlib_deref(&bt_library,
		    btref & ~BTREF_VALID_MASK)->bts_ref_len >= BTS_FRAMES_REF_INC);
	}

	__btlog_hash_stailq_append(btlh, head, pos);

disabled:
	__btlog_unlock(btlh);

	btref_put(old);
}

static void
__btlog_hash_erase(struct btlog_hash *btlh, void *addr)
{
	struct bt_hash_head *head;
	struct bt_hash_entry *bthe;
	uint32_t *prev;
	uint32_t pos, ppos;

	addr = __btlog_elem_normalize(addr);
	head = __btlog_hash_head(btlh, addr);
	prev = &head->bthh_first;
	ppos = BT_HASH_END_MARKER;

	__btlog_lock(btlh);

	if (__improbable(btlh->btlh_hdr.btl_disabled)) {
		goto disabled;
	}

	while ((pos = *prev) != BT_HASH_END_MARKER) {
		bthe = &btlh->btlh_entries[pos];
		if (__btlog_elem_decode(bthe->bthe_addr) == addr) {
			bthe->bthe_addr = 0;
			__btlog_hash_stailq_remove(head, bthe, prev, ppos);
			__btlog_hash_stailq_append(btlh, &btlh->btlh_free, pos);
		} else {
			ppos = *prev;
			prev = &btlh->btlh_entries[ppos].bthe_next;
		}
	}

disabled:
	__btlog_unlock(btlh);
}

#pragma mark btlog APIs

static void
__btlog_init(btlogu_t btlu)
{
	switch (btlu.btl->btl_type) {
	case BTLOG_HASH:
		__btlog_hash_init(btlu.btlh);
		break;

	case BTLOG_LOG:
		break;
	}
}

btlog_t
btlog_create(btlog_type_t type, uint32_t count, uint32_t sample)
{
	struct btlog_size_pair pair = __btlog_size(type, count);
	kern_return_t kr;
	btlogu_t btlu;

	kr = kmem_alloc(kernel_map, &btlu.bta, pair.btsp_size,
	    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_DIAG);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	if (sample > BTL_SAMPLE_LIMIT) {
		sample = BTL_SAMPLE_LIMIT;
	}

	btlu.btl->btl_type = type;
	btlu.btl->btl_sample_max = sample;
	btlu.btl->btl_count = pair.btsp_count;
	lck_ticket_init(&btlu.btl->btl_lock, &btlog_lck_grp);
	assert3u(btlu.btl->btl_count, !=, 0);

	if (sample > 1) {
		btlu.btl->btl_sample = zalloc_percpu(percpu_u64_zone,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		zpercpu_foreach_cpu(cpu) {
			uint32_t *counter;

			counter = zpercpu_get_cpu(btlu.btl->btl_sample, cpu);
			*counter = (cpu + 1) * sample / zpercpu_count();
		}
	}

	__btlog_init(btlu);

	return btlu.btl;
}

static void
__btlog_destroy(btlogu_t btlu)
{
	switch (btlu.btl->btl_type) {
	case BTLOG_LOG:
		__btlog_log_destroy(btlu.btll);
		break;

	case BTLOG_HASH:
		__btlog_hash_destroy(btlu.btlh);
		break;
	}
}

void
btlog_destroy(btlogu_t btlu)
{
	if (!btlu.btl->btl_disabled) {
		__btlog_destroy(btlu);
	}
	if (btlu.btl->btl_sample) {
		zfree_percpu(percpu_u64_zone, btlu.btl->btl_sample);
	}
	lck_ticket_destroy(&btlu.btl->btl_lock, &btlog_lck_grp);
	kmem_free(kernel_map, btlu.bta, __btlog_size(btlu).btsp_size);
}

kern_return_t
btlog_enable(btlogu_t btlu)
{
	vm_size_t size;
	kern_return_t kr = KERN_SUCCESS;

	size = __btlog_size(btlu).btsp_size;
	if (size > PAGE_SIZE) {
		kr = kernel_memory_populate(btlu.bta + PAGE_SIZE,
		    size - PAGE_SIZE, KMA_KOBJECT | KMA_ZERO,
		    VM_KERN_MEMORY_DIAG);
	}

	if (kr == KERN_SUCCESS) {
		__btlog_init(btlu);

		__btlog_lock(btlu);
		assert(btlu.btl->btl_disabled);
		btlu.btl->btl_disabled = false;
		__btlog_unlock(btlu);
	}

	return kr;
}

void
btlog_disable(btlogu_t btlu)
{
	vm_size_t size;

	__btlog_lock(btlu);
	assert(!btlu.btl->btl_disabled);
	btlu.btl->btl_disabled = true;
	__btlog_unlock(btlu);

	__btlog_destroy(btlu);

	size = __btlog_size(btlu).btsp_size;
	bzero((char *)btlu.bta + sizeof(*btlu.btl),
	    PAGE_SIZE - sizeof(*btlu.btl));
	if (size > PAGE_SIZE) {
		kernel_memory_depopulate(btlu.bta + PAGE_SIZE,
		    size - PAGE_SIZE, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
	}
}

btlog_type_t
btlog_get_type(btlog_t btlog)
{
	return btlog->btl_type;
}

uint32_t
btlog_get_count(btlog_t btlog)
{
	return btlog->btl_count;
}

bool
btlog_sample(btlog_t btlog)
{
	uint32_t *counter;

	if (btlog->btl_sample == NULL) {
		return true;
	}

	counter = zpercpu_get(btlog->btl_sample);
	if (os_atomic_dec_orig(counter, relaxed) != 0) {
		return false;
	}

	os_atomic_store(counter, btlog->btl_sample_max - 1, relaxed);
	return true;
}
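
/*
 * Illustrative: with sample = 100 on a 4-CPU system, btlog_create() seeds
 * the per-CPU counters with 25, 50, 75 and 100, so callers record roughly
 * 1 event in 100 and the recording CPUs stay staggered.  The expected
 * calling shape (log/elem/op/fp are the caller's, shown for shape only):
 *
 *     if (btlog_sample(log)) {
 *             btlog_record(log, elem, op, btref_get(fp, BTREF_GET_NOWAIT));
 *     }
 */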

void
btlog_record(btlogu_t btlu, void *addr, uint8_t op, btref_t btref)
{
	if (btlu.btl->btl_disabled) {
		return;
	}
	switch (btlu.btl->btl_type) {
	case BTLOG_LOG:
		__btlog_log_record(btlu.btll, addr, op, btref);
		break;

	case BTLOG_HASH:
		__btlog_hash_record(btlu.btlh, addr, op, btref);
		break;
	}
}

void
btlog_erase(btlogu_t btlu, void *addr)
{
	if (btlu.btl->btl_disabled) {
		return;
	}
	switch (btlu.btl->btl_type) {
	case BTLOG_HASH:
		__btlog_hash_erase(btlu.btlh, addr);
		break;

	case BTLOG_LOG:
		break;
	}
}

extern void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));

struct btlog_record {
	uint32_t btr_where;
	uint32_t btr_count;
};

static int
btlog_record_cmp_where(const void *e1, const void *e2)
{
	const struct btlog_record *a = e1;
	const struct btlog_record *b = e2;

	if (a->btr_where == b->btr_where) {
		return 0;
	}
	return a->btr_where > b->btr_where ? 1 : -1;
}

static bool
btlog_records_pack(struct btlog_record *array, uint32_t *countp)
{
	uint32_t r, w, count = *countp;

	qsort(array, count, sizeof(struct btlog_record), btlog_record_cmp_where);

	for (r = 1, w = 1; r < count; r++) {
		if (array[w - 1].btr_where == array[r].btr_where) {
			array[w - 1].btr_count += array[r].btr_count;
		} else {
			array[w++] = array[r];
		}
	}

	if (w == count) {
		return false;
	}

	*countp = w;
	return true;
}
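
/*
 * Illustrative: btlog_records_pack() folds per-entry singletons into
 * (where, count) pairs:
 *
 *     in:  {A,1} {B,1} {A,1} {C,1} {B,1} {A,1}
 *     out: {A,3} {B,2} {C,1}        (count 6 -> 3, returns true)
 *
 * It returns false when no two records shared a backtrace, which the
 * caller below treats as "deduplication cannot make room anymore" and
 * turns into KERN_NO_SPACE.
 */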

static int
btlog_record_cmp_rev_count(const void *e1, const void *e2)
{
	const struct btlog_record *a = e1;
	const struct btlog_record *b = e2;

	if (a->btr_count == b->btr_count) {
		return 0;
	}
	return a->btr_count > b->btr_count ? -1 : 1;
}

kern_return_t
btlog_get_records(
	btlogu_t                btl,
	zone_btrecord_t       **records,
	unsigned int           *numrecs)
{
	struct btlog_record *btr_array;
	struct btlog_record  btr;
	zone_btrecord_t     *rec_array;
	vm_offset_t          addr, end, size, ipc_map_size;
	kern_return_t        kr;
	uint32_t             count = 0;

	/*
	 * Step 1: collect all the backtraces in the logs in wired memory.
	 *
	 *         Note that the ipc_kernel_map is small, and we might have
	 *         too little space.
	 *
	 *         In order to accommodate, we will deduplicate as we go.
	 *         If we still overflow space, we return KERN_NO_SPACE.
	 */

	ipc_map_size = (vm_offset_t)(vm_map_max(ipc_kernel_map) -
	    vm_map_min(ipc_kernel_map));
	size = round_page(btlog_get_count(btl.btl) * sizeof(struct btlog_record));
	if (size > ipc_map_size) {
		size = ipc_map_size / 4;
	}

	for (;;) {
		kr = kmem_alloc(ipc_kernel_map, &addr, size,
		    KMA_DATA, VM_KERN_MEMORY_IPC);
		if (kr == KERN_SUCCESS) {
			break;
		}
		if (size < (1U << 19)) {
			return kr;
		}
		size /= 2;
	}

	btr_array = (struct btlog_record *)addr;
	rec_array = (zone_btrecord_t *)addr;
	kr = KERN_NOT_FOUND;

	__btlog_lock(btl);

	if (btl.btl->btl_disabled) {
		goto disabled;
	}

	switch (btl.btl->btl_type) {
	case BTLOG_LOG:
		for (uint32_t i = 0; i < btl.btl->btl_count; i++) {
			struct bt_log_entry *btle = &btl.btll->btll_entries[i];

			if (!btle->btle_addr) {
				break;
			}
			if ((count + 1) * sizeof(struct btlog_record) > size) {
				if (!btlog_records_pack(btr_array, &count)) {
					kr = KERN_NO_SPACE;
					count = 0;
					break;
				}
			}
			btr_array[count].btr_where = btle->btle_where;
			btr_array[count].btr_count = 1;
			count++;
		}
		break;

	case BTLOG_HASH:
		for (uint32_t i = 0; i < btl.btl->btl_count; i++) {
			struct bt_hash_entry *bthe = &btl.btlh->btlh_entries[i];

			if (!bthe->bthe_addr) {
				continue;
			}
			if ((count + 1) * sizeof(struct btlog_record) > size) {
				if (!btlog_records_pack(btr_array, &count)) {
					kr = KERN_NO_SPACE;
					count = 0;
					break;
				}
			}
			btr_array[count].btr_where = bthe->bthe_where;
			btr_array[count].btr_count = 1;
			count++;
		}
		break;
	}

	/*
	 * Step 2: unique all the records, and retain them.
	 */

	if (count) {
		btlog_records_pack(btr_array, &count);
		/*
		 * If the backtraces won't fit,
		 * sort them in reverse popularity order and clip.
		 */
		if (count > size / sizeof(zone_btrecord_t)) {
			qsort(btr_array, count, sizeof(struct btlog_record),
			    btlog_record_cmp_rev_count);
			count = size / sizeof(zone_btrecord_t);
		}
		for (uint32_t i = 0; i < count; i++) {
			btref_retain(__bt_ref(btr_array[i].btr_where));
		}
	}

disabled:
	__btlog_unlock(btl);

	if (count == 0) {
		kmem_free(ipc_kernel_map, addr, size);
		return kr;
	}

	/*
	 * Step 3: Expand the backtraces in place, in reverse order.
	 */

	for (uint32_t i = count; i-- > 0;) {
		btr = *(volatile struct btlog_record *)&btr_array[i];

		rec_array[i] = (zone_btrecord_t){
			.ref_count      = btr.btr_count,
			.operation_type = __bt_op(btr.btr_where),
		};
		btref_decode_unslide(__bt_ref(btr.btr_where), rec_array[i].bt);
		btref_put(__bt_ref(btr.btr_where));
	}

	/*
	 * Step 4: Free the excess memory, zero padding, and unwire the buffer.
	 */

	end = round_page((vm_offset_t)(rec_array + count));
	bzero(rec_array + count, end - (vm_address_t)(rec_array + count));
	if (end < addr + size) {
		kmem_free(ipc_kernel_map, end, addr + size - end);
	}

	kr = vm_map_unwire(ipc_kernel_map, addr, end, FALSE);
	assert(kr == KERN_SUCCESS);

	*records = rec_array;
	*numrecs = count;
	return KERN_SUCCESS;
}

uint32_t
btlog_guess_top(btlogu_t btlu, vm_address_t bt[], uint32_t *len)
{
	struct btlog_hash *btlh = btlu.btlh;
	enum { RECS = 8 };
	struct btlog_record recs[RECS] = {0};
	bt_stack_t bts;

	if (btlu.btl->btl_type != BTLOG_HASH) {
		return 0;
	}

	if (!lck_ticket_lock_try(&btlu.btl->btl_lock, &btlog_lck_grp)) {
		return 0;
	}

	if (btlu.btl->btl_disabled || btlh->btlh_count == 0) {
		goto disabled;
	}

	/*
	 * This is called from panic context, and can't really
	 * do what btlog_get_records() does and allocate memory.
	 *
	 * Instead, we use the refcounts in the bt library
	 * as a proxy for counts (of course those backtraces
	 * can be inflated due to being shared with other logs,
	 * which is why we use RECS slots in the array, in order
	 * to find the RECS most popular stacks).
	 *
	 * Note: this will break down if permanent backtraces get used.
	 *       if we ever go there for performance reasons,
	 *       then we'll want to find another way to do this.
	 */
	for (uint32_t i = 0; i < btlh->btlh_count; i++) {
		struct bt_hash_entry *bthe = &btlh->btlh_entries[i];
		btref_t ref;

		if (!bthe->bthe_addr) {
			continue;
		}

		ref = __bt_ref(bthe->bthe_where);
		bts = __btlib_deref(&bt_library, ref);

		for (uint32_t j = 0; j < RECS; j++) {
			if (ref == recs[j].btr_where) {
				break;
			}
			if (bts->bts_ref_len > recs[j].btr_count) {
				for (uint32_t k = RECS - 1; k > j; k--) {
					recs[k] = recs[k - 1];
				}
				recs[j].btr_count = bts->bts_ref_len;
				recs[j].btr_where = ref;
				break;
			}
		}
	}

	/*
	 * Then correct what we sampled by counting how many times
	 * the backtrace _actually_ exists in that one log.
	 */
	for (uint32_t j = 0; j < RECS; j++) {
		recs[j].btr_count = 0;
	}

	for (uint32_t i = 0; i < btlh->btlh_count; i++) {
		struct bt_hash_entry *bthe = &btlh->btlh_entries[i];
		btref_t ref;

		if (!bthe->bthe_addr) {
			continue;
		}

		ref = __bt_ref(bthe->bthe_where);

		for (uint32_t j = 0; j < RECS; j++) {
			if (recs[j].btr_where == ref) {
				recs[j].btr_count++;
				break;
			}
		}
	}

	for (uint32_t j = 1; j < RECS; j++) {
		if (recs[0].btr_count < recs[j].btr_count) {
			recs[0] = recs[j];
		}
	}
	bts = __btlib_deref(&bt_library, recs[0].btr_where);
	*len = __btstack_len(bts);

	backtrace_unpack(BTP_KERN_OFFSET_32, (uintptr_t *)bt, BTLOG_MAX_DEPTH,
	    (uint8_t *)bts->bts_frames, sizeof(uint32_t) * *len);

disabled:
	__btlog_unlock(btlu);

	return recs[0].btr_count;
}

#if DEBUG || DEVELOPMENT

void
btlog_copy_backtraces_for_elements(
	btlogu_t                btlu,
	vm_address_t           *instances,
	uint32_t               *countp,
	uint32_t                elem_size,
	leak_site_proc          proc)
{
	struct btlog_hash *btlh = btlu.btlh;
	struct bt_hash_head *head;
	uint32_t count = *countp;
	uint32_t num_sites = 0;

	if (btlu.btl->btl_type != BTLOG_HASH) {
		return;
	}

	__btlog_lock(btlh);

	if (btlu.btl->btl_disabled) {
		goto disabled;
	}

	for (uint32_t i = 0; i < count; i++) {
		vm_offset_t element = instances[i];
		void *addr = __btlog_elem_normalize((void *)element);
		btref_t ref = BTREF_NULL;
		uint32_t pos;

		if (kInstanceFlagReferenced & element) {
			continue;
		}

		element = INSTANCE_PUT(element) & ~kInstanceFlags;
		head = __btlog_hash_head(btlh, addr);
		pos  = head->bthh_first;
		while (pos != BT_HASH_END_MARKER) {
			struct bt_hash_entry *bthe = &btlh->btlh_entries[pos];

			if (__btlog_elem_decode(bthe->bthe_addr) == addr) {
				ref = __bt_ref(bthe->bthe_where);
				break;
			}

			pos = bthe->bthe_next;
		}

		if (ref != BTREF_NULL) {
			element = (ref | kInstanceFlagReferenced);
		}
		instances[num_sites++] = INSTANCE_PUT(element);
	}

	for (uint32_t i = 0; i < num_sites; i++) {
		vm_offset_t btref = instances[i];
		uint32_t site_count, dups;

		if (!(btref & kInstanceFlagReferenced)) {
			continue;
		}

		for (site_count = 1, dups = i + 1; dups < num_sites; dups++) {
			if (instances[dups] == btref) {
				site_count++;
				instances[dups] = 0;
			}
		}

		btref = INSTANCE_PUT(btref) & ~kInstanceFlags;
		proc(site_count, elem_size, (btref_t)btref);
	}

disabled:
	__btlog_unlock(btlh);

	*countp = num_sites;
}

#endif /* DEBUG || DEVELOPMENT */
1928