xref: /xnu-12377.61.12/iokit/Kernel/IOKitDebug.cpp (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/sysctl.h>
30 extern "C" {
31 #include <vm/vm_kern_xnu.h>
32 #include <kern/task.h>
33 #include <kern/debug.h>
34 }
35 
36 #include <libkern/c++/OSContainers.h>
37 #include <libkern/OSDebug.h>
38 #include <libkern/c++/OSCPPDebug.h>
39 #include <kern/backtrace.h>
40 #include <kern/btlog.h>
41 
42 #include <IOKit/IOKitDebug.h>
43 #include <IOKit/IOLib.h>
44 #include <IOKit/assert.h>
45 #include <IOKit/IODeviceTreeSupport.h>
46 #include <IOKit/IOService.h>
47 
48 #include "IOKitKernelInternal.h"
49 
50 TUNABLE_WRITEABLE(SInt64, gIOKitDebug, "io", DEBUG_INIT_VALUE);
51 TUNABLE_DEV_WRITEABLE(SInt64, gIOKitTrace, "iotrace", 0);
52 
53 #if DEVELOPMENT || DEBUG
54 #define IODEBUG_CTLFLAGS        CTLFLAG_RW
55 #else
56 #define IODEBUG_CTLFLAGS        CTLFLAG_RD
57 #endif
58 
59 SYSCTL_QUAD(_debug, OID_AUTO, iotrace, IODEBUG_CTLFLAGS | CTLFLAG_LOCKED, &gIOKitTrace, "trace io");
60 
61 static int
sysctl_debug_iokit(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)62 sysctl_debug_iokit
63 (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
64 {
65 	SInt64 newValue;
66 	int changed, error = sysctl_io_number(req, gIOKitDebug, sizeof(gIOKitDebug), &newValue, &changed);
67 	if (changed) {
68 		gIOKitDebug = ((gIOKitDebug & ~kIOKitDebugUserOptions) | (newValue & kIOKitDebugUserOptions));
69 	}
70 	return error;
71 }
72 
73 SYSCTL_PROC(_debug, OID_AUTO, iokit,
74     CTLTYPE_QUAD | IODEBUG_CTLFLAGS | CTLFLAG_KERN | CTLFLAG_LOCKED,
75     &gIOKitDebug, 0, sysctl_debug_iokit, "Q", "boot_arg io");
76 
77 void           (*gIOTrackingLeakScanCallback)(uint32_t notification) = NULL;
78 
79 size_t          debug_malloc_size;
80 size_t          debug_iomalloc_size;
81 
82 vm_size_t       debug_iomallocpageable_size;
83 size_t          debug_container_malloc_size;
84 // int          debug_ivars_size; // in OSObject.cpp
85 
86 extern "C" {
87 #if 0
88 #define DEBG(fmt, args...)   { kprintf(fmt, ## args); }
89 #else
90 #define DEBG(fmt, args...)   { IOLog(fmt, ## args); }
91 #endif
92 
93 void
IOPrintPlane(const IORegistryPlane * plane)94 IOPrintPlane( const IORegistryPlane * plane )
95 {
96 	IORegistryEntry *           next;
97 	IORegistryIterator *        iter;
98 	OSOrderedSet *              all;
99 	IOService *                 service;
100 
101 	iter = IORegistryIterator::iterateOver( plane );
102 	assert( iter );
103 	all = iter->iterateAll();
104 	if (all) {
105 		DEBG("Count %d\n", all->getCount());
106 		all->release();
107 	} else {
108 		DEBG("Empty\n");
109 	}
110 
111 	iter->reset();
112 	while ((next = iter->getNextObjectRecursive())) {
113 		DEBG( "%*s\033[33m%s", 2 * next->getDepth( plane ), "", next->getName( plane ));
114 		if ((next->getLocation( plane ))) {
115 			DEBG("@%s", next->getLocation( plane ));
116 		}
117 		DEBG("\033[0m <class %s", next->getMetaClass()->getClassName());
118 		if ((service = OSDynamicCast(IOService, next))) {
119 			DEBG(", busy %ld", (long) service->getBusyState());
120 		}
121 		DEBG( ">\n");
122 //      IOSleep(250);
123 	}
124 	iter->release();
125 
126 #undef IOPrintPlaneFormat
127 }
128 
/* Intentionally empty.  NOTE(review): purpose not visible in this file —
 * presumably a stub kept so the exported symbol survives for debugger
 * macros; confirm before removing. */
void
db_piokjunk(void)
{
}
133 
/* Intentionally empty.  NOTE(review): like db_piokjunk, appears to be a
 * retained stub; the plane argument is unused. */
void
db_dumpiojunk( const IORegistryPlane * plane __unused )
{
}
138 
/*
 * Log the global libkern/IOKit debug-allocation counters (the debug_*
 * globals declared above and debug_ivars_size from OSObject.cpp).
 */
void
IOPrintMemory( void )
{
//    OSMetaClass::printInstanceCounts();

	IOLog("\n"
	    "ivar kalloc()       0x%08lx\n"
	    "malloc()            0x%08lx\n"
	    "containers kalloc() 0x%08lx\n"
	    "IOMalloc()          0x%08lx\n"
	    "----------------------------------------\n",
	    debug_ivars_size,
	    debug_malloc_size,
	    debug_container_malloc_size,
	    debug_iomalloc_size
	    );
}
156 } /* extern "C" */
157 
158 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
159 
160 #define super OSObject
OSDefineMetaClassAndStructors(IOKitDiagnostics,OSObject)161 OSDefineMetaClassAndStructors(IOKitDiagnostics, OSObject)
162 
163 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
164 
165 OSObject * IOKitDiagnostics::diagnostics( void )
166 {
167 	IOKitDiagnostics * diags;
168 
169 	diags = new IOKitDiagnostics;
170 	if (diags && !diags->init()) {
171 		diags->release();
172 		diags = NULL;
173 	}
174 
175 	return diags;
176 }
177 
178 void
updateOffset(OSDictionary * dict,UInt64 value,const char * name)179 IOKitDiagnostics::updateOffset( OSDictionary * dict,
180     UInt64 value, const char * name )
181 {
182 	OSNumber * off;
183 
184 	off = OSNumber::withNumber( value, 64 );
185 	if (!off) {
186 		return;
187 	}
188 
189 	dict->setObject( name, off );
190 	off->release();
191 }
192 
193 bool
serialize(OSSerialize * s) const194 IOKitDiagnostics::serialize(OSSerialize *s) const
195 {
196 	OSDictionary *      dict;
197 	bool                ok;
198 
199 	dict = OSDictionary::withCapacity( 5 );
200 	if (!dict) {
201 		return false;
202 	}
203 
204 	updateOffset( dict, debug_ivars_size, "Instance allocation" );
205 	updateOffset( dict, debug_container_malloc_size, "Container allocation" );
206 	updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" );
207 	updateOffset( dict, debug_iomallocpageable_size, "Pageable allocation" );
208 
209 	OSMetaClass::serializeClassDictionary(dict);
210 
211 	ok = dict->serialize( s );
212 
213 	dict->release();
214 
215 	return ok;
216 }
217 
218 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
219 
220 #if IOTRACKING
221 
222 #include <libkern/c++/OSCPPDebug.h>
223 #include <libkern/c++/OSKext.h>
224 #include <kern/zalloc.h>
225 
226 __private_extern__ "C" void qsort(
227 	void * array,
228 	size_t nmembers,
229 	size_t member_size,
230 	int (*)(const void *, const void *));
231 
232 extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
233 extern "C" ppnum_t pmap_valid_page(ppnum_t pn);
234 
235 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
236 
/* Mutex wrapper that the owning thread may re-acquire
 * (see IOTRecursiveLockLock/Unlock below). */
struct IOTRecursiveLock {
	lck_mtx_t * mutex;
	thread_t    thread;   // owning thread; NULL when unlocked
	UInt32      count;    // recursion depth held by `thread`
};

/* One tracking domain, registered on the global gIOTrackingQ list. */
struct IOTrackingQueue {
	queue_chain_t     link;             // entry on gIOTrackingQ
	IOTRecursiveLock  lock;             // guards sites[] and the counters
	const char *      name;             // not copied; caller keeps storage
	uintptr_t         btEntry;          // substituted into copied-out backtraces (CopyOutBacktraces)
	size_t            allocSize;
	size_t            minCaptureSize;   // captures smaller than this are skipped
	uint32_t          siteCount;        // call sites (or user entries, for map-type queues)
	uint32_t          type;             // kIOTrackingQueueType* flags
	uint32_t          numSiteQs;        // number of hash buckets in sites[]
	uint8_t           captureOn;        // nonzero: record new allocations
	queue_head_t      sites[];          // hash buckets (by backtrace crc) of call sites
};


/* User-space half of a call site: the capturing pid and its backtrace. */
struct IOTrackingCallSiteUser {
	pid_t         pid;
	uint8_t       user32;       // nonzero: bt[] holds 32-bit frames (see CopyOutBacktraces)
	uint8_t       userCount;    // valid entries in bt[]
	uintptr_t     bt[kIOTrackingCallSiteBTs];
};

/* A unique (backtrace crc, vm tag[, pid]) allocation site together with
 * the list of live instances charged to it. */
struct IOTrackingCallSite {
	queue_chain_t          link;        // entry in its queue's sites[] bucket
	queue_head_t           instances;   // IOTracking entries charged to this site
	IOTrackingQueue *      queue;
	IOTracking **          addresses;   // lazily-allocated page-hash bucket heads into `instances`
	size_t        size[2];              // [0] live bytes; [1] IOTrackingAccumSize() adjustments
	uint32_t               crc;         // hash of the captured backtrace(s)
	uint32_t      count;                // live instances

	vm_tag_t      tag;
	uint8_t       user32;
	uint8_t       userCount;
	pid_t         btPID;

	uintptr_t     bt[kIOTrackingCallSiteBTs];   // kernel backtrace, zero-padded
	IOTrackingCallSiteUser     user[0];         // materialized only for user-type queues
};

/* Concrete layout of a call site on a user-type queue: the trailing
 * user[0] element above is backed by `user` here (see IOTrackingAdd). */
struct IOTrackingCallSiteWithUser {
	struct IOTrackingCallSite     site;
	struct IOTrackingCallSiteUser user;
};

static void IOTrackingFreeCallSite(uint32_t type, IOTrackingCallSite ** site);

/* Shared state for the leak-scan passes (see IOTrackingLeakScan). */
struct IOTrackingLeaksRef {
	uintptr_t * instances;  // table of tagged instance pointers
	uint32_t    zoneSize;
	uint32_t    count;      // entries in instances[]
	uint32_t    found;
	uint32_t    foundzlen;
	size_t      bytes;
};
298 
299 lck_mtx_t *  gIOTrackingLock;
300 queue_head_t gIOTrackingQ;
301 
302 enum{
303 	kTrackingAddressFlagAllocated    = 0x00000001
304 };
305 
306 #if defined(__LP64__)
307 #define IOTrackingAddressFlags(ptr)     (ptr->flags)
308 #else
309 #define IOTrackingAddressFlags(ptr)     (ptr->tracking.flags)
310 #endif
311 
312 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
313 
/*
 * Acquire `lock`, allowing re-entry by the thread that already owns it.
 * The unlocked read of lock->thread is safe: it can only compare equal
 * to current_thread() if this thread itself stored it while holding the
 * mutex, in which case no other thread can change it.
 */
static void
IOTRecursiveLockLock(IOTRecursiveLock * lock)
{
	if (lock->thread == current_thread()) {
		lock->count++;  // already owned: just deepen the recursion
	} else {
		lck_mtx_lock(lock->mutex);
		assert(lock->thread == NULL);
		assert(lock->count == 0);
		lock->thread = current_thread();
		lock->count = 1;
	}
}
327 
/*
 * Release one level of `lock`; the underlying mutex is dropped only
 * when the outermost acquisition unwinds.  Owner is cleared before
 * unlocking so the next locker sees a clean state.
 */
static void
IOTRecursiveLockUnlock(IOTRecursiveLock * lock)
{
	assert(lock->thread == current_thread());
	if (0 == (--lock->count)) {
		lock->thread = NULL;
		lck_mtx_unlock(lock->mutex);
	}
}
337 
338 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
339 
340 void
IOTrackingInit(void)341 IOTrackingInit(void)
342 {
343 	queue_init(&gIOTrackingQ);
344 	gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
345 }
346 
347 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
348 
/*
 * Create a tracking queue and register it on gIOTrackingQ.
 * btEntry is substituted into copied-out backtraces (see CopyOutBacktraces);
 * numSiteQs is the number of call-site hash buckets (minimum 1).
 * Capture starts on when the "io" boot-arg has both kIOTracking and
 * kIOTrackingBoot set, or the queue type requests default-on.
 */
IOTrackingQueue *
IOTrackingQueueAlloc(const char * name, uintptr_t btEntry,
    size_t allocSize, size_t minCaptureSize,
    uint32_t type, uint32_t numSiteQs)
{
	IOTrackingQueue * queue;
	uint32_t          idx;

	if (!numSiteQs) {
		numSiteQs = 1;
	}
	// Single allocation: header plus numSiteQs trailing queue_head_t buckets.
	queue = kalloc_type(IOTrackingQueue, queue_head_t, numSiteQs, Z_WAITOK_ZERO);
	queue->name           = name;
	queue->btEntry        = btEntry;
	queue->allocSize      = allocSize;
	queue->minCaptureSize = minCaptureSize;
	queue->lock.mutex     = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
	queue->numSiteQs      = numSiteQs;
	queue->type           = type;
	enum { kFlags = (kIOTracking | kIOTrackingBoot) };
	queue->captureOn = (kFlags == (kFlags & gIOKitDebug))
	    || (kIOTrackingQueueTypeDefaultOn & type);

	for (idx = 0; idx < numSiteQs; idx++) {
		queue_init(&queue->sites[idx]);
	}

	lck_mtx_lock(gIOTrackingLock);
	queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link);
	lck_mtx_unlock(gIOTrackingLock);

	return queue;
};
382 
/*
 * Mark a queue as also collecting user-space backtraces.  Must be
 * called before anything is tracked: sites allocated afterwards get the
 * larger with-user layout (see IOTrackingAdd), hence the assert.
 */
void
IOTrackingQueueCollectUser(IOTrackingQueue * queue)
{
	assert(0 == queue->siteCount);
	queue->type |= kIOTrackingQueueTypeUser;
}
389 
390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391 
/*
 * Tear down a queue made by IOTrackingQueueAlloc: drain every tracked
 * entry, unlink from the global registry (under gIOTrackingLock so
 * concurrent walkers never see a dying queue), then free the queue's
 * own lock and storage.
 */
void
IOTrackingQueueFree(IOTrackingQueue * queue)
{
	lck_mtx_lock(gIOTrackingLock);
	IOTrackingReset(queue);
	remque(&queue->link);
	lck_mtx_unlock(gIOTrackingLock);

	lck_mtx_free(queue->lock.mutex, IOLockGroup);

	kfree_type(IOTrackingQueue, queue_head_t, queue->numSiteQs, queue);
};
404 
405 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
406 
407 /* fasthash
408  *  The MIT License
409  *
410  *  Copyright (C) 2012 Zilong Tan ([email protected])
411  *
412  *  Permission is hereby granted, free of charge, to any person
413  *  obtaining a copy of this software and associated documentation
414  *  files (the "Software"), to deal in the Software without
415  *  restriction, including without limitation the rights to use, copy,
416  *  modify, merge, publish, distribute, sublicense, and/or sell copies
417  *  of the Software, and to permit persons to whom the Software is
418  *  furnished to do so, subject to the following conditions:
419  *
420  *  The above copyright notice and this permission notice shall be
421  *  included in all copies or substantial portions of the Software.
422  *
423  *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
424  *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
425  *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
426  *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
427  *  BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
428  *  ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
429  *  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
430  *  SOFTWARE.
431  */
432 
433 
434 // Compression function for Merkle-Damgard construction.
435 // This function is generated using the framework provided.
436 #define mix(h) ({                               \
437 	          (h) ^= (h) >> 23;             \
438 	          (h) *= 0x2127599bf4325c37ULL; \
439 	          (h) ^= (h) >> 47; })
440 
/*
 * fasthash64 - Zilong Tan's fasthash (license above): hashes `len`
 * bytes of `buf`, consuming eight bytes per round and folding any
 * remaining 1-7 tail bytes in little-endian order.
 */
static uint64_t
fasthash64(const void *buf, size_t len, uint64_t seed)
{
	const uint64_t       mul  = 0x880355f21e6d1965ULL;
	const uint64_t      *word = (const uint64_t *)buf;
	const uint64_t      *stop = word + (len / 8);
	const unsigned char *tail;
	uint64_t             h    = seed ^ (len * mul);
	uint64_t             chunk;
	size_t               rem, i;

	// Full 64-bit words: mix each one into the running state.
	while (word != stop) {
		chunk  = *word++;
		chunk ^= chunk >> 23;
		chunk *= 0x2127599bf4325c37ULL;
		chunk ^= chunk >> 47;
		h = (h ^ chunk) * mul;
	}

	// Tail bytes, if any, assembled little-endian then mixed the same way.
	tail = (const unsigned char *)word;
	rem  = len & 7;
	if (rem) {
		chunk = 0;
		for (i = 0; i < rem; i++) {
			chunk ^= (uint64_t)tail[i] << (8 * i);
		}
		chunk ^= chunk >> 23;
		chunk *= 0x2127599bf4325c37ULL;
		chunk ^= chunk >> 47;
		h = (h ^ chunk) * mul;
	}

	// Final avalanche.
	h ^= h >> 23;
	h *= 0x2127599bf4325c37ULL;
	h ^= h >> 47;
	return h;
}
480 
481 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
482 
483 static uint32_t
fasthash32(const void * buf,size_t len,uint32_t seed)484 fasthash32(const void *buf, size_t len, uint32_t seed)
485 {
486 	// the following trick converts the 64-bit hashcode to Fermat
487 	// residue, which shall retain information from both the higher
488 	// and lower parts of hashcode.
489 	uint64_t h = fasthash64(buf, len, seed);
490 	return (uint32_t) (h - (h >> 32));
491 }
492 
493 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
494 
/*
 * Record a map-type tracking entry: capture the kernel backtrace into
 * mem->bt and, when called from a user task, a user backtrace plus pid
 * into mem->btUser/btPID, then enqueue mem on the queue's sites[0] list.
 */
void
IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size)
{
	uint32_t num;
	int pid;

	if (!queue->captureOn) {
		return;
	}
	if (size < queue->minCaptureSize) {
		return;
	}

	assert(!mem->link.next);

	num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs, NULL, NULL);
	// The kernel frame count is deliberately discarded; `num` is reused
	// below as the user backtrace depth (0 if none is captured).
	num = 0;
	if ((kernel_task != current_task()) && (pid = proc_selfpid())) {
		struct backtrace_user_info btinfo = BTUINFO_INIT;
		mem->btPID = pid;
		// NOTE(review): one slot (kIOTrackingCallSiteBTs - 1) is held back
		// here; the reason is not visible in this file — confirm.
		num = backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1,
		    NULL, &btinfo);
		mem->user32 = !(btinfo.btui_info & BTI_64_BIT);
	}
	assert(num <= kIOTrackingCallSiteBTs);
	static_assert(kIOTrackingCallSiteBTs <= UINT8_MAX);
	mem->userCount = ((uint8_t) num);

	IOTRecursiveLockLock(&queue->lock);
	queue_enter/*last*/ (&queue->sites[0], mem, IOTrackingUser *, link);
	queue->siteCount++;
	IOTRecursiveLockUnlock(&queue->lock);
}
528 
/*
 * Dequeue an entry added by IOTrackingAddUser.  The unlocked link.next
 * check is a fast-path bail-out; it is re-validated under the queue
 * lock before the entry is actually unlinked.
 */
void
IOTrackingRemoveUser(IOTrackingQueue * queue, IOTrackingUser * mem)
{
	if (!mem->link.next) {
		return;
	}

	IOTRecursiveLockLock(&queue->lock);
	if (mem->link.next) {
		remque(&mem->link);
		assert(queue->siteCount);
		queue->siteCount--;
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
544 
545 uint64_t gIOTrackingAddTime;
546 
/*
 * Charge instance `mem` of `size` bytes to the call site identified by
 * the current backtrace.  Entries with matching (crc, tag[, pid]) share
 * one IOTrackingCallSite.  When `address` is true, `mem` is an
 * IOTrackingAddress and is inserted into the site's page-hashed bucket
 * ordering so IOTrackingFree can find it by address later.
 */
void
IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool address, vm_tag_t tag)
{
	IOTrackingCallSite * site;
	uint32_t             crc, num;
	uintptr_t            bt[kIOTrackingCallSiteBTs + 1];
	uintptr_t            btUser[kIOTrackingCallSiteBTs];
	queue_head_t       * que;
	bool                 user;
	int                  pid;
	int                  userCount;

	if (mem->site) {
		return;         // already tracked
	}
	if (!queue->captureOn) {
		return;
	}
	if (size < queue->minCaptureSize) {
		return;
	}

	user = (0 != (kIOTrackingQueueTypeUser & queue->type));

	assert(!mem->link.next);

	// Capture one extra kernel frame and drop bt[0] (this function itself).
	num  = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1, NULL, NULL);
	if (!num) {
		return;
	}
	num--;
	crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);

	userCount = 0;
	pid = 0;
	backtrace_info_t btinfo = BTI_NONE;
	if (user) {
		// User-type queues fold the user-space backtrace into the crc too.
		if ((kernel_task != current_task()) && (pid = proc_selfpid())) {
			struct backtrace_user_info btuinfo = BTUINFO_INIT;
			userCount = backtrace_user(&btUser[0], kIOTrackingCallSiteBTs,
			    NULL, &btuinfo);
			assert(userCount <= kIOTrackingCallSiteBTs);
			btinfo = btuinfo.btui_info;
			crc = fasthash32(&btUser[0], userCount * sizeof(bt[0]), crc);
		}
	}

	IOTRecursiveLockLock(&queue->lock);
	// Look for an existing call site in this crc's hash bucket.
	que = &queue->sites[crc % queue->numSiteQs];
	queue_iterate(que, site, IOTrackingCallSite *, link)
	{
		if (tag != site->tag) {
			continue;
		}
		if (user && (pid != site->user[0].pid)) {
			continue;
		}
		if (crc == site->crc) {
			break;
		}
	}

	if (queue_end(que, (queue_entry_t) site)) {
		// No match: allocate a fresh site (the larger with-user layout
		// when a user record is needed) and record the backtraces.
		if (user) {
			site = &kalloc_type(IOTrackingCallSiteWithUser,
			    Z_WAITOK_ZERO_NOFAIL)->site;
		} else {
			site = kalloc_type(IOTrackingCallSite,
			    Z_WAITOK_ZERO_NOFAIL);
		}

		queue_init(&site->instances);
		site->addresses  = NULL;
		site->queue      = queue;
		site->crc        = crc;
		site->count      = 0;
		site->tag        = tag;
		memset(&site->size[0], 0, sizeof(site->size));
		bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0]));
		assert(num <= kIOTrackingCallSiteBTs);
		bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0]));
		if (user) {
			bcopy(&btUser[0], &site->user[0].bt[0], userCount * sizeof(site->user[0].bt[0]));
			assert(userCount <= kIOTrackingCallSiteBTs);
			bzero(&site->user[0].bt[userCount], (kIOTrackingCallSiteBTs - userCount) * sizeof(site->user[0].bt[0]));
			site->user[0].pid  = pid;
			site->user[0].user32 = !(btinfo & BTI_64_BIT);
			static_assert(kIOTrackingCallSiteBTs <= UINT8_MAX);
			site->user[0].userCount = ((uint8_t) userCount);
		}
		queue_enter_first(que, site, IOTrackingCallSite *, link);
		queue->siteCount++;
	}

	if (address) {
		// Keep address entries grouped into per-page-hash buckets within
		// the site's instance list; addresses[h] points at the bucket head
		// (or at the list sentinel when the bucket is empty).
		IOTrackingAddress * memAddr = (typeof(memAddr))mem;
		uint32_t hashIdx;

		if (NULL == site->addresses) {
			// Lazily create the bucket-head table, all buckets empty.
			site->addresses = kalloc_type(IOTracking *, queue->numSiteQs, Z_WAITOK_ZERO_NOFAIL);
			for (hashIdx = 0; hashIdx < queue->numSiteQs; hashIdx++) {
				site->addresses[hashIdx] = (IOTracking *) &site->instances;
			}
		}
		hashIdx = atop(memAddr->address) % queue->numSiteQs;
		if (queue_end(&site->instances, (queue_entry_t)site->addresses[hashIdx])) {
			queue_enter/*last*/ (&site->instances, mem, IOTracking *, link);
		} else {
			queue_insert_before(&site->instances, mem, site->addresses[hashIdx], IOTracking *, link);
		}
		site->addresses[hashIdx] = mem;         // mem becomes the bucket head
	} else {
		queue_enter_first(&site->instances, mem, IOTracking *, link);
	}

	mem->site      = site;
	site->size[0] += size;
	site->count++;

	IOTRecursiveLockUnlock(&queue->lock);
}
668 
669 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
670 
/*
 * Unlink `mem` from its call site and drop the site's accounting,
 * retiring the site when its last instance goes.  `addressIdx` names
 * the page-hash bucket whose head may need repair, or -1U when the
 * entry is not address-hashed (or the caller repaired the head itself,
 * as IOTrackingFree does).
 */
static void
IOTrackingRemoveInternal(IOTrackingQueue * queue, IOTracking * mem, size_t size, uint32_t addressIdx)
{
	IOTrackingCallSite * site;
	IOTrackingAddress  * nextAddress;

	if (!mem->link.next) {
		return;         // not enqueued; fast-path bail-out
	}

	IOTRecursiveLockLock(&queue->lock);
	if (mem->link.next) {   // re-validate now that the lock is held
		assert(mem->site);
		site = mem->site;

		if ((-1U != addressIdx) && (mem == site->addresses[addressIdx])) {
			// `mem` is its bucket's head: advance the head to the next
			// entry, or back to the list sentinel (bucket empty) when the
			// next entry hashes to a different bucket or the list ends.
			nextAddress = (IOTrackingAddress *) queue_next(&mem->link);
			if (!queue_end(&site->instances, &nextAddress->tracking.link)
			    && (addressIdx != (atop(nextAddress->address) % queue->numSiteQs))) {
				nextAddress = (IOTrackingAddress *) &site->instances;
			}
			site->addresses[addressIdx] = &nextAddress->tracking;
		}

		remque(&mem->link);
		assert(site->count);
		site->count--;
		assert(site->size[0] >= size);
		site->size[0] -= size;
		if (!site->count) {
			// Last instance gone: the site itself is retired.
			assert(queue_empty(&site->instances));
			assert(!site->size[0]);
			assert(!site->size[1]);

			remque(&site->link);
			assert(queue->siteCount);
			queue->siteCount--;
			IOTrackingFreeCallSite(queue->type, &site);
		}
		mem->site = NULL;
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
714 
715 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
716 
717 void
IOTrackingRemove(IOTrackingQueue * queue,IOTracking * mem,size_t size)718 IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size)
719 {
720 	return IOTrackingRemoveInternal(queue, mem, size, -1U);
721 }
722 
723 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
724 
725 void
IOTrackingRemoveAddress(IOTrackingQueue * queue,IOTrackingAddress * mem,size_t size)726 IOTrackingRemoveAddress(IOTrackingQueue * queue, IOTrackingAddress * mem, size_t size)
727 {
728 	uint32_t addressIdx;
729 	uint64_t address;
730 
731 	address = mem->address;
732 	addressIdx = atop(address) % queue->numSiteQs;
733 
734 	return IOTrackingRemoveInternal(queue, &mem->tracking, size, addressIdx);
735 }
736 
737 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
738 
739 void
IOTrackingAlloc(IOTrackingQueue * queue,uintptr_t address,size_t size)740 IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size)
741 {
742 	IOTrackingAddress * tracking;
743 
744 	if (!queue->captureOn) {
745 		return;
746 	}
747 	if (size < queue->minCaptureSize) {
748 		return;
749 	}
750 
751 	address = ~address;
752 	tracking = kalloc_type(IOTrackingAddress, (zalloc_flags_t)(Z_WAITOK | Z_ZERO));
753 	IOTrackingAddressFlags(tracking) |= kTrackingAddressFlagAllocated;
754 	tracking->address = address;
755 	tracking->size    = size;
756 
757 	IOTrackingAdd(queue, &tracking->tracking, size, true, VM_KERN_MEMORY_NONE);
758 }
759 
760 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
761 
/*
 * Find and remove the tracking entry for `address` across all call
 * sites of `queue`, freeing the IOTrackingAddress record that
 * IOTrackingAlloc created.  Addresses are stored one's-complemented,
 * so the lookup inverts first.
 */
void
IOTrackingFree(IOTrackingQueue * queue, uintptr_t address, size_t size)
{
	IOTrackingCallSite * site;
	IOTrackingAddress  * tracking;
	IOTrackingAddress  * nextAddress;
	uint32_t             idx, hashIdx;
	bool                 done;

	address = ~address;     // match the inverted form stored by IOTrackingAlloc
	IOTRecursiveLockLock(&queue->lock);

	hashIdx = atop(address) % queue->numSiteQs;

	done = false;
	for (idx = 0; idx < queue->numSiteQs; idx++) {
		queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
		{
			if (!site->addresses) {
				continue;
			}
			// Walk only this site's hashIdx bucket: entries for one bucket
			// are contiguous, starting at the saved bucket head.
			tracking = (IOTrackingAddress *) site->addresses[hashIdx];
			while (!queue_end(&site->instances, &tracking->tracking.link)) {
				nextAddress = (IOTrackingAddress *) queue_next(&tracking->tracking.link);
				if (!queue_end(&site->instances, &nextAddress->tracking.link)
				    && (hashIdx != (atop(nextAddress->address) % queue->numSiteQs))) {
					// Next entry belongs to another bucket: treat as list end.
					nextAddress = (IOTrackingAddress *) &site->instances;
				}
				if ((done = (address == tracking->address))) {
					// Repair the bucket head here, then remove with -1U so
					// IOTrackingRemoveInternal does not repair it again.
					if (tracking == (IOTrackingAddress *) site->addresses[hashIdx]) {
						site->addresses[hashIdx] = &nextAddress->tracking;
					}
					IOTrackingRemoveInternal(queue, &tracking->tracking, size, -1U);
					kfree_type(IOTrackingAddress, tracking);
					break;
				}
				tracking = nextAddress;
			}
			if (done) {
				break;
			}
		}
		if (done) {
			break;
		}
	}
	IOTRecursiveLockUnlock(&queue->lock);
}
810 
/*
 * Free one call site plus its lazily-allocated bucket-head table.  The
 * site is passed by reference; NOTE(review): kfree_type() appears to be
 * given the caller's slot (via the aliased `ptr`) so it can clear it —
 * confirm against the kfree_type macro.  The allocation size depends on
 * the queue type: user-type sites were allocated as the larger
 * IOTrackingCallSiteWithUser (see IOTrackingAdd).
 */
static void
IOTrackingFreeCallSite(uint32_t type, IOTrackingCallSite ** pSite)
{
	IOTrackingCallSite * site;
	void ** ptr;

	site = *pSite;
	kfree_type(IOTracking *, site->queue->numSiteQs, site->addresses);

	ptr = reinterpret_cast<void **>(pSite);
	if (kIOTrackingQueueTypeUser & type) {
		kfree_type(IOTrackingCallSiteWithUser, *ptr);
	} else {
		kfree_type(IOTrackingCallSite, *ptr);
	}
}
827 
828 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
829 
/*
 * Adjust the secondary accumulator (site->size[1]) for a tracked entry.
 * `size` is a size_t, but the `-size` in the assert suggests callers
 * may pass two's-complement negative deltas, with the unsigned add
 * wrapping to implement subtraction — TODO confirm against callers.
 * Entries no longer enqueued are silently ignored.
 */
void
IOTrackingAccumSize(IOTrackingQueue * queue, IOTracking * mem, size_t size)
{
	IOTRecursiveLockLock(&queue->lock);
	if (mem->link.next) {
		assert(mem->site);
		assert((size > 0) || (mem->site->size[1] >= -size));
		mem->site->size[1] += size;
	}
	;
	IOTRecursiveLockUnlock(&queue->lock);
}
842 
843 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
844 
/*
 * Drop everything tracked by `queue`.  Map-type queues hold
 * IOTrackingUser records owned by their allocations, so only their
 * links are cleared.  Other queues free each call site; instances that
 * appear in a site's bucket-head table and carry
 * kTrackingAddressFlagAllocated are IOTrackingAddress records created
 * by IOTrackingAlloc and are freed here.  Caller typically holds
 * gIOTrackingLock (see IOTrackingQueueFree); the queue's own lock is
 * taken here.
 */
void
IOTrackingReset(IOTrackingQueue * queue)
{
	IOTrackingCallSite * site;
	IOTrackingUser     * user;
	IOTracking         * tracking;
	IOTrackingAddress  * trackingAddress;
	uint32_t             idx, hashIdx;
	bool                 addresses;

	IOTRecursiveLockLock(&queue->lock);
	for (idx = 0; idx < queue->numSiteQs; idx++) {
		while (!queue_empty(&queue->sites[idx])) {
			if (kIOTrackingQueueTypeMap & queue->type) {
				queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link);
				user->link.next = user->link.prev = NULL;
			} else {
				queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link);
				addresses = false;
				while (!queue_empty(&site->instances)) {
					queue_remove_first(&site->instances, tracking, IOTracking *, link);
					if (site->addresses) {
						// `addresses` latches: once the first bucket-head
						// instance is seen, this and every later instance on
						// the site is treated as an address record.
						for (hashIdx = 0; !addresses && (hashIdx < queue->numSiteQs); hashIdx++) {
							if (tracking == site->addresses[hashIdx]) {
								addresses = true;
							}
						}
					}
					if (addresses) {
						trackingAddress = (typeof(trackingAddress))tracking;
						if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress)) {
							kfree_type(IOTrackingAddress, trackingAddress);
						}
					}
				}
				IOTrackingFreeCallSite(queue->type, &site);
			}
		}
	}
	queue->siteCount = 0;
	IOTRecursiveLockUnlock(&queue->lock);
}
887 
888 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
889 
890 static int
IOTrackingCallSiteInfoCompare(const void * left,const void * right)891 IOTrackingCallSiteInfoCompare(const void * left, const void * right)
892 {
893 	IOTrackingCallSiteInfo * l = (typeof(l))left;
894 	IOTrackingCallSiteInfo * r = (typeof(r))right;
895 	size_t                   lsize, rsize;
896 
897 	rsize = r->size[0] + r->size[1];
898 	lsize = l->size[0] + l->size[1];
899 
900 	return (rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1);
901 }
902 
903 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
904 
/*
 * qsort comparator for the leak-scan instance table, ordering entries
 * by the address each represents.  Table entries are tagged pointers:
 * kInstanceFlagAddress marks IOTrackingAddress records (which store
 * ~address, inverted back here); otherwise the payload begins just past
 * the IOTracking header.
 * NOTE(review): the left side strips tags with INSTANCE_GET() while the
 * right side open-codes (inst & ~kInstanceFlags) — presumably
 * equivalent; confirm against the macro definition.
 */
static int
IOTrackingAddressCompare(const void * left, const void * right)
{
	IOTracking * instance;
	uintptr_t    inst, laddr, raddr;

	inst = ((typeof(inst) *)left)[0];
	instance = (typeof(instance))INSTANCE_GET(inst);
	if (kInstanceFlagAddress & inst) {
		laddr = ~((IOTrackingAddress *)instance)->address;
	} else {
		laddr = (uintptr_t) (instance + 1);
	}

	inst = ((typeof(inst) *)right)[0];
	instance = (typeof(instance))(inst & ~kInstanceFlags);
	if (kInstanceFlagAddress & inst) {
		raddr = ~((IOTrackingAddress *)instance)->address;
	} else {
		raddr = (uintptr_t) (instance + 1);
	}

	return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
}
929 
930 
931 static int
IOTrackingZoneElementCompare(const void * left,const void * right)932 IOTrackingZoneElementCompare(const void * left, const void * right)
933 {
934 	uintptr_t    inst, laddr, raddr;
935 
936 	inst = ((typeof(inst) *)left)[0];
937 	laddr = INSTANCE_PUT(inst);
938 	inst = ((typeof(inst) *)right)[0];
939 	raddr = INSTANCE_PUT(inst);
940 
941 	return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1);
942 }
943 
944 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
945 
/*
 * Flatten a call site's kernel and (optional) user backtraces into a
 * copy-out record.  The queue's btEntry, when set, is substituted once
 * for the first empty kernel slot (or the last slot if none is empty).
 * Kernel addresses are unslid for user space; user frames stored at
 * 32-bit width are widened to 64 bits.
 */
static void
CopyOutBacktraces(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo)
{
	uint32_t j;
	mach_vm_address_t bt, btEntry;

	btEntry = site->queue->btEntry;
	for (j = 0; j < kIOTrackingCallSiteBTs; j++) {
		bt = site->bt[j];
		if (btEntry
		    && (!bt || (j == (kIOTrackingCallSiteBTs - 1)))) {
			bt = btEntry;
			btEntry = 0;    // substitute at most once
		}
		siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt);
	}

	siteInfo->btPID = 0;
	if (kIOTrackingQueueTypeUser & site->queue->type) {
		siteInfo->btPID = site->user[0].pid;
		// The same buffer is viewed as 32- or 64-bit frames depending on
		// how the capturing task stored them (user32 flag).
		uint32_t * bt32 = (typeof(bt32))((void *) &site->user[0].bt[0]);
		uint64_t * bt64 = (typeof(bt64))((void *) &site->user[0].bt[0]);
		for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
			if (j >= site->user[0].userCount) {
				siteInfo->bt[1][j] = 0;
			} else if (site->user[0].user32) {
				siteInfo->bt[1][j] = bt32[j];
			} else {
				siteInfo->bt[1][j] = bt64[j];
			}
		}
	}
}
979 
980 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
981 
/*
 * Scan the kernel's virtual address space for words that point into any of
 * the tracked allocations listed in ref->instances (which must be sorted by
 * decoded address — see IOTrackingAddressCompare / IOTrackingZoneElementCompare —
 * so a binary search works).  Each allocation found to be referenced from
 * outside itself is flagged kInstanceFlagReferenced and counted in
 * ref->found; entries left unflagged are leak candidates.
 *
 * The walk runs with interrupts disabled in ~10ms slices, re-enabling them
 * briefly between slices to bound interrupt latency.
 */
static void
IOTrackingLeakScan(void * refcon)
{
	IOTrackingLeaksRef * ref = (typeof(ref))refcon;
	uintptr_t          * instances;
	IOTracking         * instance;
	uint64_t             vaddr, vincr;
	ppnum_t              ppn;
	uintptr_t            ptr, addr, vphysaddr, inst;
	size_t               size, origsize;
	uint32_t             baseIdx, lim, ptrIdx, count;
	boolean_t            is;
	AbsoluteTime         deadline;

	instances       = ref->instances;
	count           = ref->count;
	size = origsize = ref->zoneSize;

	if (gIOTrackingLeakScanCallback) {
		gIOTrackingLeakScanCallback(kIOTrackingLeakScanStart);
	}

	// Walk every page of the kernel address range.
	for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
	    ;
	    vaddr += vincr) {
		// Slice boundary: every ~10ms restore the saved interrupt state and
		// pause briefly; terminate once past the end of kernel VA.
		// 'deadline' is 0 only before the first slice, so 'is' is always
		// assigned (below) before it is read here.
		if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS)) {
			if (deadline) {
#if SCHED_HYGIENE_DEBUG
				if (is) {
					// Reset the interrupt timeout to avoid panics
					ml_spin_debug_clear_self();
				}
#endif /* SCHED_HYGIENE_DEBUG */
				ml_set_interrupts_enabled(is);
				IODelay(10);
			}
			if (vaddr >= VM_MAX_KERNEL_ADDRESS) {
				break;
			}
			is = ml_set_interrupts_enabled(false);
			clock_interval_to_deadline(10, kMillisecondScale, &deadline);
		}

		ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr);
		// check noencrypt to avoid VM structs (map entries) with pointers
		if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) {
			ppn = 0;
		}
		if (!ppn) {
			continue;
		}

		vm_memtag_disable_checking();
		// Treat each pointer-aligned word in the page as a candidate pointer.
		for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++) {
			ptr = ((uintptr_t *)vphysaddr)[ptrIdx];
#if defined(HAS_APPLE_PAC)
			// strip possible ptrauth signature from candidate data pointer
			ptr = (uintptr_t)ptrauth_strip((void*)ptr, ptrauth_key_process_independent_data);
#endif /* defined(HAS_APPLE_PAC) */

			// Binary search the sorted instance array for an allocation
			// whose [addr, addr + size) range contains 'ptr'.
			for (lim = count, baseIdx = 0; lim; lim >>= 1) {
				inst = instances[baseIdx + (lim >> 1)];
				instance = (typeof(instance))INSTANCE_GET(inst);

				// Decode the allocation's base address and size.
				if (ref->zoneSize) {
					addr = INSTANCE_PUT(inst) & ~kInstanceFlags;
				} else if (kInstanceFlagAddress & inst) {
					addr            = ~((IOTrackingAddress *)instance)->address;
					origsize = size = ((IOTrackingAddress *)instance)->size;
					if (!size) {
						// zero-length record: probe a single byte
						size = 1;
					}
				} else {
					addr            = (uintptr_t) (instance + 1);
					origsize = size = instance->site->queue->allocSize;
				}
				// A hit requires the referencing word itself to lie outside
				// the allocation: self-references don't count as live.
				if ((ptr >= addr) && (ptr < (addr + size))

				    && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr)
				    || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size)))) {
					if (!(kInstanceFlagReferenced & inst)) {
						inst |= kInstanceFlagReferenced;
						instances[baseIdx + (lim >> 1)] = inst;
						ref->found++;
						if (!origsize) {
							ref->foundzlen++;
						}
					}
					break;
				}
				if (ptr > addr) {
					// move right
					baseIdx += (lim >> 1) + 1;
					lim--;
				}
				// else move left
			}
		}
		vm_memtag_enable_checking();
		ref->bytes += page_size;
	}

	if (gIOTrackingLeakScanCallback) {
		gIOTrackingLeakScanCallback(kIOTrackingLeakScanEnd);
	}
}
1088 
1089 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1090 
1091 extern "C" void
zone_leaks_scan(uintptr_t * instances,uint32_t count,uint32_t zoneSize,uint32_t * found)1092 zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32_t * found)
1093 {
1094 	IOTrackingLeaksRef       ref;
1095 	IOTrackingCallSiteInfo   siteInfo;
1096 	uint32_t                 idx;
1097 
1098 	qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare);
1099 
1100 	bzero(&siteInfo, sizeof(siteInfo));
1101 	bzero(&ref, sizeof(ref));
1102 	ref.instances = instances;
1103 	ref.count = count;
1104 	ref.zoneSize = zoneSize;
1105 
1106 	for (idx = 0; idx < 2; idx++) {
1107 		ref.bytes = 0;
1108 		IOTrackingLeakScan(&ref);
1109 		IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found);
1110 		if (count <= ref.found) {
1111 			break;
1112 		}
1113 	}
1114 
1115 	*found = ref.found;
1116 }
1117 
1118 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1119 
/*
 * Post-process the raw instance list collected by the kIOTrackingLeaks
 * selector: sort it, run the leak scan (up to twice), then aggregate the
 * still-unreferenced instances per call site into IOTrackingCallSiteInfo
 * records.  Consumes 'data' (released here) and returns a new OSData of
 * IOTrackingCallSiteInfo entries.
 *
 * Runs while the contributing queues' locks are still held (taken in
 * IOTrackingDebug's kIOTrackingLeaks case and released by it afterwards),
 * so the site/instance structures cannot mutate during the scan.
 */
static OSData *
IOTrackingLeaks(LIBKERN_CONSUMED OSData * data)
{
	IOTrackingLeaksRef       ref;
	IOTrackingCallSiteInfo   siteInfo;
	IOTrackingCallSite     * site;
	OSData                 * leakData;
	uintptr_t              * instances;
	IOTracking             * instance;
	uintptr_t                inst;
	uint32_t                 count, idx, numSites, dups, siteCount;

	/* BEGIN IGNORE CODESTYLE */
	__typed_allocators_ignore_push
	instances = (typeof(instances))data->getBytesNoCopy();
	__typed_allocators_ignore_pop
	/* END IGNORE CODESTYLE */
	count = (data->getLength() / sizeof(*instances));
	// Sort by decoded address so IOTrackingLeakScan can binary search.
	qsort(instances, count, sizeof(*instances), &IOTrackingAddressCompare);

	bzero(&siteInfo, sizeof(siteInfo));
	bzero(&ref, sizeof(ref));
	ref.instances = instances;
	ref.count = count;
	// At most two passes; stop early once every instance has been found.
	for (idx = 0; idx < 2; idx++) {
		ref.bytes = 0;
		IOTrackingLeakScan(&ref);
		IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d (zlen %d)\n", idx, ref.bytes / 1024 / 1024, count, ref.found, ref.foundzlen);
		if (count <= ref.found) {
			break;
		}
	}

	/* BEGIN IGNORE CODESTYLE */
	__typed_allocators_ignore_push
	leakData = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
	__typed_allocators_ignore_pop
	/* END IGNORE CODESTYLE */

	// Compact in place: keep only the call sites of instances that were NOT
	// found referenced — these are the leak candidates.
	for (numSites = 0, idx = 0; idx < count; idx++) {
		inst = instances[idx];
		if (kInstanceFlagReferenced & inst) {
			continue;
		}
		instance = (typeof(instance))INSTANCE_GET(inst);
		site = instance->site;
		instances[numSites] = (uintptr_t) site;
		numSites++;
	}

	// Deduplicate call sites, counting leaked instances per site, and emit
	// one IOTrackingCallSiteInfo per unique site.
	for (idx = 0; idx < numSites; idx++) {
		inst = instances[idx];
		if (!inst) {
			continue;
		}
		site = (typeof(site))inst;
		for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) {
			if (instances[dups] == (uintptr_t) site) {
				siteCount++;
				instances[dups] = 0;
			}
		}
		// leak byte size is reported as:
		// (total bytes allocated by the callsite * number of leaked instances)
		// divided by (number of allocations by callsite)
		siteInfo.count   = siteCount;
		siteInfo.size[0] = (site->size[0] * siteCount) / site->count;
		siteInfo.size[1] = (site->size[1] * siteCount) / site->count;
		CopyOutBacktraces(site, &siteInfo);
		__typed_allocators_ignore_push
		leakData->appendBytes(&siteInfo, sizeof(siteInfo));
		__typed_allocators_ignore_pop
	}
	data->release();

	return leakData;
}
1197 
1198 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1199 
1200 static bool
SkipName(uint32_t options,const char * name,size_t namesLen,const char * names)1201 SkipName(uint32_t options, const char * name, size_t namesLen, const char * names)
1202 {
1203 	const char * scan;
1204 	const char * next;
1205 	bool         exclude, found;
1206 	size_t       qLen, sLen;
1207 
1208 	if (!namesLen || !names) {
1209 		return false;
1210 	}
1211 	// <len><name>...<len><name><0>
1212 	exclude = (0 != (kIOTrackingExcludeNames & options));
1213 	qLen    = strlen(name);
1214 	scan    = names;
1215 	found   = false;
1216 	do{
1217 		sLen = scan[0];
1218 		scan++;
1219 		next = scan + sLen;
1220 		if (next >= (names + namesLen)) {
1221 			break;
1222 		}
1223 		found = ((sLen == qLen) && !strncmp(scan, name, sLen));
1224 		scan = next;
1225 	}while (!found && (scan < (names + namesLen)));
1226 
1227 	return !(exclude ^ found);
1228 }
1229 
1230 #endif /* IOTRACKING */
1231 
1232 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1233 
/*
 * Core worker for the IOKitDiagnostics external methods.  Walks the global
 * queue list (gIOTrackingQ) and applies 'selector' to every queue whose
 * name passes the SkipName() filter:
 *
 *   kIOTrackingResetTracking     - reset a queue's tracking state
 *   kIOTrackingStartCapture /
 *   kIOTrackingStopCapture       - enable/disable capture on a queue
 *   kIOTrackingSetMinCaptureSize - set a queue's minimum capture size
 *   kIOTrackingLeaks             - collect instances, then leak-scan them
 *   kIOTrackingGetTracking       - copy out per-call-site allocation info
 *   kIOTrackingGetMappings       - copy out memory-map tracking info
 *
 * For the data-producing selectors, *result receives an OSData of sorted
 * IOTrackingCallSiteInfo records (caller owns the reference).  Returns
 * kIOReturnNotReady when IOTRACKING is compiled out.
 */
static kern_return_t
IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value,
    uint32_t intag, uint32_t inzsize,
    const char * names, size_t namesLen,
    size_t size, OSObject ** result)
{
	kern_return_t            ret;
	OSData                 * data;

	if (result) {
		*result = NULL;
	}
	data = NULL;
	ret = kIOReturnNotReady;

#if IOTRACKING

	kern_return_t            kr;
	IOTrackingQueue        * queue;
	IOTracking             * instance;
	IOTrackingCallSite     * site;
	IOTrackingCallSiteInfo   siteInfo;
	IOTrackingUser         * user;
	task_t                   mapTask;
	mach_vm_address_t        mapAddress;
	mach_vm_size_t           mapSize;
	uint32_t                 num, idx, qIdx;
	uintptr_t                instFlags;
	proc_t                   proc;
	bool                     addresses;

	ret = kIOReturnNotFound;
	proc = NULL;
	// For kIOTrackingGetMappings, 'value' optionally names a pid to filter
	// by (-1 means all processes).  Hold a proc ref across the walk.
	if (kIOTrackingGetMappings == selector) {
		if (value != -1ULL) {
			proc = proc_find((pid_t) value);
			if (!proc) {
				return kIOReturnNotFound;
			}
		}
	}

	bzero(&siteInfo, sizeof(siteInfo));
	lck_mtx_lock(gIOTrackingLock);
	queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
	{
		if (SkipName(options, queue->name, namesLen, names)) {
			continue;
		}

		// Allocation-type queues only carry data when kIOTracking is on.
		if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) {
			continue;
		}

		switch (selector) {
		case kIOTrackingResetTracking:
		{
			IOTrackingReset(queue);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingStartCapture:
		case kIOTrackingStopCapture:
		{
			queue->captureOn = (kIOTrackingStartCapture == selector);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingSetMinCaptureSize:
		{
			queue->minCaptureSize = size;
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingLeaks:
		{
			if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
				break;
			}

			if (!data) {
				/* BEGIN IGNORE CODESTYLE */
				__typed_allocators_ignore_push
				data = OSData::withCapacity(1024 * sizeof(uintptr_t));
				__typed_allocators_ignore_pop
				/* END IGNORE CODESTYLE */
			}

			// Append every live instance, tagged with kInstanceFlagAddress
			// when the instance is an address-only record (listed in the
			// site's addresses hash).
			IOTRecursiveLockLock(&queue->lock);
			for (idx = 0; idx < queue->numSiteQs; idx++) {
				queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link)
				{
					addresses = false;
					queue_iterate(&site->instances, instance, IOTracking *, link)
					{
						if (site->addresses) {
							for (uint32_t hashIdx = 0; !addresses && (hashIdx < queue->numSiteQs); hashIdx++) {
								if (instance == site->addresses[hashIdx]) {
									addresses = true;
								}
							}
						}
						instFlags = (typeof(instFlags))instance;
						if (addresses) {
							instFlags |= kInstanceFlagAddress;
						}
						data->appendValue(instFlags);
					}
				}
			}
			// queue is locked
			// (deliberately left locked: IOTrackingLeaks() below walks
			// these structures; the lock is dropped after it returns)
			ret = kIOReturnSuccess;
			break;
		}


		case kIOTrackingGetTracking:
		{
			if (kIOTrackingQueueTypeMap & queue->type) {
				break;
			}

			if (!data) {
				/* BEGIN IGNORE CODESTYLE */
				__typed_allocators_ignore_push
				data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo));
				__typed_allocators_ignore_pop
				/* END IGNORE CODESTYLE */
			}

			IOTRecursiveLockLock(&queue->lock);
			num = queue->siteCount;
			idx = 0;
			for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
				queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link)
				{
					assert(idx < num);
					idx++;

					size_t tsize[2];
					uint32_t count = site->count;
					tsize[0] = site->size[0];
					tsize[1] = site->size[1];

					// Optional filtering by VM tag and/or zone size: recount
					// only the instances that match.
					if (intag || inzsize) {
						uintptr_t addr;
						vm_size_t size, zoneSize;
						vm_tag_t  tag;

						if (kIOTrackingQueueTypeAlloc & queue->type) {
							addresses = false;
							count = 0;
							tsize[0] = tsize[1] = 0;
							queue_iterate(&site->instances, instance, IOTracking *, link)
							{
								if (site->addresses) {
									for (uint32_t hashIdx = 0; !addresses && (hashIdx < queue->numSiteQs); hashIdx++) {
										if (instance == site->addresses[hashIdx]) {
											addresses = true;
										}
									}
								}

								if (addresses) {
									addr = ~((IOTrackingAddress *)instance)->address;
								} else {
									addr = (uintptr_t) (instance + 1);
								}

								kr = vm_kern_allocation_info(addr, &size, &tag, &zoneSize);
								if (KERN_SUCCESS != kr) {
									continue;
								}

								if ((VM_KERN_MEMORY_NONE != intag) && (intag != tag)) {
									continue;
								}
								if (inzsize && (inzsize != zoneSize)) {
									continue;
								}

								count++;
								tsize[0] += size;
							}
						} else {
							// Non-alloc queues carry a single site tag.
							if (!intag || inzsize || (intag != site->tag)) {
								continue;
							}
						}
					}

					if (!count) {
						continue;
					}
					// 'size' here is the caller's minimum-size filter.
					if (size && ((tsize[0] + tsize[1]) < size)) {
						continue;
					}
					siteInfo.count   = count;
					siteInfo.size[0] = tsize[0];
					siteInfo.size[1] = tsize[1];
					CopyOutBacktraces(site, &siteInfo);
					__typed_allocators_ignore_push
					data->appendBytes(&siteInfo, sizeof(siteInfo));
					__typed_allocators_ignore_pop
				}
			}
			assert(idx == num);
			IOTRecursiveLockUnlock(&queue->lock);
			ret = kIOReturnSuccess;
			break;
		}

		case kIOTrackingGetMappings:
		{
			if (!(kIOTrackingQueueTypeMap & queue->type)) {
				break;
			}
			if (!data) {
				data = OSData::withCapacity((unsigned int) page_size);
			}

			IOTRecursiveLockLock(&queue->lock);
			num = queue->siteCount;
			idx = 0;
			for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) {
				queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link)
				{
					assert(idx < num);
					idx++;

					kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize);
					if (kIOReturnSuccess != kr) {
						continue;
					}
					// Apply optional pid and minimum-size filters.
					if (proc && (mapTask != proc_task(proc))) {
						continue;
					}
					if (size && (mapSize < size)) {
						continue;
					}

					siteInfo.count      = 1;
					siteInfo.size[0]    = mapSize;
					siteInfo.address    = mapAddress;
					siteInfo.addressPID = task_pid(mapTask);
					siteInfo.btPID      = user->btPID;

					for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
						siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]);
					}
					// User backtrace is stored 32- or 64-bit wide.
					uint32_t * bt32 = (typeof(bt32)) & user->btUser[0];
					uint64_t * bt64 = (typeof(bt64))((void *) &user->btUser[0]);
					for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) {
						if (j >= user->userCount) {
							siteInfo.bt[1][j] = 0;
						} else if (user->user32) {
							siteInfo.bt[1][j] = bt32[j];
						} else {
							siteInfo.bt[1][j] = bt64[j];
						}
					}
					__typed_allocators_ignore_push
					data->appendBytes(&siteInfo, sizeof(siteInfo));
					__typed_allocators_ignore_pop
				}
			}
			assert(idx == num);
			IOTRecursiveLockUnlock(&queue->lock);
			ret = kIOReturnSuccess;
			break;
		}

		default:
			ret = kIOReturnUnsupported;
			break;
		}
	}

	// The kIOTrackingLeaks case above left each contributing queue locked;
	// run the scan/aggregation, then drop those locks.
	if ((kIOTrackingLeaks == selector) && data) {
		data = IOTrackingLeaks(data);
		queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link)
		{
			if (SkipName(options, queue->name, namesLen, names)) {
				continue;
			}
			if (!(kIOTrackingQueueTypeAlloc & queue->type)) {
				continue;
			}
			IOTRecursiveLockUnlock(&queue->lock);
		}
	}

	lck_mtx_unlock(gIOTrackingLock);

	// For named leak requests, additionally query the zone allocator's own
	// leak records for each name in the packed <len><name>... list.
	if ((kIOTrackingLeaks == selector) && namesLen && names) {
		const char * scan;
		const char * next;
		uint8_t      sLen;

		if (!data) {
			/* BEGIN IGNORE CODESTYLE */
			__typed_allocators_ignore_push
			data = OSData::withCapacity(4096 * sizeof(uintptr_t));
			__typed_allocators_ignore_pop
			/* END IGNORE CODESTYLE */
		}

		// <len><name>...<len><name><0>
		scan    = names;
		do{
			sLen = ((uint8_t) scan[0]);
			scan++;
			next = scan + sLen;
			if (next >= (names + namesLen)) {
				break;
			}
			kr = zone_leaks(scan, sLen, ^(uint32_t count, uint32_t eSize, btref_t ref) {
				IOTrackingCallSiteInfo siteInfo = {
				        .count   = count,
				        .size[0] = eSize * count,
				};

				btref_decode_unslide(ref, siteInfo.bt[0]);
				__typed_allocators_ignore_push
				data->appendBytes(&siteInfo, sizeof(siteInfo));
				__typed_allocators_ignore_pop
			});
			if (KERN_SUCCESS == kr) {
				ret = kIOReturnSuccess;
			} else if (KERN_INVALID_NAME != kr) {
				ret = kIOReturnVMError;
			}
			scan = next;
		}while (scan < (names + namesLen));
	}

	// Sort the copied-out records for the data-producing selectors.
	if (data) {
		switch (selector) {
		case kIOTrackingLeaks:
		case kIOTrackingGetTracking:
		case kIOTrackingGetMappings:
		{
			IOTrackingCallSiteInfo * siteInfos;
			/* BEGIN IGNORE CODESTYLE */
			__typed_allocators_ignore_push
			siteInfos = (typeof(siteInfos))data->getBytesNoCopy();
			__typed_allocators_ignore_pop
			/* END IGNORE CODESTYLE */
			num = (data->getLength() / sizeof(*siteInfos));
			qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare);
			break;
		}
		default: assert(false); break;
		}
	}

	// NOTE(review): 'result' was NULL-checked at entry but is dereferenced
	// unconditionally here; all current callers pass a valid pointer —
	// confirm before adding new call sites with result == NULL.
	*result = data;
	if (proc) {
		proc_rele(proc);
	}

#endif /* IOTRACKING */

	return ret;
}
1602 
1603 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1604 
1605 #include <IOKit/IOKitDiagnosticsUserClient.h>
1606 
1607 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1608 
#undef super
#define super IOUserClient2022

// Registers IOKitDiagnosticsClient (subclass of IOUserClient2022) with the
// libkern runtime type system and generates its constructors/destructors.
OSDefineMetaClassAndStructors(IOKitDiagnosticsClient, IOUserClient2022)
1613 
1614 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1615 
1616 IOUserClient * IOKitDiagnosticsClient::withTask(task_t owningTask)
1617 {
1618 #if IOTRACKING
1619 	IOKitDiagnosticsClient * inst;
1620 
1621 	inst = new IOKitDiagnosticsClient;
1622 	if (inst && !inst->init()) {
1623 		inst->release();
1624 		inst = NULL;
1625 	}
1626 
1627 	inst->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
1628 	inst->setProperty(kIOUserClientDefaultLockingSetPropertiesKey, kOSBooleanTrue);
1629 	inst->setProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey, kOSBooleanTrue);
1630 
1631 	inst->setProperty(kIOUserClientEntitlementsKey, kOSBooleanFalse);
1632 
1633 	return inst;
1634 #else
1635 	return NULL;
1636 #endif
1637 }
1638 
// Called when the owning user process closes its connection (or exits);
// terminates this user client service object.
IOReturn
IOKitDiagnosticsClient::clientClose(void)
{
	terminate();
	return kIOReturnSuccess;
}
1645 
1646 IOReturn
setProperties(OSObject * properties)1647 IOKitDiagnosticsClient::setProperties(OSObject * properties)
1648 {
1649 	IOReturn kr = kIOReturnUnsupported;
1650 	return kr;
1651 }
1652 
1653 
1654 IOReturn
IOTrackingMethodDispatched(OSObject * target,void * reference,IOExternalMethodArguments * args)1655 IOTrackingMethodDispatched(OSObject * target, void * reference,
1656     IOExternalMethodArguments * args)
1657 {
1658 	IOReturn                           ret = kIOReturnBadArgument;
1659 	const IOKitDiagnosticsParameters * params;
1660 	const char * names;
1661 	size_t       namesLen;
1662 	OSObject   * result;
1663 
1664 	if (args->structureInputSize < sizeof(IOKitDiagnosticsParameters)) {
1665 		return kIOReturnBadArgument;
1666 	}
1667 	params = (typeof(params))args->structureInput;
1668 	if (!params) {
1669 		return kIOReturnBadArgument;
1670 	}
1671 
1672 	names = NULL;
1673 	namesLen = args->structureInputSize - sizeof(IOKitDiagnosticsParameters);
1674 	if (namesLen) {
1675 		names = (typeof(names))(params + 1);
1676 	}
1677 
1678 	ret = IOTrackingDebug(args->selector, params->options, params->value, params->tag, params->zsize, names, namesLen, params->size, &result);
1679 	if ((kIOReturnSuccess == ret) && args->structureVariableOutputData) {
1680 		*args->structureVariableOutputData = result;
1681 	} else if (result) {
1682 		result->release();
1683 	}
1684 	return ret;
1685 }
1686 
1687 IOReturn
externalMethod(uint32_t selector,IOExternalMethodArgumentsOpaque * args)1688 IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque * args)
1689 {
1690 	static const IOExternalMethodDispatch2022 dispatchArray[] = {
1691 		[kIOTrackingGetTracking] = {
1692 			.function                             = &IOTrackingMethodDispatched,
1693 			.checkScalarInputCount    = 0,
1694 			.checkStructureInputSize  = kIOUCVariableStructureSize,
1695 			.checkScalarOutputCount   = 0,
1696 			.checkStructureOutputSize = 0,
1697 			.allowAsync               = false,
1698 			.checkEntitlement         = NULL,
1699 		},
1700 		[kIOTrackingGetMappings] = {
1701 			.function                             = &IOTrackingMethodDispatched,
1702 			.checkScalarInputCount    = 0,
1703 			.checkStructureInputSize  = kIOUCVariableStructureSize,
1704 			.checkScalarOutputCount   = 0,
1705 			.checkStructureOutputSize = 0,
1706 			.allowAsync               = false,
1707 			.checkEntitlement         = NULL,
1708 		},
1709 		[kIOTrackingResetTracking] = {
1710 			.function                             = &IOTrackingMethodDispatched,
1711 			.checkScalarInputCount    = 0,
1712 			.checkStructureInputSize  = kIOUCVariableStructureSize,
1713 			.checkScalarOutputCount   = 0,
1714 			.checkStructureOutputSize = 0,
1715 			.allowAsync               = false,
1716 			.checkEntitlement         = NULL,
1717 		},
1718 		[kIOTrackingStartCapture] = {
1719 			.function                             = &IOTrackingMethodDispatched,
1720 			.checkScalarInputCount    = 0,
1721 			.checkStructureInputSize  = kIOUCVariableStructureSize,
1722 			.checkScalarOutputCount   = 0,
1723 			.checkStructureOutputSize = 0,
1724 			.allowAsync               = false,
1725 			.checkEntitlement         = NULL,
1726 		},
1727 		[kIOTrackingStopCapture] = {
1728 			.function                             = &IOTrackingMethodDispatched,
1729 			.checkScalarInputCount    = 0,
1730 			.checkStructureInputSize  = kIOUCVariableStructureSize,
1731 			.checkScalarOutputCount   = 0,
1732 			.checkStructureOutputSize = 0,
1733 			.allowAsync               = false,
1734 			.checkEntitlement         = NULL,
1735 		},
1736 		[kIOTrackingSetMinCaptureSize] = {
1737 			.function                             = &IOTrackingMethodDispatched,
1738 			.checkScalarInputCount    = 0,
1739 			.checkStructureInputSize  = kIOUCVariableStructureSize,
1740 			.checkScalarOutputCount   = 0,
1741 			.checkStructureOutputSize = 0,
1742 			.allowAsync               = false,
1743 			.checkEntitlement         = NULL,
1744 		},
1745 		[kIOTrackingLeaks] = {
1746 			.function                             = &IOTrackingMethodDispatched,
1747 			.checkScalarInputCount    = 0,
1748 			.checkStructureInputSize  = kIOUCVariableStructureSize,
1749 			.checkScalarOutputCount   = 0,
1750 			.checkStructureOutputSize = 0,
1751 			.allowAsync               = false,
1752 			.checkEntitlement         = NULL,
1753 		},
1754 	};
1755 
1756 	return dispatchExternalMethod(selector, args, dispatchArray, sizeof(dispatchArray) / sizeof(dispatchArray[0]), this, NULL);
1757 }
1758 
1759 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1760