xref: /xnu-10002.61.3/iokit/Kernel/IOUserClient.cpp (revision 0f4c859e951fba394238ab619495c4e1d54d0f34)
1 /*
2  * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <libkern/c++/OSKext.h>
30 #include <libkern/c++/OSSharedPtr.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51 #include <sys/code_signing.h>
52 
53 #include <mach/sdt.h>
54 #include <os/hash.h>
55 
56 #include <libkern/amfi/amfi.h>
57 
58 #if CONFIG_MACF
59 
60 extern "C" {
61 #include <security/mac_framework.h>
62 };
63 #include <sys/kauth.h>
64 
65 #define IOMACF_LOG 0
66 
67 #endif /* CONFIG_MACF */
68 
69 #include <IOKit/assert.h>
70 
71 #include "IOServicePrivate.h"
72 #include "IOKitKernelInternal.h"
73 
74 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
75 #define SCALAR32(x) ((uint32_t )x)
76 #define ARG32(x)    ((void *)(uintptr_t)SCALAR32(x))
77 #define REF64(x)    ((io_user_reference_t)((UInt64)(x)))
78 #define REF32(x)    ((int)(x))
79 
80 enum{
81 	kIOUCAsync0Flags          = 3ULL,
82 	kIOUCAsync64Flag          = 1ULL,
83 	kIOUCAsyncErrorLoggedFlag = 2ULL
84 };
85 
86 #if IOKITSTATS
87 
88 #define IOStatisticsRegisterCounter() \
89 do { \
90 	reserved->counter = IOStatistics::registerUserClient(this); \
91 } while (0)
92 
93 #define IOStatisticsUnregisterCounter() \
94 do { \
95 	if (reserved) \
96 	        IOStatistics::unregisterUserClient(reserved->counter); \
97 } while (0)
98 
99 #define IOStatisticsClientCall() \
100 do { \
101 	IOStatistics::countUserClientCall(client); \
102 } while (0)
103 
104 #else
105 
106 #define IOStatisticsRegisterCounter()
107 #define IOStatisticsUnregisterCounter()
108 #define IOStatisticsClientCall()
109 
110 #endif /* IOKITSTATS */
111 
112 #if DEVELOPMENT || DEBUG
113 
114 #define FAKE_STACK_FRAME(a)                                             \
115 	const void ** __frameptr;                                       \
116 	const void  * __retaddr;                                        \
117 	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0);   \
118 	__retaddr = __frameptr[1];                                      \
119 	__frameptr[1] = (a);
120 
121 #define FAKE_STACK_FRAME_END()                                          \
122 	__frameptr[1] = __retaddr;
123 
124 #else /* DEVELOPMENT || DEBUG */
125 
126 #define FAKE_STACK_FRAME(a)
127 #define FAKE_STACK_FRAME_END()
128 
129 #endif /* DEVELOPMENT || DEBUG */
130 
131 #define ASYNC_REF_COUNT         (sizeof(io_async_ref_t) / sizeof(natural_t))
132 #define ASYNC_REF64_COUNT       (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
133 
134 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
135 
136 extern "C" {
137 #include <mach/mach_traps.h>
138 #include <vm/vm_map.h>
139 } /* extern "C" */
140 
141 struct IOMachPortHashList;
142 
143 static_assert(IKOT_MAX_TYPE <= 255);
144 
145 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
146 
147 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// One IOMachPort exists per (object, port-type) pair; instances live in the
// gIOMachPortHash buckets and are looked up/mutated under gIOObjectPortLock.
class IOMachPort : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	// Make-send count handed out for 'port'; compared against the kernel's
	// no-senders notification count to decide when the port is truly dead.
	mach_port_mscount_t mscount;
	// Protects 'object' against concurrent clear/copy (see
	// iokit_copy_object_for_consumed_kobject and the release paths).
	IOLock      lock;
	// Linkage in the gIOMachPortHash bucket chain.
	SLIST_ENTRY(IOMachPort) link;
	// The kobject port representing 'object' to user space.
	ipc_port_t  port;
	// The represented object; NULLed (under 'lock') when the port is torn down.
	OSObject*   XNU_PTRAUTH_SIGNED_PTR("IOMachPort.object") object;

	// Allocates a new IOMachPort and its kobject port for obj; takes a
	// tagged retain on obj. Caller inserts the result into a bucket.
	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);

	// Hash-bucket selection for obj (type is currently unused by the hash).
	static IOMachPortHashList* bucketForObject(OSObject *obj,
	    ipc_kobject_type_t type);

	// Linear search of one bucket; caller must hold gIOObjectPortLock.
	static LIBKERN_RETURNS_NOT_RETAINED IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	// Handles a no-senders notification; returns true when the port was
	// destroyed (mscount caught up), false when more sends are outstanding.
	static bool noMoreSendersForObject( OSObject * obj,
	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
	// Drops the port for (obj, type) if present; not for IKOT_IOKIT_CONNECT.
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );

	// Makes a send right for obj's port in 'task's IPC space.
	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;
};
175 
176 #define super OSObject
177 OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)
178 
179 static IOLock *         gIOObjectPortLock;
180 IOLock *                gIOUserServerLock;
181 
182 SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;
183 
184 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
185 
186 SLIST_HEAD(IOMachPortHashList, IOMachPort);
187 
188 #if defined(XNU_TARGET_OS_OSX)
189 #define PORT_HASH_SIZE 4096
190 #else /* defined(!XNU_TARGET_OS_OSX) */
191 #define PORT_HASH_SIZE 256
192 #endif /* !defined(!XNU_TARGET_OS_OSX) */
193 
194 IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
195 
196 void
IOMachPortInitialize(void)197 IOMachPortInitialize(void)
198 {
199 	for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
200 		SLIST_INIT(&gIOMachPortHash[i]);
201 	}
202 }
203 
204 IOMachPortHashList*
bucketForObject(OSObject * obj,ipc_kobject_type_t type)205 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
206 {
207 	return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
208 }
209 
210 IOMachPort*
portForObjectInBucket(IOMachPortHashList * bucket,OSObject * obj,ipc_kobject_type_t type)211 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
212 {
213 	IOMachPort *machPort;
214 
215 	SLIST_FOREACH(machPort, bucket, link) {
216 		if (machPort->object == obj && iokit_port_type(machPort->port) == type) {
217 			return machPort;
218 		}
219 	}
220 	return NULL;
221 }
222 
223 IOMachPort*
withObjectAndType(OSObject * obj,ipc_kobject_type_t type)224 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
225 {
226 	IOMachPort *machPort = NULL;
227 
228 	machPort = new IOMachPort;
229 	if (__improbable(machPort && !machPort->init())) {
230 		OSSafeReleaseNULL(machPort);
231 		return NULL;
232 	}
233 
234 	machPort->object = obj;
235 	machPort->port = iokit_alloc_object_port(machPort, type);
236 	IOLockInlineInit(&machPort->lock);
237 
238 	obj->taggedRetain(OSTypeID(OSCollection));
239 	machPort->mscount++;
240 
241 	return machPort;
242 }
243 
// Handles a no-senders notification for (obj, type). Returns true when the
// port was actually destroyed; returns false (and updates *mscount) when
// more send rights were minted since the notification was generated, so the
// caller should re-arm the notification with the newer count.
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOMachPort *machPort = NULL;
	IOUserClient *uc;
	OSAction *action;
	bool destroyed = true;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Keep obj alive across the teardown below; the tagged release may drop
	// the last port-held reference.
	obj->retain();

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort) {
		// Destroyed only if no new send rights were made after the
		// notification's make-send count was captured.
		destroyed = (machPort->mscount <= *mscount);
		if (!destroyed) {
			// Stale notification: report the current count so the caller
			// can re-request no-senders at the right threshold.
			*mscount = machPort->mscount;
			lck_mtx_unlock(gIOObjectPortLock);
		} else {
			// Let user clients run their no-more-senders hook while the
			// port lock is still held (connect ports only).
			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
				uc->noMoreSenders();
			}
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);

			// Detach the kobject port and clear the back-pointer under the
			// per-port lock so concurrent object lookups see NULL.
			IOLockLock(&machPort->lock);
			iokit_remove_object_port(machPort->port, type);
			machPort->object = NULL;
			IOLockUnlock(&machPort->lock);

			lck_mtx_unlock(gIOObjectPortLock);

			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

			// Drop the reference taken on the object's behalf in
			// withObjectAndType().
			obj->taggedRelease(OSTypeID(OSCollection));
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}

	// DriverKit OSAction objects get an Aborted() upcall once their port dies.
	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
		action->Aborted();
	}

	if (IKOT_UEXT_OBJECT == type && IOUserServer::shouldLeakObjects()) {
		// Leak object (debugging aid controlled by IOUserServer).
		obj->retain();
	}

	obj->release();

	return destroyed;
}
300 
// Tears down the port for (obj, type) if one exists. Only valid for
// non-connect port types (connect ports go through destroyUserReferences /
// noMoreSendersForObject). Services can opt out via machPortHoldDestroy(),
// which defers destruction (see the terminated-notification path in
// IOServiceUserNotification::handler).
void
IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
	IOMachPort *machPort;
	IOService  *service;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort
	    && (type == IKOT_IOKIT_OBJECT)
	    && (service = OSDynamicCast(IOService, obj))
	    && !service->machPortHoldDestroy()) {
		// Keep obj alive while we drop the port's references below.
		obj->retain();
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);

		// Clear the object back-pointer under the per-port lock so racing
		// lookups observe NULL rather than a dying object.
		IOLockLock(&machPort->lock);
		iokit_remove_object_port(machPort->port, type);
		machPort->object = NULL;
		IOLockUnlock(&machPort->lock);

		lck_mtx_unlock(gIOObjectPortLock);

		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

		// Drop the port-held (tagged) reference, then our temporary one.
		obj->taggedRelease(OSTypeID(OSCollection));
		obj->release();
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}
}
337 
// Destroys the user-space visible ports for obj. The plain object port is
// released outright; the connect port is either destroyed or, for user
// clients with live memory mappings, re-pointed at the 'mappings' object so
// outstanding mappings keep the port (and their cleanup path) alive.
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	IOMachPort *machPort;
	bool        destroyPort;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	// panther, 3160200
	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

	obj->retain();
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
	IOMachPortHashList *mappingBucket = NULL;

	lck_mtx_lock(gIOObjectPortLock);

	// If the user client still has mappings, precompute the bucket the
	// re-targeted port will live in (hashed on the mappings object).
	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	if (uc && uc->mappings) {
		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
	}

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		lck_mtx_unlock(gIOObjectPortLock);
		goto end;
	}

	// Unhook the port from obj; the port-held reference on obj is dropped
	// here in either outcome below.
	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	obj->taggedRelease(OSTypeID(OSCollection));

	destroyPort = true;
	if (uc) {
		uc->noMoreSenders();
		if (uc->mappings) {
			// Hand the existing IOMachPort over to the mappings object:
			// retain mappings for the port, move the port into the
			// mappings' bucket, and swap the back-pointer under the
			// per-port lock. The port itself survives.
			uc->mappings->taggedRetain(OSTypeID(OSCollection));
			SLIST_INSERT_HEAD(mappingBucket, machPort, link);

			IOLockLock(&machPort->lock);
			machPort->object = uc->mappings;
			IOLockUnlock(&machPort->lock);

			lck_mtx_unlock(gIOObjectPortLock);

			OSSafeReleaseNULL(uc->mappings);
			destroyPort = false;
		}
	}

	if (destroyPort) {
		IOLockLock(&machPort->lock);
		iokit_remove_object_port(machPort->port, IKOT_IOKIT_CONNECT);
		machPort->object = NULL;
		IOLockUnlock(&machPort->lock);

		lck_mtx_unlock(gIOObjectPortLock);
		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
	}

end:
	OSSafeReleaseNULL(obj);
}
401 
402 mach_port_name_t
makeSendRightForTask(task_t task,io_object_t obj,ipc_kobject_type_t type)403 IOMachPort::makeSendRightForTask( task_t task,
404     io_object_t obj, ipc_kobject_type_t type )
405 {
406 	return iokit_make_send_right( task, obj, type );
407 }
408 
// Destructor: tears down the kobject port (if one was allocated) and the
// inline lock. The represented object was already released by the caller
// paths (noMoreSendersForObject / releasePortForObject).
void
IOMachPort::free( void )
{
	if (port) {
		iokit_destroy_object_port(port, iokit_port_type(port));
	}
	IOLockInlineDestroy(&lock);
	super::free();
}
418 
419 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
420 
421 static bool
IOTaskRegistryCompatibility(task_t task)422 IOTaskRegistryCompatibility(task_t task)
423 {
424 	return false;
425 }
426 
427 static void
IOTaskRegistryCompatibilityMatching(task_t task,OSDictionary * matching)428 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
429 {
430 	matching->setObject(gIOServiceNotificationUserKey, kOSBooleanTrue);
431 	if (!IOTaskRegistryCompatibility(task)) {
432 		return;
433 	}
434 	matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
435 }
436 
437 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
438 
OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )

// Factory wrapping an OSIterator for user-space consumption. Consumes the
// passed-in iterator reference in every case: on success it is owned by the
// returned object (released in free()), on failure it is released here.
IOUserIterator *
IOUserIterator::withIterator(OSIterator * iter)
{
	IOUserIterator * inst;

	if (!iter) {
		return NULL;
	}

	inst = new IOUserIterator;
	if (inst && !inst->init()) {
		inst->release();
		inst = NULL;
	}
	if (!inst) {
		// Failed to construct the wrapper; still consume the iterator.
		iter->release();
		return NULL;
	}
	inst->userIteratorObject = iter;

	return inst;
}
463 
464 bool
init(void)465 IOUserIterator::init( void )
466 {
467 	if (!OSObject::init()) {
468 		return false;
469 	}
470 
471 	IOLockInlineInit(&lock);
472 	return true;
473 }
474 
// Destructor: drops the wrapped iterator (if any) and the inline lock.
void
IOUserIterator::free()
{
	if (userIteratorObject) {
		userIteratorObject->release();
	}
	IOLockInlineDestroy(&lock);
	OSObject::free();
}
484 
485 void
reset()486 IOUserIterator::reset()
487 {
488 	IOLockLock(&lock);
489 	assert(OSDynamicCast(OSIterator, userIteratorObject));
490 	((OSIterator *)userIteratorObject)->reset();
491 	IOLockUnlock(&lock);
492 }
493 
494 bool
isValid()495 IOUserIterator::isValid()
496 {
497 	bool ret;
498 
499 	IOLockLock(&lock);
500 	assert(OSDynamicCast(OSIterator, userIteratorObject));
501 	ret = ((OSIterator *)userIteratorObject)->isValid();
502 	IOLockUnlock(&lock);
503 
504 	return ret;
505 }
506 
// Unretained iteration is not supported on user iterators; callers must use
// copyNextObject() so the returned reference is owned. Asserts on misuse.
OSObject *
IOUserIterator::getNextObject()
{
	assert(false);
	return NULL;
}
513 
514 OSObject *
copyNextObject()515 IOUserIterator::copyNextObject()
516 {
517 	OSObject * ret = NULL;
518 
519 	IOLockLock(&lock);
520 	if (userIteratorObject) {
521 		ret = ((OSIterator *)userIteratorObject)->getNextObject();
522 		if (ret) {
523 			ret->retain();
524 		}
525 	}
526 	IOLockUnlock(&lock);
527 
528 	return ret;
529 }
530 
531 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
532 extern "C" {
533 // functions called from osfmk/device/iokit_rpc.c
534 
// Fills 'desc' with a human-readable description of the object behind an
// IOKit port, for kernel debugging tools. Registry entries include their
// entry ID; on DEVELOPMENT/DEBUG builds, service notifications include the
// serialized matching dictionary.
void
iokit_port_object_description(io_object_t obj, kobject_description_t desc)
{
	IORegistryEntry    * regEntry;
	IOUserNotification * __unused noti;
	_IOServiceNotifier * __unused serviceNoti;
	OSSerialize        * __unused s;
	OSDictionary       * __unused matching = NULL;

	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
#if DEVELOPMENT || DEBUG
	} else if ((noti = OSDynamicCast(IOUserNotification, obj))) {
		// serviceNoti->matching may become NULL if the port gets a no-senders notification, so we have to lock gIOObjectPortLock
		IOLockLock(gIOObjectPortLock);
		serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->userIteratorObject);
		if (serviceNoti && (matching = serviceNoti->matching)) {
			matching->retain();
		}
		IOLockUnlock(gIOObjectPortLock);

		// Serialize outside the lock; 'matching' is kept alive by the
		// retain taken above.
		if (matching) {
			s = OSSerialize::withCapacity((unsigned int) page_size);
			if (s && matching->serialize(s)) {
				snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
			}
			OSSafeReleaseNULL(s);
			OSSafeReleaseNULL(matching);
		}
#endif /* DEVELOPMENT || DEBUG */
	} else {
		// Fallback: just the class name.
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
	}
}
569 
570 // FIXME: Implementation of these functions are hidden from the static analyzer.
571 // As for now, the analyzer doesn't consistently support wrapper functions
572 // for retain and release.
573 #ifndef __clang_analyzer__
574 void
iokit_add_reference(io_object_t obj,natural_t type)575 iokit_add_reference( io_object_t obj, natural_t type )
576 {
577 	if (!obj) {
578 		return;
579 	}
580 	obj->retain();
581 }
582 
583 void
iokit_remove_reference(io_object_t obj)584 iokit_remove_reference( io_object_t obj )
585 {
586 	if (obj) {
587 		obj->release();
588 	}
589 }
590 #endif // __clang_analyzer__
591 
592 void
iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj)593 iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj )
594 {
595 	if (!obj) {
596 		return;
597 	}
598 	obj->release();
599 }
600 
// Locking modes for IOUserClient::ipcEnter/ipcExit: whether the client's
// rw-lock is taken, and in which mode, around an IPC-driven call.
enum {
	kIPCLockNone  = 0,
	kIPCLockRead  = 1,
	kIPCLockWrite = 2
};
606 
607 void
ipcEnter(int locking)608 IOUserClient::ipcEnter(int locking)
609 {
610 	switch (locking) {
611 	case kIPCLockWrite:
612 		IORWLockWrite(&lock);
613 		break;
614 	case kIPCLockRead:
615 		IORWLockRead(&lock);
616 		break;
617 	case kIPCLockNone:
618 		break;
619 	default:
620 		panic("ipcEnter");
621 	}
622 
623 	OSIncrementAtomic(&__ipc);
624 }
625 
// Leaves an IPC-driven call: drops the in-flight counter, and if this was
// the last in-flight call on an inactive client whose finalization was
// deferred (see finalizeUserReferences), schedules that finalization now.
// Finally releases the rw-lock taken in ipcEnter().
void
IOUserClient::ipcExit(int locking)
{
	bool finalize = false;

	assert(__ipc);
	// OSDecrementAtomic returns the previous value; 1 means we were the
	// last in-flight IPC on this client.
	if (1 == OSDecrementAtomic(&__ipc) && isInactive()) {
		IOLockLock(gIOObjectPortLock);
		// Claim the deferred-finalize flag under the port lock so only
		// one exiting thread performs the finalization.
		if ((finalize = __ipcFinal)) {
			__ipcFinal = false;
		}
		IOLockUnlock(gIOObjectPortLock);
		if (finalize) {
			scheduleFinalize(true);
		}
	}
	switch (locking) {
	case kIPCLockWrite:
	case kIPCLockRead:
		IORWLockUnlock(&lock);
		break;
	case kIPCLockNone:
		break;
	default:
		panic("ipcExit");
	}
}
653 
// Retains the IOMachPort kobject backing a port (IPC layer callback).
void
iokit_kobject_retain(io_kobject_t machPort)
{
	assert(OSDynamicCast(IOMachPort, machPort));
	machPort->retain();
}
660 
// Resolves a kobject to the IOKit object it currently represents, taking a
// reference on that object under the per-port lock (so a concurrent port
// teardown that NULLs 'object' cannot race the retain). Consumes the
// caller's reference on machPort. Returns NULL if the port was torn down.
io_object_t
iokit_copy_object_for_consumed_kobject(LIBKERN_CONSUMED io_kobject_t machPort, natural_t type)
{
	io_object_t  result;

	assert(OSDynamicCast(IOMachPort, machPort));

	IOLockLock(&machPort->lock);
	result = machPort->object;
	if (result) {
		iokit_add_reference(result, type);
	}
	IOLockUnlock(&machPort->lock);
	machPort->release();
	return result;
}
677 
678 bool
finalizeUserReferences(OSObject * obj)679 IOUserClient::finalizeUserReferences(OSObject * obj)
680 {
681 	IOUserClient * uc;
682 	bool           ok = true;
683 
684 	if ((uc = OSDynamicCast(IOUserClient, obj))) {
685 		IOLockLock(gIOObjectPortLock);
686 		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
687 			ok = false;
688 		}
689 		IOLockUnlock(gIOObjectPortLock);
690 	}
691 	return ok;
692 }
693 
// Returns (with a port reference) the kobject port representing obj for the
// given type, creating the IOMachPort on first use. Each call bumps mscount
// to account for the send right the caller will mint. Optionally returns the
// backing kobject via *kobj (unretained; valid under the returned port ref).
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type, ipc_kobject_t * kobj )
{
	IOMachPort *machPort = NULL;
	ipc_port_t   port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		// First port request for (obj, type): create and link a new entry.
		// withObjectAndType() starts mscount at 1 for this caller.
		machPort = IOMachPort::withObjectAndType(obj, type);
		if (__improbable(machPort == NULL)) {
			goto end;
		}
		SLIST_INSERT_HEAD(bucket, machPort, link);
	} else {
		// Existing port: account for another make-send.
		machPort->mscount++;
	}

	iokit_retain_port(machPort->port);
	port = machPort->port;

end:
	// *kobj may be set to NULL on allocation failure.
	if (kobj) {
		*kobj = machPort;
	}
	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
727 
// No-senders handler invoked by the IPC layer when the last send right for
// an IOKit port dies. Destroys the port if the make-send count caught up
// (otherwise returns kIOReturnNotReady so the notification is re-armed),
// then runs the per-type death action: clientDied() for connections,
// taskDied()/notification teardown for objects, cancel() for check-in
// tokens.
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOUserClient *      client;
	IOMemoryMap *       map;
	IOUserNotification * notify;
	IOUserServerCheckInToken * token;

	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
		// Newer send rights exist; *mscount was updated for re-arming.
		return kIOReturnNotReady;
	}

	switch (type) {
	case IKOT_IOKIT_CONNECT:
		if ((client = OSDynamicCast( IOUserClient, obj ))) {
			IOStatisticsClientCall();
			// clientDied() runs under the client's write lock, mutually
			// exclusive with in-flight external methods.
			IORWLockWrite(&client->lock);
			client->clientDied();
			IORWLockUnlock(&client->lock);
		}
		break;
	case IKOT_IOKIT_OBJECT:
		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
			map->taskDied();
		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
			notify->setNotification( NULL );
		}
		break;
	case IKOT_IOKIT_IDENT:
		if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
			token->cancel();
		}
		break;
	}

	return kIOReturnSuccess;
}
766 };      /* extern "C" */
767 
768 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
769 
// Delivers service publish/terminate notifications to a user-space port.
// Matched services accumulate in 'newSet'; a single "ping" message wakes the
// client, which then drains the set via copyNextObject().
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	// Kernel-processed portion of the ping message (header only).
	struct PingMsgKdata {
		mach_msg_header_t               msgHdr;
	};
	// User payload: the OSNotificationHeader64 carrying the async reference.
	struct PingMsgUdata {
		OSNotificationHeader64          notifyHeader;
	};

	// Cap on queued services; beyond this, new matches are dropped.
	enum { kMaxOutstanding = 1024 };

	ipc_port_t          remotePort;       // client's notification port (send right)
	void                *msgReference;    // client's async reference blob
	mach_msg_size_t     msgReferenceSize; // rounded size of msgReference
	natural_t           msgType;          // kIOService*NotificationType
	OSArray     *       newSet;           // services pending pickup by the client
	bool                armed;            // true when the next match should ping
	bool                ipcLogged;        // rate-limits send-failure logging to once

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	// Drops the remote port reference without sending (port already dead).
	void invalidatePort(void);

	// IOService matching callback trampoline (retains 'this' across handler).
	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
806 
// Delivers IOService messages (e.g. power/interest notifications) to a
// user-space port, forwarding the message argument inline in the payload.
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	// Kernel-processed portion: header plus one port descriptor for the
	// provider object's port.
	struct PingMsgKdata {
		mach_msg_header_t               msgHdr;
		mach_msg_body_t                 msgBody;
		mach_msg_port_descriptor_t      ports[1];
	};
	// User payload; packed so the variable-length content can follow the
	// header without padding.
	struct PingMsgUdata {
		OSNotificationHeader64          notifyHeader __attribute__ ((packed));
	};

	ipc_port_t          remotePort;       // client's notification port (send right)
	void                *msgReference;    // client's async reference blob
	mach_msg_size_t     msgReferenceSize; // rounded size of msgReference
	mach_msg_size_t     msgExtraSize;     // extra payload space beyond the header
	natural_t           msgType;          // notification message type
	uint8_t             clientIs64;       // client address-width (affects payload layout)
	int                 owningPID;        // pid that registered the notification
	bool                ipcLogged;        // rate-limits send-failure logging to once

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	// Drops the remote port reference without sending (port already dead).
	void invalidatePort(void);

	// IOService interest callback trampoline.
	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
848 
849 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
850 
851 #undef super
852 #define super IOUserIterator
853 OSDefineMetaClass( IOUserNotification, IOUserIterator );
854 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
855 
856 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
857 
// Destructor. By this point setNotification(NULL) must already have removed
// the IONotifier; the debug build asserts that invariant under the port lock.
void
IOUserNotification::free( void )
{
#if DEVELOPMENT || DEBUG
	IOLockLock( gIOObjectPortLock);

	assert(userIteratorObject == NULL);

	IOLockUnlock( gIOObjectPortLock);
#endif /* DEVELOPMENT || DEBUG */

	super::free();
}
871 
872 
// Installs (or clears, with notify == NULL) the IONotifier backing this
// user notification, balancing this object's lifetime against the
// notifier's: the object holds an extra retain while a notifier is
// installed, dropped when the notifier is cleared.
void
IOUserNotification::setNotification( IONotifier * notify )
{
	OSObject * previousNotify;

	/*
	 * We must retain this object here before proceeding.
	 * Two threads may race in setNotification(). If one thread sets a new notifier while the
	 * other thread sets the notifier to NULL, it is possible for the second thread to call release()
	 * before the first thread calls retain(). Without the retain here, this thread interleaving
	 * would cause the object to get released and freed before it is retained by the first thread,
	 * which is a UaF.
	 */
	retain();

	IOLockLock( gIOObjectPortLock);

	// Swap the stored notifier under the port lock; remove() below runs
	// outside the lock on the previous one.
	previousNotify = userIteratorObject;
	userIteratorObject = notify;

	IOLockUnlock( gIOObjectPortLock);

	if (previousNotify) {
		assert(OSDynamicCast(IONotifier, previousNotify));
		((IONotifier *)previousNotify)->remove();

		if (notify == NULL) {
			release();
		}
	} else if (notify) {
		// new IONotifier, retain the object. release() will happen in setNotification(NULL)
		retain();
	}

	release(); // paired with retain() at beginning of this method
}
909 
// Notifications are not resettable; overrides the base iterator reset with
// a no-op (the "?" is inherited from the original author).
void
IOUserNotification::reset()
{
	// ?
}
915 
// A notification "iterator" is always considered valid.
bool
IOUserNotification::isValid()
{
	return true;
}
921 
922 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
923 
924 #undef super
925 #define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification,IOUserNotification)926 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
927 
928 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
929 
// Initializes the notification: allocates the pending-service array, copies
// the caller's async reference (bounded by OSAsyncReference64), and records
// the reply port and message type. On failure, free() cleans up whatever
// was allocated. Note: 'port' is adopted without an extra reference; free()
// releases it.
bool
IOServiceUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize,
    bool clientIs64 )
{
	if (!super::init()) {
		return false;
	}

	newSet = OSArray::withCapacity( 1 );
	if (!newSet) {
		return false;
	}

	// Reject over-sized references before allocating the copy.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	// Round up to mach message alignment; the blob is embedded in the ping
	// message payload later.
	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
960 
// Clears the remote port so free() won't release a send right that the IPC
// layer has already torn down.
void
IOServiceUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
966 
// Destructor: releases the client's send right (unless invalidated), the
// async reference copy, and any services still pending pickup.
void
IOServiceUserNotification::free( void )
{
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);
	OSSafeReleaseNULL(newSet);

	super::free();
}
978 
979 bool
_handler(void * target,void * ref,IOService * newService,IONotifier * notifier)980 IOServiceUserNotification::_handler( void * target,
981     void * ref, IOService * newService, IONotifier * notifier )
982 {
983 	IOServiceUserNotification * targetObj = (IOServiceUserNotification *)target;
984 	bool ret;
985 
986 	targetObj->retain();
987 	ret = targetObj->handler( ref, newService );
988 	targetObj->release();
989 	return ret;
990 }
991 
// Matching callback: queues newService for pickup and, when this is the
// first service queued while armed, sends one "ping" message to the
// client's port. The client drains via copyNextObject(), which re-arms.
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	unsigned int        count;
	kern_return_t       kr;
	ipc_port_t          port = NULL;
	bool                sendPing = false;
	mach_msg_size_t     msgSize, payloadSize;

	IOTakeLock( &lock );

	count = newSet->getCount();
	// Drop matches beyond kMaxOutstanding; the client is too far behind.
	if (count < kMaxOutstanding) {
		newSet->setObject( newService );
		// Ping only on the 0 -> 1 transition while armed; disarm so at
		// most one ping is outstanding until the client drains.
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( &lock );

	// For termination notifications, hold off destroying the service's
	// object port so the client can still look the service up while
	// processing the message (see IOMachPort::releasePortForObject).
	if (kIOServiceTerminatedNotificationType == msgType) {
		lck_mtx_lock(gIOObjectPortLock);
		newService->setMachPortHoldDestroy(true);
		lck_mtx_unlock(gIOObjectPortLock);
	}

	if (sendPing) {
		// Local port identifies this notification object to the client.
		port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );

		// Payload is the notify header with the (variable-size) async
		// reference substituted for the maximal OSAsyncReference64 field.
		payloadSize = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		msgSize = (mach_msg_size_t)(sizeof(PingMsgKdata) + payloadSize);

		// The builder block fills the message in place; no descriptors.
		kr = kernel_mach_msg_send_with_builder_internal(0, payloadSize,
		    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
		    MACH_MSG_TIMEOUT_NONE, NULL,
		    ^(mach_msg_header_t *hdr, __assert_only mach_msg_descriptor_t *descs, void *payload){
			PingMsgUdata *udata = (PingMsgUdata *)payload;

			hdr->msgh_remote_port    = remotePort;
			hdr->msgh_local_port     = port;
			hdr->msgh_bits           = MACH_MSGH_BITS(
				MACH_MSG_TYPE_COPY_SEND /*remote*/,
				MACH_MSG_TYPE_MAKE_SEND /*local*/);
			hdr->msgh_size           = msgSize;
			hdr->msgh_id             = kOSNotificationMessageID;

			assert(descs == NULL);
			/* End of kernel processed data */

			udata->notifyHeader.size          = 0;
			udata->notifyHeader.type          = msgType;

			assert((char *)udata->notifyHeader.reference + msgReferenceSize <= (char *)payload + payloadSize);
			bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
		});

		if (port) {
			iokit_release_port( port );
		}

		// Log the first send failure only, to avoid log spam.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
// Unretained iteration is not supported; use copyNextObject(). Asserts on misuse.
OSObject *
IOServiceUserNotification::getNextObject()
{
	assert(false);
	return NULL;
}
1068 
/*
 * Pop the most recently queued matched service and return it retained.
 * Returns NULL when the set is empty, in which case the notification is
 * re-armed so the next match will send another ping message.
 */
OSObject *
IOServiceUserNotification::copyNextObject()
{
	unsigned int        count;
	OSObject *          result;

	IOLockLock(&lock);

	count = newSet->getCount();
	if (count) {
		// Take the last element; the extra retain is the caller's reference.
		result = newSet->getObject( count - 1 );
		result->retain();
		newSet->removeObject( count - 1);
	} else {
		result = NULL;
		// Drained: re-arm so a future match pings userspace again.
		armed = true;
	}

	IOLockUnlock(&lock);

	return result;
}
1091 
1092 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1093 
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification,IOUserNotification)1094 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1095 
1096 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1097 
/*
 * Initialize a message-based user notification.
 * port          - send right for the client's notification port; ownership of
 *                 the right is taken by this object (released in free()).
 * type          - notification message type placed in the outgoing header.
 * reference     - opaque client reference blob copied into each message.
 * referenceSize - size of the blob; must not exceed OSAsyncReference64.
 * client64      - true when the receiving task is 64-bit.
 * Returns false on oversize reference or allocation failure.
 */
bool
IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, bool client64 )
{
	if (!super::init()) {
		return false;
	}

	// Reject references larger than the wire format can carry.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	clientIs64 = client64;

	owningPID = proc_selfpid();

	// Round up to mach message alignment; the zeroed tail pads the message.
	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
1126 
// Drop our record of the client's port so no further messages are sent.
// Note this does not release the send right; free() only releases a
// non-NULL remotePort, so the caller is expected to own the teardown.
void
IOServiceMessageUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
1132 
// Release the client's port send right (unless invalidatePort() cleared it)
// and the copied reference blob, then chain to the superclass.
void
IOServiceMessageUserNotification::free( void )
{
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);

	super::free();
}
1143 
/*
 * Static trampoline matching the IOService interest-notification callback
 * signature; forwards to the instance handler(). The retain/release pair
 * keeps the object alive across the call even if the notification is
 * being torn down concurrently.
 */
IOReturn
IOServiceMessageUserNotification::_handler( void * target, void * ref,
    UInt32 messageType, IOService * provider,
    void * argument, vm_size_t argSize )
{
	IOServiceMessageUserNotification * targetObj = (IOServiceMessageUserNotification *)target;
	IOReturn ret;

	targetObj->retain();
	ret = targetObj->handler(
		ref, messageType, provider, argument, argSize);
	targetObj->release();
	return ret;
}
1158 
/*
 * Deliver an interest-notification message to the owning user client.
 * Builds a mach message containing the notify header, the client's saved
 * reference blob, and an IOServiceInterestContent64 payload, then sends it
 * to remotePort with a port descriptor for the provider.
 *
 * messageArgument/callerArgSize: when callerArgSize is 0 the argument is a
 * single pointer-sized value; otherwise it is an inline blob (clamped to
 * kIOUserNotifyMaxMessageSize).
 */
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	kern_return_t                kr;
	vm_size_t                    argSize;
	mach_msg_size_t              thisMsgSize;
	ipc_port_t                   thisPort, providerPort;

	// Special in-kernel query: return the owning pid instead of messaging.
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	if (callerArgSize == 0) {
		// Argument is a scalar; size depends on the client's pointer width.
		if (clientIs64) {
			argSize = sizeof(io_user_reference_t);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		// Inline payload: clamp to the maximum the wire format allows.
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions
	// The (rounded) argument size is encoded into the notification type so
	// userspace can recover the original payload length.
	natural_t type = msgType;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	mach_msg_size_t extraSize = kIOUserNotifyMaxMessageSize + sizeof(IOServiceInterestContent64);
	mach_msg_size_t msgSize = (mach_msg_size_t) (sizeof(PingMsgKdata) +
	    sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize);

	// Overflow-checked total size: header + reference + interest content.
	if (os_add3_overflow(msgSize, offsetof(IOServiceInterestContent64, messageArgument), argSize, &thisMsgSize)) {
		return kIOReturnBadArgument;
	}
	mach_msg_size_t payloadSize = thisMsgSize - sizeof(PingMsgKdata);

	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT, NULL );
	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );

	// One port descriptor (the provider); the block fills in the message
	// in place to avoid an intermediate allocation.
	kr = kernel_mach_msg_send_with_builder_internal(1, payloadSize,
	    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
	    MACH_MSG_TIMEOUT_NONE, NULL,
	    ^(mach_msg_header_t *hdr, mach_msg_descriptor_t *descs, void *payload){
		mach_msg_port_descriptor_t *port_desc = (mach_msg_port_descriptor_t *)descs;
		PingMsgUdata *udata = (PingMsgUdata *)payload;
		IOServiceInterestContent64 * data;
		mach_msg_size_t dataOffset;

		hdr->msgh_remote_port    = remotePort;
		hdr->msgh_local_port     = thisPort;
		hdr->msgh_bits           = MACH_MSGH_BITS_COMPLEX
		|  MACH_MSGH_BITS(
			MACH_MSG_TYPE_COPY_SEND /*remote*/,
			MACH_MSG_TYPE_MAKE_SEND /*local*/);
		hdr->msgh_size           = thisMsgSize;
		hdr->msgh_id             = kOSNotificationMessageID;

		/* body.msgh_descriptor_count is set automatically after the closure */

		port_desc[0].name              = providerPort;
		port_desc[0].disposition       = MACH_MSG_TYPE_MAKE_SEND;
		port_desc[0].type              = MACH_MSG_PORT_DESCRIPTOR;
		/* End of kernel processed data */

		udata->notifyHeader.size          = extraSize;
		udata->notifyHeader.type          = type;
		bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );

		/* data is after msgReference */
		dataOffset = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		data = (IOServiceInterestContent64 *) (((uint8_t *) udata) + dataOffset);
		data->messageType = messageType;

		if (callerArgSize == 0) {
		        assert((char *)data->messageArgument + argSize <= (char *)payload + payloadSize);
		        data->messageArgument[0] = (io_user_reference_t) messageArgument;
		        if (!clientIs64) {
		                // 32-bit clients read the value from the high word too.
		                data->messageArgument[0] |= (data->messageArgument[0] << 32);
			}
		} else {
		        assert((char *)data->messageArgument + callerArgSize <= (char *)payload + payloadSize);
		        bcopy(messageArgument, data->messageArgument, callerArgSize);
		}
	});

	if (thisPort) {
		iokit_release_port( thisPort );
	}
	if (providerPort) {
		iokit_release_port( providerPort );
	}

	// Propagate buffer exhaustion so callers can retry / fail distinctly.
	if (kr == MACH_SEND_NO_BUFFER) {
		return kIOReturnNoMemory;
	}

	// Log other IPC failures once per notification object to avoid spam.
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1269 
// Message notifications carry no object queue; iteration yields nothing.
OSObject *
IOServiceMessageUserNotification::getNextObject()
{
	return NULL;
}
1275 
// Message notifications carry no object queue; nothing to copy out.
OSObject *
IOServiceMessageUserNotification::copyNextObject()
{
	return NULL;
}
1281 
1282 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1283 
1284 #undef super
1285 #define super IOService
1286 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1287 
1288 IOLock       * gIOUserClientOwnersLock;
1289 
1290 static_assert(offsetof(IOUserClient, __opaque_end) -
1291     offsetof(IOUserClient, __opaque_start) == sizeof(void *) * 9,
1292     "ABI check: Opaque ivars for IOUserClient must be 9 void * big");
1293 
1294 void
initialize(void)1295 IOUserClient::initialize( void )
1296 {
1297 	gIOObjectPortLock       = IOLockAlloc();
1298 	gIOUserClientOwnersLock = IOLockAlloc();
1299 	gIOUserServerLock       = IOLockAlloc();
1300 	assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1301 
1302 #if IOTRACKING
1303 	IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1304 	IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1305 	IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1306 	IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1307 	IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
1308 #endif /* IOTRACKING */
1309 }
1310 
/*
 * Legacy 32-bit variant: pack the wake port, callback and refcon into an
 * OSAsyncReference. Not valid on LP64 kernels, where it panics.
 */
void
#if __LP64__
__attribute__((__noreturn__))
#endif
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
#if __LP64__
	panic("setAsyncReference not valid for 64b");
#else
	// Preserve the existing flag bits in the reserved slot.
	asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
#endif
}
1328 
/*
 * Pack the wake port, user-space callback address and refcon into a
 * 64-bit async reference, preserving any existing flag bits in the
 * reserved slot.
 */
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon)
{
	asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}
1339 
/*
 * Task-aware variant: additionally tags the reference with kIOUCAsync64Flag
 * when the target task has a 64-bit address map, so delivery code knows
 * how to format the reply.
 */
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
{
	setAsyncReference64(asyncRef, wakePort, callback, refcon);
	if (vm_map_is_64bit(get_task_map(task))) {
		asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
	}
}
1350 
1351 static OSDictionary *
CopyConsoleUser(UInt32 uid)1352 CopyConsoleUser(UInt32 uid)
1353 {
1354 	OSArray * array;
1355 	OSDictionary * user = NULL;
1356 
1357 	OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1358 	if ((array = OSDynamicCast(OSArray, ioProperty))) {
1359 		for (unsigned int idx = 0;
1360 		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1361 		    idx++) {
1362 			OSNumber * num;
1363 
1364 			if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1365 			    && (uid == num->unsigned32BitValue())) {
1366 				user->retain();
1367 				break;
1368 			}
1369 		}
1370 	}
1371 	OSSafeReleaseNULL(ioProperty);
1372 	return user;
1373 }
1374 
1375 static OSDictionary *
CopyUserOnConsole(void)1376 CopyUserOnConsole(void)
1377 {
1378 	OSArray * array;
1379 	OSDictionary * user = NULL;
1380 
1381 	OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1382 	if ((array = OSDynamicCast(OSArray, ioProperty))) {
1383 		for (unsigned int idx = 0;
1384 		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1385 		    idx++) {
1386 			if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1387 				user->retain();
1388 				break;
1389 			}
1390 		}
1391 	}
1392 	OSSafeReleaseNULL(ioProperty);
1393 	return user;
1394 }
1395 
1396 IOReturn
clientHasAuthorization(task_t task,IOService * service)1397 IOUserClient::clientHasAuthorization( task_t task,
1398     IOService * service )
1399 {
1400 	proc_t p;
1401 
1402 	p = (proc_t) get_bsdtask_info(task);
1403 	if (p) {
1404 		uint64_t authorizationID;
1405 
1406 		authorizationID = proc_uniqueid(p);
1407 		if (authorizationID) {
1408 			if (service->getAuthorizationID() == authorizationID) {
1409 				return kIOReturnSuccess;
1410 			}
1411 		}
1412 	}
1413 
1414 	return kIOReturnNotPermitted;
1415 }
1416 
/*
 * Check a named privilege for the caller identified by securityToken.
 * securityToken is either a task_t, or for the secure-console-process
 * privilege an IOUCProcessToken wrapping the task and pid.
 * Returns kIOReturnSuccess, kIOReturnNotPrivileged, or
 * kIOReturnUnsupported for unknown privilege names.
 */
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	kern_return_t           kr;
	security_token_t        token;
	mach_msg_type_number_t  count;
	task_t                  task;
	OSDictionary *          user;
	bool                    secureConsole;


	// Foreground: granted unless the current task is GPU-denied.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console session: caller's audit session must match the session of
	// the user currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t       p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// Secure console carries the task inside an IOUCProcessToken.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	// Remaining privileges are decided from the task's security token
	// (val[0] is the effective uid).
	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator: euid must be root (0).
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: euid must belong to some console session.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: euid's session must be on the console, and for
		// secure console the secure-input pid must match the caller.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1511 
1512 OSDictionary *
copyClientEntitlements(task_t task)1513 IOUserClient::copyClientEntitlements(task_t task)
1514 {
1515 	proc_t p = NULL;
1516 	pid_t pid = 0;
1517 	OSDictionary *entitlements = NULL;
1518 
1519 	p = (proc_t)get_bsdtask_info(task);
1520 	if (p == NULL) {
1521 		return NULL;
1522 	}
1523 	pid = proc_pid(p);
1524 
1525 	if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1526 		if (entitlements) {
1527 			return entitlements;
1528 		}
1529 	}
1530 
1531 	// If the above fails, thats it
1532 	return NULL;
1533 }
1534 
1535 OSDictionary *
copyClientEntitlementsVnode(vnode_t vnode,off_t offset)1536 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1537 {
1538 	OSDictionary *entitlements = NULL;
1539 
1540 	if (cs_entitlements_dictionary_copy_vnode(vnode, offset, (void**)&entitlements) != 0) {
1541 		return NULL;
1542 	}
1543 	return entitlements;
1544 }
1545 
/*
 * Copy a single named entitlement value for 'task' (current task when NULL).
 * Returns a retained OSObject, or NULL when the task is the kernel task,
 * the entitlement name is NULL, or AMFI cannot produce a value.
 */
OSObject *
IOUserClient::copyClientEntitlement( task_t task,
    const char * entitlement )
{
	void *entitlement_object = NULL;

	if (task == NULL) {
		task = current_task();
	}

	/* Validate input arguments */
	if (task == kernel_task || entitlement == NULL) {
		return NULL;
	}
	proc_t proc = (proc_t)get_bsdtask_info(task);

	// Delegated to AMFI, which owns entitlement lookup and retains the
	// returned object on our behalf.
	kern_return_t ret = amfi->OSEntitlements.copyEntitlementAsOSObjectWithProc(
		proc,
		entitlement,
		&entitlement_object);

	if (ret != KERN_SUCCESS) {
		return NULL;
	}
	assert(entitlement_object != NULL);

	return (OSObject*)entitlement_object;
}
1574 
1575 OSObject *
copyClientEntitlementVnode(struct vnode * vnode,off_t offset,const char * entitlement)1576 IOUserClient::copyClientEntitlementVnode(
1577 	struct vnode *vnode,
1578 	off_t offset,
1579 	const char *entitlement)
1580 {
1581 	OSDictionary *entitlements;
1582 	OSObject *value;
1583 
1584 	entitlements = copyClientEntitlementsVnode(vnode, offset);
1585 	if (entitlements == NULL) {
1586 		return NULL;
1587 	}
1588 
1589 	/* Fetch the entitlement value from the dictionary. */
1590 	value = entitlements->getObject(entitlement);
1591 	if (value != NULL) {
1592 		value->retain();
1593 	}
1594 
1595 	entitlements->release();
1596 	return value;
1597 }
1598 
// Default init: run superclass init (skipped when a property table already
// exists) then allocate the expansion data via reserve().
bool
IOUserClient::init()
{
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1608 
// Dictionary init: same pattern as init(), passing the caller's property
// dictionary through to the superclass.
bool
IOUserClient::init(OSDictionary * dictionary)
{
	if (getPropertyTable() || super::init(dictionary)) {
		return reserve();
	}

	return false;
}
1618 
/*
 * Base initWithTask: the owning task, security id and connection type are
 * accepted but unused here; subclasses override to record them. Only runs
 * generic initialization and reserve().
 */
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type )
{
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1630 
/*
 * initWithTask variant taking a property dictionary. Note the deliberate
 * non-short-circuit '&=': initWithTask() runs (and reserve() allocates)
 * even if super::init() failed, preserving long-standing behavior.
 */
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type,
    OSDictionary * properties )
{
	bool ok;

	ok = super::init( properties );
	ok &= initWithTask( owningTask, securityID, type );

	return ok;
}
1644 
/*
 * Allocate the expansion data (if not already present) and initialize the
 * per-instance locks and statistics counter. Always returns true; kernel
 * typed allocation does not fail here.
 */
bool
IOUserClient::reserve()
{
	if (!reserved) {
		reserved = IOMallocType(ExpansionData);
	}
	setTerminateDefer(NULL, true);
	IOStatisticsRegisterCounter();
	IORWLockInlineInit(&lock);
	IOLockInlineInit(&filterLock);

	return true;
}
1658 
// Links one owning task to one IOUserClient. Each record is threaded onto
// two queues: the task's list of user clients (taskLink) and the user
// client's list of owners (ucLink). Protected by gIOUserClientOwnersLock.
struct IOUserClientOwner {
	task_t         task;       // owning task
	queue_chain_t  taskLink;   // linkage on task_io_user_clients(task)
	IOUserClient * uc;         // owned user client
	queue_chain_t  ucLink;     // linkage on uc->owners
};
1665 
/*
 * Record 'task' as an owner of this user client (idempotent). Lazily
 * initializes the owners queue on first use, and links a new owner record
 * onto both this client's and the task's lists. Propagates the
 * messageAppSuspended preference to the task. Always returns success.
 */
IOReturn
IOUserClient::registerOwner(task_t task)
{
	IOUserClientOwner * owner;
	IOReturn            ret;
	bool                newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	// owners.next == NULL marks a never-initialized queue head.
	if (!owners.next) {
		queue_init(&owners);
	} else {
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			// Already registered for this task.
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		owner = IOMallocType(IOUserClientOwner);

		owner->task = task;
		owner->uc   = this;
		queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
		queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
		if (messageAppSuspended) {
			task_set_message_app_suspended(task, true);
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1706 
/*
 * Called when the last send right to this client's port goes away: detach
 * every owner record from both this client and its task, and recompute
 * each affected task's app-suspended-messaging flag from its remaining
 * user clients. Resets the owners queue head to the uninitialized state.
 */
void
IOUserClient::noMoreSenders(void)
{
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t      * taskque;
	bool                hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			// The task keeps the flag only if another of its clients wants it.
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IOFreeType(owner, IOUserClientOwner);
		}
		// Mark the queue uninitialized again (see registerOwner()).
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1738 
1739 
/*
 * Called by the task layer when a task's app-suspended state changes.
 * Collects (under the owners lock) every user client of the task that
 * opted into kIOMessageTaskAppSuspendedChange, then delivers the message
 * outside the lock to avoid calling driver code while holding it.
 */
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	queue_head_t      * taskque;
	IOUserClientOwner * owner;
	OSSet             * set;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		// Lazily allocate; on allocation failure the notification is
		// silently skipped (best effort).
		if (!set) {
			set = OSSet::withCapacity(4);
			if (!set) {
				break;
			}
		}
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient      * uc;

			uc = (typeof(uc))obj;
#if 0
			{
			        OSString          * str;
			        str = IOCopyLogNameForPID(task_pid(task));
			        IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
			        uc->getName(), task_is_app_suspended(task));
			        OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1787 
1788 static kern_return_t
iokit_task_terminate_phase1(task_t task)1789 iokit_task_terminate_phase1(task_t task)
1790 {
1791 	queue_head_t      * taskque;
1792 	IOUserClientOwner * iter;
1793 	OSSet             * userServers = NULL;
1794 
1795 	if (!task_is_driver(task)) {
1796 		return KERN_SUCCESS;
1797 	}
1798 	userServers = OSSet::withCapacity(1);
1799 
1800 	IOLockLock(gIOUserClientOwnersLock);
1801 
1802 	taskque = task_io_user_clients(task);
1803 	queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
1804 		userServers->setObject(iter->uc);
1805 	}
1806 	IOLockUnlock(gIOUserClientOwnersLock);
1807 
1808 	if (userServers) {
1809 		IOUserServer * userServer;
1810 		while ((userServer = OSRequiredCast(IOUserServer, userServers->getAnyObject()))) {
1811 			userServer->clientDied();
1812 			userServers->removeObject(userServer);
1813 		}
1814 		userServers->release();
1815 	}
1816 	return KERN_SUCCESS;
1817 }
1818 
/*
 * Phase 2 of task termination: unlink every owner record for 'task'. Any
 * user client left with no owners is collected onto a temporary singly
 * linked list (reusing its queue head) and, after the lock is dropped,
 * gets clientDied() unless it was already cleanly closed.
 */
static kern_return_t
iokit_task_terminate_phase2(task_t task)
{
	queue_head_t      * taskque;
	IOUserClientOwner * owner;
	IOUserClient      * dead;
	IOUserClient      * uc;

	IOLockLock(gIOUserClientOwnersLock);
	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			// Keep the client alive until we notify it below.
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IOFreeType(owner, IOUserClientOwner);
	}
	IOLockUnlock(gIOUserClientOwnersLock);

	// Walk the collected list outside the lock and deliver clientDied().
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		// Shared instances always get the notification; exclusive ones
		// only if they were not already closed.
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1860 
1861 extern "C" kern_return_t
iokit_task_terminate(task_t task,int phase)1862 iokit_task_terminate(task_t task, int phase)
1863 {
1864 	switch (phase) {
1865 	case 1:
1866 		return iokit_task_terminate_phase1(task);
1867 	case 2:
1868 		return iokit_task_terminate_phase2(task);
1869 	default:
1870 		panic("iokit_task_terminate phase %d", phase);
1871 	}
1872 }
1873 
// Per-task IPC filter policy, kept as a singly linked list hanging off the
// user client's ExpansionData. Accessed under filterLock.
struct IOUCFilterPolicy {
	task_t             task;          // task this policy applies to
	io_filter_policy_t filterPolicy;  // opaque filter handle
	IOUCFilterPolicy * next;          // next entry in the list
};
1879 
/*
 * Look up the IPC filter policy recorded for 'task'. When addFilterPolicy
 * is non-zero and no entry exists, record it; when an entry already
 * exists, it must match the one being added. Returns the policy in effect
 * for the task, or 0 when none is recorded and none was added.
 */
io_filter_policy_t
IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
{
	IOUCFilterPolicy * elem;
	io_filter_policy_t filterPolicy;

	filterPolicy = 0;
	IOLockLock(&filterLock);

	// Linear scan; the list is expected to stay short (one entry per task).
	for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
	}

	if (elem) {
		if (addFilterPolicy) {
			// A task may not be registered with two different policies.
			assert(addFilterPolicy == elem->filterPolicy);
		}
		filterPolicy = elem->filterPolicy;
	} else if (addFilterPolicy) {
		// First registration for this task: push a new list head.
		elem = IOMallocType(IOUCFilterPolicy);
		elem->task               = task;
		elem->filterPolicy       = addFilterPolicy;
		elem->next               = reserved->filterPolicies;
		reserved->filterPolicies = elem;
		filterPolicy = addFilterPolicy;
	}

	IOLockUnlock(&filterLock);
	return filterPolicy;
}
1909 
/*
 * Destructor: release cached mappings, tear down the per-task filter
 * policy list (releasing each policy through the registered callbacks),
 * and free the expansion data and inline locks. The asserts verify that
 * noMoreSenders()/termination already emptied the owners queue.
 */
void
IOUserClient::free()
{
	if (mappings) {
		mappings->release();
	}

	IOStatisticsUnregisterCounter();

	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		IOUCFilterPolicy * elem;
		IOUCFilterPolicy * nextElem;
		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
			nextElem = elem->next;
			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
			}
			IOFreeType(elem, IOUCFilterPolicy);
		}
		IOFreeType(reserved, ExpansionData);
		IORWLockInlineDestroy(&lock);
		IOLockInlineDestroy(&filterLock);
	}

	super::free();
}
1939 
1940 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1941 
OSDefineMetaClassAndAbstractStructors(IOUserClient2022,IOUserClient)1942 OSDefineMetaClassAndAbstractStructors( IOUserClient2022, IOUserClient )
1943 
1944 
1945 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1946 
/*
 * Called when the owning process dies. The atomic compare-and-swap on
 * 'closed' guarantees clientClose() runs at most once for exclusive
 * clients; shared instances always forward the call.
 */
IOReturn
IOUserClient::clientDied( void )
{
	IOReturn ret = kIOReturnNotReady;

	if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
		ret = clientClose();
	}

	return ret;
}
1958 
// Default implementation; subclasses override to tear down the connection.
IOReturn
IOUserClient::clientClose( void )
{
	return kIOReturnUnsupported;
}
1964 
// Default implementation; subclasses override to return their provider.
IOService *
IOUserClient::getService( void )
{
	return NULL;
}
1970 
// Default implementation; subclasses override to accept a notification port.
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t     /* port */,
	UInt32          /* type */,
	UInt32          /* refCon */)
{
	return kIOReturnUnsupported;
}
1979 
// 64-bit refCon variant: forwards to the legacy overload, truncating the
// refCon to 32 bits (long-standing behavior for subclasses that only
// implement the UInt32 version).
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t port,
	UInt32          type,
	io_user_reference_t refCon)
{
	return registerNotificationPort(port, type, (UInt32) refCon);
}
1988 
// Default implementation; subclasses override to export a semaphore.
IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type,
    semaphore_t * semaphore )
{
	return kIOReturnUnsupported;
}
1995 
// Default implementation; subclasses override to link two user clients.
IOReturn
IOUserClient::connectClient( IOUserClient * /* client */ )
{
	return kIOReturnUnsupported;
}
2001 
// Default implementation; subclasses override to export shared memory.
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    IOMemoryDescriptor ** memory )
{
	return kIOReturnUnsupported;
}
2009 
// OSSharedPtr adapter: forwards to the raw-pointer overload and adopts the
// returned reference without an extra retain (OSNoRetain).
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    OSSharedPtr<IOMemoryDescriptor>& memory )
{
	IOMemoryDescriptor* memoryRaw = nullptr;
	IOReturn result = clientMemoryForType(type, options, &memoryRaw);
	memory.reset(memoryRaw, OSNoRetain);
	return result;
}
2020 
#if !__LP64__
// Legacy 32-bit entry point; deprecated in favor of mapClientMemory64()
// and deliberately a no-op here.
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits            type,
	task_t                  task,
	IOOptionBits            mapFlags,
	IOVirtualAddress        atAddress )
{
	return NULL;
}
#endif
2032 
/*
 * Map the memory the subclass exports for 'type' into 'task'.
 * Asks clientMemoryForType() for a descriptor, merges the caller's
 * user-controllable map flags with the subclass's options, and creates
 * the mapping. Returns NULL when the subclass exports nothing or the
 * mapping fails.
 */
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits            type,
	task_t                  task,
	IOOptionBits            mapFlags,
	mach_vm_address_t       atAddress )
{
	IOReturn            err;
	IOOptionBits        options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap *       map = NULL;

	err = clientMemoryForType((UInt32) type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		FAKE_STACK_FRAME(getMetaClass());

		// Only the user-option bits may come from the caller; the rest are
		// whatever the subclass specified.
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		// The descriptor reference from clientMemoryForType() is consumed;
		// the map (if any) keeps its own reference.
		memory->release();

		FAKE_STACK_FRAME_END();
	}

	return map;
}
2060 
/*
 * Create a send right to 'obj' in 'task' and hand the port name back as
 * an io_object_t. Consumes the caller's reference on 'obj' (released here
 * after the port holds its own). Always reports success; a NULL obj
 * yields a null name.
 */
IOReturn
IOUserClient::exportObjectToClient(task_t task,
    OSObject *obj, io_object_t *clientObj)
{
	mach_port_name_t    name;

	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );

	*clientObj = (io_object_t)(uintptr_t) name;

	if (obj) {
		obj->release();
	}

	return kIOReturnSuccess;
}
2077 
/*
 * Create an identity (IKOT_IOKIT_IDENT) send right to 'obj' in 'task' and
 * return its port name. Unlike exportObjectToClient(), the caller's
 * reference on 'obj' is NOT consumed.
 */
IOReturn
IOUserClient::copyPortNameForObjectInTask(task_t task,
    OSObject *obj, mach_port_name_t * port_name)
{
	mach_port_name_t    name;

	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );

	*(mach_port_name_t *) port_name = name;

	return kIOReturnSuccess;
}
2090 
2091 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSObject ** obj)2092 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2093     OSObject **obj)
2094 {
2095 	OSObject * object;
2096 
2097 	object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
2098 
2099 	*obj = object;
2100 
2101 	return object ? kIOReturnSuccess : kIOReturnIPCError;
2102 }
2103 
2104 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSSharedPtr<OSObject> & obj)2105 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2106     OSSharedPtr<OSObject>& obj)
2107 {
2108 	OSObject* objRaw = NULL;
2109 	IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2110 	obj.reset(objRaw, OSNoRetain);
2111 	return result;
2112 }
2113 
IOReturn
IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
	// Apply `delta` to the user reference count of the send right named
	// `port_name` in the given task.
	return iokit_mod_send_right(task, port_name, delta);
}
2119 
IOExternalMethod *
IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
	// Default implementation: no legacy external-method table.
	return NULL;
}
2125 
IOExternalAsyncMethod *
IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
	// Default implementation: no legacy async external-method table.
	return NULL;
}
2131 
IOExternalTrap *
IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
	// Default implementation: no traps are exported.
	return NULL;
}
2138 
2139 #pragma clang diagnostic push
2140 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2141 
2142 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
2143 // functions can break clients of kexts implementing getExternalMethodForIndex()
2144 IOExternalMethod *
2145 IOUserClient::
getTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2146 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
2147 {
2148 	IOExternalMethod *method = getExternalMethodForIndex(index);
2149 
2150 	if (method) {
2151 		*targetP = (IOService *) method->object;
2152 	}
2153 
2154 	return method;
2155 }
2156 
2157 IOExternalMethod *
2158 IOUserClient::
getTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2159 getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2160 {
2161 	IOService* targetPRaw = NULL;
2162 	IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
2163 	targetP.reset(targetPRaw, OSRetain);
2164 	return result;
2165 }
2166 
2167 IOExternalAsyncMethod *
2168 IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2169 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
2170 {
2171 	IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
2172 
2173 	if (method) {
2174 		*targetP = (IOService *) method->object;
2175 	}
2176 
2177 	return method;
2178 }
2179 
2180 IOExternalAsyncMethod *
2181 IOUserClient::
getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2182 getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2183 {
2184 	IOService* targetPRaw = NULL;
2185 	IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
2186 	targetP.reset(targetPRaw, OSRetain);
2187 	return result;
2188 }
2189 
2190 IOExternalTrap *
2191 IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP,UInt32 index)2192 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
2193 {
2194 	IOExternalTrap *trap = getExternalTrapForIndex(index);
2195 
2196 	if (trap) {
2197 		*targetP = trap->object;
2198 	}
2199 
2200 	return trap;
2201 }
2202 #pragma clang diagnostic pop
2203 
2204 IOReturn
releaseAsyncReference64(OSAsyncReference64 reference)2205 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2206 {
2207 	mach_port_t port;
2208 	port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2209 
2210 	if (MACH_PORT_NULL != port) {
2211 		iokit_release_port_send(port);
2212 	}
2213 
2214 	return kIOReturnSuccess;
2215 }
2216 
2217 IOReturn
releaseNotificationPort(mach_port_t port)2218 IOUserClient::releaseNotificationPort(mach_port_t port)
2219 {
2220 	if (MACH_PORT_NULL != port) {
2221 		iokit_release_port_send(port);
2222 	}
2223 
2224 	return kIOReturnSuccess;
2225 }
2226 
2227 IOReturn
sendAsyncResult(OSAsyncReference reference,IOReturn result,void * args[],UInt32 numArgs)2228 IOUserClient::sendAsyncResult(OSAsyncReference reference,
2229     IOReturn result, void *args[], UInt32 numArgs)
2230 {
2231 	OSAsyncReference64  reference64;
2232 	OSBoundedArray<io_user_reference_t, kMaxAsyncArgs> args64;
2233 	unsigned int        idx;
2234 
2235 	if (numArgs > kMaxAsyncArgs) {
2236 		return kIOReturnMessageTooLarge;
2237 	}
2238 
2239 	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2240 		reference64[idx] = REF64(reference[idx]);
2241 	}
2242 
2243 	for (idx = 0; idx < numArgs; idx++) {
2244 		args64[idx] = REF64(args[idx]);
2245 	}
2246 
2247 	return sendAsyncResult64(reference64, result, args64.data(), numArgs);
2248 }
2249 
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Same as sendAsyncResult64(), but lets the caller pass options
	// (e.g. kIOUserNotifyOptionCanDrop) through to the send path.
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2256 
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	// Deliver an async completion with default (no options) send semantics.
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2263 
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Builds and sends the async-completion Mach message for both 32-bit
	// and 64-bit clients. reference[0] carries the reply port together
	// with the kIOUCAsync* flag bits.
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader     notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t                 args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64   notifyHdr;
				IOAsyncCompletionContent asyncContent;
				io_user_reference_t      args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg      replyMsg;
	mach_port_t   replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port  = NULL;
	replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit client: message is trimmed to only the args being sent.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		// 32-bit client: reference words and args are narrowed via REF32.
		unsigned int idx;

		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		/* Skip reference[0] which is left as 0 from the earlier bzero */
		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		// Caller tolerates drops: send with a zero timeout so a full
		// queue times out instead of failing hard.
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	// Log a send failure only once per reference, flagged in reference[0].
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2354 
2355 
2356 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2357 
2358 extern "C" {
// Cast `obj` to `cls`, declaring a new local `out` bound to the result;
// returns kIOReturnBadArgument from the enclosing function on cast failure.
#define CHECK(cls, obj, out)                      \
	cls * out;                              \
	if( !(out = OSDynamicCast( cls, obj)))  \
	    return( kIOReturnBadArgument )

// Like CHECK, but `obj` must be an IOUserIterator whose wrapped
// userIteratorObject is of class `cls`; declares both `oIter` and `out`.
#define CHECKLOCKED(cls, obj, out)                                        \
	IOUserIterator * oIter;                                         \
	cls * out;                                                      \
	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))              \
	    return (kIOReturnBadArgument);                              \
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject)))     \
	    return (kIOReturnBadArgument)
2371 
2372 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2373 
2374 // Create a vm_map_copy_t or kalloc'ed data for memory
2375 // to be copied out. ipc will free after the copyout.
2376 
2377 static kern_return_t
copyoutkdata(const void * data,vm_size_t len,io_buf_ptr_t * buf)2378 copyoutkdata( const void * data, vm_size_t len,
2379     io_buf_ptr_t * buf )
2380 {
2381 	kern_return_t       err;
2382 	vm_map_copy_t       copy;
2383 
2384 	err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2385 	    false /* src_destroy */, &copy);
2386 
2387 	assert( err == KERN_SUCCESS );
2388 	if (err == KERN_SUCCESS) {
2389 		*buf = (char *) copy;
2390 	}
2391 
2392 	return err;
2393 }
2394 
2395 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2396 
2397 /* Routine io_server_version */
kern_return_t
is_io_server_version(
	mach_port_t main_port,
	uint64_t *version)
{
	// Report the IOKit MIG interface version compiled into this kernel.
	// main_port is unused; no privilege check is required for this query.
	*version = IOKIT_SERVER_VERSION;
	return kIOReturnSuccess;
}
2406 
2407 /* Routine io_object_get_class */
2408 kern_return_t
is_io_object_get_class(io_object_t object,io_name_t className)2409 is_io_object_get_class(
2410 	io_object_t object,
2411 	io_name_t className )
2412 {
2413 	const OSMetaClass* my_obj = NULL;
2414 
2415 	if (!object) {
2416 		return kIOReturnBadArgument;
2417 	}
2418 
2419 	my_obj = object->getMetaClass();
2420 	if (!my_obj) {
2421 		return kIOReturnNotFound;
2422 	}
2423 
2424 	strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2425 
2426 	return kIOReturnSuccess;
2427 }
2428 
2429 /* Routine io_object_get_superclass */
2430 kern_return_t
is_io_object_get_superclass(mach_port_t main_port,io_name_t obj_name,io_name_t class_name)2431 is_io_object_get_superclass(
2432 	mach_port_t main_port,
2433 	io_name_t obj_name,
2434 	io_name_t class_name)
2435 {
2436 	IOReturn            ret;
2437 	const OSMetaClass * meta;
2438 	const OSMetaClass * super;
2439 	const OSSymbol    * name;
2440 	const char        * cstr;
2441 
2442 	if (!obj_name || !class_name) {
2443 		return kIOReturnBadArgument;
2444 	}
2445 	if (main_port != main_device_port) {
2446 		return kIOReturnNotPrivileged;
2447 	}
2448 
2449 	ret = kIOReturnNotFound;
2450 	meta = NULL;
2451 	do{
2452 		name = OSSymbol::withCString(obj_name);
2453 		if (!name) {
2454 			break;
2455 		}
2456 		meta = OSMetaClass::copyMetaClassWithName(name);
2457 		if (!meta) {
2458 			break;
2459 		}
2460 		super = meta->getSuperClass();
2461 		if (!super) {
2462 			break;
2463 		}
2464 		cstr = super->getClassName();
2465 		if (!cstr) {
2466 			break;
2467 		}
2468 		strlcpy(class_name, cstr, sizeof(io_name_t));
2469 		ret = kIOReturnSuccess;
2470 	}while (false);
2471 
2472 	OSSafeReleaseNULL(name);
2473 	if (meta) {
2474 		meta->releaseMetaClass();
2475 	}
2476 
2477 	return ret;
2478 }
2479 
2480 /* Routine io_object_get_bundle_identifier */
2481 kern_return_t
is_io_object_get_bundle_identifier(mach_port_t main_port,io_name_t obj_name,io_name_t bundle_name)2482 is_io_object_get_bundle_identifier(
2483 	mach_port_t main_port,
2484 	io_name_t obj_name,
2485 	io_name_t bundle_name)
2486 {
2487 	IOReturn            ret;
2488 	const OSMetaClass * meta;
2489 	const OSSymbol    * name;
2490 	const OSSymbol    * identifier;
2491 	const char        * cstr;
2492 
2493 	if (!obj_name || !bundle_name) {
2494 		return kIOReturnBadArgument;
2495 	}
2496 	if (main_port != main_device_port) {
2497 		return kIOReturnNotPrivileged;
2498 	}
2499 
2500 	ret = kIOReturnNotFound;
2501 	meta = NULL;
2502 	do{
2503 		name = OSSymbol::withCString(obj_name);
2504 		if (!name) {
2505 			break;
2506 		}
2507 		meta = OSMetaClass::copyMetaClassWithName(name);
2508 		if (!meta) {
2509 			break;
2510 		}
2511 		identifier = meta->getKmodName();
2512 		if (!identifier) {
2513 			break;
2514 		}
2515 		cstr = identifier->getCStringNoCopy();
2516 		if (!cstr) {
2517 			break;
2518 		}
2519 		strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2520 		ret = kIOReturnSuccess;
2521 	}while (false);
2522 
2523 	OSSafeReleaseNULL(name);
2524 	if (meta) {
2525 		meta->releaseMetaClass();
2526 	}
2527 
2528 	return ret;
2529 }
2530 
2531 /* Routine io_object_conforms_to */
2532 kern_return_t
is_io_object_conforms_to(io_object_t object,io_name_t className,boolean_t * conforms)2533 is_io_object_conforms_to(
2534 	io_object_t object,
2535 	io_name_t className,
2536 	boolean_t *conforms )
2537 {
2538 	if (!object) {
2539 		return kIOReturnBadArgument;
2540 	}
2541 
2542 	*conforms = (NULL != object->metaCast( className ));
2543 
2544 	return kIOReturnSuccess;
2545 }
2546 
2547 /* Routine io_object_get_retain_count */
2548 kern_return_t
is_io_object_get_retain_count(io_object_t object,uint32_t * retainCount)2549 is_io_object_get_retain_count(
2550 	io_object_t object,
2551 	uint32_t *retainCount )
2552 {
2553 	if (!object) {
2554 		return kIOReturnBadArgument;
2555 	}
2556 
2557 	*retainCount = object->getRetainCount();
2558 	return kIOReturnSuccess;
2559 }
2560 
2561 /* Routine io_iterator_next */
2562 kern_return_t
is_io_iterator_next(io_object_t iterator,io_object_t * object)2563 is_io_iterator_next(
2564 	io_object_t iterator,
2565 	io_object_t *object )
2566 {
2567 	IOReturn    ret;
2568 	OSObject *  obj;
2569 	OSIterator * iter;
2570 	IOUserIterator * uiter;
2571 
2572 	if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2573 		obj = uiter->copyNextObject();
2574 	} else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2575 		obj = iter->getNextObject();
2576 		if (obj) {
2577 			obj->retain();
2578 		}
2579 	} else {
2580 		return kIOReturnBadArgument;
2581 	}
2582 
2583 	if (obj) {
2584 		*object = obj;
2585 		ret = kIOReturnSuccess;
2586 	} else {
2587 		ret = kIOReturnNoDevice;
2588 	}
2589 
2590 	return ret;
2591 }
2592 
2593 /* Routine io_iterator_reset */
2594 kern_return_t
is_io_iterator_reset(io_object_t iterator)2595 is_io_iterator_reset(
2596 	io_object_t iterator )
2597 {
2598 	CHECK( OSIterator, iterator, iter );
2599 
2600 	iter->reset();
2601 
2602 	return kIOReturnSuccess;
2603 }
2604 
2605 /* Routine io_iterator_is_valid */
2606 kern_return_t
is_io_iterator_is_valid(io_object_t iterator,boolean_t * is_valid)2607 is_io_iterator_is_valid(
2608 	io_object_t iterator,
2609 	boolean_t *is_valid )
2610 {
2611 	CHECK( OSIterator, iterator, iter );
2612 
2613 	*is_valid = iter->isValid();
2614 
2615 	return kIOReturnSuccess;
2616 }
2617 
2618 static kern_return_t
internal_io_service_match_property_table(io_service_t _service,const char * matching,mach_msg_type_number_t matching_size,boolean_t * matches)2619 internal_io_service_match_property_table(
2620 	io_service_t _service,
2621 	const char * matching,
2622 	mach_msg_type_number_t matching_size,
2623 	boolean_t *matches)
2624 {
2625 	CHECK( IOService, _service, service );
2626 
2627 	kern_return_t       kr;
2628 	OSObject *          obj;
2629 	OSDictionary *      dict;
2630 
2631 	assert(matching_size);
2632 
2633 
2634 	obj = OSUnserializeXML(matching, matching_size);
2635 
2636 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2637 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2638 		*matches = service->passiveMatch( dict );
2639 		kr = kIOReturnSuccess;
2640 	} else {
2641 		kr = kIOReturnBadArgument;
2642 	}
2643 
2644 	if (obj) {
2645 		obj->release();
2646 	}
2647 
2648 	return kr;
2649 }
2650 
2651 /* Routine io_service_match_property_table */
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	// XML-string variant is not supported; callers use the _ool/_bin forms.
	return kIOReturnUnsupported;
}
2660 
2661 
2662 /* Routine io_service_match_property_table_ool */
2663 kern_return_t
is_io_service_match_property_table_ool(io_object_t service,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,kern_return_t * result,boolean_t * matches)2664 is_io_service_match_property_table_ool(
2665 	io_object_t service,
2666 	io_buf_ptr_t matching,
2667 	mach_msg_type_number_t matchingCnt,
2668 	kern_return_t *result,
2669 	boolean_t *matches )
2670 {
2671 	kern_return_t         kr;
2672 	vm_offset_t           data;
2673 	vm_map_offset_t       map_data;
2674 
2675 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2676 	data = CAST_DOWN(vm_offset_t, map_data);
2677 
2678 	if (KERN_SUCCESS == kr) {
2679 		// must return success after vm_map_copyout() succeeds
2680 		*result = internal_io_service_match_property_table(service,
2681 		    (const char *)data, matchingCnt, matches );
2682 		vm_deallocate( kernel_map, data, matchingCnt );
2683 	}
2684 
2685 	return kr;
2686 }
2687 
2688 /* Routine io_service_match_property_table_bin */
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	// In-band variant: the matching data is already in kernel memory, so
	// forward directly to the shared implementation.
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2698 
2699 static kern_return_t
internal_io_service_get_matching_services(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_iterator_t * existing)2700 internal_io_service_get_matching_services(
2701 	mach_port_t main_port,
2702 	const char * matching,
2703 	mach_msg_type_number_t matching_size,
2704 	io_iterator_t *existing )
2705 {
2706 	kern_return_t       kr;
2707 	OSObject *          obj;
2708 	OSDictionary *      dict;
2709 
2710 	if (main_port != main_device_port) {
2711 		return kIOReturnNotPrivileged;
2712 	}
2713 
2714 	assert(matching_size);
2715 	obj = OSUnserializeXML(matching, matching_size);
2716 
2717 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2718 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2719 		*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2720 		kr = kIOReturnSuccess;
2721 	} else {
2722 		kr = kIOReturnBadArgument;
2723 	}
2724 
2725 	if (obj) {
2726 		obj->release();
2727 	}
2728 
2729 	return kr;
2730 }
2731 
2732 /* Routine io_service_get_matching_services */
kern_return_t
is_io_service_get_matching_services(
	mach_port_t main_port,
	io_string_t matching,
	io_iterator_t *existing )
{
	// XML-string variant is not supported; callers use the _ool/_bin forms.
	return kIOReturnUnsupported;
}
2741 
2742 /* Routine io_service_get_matching_services_ool */
2743 kern_return_t
is_io_service_get_matching_services_ool(mach_port_t main_port,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,kern_return_t * result,io_object_t * existing)2744 is_io_service_get_matching_services_ool(
2745 	mach_port_t main_port,
2746 	io_buf_ptr_t matching,
2747 	mach_msg_type_number_t matchingCnt,
2748 	kern_return_t *result,
2749 	io_object_t *existing )
2750 {
2751 	kern_return_t       kr;
2752 	vm_offset_t         data;
2753 	vm_map_offset_t     map_data;
2754 
2755 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2756 	data = CAST_DOWN(vm_offset_t, map_data);
2757 
2758 	if (KERN_SUCCESS == kr) {
2759 		// must return success after vm_map_copyout() succeeds
2760 		// and mig will copy out objects on success
2761 		*existing = NULL;
2762 		*result = internal_io_service_get_matching_services(main_port,
2763 		    (const char *) data, matchingCnt, existing);
2764 		vm_deallocate( kernel_map, data, matchingCnt );
2765 	}
2766 
2767 	return kr;
2768 }
2769 
2770 /* Routine io_service_get_matching_services_bin */
kern_return_t
is_io_service_get_matching_services_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
	// In-band variant: the matching data is already in kernel memory, so
	// forward directly to the shared implementation.
	return internal_io_service_get_matching_services(main_port, matching, matchingCnt, existing);
}
2780 
2781 
2782 static kern_return_t
internal_io_service_get_matching_service(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_service_t * service)2783 internal_io_service_get_matching_service(
2784 	mach_port_t main_port,
2785 	const char * matching,
2786 	mach_msg_type_number_t matching_size,
2787 	io_service_t *service )
2788 {
2789 	kern_return_t       kr;
2790 	OSObject *          obj;
2791 	OSDictionary *      dict;
2792 
2793 	if (main_port != main_device_port) {
2794 		return kIOReturnNotPrivileged;
2795 	}
2796 
2797 	assert(matching_size);
2798 	obj = OSUnserializeXML(matching, matching_size);
2799 
2800 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2801 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2802 		*service = IOService::copyMatchingService( dict );
2803 		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2804 	} else {
2805 		kr = kIOReturnBadArgument;
2806 	}
2807 
2808 	if (obj) {
2809 		obj->release();
2810 	}
2811 
2812 	return kr;
2813 }
2814 
2815 /* Routine io_service_get_matching_service */
kern_return_t
is_io_service_get_matching_service(
	mach_port_t main_port,
	io_string_t matching,
	io_service_t *service )
{
	// XML-string variant is not supported; callers use the _ool/_bin forms.
	return kIOReturnUnsupported;
}
2824 
2825 /* Routine io_service_get_matching_services_ool */
2826 kern_return_t
is_io_service_get_matching_service_ool(mach_port_t main_port,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,kern_return_t * result,io_object_t * service)2827 is_io_service_get_matching_service_ool(
2828 	mach_port_t main_port,
2829 	io_buf_ptr_t matching,
2830 	mach_msg_type_number_t matchingCnt,
2831 	kern_return_t *result,
2832 	io_object_t *service )
2833 {
2834 	kern_return_t       kr;
2835 	vm_offset_t         data;
2836 	vm_map_offset_t     map_data;
2837 
2838 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2839 	data = CAST_DOWN(vm_offset_t, map_data);
2840 
2841 	if (KERN_SUCCESS == kr) {
2842 		// must return success after vm_map_copyout() succeeds
2843 		// and mig will copy out objects on success
2844 		*service = NULL;
2845 		*result = internal_io_service_get_matching_service(main_port,
2846 		    (const char *) data, matchingCnt, service );
2847 		vm_deallocate( kernel_map, data, matchingCnt );
2848 	}
2849 
2850 	return kr;
2851 }
2852 
2853 /* Routine io_service_get_matching_service_bin */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	// In-band variant: the matching data is already in kernel memory, so
	// forward directly to the shared implementation.
	return internal_io_service_get_matching_service(main_port, matching, matchingCnt, service);
}
2863 
static kern_return_t
internal_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	const char * matching,
	size_t matching_size,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	// Shared implementation behind the add_notification MIG variants:
	// unserializes the matching dictionary, creates an
	// IOServiceUserNotification that posts messages to `port`, and arms
	// it via IOService::addMatchingNotification().
	IOServiceUserNotification * userNotify = NULL;
	IONotifier *                notify = NULL;
	const OSSymbol *            sym;
	OSObject *                  obj;
	OSDictionary *              dict;
	IOReturn                    err;
	natural_t                   userMsgType;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	do {
		err = kIOReturnNoResources;

		// Reject oversized matching data outright (early return: sym and
		// obj have not been created yet, so no cleanup is needed).
		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		// NOTE(review): on symbol-creation failure only err is set; the
		// body still proceeds with sym == NULL — confirm this is intended.
		if (!(sym = OSSymbol::withCString( notification_type ))) {
			err = kIOReturnNoResources;
		}

		assert(matching_size);
		obj = OSUnserializeXML(matching, matching_size);
		dict = OSDynamicCast(OSDictionary, obj);
		if (!dict) {
			err = kIOReturnBadArgument;
			continue;       // do{}while(false): acts as a break to cleanup
		}
		IOTaskRegistryCompatibilityMatching(current_task(), dict);

		// Map the notification-type symbol onto the message type the
		// user client will receive.
		if ((sym == gIOPublishNotification)
		    || (sym == gIOFirstPublishNotification)) {
			userMsgType = kIOServicePublishNotificationType;
		} else if ((sym == gIOMatchedNotification)
		    || (sym == gIOFirstMatchNotification)) {
			userMsgType = kIOServiceMatchedNotificationType;
		} else if ((sym == gIOTerminatedNotification)
		    || (sym == gIOWillTerminateNotification)) {
			userMsgType = kIOServiceTerminatedNotificationType;
		} else {
			userMsgType = kLastIOKitNotificationType;
		}

		userNotify = new IOServiceUserNotification;

		if (userNotify && !userNotify->init( port, userMsgType,
		    reference, referenceSize, client64)) {
			userNotify->release();
			userNotify = NULL;
		}
		if (!userNotify) {
			continue;
		}

		notify = IOService::addMatchingNotification( sym, dict,
		    &userNotify->_handler, userNotify );
		if (notify) {
			// Success: hand the armed notification object back to MIG.
			*notification = userNotify;
			userNotify->setNotification( notify );
			err = kIOReturnSuccess;
		} else {
			err = kIOReturnUnsupported;
		}
	} while (false);

	// On failure, tear down any partially constructed notification.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	if (sym) {
		sym->release();
	}
	if (obj) {
		obj->release();
	}

	return err;
}
2959 
2960 
2961 /* Routine io_service_add_notification */
kern_return_t
is_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	// XML-string variant is not supported; callers use the _bin forms.
	return kIOReturnUnsupported;
}
2974 
2975 /* Routine io_service_add_notification_64 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	// XML-string variant is not supported; callers use the _bin forms.
	return kIOReturnUnsupported;
}
2988 
2989 /* Routine io_service_add_notification_bin */
2990 kern_return_t
is_io_service_add_notification_bin(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)2991 is_io_service_add_notification_bin
2992 (
2993 	mach_port_t main_port,
2994 	io_name_t notification_type,
2995 	io_struct_inband_t matching,
2996 	mach_msg_type_number_t matchingCnt,
2997 	mach_port_t wake_port,
2998 	io_async_ref_t reference,
2999 	mach_msg_type_number_t referenceCnt,
3000 	io_object_t *notification)
3001 {
3002 	io_async_ref_t zreference;
3003 
3004 	if (referenceCnt > ASYNC_REF_COUNT) {
3005 		return kIOReturnBadArgument;
3006 	}
3007 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3008 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3009 
3010 	return internal_io_service_add_notification(main_port, notification_type,
3011 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3012 	           false, notification);
3013 }
3014 
3015 /* Routine io_service_add_notification_bin_64 */
3016 kern_return_t
is_io_service_add_notification_bin_64(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3017 is_io_service_add_notification_bin_64
3018 (
3019 	mach_port_t main_port,
3020 	io_name_t notification_type,
3021 	io_struct_inband_t matching,
3022 	mach_msg_type_number_t matchingCnt,
3023 	mach_port_t wake_port,
3024 	io_async_ref64_t reference,
3025 	mach_msg_type_number_t referenceCnt,
3026 	io_object_t *notification)
3027 {
3028 	io_async_ref64_t zreference;
3029 
3030 	if (referenceCnt > ASYNC_REF64_COUNT) {
3031 		return kIOReturnBadArgument;
3032 	}
3033 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3034 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3035 
3036 	return internal_io_service_add_notification(main_port, notification_type,
3037 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3038 	           true, notification);
3039 }
3040 
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t       kr;
	vm_offset_t         data;
	vm_map_offset_t     map_data;

	// Materialize the caller's out-of-line matching buffer into the kernel
	// map so it can be handed to the inband notification path.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		*result = internal_io_service_add_notification( main_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		// Release the kernel mapping of the out-of-line data.
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
3072 
3073 /* Routine io_service_add_notification_ool */
3074 kern_return_t
is_io_service_add_notification_ool(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3075 is_io_service_add_notification_ool(
3076 	mach_port_t main_port,
3077 	io_name_t notification_type,
3078 	io_buf_ptr_t matching,
3079 	mach_msg_type_number_t matchingCnt,
3080 	mach_port_t wake_port,
3081 	io_async_ref_t reference,
3082 	mach_msg_type_number_t referenceCnt,
3083 	kern_return_t *result,
3084 	io_object_t *notification )
3085 {
3086 	io_async_ref_t zreference;
3087 
3088 	if (referenceCnt > ASYNC_REF_COUNT) {
3089 		return kIOReturnBadArgument;
3090 	}
3091 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3092 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3093 
3094 	return internal_io_service_add_notification_ool(main_port, notification_type,
3095 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3096 	           false, result, notification);
3097 }
3098 
3099 /* Routine io_service_add_notification_ool_64 */
3100 kern_return_t
is_io_service_add_notification_ool_64(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3101 is_io_service_add_notification_ool_64(
3102 	mach_port_t main_port,
3103 	io_name_t notification_type,
3104 	io_buf_ptr_t matching,
3105 	mach_msg_type_number_t matchingCnt,
3106 	mach_port_t wake_port,
3107 	io_async_ref64_t reference,
3108 	mach_msg_type_number_t referenceCnt,
3109 	kern_return_t *result,
3110 	io_object_t *notification )
3111 {
3112 	io_async_ref64_t zreference;
3113 
3114 	if (referenceCnt > ASYNC_REF64_COUNT) {
3115 		return kIOReturnBadArgument;
3116 	}
3117 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3118 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3119 
3120 	return internal_io_service_add_notification_ool(main_port, notification_type,
3121 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3122 	           true, result, notification);
3123 }
3124 
/* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	// Compatibility shim: forwards the single scalar reference as a
	// one-element async reference array.
	return is_io_service_add_notification( main_port, notification_type,
	           matching, port, &ref, 1, notification );
}
3139 
3140 
static kern_return_t
internal_io_service_add_interest_notification(
	io_object_t _service,
	io_name_t type_of_interest,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceMessageUserNotification *  userNotify = NULL;
	IONotifier *                        notify = NULL;
	const OSSymbol *                    sym;
	IOReturn                            err;

	// Validate and downcast the MIG object to an IOService (declares 'service').
	CHECK( IOService, _service, service );

	err = kIOReturnNoResources;
	if ((sym = OSSymbol::withCString( type_of_interest ))) {
		do {
			userNotify = new IOServiceMessageUserNotification;

			// Initialize with the wake port and the client's async reference;
			// on init failure drop the half-constructed object.
			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
			    reference, referenceSize, client64 )) {
				userNotify->release();
				userNotify = NULL;
			}
			if (!userNotify) {
				continue;
			}

			// Register the user notification as the interest handler.
			notify = service->registerInterest( sym,
			    &userNotify->_handler, userNotify );
			if (notify) {
				// Success: the user-notification object is returned to MIG.
				*notification = userNotify;
				userNotify->setNotification( notify );
				err = kIOReturnSuccess;
			} else {
				err = kIOReturnUnsupported;
			}
		} while (false);

		sym->release();
	}

	// On any failure after construction, fully tear down the notification
	// (detach, invalidate its port, and drop our reference).
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	return err;
}
3195 
3196 /* Routine io_service_add_message_notification */
3197 kern_return_t
is_io_service_add_interest_notification(io_object_t service,io_name_t type_of_interest,mach_port_t port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3198 is_io_service_add_interest_notification(
3199 	io_object_t service,
3200 	io_name_t type_of_interest,
3201 	mach_port_t port,
3202 	io_async_ref_t reference,
3203 	mach_msg_type_number_t referenceCnt,
3204 	io_object_t * notification )
3205 {
3206 	io_async_ref_t zreference;
3207 
3208 	if (referenceCnt > ASYNC_REF_COUNT) {
3209 		return kIOReturnBadArgument;
3210 	}
3211 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3212 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3213 
3214 	return internal_io_service_add_interest_notification(service, type_of_interest,
3215 	           port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3216 }
3217 
3218 /* Routine io_service_add_interest_notification_64 */
3219 kern_return_t
is_io_service_add_interest_notification_64(io_object_t service,io_name_t type_of_interest,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3220 is_io_service_add_interest_notification_64(
3221 	io_object_t service,
3222 	io_name_t type_of_interest,
3223 	mach_port_t wake_port,
3224 	io_async_ref64_t reference,
3225 	mach_msg_type_number_t referenceCnt,
3226 	io_object_t *notification )
3227 {
3228 	io_async_ref64_t zreference;
3229 
3230 	if (referenceCnt > ASYNC_REF64_COUNT) {
3231 		return kIOReturnBadArgument;
3232 	}
3233 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3234 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3235 
3236 	return internal_io_service_add_interest_notification(service, type_of_interest,
3237 	           wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3238 }
3239 
3240 
/* Routine io_service_acknowledge_notification */
kern_return_t
is_io_service_acknowledge_notification(
	io_object_t _service,
	natural_t notify_ref,
	natural_t response )
{
	// Validate and downcast the MIG object to an IOService (declares 'service').
	CHECK( IOService, _service, service );

	// The opaque notify_ref from the original message is passed back as an
	// IONotificationRef along with the client's response code.
	return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
	           (IOOptionBits) response );
}
3253 
/* Routine io_connect_get_semaphore */
kern_return_t
is_io_connect_get_notification_semaphore(
	io_connect_t connection,
	natural_t notification_type,
	semaphore_t *semaphore )
{
	IOReturn ret;
	// Validate and downcast the MIG object to an IOUserClient (declares 'client').
	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();
	// Serialize with other IPC calls into this user client.
	client->ipcEnter(kIPCLockWrite);
	ret = client->getNotificationSemaphore((UInt32) notification_type,
	    semaphore );
	client->ipcExit(kIPCLockWrite);

	return ret;
}
3272 
3273 /* Routine io_registry_get_root_entry */
3274 kern_return_t
is_io_registry_get_root_entry(mach_port_t main_port,io_object_t * root)3275 is_io_registry_get_root_entry(
3276 	mach_port_t main_port,
3277 	io_object_t *root )
3278 {
3279 	IORegistryEntry *   entry;
3280 
3281 	if (main_port != main_device_port) {
3282 		return kIOReturnNotPrivileged;
3283 	}
3284 
3285 	entry = IORegistryEntry::getRegistryRoot();
3286 	if (entry) {
3287 		entry->retain();
3288 	}
3289 	*root = entry;
3290 
3291 	return kIOReturnSuccess;
3292 }
3293 
/* Routine io_registry_create_iterator */
kern_return_t
is_io_registry_create_iterator(
	mach_port_t main_port,
	io_name_t plane,
	uint32_t options,
	io_object_t *iterator )
{
	// Only the main device port may create a root-level registry iterator.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	// Wrap the registry iterator in a user-object so it can be vended
	// through MIG; NULL if the plane name was invalid.
	*iterator = IOUserIterator::withIterator(
		IORegistryIterator::iterateOver(
			IORegistryEntry::getPlane( plane ), options ));

	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
}
3312 
/* Routine io_registry_entry_create_iterator */
kern_return_t
is_io_registry_entry_create_iterator(
	io_object_t registry_entry,
	io_name_t plane,
	uint32_t options,
	io_object_t *iterator )
{
	// Validate and downcast the MIG object (declares 'entry').
	CHECK( IORegistryEntry, registry_entry, entry );

	// Iterate the given plane rooted at this entry; the iterator is wrapped
	// in a user-object so it can be vended through MIG.
	*iterator = IOUserIterator::withIterator(
		IORegistryIterator::iterateOver( entry,
		IORegistryEntry::getPlane( plane ), options ));

	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
}
3329 
/* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// Validate the MIG object; declares 'iter' and its owning 'oIter' wrapper.
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// The user-iterator lock serializes concurrent MIG calls on this iterator.
	IOLockLock(&oIter->lock);
	iter->enterEntry();
	IOLockUnlock(&oIter->lock);

	return kIOReturnSuccess;
}
3343 
/* Routine io_registry_iterator_exit */
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool        didIt;

	// Validate the MIG object; declares 'iter' and its owning 'oIter' wrapper.
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// The user-iterator lock serializes concurrent MIG calls on this iterator.
	IOLockLock(&oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(&oIter->lock);

	// exitEntry() returning false is reported as kIOReturnNoDevice.
	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
3359 
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path(
	mach_port_t main_port,
	io_string_t path,
	io_object_t *registry_entry )
{
	IORegistryEntry *   entry;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	entry = IORegistryEntry::fromPath( path );

	// For tasks running in registry-compatibility mode, retry the lookup as
	// a service match on gIOCompatibilityMatchKey plus the same path.
	if (!entry && IOTaskRegistryCompatibility(current_task())) {
		OSDictionary * matching;
		const OSObject * objects[2] = { kOSBooleanTrue, NULL };
		const OSSymbol * keys[2]    = { gIOCompatibilityMatchKey, gIOPathMatchKey };

		objects[1] = OSString::withCStringNoCopy(path);
		matching = OSDictionary::withObjects(objects, keys, 2, 2);
		if (matching) {
			entry = IOService::copyMatchingService(matching);
		}
		OSSafeReleaseNULL(matching);
		OSSafeReleaseNULL(objects[1]);
	}

	// A NULL entry is still returned with success; the client sees a null object.
	*registry_entry = entry;

	return kIOReturnSuccess;
}
3393 
3394 
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t main_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry *   entry;
	vm_map_offset_t     map_data;
	const char *        cpath;
	IOReturn            res;
	kern_return_t       err;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry    = NULL;
	res = err = KERN_SUCCESS;
	// A non-empty inband string takes precedence; otherwise the path
	// arrives out-of-line and must be copied out into the kernel map.
	if (path[0]) {
		cpath = path;
	} else {
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		// Cap the out-of-line path length.
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// The out-of-line path must be NUL-terminated.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	// Release the kernel mapping of the out-of-line path, if any.
	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	if (KERN_SUCCESS != err) {
		res = err;
	}
	// 'err' is the MIG-level status; the lookup status is delivered in *result.
	*registry_entry = entry;
	*result = res;

	return err;
}
3455 
3456 
/* Routine io_registry_entry_in_plane */
kern_return_t
is_io_registry_entry_in_plane(
	io_object_t registry_entry,
	io_name_t plane,
	boolean_t *inPlane )
{
	// Validate and downcast the MIG object (declares 'entry').
	CHECK( IORegistryEntry, registry_entry, entry );

	*inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));

	return kIOReturnSuccess;
}
3470 
3471 
3472 /* Routine io_registry_entry_get_path */
3473 kern_return_t
is_io_registry_entry_get_path(io_object_t registry_entry,io_name_t plane,io_string_t path)3474 is_io_registry_entry_get_path(
3475 	io_object_t registry_entry,
3476 	io_name_t plane,
3477 	io_string_t path )
3478 {
3479 	int         length;
3480 	CHECK( IORegistryEntry, registry_entry, entry );
3481 
3482 	length = sizeof(io_string_t);
3483 	if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3484 		return kIOReturnSuccess;
3485 	} else {
3486 		return kIOReturnBadArgument;
3487 	}
3488 }
3489 
/* Routine io_registry_entry_get_path */
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum   { kMaxPath = 16384 };
	IOReturn err;
	int      length;
	char   * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	*path_ool    = NULL;
	*path_oolCnt = 0;
	// Fast path: the path fits in the inband reply buffer.
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		err = kIOReturnSuccess;
	} else {
		// Slow path: build the path in a temporary kMaxPath buffer and
		// return it out-of-line via copyoutkdata().
		length = kMaxPath;
		buf = IONewData(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			*path_oolCnt = length;
			err = copyoutkdata(buf, length, path_ool);
		}
		// getPath() may have shrunk 'length'; free with the original
		// kMaxPath allocation size.
		if (buf) {
			IODeleteData(buf, char, kMaxPath);
		}
	}

	return err;
}
3529 
3530 
3531 /* Routine io_registry_entry_get_name */
3532 kern_return_t
is_io_registry_entry_get_name(io_object_t registry_entry,io_name_t name)3533 is_io_registry_entry_get_name(
3534 	io_object_t registry_entry,
3535 	io_name_t name )
3536 {
3537 	CHECK( IORegistryEntry, registry_entry, entry );
3538 
3539 	strncpy( name, entry->getName(), sizeof(io_name_t));
3540 
3541 	return kIOReturnSuccess;
3542 }
3543 
3544 /* Routine io_registry_entry_get_name_in_plane */
3545 kern_return_t
is_io_registry_entry_get_name_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t name)3546 is_io_registry_entry_get_name_in_plane(
3547 	io_object_t registry_entry,
3548 	io_name_t planeName,
3549 	io_name_t name )
3550 {
3551 	const IORegistryPlane * plane;
3552 	CHECK( IORegistryEntry, registry_entry, entry );
3553 
3554 	if (planeName[0]) {
3555 		plane = IORegistryEntry::getPlane( planeName );
3556 	} else {
3557 		plane = NULL;
3558 	}
3559 
3560 	strncpy( name, entry->getName( plane), sizeof(io_name_t));
3561 
3562 	return kIOReturnSuccess;
3563 }
3564 
3565 /* Routine io_registry_entry_get_location_in_plane */
3566 kern_return_t
is_io_registry_entry_get_location_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t location)3567 is_io_registry_entry_get_location_in_plane(
3568 	io_object_t registry_entry,
3569 	io_name_t planeName,
3570 	io_name_t location )
3571 {
3572 	const IORegistryPlane * plane;
3573 	CHECK( IORegistryEntry, registry_entry, entry );
3574 
3575 	if (planeName[0]) {
3576 		plane = IORegistryEntry::getPlane( planeName );
3577 	} else {
3578 		plane = NULL;
3579 	}
3580 
3581 	const char * cstr = entry->getLocation( plane );
3582 
3583 	if (cstr) {
3584 		strncpy( location, cstr, sizeof(io_name_t));
3585 		return kIOReturnSuccess;
3586 	} else {
3587 		return kIOReturnNotFound;
3588 	}
3589 }
3590 
/* Routine io_registry_entry_get_registry_entry_id */
kern_return_t
is_io_registry_entry_get_registry_entry_id(
	io_object_t registry_entry,
	uint64_t *entry_id )
{
	// Validate and downcast the MIG object (declares 'entry').
	CHECK( IORegistryEntry, registry_entry, entry );

	*entry_id = entry->getRegistryEntryID();

	return kIOReturnSuccess;
}
3603 
3604 
3605 static OSObject *
IOCopyPropertyCompatible(IORegistryEntry * regEntry,const char * name)3606 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3607 {
3608 	OSObject     * obj;
3609 	OSObject     * compatProperties;
3610 	OSDictionary * props;
3611 
3612 	obj = regEntry->copyProperty(name);
3613 	if (obj) {
3614 		return obj;
3615 	}
3616 
3617 	compatProperties = regEntry->copyProperty(gIOUserServicePropertiesKey);
3618 	if (!compatProperties
3619 	    && IOTaskRegistryCompatibility(current_task())) {
3620 		compatProperties = regEntry->copyProperty(gIOCompatibilityPropertiesKey);
3621 	}
3622 	if (compatProperties) {
3623 		props = OSDynamicCast(OSDictionary, compatProperties);
3624 		if (props) {
3625 			obj = props->getObject(name);
3626 			if (obj) {
3627 				obj->retain();
3628 			}
3629 		}
3630 		compatProperties->release();
3631 	}
3632 
3633 	return obj;
3634 }
3635 
/* Routine io_registry_entry_get_property */
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject    *       obj;
	OSData      *       data;
	OSString    *       str;
	OSBoolean   *       boo;
	OSNumber    *       off;
	UInt64              offsetBytes;
	unsigned int        len = 0;
	const void *        bytes = NULL;
	IOReturn            ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// Per-property MAC policy check.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then...
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		// Non-serializable data is reported as zero length.
		if (!data->isSerializable()) {
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		// Include the terminating NUL.
		len = str->getLength() + 1;
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		// Booleans are returned as the literal strings "Yes" / "No".
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		// Numbers are returned as their raw in-memory bytes, clamped to 8.
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		// On big-endian, point at the low-order bytes of the 64-bit value.
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		// Other property types cannot be returned inband.
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		// Fail rather than truncate when the caller's buffer is too small.
		if (*dataCnt < len) {
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3708 
3709 
/* Routine io_registry_entry_get_property */
kern_return_t
is_io_registry_entry_get_property(
	io_object_t registry_entry,
	io_name_t property_name,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t       err;
	unsigned int        len;
	OSObject *          obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// Per-property MAC policy check.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	// Serialize the property and deliver the buffer out-of-line.
	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3754 
/* Routine io_registry_entry_get_property_recursively */
kern_return_t
is_io_registry_entry_get_property_recursively(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t       err;
	unsigned int        len;
	OSObject *          obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// Per-property MAC policy check.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	// copyProperty with plane + options performs the (possibly recursive)
	// registry search.
	obj = entry->copyProperty( property_name,
	    IORegistryEntry::getPlane( plane ), options );
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	// Serialize the property and deliver the buffer out-of-line.
	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3802 
/* Routine io_registry_entry_get_properties */
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// Not supported; the binary-serialization variants below
	// (io_registry_entry_get_properties_bin*) are the working interface.
	return kIOReturnUnsupported;
}
3812 
3813 #if CONFIG_MACF
3814 
// State shared with the GetPropertiesEditor serializer callback below.
struct GetPropertiesEditorRef {
	kauth_cred_t      cred;    // credential used for the per-property MAC check
	IORegistryEntry * entry;   // registry entry whose properties are serialized
	OSCollection    * root;    // top-level collection; only its direct keys are filtered
};
3820 
static const LIBKERN_RETURNS_RETAINED OSMetaClassBase *
GetPropertiesEditor(void                  * reference,
    OSSerialize           * s,
    OSCollection          * container,
    const OSSymbol        * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	// The first container seen is the root property table; remember it so
	// only top-level keys are subject to the MAC filter.
	if (!ref->root) {
		ref->root = container;
	}
	if (ref->root == container) {
		// Omit any top-level property the MAC policy denies.
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			value = NULL;
		}
	}
	// Editor contract: return a retained value, or NULL to drop the entry.
	if (value) {
		value->retain();
	}
	return value;
}
3843 
3844 #endif /* CONFIG_MACF */
3845 
/* Routine io_registry_entry_get_properties_bin_buf */
kern_return_t
is_io_registry_entry_get_properties_bin_buf(
	io_object_t registry_entry,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t          err = kIOReturnSuccess;
	unsigned int           len;
	OSObject             * compatProperties;
	OSSerialize          * s;
	OSSerialize::Editor    editor = NULL;
	void                 * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// If the MAC policy filters properties for this entry, install an
	// editor callback that omits denied top-level keys during serialization.
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor    = &GetPropertiesEditor;
		editRef   = &ref;
		ref.cred  = kauth_cred_get();
		ref.entry = entry;
		ref.root  = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}


	// Compatibility: if the entry carries a user-server (or, for tasks in
	// registry-compatibility mode, a compatibility) property dictionary,
	// merge it over the entry's own properties.
	compatProperties = entry->copyProperty(gIOUserServicePropertiesKey);
	if (!compatProperties
	    && IOTaskRegistryCompatibility(current_task())) {
		compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey);
	}

	if (compatProperties) {
		OSDictionary * dict;

		dict = entry->dictionaryWithProperties();
		if (!dict) {
			err = kIOReturnNoMemory;
		} else {
			// Hide the raw compatibility dictionaries themselves.
			dict->removeObject(gIOUserServicePropertiesKey);
			dict->removeObject(gIOCompatibilityPropertiesKey);
			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
			if (!dict->serialize(s)) {
				err = kIOReturnUnsupported;
			}
			dict->release();
		}
		compatProperties->release();
	} else if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		// Prefer the caller-supplied buffer when large enough; otherwise
		// fall back to an out-of-line reply via copyoutkdata().
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	}
	s->release();

	return err;
}
3930 
/* Routine io_registry_entry_get_properties_bin */
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	// Out-of-line-only variant: no caller-supplied buffer (buf = 0).
	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
	           0, NULL, properties, propertiesCnt);
}
3941 
3942 /* Routine io_registry_entry_get_property_bin_buf */
/*
 * Copy one property of a registry entry back to user space in binary
 * serialized form.
 *
 * If kIORegistryIterateRecursively is set (and a plane name is supplied),
 * the property is searched for on the entry and then on every entry reached
 * by iterating the named plane with the given options; the first match wins.
 * If `buf`/`bufsize` describe a caller-supplied buffer large enough for the
 * serialization, the data is copied out inline; otherwise it is returned as
 * vm-copied out-of-line data via `properties`/`propertiesCnt`.
 */
kern_return_t
is_io_registry_entry_get_property_bin_buf(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t       err;
	unsigned int        len;
	OSObject *          obj;
	const OSSymbol *    sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC hook vets access to the property on the starting entry up front.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	err = kIOReturnNotFound;
	if (gIORegistryEntryPropertyKeysKey == sym) {
		// Special key: return the list of property keys rather than a value.
		obj = entry->copyPropertyKeys();
	} else {
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			obj = IOCopyPropertyCompatible(entry, property_name);
			if (obj == NULL) {
				// Not on the entry itself: walk the plane looking for the
				// first descendant (per `options`) that has the property.
				// Note `entry` is re-bound to each iterated object here.
				IORegistryIterator * iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
				if (iter) {
					while ((NULL == obj) && (entry = iter->getNextObject())) {
						OSObject * currentObj = IOCopyPropertyCompatible(entry, property_name);
#if CONFIG_MACF
						if (currentObj != NULL && 0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
							// Record that MAC hook blocked this entry and property, and continue to next entry
							err = kIOReturnNotPermitted;
							OSSafeReleaseNULL(currentObj);
							continue;
						}
#endif
						obj = currentObj;
					}
					iter->release();
				}
			}
		} else {
			obj = IOCopyPropertyCompatible(entry, property_name);
		}
		// Some properties are destructive reads (e.g. one-shot diagnostics);
		// remove them once fetched.  `entry` may point at the matched
		// descendant here, but removal uses the same entry the value came from.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		// err is kIOReturnNotFound, or kIOReturnNotPermitted if MACF
		// blocked every candidate during the recursive walk.
		return err;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		if (buf && bufsize && len <= *bufsize) {
			// Fast path: fits in the caller's inline buffer; signal "no
			// out-of-line data" by zeroing the OOL out-parameters.
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			// Slow path: hand the serialization back as vm-copied data.
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
4042 
4043 /* Routine io_registry_entry_get_property_bin */
4044 kern_return_t
is_io_registry_entry_get_property_bin(io_object_t registry_entry,io_name_t plane,io_name_t property_name,uint32_t options,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)4045 is_io_registry_entry_get_property_bin(
4046 	io_object_t registry_entry,
4047 	io_name_t plane,
4048 	io_name_t property_name,
4049 	uint32_t options,
4050 	io_buf_ptr_t *properties,
4051 	mach_msg_type_number_t *propertiesCnt )
4052 {
4053 	return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
4054 	           property_name, options, 0, NULL, properties, propertiesCnt);
4055 }
4056 
4057 
4058 /* Routine io_registry_entry_set_properties */
/*
 * Unserialize a user-supplied XML property blob and apply it to a registry
 * entry via setProperties().
 *
 * `properties` arrives as a vm_map_copy_t produced by MIG; once
 * vm_map_copyout() succeeds this routine must return KERN_SUCCESS at the
 * MIG level, so the IOKit status is reported separately through `result`.
 * Entries may publish kIORegistryEntryAllowableSetPropertiesKey (an OSArray
 * of keys) to restrict which top-level keys user space may set.
 */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject *          obj;
	kern_return_t       err;
	IOReturn            res;
	vm_offset_t         data;
	vm_map_offset_t     map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Cap the serialized blob size to bound kernel allocation.
	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			IOService    * service = OSDynamicCast(IOService, entry);
			OSDictionary * props = OSDynamicCast(OSDictionary, obj);
			OSObject     * allowable = entry->copyProperty(gIORegistryEntryAllowableSetPropertiesKey);
			OSArray      * allowableArray;

			if (!allowable) {
				// No allow-list published: anything goes.
				res = kIOReturnSuccess;
			} else {
				// With an allow-list, the payload must be a dictionary and
				// every one of its keys must appear in the (OSArray) list.
				if (!props) {
					res = kIOReturnNotPermitted;
				} else if (!(allowableArray = OSDynamicCast(OSArray, allowable))) {
					res = kIOReturnNotPermitted;
				} else {
					bool allFound __block, found __block;

					allFound = true;
					props->iterateObjects(^(const OSSymbol * key, OSObject * value) {
							found = false;
							for (unsigned int idx = 0; !found; idx++) {
							        OSObject * next = allowableArray->getObject(idx);
							        if (!next) {
							                break;
								}
							        found = next->isEqualTo(key);
							}
							allFound &= found;
							if (!found) {
							        IOLog("IORegistryEntrySetProperties(%s, %s) disallowed due to " kIORegistryEntryAllowableSetPropertiesKey "\n",
							        entry->getName(), key->getCStringNoCopy());
							}
							// Returning true stops iteration on first miss.
							return !allFound;
						});
					res =  allFound ? kIOReturnSuccess : kIOReturnBadArgument;
				}
			}
			if (kIOReturnSuccess == res) {
				IOUserClient *
				    client = OSDynamicCast(IOUserClient, entry);

				// User clients that opted into default locking take the
				// write lock around setProperties().
				if (client && client->defaultLockingSetProperties) {
					IORWLockWrite(&client->lock);
				}

				// Non-user-client entries may request serialization through
				// the property action block instead.
				if (!client && (kOSBooleanTrue == entry->getProperty(gIORegistryEntryDefaultLockingSetPropertiesKey))) {
					res = entry->runPropertyActionBlock(^IOReturn (void) {
							return entry->setProperties( obj );
						});
				} else {
					res = entry->setProperties( obj );
				}

				if (client && client->defaultLockingSetProperties) {
					IORWLockUnlock(&client->lock);
				}
				// DriverKit services additionally get the dictionary
				// forwarded to their user-space implementation.
				if (service && props && service->hasUserServer()) {
					res = service->UserSetProperties(props);
				}
			}
			OSSafeReleaseNULL(allowable);
		}
		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	*result = res;
	return err;
}
4171 
4172 /* Routine io_registry_entry_get_child_iterator */
4173 kern_return_t
is_io_registry_entry_get_child_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4174 is_io_registry_entry_get_child_iterator(
4175 	io_object_t registry_entry,
4176 	io_name_t plane,
4177 	io_object_t *iterator )
4178 {
4179 	CHECK( IORegistryEntry, registry_entry, entry );
4180 
4181 	*iterator = IOUserIterator::withIterator(entry->getChildIterator(
4182 		    IORegistryEntry::getPlane( plane )));
4183 
4184 	return kIOReturnSuccess;
4185 }
4186 
4187 /* Routine io_registry_entry_get_parent_iterator */
4188 kern_return_t
is_io_registry_entry_get_parent_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4189 is_io_registry_entry_get_parent_iterator(
4190 	io_object_t registry_entry,
4191 	io_name_t plane,
4192 	io_object_t *iterator)
4193 {
4194 	CHECK( IORegistryEntry, registry_entry, entry );
4195 
4196 	*iterator = IOUserIterator::withIterator(entry->getParentIterator(
4197 		    IORegistryEntry::getPlane( plane )));
4198 
4199 	return kIOReturnSuccess;
4200 }
4201 
4202 /* Routine io_service_get_busy_state */
4203 kern_return_t
is_io_service_get_busy_state(io_object_t _service,uint32_t * busyState)4204 is_io_service_get_busy_state(
4205 	io_object_t _service,
4206 	uint32_t *busyState )
4207 {
4208 	CHECK( IOService, _service, service );
4209 
4210 	*busyState = service->getBusyState();
4211 
4212 	return kIOReturnSuccess;
4213 }
4214 
4215 /* Routine io_service_get_state */
4216 kern_return_t
is_io_service_get_state(io_object_t _service,uint64_t * state,uint32_t * busy_state,uint64_t * accumulated_busy_time)4217 is_io_service_get_state(
4218 	io_object_t _service,
4219 	uint64_t *state,
4220 	uint32_t *busy_state,
4221 	uint64_t *accumulated_busy_time )
4222 {
4223 	CHECK( IOService, _service, service );
4224 
4225 	*state                 = service->getState();
4226 	*busy_state            = service->getBusyState();
4227 	*accumulated_busy_time = service->getAccumulatedBusyTime();
4228 
4229 	return kIOReturnSuccess;
4230 }
4231 
4232 /* Routine io_service_wait_quiet */
4233 kern_return_t
is_io_service_wait_quiet(io_object_t _service,mach_timespec_t wait_time)4234 is_io_service_wait_quiet(
4235 	io_object_t _service,
4236 	mach_timespec_t wait_time )
4237 {
4238 	uint64_t    timeoutNS;
4239 
4240 	CHECK( IOService, _service, service );
4241 
4242 	timeoutNS = wait_time.tv_sec;
4243 	timeoutNS *= kSecondScale;
4244 	timeoutNS += wait_time.tv_nsec;
4245 
4246 	return service->waitQuiet(timeoutNS);
4247 }
4248 
4249 /* Routine io_service_wait_quiet_with_options */
4250 kern_return_t
is_io_service_wait_quiet_with_options(io_object_t _service,mach_timespec_t wait_time,uint32_t options)4251 is_io_service_wait_quiet_with_options(
4252 	io_object_t _service,
4253 	mach_timespec_t wait_time,
4254 	uint32_t options )
4255 {
4256 	uint64_t    timeoutNS;
4257 
4258 	CHECK( IOService, _service, service );
4259 
4260 	timeoutNS = wait_time.tv_sec;
4261 	timeoutNS *= kSecondScale;
4262 	timeoutNS += wait_time.tv_nsec;
4263 
4264 	if ((options & kIOWaitQuietPanicOnFailure) && !IOCurrentTaskHasEntitlement(kIOWaitQuietPanicsEntitlement)) {
4265 		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4266 		IOLog("IOServiceWaitQuietWithOptions(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4267 		OSSafeReleaseNULL(taskName);
4268 
4269 		/* strip this option from the options before calling waitQuietWithOptions */
4270 		options &= ~kIOWaitQuietPanicOnFailure;
4271 	}
4272 
4273 	return service->waitQuietWithOptions(timeoutNS, options);
4274 }
4275 
4276 
4277 /* Routine io_service_request_probe */
4278 kern_return_t
is_io_service_request_probe(io_object_t _service,uint32_t options)4279 is_io_service_request_probe(
4280 	io_object_t _service,
4281 	uint32_t options )
4282 {
4283 	CHECK( IOService, _service, service );
4284 
4285 	return service->requestProbe( options );
4286 }
4287 
4288 /* Routine io_service_get_authorization_id */
4289 kern_return_t
is_io_service_get_authorization_id(io_object_t _service,uint64_t * authorization_id)4290 is_io_service_get_authorization_id(
4291 	io_object_t _service,
4292 	uint64_t *authorization_id )
4293 {
4294 	kern_return_t          kr;
4295 
4296 	CHECK( IOService, _service, service );
4297 
4298 	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
4299 	    kIOClientPrivilegeAdministrator );
4300 	if (kIOReturnSuccess != kr) {
4301 		return kr;
4302 	}
4303 
4304 #if defined(XNU_TARGET_OS_OSX)
4305 	*authorization_id = service->getAuthorizationID();
4306 #else /* defined(XNU_TARGET_OS_OSX) */
4307 	*authorization_id = 0;
4308 	kr = kIOReturnUnsupported;
4309 #endif /* defined(XNU_TARGET_OS_OSX) */
4310 
4311 	return kr;
4312 }
4313 
4314 /* Routine io_service_set_authorization_id */
4315 kern_return_t
is_io_service_set_authorization_id(io_object_t _service,uint64_t authorization_id)4316 is_io_service_set_authorization_id(
4317 	io_object_t _service,
4318 	uint64_t authorization_id )
4319 {
4320 	CHECK( IOService, _service, service );
4321 
4322 #if defined(XNU_TARGET_OS_OSX)
4323 	return service->setAuthorizationID( authorization_id );
4324 #else /* defined(XNU_TARGET_OS_OSX) */
4325 	return kIOReturnUnsupported;
4326 #endif /* defined(XNU_TARGET_OS_OSX) */
4327 }
4328 
4329 /* Routine io_service_open_ndr */
4330 kern_return_t
is_io_service_open_extended(io_object_t _service,task_t owningTask,uint32_t connect_type,NDR_record_t ndr,io_buf_ptr_t properties,mach_msg_type_number_t propertiesCnt,kern_return_t * result,io_object_t * connection)4331 is_io_service_open_extended(
4332 	io_object_t _service,
4333 	task_t owningTask,
4334 	uint32_t connect_type,
4335 	NDR_record_t ndr,
4336 	io_buf_ptr_t properties,
4337 	mach_msg_type_number_t propertiesCnt,
4338 	kern_return_t * result,
4339 	io_object_t *connection )
4340 {
4341 	IOUserClient * client = NULL;
4342 	kern_return_t  err = KERN_SUCCESS;
4343 	IOReturn       res = kIOReturnSuccess;
4344 	OSDictionary * propertiesDict = NULL;
4345 	bool           disallowAccess = false;
4346 
4347 	CHECK( IOService, _service, service );
4348 
4349 	if (!owningTask) {
4350 		return kIOReturnBadArgument;
4351 	}
4352 	assert(owningTask == current_task());
4353 	if (owningTask != current_task()) {
4354 		return kIOReturnBadArgument;
4355 	}
4356 
4357 #if CONFIG_MACF
4358 	if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
4359 		return kIOReturnNotPermitted;
4360 	}
4361 #endif
4362 	do{
4363 		if (properties) {
4364 			return kIOReturnUnsupported;
4365 		}
4366 #if 0
4367 		{
4368 			OSObject *      obj;
4369 			vm_offset_t     data;
4370 			vm_map_offset_t map_data;
4371 
4372 			if (propertiesCnt > sizeof(io_struct_inband_t)) {
4373 				return kIOReturnMessageTooLarge;
4374 			}
4375 
4376 			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
4377 			res = err;
4378 			data = CAST_DOWN(vm_offset_t, map_data);
4379 			if (KERN_SUCCESS == err) {
4380 				// must return success after vm_map_copyout() succeeds
4381 				obj = OSUnserializeXML((const char *) data, propertiesCnt );
4382 				vm_deallocate( kernel_map, data, propertiesCnt );
4383 				propertiesDict = OSDynamicCast(OSDictionary, obj);
4384 				if (!propertiesDict) {
4385 					res = kIOReturnBadArgument;
4386 					if (obj) {
4387 						obj->release();
4388 					}
4389 				}
4390 			}
4391 			if (kIOReturnSuccess != res) {
4392 				break;
4393 			}
4394 		}
4395 #endif
4396 		res = service->newUserClient( owningTask, (void *) owningTask,
4397 		    connect_type, propertiesDict, &client );
4398 
4399 		if (propertiesDict) {
4400 			propertiesDict->release();
4401 		}
4402 
4403 		if (res == kIOReturnSuccess && OSDynamicCast(IOUserClient, client) == NULL) {
4404 			// client should always be a IOUserClient
4405 			res = kIOReturnError;
4406 		}
4407 
4408 		if (res == kIOReturnSuccess) {
4409 			if (!client->reserved) {
4410 				if (!client->reserve()) {
4411 					client->clientClose();
4412 					OSSafeReleaseNULL(client);
4413 					res = kIOReturnNoMemory;
4414 				}
4415 			}
4416 		}
4417 
4418 		if (res == kIOReturnSuccess) {
4419 			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
4420 			if (creatorName) {
4421 				client->setProperty(kIOUserClientCreatorKey, creatorName);
4422 			}
4423 			const char * creatorNameCStr = creatorName ? creatorName->getCStringNoCopy() : "<unknown>";
4424 			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
4425 			if (client->sharedInstance) {
4426 				IOLockLock(gIOUserClientOwnersLock);
4427 			}
4428 			if (!client->opened) {
4429 				client->opened = true;
4430 
4431 				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
4432 				{
4433 					OSObject * obj;
4434 					extern const OSSymbol * gIOSurfaceIdentifier;
4435 					obj = client->getProperty(kIOUserClientDefaultLockingKey);
4436 					bool hasProps = false;
4437 
4438 					client->uc2022 = (NULL != OSDynamicCast(IOUserClient2022, client));
4439 					if (obj) {
4440 						hasProps = true;
4441 						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
4442 					} else if (client->uc2022) {
4443 						res = kIOReturnError;
4444 					}
4445 					obj = client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey);
4446 					if (obj) {
4447 						hasProps = true;
4448 						client->defaultLockingSetProperties = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey));
4449 					} else if (client->uc2022) {
4450 						res = kIOReturnError;
4451 					}
4452 					obj = client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey);
4453 					if (obj) {
4454 						hasProps = true;
4455 						client->defaultLockingSingleThreadExternalMethod = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey));
4456 					} else if (client->uc2022) {
4457 						res = kIOReturnError;
4458 					}
4459 					if (kIOReturnSuccess != res) {
4460 						IOLog("IOUC %s requires kIOUserClientDefaultLockingKey, kIOUserClientDefaultLockingSetPropertiesKey, kIOUserClientDefaultLockingSingleThreadExternalMethodKey\n",
4461 						    client->getMetaClass()->getClassName());
4462 					}
4463 					if (!hasProps) {
4464 						const OSMetaClass * meta;
4465 						OSKext            * kext;
4466 						meta = client->getMetaClass();
4467 						kext = meta->getKext();
4468 						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
4469 							client->defaultLocking = true;
4470 							client->defaultLockingSetProperties = false;
4471 							client->defaultLockingSingleThreadExternalMethod = false;
4472 							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
4473 						}
4474 					}
4475 				}
4476 			}
4477 			if (client->sharedInstance) {
4478 				IOLockUnlock(gIOUserClientOwnersLock);
4479 			}
4480 
4481 			OSObject     * requiredEntitlement = client->copyProperty(gIOUserClientEntitlementsKey);
4482 			OSString * requiredEntitlementString = OSDynamicCast(OSString, requiredEntitlement);
4483 			//If this is an IOUserClient2022, having kIOUserClientEntitlementsKey is mandatory.
4484 			//If it has kIOUserClientEntitlementsKey, the value must be either kOSBooleanFalse or an OSString
4485 			//If the value is kOSBooleanFalse, we allow access.
4486 			//If the value is an OSString, we allow access if the task has the named entitlement
4487 			if (client->uc2022) {
4488 				if (!requiredEntitlement) {
4489 					IOLog("IOUC %s missing " kIOUserClientEntitlementsKey " property\n",
4490 					    client->getMetaClass()->getClassName());
4491 					disallowAccess = true;
4492 				} else if (!requiredEntitlementString && requiredEntitlement != kOSBooleanFalse) {
4493 					IOLog("IOUC %s had " kIOUserClientEntitlementsKey "with value not boolean false or string\n", client->getMetaClass()->getClassName());
4494 					disallowAccess = true;
4495 				}
4496 			}
4497 
4498 			if (requiredEntitlement && disallowAccess == false) {
4499 				if (kOSBooleanFalse == requiredEntitlement) {
4500 					// allow
4501 					disallowAccess = false;
4502 				} else {
4503 					disallowAccess = !IOTaskHasEntitlement(owningTask, requiredEntitlementString->getCStringNoCopy());
4504 					if (disallowAccess) {
4505 						IOLog("IOUC %s missing entitlement in process %s\n",
4506 						    client->getMetaClass()->getClassName(), creatorNameCStr);
4507 					}
4508 				}
4509 			}
4510 
4511 			OSSafeReleaseNULL(requiredEntitlement);
4512 
4513 			if (disallowAccess) {
4514 				res = kIOReturnNotPrivileged;
4515 			}
4516 #if CONFIG_MACF
4517 			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
4518 				IOLog("IOUC %s failed MACF in process %s\n",
4519 				    client->getMetaClass()->getClassName(), creatorNameCStr);
4520 				res = kIOReturnNotPermitted;
4521 			}
4522 #endif
4523 
4524 			if ((kIOReturnSuccess == res)
4525 			    && gIOUCFilterCallbacks
4526 			    && gIOUCFilterCallbacks->io_filter_resolver) {
4527 				io_filter_policy_t filterPolicy;
4528 				filterPolicy = client->filterForTask(owningTask, 0);
4529 				if (!filterPolicy) {
4530 					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
4531 					if (kIOReturnUnsupported == res) {
4532 						res = kIOReturnSuccess;
4533 					} else if (kIOReturnSuccess == res) {
4534 						client->filterForTask(owningTask, filterPolicy);
4535 					} else {
4536 						IOLog("IOUC %s failed sandbox in process %s\n",
4537 						    client->getMetaClass()->getClassName(), creatorNameCStr);
4538 					}
4539 				}
4540 			}
4541 
4542 			if (kIOReturnSuccess == res) {
4543 				res = client->registerOwner(owningTask);
4544 			}
4545 			OSSafeReleaseNULL(creatorName);
4546 
4547 			if (kIOReturnSuccess != res) {
4548 				IOStatisticsClientCall();
4549 				client->clientClose();
4550 				client->setTerminateDefer(service, false);
4551 				client->release();
4552 				client = NULL;
4553 				break;
4554 			}
4555 			client->setTerminateDefer(service, false);
4556 		}
4557 	}while (false);
4558 
4559 	*connection = client;
4560 	*result = res;
4561 
4562 	return err;
4563 }
4564 
4565 /* Routine io_service_close */
/*
 * Close a connection.  The clientClose() call is delivered at most once per
 * non-shared client: the `closed` byte is flipped 0 -> 1 atomically, and a
 * repeat close is logged and ignored.  Shared-instance clients may be closed
 * from multiple connections, so they bypass the once-only check.
 */
kern_return_t
is_io_service_close(
	io_connect_t connection )
{
	OSSet * mappings;
	// A dead connection port can resolve to the client's mappings set
	// instead of the client itself; treat that as an already-closed no-op.
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		// Deliver clientClose() under the IPC write guard.
		client->ipcEnter(kIPCLockWrite);
		client->clientClose();
		client->ipcExit(kIPCLockWrite);
	} else {
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
4590 
4591 /* Routine io_connect_get_service */
4592 kern_return_t
is_io_connect_get_service(io_connect_t connection,io_object_t * service)4593 is_io_connect_get_service(
4594 	io_connect_t connection,
4595 	io_object_t *service )
4596 {
4597 	IOService * theService;
4598 
4599 	CHECK( IOUserClient, connection, client );
4600 
4601 	client->ipcEnter(kIPCLockNone);
4602 
4603 	theService = client->getService();
4604 	if (theService) {
4605 		theService->retain();
4606 	}
4607 
4608 	client->ipcExit(kIPCLockNone);
4609 
4610 	*service = theService;
4611 
4612 	return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4613 }
4614 
4615 /* Routine io_connect_set_notification_port */
4616 kern_return_t
is_io_connect_set_notification_port(io_connect_t connection,uint32_t notification_type,mach_port_t port,uint32_t reference)4617 is_io_connect_set_notification_port(
4618 	io_connect_t connection,
4619 	uint32_t notification_type,
4620 	mach_port_t port,
4621 	uint32_t reference)
4622 {
4623 	kern_return_t ret;
4624 	CHECK( IOUserClient, connection, client );
4625 
4626 	IOStatisticsClientCall();
4627 
4628 	client->ipcEnter(kIPCLockWrite);
4629 	ret = client->registerNotificationPort( port, notification_type,
4630 	    (io_user_reference_t) reference );
4631 	client->ipcExit(kIPCLockWrite);
4632 
4633 	return ret;
4634 }
4635 
4636 /* Routine io_connect_set_notification_port */
4637 kern_return_t
is_io_connect_set_notification_port_64(io_connect_t connection,uint32_t notification_type,mach_port_t port,io_user_reference_t reference)4638 is_io_connect_set_notification_port_64(
4639 	io_connect_t connection,
4640 	uint32_t notification_type,
4641 	mach_port_t port,
4642 	io_user_reference_t reference)
4643 {
4644 	kern_return_t ret;
4645 	CHECK( IOUserClient, connection, client );
4646 
4647 	IOStatisticsClientCall();
4648 
4649 	client->ipcEnter(kIPCLockWrite);
4650 	ret = client->registerNotificationPort( port, notification_type,
4651 	    reference );
4652 	client->ipcExit(kIPCLockWrite);
4653 
4654 	return ret;
4655 }
4656 
4657 /* Routine io_connect_map_memory_into_task */
/*
 * Map a client memory type into `into_task` and report the resulting
 * address/size.  Ownership of the IOMemoryMap reference is transferred
 * either to a Mach send right in the target task (shared instances or
 * cross-task maps) or to the client's `mappings` set (same-task maps),
 * which is what later allows unmap to find it.
 */
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
)
{
	IOReturn            err;
	IOMemoryMap *       map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();

	// Write-guard only for clients that opted into default locking.
	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			// The port now holds the reference; drop ours.
			map->release();
		} else {
			// keep it with the user client
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				// The set retains the map; drop our reference below.
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);

	return err;
}
4718 
4719 /* Routine is_io_connect_map_memory */
4720 kern_return_t
is_io_connect_map_memory(io_object_t connect,uint32_t type,task_t task,uint32_t * mapAddr,uint32_t * mapSize,uint32_t flags)4721 is_io_connect_map_memory(
4722 	io_object_t     connect,
4723 	uint32_t        type,
4724 	task_t          task,
4725 	uint32_t  *     mapAddr,
4726 	uint32_t  *     mapSize,
4727 	uint32_t        flags )
4728 {
4729 	IOReturn          err;
4730 	mach_vm_address_t address;
4731 	mach_vm_size_t    size;
4732 
4733 	address = SCALAR64(*mapAddr);
4734 	size    = SCALAR64(*mapSize);
4735 
4736 	err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4737 
4738 	*mapAddr = SCALAR32(address);
4739 	*mapSize = SCALAR32(size);
4740 
4741 	return err;
4742 }
4743 } /* extern "C" */
4744 
4745 IOMemoryMap *
removeMappingForDescriptor(IOMemoryDescriptor * mem)4746 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4747 {
4748 	OSIterator *  iter;
4749 	IOMemoryMap * map = NULL;
4750 
4751 	IOLockLock(gIOObjectPortLock);
4752 
4753 	iter = OSCollectionIterator::withCollection(mappings);
4754 	if (iter) {
4755 		while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4756 			if (mem == map->getMemoryDescriptor()) {
4757 				map->retain();
4758 				mappings->removeObject(map);
4759 				break;
4760 			}
4761 		}
4762 		iter->release();
4763 	}
4764 
4765 	IOLockUnlock(gIOObjectPortLock);
4766 
4767 	return map;
4768 }
4769 
4770 extern "C" {
4771 /* Routine io_connect_unmap_memory_from_task */
/*
 * Unmap a client memory type from `from_task` at `address`.  The existing
 * mapping is re-found by creating a kIOMapReference mapping at the same
 * address, then the corresponding send right in the task is decremented so
 * the map (and its port) go away.  Mirrors the ownership scheme set up by
 * is_io_connect_map_memory_into_task.
 */
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn            err;
	IOOptionBits        options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap *       map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();

	// Write-guard only for clients that opted into default locking.
	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		// kIOMapReference looks up the existing mapping at `address`
		// rather than creating a new one.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			// Cross-task and shared-instance maps carry their reference in
			// a Mach port in the owning task (see the map path).
			bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance;
			if (is_shared_instance_or_from_current_task) {
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				map->userClientUnmap();
				// Drop the right we just made plus the original one.
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (!is_shared_instance_or_from_current_task) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);

	return err;
}
4835 
4836 kern_return_t
is_io_connect_unmap_memory(io_object_t connect,uint32_t type,task_t task,uint32_t mapAddr)4837 is_io_connect_unmap_memory(
4838 	io_object_t     connect,
4839 	uint32_t        type,
4840 	task_t          task,
4841 	uint32_t        mapAddr )
4842 {
4843 	IOReturn            err;
4844 	mach_vm_address_t   address;
4845 
4846 	address = SCALAR64(mapAddr);
4847 
4848 	err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4849 
4850 	return err;
4851 }
4852 
4853 
4854 /* Routine io_connect_add_client */
4855 kern_return_t
is_io_connect_add_client(io_connect_t connection,io_object_t connect_to)4856 is_io_connect_add_client(
4857 	io_connect_t connection,
4858 	io_object_t connect_to)
4859 {
4860 	CHECK( IOUserClient, connection, client );
4861 	CHECK( IOUserClient, connect_to, to );
4862 
4863 	IOReturn ret;
4864 
4865 	IOStatisticsClientCall();
4866 
4867 	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4868 	ret = client->connectClient( to );
4869 	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4870 
4871 	return ret;
4872 }
4873 
4874 
4875 /* Routine io_connect_set_properties */
4876 kern_return_t
is_io_connect_set_properties(io_connect_t connection,io_buf_ptr_t properties,mach_msg_type_number_t propertiesCnt,kern_return_t * result)4877 is_io_connect_set_properties(
4878 	io_connect_t connection,
4879 	io_buf_ptr_t properties,
4880 	mach_msg_type_number_t propertiesCnt,
4881 	kern_return_t * result)
4882 {
4883 	return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4884 }
4885 
/* Routine io_user_client_method */
// Dispatch an external method on an IOUserClient when the caller expects a
// variable-sized structure output.  Fixed inputs may arrive inband or
// out-of-line; the variable output is produced by the method as an
// OSSerialize or OSData object and copied out to the caller.
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
)
{
	// Validates that 'connection' is an IOUserClient and declares 'client';
	// returns on failure.
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD  = NULL;
	OSObject *           structureVariableOutputData = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Synchronous invocation: no async completion port or references.
	args.asyncWakePort               = MACH_PORT_NULL;
	args.asyncReference              = NULL;
	args.asyncReferenceCount         = 0;
	// The method deposits its variable-sized result object here.
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// An OOL input small enough to have fit inband indicates a hand-crafted
	// (non-MIG) message; reject it.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		// Copy-on-write mapping of the caller's buffer; kIODirectionOut
		// means data flows out of the caller's task into the kernel.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Pre-zero the reply scalars so unused slots never leak kernel memory.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	// No fixed-size OOL output in this variant.
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;

	// Let any installed filter policy veto the selector before dispatch.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}

	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod(selector, &args);
	}

	// Report how much the method actually produced.
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData      * data;
		unsigned int  len;

		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			// Copy the serialized bytes out to a caller-owned buffer.
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			data->clipForCopyout();
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			// Method succeeded but produced no variable output object.
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
4992 
/* Routine io_user_client_method */
// Dispatch an external method on an IOUserClient with fixed-size outputs.
// Scalar and structure inputs/outputs may be inband; large buffers arrive
// out-of-line and are wrapped in IOMemoryDescriptors for the method.
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
)
{
	// Validates that 'connection' is an IOUserClient and declares 'client';
	// returns on failure.
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD  = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Synchronous invocation: no async completion port or references.
	args.asyncWakePort               = MACH_PORT_NULL;
	args.asyncReference              = NULL;
	args.asyncReferenceCount         = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// OOL buffers small enough to have been sent inband indicate a
	// hand-crafted (non-MIG) message; reject them.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		// The descriptor size field is 32-bit (see the cast below), so
		// larger requests cannot be represented.
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		// Copy-on-write mapping of the caller's buffer; kIODirectionOut
		// means data flows out of the caller's task into the kernel.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Pre-zero the reply scalars so unused slots never leak kernel memory.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		// kIODirectionIn: the method writes into the caller's buffer.
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size
	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
	    : 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	// Let any installed filter policy veto the selector before dispatch.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	// Report how much the method actually produced.
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size  = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5095 
5096 /* Routine io_async_user_client_method */
5097 kern_return_t
is_io_connect_async_method(io_connect_t connection,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,uint32_t selector,io_scalar_inband64_t scalar_input,mach_msg_type_number_t scalar_inputCnt,io_struct_inband_t inband_input,mach_msg_type_number_t inband_inputCnt,mach_vm_address_t ool_input,mach_vm_size_t ool_input_size,io_struct_inband_t inband_output,mach_msg_type_number_t * inband_outputCnt,io_scalar_inband64_t scalar_output,mach_msg_type_number_t * scalar_outputCnt,mach_vm_address_t ool_output,mach_vm_size_t * ool_output_size)5098 is_io_connect_async_method
5099 (
5100 	io_connect_t connection,
5101 	mach_port_t wake_port,
5102 	io_async_ref64_t reference,
5103 	mach_msg_type_number_t referenceCnt,
5104 	uint32_t selector,
5105 	io_scalar_inband64_t scalar_input,
5106 	mach_msg_type_number_t scalar_inputCnt,
5107 	io_struct_inband_t inband_input,
5108 	mach_msg_type_number_t inband_inputCnt,
5109 	mach_vm_address_t ool_input,
5110 	mach_vm_size_t ool_input_size,
5111 	io_struct_inband_t inband_output,
5112 	mach_msg_type_number_t *inband_outputCnt,
5113 	io_scalar_inband64_t scalar_output,
5114 	mach_msg_type_number_t *scalar_outputCnt,
5115 	mach_vm_address_t ool_output,
5116 	mach_vm_size_t * ool_output_size
5117 )
5118 {
5119 	CHECK( IOUserClient, connection, client );
5120 
5121 	IOExternalMethodArguments args;
5122 	IOReturn ret;
5123 	IOMemoryDescriptor * inputMD  = NULL;
5124 	IOMemoryDescriptor * outputMD = NULL;
5125 
5126 	if (referenceCnt < 1) {
5127 		return kIOReturnBadArgument;
5128 	}
5129 
5130 	bzero(&args.__reserved[0], sizeof(args.__reserved));
5131 	args.__reservedA = 0;
5132 	args.version = kIOExternalMethodArgumentsCurrentVersion;
5133 
5134 	reference[0]             = (io_user_reference_t) wake_port;
5135 	if (vm_map_is_64bit(get_task_map(current_task()))) {
5136 		reference[0]         |= kIOUCAsync64Flag;
5137 	}
5138 
5139 	args.selector = selector;
5140 
5141 	args.asyncWakePort       = wake_port;
5142 	args.asyncReference      = reference;
5143 	args.asyncReferenceCount = referenceCnt;
5144 
5145 	args.structureVariableOutputData = NULL;
5146 
5147 	args.scalarInput = scalar_input;
5148 	args.scalarInputCount = scalar_inputCnt;
5149 	args.structureInput = inband_input;
5150 	args.structureInputSize = inband_inputCnt;
5151 
5152 	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
5153 		return kIOReturnIPCError;
5154 	}
5155 	if (ool_output) {
5156 		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
5157 			return kIOReturnIPCError;
5158 		}
5159 		if (*ool_output_size > UINT_MAX) {
5160 			return kIOReturnIPCError;
5161 		}
5162 	}
5163 
5164 	if (ool_input) {
5165 		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
5166 		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
5167 		    current_task());
5168 	}
5169 
5170 	args.structureInputDescriptor = inputMD;
5171 
5172 	args.scalarOutput = scalar_output;
5173 	args.scalarOutputCount = *scalar_outputCnt;
5174 	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
5175 	args.structureOutput = inband_output;
5176 	args.structureOutputSize = *inband_outputCnt;
5177 
5178 	if (ool_output) {
5179 		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
5180 		    kIODirectionIn, current_task());
5181 	}
5182 
5183 	args.structureOutputDescriptor = outputMD;
5184 	args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);
5185 
5186 	IOStatisticsClientCall();
5187 	ret = kIOReturnSuccess;
5188 	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
5189 	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
5190 		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_async_method, selector);
5191 	}
5192 	if (kIOReturnSuccess == ret) {
5193 		ret = client->callExternalMethod( selector, &args );
5194 	}
5195 
5196 	*scalar_outputCnt = args.scalarOutputCount;
5197 	*inband_outputCnt = args.structureOutputSize;
5198 	*ool_output_size  = args.structureOutputDescriptorSize;
5199 
5200 	if (inputMD) {
5201 		inputMD->release();
5202 	}
5203 	if (outputMD) {
5204 		outputMD->release();
5205 	}
5206 
5207 	return ret;
5208 }
5209 
5210 /* Routine io_connect_method_scalarI_scalarO */
5211 kern_return_t
is_io_connect_method_scalarI_scalarO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5212 is_io_connect_method_scalarI_scalarO(
5213 	io_object_t        connect,
5214 	uint32_t           index,
5215 	io_scalar_inband_t       input,
5216 	mach_msg_type_number_t   inputCount,
5217 	io_scalar_inband_t       output,
5218 	mach_msg_type_number_t * outputCount )
5219 {
5220 	IOReturn err;
5221 	uint32_t i;
5222 	io_scalar_inband64_t _input;
5223 	io_scalar_inband64_t _output;
5224 
5225 	mach_msg_type_number_t struct_outputCnt = 0;
5226 	mach_vm_size_t ool_output_size = 0;
5227 
5228 	bzero(&_output[0], sizeof(_output));
5229 	for (i = 0; i < inputCount; i++) {
5230 		_input[i] = SCALAR64(input[i]);
5231 	}
5232 
5233 	err = is_io_connect_method(connect, index,
5234 	    _input, inputCount,
5235 	    NULL, 0,
5236 	    0, 0,
5237 	    NULL, &struct_outputCnt,
5238 	    _output, outputCount,
5239 	    0, &ool_output_size);
5240 
5241 	for (i = 0; i < *outputCount; i++) {
5242 		output[i] = SCALAR32(_output[i]);
5243 	}
5244 
5245 	return err;
5246 }
5247 
// Shim that invokes a legacy IOExternalMethod (IOMethod member-function
// pointer) with up to six 32-bit scalar inputs and outputs.  The legacy ABI
// always passes six arguments: the first 'inputCount' slots carry inputs,
// the remaining slots receive pointers to output storage.
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod *      method,
	IOService *             object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t   inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOMethod            func;
	io_scalar_inband_t  _output;
	IOReturn            err;
	err = kIOReturnBadArgument;

	// Pre-zero output slots so short replies never leak stack contents.
	bzero(&_output[0], sizeof(_output));
	// do { } while (false): 'continue' acts as an early exit that still
	// falls through to the common output-narrowing loop below.
	do {
		// Both counts must exactly match the method table entry.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Dispatch on the number of inputs; unused trailing parameters
		// become pointers into _output.
		switch (inputCount) {
		case 6:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)(  ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)(  &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	// Narrow the (possibly zeroed) output scalars for the caller.
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5323 
5324 /* Routine io_async_method_scalarI_scalarO */
5325 kern_return_t
is_io_async_method_scalarI_scalarO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5326 is_io_async_method_scalarI_scalarO(
5327 	io_object_t        connect,
5328 	mach_port_t wake_port,
5329 	io_async_ref_t reference,
5330 	mach_msg_type_number_t referenceCnt,
5331 	uint32_t           index,
5332 	io_scalar_inband_t       input,
5333 	mach_msg_type_number_t   inputCount,
5334 	io_scalar_inband_t       output,
5335 	mach_msg_type_number_t * outputCount )
5336 {
5337 	IOReturn err;
5338 	uint32_t i;
5339 	io_scalar_inband64_t _input;
5340 	io_scalar_inband64_t _output;
5341 	io_async_ref64_t _reference;
5342 
5343 	if (referenceCnt > ASYNC_REF64_COUNT) {
5344 		return kIOReturnBadArgument;
5345 	}
5346 	bzero(&_output[0], sizeof(_output));
5347 	for (i = 0; i < referenceCnt; i++) {
5348 		_reference[i] = REF64(reference[i]);
5349 	}
5350 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5351 
5352 	mach_msg_type_number_t struct_outputCnt = 0;
5353 	mach_vm_size_t ool_output_size = 0;
5354 
5355 	for (i = 0; i < inputCount; i++) {
5356 		_input[i] = SCALAR64(input[i]);
5357 	}
5358 
5359 	err = is_io_connect_async_method(connect,
5360 	    wake_port, _reference, referenceCnt,
5361 	    index,
5362 	    _input, inputCount,
5363 	    NULL, 0,
5364 	    0, 0,
5365 	    NULL, &struct_outputCnt,
5366 	    _output, outputCount,
5367 	    0, &ool_output_size);
5368 
5369 	for (i = 0; i < *outputCount; i++) {
5370 		output[i] = SCALAR32(_output[i]);
5371 	}
5372 
5373 	return err;
5374 }
5375 /* Routine io_async_method_scalarI_structureO */
5376 kern_return_t
is_io_async_method_scalarI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5377 is_io_async_method_scalarI_structureO(
5378 	io_object_t     connect,
5379 	mach_port_t wake_port,
5380 	io_async_ref_t reference,
5381 	mach_msg_type_number_t referenceCnt,
5382 	uint32_t        index,
5383 	io_scalar_inband_t input,
5384 	mach_msg_type_number_t  inputCount,
5385 	io_struct_inband_t              output,
5386 	mach_msg_type_number_t *        outputCount )
5387 {
5388 	uint32_t i;
5389 	io_scalar_inband64_t _input;
5390 	io_async_ref64_t _reference;
5391 
5392 	if (referenceCnt > ASYNC_REF64_COUNT) {
5393 		return kIOReturnBadArgument;
5394 	}
5395 	for (i = 0; i < referenceCnt; i++) {
5396 		_reference[i] = REF64(reference[i]);
5397 	}
5398 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5399 
5400 	mach_msg_type_number_t scalar_outputCnt = 0;
5401 	mach_vm_size_t ool_output_size = 0;
5402 
5403 	for (i = 0; i < inputCount; i++) {
5404 		_input[i] = SCALAR64(input[i]);
5405 	}
5406 
5407 	return is_io_connect_async_method(connect,
5408 	           wake_port, _reference, referenceCnt,
5409 	           index,
5410 	           _input, inputCount,
5411 	           NULL, 0,
5412 	           0, 0,
5413 	           output, outputCount,
5414 	           NULL, &scalar_outputCnt,
5415 	           0, &ool_output_size);
5416 }
5417 
5418 /* Routine io_async_method_scalarI_structureI */
5419 kern_return_t
is_io_async_method_scalarI_structureI(io_connect_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5420 is_io_async_method_scalarI_structureI(
5421 	io_connect_t            connect,
5422 	mach_port_t wake_port,
5423 	io_async_ref_t reference,
5424 	mach_msg_type_number_t referenceCnt,
5425 	uint32_t                index,
5426 	io_scalar_inband_t      input,
5427 	mach_msg_type_number_t  inputCount,
5428 	io_struct_inband_t      inputStruct,
5429 	mach_msg_type_number_t  inputStructCount )
5430 {
5431 	uint32_t i;
5432 	io_scalar_inband64_t _input;
5433 	io_async_ref64_t _reference;
5434 
5435 	if (referenceCnt > ASYNC_REF64_COUNT) {
5436 		return kIOReturnBadArgument;
5437 	}
5438 	for (i = 0; i < referenceCnt; i++) {
5439 		_reference[i] = REF64(reference[i]);
5440 	}
5441 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5442 
5443 	mach_msg_type_number_t scalar_outputCnt = 0;
5444 	mach_msg_type_number_t inband_outputCnt = 0;
5445 	mach_vm_size_t ool_output_size = 0;
5446 
5447 	for (i = 0; i < inputCount; i++) {
5448 		_input[i] = SCALAR64(input[i]);
5449 	}
5450 
5451 	return is_io_connect_async_method(connect,
5452 	           wake_port, _reference, referenceCnt,
5453 	           index,
5454 	           _input, inputCount,
5455 	           inputStruct, inputStructCount,
5456 	           0, 0,
5457 	           NULL, &inband_outputCnt,
5458 	           NULL, &scalar_outputCnt,
5459 	           0, &ool_output_size);
5460 }
5461 
5462 /* Routine io_async_method_structureI_structureO */
5463 kern_return_t
is_io_async_method_structureI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5464 is_io_async_method_structureI_structureO(
5465 	io_object_t     connect,
5466 	mach_port_t wake_port,
5467 	io_async_ref_t reference,
5468 	mach_msg_type_number_t referenceCnt,
5469 	uint32_t        index,
5470 	io_struct_inband_t              input,
5471 	mach_msg_type_number_t  inputCount,
5472 	io_struct_inband_t              output,
5473 	mach_msg_type_number_t *        outputCount )
5474 {
5475 	uint32_t i;
5476 	mach_msg_type_number_t scalar_outputCnt = 0;
5477 	mach_vm_size_t ool_output_size = 0;
5478 	io_async_ref64_t _reference;
5479 
5480 	if (referenceCnt > ASYNC_REF64_COUNT) {
5481 		return kIOReturnBadArgument;
5482 	}
5483 	for (i = 0; i < referenceCnt; i++) {
5484 		_reference[i] = REF64(reference[i]);
5485 	}
5486 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5487 
5488 	return is_io_connect_async_method(connect,
5489 	           wake_port, _reference, referenceCnt,
5490 	           index,
5491 	           NULL, 0,
5492 	           input, inputCount,
5493 	           0, 0,
5494 	           output, outputCount,
5495 	           NULL, &scalar_outputCnt,
5496 	           0, &ool_output_size);
5497 }
5498 
5499 
// Shim that invokes a legacy IOExternalAsyncMethod (IOAsyncMethod
// member-function pointer) with up to six 32-bit scalar inputs and outputs.
// The narrowed async reference array is passed as the method's first
// argument; the remaining six slots follow the same input/output-pointer
// convention as shim_io_connect_method_scalarI_scalarO.
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t   inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	io_scalar_inband_t  _output;
	IOReturn            err;
	io_async_ref_t      reference;

	// Pre-zero output slots so short replies never leak stack contents.
	bzero(&_output[0], sizeof(_output));
	// Narrow the 64-bit async references to the legacy 32-bit format.
	// NOTE(review): asyncReferenceCount is assumed to fit io_async_ref_t;
	// callers are expected to have validated it — confirm.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	// do { } while (false): 'continue' acts as an early exit that still
	// falls through to the common output-narrowing loop below.
	do {
		// Both counts must exactly match the method table entry.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Dispatch on the number of inputs; unused trailing parameters
		// become pointers into _output.
		switch (inputCount) {
		case 6:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)(  reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	// Narrow the (possibly zeroed) output scalars for the caller.
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5591 
5592 
5593 /* Routine io_connect_method_scalarI_structureO */
5594 kern_return_t
is_io_connect_method_scalarI_structureO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5595 is_io_connect_method_scalarI_structureO(
5596 	io_object_t     connect,
5597 	uint32_t        index,
5598 	io_scalar_inband_t input,
5599 	mach_msg_type_number_t  inputCount,
5600 	io_struct_inband_t              output,
5601 	mach_msg_type_number_t *        outputCount )
5602 {
5603 	uint32_t i;
5604 	io_scalar_inband64_t _input;
5605 
5606 	mach_msg_type_number_t scalar_outputCnt = 0;
5607 	mach_vm_size_t ool_output_size = 0;
5608 
5609 	for (i = 0; i < inputCount; i++) {
5610 		_input[i] = SCALAR64(input[i]);
5611 	}
5612 
5613 	return is_io_connect_method(connect, index,
5614 	           _input, inputCount,
5615 	           NULL, 0,
5616 	           0, 0,
5617 	           output, outputCount,
5618 	           NULL, &scalar_outputCnt,
5619 	           0, &ool_output_size);
5620 }
5621 
// Shim that invokes a legacy IOExternalMethod taking up to five 32-bit
// scalar inputs plus a structure output buffer.  For fewer than four inputs
// the size pointer 'outputCount' is also passed (cast to void *) so the
// method can report/limit the bytes written; kIOUCVariableStructureSize in
// the table entry permits a variable-sized output.
kern_return_t
shim_io_connect_method_scalarI_structureO(

	IOExternalMethod *      method,
	IOService *             object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	IOByteCount *   outputCount )
{
	IOMethod            func;
	IOReturn            err;

	err = kIOReturnBadArgument;

	// do { } while (false): 'continue' acts as an early exit.
	do {
		// Input count must exactly match the method table entry.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Output size must match unless the entry declares variable size.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Dispatch on the number of scalar inputs; trailing slots carry the
		// output buffer, its size pointer, then NULL padding.
		switch (inputCount) {
		case 5:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5686 
5687 
// Async counterpart of shim_io_connect_method_scalarI_structureO: invokes a
// legacy IOExternalAsyncMethod with the narrowed async reference array as
// the first argument, up to five 32-bit scalar inputs, and a structure
// output buffer (with its size pointer cast to void * when it fits).
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	mach_msg_type_number_t *        outputCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err;
	io_async_ref_t      reference;

	// Narrow the 64-bit async references to the legacy 32-bit format.
	// NOTE(review): asyncReferenceCount is assumed to fit io_async_ref_t;
	// callers are expected to have validated it — confirm.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	// do { } while (false): 'continue' acts as an early exit.
	do {
		// Input count must exactly match the method table entry.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Output size must match unless the entry declares variable size.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Dispatch on the number of scalar inputs; trailing slots carry the
		// output buffer, its size pointer, then NULL padding.
		switch (inputCount) {
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  reference,
			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5765 
5766 /* Routine io_connect_method_scalarI_structureI */
5767 kern_return_t
is_io_connect_method_scalarI_structureI(io_connect_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5768 is_io_connect_method_scalarI_structureI(
5769 	io_connect_t            connect,
5770 	uint32_t                index,
5771 	io_scalar_inband_t      input,
5772 	mach_msg_type_number_t  inputCount,
5773 	io_struct_inband_t      inputStruct,
5774 	mach_msg_type_number_t  inputStructCount )
5775 {
5776 	uint32_t i;
5777 	io_scalar_inband64_t _input;
5778 
5779 	mach_msg_type_number_t scalar_outputCnt = 0;
5780 	mach_msg_type_number_t inband_outputCnt = 0;
5781 	mach_vm_size_t ool_output_size = 0;
5782 
5783 	for (i = 0; i < inputCount; i++) {
5784 		_input[i] = SCALAR64(input[i]);
5785 	}
5786 
5787 	return is_io_connect_method(connect, index,
5788 	           _input, inputCount,
5789 	           inputStruct, inputStructCount,
5790 	           0, 0,
5791 	           NULL, &inband_outputCnt,
5792 	           NULL, &scalar_outputCnt,
5793 	           0, &ool_output_size);
5794 }
5795 
/*
 * Shim adapting the modern external-method path to a legacy
 * IOExternalMethod table entry of type kIOUCScalarIStructI:
 * up to 5 scalar inputs plus one in-band structure input.
 *
 * Returns kIOReturnBadArgument on a count mismatch or bad table entry;
 * otherwise the driver method's return value.
 */
kern_return_t
shim_io_connect_method_scalarI_structureI(
	IOExternalMethod *  method,
	IOService *         object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              inputStruct,
	mach_msg_type_number_t  inputStructCount )
{
	IOMethod            func;
	IOReturn            err = kIOReturnBadArgument;

	do{
		// Scalar count must match the table entry exactly.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Struct size must match unless the entry is variable-size.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Legacy convention: scalars first, then the struct pointer and
		// its size, padded with NULLs to six arguments.
		switch (inputCount) {
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			// NOTE(review): unlike every sibling case, input[2] is passed
			// untruncated (full io_user_scalar_t as a pointer-sized value)
			// rather than through ARG32 — looks like long-standing
			// intentional behavior; confirm before "fixing".
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *)  input[2],
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			// err stays kIOReturnBadArgument.
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5861 
/*
 * Async variant of the kIOUCScalarIStructI shim: up to 5 scalar inputs
 * plus one in-band structure input, with the async completion reference
 * repacked for the legacy IOAsyncMethod signature.
 *
 * asyncWakePort is unused here; kept for signature symmetry with the
 * other async shims.
 */
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              inputStruct,
	mach_msg_type_number_t  inputStructCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err = kIOReturnBadArgument;
	io_async_ref_t      reference;

	// Repack user async references into the legacy 32-bit form.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	do{
		// Scalar count must match the table entry exactly.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Struct size must match unless the entry is variable-size.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Legacy convention: reference, then scalars, then the struct
		// pointer and its size, padded with NULLs.
		switch (inputCount) {
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			// err stays kIOReturnBadArgument.
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5942 
5943 /* Routine io_connect_method_structureI_structureO */
5944 kern_return_t
is_io_connect_method_structureI_structureO(io_object_t connect,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5945 is_io_connect_method_structureI_structureO(
5946 	io_object_t     connect,
5947 	uint32_t        index,
5948 	io_struct_inband_t              input,
5949 	mach_msg_type_number_t  inputCount,
5950 	io_struct_inband_t              output,
5951 	mach_msg_type_number_t *        outputCount )
5952 {
5953 	mach_msg_type_number_t scalar_outputCnt = 0;
5954 	mach_vm_size_t ool_output_size = 0;
5955 
5956 	return is_io_connect_method(connect, index,
5957 	           NULL, 0,
5958 	           input, inputCount,
5959 	           0, 0,
5960 	           output, outputCount,
5961 	           NULL, &scalar_outputCnt,
5962 	           0, &ool_output_size);
5963 }
5964 
/*
 * Shim for legacy kIOUCStructIStructO table entries: one in-band
 * structure in, one in-band structure out.  Both sizes are validated
 * against the table entry (unless declared variable-size), then the
 * driver method is invoked with a calling convention chosen by which
 * of count0/count1 are non-zero.
 */
kern_return_t
shim_io_connect_method_structureI_structureO(
	IOExternalMethod *  method,
	IOService *         object,
	io_struct_inband_t              input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	IOByteCount *   outputCount )
{
	IOMethod            func;
	IOReturn            err = kIOReturnBadArgument;

	do{
		// Input size must match unless declared variable-size.
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Output size must match unless declared variable-size.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		if (method->count1) {
			if (method->count0) {
				// Input and output structures.
				err = (object->*func)( input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				// Output structure only.
				err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			// Input structure only.
			err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);


	return err;
}
6008 
/*
 * Async variant of the kIOUCStructIStructO shim: one in-band structure
 * in, one out, with the async completion reference repacked for the
 * legacy IOAsyncMethod signature.
 *
 * asyncWakePort is unused here; kept for signature symmetry with the
 * other async shims.
 */
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t           asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t              asyncReferenceCount,
	io_struct_inband_t              input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	mach_msg_type_number_t *        outputCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err;
	io_async_ref_t      reference;

	// Repack user async references into the legacy 32-bit form.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do{
		// Input size must match unless declared variable-size.
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Output size must match unless declared variable-size.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		if (method->count1) {
			if (method->count0) {
				// Input and output structures.
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				// Output structure only.
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			// Input structure only.
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
6064 
/* Routine io_catalog_send_data */
/*
 * MIG handler: accept serialized driver-catalogue data from user space
 * (kextd / kernelmanagerd) and apply it per `flag`.
 *
 * Returns a Mach-level status; the IOKit-level status is delivered via
 * *result.  Once vm_map_copyout() has consumed the caller's copy object
 * this routine must return KERN_SUCCESS so MIG does not double-free it.
 */
kern_return_t
is_io_catalog_send_data(
	mach_port_t             main_port,
	uint32_t                flag,
	io_buf_ptr_t            inData,
	mach_msg_type_number_t  inDataCount,
	kern_return_t *         result)
{
	// Allow sending catalog data if there is no kextd and the kernel is DEVELOPMENT || DEBUG
#if NO_KEXTD && !(DEVELOPMENT || DEBUG)
	return kIOReturnNotPrivileged;
#else /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
	OSObject * obj = NULL;
	vm_offset_t data;
	kern_return_t kr = kIOReturnError;

	//printf("io_catalog_send_data called. flag: %d\n", flag);

	// Only the holder of the main device port may modify the catalogue.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	// Every flag except the three notification-style ones requires a
	// non-empty payload.
	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
	    flag != kIOCatalogKextdActive &&
	    flag != kIOCatalogKextdFinishedLaunching) &&
	    (!inData || !inDataCount)) {
		return kIOReturnBadArgument;
	}

	if (!IOCurrentTaskHasEntitlement(kIOCatalogManagementEntitlement)) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);
		// For now, fake success to not break applications relying on this function succeeding.
		// See <rdar://problem/32554970> for more details.
		return kIOReturnSuccess;
	}

	if (inData) {
		vm_map_offset_t map_data;

		// Cap the payload at 1024 in-band-struct units.
		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
			return kIOReturnMessageTooLarge;
		}

		// Materialize the caller's vm_map_copy into the kernel map.
		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		// must return success after vm_map_copyout() succeeds

		if (inDataCount) {
			// Unserialize the XML payload, then free the copied-out pages.
			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
			vm_deallocate( kernel_map, data, inDataCount );
			if (!obj) {
				*result = kIOReturnNoMemory;
				return KERN_SUCCESS;
			}
		}
	}

	// The add/remove/reset cases below only set kr on failure: they rely
	// on kr already being KERN_SUCCESS from vm_map_copyout() above (the
	// payload guard guarantees those flags arrive with inData set).
	switch (flag) {
	case kIOCatalogResetDrivers:
	case kIOCatalogResetDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->resetAndAddDrivers(array,
			    flag == kIOCatalogResetDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogAddDrivers:
	case kIOCatalogAddDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->addDrivers( array,
			    flag == kIOCatalogAddDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveDrivers:
	case kIOCatalogRemoveDriversNoMatch: {
		OSDictionary * dict;

		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->removeDrivers( dict,
			    flag == kIOCatalogRemoveDrivers )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	// Legacy / no-op notification flags.
	case kIOCatalogStartMatching__Removed:
	case kIOCatalogRemoveKernelLinker__Removed:
	case kIOCatalogKextdActive:
	case kIOCatalogKextdFinishedLaunching:
		kr = KERN_NOT_SUPPORTED;
		break;

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	if (obj) {
		obj->release();
	}

	*result = kr;
	return KERN_SUCCESS;
#endif /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
}
6199 
6200 /* Routine io_catalog_terminate */
6201 kern_return_t
is_io_catalog_terminate(mach_port_t main_port,uint32_t flag,io_name_t name)6202 is_io_catalog_terminate(
6203 	mach_port_t main_port,
6204 	uint32_t flag,
6205 	io_name_t name )
6206 {
6207 	kern_return_t          kr;
6208 
6209 	if (main_port != main_device_port) {
6210 		return kIOReturnNotPrivileged;
6211 	}
6212 
6213 	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6214 	    kIOClientPrivilegeAdministrator );
6215 	if (kIOReturnSuccess != kr) {
6216 		return kr;
6217 	}
6218 
6219 	switch (flag) {
6220 #if !defined(SECURE_KERNEL)
6221 	case kIOCatalogServiceTerminate:
6222 		kr = gIOCatalogue->terminateDrivers(NULL, name, false);
6223 		break;
6224 
6225 	case kIOCatalogModuleUnload:
6226 	case kIOCatalogModuleTerminate:
6227 		kr = gIOCatalogue->terminateDriversForModule(name,
6228 		    flag == kIOCatalogModuleUnload);
6229 		break;
6230 #endif
6231 
6232 	default:
6233 		kr = kIOReturnBadArgument;
6234 		break;
6235 	}
6236 
6237 	return kr;
6238 }
6239 
/* Routine io_catalog_get_data */
/*
 * MIG handler: serialize catalogue data selected by `flag` and return
 * it to the caller as an out-of-line vm_map_copy (*outData) of
 * *outDataCount bytes.
 */
kern_return_t
is_io_catalog_get_data(
	mach_port_t             main_port,
	uint32_t                flag,
	io_buf_ptr_t            *outData,
	mach_msg_type_number_t  *outDataCount)
{
	kern_return_t kr = kIOReturnSuccess;
	OSSerialize * s;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	//printf("io_catalog_get_data called. flag: %d\n", flag);

	s = OSSerialize::withCapacity(4096);
	if (!s) {
		return kIOReturnNoMemory;
	}

	kr = gIOCatalogue->serializeData(flag, s);

	if (kr == kIOReturnSuccess) {
		mach_vm_address_t data;
		vm_map_copy_t copy;
		unsigned int size;

		size = s->getLength();
		// Copy the serialized text into a fresh kernel allocation, then
		// wrap it in a vm_map_copy for MIG to move out to the caller.
		// src_destroy == TRUE: on success the copy consumes the allocation.
		kr = mach_vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
		if (kr == kIOReturnSuccess) {
			bcopy(s->text(), (void *)data, size);
			// NOTE(review): if vm_map_copyin fails here, the allocation
			// at `data` does not appear to be deallocated — confirm.
			kr = vm_map_copyin(kernel_map, data, size, true, &copy);
			*outData = (char *)copy;
			*outDataCount = size;
		}
	}

	s->release();

	return kr;
}
6283 
6284 /* Routine io_catalog_get_gen_count */
6285 kern_return_t
is_io_catalog_get_gen_count(mach_port_t main_port,uint32_t * genCount)6286 is_io_catalog_get_gen_count(
6287 	mach_port_t             main_port,
6288 	uint32_t                *genCount)
6289 {
6290 	if (main_port != main_device_port) {
6291 		return kIOReturnNotPrivileged;
6292 	}
6293 
6294 	//printf("io_catalog_get_gen_count called.\n");
6295 
6296 	if (!genCount) {
6297 		return kIOReturnBadArgument;
6298 	}
6299 
6300 	*genCount = gIOCatalogue->getGenerationCount();
6301 
6302 	return kIOReturnSuccess;
6303 }
6304 
6305 /* Routine io_catalog_module_loaded.
6306  * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
6307  */
6308 kern_return_t
is_io_catalog_module_loaded(mach_port_t main_port,io_name_t name)6309 is_io_catalog_module_loaded(
6310 	mach_port_t             main_port,
6311 	io_name_t               name)
6312 {
6313 	if (main_port != main_device_port) {
6314 		return kIOReturnNotPrivileged;
6315 	}
6316 
6317 	//printf("io_catalog_module_loaded called. name %s\n", name);
6318 
6319 	if (!name) {
6320 		return kIOReturnBadArgument;
6321 	}
6322 
6323 	gIOCatalogue->moduleHasLoaded(name);
6324 
6325 	return kIOReturnSuccess;
6326 }
6327 
6328 kern_return_t
is_io_catalog_reset(mach_port_t main_port,uint32_t flag)6329 is_io_catalog_reset(
6330 	mach_port_t             main_port,
6331 	uint32_t                flag)
6332 {
6333 	if (main_port != main_device_port) {
6334 		return kIOReturnNotPrivileged;
6335 	}
6336 
6337 	switch (flag) {
6338 	case kIOCatalogResetDefault:
6339 		gIOCatalogue->reset();
6340 		break;
6341 
6342 	default:
6343 		return kIOReturnBadArgument;
6344 	}
6345 
6346 	return kIOReturnSuccess;
6347 }
6348 
/*
 * Fast-path Mach trap for IOUserClient / IOUserServer method calls.
 *
 * args->userClientRef encodes either a connect-port name (legacy
 * IOExternalTrap path) or a DriverKit user-extension reference; the
 * low bits and bit 32 of the reference select which path is taken.
 * Returns kIOReturnBadArgument when the reference cannot be resolved.
 */
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t    result = kIOReturnBadArgument;
	IOUserClient   * userClient;
	OSObject       * object;
	uintptr_t        ref;
	mach_port_name_t portName;

	ref     = (uintptr_t) args->userClientRef;

	if ((ref == MACH_PORT_DEAD) || (ref == (uintptr_t) MACH_PORT_NULL)) {
		return kIOReturnBadArgument;
	}
	// kobject port names always have b0-1 set, so we use these bits as flags to
	// iokit_user_client_trap()
	// keep this up to date with ipc_entry_name_mask();
	portName = (mach_port_name_t) (ref | 3);
	// Bit 32 set, or low flag bit clear: DriverKit user-extension path.
	if (((1ULL << 32) & ref) || !(1 & ref)) {
		object = iokit_lookup_uext_ref_current_task(portName);
		if (object) {
			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
		}
		OSSafeReleaseNULL(object);
	} else {
		// Legacy path: resolve the connect port to an IOUserClient and
		// dispatch through its trap table.
		io_object_t ref_current_task = iokit_lookup_connect_ref_current_task((mach_port_name_t) ref);
		if ((userClient = OSDynamicCast(IOUserClient, ref_current_task))) {
			IOExternalTrap *trap = NULL;
			IOService *target = NULL;

			result = kIOReturnSuccess;
			// Consult any installed sandbox-style filter before the trap
			// is even looked up.
			io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
			if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
				result = gIOUCFilterCallbacks->io_filter_applier(userClient, filterPolicy, io_filter_type_trap, args->index);
			}
			if (kIOReturnSuccess == result) {
				trap = userClient->getTargetAndTrapForIndex(&target, args->index);
			}
			if (trap && target) {
				IOTrap func;

				func = trap->func;

				if (func) {
					result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
				}
			}

			iokit_remove_connect_reference(userClient);
		} else {
			// Port resolved to something that is not an IOUserClient.
			OSSafeReleaseNULL(ref_current_task);
		}
	}

	return result;
}
6405 
6406 /* Routine io_device_tree_entry_exists_with_name */
6407 kern_return_t
is_io_device_tree_entry_exists_with_name(mach_port_t main_port,io_name_t name,boolean_t * exists)6408 is_io_device_tree_entry_exists_with_name(
6409 	mach_port_t main_port,
6410 	io_name_t name,
6411 	boolean_t *exists )
6412 {
6413 	OSCollectionIterator *iter;
6414 
6415 	if (main_port != main_device_port) {
6416 		return kIOReturnNotPrivileged;
6417 	}
6418 
6419 	iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6420 	*exists = iter && iter->getNextObject();
6421 	OSSafeReleaseNULL(iter);
6422 
6423 	return kIOReturnSuccess;
6424 }
6425 } /* extern "C" */
6426 
6427 IOReturn
callExternalMethod(uint32_t selector,IOExternalMethodArguments * args)6428 IOUserClient::callExternalMethod(uint32_t selector, IOExternalMethodArguments * args)
6429 {
6430 	IOReturn ret;
6431 
6432 	ipcEnter(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6433 	if (uc2022) {
6434 		ret = ((IOUserClient2022 *) this)->externalMethod(selector, (IOExternalMethodArgumentsOpaque *) args);
6435 	} else {
6436 		ret = externalMethod(selector, args);
6437 	}
6438 	ipcExit(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6439 
6440 	return ret;
6441 }
6442 
MIG_SERVER_ROUTINE IOReturn
IOUserClient2022::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
    IOExternalMethodDispatch *dispatch,
    OSObject *target, void *reference)
{
	// IOUserClient2022 subclasses must use the opaque-arguments overload
	// (callExternalMethod routes them there); reaching this legacy entry
	// point is a programming error, so fail hard.
	panic("wrong externalMethod for IOUserClient2022");
}
6450 
/*
 * Validate and dispatch an external method call for IOUserClient2022
 * subclasses.  The selector indexes dispatchArray; each entry's check
 * fields are enforced against the caller-supplied argument counts and
 * sizes before the handler runs, so handlers can trust them.
 *
 * Returns kIOReturnBadArgument on any check failure,
 * kIOReturnNotPrivileged on a missing entitlement, and
 * kIOReturnNoCompletion when the entry has no function (the subclass
 * is expected to dispatch the selector itself).
 */
IOReturn
IOUserClient2022::dispatchExternalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque *arguments,
    const IOExternalMethodDispatch2022 dispatchArray[], size_t dispatchArrayCount,
    OSObject * target, void * reference)
{
	IOReturn    err;
	IOExternalMethodArguments * args = (typeof(args))arguments;
	const IOExternalMethodDispatch2022 * dispatch;

	if (!dispatchArray) {
		return kIOReturnError;
	}
	// Bounds-check the selector before indexing the table.
	if (selector >= dispatchArrayCount) {
		return kIOReturnBadArgument;
	}
	dispatch = &dispatchArray[selector];

	// kIOUCVariableStructureSize in any check field disables that check.
	uint32_t count;
	count = dispatch->checkScalarInputCount;
	if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
		return kIOReturnBadArgument;
	}

	// Structure input may arrive in-band or as a memory descriptor;
	// validate whichever form was supplied.
	count = dispatch->checkStructureInputSize;
	if ((kIOUCVariableStructureSize != count)
	    && (count != ((args->structureInputDescriptor)
	    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
		return kIOReturnBadArgument;
	}

	count = dispatch->checkScalarOutputCount;
	if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
		return kIOReturnBadArgument;
	}

	count = dispatch->checkStructureOutputSize;
	if ((kIOUCVariableStructureSize != count)
	    && (count != ((args->structureOutputDescriptor)
	    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
		return kIOReturnBadArgument;
	}

	// Async invocations are only allowed when the entry opts in.
	if (args->asyncWakePort && !dispatch->allowAsync) {
		return kIOReturnBadArgument;
	}

	// Per-selector entitlement gate.
	if (dispatch->checkEntitlement) {
		if (!IOCurrentTaskHasEntitlement(dispatch->checkEntitlement)) {
			return kIOReturnNotPrivileged;
		}
	}

	if (dispatch->function) {
		err = (*dispatch->function)(target, reference, args);
	} else {
		err = kIOReturnNoCompletion; /* implementer can dispatch */
	}
	return err;
}
6510 
6511 IOReturn
externalMethod(uint32_t selector,IOExternalMethodArguments * args,IOExternalMethodDispatch * dispatch,OSObject * target,void * reference)6512 IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
6513     IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
6514 {
6515 	IOReturn    err;
6516 	IOService * object;
6517 	IOByteCount structureOutputSize;
6518 
6519 	if (dispatch) {
6520 		uint32_t count;
6521 		count = dispatch->checkScalarInputCount;
6522 		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6523 			return kIOReturnBadArgument;
6524 		}
6525 
6526 		count = dispatch->checkStructureInputSize;
6527 		if ((kIOUCVariableStructureSize != count)
6528 		    && (count != ((args->structureInputDescriptor)
6529 		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6530 			return kIOReturnBadArgument;
6531 		}
6532 
6533 		count = dispatch->checkScalarOutputCount;
6534 		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6535 			return kIOReturnBadArgument;
6536 		}
6537 
6538 		count = dispatch->checkStructureOutputSize;
6539 		if ((kIOUCVariableStructureSize != count)
6540 		    && (count != ((args->structureOutputDescriptor)
6541 		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6542 			return kIOReturnBadArgument;
6543 		}
6544 
6545 		if (dispatch->function) {
6546 			err = (*dispatch->function)(target, reference, args);
6547 		} else {
6548 			err = kIOReturnNoCompletion; /* implementer can dispatch */
6549 		}
6550 		return err;
6551 	}
6552 
6553 
6554 	// pre-Leopard API's don't do ool structs
6555 	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
6556 		err = kIOReturnIPCError;
6557 		return err;
6558 	}
6559 
6560 	structureOutputSize = args->structureOutputSize;
6561 
6562 	if (args->asyncWakePort) {
6563 		IOExternalAsyncMethod * method;
6564 		object = NULL;
6565 		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
6566 			return kIOReturnUnsupported;
6567 		}
6568 
6569 		if (kIOUCForegroundOnly & method->flags) {
6570 			if (task_is_gpu_denied(current_task())) {
6571 				return kIOReturnNotPermitted;
6572 			}
6573 		}
6574 
6575 		switch (method->flags & kIOUCTypeMask) {
6576 		case kIOUCScalarIStructI:
6577 			err = shim_io_async_method_scalarI_structureI( method, object,
6578 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6579 			    args->scalarInput, args->scalarInputCount,
6580 			    (char *)args->structureInput, args->structureInputSize );
6581 			break;
6582 
6583 		case kIOUCScalarIScalarO:
6584 			err = shim_io_async_method_scalarI_scalarO( method, object,
6585 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6586 			    args->scalarInput, args->scalarInputCount,
6587 			    args->scalarOutput, &args->scalarOutputCount );
6588 			break;
6589 
6590 		case kIOUCScalarIStructO:
6591 			err = shim_io_async_method_scalarI_structureO( method, object,
6592 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6593 			    args->scalarInput, args->scalarInputCount,
6594 			    (char *) args->structureOutput, &args->structureOutputSize );
6595 			break;
6596 
6597 
6598 		case kIOUCStructIStructO:
6599 			err = shim_io_async_method_structureI_structureO( method, object,
6600 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6601 			    (char *)args->structureInput, args->structureInputSize,
6602 			    (char *) args->structureOutput, &args->structureOutputSize );
6603 			break;
6604 
6605 		default:
6606 			err = kIOReturnBadArgument;
6607 			break;
6608 		}
6609 	} else {
6610 		IOExternalMethod *      method;
6611 		object = NULL;
6612 		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
6613 			return kIOReturnUnsupported;
6614 		}
6615 
6616 		if (kIOUCForegroundOnly & method->flags) {
6617 			if (task_is_gpu_denied(current_task())) {
6618 				return kIOReturnNotPermitted;
6619 			}
6620 		}
6621 
6622 		switch (method->flags & kIOUCTypeMask) {
6623 		case kIOUCScalarIStructI:
6624 			err = shim_io_connect_method_scalarI_structureI( method, object,
6625 			    args->scalarInput, args->scalarInputCount,
6626 			    (char *) args->structureInput, args->structureInputSize );
6627 			break;
6628 
6629 		case kIOUCScalarIScalarO:
6630 			err = shim_io_connect_method_scalarI_scalarO( method, object,
6631 			    args->scalarInput, args->scalarInputCount,
6632 			    args->scalarOutput, &args->scalarOutputCount );
6633 			break;
6634 
6635 		case kIOUCScalarIStructO:
6636 			err = shim_io_connect_method_scalarI_structureO( method, object,
6637 			    args->scalarInput, args->scalarInputCount,
6638 			    (char *) args->structureOutput, &structureOutputSize );
6639 			break;
6640 
6641 
6642 		case kIOUCStructIStructO:
6643 			err = shim_io_connect_method_structureI_structureO( method, object,
6644 			    (char *) args->structureInput, args->structureInputSize,
6645 			    (char *) args->structureOutput, &structureOutputSize );
6646 			break;
6647 
6648 		default:
6649 			err = kIOReturnBadArgument;
6650 			break;
6651 		}
6652 	}
6653 
6654 	if (structureOutputSize > UINT_MAX) {
6655 		structureOutputSize = 0;
6656 		err = kIOReturnBadArgument;
6657 	}
6658 
6659 	args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);
6660 
6661 	return err;
6662 }
6663 
6664 IOReturn
registerFilterCallbacks(const struct io_filter_callbacks * callbacks,size_t size)6665 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6666 {
6667 	if (size < sizeof(*callbacks)) {
6668 		return kIOReturnBadArgument;
6669 	}
6670 	if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6671 		return kIOReturnBusy;
6672 	}
6673 	return kIOReturnSuccess;
6674 }
6675 
6676 
/*
 * Reserved (pad) virtual-method slots for IOUserClient. These keep the
 * class's vtable layout stable so future releases can add virtual methods
 * without breaking binary compatibility with existing kexts. Each slot is
 * currently unused; converting one to a real method later is an
 * ABI-compatible change.
 */
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);

/* Reserved vtable padding for the newer IOUserClient2022 subclass. */
OSMetaClassDefineReservedUnused(IOUserClient2022, 0);
OSMetaClassDefineReservedUnused(IOUserClient2022, 1);
OSMetaClassDefineReservedUnused(IOUserClient2022, 2);
OSMetaClassDefineReservedUnused(IOUserClient2022, 3);
6698