xref: /xnu-11417.101.15/iokit/Kernel/IOUserClient.cpp (revision e3723e1f17661b24996789d8afc084c0c3303b26)
1 /*
2  * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <libkern/c++/OSKext.h>
30 #include <libkern/c++/OSSharedPtr.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51 #include <sys/code_signing.h>
52 #include <vm/vm_kern_xnu.h>
53 
54 #include <mach/sdt.h>
55 #include <os/hash.h>
56 
57 #include <libkern/amfi/amfi.h>
58 
59 #if CONFIG_MACF
60 
61 extern "C" {
62 #include <security/mac_framework.h>
63 };
64 #include <sys/kauth.h>
65 
66 #define IOMACF_LOG 0
67 
68 #endif /* CONFIG_MACF */
69 
70 #include <IOKit/assert.h>
71 
72 #include "IOServicePrivate.h"
73 #include "IOKitKernelInternal.h"
74 
/* Helpers for moving scalar arguments across the user/kernel boundary. */
#define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))   // widen a 32-bit value to a user scalar
#define SCALAR32(x) ((uint32_t )x)                          // truncate to 32 bits
#define ARG32(x)    ((void *)(uintptr_t)SCALAR32(x))        // pack a 32-bit value into a pointer-sized slot
#define REF64(x)    ((io_user_reference_t)((UInt64)(x)))    // widen to a 64-bit user reference
#define REF32(x)    ((int)(x))                              // narrow to a 32-bit reference

// Flag bits carried in async references (kIOUCAsync0Flags is the mask of
// both bits below).
enum{
	kIOUCAsync0Flags          = 3ULL,
	kIOUCAsync64Flag          = 1ULL, // reference uses the 64-bit layout
	kIOUCAsyncErrorLoggedFlag = 2ULL  // presumably "error already logged once" — name-derived
};
86 
#if IOKITSTATS

// Register this user client with IOStatistics; the returned counter is
// stashed in the expansion data ('reserved').
#define IOStatisticsRegisterCounter() \
do { \
	reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

// Unregister the counter; tolerates a NULL expansion area.
#define IOStatisticsUnregisterCounter() \
do { \
	if (reserved) \
	        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

// Count one user-client call against 'client' (must be in scope at the use site).
#define IOStatisticsClientCall() \
do { \
	IOStatistics::countUserClientCall(client); \
} while (0)

#else

// Statistics disabled: all hooks compile to nothing.
#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */

#if DEVELOPMENT || DEBUG

// Temporarily replace this frame's saved return address with 'a', restored
// by FAKE_STACK_FRAME_END(). NOTE(review): debug-only; appears intended to
// make stack walks attribute the callout to 'a' — confirm against users.
#define FAKE_STACK_FRAME(a)                                             \
	const void ** __frameptr;                                       \
	const void  * __retaddr;                                        \
	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0);   \
	__retaddr = __frameptr[1];                                      \
	__frameptr[1] = (a);

#define FAKE_STACK_FRAME_END()                                          \
	__frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */

// Element counts of the 32-bit and 64-bit async reference arrays.
#define ASYNC_REF_COUNT         (sizeof(io_async_ref_t) / sizeof(natural_t))
#define ASYNC_REF64_COUNT       (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
135 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
136 
137 extern "C" {
138 #include <mach/mach_traps.h>
139 #include <vm/vm_map_xnu.h>
140 } /* extern "C" */
141 
struct IOMachPortHashList;

// Kobject type values are assumed to fit in a single byte — TODO confirm
// against the ipc_kobject type storage this relies on.
static_assert(IKOT_MAX_TYPE <= 255);
145 
146 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
147 
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
class IOMachPort : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	mach_port_mscount_t mscount;   // make-send count minted for 'port'
	IOLock      lock;              // guards 'object' against concurrent teardown
	SLIST_ENTRY(IOMachPort) link;  // linkage in a gIOMachPortHash bucket
	ipc_port_t  port;              // kobject port fronting 'object'
	OSObject*   XNU_PTRAUTH_SIGNED_PTR("IOMachPort.object") object; // tagged-retained while the port is live

	// Allocate a new IOMachPort plus its kobject port for 'obj'.
	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);

	// Hash bucket for 'obj' ('type' does not participate in the hash).
	static IOMachPortHashList* bucketForObject(OSObject *obj,
	    ipc_kobject_type_t type);

	// Linear search of 'bucket' for the (obj, type) entry; no retain taken.
	static LIBKERN_RETURNS_NOT_RETAINED IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	// No-senders handling; returns true if the port was actually torn down.
	static bool noMoreSendersForObject( OSObject * obj,
	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
	// Drop the port for 'obj' (asserts type is not IKOT_IOKIT_CONNECT).
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );

	// Create a send right for 'obj' in 'task''s IPC space.
	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;
};
176 
#define super OSObject
OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)

// Protects the port hash table and IOMachPort membership/teardown.
static IOLock *         gIOObjectPortLock;
// Exported (non-static) lock used by user-server code elsewhere in IOKit.
IOLock *                gIOUserServerLock;

// Optional filter callbacks for user-client access; write-once, read-only late.
SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SLIST_HEAD(IOMachPortHashList, IOMachPort);

// Object-to-port hash: larger table on macOS, smaller on embedded targets.
#if defined(XNU_TARGET_OS_OSX)
#define PORT_HASH_SIZE 4096
#else /* !defined(XNU_TARGET_OS_OSX) */
#define PORT_HASH_SIZE 256
#endif /* !defined(XNU_TARGET_OS_OSX) */

IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
196 
197 void
IOMachPortInitialize(void)198 IOMachPortInitialize(void)
199 {
200 	for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
201 		SLIST_INIT(&gIOMachPortHash[i]);
202 	}
203 }
204 
// Pick the hash bucket for 'obj'. Only the object pointer feeds the hash;
// 'type' is accepted for interface symmetry but unused here.
IOMachPortHashList*
IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
{
	return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
}
210 
211 IOMachPort*
portForObjectInBucket(IOMachPortHashList * bucket,OSObject * obj,ipc_kobject_type_t type)212 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
213 {
214 	IOMachPort *machPort;
215 
216 	SLIST_FOREACH(machPort, bucket, link) {
217 		if (machPort->object == obj && iokit_port_type(machPort->port) == type) {
218 			return machPort;
219 		}
220 	}
221 	return NULL;
222 }
223 
224 IOMachPort*
withObjectAndType(OSObject * obj,ipc_kobject_type_t type)225 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
226 {
227 	IOMachPort *machPort = NULL;
228 
229 	machPort = new IOMachPort;
230 	if (__improbable(machPort && !machPort->init())) {
231 		OSSafeReleaseNULL(machPort);
232 		return NULL;
233 	}
234 
235 	machPort->object = obj;
236 	machPort->port = iokit_alloc_object_port(machPort, type);
237 	IOLockInlineInit(&machPort->lock);
238 
239 	obj->taggedRetain(OSTypeID(OSCollection));
240 	machPort->mscount++;
241 
242 	return machPort;
243 }
244 
// Handle a no-senders notification for the (obj, type) port. Returns true
// when the port was actually destroyed; returns false — after updating
// *mscount to the current make-send count — when new send rights were
// minted since the notification was generated (the caller re-arms).
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOMachPort *machPort = NULL;
	IOUserClient *uc;
	OSAction *action;
	bool destroyed = true;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Keep 'obj' alive across teardown and the callouts below.
	obj->retain();

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort) {
		// The notification is stale if more sends were made meanwhile.
		destroyed = (machPort->mscount <= *mscount);
		if (!destroyed) {
			*mscount = machPort->mscount;
			lck_mtx_unlock(gIOObjectPortLock);
		} else {
			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
				uc->noMoreSenders();
			}
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);

			// Clear the back-pointer under the per-port lock so concurrent
			// kobject lookups see either a valid object or NULL.
			IOLockLock(&machPort->lock);
			iokit_remove_object_port(machPort->port, type);
			machPort->object = NULL;
			IOLockUnlock(&machPort->lock);

			lck_mtx_unlock(gIOObjectPortLock);

			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

			// Drop the collection-tagged retain taken in withObjectAndType().
			obj->taggedRelease(OSTypeID(OSCollection));
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}

	// DriverKit OSAction objects learn their last client reference is gone.
	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
		action->Aborted();
	}

	if (IKOT_UEXT_OBJECT == type && IOUserServer::shouldLeakObjects()) {
		// Leak object
		obj->retain();
	}

	obj->release();

	return destroyed;
}
301 
// Tear down the IKOT_IOKIT_OBJECT port for 'obj'. Connect ports must never
// come through here (asserted); services flagged machPortHoldDestroy() keep
// their port alive.
void
IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
	IOMachPort *machPort;
	IOService  *service;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort
	    && (type == IKOT_IOKIT_OBJECT)
	    && (service = OSDynamicCast(IOService, obj))
	    && !service->machPortHoldDestroy()) {
		// Keep 'obj' alive until the releases at the bottom.
		obj->retain();
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);

		// NULL the back-pointer under the per-port lock so concurrent
		// kobject lookups never observe a stale object.
		IOLockLock(&machPort->lock);
		iokit_remove_object_port(machPort->port, type);
		machPort->object = NULL;
		IOLockUnlock(&machPort->lock);

		lck_mtx_unlock(gIOObjectPortLock);

		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

		// Drop the collection-tagged retain taken when the port was created.
		obj->taggedRelease(OSTypeID(OSCollection));
		obj->release();
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}
}
338 
// Tear down the user-visible ports for 'obj'. The IKOT_IOKIT_OBJECT port is
// released outright; the IKOT_IOKIT_CONNECT port is either destroyed or —
// when the user client still has live memory mappings — re-homed onto the
// mappings object so those mappings stay reachable.
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	IOMachPort *machPort;
	bool        destroyPort;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	// panther, 3160200
	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

	obj->retain();
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
	IOMachPortHashList *mappingBucket = NULL;

	lck_mtx_lock(gIOObjectPortLock);

	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	if (uc && uc->mappings) {
		// Compute the destination bucket while still holding the lock.
		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
	}

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		lck_mtx_unlock(gIOObjectPortLock);
		goto end;
	}

	// Detach the port from 'obj' and drop the creation-time tagged retain.
	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	obj->taggedRelease(OSTypeID(OSCollection));

	destroyPort = true;
	if (uc) {
		uc->noMoreSenders();
		if (uc->mappings) {
			// Transfer the existing IOMachPort to the mappings object.
			uc->mappings->taggedRetain(OSTypeID(OSCollection));
			SLIST_INSERT_HEAD(mappingBucket, machPort, link);

			IOLockLock(&machPort->lock);
			machPort->object = uc->mappings;
			IOLockUnlock(&machPort->lock);

			lck_mtx_unlock(gIOObjectPortLock);

			OSSafeReleaseNULL(uc->mappings);
			destroyPort = false;
		}
	}

	if (destroyPort) {
		// Same teardown sequence as noMoreSendersForObject(): clear the
		// back-pointer under the per-port lock, then release the kobject.
		IOLockLock(&machPort->lock);
		iokit_remove_object_port(machPort->port, IKOT_IOKIT_CONNECT);
		machPort->object = NULL;
		IOLockUnlock(&machPort->lock);

		lck_mtx_unlock(gIOObjectPortLock);
		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
	}

end:
	OSSafeReleaseNULL(obj);
}
402 
403 mach_port_name_t
makeSendRightForTask(task_t task,io_object_t obj,ipc_kobject_type_t type)404 IOMachPort::makeSendRightForTask( task_t task,
405     io_object_t obj, ipc_kobject_type_t type )
406 {
407 	return iokit_make_send_right( task, obj, type );
408 }
409 
410 void
free(void)411 IOMachPort::free( void )
412 {
413 	if (port) {
414 		iokit_destroy_object_port(port, iokit_port_type(port));
415 	}
416 	IOLockInlineDestroy(&lock);
417 	super::free();
418 }
419 
420 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
421 
// Stub: no task currently opts into registry-compatibility matching
// ('task' is intentionally unused; always false in this build).
static bool
IOTaskRegistryCompatibility(task_t task)
{
	return false;
}
427 
428 static void
IOTaskRegistryCompatibilityMatching(task_t task,OSDictionary * matching)429 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
430 {
431 	matching->setObject(gIOServiceNotificationUserKey, kOSBooleanTrue);
432 	if (!IOTaskRegistryCompatibility(task)) {
433 		return;
434 	}
435 	matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
436 }
437 
438 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
439 
OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )

// Wrap 'iter' for export to user space. Consumes the caller's reference to
// 'iter' in every path: on success ownership moves into the returned
// wrapper (released by IOUserIterator::free); on failure it is released here.
IOUserIterator *
IOUserIterator::withIterator(OSIterator * iter)
{
	IOUserIterator * me;

	if (!iter) {
		return NULL;
	}

	me = new IOUserIterator;
	if (me && !me->init()) {
		me->release();
		me = NULL;
	}
	if (!me) {
		// Wrapper construction failed: still consume the iterator reference.
		iter->release();
		return me;
	}
	me->userIteratorObject = iter;

	return me;
}
464 
465 bool
init(void)466 IOUserIterator::init( void )
467 {
468 	if (!OSObject::init()) {
469 		return false;
470 	}
471 
472 	IOLockInlineInit(&lock);
473 	return true;
474 }
475 
476 void
free()477 IOUserIterator::free()
478 {
479 	if (userIteratorObject) {
480 		userIteratorObject->release();
481 	}
482 	IOLockInlineDestroy(&lock);
483 	OSObject::free();
484 }
485 
// Forward reset() to the wrapped OSIterator under the lock. The wrapped
// object is asserted to actually be an OSIterator (subclasses may store
// other types in userIteratorObject).
void
IOUserIterator::reset()
{
	IOLockLock(&lock);
	assert(OSDynamicCast(OSIterator, userIteratorObject));
	((OSIterator *)userIteratorObject)->reset();
	IOLockUnlock(&lock);
}
494 
495 bool
isValid()496 IOUserIterator::isValid()
497 {
498 	bool ret;
499 
500 	IOLockLock(&lock);
501 	assert(OSDynamicCast(OSIterator, userIteratorObject));
502 	ret = ((OSIterator *)userIteratorObject)->isValid();
503 	IOLockUnlock(&lock);
504 
505 	return ret;
506 }
507 
// Unretained iteration is not allowed through the user interface; callers
// must use copyNextObject(), which returns a retained object.
OSObject *
IOUserIterator::getNextObject()
{
	assert(false);
	return NULL;
}
514 
515 OSObject *
copyNextObject()516 IOUserIterator::copyNextObject()
517 {
518 	OSObject * ret = NULL;
519 
520 	IOLockLock(&lock);
521 	if (userIteratorObject) {
522 		ret = ((OSIterator *)userIteratorObject)->getNextObject();
523 		if (ret) {
524 			ret->retain();
525 		}
526 	}
527 	IOLockUnlock(&lock);
528 
529 	return ret;
530 }
531 
532 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
533 extern "C" {
534 // functions called from osfmk/device/iokit_rpc.c
535 
// Fill 'desc' with a human-readable description of the object behind a
// kobject port: registry entries get their class name and entry ID; on
// DEVELOPMENT/DEBUG kernels service notifications also get their serialized
// matching dictionary; everything else gets just the class name.
void
iokit_port_object_description(io_object_t obj, kobject_description_t desc)
{
	IORegistryEntry    * regEntry;
	IOUserNotification * __unused noti;
	_IOServiceNotifier * __unused serviceNoti;
	OSSerialize        * __unused s;
	OSDictionary       * __unused matching = NULL;

	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
#if DEVELOPMENT || DEBUG
	} else if ((noti = OSDynamicCast(IOUserNotification, obj))) {
		// serviceNoti->matching may become NULL if the port gets a no-senders notification, so we have to lock gIOObjectPortLock
		IOLockLock(gIOObjectPortLock);
		serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->userIteratorObject);
		if (serviceNoti && (matching = serviceNoti->matching)) {
			matching->retain();
		}
		IOLockUnlock(gIOObjectPortLock);

		if (matching) {
			// Serialize outside the lock; failures fall through silently.
			s = OSSerialize::withCapacity((unsigned int) page_size);
			if (s && matching->serialize(s)) {
				snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
			}
			OSSafeReleaseNULL(s);
			OSSafeReleaseNULL(matching);
		}
#endif /* DEVELOPMENT || DEBUG */
	} else {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
	}
}
570 
// FIXME: Implementation of these functions are hidden from the static analyzer.
// As for now, the analyzer doesn't consistently support wrapper functions
// for retain and release.
#ifndef __clang_analyzer__
// Take one reference on 'obj' on behalf of the IPC layer. NULL is
// tolerated; 'type' is accepted for interface symmetry but unused.
void
iokit_add_reference( io_object_t obj, natural_t type )
{
	if (!obj) {
		return;
	}
	obj->retain();
}

// Drop one reference on 'obj'; NULL is tolerated.
void
iokit_remove_reference( io_object_t obj )
{
	if (obj) {
		obj->release();
	}
}
#endif // __clang_analyzer__
592 
593 void
iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj)594 iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj )
595 {
596 	if (!obj) {
597 		return;
598 	}
599 	obj->release();
600 }
601 
// Locking modes for ipcEnter()/ipcExit(); each ipcEnter(mode) must be
// balanced by ipcExit(mode).
enum {
	kIPCLockNone  = 0, // no lock taken
	kIPCLockRead  = 1, // take the client's rw-lock shared
	kIPCLockWrite = 2  // take the client's rw-lock exclusive
};
607 
// Enter an IPC call on this user client: optionally take the client's
// rw-lock in the requested mode, then bump the in-flight IPC counter that
// defers finalization (see ipcExit()/finalizeUserReferences()).
void
IOUserClient::ipcEnter(int locking)
{
	switch (locking) {
	case kIPCLockWrite:
		IORWLockWrite(&lock);
		break;
	case kIPCLockRead:
		IORWLockRead(&lock);
		break;
	case kIPCLockNone:
		break;
	default:
		panic("ipcEnter");
	}

	OSIncrementAtomic(&__ipc);
}
626 
// Leave an IPC call. When the last in-flight call drains on an inactive
// client whose finalization was deferred (__ipcFinal), schedule that
// finalization now; then drop the rw-lock per the mode given to ipcEnter().
void
IOUserClient::ipcExit(int locking)
{
	bool finalize = false;

	assert(__ipc);
	// OSDecrementAtomic returns the pre-decrement value: 1 means we were
	// the last IPC in flight.
	if (1 == OSDecrementAtomic(&__ipc) && isInactive()) {
		IOLockLock(gIOObjectPortLock);
		if ((finalize = __ipcFinal)) {
			__ipcFinal = false;
		}
		IOLockUnlock(gIOObjectPortLock);
		if (finalize) {
			scheduleFinalize(true);
		}
	}
	switch (locking) {
	case kIPCLockWrite:
	case kIPCLockRead:
		IORWLockUnlock(&lock);
		break;
	case kIPCLockNone:
		break;
	default:
		panic("ipcExit");
	}
}
654 
// Retain a kobject from the IPC layer; must be an IOMachPort (asserted).
void
iokit_kobject_retain(io_kobject_t machPort)
{
	assert(OSDynamicCast(IOMachPort, machPort));
	machPort->retain();
}
661 
// Resolve a kobject (IOMachPort) to the OSObject it fronts, taking a
// reference on that object, and consume the caller's kobject reference.
// Returns NULL if the port was already torn down (object cleared).
io_object_t
iokit_copy_object_for_consumed_kobject(LIBKERN_CONSUMED io_kobject_t machPort, natural_t type)
{
	io_object_t  result;

	assert(OSDynamicCast(IOMachPort, machPort));

	// The per-port lock makes this read-and-retain atomic with respect to
	// teardown, which NULLs 'object' under the same lock.
	IOLockLock(&machPort->lock);
	result = machPort->object;
	if (result) {
		iokit_add_reference(result, type);
	}
	IOLockUnlock(&machPort->lock);
	machPort->release();
	return result;
}
678 
// Called on the finalize path. If the object is a user client with IPC
// calls still in flight (__ipc != 0), mark it (__ipcFinal) so the last
// ipcExit() schedules the finalize, and return false to defer it now.
bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
	IOUserClient * uc;
	bool           ok = true;

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		IOLockLock(gIOObjectPortLock);
		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
			ok = false;
		}
		IOLockUnlock(gIOObjectPortLock);
	}
	return ok;
}
694 
// Return the kobject port for (obj, type) with a +1 port reference,
// lazily creating the IOMachPort on first use. Each call bumps the
// make-send count so no-senders accounting in noMoreSendersForObject()
// stays balanced. When 'kobj' is non-NULL the backing IOMachPort is
// returned through it as well.
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type, ipc_kobject_t * kobj )
{
	IOMachPort *machPort = NULL;
	ipc_port_t   port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		machPort = IOMachPort::withObjectAndType(obj, type);
		if (__improbable(machPort == NULL)) {
			goto end;
		}
		SLIST_INSERT_HEAD(bucket, machPort, link);
	} else {
		machPort->mscount++;
	}

	iokit_retain_port(machPort->port);
	port = machPort->port;

end:
	// On failure 'port' stays NULL and *kobj is set to NULL.
	if (kobj) {
		*kobj = machPort;
	}
	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
728 
// No-senders entry point from the IPC layer. If the port was genuinely
// torn down, deliver the appropriate death notification for the object's
// port type; returns kIOReturnNotReady when new send rights appeared in
// the meantime (caller re-arms with the updated *mscount).
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOUserClient *      client;
	IOMemoryMap *       map;
	IOUserNotification * notify;
	IOUserServerCheckInToken * token;
	IOUserUserClient * uc;

	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
		return kIOReturnNotReady;
	}

	switch (type) {
	case IKOT_IOKIT_CONNECT:
		// User client: run clientDied() under the exclusive rw-lock.
		if ((client = OSDynamicCast( IOUserClient, obj ))) {
			IOStatisticsClientCall();
			IORWLockWrite(&client->lock);
			client->clientDied();
			IORWLockUnlock(&client->lock);
		}
		break;
	case IKOT_IOKIT_OBJECT:
		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
			map->taskDied();
		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
			// Detach the notifier; setNotification(NULL) drops its retain.
			notify->setNotification( NULL );
		}
		break;
	case IKOT_IOKIT_IDENT:
		if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
			token->cancel();
		}
		break;
	case IKOT_UEXT_OBJECT:
		// DriverKit user client: undo the termination deferral against its
		// provider (retained across unlockForArbitration for safety).
		if ((uc = OSDynamicCast(IOUserUserClient, obj))) {
			IOService *provider = NULL;
			uc->lockForArbitration();
			provider = uc->getProvider();
			if (provider) {
				provider->retain();
			}
			uc->unlockForArbitration();
			uc->setTerminateDefer(provider, false);
			OSSafeReleaseNULL(provider);
		}
		break;
	}

	return kIOReturnSuccess;
}
781 };      /* extern "C" */
782 
783 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
784 
// Delivers "service matched/terminated" pings to a user task: discovered
// services queue in 'newSet' and a single Mach message wakes the client,
// which drains the queue via copyNextObject().
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	// Kernel-processed portion of the ping message (header only).
	struct PingMsgKdata {
		mach_msg_header_t               msgHdr;
	};
	// User payload: notification header carrying the client's async reference.
	struct PingMsgUdata {
		OSNotificationHeader64          notifyHeader;
	};

	enum { kMaxOutstanding = 1024 }; // cap on queued, uncollected services

	ipc_port_t          remotePort;       // client's reply port (send right, released in free())
	void                *msgReference;    // copy of the client's async reference
	mach_msg_size_t     msgReferenceSize; // mach_round_msg() of the reference size
	natural_t           msgType;          // notification type put in the header
	OSArray     *       newSet;           // services queued for the client
	bool                armed;            // true when the next arrival should send a ping
	bool                ipcLogged;        // one-shot flag so send failures log only once

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	// Forget remotePort without releasing it here (see invalidatePort()).
	void invalidatePort(void);

	// IOService matching callback trampoline and implementation.
	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
821 
// Delivers IOService interest messages (with an optional argument blob and
// a port descriptor) to a user task.
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	// Kernel-processed portion: header, body, and one port descriptor.
	struct PingMsgKdata {
		mach_msg_header_t               msgHdr;
		mach_msg_body_t                 msgBody;
		mach_msg_port_descriptor_t      ports[1];
	};
	// User payload: packed notification header (variable trailing data).
	struct PingMsgUdata {
		OSNotificationHeader64          notifyHeader __attribute__ ((packed));
	};

	ipc_port_t          remotePort;       // client's reply port send right
	void                *msgReference;    // copy of the client's async reference
	mach_msg_size_t     msgReferenceSize; // mach_round_msg() of the reference size
	mach_msg_size_t     msgExtraSize;     // extra payload bytes beyond the header
	natural_t           msgType;          // notification type put in the header
	uint8_t             clientIs64;       // client uses the 64-bit message layout
	int                 owningPID;        // pid of the process that created this notification
	bool                ipcLogged;        // one-shot flag so send failures log only once

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	// Forget remotePort without releasing it here.
	void invalidatePort(void);

	// IOService interest callback trampoline and implementation.
	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
863 
864 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
865 
#undef super
#define super IOUserIterator
// Abstract base: concrete notification classes below supply the structors.
OSDefineMetaClass( IOUserNotification, IOUserIterator );
OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
870 
871 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
872 
// Destructor path. By the time free() runs, setNotification(NULL) must
// already have detached the notifier (checked on debug kernels).
void
IOUserNotification::free( void )
{
#if DEVELOPMENT || DEBUG
	IOLockLock( gIOObjectPortLock);

	assert(userIteratorObject == NULL);

	IOLockUnlock( gIOObjectPortLock);
#endif /* DEVELOPMENT || DEBUG */

	super::free();
}
886 
887 
// Swap the underlying IONotifier. Passing NULL detaches and drops the
// self-retain taken when a notifier was installed; passing a notifier
// installs it and takes that self-retain. The previous notifier (if any)
// is removed outside the lock.
void
IOUserNotification::setNotification( IONotifier * notify )
{
	OSObject * previousNotify;

	/*
	 * We must retain this object here before proceeding.
	 * Two threads may race in setNotification(). If one thread sets a new notifier while the
	 * other thread sets the notifier to NULL, it is possible for the second thread to call release()
	 * before the first thread calls retain(). Without the retain here, this thread interleaving
	 * would cause the object to get released and freed before it is retained by the first thread,
	 * which is a UaF.
	 */
	retain();

	IOLockLock( gIOObjectPortLock);

	previousNotify = userIteratorObject;
	userIteratorObject = notify;

	IOLockUnlock( gIOObjectPortLock);

	if (previousNotify) {
		assert(OSDynamicCast(IONotifier, previousNotify));
		((IONotifier *)previousNotify)->remove();

		if (notify == NULL) {
			// Detached: drop the self-retain taken when the notifier was set.
			release();
		}
	} else if (notify) {
		// new IONotifier, retain the object. release() will happen in setNotification(NULL)
		retain();
	}

	release(); // paired with retain() at beginning of this method
}
924 
// Intentionally a no-op: a notification stream has no position to reset.
void
IOUserNotification::reset()
{
	// ?
}
930 
// Notifications are always considered valid; there is no invalidation state.
bool
IOUserNotification::isValid()
{
	return true;
}
936 
937 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
938 
#undef super
#define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
942 
943 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
944 
// Initialize with the client's reply port, notification type, and async
// reference (limited to sizeof(OSAsyncReference64) bytes, copied into a
// mach-rounded buffer). Takes ownership of the 'port' send right, released
// in free(). 'clientIs64' is accepted but unused by this subclass.
bool
IOServiceUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize,
    bool clientIs64 )
{
	if (!super::init()) {
		return false;
	}

	newSet = OSArray::withCapacity( 1 );
	if (!newSet) {
		return false;
	}

	// Reject oversized references before touching them.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	remotePort = port;
	msgType = type;
	// Copy only referenceSize bytes; rounding padding stays zeroed.
	bcopy( reference, msgReference, referenceSize );

	return true;
}
975 
// Forget the remote port so free() will not release its send right —
// presumably used when the right is disposed of elsewhere during teardown;
// NOTE(review): confirm against callers.
void
IOServiceUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
981 
// Release the reply-port send right (unless invalidatePort() cleared it),
// the reference buffer, and the queued-service array.
void
IOServiceUserNotification::free( void )
{
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);
	OSSafeReleaseNULL(newSet);

	super::free();
}
993 
994 bool
_handler(void * target,void * ref,IOService * newService,IONotifier * notifier)995 IOServiceUserNotification::_handler( void * target,
996     void * ref, IOService * newService, IONotifier * notifier )
997 {
998 	IOServiceUserNotification * targetObj = (IOServiceUserNotification *)target;
999 	bool ret;
1000 
1001 	targetObj->retain();
1002 	ret = targetObj->handler( ref, newService );
1003 	targetObj->release();
1004 	return ret;
1005 }
1006 
// Matching callback: queue 'newService' (up to kMaxOutstanding) and, if the
// client has drained the queue since the last ping ('armed' and queue was
// empty), send one wake-up Mach message to the client's reply port.
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	unsigned int        count;
	kern_return_t       kr;
	ipc_port_t          port = NULL;
	bool                sendPing = false;
	mach_msg_size_t     msgSize, payloadSize;

	IOTakeLock( &lock );

	count = newSet->getCount();
	if (count < kMaxOutstanding) {
		newSet->setObject( newService );
		// Only ping when transitioning from empty and armed; disarm so at
		// most one ping is outstanding until the client drains the queue.
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( &lock );

	if (kIOServiceTerminatedNotificationType == msgType) {
		// Keep the terminated service's object port alive so the client
		// can still identify it (see releasePortForObject()).
		lck_mtx_lock(gIOObjectPortLock);
		newService->setMachPortHoldDestroy(true);
		lck_mtx_unlock(gIOObjectPortLock);
	}

	if (sendPing) {
		port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );

		// Shrink the payload to the actual (rounded) reference size.
		payloadSize = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		msgSize = (mach_msg_size_t)(sizeof(PingMsgKdata) + payloadSize);

		kr = kernel_mach_msg_send_with_builder_internal(0, payloadSize,
		    MACH_SEND_KERNEL_IMPORTANCE, MACH_MSG_TIMEOUT_NONE, NULL,
		    ^(mach_msg_header_t *hdr, __assert_only mach_msg_descriptor_t *descs, void *payload){
			PingMsgUdata *udata = (PingMsgUdata *)payload;

			hdr->msgh_remote_port    = remotePort;
			hdr->msgh_local_port     = port;
			hdr->msgh_bits           = MACH_MSGH_BITS(
				MACH_MSG_TYPE_COPY_SEND /*remote*/,
				MACH_MSG_TYPE_MAKE_SEND /*local*/);
			hdr->msgh_size           = msgSize;
			hdr->msgh_id             = kOSNotificationMessageID;

			assert(descs == NULL);
			/* End of kernel processed data */

			udata->notifyHeader.size          = 0;
			udata->notifyHeader.type          = msgType;

			assert((char *)udata->notifyHeader.reference + msgReferenceSize <= (char *)payload + payloadSize);
			bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
		});

		if (port) {
			iokit_release_port( port );
		}

		// Log send failures once per notification object.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
// getNextObject() is unsupported for this class: callers must use
// copyNextObject(), which returns a retained reference.
OSObject *
IOServiceUserNotification::getNextObject()
{
	assert(false);
	return NULL;
}
1082 
1083 OSObject *
copyNextObject()1084 IOServiceUserNotification::copyNextObject()
1085 {
1086 	unsigned int        count;
1087 	OSObject *          result;
1088 
1089 	IOLockLock(&lock);
1090 
1091 	count = newSet->getCount();
1092 	if (count) {
1093 		result = newSet->getObject( count - 1 );
1094 		result->retain();
1095 		newSet->removeObject( count - 1);
1096 	} else {
1097 		result = NULL;
1098 		armed = true;
1099 	}
1100 
1101 	IOLockUnlock(&lock);
1102 
1103 	return result;
1104 }
1105 
1106 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1107 
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification,IOUserNotification)1108 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1109 
1110 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1111 
bool
IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, bool client64 )
{
	// Set up a message-based notification: record the client's reply port,
	// notification type, and async reference blob.  Returns false (leaving
	// cleanup to free()) on any failure.
	if (!super::init()) {
		return false;
	}

	// The async reference may not exceed the 64-bit reference layout.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	clientIs64 = client64;

	owningPID = proc_selfpid();

	// Round the stored reference up to mach message alignment; the buffer
	// is zeroed so the padding bytes are deterministic.
	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	// Takes ownership of the caller-provided send right (released in free()).
	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
1140 
// Drop the reply port reference-free: after this, handler() sends go to
// MACH_PORT_NULL and free() will not release a send right.
void
IOServiceMessageUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
1146 
void
IOServiceMessageUserNotification::free( void )
{
	// Release the send right taken in init() (unless invalidatePort() ran)
	// and the stored async reference buffer.
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);

	super::free();
}
1157 
1158 IOReturn
_handler(void * target,void * ref,UInt32 messageType,IOService * provider,void * argument,vm_size_t argSize)1159 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1160     UInt32 messageType, IOService * provider,
1161     void * argument, vm_size_t argSize )
1162 {
1163 	IOServiceMessageUserNotification * targetObj = (IOServiceMessageUserNotification *)target;
1164 	IOReturn ret;
1165 
1166 	targetObj->retain();
1167 	ret = targetObj->handler(
1168 		ref, messageType, provider, argument, argSize);
1169 	targetObj->release();
1170 	return ret;
1171 }
1172 
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	kern_return_t                kr;
	vm_size_t                    argSize;
	mach_msg_size_t              thisMsgSize;
	ipc_port_t                   thisPort, providerPort;

	// Special in-kernel query: return the owning PID as an OSNumber via
	// the argument pointer instead of sending a mach message.
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	// With no explicit payload the argument is the pointer value itself,
	// sized for the client's word width; otherwise clamp the payload to
	// the maximum notification message size.
	if (callerArgSize == 0) {
		if (clientIs64) {
			argSize = sizeof(io_user_reference_t);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions
	// The low bits of the notification type encode the (pre-rounding)
	// argument size so user space can recover the exact length.
	natural_t type = msgType;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	mach_msg_size_t extraSize = kIOUserNotifyMaxMessageSize + sizeof(IOServiceInterestContent64);
	// Fixed portion: kernel header + user header + variable async reference.
	mach_msg_size_t msgSize = (mach_msg_size_t) (sizeof(PingMsgKdata) +
	    sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize);

	// Total = fixed portion + interest-content header + rounded argument,
	// with overflow checked before use.
	if (os_add3_overflow(msgSize, offsetof(IOServiceInterestContent64, messageArgument), argSize, &thisMsgSize)) {
		return kIOReturnBadArgument;
	}
	mach_msg_size_t payloadSize = thisMsgSize - sizeof(PingMsgKdata);

	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT, NULL );
	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );

	// Complex message: one port descriptor (the provider) plus payload.
	kr = kernel_mach_msg_send_with_builder_internal(1, payloadSize,
	    MACH_SEND_KERNEL_IMPORTANCE, MACH_MSG_TIMEOUT_NONE, NULL,
	    ^(mach_msg_header_t *hdr, mach_msg_descriptor_t *descs, void *payload){
		mach_msg_port_descriptor_t *port_desc = (mach_msg_port_descriptor_t *)descs;
		PingMsgUdata *udata = (PingMsgUdata *)payload;
		IOServiceInterestContent64 * data;
		mach_msg_size_t dataOffset;

		hdr->msgh_remote_port    = remotePort;
		hdr->msgh_local_port     = thisPort;
		hdr->msgh_bits           = MACH_MSGH_BITS_COMPLEX
		|  MACH_MSGH_BITS(
			MACH_MSG_TYPE_COPY_SEND /*remote*/,
			MACH_MSG_TYPE_MAKE_SEND /*local*/);
		hdr->msgh_size           = thisMsgSize;
		hdr->msgh_id             = kOSNotificationMessageID;

		/* body.msgh_descriptor_count is set automatically after the closure */

		port_desc[0].name              = providerPort;
		port_desc[0].disposition       = MACH_MSG_TYPE_MAKE_SEND;
		port_desc[0].type              = MACH_MSG_PORT_DESCRIPTOR;
		/* End of kernel processed data */

		udata->notifyHeader.size          = extraSize;
		udata->notifyHeader.type          = type;
		bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );

		/* data is after msgReference */
		dataOffset = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		data = (IOServiceInterestContent64 *) (((uint8_t *) udata) + dataOffset);
		data->messageType = messageType;

		if (callerArgSize == 0) {
		        assert((char *)data->messageArgument + argSize <= (char *)payload + payloadSize);
		        data->messageArgument[0] = (io_user_reference_t) messageArgument;
		        if (!clientIs64) {
		                // 32-bit clients read the low word; duplicate it in
		                // the high word too.
		                data->messageArgument[0] |= (data->messageArgument[0] << 32);
			}
		} else {
		        assert((char *)data->messageArgument + callerArgSize <= (char *)payload + payloadSize);
		        bcopy(messageArgument, data->messageArgument, callerArgSize);
		}
	});

	if (thisPort) {
		iokit_release_port( thisPort );
	}
	if (providerPort) {
		iokit_release_port( providerPort );
	}

	// Out of message buffers is reported distinctly so callers can retry.
	if (kr == MACH_SEND_NO_BUFFER) {
		return kIOReturnNoMemory;
	}

	// Log other send failures only once per notification object.
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1282 
// Message-based notifications carry no object queue; always empty.
OSObject *
IOServiceMessageUserNotification::getNextObject()
{
	return NULL;
}
1288 
// Message-based notifications carry no object queue; always empty.
OSObject *
IOServiceMessageUserNotification::copyNextObject()
{
	return NULL;
}
1294 
1295 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1296 
#undef super
#define super IOService
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

// Guards the per-task owner lists linked through IOUserClientOwner.
IOLock       * gIOUserClientOwnersLock;

// ABI guard: the reserved/opaque region of IOUserClient must keep its
// exact size so kext binary compatibility is preserved.
static_assert(offsetof(IOUserClient, __opaque_end) -
    offsetof(IOUserClient, __opaque_start) == sizeof(void *) * 9,
    "ABI check: Opaque ivars for IOUserClient must be 9 void * big");
1306 
1307 void
initialize(void)1308 IOUserClient::initialize( void )
1309 {
1310 	gIOObjectPortLock       = IOLockAlloc();
1311 	gIOUserClientOwnersLock = IOLockAlloc();
1312 	gIOUserServerLock       = IOLockAlloc();
1313 	assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1314 
1315 #if IOTRACKING
1316 	IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1317 	IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1318 	IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1319 	IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1320 	IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
1321 #endif /* IOTRACKING */
1322 }
1323 
// Legacy 32-bit async reference setup; compiled as a hard panic on LP64
// kernels where OSAsyncReference is no longer valid.
void
#if __LP64__
__attribute__((__noreturn__))
#endif
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
#if __LP64__
	panic("setAsyncReference not valid for 64b");
#else
	// Preserve the flag bits already stored in the reserved slot while
	// installing the wake port.
	asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
#endif
}
1341 
// Populate a 64-bit async reference: wake port (preserving existing flag
// bits), user callback address, and user refcon.
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon)
{
	asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}
1352 
// Task-aware variant: additionally tags the reference with kIOUCAsync64Flag
// when the target task has a 64-bit address space.
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
{
	setAsyncReference64(asyncRef, wakePort, callback, refcon);
	if (vm_map_is_64bit(get_task_map(task))) {
		asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
	}
}
1363 
1364 static OSDictionary *
CopyConsoleUser(UInt32 uid)1365 CopyConsoleUser(UInt32 uid)
1366 {
1367 	OSArray * array;
1368 	OSDictionary * user = NULL;
1369 
1370 	OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1371 	if ((array = OSDynamicCast(OSArray, ioProperty))) {
1372 		for (unsigned int idx = 0;
1373 		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1374 		    idx++) {
1375 			OSNumber * num;
1376 
1377 			if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1378 			    && (uid == num->unsigned32BitValue())) {
1379 				user->retain();
1380 				break;
1381 			}
1382 		}
1383 	}
1384 	OSSafeReleaseNULL(ioProperty);
1385 	return user;
1386 }
1387 
1388 static OSDictionary *
CopyUserOnConsole(void)1389 CopyUserOnConsole(void)
1390 {
1391 	OSArray * array;
1392 	OSDictionary * user = NULL;
1393 
1394 	OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1395 	if ((array = OSDynamicCast(OSArray, ioProperty))) {
1396 		for (unsigned int idx = 0;
1397 		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1398 		    idx++) {
1399 			if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1400 				user->retain();
1401 				break;
1402 			}
1403 		}
1404 	}
1405 	OSSafeReleaseNULL(ioProperty);
1406 	return user;
1407 }
1408 
1409 IOReturn
clientHasAuthorization(task_t task,IOService * service)1410 IOUserClient::clientHasAuthorization( task_t task,
1411     IOService * service )
1412 {
1413 	proc_t p;
1414 
1415 	p = (proc_t) get_bsdtask_info(task);
1416 	if (p) {
1417 		uint64_t authorizationID;
1418 
1419 		authorizationID = proc_uniqueid(p);
1420 		if (authorizationID) {
1421 			if (service->getAuthorizationID() == authorizationID) {
1422 				return kIOReturnSuccess;
1423 			}
1424 		}
1425 	}
1426 
1427 	return kIOReturnNotPermitted;
1428 }
1429 
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	// Evaluate a named privilege for the task identified by securityToken
	// (a task_t, or an IOUCProcessToken for the secure-console check).
	// Returns kIOReturnSuccess when the privilege is held.
	kern_return_t           kr;
	security_token_t        token;
	mach_msg_type_number_t  count;
	task_t                  task;
	OSDictionary *          user;
	bool                    secureConsole;


	// Foreground privilege: granted unless the current task is GPU-denied.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console-session privilege: the task's audit session must match the
	// session of the user currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t       p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// Secure-console checks pass an IOUCProcessToken (task + pid) rather
	// than a bare task_t.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	// Remaining privileges are decided from the task's security token
	// (token.val[0] is the effective uid).
	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator: effective uid must be root (0).
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: the uid must correspond to some console session.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: uid's session must be on the console; for the
		// secure-console case the secure-input PID must also match.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1524 
1525 OSDictionary *
copyClientEntitlements(task_t task)1526 IOUserClient::copyClientEntitlements(task_t task)
1527 {
1528 	proc_t p = NULL;
1529 	pid_t pid = 0;
1530 	OSDictionary *entitlements = NULL;
1531 
1532 	p = (proc_t)get_bsdtask_info(task);
1533 	if (p == NULL) {
1534 		return NULL;
1535 	}
1536 	pid = proc_pid(p);
1537 
1538 	if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1539 		if (entitlements) {
1540 			return entitlements;
1541 		}
1542 	}
1543 
1544 	// If the above fails, thats it
1545 	return NULL;
1546 }
1547 
1548 OSDictionary *
copyClientEntitlementsVnode(vnode_t vnode,off_t offset)1549 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1550 {
1551 	OSDictionary *entitlements = NULL;
1552 
1553 	if (cs_entitlements_dictionary_copy_vnode(vnode, offset, (void**)&entitlements) != 0) {
1554 		return NULL;
1555 	}
1556 	return entitlements;
1557 }
1558 
OSObject *
IOUserClient::copyClientEntitlement( task_t task,
    const char * entitlement )
{
	// Copy a single entitlement value for `task` (defaulting to the
	// current task).  The caller owns the returned object.
	void *entitlement_object = NULL;

	if (task == NULL) {
		task = current_task();
	}

	/* Validate input arguments */
	// The kernel task has no entitlements.
	if (task == kernel_task || entitlement == NULL) {
		return NULL;
	}
	proc_t proc = (proc_t)get_bsdtask_info(task);

	// Delegate the lookup to AMFI's OSEntitlements interface.
	kern_return_t ret = amfi->OSEntitlements.copyEntitlementAsOSObjectWithProc(
		proc,
		entitlement,
		&entitlement_object);

	if (ret != KERN_SUCCESS) {
		return NULL;
	}
	assert(entitlement_object != NULL);

	return (OSObject*)entitlement_object;
}
1587 
1588 OSObject *
copyClientEntitlementVnode(struct vnode * vnode,off_t offset,const char * entitlement)1589 IOUserClient::copyClientEntitlementVnode(
1590 	struct vnode *vnode,
1591 	off_t offset,
1592 	const char *entitlement)
1593 {
1594 	OSDictionary *entitlements;
1595 	OSObject *value;
1596 
1597 	entitlements = copyClientEntitlementsVnode(vnode, offset);
1598 	if (entitlements == NULL) {
1599 		return NULL;
1600 	}
1601 
1602 	/* Fetch the entitlement value from the dictionary. */
1603 	value = entitlements->getObject(entitlement);
1604 	if (value != NULL) {
1605 		value->retain();
1606 	}
1607 
1608 	entitlements->release();
1609 	return value;
1610 }
1611 
bool
IOUserClient::init()
{
	// If a property table already exists the object was initialized once;
	// otherwise run the superclass init.  Either way, set up the expansion
	// data via reserve().
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1621 
bool
IOUserClient::init(OSDictionary * dictionary)
{
	// Same pattern as init(): skip super::init when a property table is
	// already present, then allocate the expansion data.
	if (getPropertyTable() || super::init(dictionary)) {
		return reserve();
	}

	return false;
}
1631 
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type )
{
	// Base implementation ignores the task/security arguments; subclasses
	// override to record them.  Behaves like init().
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1643 
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type,
    OSDictionary * properties )
{
	// Initialize with a property table, then run the three-argument
	// initWithTask; both must succeed.
	bool ok;

	ok = super::init( properties );
	ok &= initWithTask( owningTask, securityID, type );

	return ok;
}
1657 
bool
IOUserClient::reserve()
{
	// Allocate the expansion data (idempotent) and initialize the inline
	// locks and statistics counter.  Always succeeds.
	if (!reserved) {
		reserved = IOMallocType(ExpansionData);
	}
	setTerminateDefer(NULL, true);
	IOStatisticsRegisterCounter();
	IORWLockInlineInit(&lock);
	IOLockInlineInit(&filterLock);

	return true;
}
1671 
// Links one (task, user client) ownership pair onto two lists: the task's
// list of owned user clients and the user client's list of owning tasks.
struct IOUserClientOwner {
	task_t         task;      // owning task
	queue_chain_t  taskLink;  // link on task_io_user_clients(task)
	IOUserClient * uc;        // owned user client
	queue_chain_t  ucLink;    // link on uc->owners
};
1678 
IOReturn
IOUserClient::registerOwner(task_t task)
{
	// Record `task` as an owner of this user client (no-op if already
	// registered).  Owners are tracked so client death / task termination
	// can tear the connection down (see noMoreSenders / iokit_task_terminate).
	IOUserClientOwner * owner;
	IOReturn            ret;
	bool                newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	// owners.next == NULL marks a never-initialized queue head.
	if (!owners.next) {
		queue_init(&owners);
	} else {
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		// Link the new owner onto both the client's and the task's lists.
		owner = IOMallocType(IOUserClientOwner);

		owner->task = task;
		owner->uc   = this;
		queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
		queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
		// Propagate this client's interest in app-suspend messages to the task.
		if (messageAppSuspended) {
			task_set_message_app_suspended(task, true);
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1719 
void
IOUserClient::noMoreSenders(void)
{
	// All send rights to this client are gone: unlink every owner record
	// from both lists and recompute each task's app-suspended-message
	// interest from its remaining clients.
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t      * taskque;
	bool                hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			// The task keeps app-suspend messaging only if another of its
			// clients still wants it.
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IOFreeType(owner, IOUserClientOwner);
		}
		// Mark the queue uninitialized again (see registerOwner / free).
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1751 
1752 
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	// The task's app-suspended state flipped: deliver
	// kIOMessageTaskAppSuspendedChange to each of its user clients that
	// opted in via messageAppSuspended.
	queue_head_t      * taskque;
	IOUserClientOwner * owner;
	OSSet             * set;

	IOLockLock(gIOUserClientOwnersLock);

	// Collect interested clients into a set so message() can be called
	// after dropping the lock (message() may take other locks).
	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		if (!set) {
			set = OSSet::withCapacity(4);
			if (!set) {
				break;
			}
		}
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient      * uc;

			uc = (typeof(uc))obj;
#if 0
			{
			        OSString          * str;
			        str = IOCopyLogNameForPID(task_pid(task));
			        IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
			        uc->getName(), task_is_app_suspended(task));
			        OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1800 
1801 static kern_return_t
iokit_task_terminate_phase1(task_t task)1802 iokit_task_terminate_phase1(task_t task)
1803 {
1804 	queue_head_t      * taskque;
1805 	IOUserClientOwner * iter;
1806 	OSSet             * userServers = NULL;
1807 
1808 	if (!task_is_driver(task)) {
1809 		return KERN_SUCCESS;
1810 	}
1811 	userServers = OSSet::withCapacity(1);
1812 
1813 	IOLockLock(gIOUserClientOwnersLock);
1814 
1815 	taskque = task_io_user_clients(task);
1816 	queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
1817 		userServers->setObject(iter->uc);
1818 	}
1819 	IOLockUnlock(gIOUserClientOwnersLock);
1820 
1821 	if (userServers) {
1822 		IOUserServer * userServer;
1823 		while ((userServer = OSRequiredCast(IOUserServer, userServers->getAnyObject()))) {
1824 			userServer->clientDied();
1825 			userServers->removeObject(userServer);
1826 		}
1827 		userServers->release();
1828 	}
1829 	return KERN_SUCCESS;
1830 }
1831 
static kern_return_t
iokit_task_terminate_phase2(task_t task)
{
	// Phase 2 of task termination: detach every user client owned by the
	// task; clients left with no owners are collected onto an intrusive
	// "dead" list and torn down after the lock is dropped.
	queue_head_t      * taskque;
	IOUserClientOwner * owner;
	IOUserClient      * dead;
	IOUserClient      * uc;

	IOLockLock(gIOUserClientOwnersLock);
	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IOFreeType(owner, IOUserClientOwner);
	}
	IOLockUnlock(gIOUserClientOwnersLock);

	// Walk the singly linked dead list (threaded through owners.prev) and
	// deliver clientDied() outside the lock, dropping the retain taken above.
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		// Shared-instance clients are always notified; others only if not
		// already closed.
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1873 
// Entry point called by the task-termination path; dispatches to the
// two-phase teardown above.  Any other phase value is a kernel bug.
extern "C" kern_return_t
iokit_task_terminate(task_t task, int phase)
{
	switch (phase) {
	case 1:
		return iokit_task_terminate_phase1(task);
	case 2:
		return iokit_task_terminate_phase2(task);
	default:
		panic("iokit_task_terminate phase %d", phase);
	}
}
1886 
// Singly linked list node mapping a task to its sandbox filter policy for
// this user client (list head lives in ExpansionData::filterPolicies).
struct IOUCFilterPolicy {
	task_t             task;          // task the policy applies to
	io_filter_policy_t filterPolicy;  // opaque policy handle
	IOUCFilterPolicy * next;          // next node, or NULL
};
1892 
1893 io_filter_policy_t
filterForTask(task_t task,io_filter_policy_t addFilterPolicy)1894 IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
1895 {
1896 	IOUCFilterPolicy * elem;
1897 	io_filter_policy_t filterPolicy;
1898 
1899 	filterPolicy = 0;
1900 	IOLockLock(&filterLock);
1901 
1902 	for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
1903 	}
1904 
1905 	if (elem) {
1906 		if (addFilterPolicy) {
1907 			assert(addFilterPolicy == elem->filterPolicy);
1908 		}
1909 		filterPolicy = elem->filterPolicy;
1910 	} else if (addFilterPolicy) {
1911 		elem = IOMallocType(IOUCFilterPolicy);
1912 		elem->task               = task;
1913 		elem->filterPolicy       = addFilterPolicy;
1914 		elem->next               = reserved->filterPolicies;
1915 		reserved->filterPolicies = elem;
1916 		filterPolicy = addFilterPolicy;
1917 	}
1918 
1919 	IOLockUnlock(&filterLock);
1920 	return filterPolicy;
1921 }
1922 
void
IOUserClient::free()
{
	// Tear down per-client state: memory mappings, statistics, the filter
	// policy list, expansion data, and the inline locks.
	if (mappings) {
		mappings->release();
	}

	IOStatisticsUnregisterCounter();

	// The owner list must already be empty/uninitialized (noMoreSenders or
	// task termination cleared it).
	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		IOUCFilterPolicy * elem;
		IOUCFilterPolicy * nextElem;
		// Walk and free the filter policy list, releasing each policy via
		// the registered filter callbacks when available.
		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
			nextElem = elem->next;
			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
			}
			IOFreeType(elem, IOUCFilterPolicy);
		}
		IOFreeType(reserved, ExpansionData);
		IORWLockInlineDestroy(&lock);
		IOLockInlineDestroy(&filterLock);
	}

	super::free();
}
1952 
1953 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1954 
OSDefineMetaClassAndAbstractStructors(IOUserClient2022,IOUserClient)1955 OSDefineMetaClassAndAbstractStructors( IOUserClient2022, IOUserClient )
1956 
1957 
1958 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1959 
IOReturn
IOUserClient::clientDied( void )
{
	// Called when the owning client process goes away.  The atomic 0->1
	// swap on `closed` ensures clientClose() runs at most once for
	// non-shared instances; shared instances are always closed.
	IOReturn ret = kIOReturnNotReady;

	if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
		ret = clientClose();
	}

	return ret;
}
1971 
// Default implementation: subclasses override to tear down their connection.
IOReturn
IOUserClient::clientClose( void )
{
	return kIOReturnUnsupported;
}
1977 
// Default implementation: subclasses override to return their provider.
IOService *
IOUserClient::getService( void )
{
	return NULL;
}
1983 
// Default implementation (32-bit refCon variant): subclasses override to
// accept a wake port for notifications.
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t     /* port */,
	UInt32          /* type */,
	UInt32          /* refCon */)
{
	return kIOReturnUnsupported;
}
1992 
// 64-bit refCon variant; forwards to the 32-bit override, truncating the
// refCon for legacy subclasses.
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t port,
	UInt32          type,
	io_user_reference_t refCon)
{
	return registerNotificationPort(port, type, (UInt32) refCon);
}
2001 
// Default implementation: subclasses override to vend a notification
// semaphore to user space.
IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type,
    semaphore_t * semaphore )
{
	return kIOReturnUnsupported;
}
2008 
// Default implementation: subclasses override to link a second client.
IOReturn
IOUserClient::connectClient( IOUserClient * /* client */ )
{
	return kIOReturnUnsupported;
}
2014 
// Default implementation: subclasses override to vend a memory descriptor
// (returned with a reference the caller consumes) for the given type.
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    IOMemoryDescriptor ** memory )
{
	return kIOReturnUnsupported;
}
2022 
2023 IOReturn
clientMemoryForType(UInt32 type,IOOptionBits * options,OSSharedPtr<IOMemoryDescriptor> & memory)2024 IOUserClient::clientMemoryForType( UInt32 type,
2025     IOOptionBits * options,
2026     OSSharedPtr<IOMemoryDescriptor>& memory )
2027 {
2028 	IOMemoryDescriptor* memoryRaw = nullptr;
2029 	IOReturn result = clientMemoryForType(type, options, &memoryRaw);
2030 	memory.reset(memoryRaw, OSNoRetain);
2031 	return result;
2032 }
2033 
#if !__LP64__
// Legacy 32-bit entry point; superseded by mapClientMemory64().
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits            type,
	task_t                  task,
	IOOptionBits            mapFlags,
	IOVirtualAddress        atAddress )
{
	return NULL;
}
#endif
2045 
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits            type,
	task_t                  task,
	IOOptionBits            mapFlags,
	mach_vm_address_t       atAddress )
{
	// Ask the subclass for the memory descriptor for `type`, then map it
	// into `task`.  The descriptor reference from clientMemoryForType() is
	// consumed here; the caller owns the returned IOMemoryMap (or NULL).
	IOReturn            err;
	IOOptionBits        options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap *       map = NULL;

	err = clientMemoryForType((UInt32) type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		FAKE_STACK_FRAME(getMetaClass());

		// Caller-supplied user-mapping flags override the subclass's,
		// but only within kIOMapUserOptionsMask.
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		memory->release();

		FAKE_STACK_FRAME_END();
	}

	return map;
}
2073 
IOReturn
IOUserClient::exportObjectToClient(task_t task,
    OSObject *obj, io_object_t *clientObj)
{
	// Create a send right for `obj` in `task`'s IPC space and return its
	// port name as the io_object_t.  Consumes the caller's reference on
	// `obj` (released below); accepts NULL, which yields a null name.
	mach_port_name_t    name;

	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );

	*clientObj = (io_object_t)(uintptr_t) name;

	if (obj) {
		obj->release();
	}

	return kIOReturnSuccess;
}
2090 
2091 IOReturn
copyPortNameForObjectInTask(task_t task,OSObject * obj,mach_port_name_t * port_name)2092 IOUserClient::copyPortNameForObjectInTask(task_t task,
2093     OSObject *obj, mach_port_name_t * port_name)
2094 {
2095 	mach_port_name_t    name;
2096 
2097 	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
2098 
2099 	*(mach_port_name_t *) port_name = name;
2100 
2101 	return kIOReturnSuccess;
2102 }
2103 
2104 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSObject ** obj)2105 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2106     OSObject **obj)
2107 {
2108 	OSObject * object;
2109 
2110 	object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
2111 
2112 	*obj = object;
2113 
2114 	return object ? kIOReturnSuccess : kIOReturnIPCError;
2115 }
2116 
2117 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSSharedPtr<OSObject> & obj)2118 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2119     OSSharedPtr<OSObject>& obj)
2120 {
2121 	OSObject* objRaw = NULL;
2122 	IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2123 	obj.reset(objRaw, OSNoRetain);
2124 	return result;
2125 }
2126 
/*
 * Adjust the user reference count of the send right `port_name` held by
 * `task` by `delta`; thin wrapper around iokit_mod_send_right().
 */
IOReturn
IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
	return iokit_mod_send_right(task, port_name, delta);
}
2132 
/*
 * Default implementation: no legacy external methods are exported.
 * Subclasses override this to publish an IOExternalMethod table entry.
 */
IOExternalMethod *
IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2138 
/*
 * Default implementation: no legacy async external methods are exported.
 * Subclasses override this to publish an IOExternalAsyncMethod table entry.
 */
IOExternalAsyncMethod *
IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2144 
/*
 * Default implementation: no external traps are exported. Subclasses
 * override this to publish an IOExternalTrap table entry.
 */
IOExternalTrap *
IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
	return NULL;
}
2151 
2152 #pragma clang diagnostic push
2153 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2154 
2155 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
2156 // functions can break clients of kexts implementing getExternalMethodForIndex()
2157 IOExternalMethod *
2158 IOUserClient::
getTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2159 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
2160 {
2161 	IOExternalMethod *method = getExternalMethodForIndex(index);
2162 
2163 	if (method) {
2164 		*targetP = (IOService *) method->object;
2165 	}
2166 
2167 	return method;
2168 }
2169 
2170 IOExternalMethod *
2171 IOUserClient::
getTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2172 getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2173 {
2174 	IOService* targetPRaw = NULL;
2175 	IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
2176 	targetP.reset(targetPRaw, OSRetain);
2177 	return result;
2178 }
2179 
2180 IOExternalAsyncMethod *
2181 IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2182 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
2183 {
2184 	IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
2185 
2186 	if (method) {
2187 		*targetP = (IOService *) method->object;
2188 	}
2189 
2190 	return method;
2191 }
2192 
2193 IOExternalAsyncMethod *
2194 IOUserClient::
getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2195 getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2196 {
2197 	IOService* targetPRaw = NULL;
2198 	IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
2199 	targetP.reset(targetPRaw, OSRetain);
2200 	return result;
2201 }
2202 
2203 IOExternalTrap *
2204 IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP,UInt32 index)2205 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
2206 {
2207 	IOExternalTrap *trap = getExternalTrapForIndex(index);
2208 
2209 	if (trap) {
2210 		*targetP = trap->object;
2211 	}
2212 
2213 	return trap;
2214 }
2215 #pragma clang diagnostic pop
2216 
2217 IOReturn
releaseAsyncReference64(OSAsyncReference64 reference)2218 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2219 {
2220 	mach_port_t port;
2221 	port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2222 
2223 	if (MACH_PORT_NULL != port) {
2224 		iokit_release_port_send(port);
2225 	}
2226 
2227 	return kIOReturnSuccess;
2228 }
2229 
2230 IOReturn
releaseNotificationPort(mach_port_t port)2231 IOUserClient::releaseNotificationPort(mach_port_t port)
2232 {
2233 	if (MACH_PORT_NULL != port) {
2234 		iokit_release_port_send(port);
2235 	}
2236 
2237 	return kIOReturnSuccess;
2238 }
2239 
2240 IOReturn
sendAsyncResult(OSAsyncReference reference,IOReturn result,void * args[],UInt32 numArgs)2241 IOUserClient::sendAsyncResult(OSAsyncReference reference,
2242     IOReturn result, void *args[], UInt32 numArgs)
2243 {
2244 	OSAsyncReference64  reference64;
2245 	OSBoundedArray<io_user_reference_t, kMaxAsyncArgs> args64;
2246 	unsigned int        idx;
2247 
2248 	if (numArgs > kMaxAsyncArgs) {
2249 		return kIOReturnMessageTooLarge;
2250 	}
2251 
2252 	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2253 		reference64[idx] = REF64(reference[idx]);
2254 	}
2255 
2256 	for (idx = 0; idx < numArgs; idx++) {
2257 		args64[idx] = REF64(args[idx]);
2258 	}
2259 
2260 	return sendAsyncResult64(reference64, result, args64.data(), numArgs);
2261 }
2262 
/*
 * Send an async completion with explicit delivery options (e.g.
 * kIOUserNotifyOptionCanDrop); forwards to _sendAsyncResult64().
 */
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2269 
/*
 * Send an async completion with default options (no dropping on a full
 * queue); forwards to _sendAsyncResult64().
 */
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2276 
/*
 * Common backend for sendAsyncResult64() and sendAsyncResult64WithOptions().
 *
 * Builds an async-completion Mach message and sends it from the kernel to
 * the reply port stored in reference[0]. The wire format is either the
 * 64-bit layout or the legacy 32-bit layout, selected by the
 * kIOUCAsync64Flag bit in reference[0].
 *
 * With kIOUserNotifyOptionCanDrop in `options`, the send uses a zero
 * timeout and may drop the message on a full queue; otherwise a full
 * queue is reported as a send failure.
 */
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	/* On-stack image of the outgoing message, sized for either layout. */
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader     notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t                 args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64   notifyHdr;
				IOAsyncCompletionContent asyncContent;
				io_user_reference_t      args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg      replyMsg;
	mach_port_t   replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port  = NULL;
	replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		/* 64-bit client: args are io_user_reference_t; unused trailing
		 * argument slots are trimmed from msgh_size. */
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		unsigned int idx;

		/* 32-bit client: reference words and args are narrowed via REF32. */
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		/* Skip reference[0] which is left as 0 from the earlier bzero */
		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		/* Caller allows dropping: zero send timeout instead of blocking. */
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH64_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel(&replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	/* Log a real send failure at most once per reference
	 * (timeouts are expected drops, not errors). */
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2367 
2368 
2369 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2370 
2371 extern "C" {
/*
 * CHECK: declare `out` as a `cls *` downcast of `obj`, returning
 * kIOReturnBadArgument from the enclosing MIG routine if the cast fails.
 */
#define CHECK(cls, obj, out)                      \
	cls * out;                              \
	if( !(out = OSDynamicCast( cls, obj)))  \
	    return( kIOReturnBadArgument )

/*
 * CHECKLOCKED: like CHECK, but `obj` must be an IOUserIterator whose
 * wrapped object is of class `cls`; declares both `oIter` and `out`.
 */
#define CHECKLOCKED(cls, obj, out)                                        \
	IOUserIterator * oIter;                                         \
	cls * out;                                                      \
	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))              \
	    return (kIOReturnBadArgument);                              \
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject)))     \
	    return (kIOReturnBadArgument)
2384 
2385 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2386 
2387 // Create a vm_map_copy_t or kalloc'ed data for memory
2388 // to be copied out. ipc will free after the copyout.
2389 
2390 static kern_return_t
copyoutkdata(const void * data,vm_size_t len,io_buf_ptr_t * buf)2391 copyoutkdata( const void * data, vm_size_t len,
2392     io_buf_ptr_t * buf )
2393 {
2394 	kern_return_t       err;
2395 	vm_map_copy_t       copy;
2396 
2397 	err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2398 	    false /* src_destroy */, &copy);
2399 
2400 	assert( err == KERN_SUCCESS );
2401 	if (err == KERN_SUCCESS) {
2402 		*buf = (char *) copy;
2403 	}
2404 
2405 	return err;
2406 }
2407 
2408 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2409 
2410 /* Routine io_server_version */
2411 kern_return_t
is_io_server_version(mach_port_t main_port,uint64_t * version)2412 is_io_server_version(
2413 	mach_port_t main_port,
2414 	uint64_t *version)
2415 {
2416 	*version = IOKIT_SERVER_VERSION;
2417 	return kIOReturnSuccess;
2418 }
2419 
2420 /* Routine io_object_get_class */
2421 kern_return_t
is_io_object_get_class(io_object_t object,io_name_t className)2422 is_io_object_get_class(
2423 	io_object_t object,
2424 	io_name_t className )
2425 {
2426 	const OSMetaClass* my_obj = NULL;
2427 
2428 	if (!object) {
2429 		return kIOReturnBadArgument;
2430 	}
2431 
2432 	my_obj = object->getMetaClass();
2433 	if (!my_obj) {
2434 		return kIOReturnNotFound;
2435 	}
2436 
2437 	strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2438 
2439 	return kIOReturnSuccess;
2440 }
2441 
2442 /* Routine io_object_get_superclass */
2443 kern_return_t
is_io_object_get_superclass(mach_port_t main_port,io_name_t obj_name,io_name_t class_name)2444 is_io_object_get_superclass(
2445 	mach_port_t main_port,
2446 	io_name_t obj_name,
2447 	io_name_t class_name)
2448 {
2449 	IOReturn            ret;
2450 	const OSMetaClass * meta;
2451 	const OSMetaClass * super;
2452 	const OSSymbol    * name;
2453 	const char        * cstr;
2454 
2455 	if (!obj_name || !class_name) {
2456 		return kIOReturnBadArgument;
2457 	}
2458 	if (main_port != main_device_port) {
2459 		return kIOReturnNotPrivileged;
2460 	}
2461 
2462 	ret = kIOReturnNotFound;
2463 	meta = NULL;
2464 	do{
2465 		name = OSSymbol::withCString(obj_name);
2466 		if (!name) {
2467 			break;
2468 		}
2469 		meta = OSMetaClass::copyMetaClassWithName(name);
2470 		if (!meta) {
2471 			break;
2472 		}
2473 		super = meta->getSuperClass();
2474 		if (!super) {
2475 			break;
2476 		}
2477 		cstr = super->getClassName();
2478 		if (!cstr) {
2479 			break;
2480 		}
2481 		strlcpy(class_name, cstr, sizeof(io_name_t));
2482 		ret = kIOReturnSuccess;
2483 	}while (false);
2484 
2485 	OSSafeReleaseNULL(name);
2486 	if (meta) {
2487 		meta->releaseMetaClass();
2488 	}
2489 
2490 	return ret;
2491 }
2492 
2493 /* Routine io_object_get_bundle_identifier */
2494 kern_return_t
is_io_object_get_bundle_identifier(mach_port_t main_port,io_name_t obj_name,io_name_t bundle_name)2495 is_io_object_get_bundle_identifier(
2496 	mach_port_t main_port,
2497 	io_name_t obj_name,
2498 	io_name_t bundle_name)
2499 {
2500 	IOReturn            ret;
2501 	const OSMetaClass * meta;
2502 	const OSSymbol    * name;
2503 	const OSSymbol    * identifier;
2504 	const char        * cstr;
2505 
2506 	if (!obj_name || !bundle_name) {
2507 		return kIOReturnBadArgument;
2508 	}
2509 	if (main_port != main_device_port) {
2510 		return kIOReturnNotPrivileged;
2511 	}
2512 
2513 	ret = kIOReturnNotFound;
2514 	meta = NULL;
2515 	do{
2516 		name = OSSymbol::withCString(obj_name);
2517 		if (!name) {
2518 			break;
2519 		}
2520 		meta = OSMetaClass::copyMetaClassWithName(name);
2521 		if (!meta) {
2522 			break;
2523 		}
2524 		identifier = meta->getKmodName();
2525 		if (!identifier) {
2526 			break;
2527 		}
2528 		cstr = identifier->getCStringNoCopy();
2529 		if (!cstr) {
2530 			break;
2531 		}
2532 		strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2533 		ret = kIOReturnSuccess;
2534 	}while (false);
2535 
2536 	OSSafeReleaseNULL(name);
2537 	if (meta) {
2538 		meta->releaseMetaClass();
2539 	}
2540 
2541 	return ret;
2542 }
2543 
2544 /* Routine io_object_conforms_to */
2545 kern_return_t
is_io_object_conforms_to(io_object_t object,io_name_t className,boolean_t * conforms)2546 is_io_object_conforms_to(
2547 	io_object_t object,
2548 	io_name_t className,
2549 	boolean_t *conforms )
2550 {
2551 	if (!object) {
2552 		return kIOReturnBadArgument;
2553 	}
2554 
2555 	*conforms = (NULL != object->metaCast( className ));
2556 
2557 	return kIOReturnSuccess;
2558 }
2559 
2560 /* Routine io_object_get_retain_count */
2561 kern_return_t
is_io_object_get_retain_count(io_object_t object,uint32_t * retainCount)2562 is_io_object_get_retain_count(
2563 	io_object_t object,
2564 	uint32_t *retainCount )
2565 {
2566 	if (!object) {
2567 		return kIOReturnBadArgument;
2568 	}
2569 
2570 	*retainCount = object->getRetainCount();
2571 	return kIOReturnSuccess;
2572 }
2573 
2574 /* Routine io_iterator_next */
2575 kern_return_t
is_io_iterator_next(io_object_t iterator,io_object_t * object)2576 is_io_iterator_next(
2577 	io_object_t iterator,
2578 	io_object_t *object )
2579 {
2580 	IOReturn    ret;
2581 	OSObject *  obj;
2582 	OSIterator * iter;
2583 	IOUserIterator * uiter;
2584 
2585 	if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2586 		obj = uiter->copyNextObject();
2587 	} else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2588 		obj = iter->getNextObject();
2589 		if (obj) {
2590 			obj->retain();
2591 		}
2592 	} else {
2593 		return kIOReturnBadArgument;
2594 	}
2595 
2596 	if (obj) {
2597 		*object = obj;
2598 		ret = kIOReturnSuccess;
2599 	} else {
2600 		ret = kIOReturnNoDevice;
2601 	}
2602 
2603 	return ret;
2604 }
2605 
/* Routine io_iterator_reset */
kern_return_t
is_io_iterator_reset(
	io_object_t iterator )
{
	// CHECK declares `iter` and returns kIOReturnBadArgument if
	// `iterator` is not an OSIterator.
	CHECK( OSIterator, iterator, iter );

	iter->reset();

	return kIOReturnSuccess;
}
2617 
/* Routine io_iterator_is_valid */
kern_return_t
is_io_iterator_is_valid(
	io_object_t iterator,
	boolean_t *is_valid )
{
	// CHECK declares `iter` and returns kIOReturnBadArgument if
	// `iterator` is not an OSIterator.
	CHECK( OSIterator, iterator, iter );

	*is_valid = iter->isValid();

	return kIOReturnSuccess;
}
2630 
2631 static kern_return_t
internal_io_service_match_property_table(io_service_t _service,const char * matching,mach_msg_type_number_t matching_size,boolean_t * matches)2632 internal_io_service_match_property_table(
2633 	io_service_t _service,
2634 	const char * matching,
2635 	mach_msg_type_number_t matching_size,
2636 	boolean_t *matches)
2637 {
2638 	CHECK( IOService, _service, service );
2639 
2640 	kern_return_t       kr;
2641 	OSObject *          obj;
2642 	OSDictionary *      dict;
2643 
2644 	assert(matching_size);
2645 
2646 
2647 	obj = OSUnserializeXML(matching, matching_size);
2648 
2649 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2650 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2651 		*matches = service->passiveMatch( dict );
2652 		kr = kIOReturnSuccess;
2653 	} else {
2654 		kr = kIOReturnBadArgument;
2655 	}
2656 
2657 	if (obj) {
2658 		obj->release();
2659 	}
2660 
2661 	return kr;
2662 }
2663 
/* Routine io_service_match_property_table */
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	// Legacy in-band string entry point; unsupported. Clients use the
	// _ool / _bin variants, which carry the matching data explicitly.
	return kIOReturnUnsupported;
}
2673 
2674 
2675 /* Routine io_service_match_property_table_ool */
2676 kern_return_t
is_io_service_match_property_table_ool(io_object_t service,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,kern_return_t * result,boolean_t * matches)2677 is_io_service_match_property_table_ool(
2678 	io_object_t service,
2679 	io_buf_ptr_t matching,
2680 	mach_msg_type_number_t matchingCnt,
2681 	kern_return_t *result,
2682 	boolean_t *matches )
2683 {
2684 	kern_return_t         kr;
2685 	vm_offset_t           data;
2686 	vm_map_offset_t       map_data;
2687 
2688 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2689 	data = CAST_DOWN(vm_offset_t, map_data);
2690 
2691 	if (KERN_SUCCESS == kr) {
2692 		// must return success after vm_map_copyout() succeeds
2693 		*result = internal_io_service_match_property_table(service,
2694 		    (const char *)data, matchingCnt, matches );
2695 		vm_deallocate( kernel_map, data, matchingCnt );
2696 	}
2697 
2698 	return kr;
2699 }
2700 
/* Routine io_service_match_property_table_bin */
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	// Inband variant; forwards directly to the common backend.
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2711 
2712 static kern_return_t
internal_io_service_get_matching_services(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_iterator_t * existing)2713 internal_io_service_get_matching_services(
2714 	mach_port_t main_port,
2715 	const char * matching,
2716 	mach_msg_type_number_t matching_size,
2717 	io_iterator_t *existing )
2718 {
2719 	kern_return_t       kr;
2720 	OSObject *          obj;
2721 	OSDictionary *      dict;
2722 
2723 	if (main_port != main_device_port) {
2724 		return kIOReturnNotPrivileged;
2725 	}
2726 
2727 	assert(matching_size);
2728 	obj = OSUnserializeXML(matching, matching_size);
2729 
2730 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2731 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2732 		*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2733 		kr = kIOReturnSuccess;
2734 	} else {
2735 		kr = kIOReturnBadArgument;
2736 	}
2737 
2738 	if (obj) {
2739 		obj->release();
2740 	}
2741 
2742 	return kr;
2743 }
2744 
/* Routine io_service_get_matching_services */
kern_return_t
is_io_service_get_matching_services(
	mach_port_t main_port,
	io_string_t matching,
	io_iterator_t *existing )
{
	// Legacy in-band string entry point; unsupported. Clients use the
	// _ool / _bin variants, which carry the matching data explicitly.
	return kIOReturnUnsupported;
}
2754 
2755 /* Routine io_service_get_matching_services_ool */
2756 kern_return_t
is_io_service_get_matching_services_ool(mach_port_t main_port,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,kern_return_t * result,io_object_t * existing)2757 is_io_service_get_matching_services_ool(
2758 	mach_port_t main_port,
2759 	io_buf_ptr_t matching,
2760 	mach_msg_type_number_t matchingCnt,
2761 	kern_return_t *result,
2762 	io_object_t *existing )
2763 {
2764 	kern_return_t       kr;
2765 	vm_offset_t         data;
2766 	vm_map_offset_t     map_data;
2767 
2768 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2769 	data = CAST_DOWN(vm_offset_t, map_data);
2770 
2771 	if (KERN_SUCCESS == kr) {
2772 		// must return success after vm_map_copyout() succeeds
2773 		// and mig will copy out objects on success
2774 		*existing = NULL;
2775 		*result = internal_io_service_get_matching_services(main_port,
2776 		    (const char *) data, matchingCnt, existing);
2777 		vm_deallocate( kernel_map, data, matchingCnt );
2778 	}
2779 
2780 	return kr;
2781 }
2782 
/* Routine io_service_get_matching_services_bin */
kern_return_t
is_io_service_get_matching_services_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
	// Inband variant; forwards directly to the common backend.
	return internal_io_service_get_matching_services(main_port, matching, matchingCnt, existing);
}
2793 
2794 
2795 static kern_return_t
internal_io_service_get_matching_service(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_service_t * service)2796 internal_io_service_get_matching_service(
2797 	mach_port_t main_port,
2798 	const char * matching,
2799 	mach_msg_type_number_t matching_size,
2800 	io_service_t *service )
2801 {
2802 	kern_return_t       kr;
2803 	OSObject *          obj;
2804 	OSDictionary *      dict;
2805 
2806 	if (main_port != main_device_port) {
2807 		return kIOReturnNotPrivileged;
2808 	}
2809 
2810 	assert(matching_size);
2811 	obj = OSUnserializeXML(matching, matching_size);
2812 
2813 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2814 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2815 		*service = IOService::copyMatchingService( dict );
2816 		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2817 	} else {
2818 		kr = kIOReturnBadArgument;
2819 	}
2820 
2821 	if (obj) {
2822 		obj->release();
2823 	}
2824 
2825 	return kr;
2826 }
2827 
/* Routine io_service_get_matching_service */
kern_return_t
is_io_service_get_matching_service(
	mach_port_t main_port,
	io_string_t matching,
	io_service_t *service )
{
	// Legacy in-band string entry point; unsupported. Clients use the
	// _ool / _bin variants, which carry the matching data explicitly.
	return kIOReturnUnsupported;
}
2837 
2838 /* Routine io_service_get_matching_services_ool */
2839 kern_return_t
is_io_service_get_matching_service_ool(mach_port_t main_port,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,kern_return_t * result,io_object_t * service)2840 is_io_service_get_matching_service_ool(
2841 	mach_port_t main_port,
2842 	io_buf_ptr_t matching,
2843 	mach_msg_type_number_t matchingCnt,
2844 	kern_return_t *result,
2845 	io_object_t *service )
2846 {
2847 	kern_return_t       kr;
2848 	vm_offset_t         data;
2849 	vm_map_offset_t     map_data;
2850 
2851 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2852 	data = CAST_DOWN(vm_offset_t, map_data);
2853 
2854 	if (KERN_SUCCESS == kr) {
2855 		// must return success after vm_map_copyout() succeeds
2856 		// and mig will copy out objects on success
2857 		*service = NULL;
2858 		*result = internal_io_service_get_matching_service(main_port,
2859 		    (const char *) data, matchingCnt, service );
2860 		vm_deallocate( kernel_map, data, matchingCnt );
2861 	}
2862 
2863 	return kr;
2864 }
2865 
/* Routine io_service_get_matching_service_bin */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	// Inband variant; forwards directly to the common backend.
	return internal_io_service_get_matching_service(main_port, matching, matchingCnt, service);
}
2876 
2877 static kern_return_t
internal_io_service_add_notification(mach_port_t main_port,io_name_t notification_type,const char * matching,size_t matching_size,mach_port_t port,void * reference,vm_size_t referenceSize,bool client64,io_object_t * notification)2878 internal_io_service_add_notification(
2879 	mach_port_t main_port,
2880 	io_name_t notification_type,
2881 	const char * matching,
2882 	size_t matching_size,
2883 	mach_port_t port,
2884 	void * reference,
2885 	vm_size_t referenceSize,
2886 	bool client64,
2887 	io_object_t * notification )
2888 {
2889 	IOServiceUserNotification * userNotify = NULL;
2890 	IONotifier *                notify = NULL;
2891 	const OSSymbol *            sym;
2892 	OSObject *                  obj;
2893 	OSDictionary *              dict;
2894 	IOReturn                    err;
2895 	natural_t                   userMsgType;
2896 
2897 	if (main_port != main_device_port) {
2898 		return kIOReturnNotPrivileged;
2899 	}
2900 
2901 	do {
2902 		err = kIOReturnNoResources;
2903 
2904 		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
2905 			return kIOReturnMessageTooLarge;
2906 		}
2907 
2908 		if (!(sym = OSSymbol::withCString( notification_type ))) {
2909 			err = kIOReturnNoResources;
2910 		}
2911 
2912 		assert(matching_size);
2913 		obj = OSUnserializeXML(matching, matching_size);
2914 		dict = OSDynamicCast(OSDictionary, obj);
2915 		if (!dict) {
2916 			err = kIOReturnBadArgument;
2917 			continue;
2918 		}
2919 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2920 
2921 		if ((sym == gIOPublishNotification)
2922 		    || (sym == gIOFirstPublishNotification)) {
2923 			userMsgType = kIOServicePublishNotificationType;
2924 		} else if ((sym == gIOMatchedNotification)
2925 		    || (sym == gIOFirstMatchNotification)) {
2926 			userMsgType = kIOServiceMatchedNotificationType;
2927 		} else if ((sym == gIOTerminatedNotification)
2928 		    || (sym == gIOWillTerminateNotification)) {
2929 			userMsgType = kIOServiceTerminatedNotificationType;
2930 		} else {
2931 			userMsgType = kLastIOKitNotificationType;
2932 		}
2933 
2934 		userNotify = new IOServiceUserNotification;
2935 
2936 		if (userNotify && !userNotify->init( port, userMsgType,
2937 		    reference, referenceSize, client64)) {
2938 			userNotify->release();
2939 			userNotify = NULL;
2940 		}
2941 		if (!userNotify) {
2942 			continue;
2943 		}
2944 
2945 		notify = IOService::addMatchingNotification( sym, dict,
2946 		    &userNotify->_handler, userNotify );
2947 		if (notify) {
2948 			*notification = userNotify;
2949 			userNotify->setNotification( notify );
2950 			err = kIOReturnSuccess;
2951 		} else {
2952 			err = kIOReturnUnsupported;
2953 		}
2954 	} while (false);
2955 
2956 	if ((kIOReturnSuccess != err) && userNotify) {
2957 		userNotify->setNotification(NULL);
2958 		userNotify->invalidatePort();
2959 		userNotify->release();
2960 		userNotify = NULL;
2961 	}
2962 
2963 	if (sym) {
2964 		sym->release();
2965 	}
2966 	if (obj) {
2967 		obj->release();
2968 	}
2969 
2970 	return err;
2971 }
2972 
2973 
/* Routine io_service_add_notification */
kern_return_t
is_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	// Legacy in-band string entry point; unsupported. Clients use the
	// _bin variants, which carry the matching data explicitly.
	return kIOReturnUnsupported;
}
2987 
/* Routine io_service_add_notification_64 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	// Legacy in-band string entry point; unsupported. Clients use the
	// _bin variants, which carry the matching data explicitly.
	return kIOReturnUnsupported;
}
3001 
3002 /* Routine io_service_add_notification_bin */
3003 kern_return_t
is_io_service_add_notification_bin(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3004 is_io_service_add_notification_bin
3005 (
3006 	mach_port_t main_port,
3007 	io_name_t notification_type,
3008 	io_struct_inband_t matching,
3009 	mach_msg_type_number_t matchingCnt,
3010 	mach_port_t wake_port,
3011 	io_async_ref_t reference,
3012 	mach_msg_type_number_t referenceCnt,
3013 	io_object_t *notification)
3014 {
3015 	io_async_ref_t zreference;
3016 
3017 	if (referenceCnt > ASYNC_REF_COUNT) {
3018 		return kIOReturnBadArgument;
3019 	}
3020 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3021 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3022 
3023 	return internal_io_service_add_notification(main_port, notification_type,
3024 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3025 	           false, notification);
3026 }
3027 
3028 /* Routine io_service_add_notification_bin_64 */
3029 kern_return_t
is_io_service_add_notification_bin_64(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3030 is_io_service_add_notification_bin_64
3031 (
3032 	mach_port_t main_port,
3033 	io_name_t notification_type,
3034 	io_struct_inband_t matching,
3035 	mach_msg_type_number_t matchingCnt,
3036 	mach_port_t wake_port,
3037 	io_async_ref64_t reference,
3038 	mach_msg_type_number_t referenceCnt,
3039 	io_object_t *notification)
3040 {
3041 	io_async_ref64_t zreference;
3042 
3043 	if (referenceCnt > ASYNC_REF64_COUNT) {
3044 		return kIOReturnBadArgument;
3045 	}
3046 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3047 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3048 
3049 	return internal_io_service_add_notification(main_port, notification_type,
3050 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3051 	           true, notification);
3052 }
3053 
// Common implementation for the "_ool" add_notification variants: the
// matching dictionary arrives as out-of-line vm copy data rather than
// inband bytes. The MIG return code ('kr') only reflects the copyout;
// the actual notification status is delivered through *result.
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t       kr;
	vm_offset_t         data;
	vm_map_offset_t     map_data;

	// Take ownership of the out-of-line data by mapping the vm_map_copy_t
	// into the kernel map.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		*result = internal_io_service_add_notification( main_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		// Release the kernel mapping of the matching data regardless of
		// the notification result.
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
3085 
3086 /* Routine io_service_add_notification_ool */
3087 kern_return_t
is_io_service_add_notification_ool(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3088 is_io_service_add_notification_ool(
3089 	mach_port_t main_port,
3090 	io_name_t notification_type,
3091 	io_buf_ptr_t matching,
3092 	mach_msg_type_number_t matchingCnt,
3093 	mach_port_t wake_port,
3094 	io_async_ref_t reference,
3095 	mach_msg_type_number_t referenceCnt,
3096 	kern_return_t *result,
3097 	io_object_t *notification )
3098 {
3099 	io_async_ref_t zreference;
3100 
3101 	if (referenceCnt > ASYNC_REF_COUNT) {
3102 		return kIOReturnBadArgument;
3103 	}
3104 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3105 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3106 
3107 	return internal_io_service_add_notification_ool(main_port, notification_type,
3108 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3109 	           false, result, notification);
3110 }
3111 
3112 /* Routine io_service_add_notification_ool_64 */
3113 kern_return_t
is_io_service_add_notification_ool_64(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3114 is_io_service_add_notification_ool_64(
3115 	mach_port_t main_port,
3116 	io_name_t notification_type,
3117 	io_buf_ptr_t matching,
3118 	mach_msg_type_number_t matchingCnt,
3119 	mach_port_t wake_port,
3120 	io_async_ref64_t reference,
3121 	mach_msg_type_number_t referenceCnt,
3122 	kern_return_t *result,
3123 	io_object_t *notification )
3124 {
3125 	io_async_ref64_t zreference;
3126 
3127 	if (referenceCnt > ASYNC_REF64_COUNT) {
3128 		return kIOReturnBadArgument;
3129 	}
3130 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3131 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3132 
3133 	return internal_io_service_add_notification_ool(main_port, notification_type,
3134 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3135 	           true, result, notification);
3136 }
3137 
3138 /* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	// Oldest interface: the single scalar reference is forwarded as a
	// one-element async reference array. Note is_io_service_add_notification
	// above currently returns kIOReturnUnsupported, so this does as well.
	return is_io_service_add_notification( main_port, notification_type,
	           matching, port, &ref, 1, notification );
}
3152 
3153 
// Common implementation for io_service_add_interest_notification[_64]:
// wraps 'port' in an IOServiceMessageUserNotification and registers it
// for 'type_of_interest' messages from the service. On success the
// notification object is returned to the caller via *notification.
static kern_return_t
internal_io_service_add_interest_notification(
	io_object_t _service,
	io_name_t type_of_interest,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceMessageUserNotification *  userNotify = NULL;
	IONotifier *                        notify = NULL;
	const OSSymbol *                    sym;
	IOReturn                            err;

	// CHECK validates/binds _service as an IOService named 'service'
	// (macro defined earlier in this file).
	CHECK( IOService, _service, service );

	err = kIOReturnNoResources;
	if ((sym = OSSymbol::withCString( type_of_interest ))) {
		do {
			userNotify = new IOServiceMessageUserNotification;

			// init() failure: drop the half-constructed object.
			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
			    reference, referenceSize, client64 )) {
				userNotify->release();
				userNotify = NULL;
			}
			if (!userNotify) {
				// 'continue' in a do { } while (false) exits the loop,
				// leaving err == kIOReturnNoResources.
				continue;
			}

			notify = service->registerInterest( sym,
			    &userNotify->_handler, userNotify );
			if (notify) {
				*notification = userNotify;
				userNotify->setNotification( notify );
				err = kIOReturnSuccess;
			} else {
				err = kIOReturnUnsupported;
			}
		} while (false);

		sym->release();
	}

	// On any failure, tear down the partially constructed notification.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	return err;
}
3208 
3209 /* Routine io_service_add_message_notification */
3210 kern_return_t
is_io_service_add_interest_notification(io_object_t service,io_name_t type_of_interest,mach_port_t port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3211 is_io_service_add_interest_notification(
3212 	io_object_t service,
3213 	io_name_t type_of_interest,
3214 	mach_port_t port,
3215 	io_async_ref_t reference,
3216 	mach_msg_type_number_t referenceCnt,
3217 	io_object_t * notification )
3218 {
3219 	io_async_ref_t zreference;
3220 
3221 	if (referenceCnt > ASYNC_REF_COUNT) {
3222 		return kIOReturnBadArgument;
3223 	}
3224 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3225 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3226 
3227 	return internal_io_service_add_interest_notification(service, type_of_interest,
3228 	           port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3229 }
3230 
3231 /* Routine io_service_add_interest_notification_64 */
3232 kern_return_t
is_io_service_add_interest_notification_64(io_object_t service,io_name_t type_of_interest,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3233 is_io_service_add_interest_notification_64(
3234 	io_object_t service,
3235 	io_name_t type_of_interest,
3236 	mach_port_t wake_port,
3237 	io_async_ref64_t reference,
3238 	mach_msg_type_number_t referenceCnt,
3239 	io_object_t *notification )
3240 {
3241 	io_async_ref64_t zreference;
3242 
3243 	if (referenceCnt > ASYNC_REF64_COUNT) {
3244 		return kIOReturnBadArgument;
3245 	}
3246 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3247 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3248 
3249 	return internal_io_service_add_interest_notification(service, type_of_interest,
3250 	           wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3251 }
3252 
3253 
3254 /* Routine io_service_acknowledge_notification */
kern_return_t
is_io_service_acknowledge_notification(
	io_object_t _service,
	natural_t notify_ref,
	natural_t response )
{
	// CHECK validates/binds _service as an IOService named 'service'.
	CHECK( IOService, _service, service );

	// notify_ref is an opaque token handed out with the notification;
	// response carries the client's acknowledgement options.
	return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
	           (IOOptionBits) response );
}
3266 
3267 /* Routine io_connect_get_semaphore */
kern_return_t
is_io_connect_get_notification_semaphore(
	io_connect_t connection,
	natural_t notification_type,
	semaphore_t *semaphore )
{
	IOReturn ret;
	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();
	// Bracket the query with the client's IPC write lock so it is
	// serialized against other calls on this user client.
	client->ipcEnter(kIPCLockWrite);
	ret = client->getNotificationSemaphore((UInt32) notification_type,
	    semaphore );
	client->ipcExit(kIPCLockWrite);

	return ret;
}
3285 
3286 /* Routine io_registry_get_root_entry */
3287 kern_return_t
is_io_registry_get_root_entry(mach_port_t main_port,io_object_t * root)3288 is_io_registry_get_root_entry(
3289 	mach_port_t main_port,
3290 	io_object_t *root )
3291 {
3292 	IORegistryEntry *   entry;
3293 
3294 	if (main_port != main_device_port) {
3295 		return kIOReturnNotPrivileged;
3296 	}
3297 
3298 	entry = IORegistryEntry::getRegistryRoot();
3299 	if (entry) {
3300 		entry->retain();
3301 	}
3302 	*root = entry;
3303 
3304 	return kIOReturnSuccess;
3305 }
3306 
3307 /* Routine io_registry_create_iterator */
3308 kern_return_t
is_io_registry_create_iterator(mach_port_t main_port,io_name_t plane,uint32_t options,io_object_t * iterator)3309 is_io_registry_create_iterator(
3310 	mach_port_t main_port,
3311 	io_name_t plane,
3312 	uint32_t options,
3313 	io_object_t *iterator )
3314 {
3315 	if (main_port != main_device_port) {
3316 		return kIOReturnNotPrivileged;
3317 	}
3318 
3319 	*iterator = IOUserIterator::withIterator(
3320 		IORegistryIterator::iterateOver(
3321 			IORegistryEntry::getPlane( plane ), options ));
3322 
3323 	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3324 }
3325 
3326 /* Routine io_registry_entry_create_iterator */
3327 kern_return_t
is_io_registry_entry_create_iterator(io_object_t registry_entry,io_name_t plane,uint32_t options,io_object_t * iterator)3328 is_io_registry_entry_create_iterator(
3329 	io_object_t registry_entry,
3330 	io_name_t plane,
3331 	uint32_t options,
3332 	io_object_t *iterator )
3333 {
3334 	CHECK( IORegistryEntry, registry_entry, entry );
3335 
3336 	*iterator = IOUserIterator::withIterator(
3337 		IORegistryIterator::iterateOver( entry,
3338 		IORegistryEntry::getPlane( plane ), options ));
3339 
3340 	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3341 }
3342 
3343 /* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// CHECKLOCKED binds the underlying IORegistryIterator as 'iter' and
	// (presumably — macro defined earlier in this file) the IOUserIterator
	// wrapper carrying the lock as 'oIter'.
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Take the wrapper's lock around the iterator mutation.
	IOLockLock(&oIter->lock);
	iter->enterEntry();
	IOLockUnlock(&oIter->lock);

	return kIOReturnSuccess;
}
3356 
3357 /* Routine io_registry_iterator_exit */
3358 kern_return_t
is_io_registry_iterator_exit_entry(io_object_t iterator)3359 is_io_registry_iterator_exit_entry(
3360 	io_object_t iterator )
3361 {
3362 	bool        didIt;
3363 
3364 	CHECKLOCKED( IORegistryIterator, iterator, iter );
3365 
3366 	IOLockLock(&oIter->lock);
3367 	didIt = iter->exitEntry();
3368 	IOLockUnlock(&oIter->lock);
3369 
3370 	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
3371 }
3372 
3373 /* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path(
	mach_port_t main_port,
	io_string_t path,
	io_object_t *registry_entry )
{
	IORegistryEntry *   entry;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	entry = IORegistryEntry::fromPath( path );

	// Fallback for tasks running in registry-compatibility mode: retry the
	// lookup as a service matching query on the path, with
	// gIOCompatibilityMatchKey set.
	if (!entry && IOTaskRegistryCompatibility(current_task())) {
		OSDictionary * matching;
		const OSObject * objects[2] = { kOSBooleanTrue, NULL };
		const OSSymbol * keys[2]    = { gIOCompatibilityMatchKey, gIOPathMatchKey };

		objects[1] = OSString::withCStringNoCopy(path);
		matching = OSDictionary::withObjects(objects, keys, 2, 2);
		if (matching) {
			entry = IOService::copyMatchingService(matching);
		}
		OSSafeReleaseNULL(matching);
		OSSafeReleaseNULL(objects[1]);
	}

	// Returns kIOReturnSuccess even when not found; *registry_entry is
	// NULL in that case.
	*registry_entry = entry;

	return kIOReturnSuccess;
}
3406 
3407 
3408 /* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t main_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry *   entry;
	vm_map_offset_t     map_data;
	const char *        cpath;
	IOReturn            res;
	kern_return_t       err;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry    = NULL;
	res = err = KERN_SUCCESS;
	// A non-empty inband path takes precedence; otherwise the path comes
	// from the out-of-line buffer.
	if (path[0]) {
		cpath = path;
	} else {
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		// Cap the accepted out-of-line path size.
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// The out-of-line path must be NUL-terminated.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	// Release the kernel mapping of the out-of-line path, if any.
	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	if (KERN_SUCCESS != err) {
		res = err;
	}
	// Lookup status is delivered via *result; the mig return is 'err'.
	*registry_entry = entry;
	*result = res;

	return err;
}
3468 
3469 
3470 /* Routine io_registry_entry_in_plane */
3471 kern_return_t
is_io_registry_entry_in_plane(io_object_t registry_entry,io_name_t plane,boolean_t * inPlane)3472 is_io_registry_entry_in_plane(
3473 	io_object_t registry_entry,
3474 	io_name_t plane,
3475 	boolean_t *inPlane )
3476 {
3477 	CHECK( IORegistryEntry, registry_entry, entry );
3478 
3479 	*inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3480 
3481 	return kIOReturnSuccess;
3482 }
3483 
3484 
3485 /* Routine io_registry_entry_get_path */
3486 kern_return_t
is_io_registry_entry_get_path(io_object_t registry_entry,io_name_t plane,io_string_t path)3487 is_io_registry_entry_get_path(
3488 	io_object_t registry_entry,
3489 	io_name_t plane,
3490 	io_string_t path )
3491 {
3492 	int         length;
3493 	CHECK( IORegistryEntry, registry_entry, entry );
3494 
3495 	length = sizeof(io_string_t);
3496 	if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3497 		return kIOReturnSuccess;
3498 	} else {
3499 		return kIOReturnBadArgument;
3500 	}
3501 }
3502 
3503 /* Routine io_registry_entry_get_path */
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum   { kMaxPath = 16384 };
	IOReturn err;
	int      length;
	char   * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	*path_ool    = NULL;
	*path_oolCnt = 0;
	// First attempt: return the path directly in the caller's inband buffer.
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		err = kIOReturnSuccess;
	} else {
		// Inband buffer was not enough: retry into a temporary kernel
		// buffer of up to kMaxPath bytes and return it out-of-line.
		length = kMaxPath;
		buf = IONewData(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			// getPath() updated 'length' to the actual path length.
			*path_oolCnt = length;
			err = copyoutkdata(buf, length, path_ool);
		}
		// Free with the allocation size (kMaxPath), not the path length.
		if (buf) {
			IODeleteData(buf, char, kMaxPath);
		}
	}

	return err;
}
3542 
3543 
3544 /* Routine io_registry_entry_get_name */
3545 kern_return_t
is_io_registry_entry_get_name(io_object_t registry_entry,io_name_t name)3546 is_io_registry_entry_get_name(
3547 	io_object_t registry_entry,
3548 	io_name_t name )
3549 {
3550 	CHECK( IORegistryEntry, registry_entry, entry );
3551 
3552 	strncpy( name, entry->getName(), sizeof(io_name_t));
3553 
3554 	return kIOReturnSuccess;
3555 }
3556 
3557 /* Routine io_registry_entry_get_name_in_plane */
3558 kern_return_t
is_io_registry_entry_get_name_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t name)3559 is_io_registry_entry_get_name_in_plane(
3560 	io_object_t registry_entry,
3561 	io_name_t planeName,
3562 	io_name_t name )
3563 {
3564 	const IORegistryPlane * plane;
3565 	CHECK( IORegistryEntry, registry_entry, entry );
3566 
3567 	if (planeName[0]) {
3568 		plane = IORegistryEntry::getPlane( planeName );
3569 	} else {
3570 		plane = NULL;
3571 	}
3572 
3573 	strncpy( name, entry->getName( plane), sizeof(io_name_t));
3574 
3575 	return kIOReturnSuccess;
3576 }
3577 
3578 /* Routine io_registry_entry_get_location_in_plane */
3579 kern_return_t
is_io_registry_entry_get_location_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t location)3580 is_io_registry_entry_get_location_in_plane(
3581 	io_object_t registry_entry,
3582 	io_name_t planeName,
3583 	io_name_t location )
3584 {
3585 	const IORegistryPlane * plane;
3586 	CHECK( IORegistryEntry, registry_entry, entry );
3587 
3588 	if (planeName[0]) {
3589 		plane = IORegistryEntry::getPlane( planeName );
3590 	} else {
3591 		plane = NULL;
3592 	}
3593 
3594 	const char * cstr = entry->getLocation( plane );
3595 
3596 	if (cstr) {
3597 		strncpy( location, cstr, sizeof(io_name_t));
3598 		return kIOReturnSuccess;
3599 	} else {
3600 		return kIOReturnNotFound;
3601 	}
3602 }
3603 
3604 /* Routine io_registry_entry_get_registry_entry_id */
kern_return_t
is_io_registry_entry_get_registry_entry_id(
	io_object_t registry_entry,
	uint64_t *entry_id )
{
	CHECK( IORegistryEntry, registry_entry, entry );

	// Return the entry's 64-bit registry entry ID.
	*entry_id = entry->getRegistryEntryID();

	return kIOReturnSuccess;
}
3616 
3617 
3618 static OSObject *
IOCopyPropertyCompatible(IORegistryEntry * regEntry,const char * name)3619 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3620 {
3621 	OSObject     * obj;
3622 	OSObject     * compatProperties;
3623 	OSDictionary * props;
3624 
3625 	obj = regEntry->copyProperty(name);
3626 	if (obj) {
3627 		return obj;
3628 	}
3629 
3630 	compatProperties = regEntry->copyProperty(gIOUserServicePropertiesKey);
3631 	if (!compatProperties
3632 	    && IOTaskRegistryCompatibility(current_task())) {
3633 		compatProperties = regEntry->copyProperty(gIOCompatibilityPropertiesKey);
3634 	}
3635 	if (compatProperties) {
3636 		props = OSDynamicCast(OSDictionary, compatProperties);
3637 		if (props) {
3638 			obj = props->getObject(name);
3639 			if (obj) {
3640 				obj->retain();
3641 			}
3642 		}
3643 		compatProperties->release();
3644 	}
3645 
3646 	return obj;
3647 }
3648 
3649 /* Routine io_registry_entry_get_property */
// Return a property's raw bytes in the caller's inband buffer. Only
// OSData, OSString, OSBoolean and OSNumber properties are representable;
// anything else yields kIOReturnBadArgument.
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject    *       obj;
	OSData      *       data;
	OSString    *       str;
	OSBoolean   *       boo;
	OSNumber    *       off;
	UInt64              offsetBytes;
	unsigned int        len = 0;
	const void *        bytes = NULL;
	IOReturn            ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may deny access to this property.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then...
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		// Non-serializable data is reported as zero-length.
		if (!data->isSerializable()) {
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		// Include the terminating NUL.
		len = str->getLength() + 1;
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		// Booleans are rendered as the literal strings "Yes" / "No".
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		// On big-endian the significant low-order bytes sit at the end
		// of the UInt64; skip the leading bytes.
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		// Fail if the caller's inband buffer is too small.
		if (*dataCnt < len) {
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3721 
3722 
3723 /* Routine io_registry_entry_get_property */
3724 kern_return_t
is_io_registry_entry_get_property(io_object_t registry_entry,io_name_t property_name,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)3725 is_io_registry_entry_get_property(
3726 	io_object_t registry_entry,
3727 	io_name_t property_name,
3728 	io_buf_ptr_t *properties,
3729 	mach_msg_type_number_t *propertiesCnt )
3730 {
3731 	kern_return_t       err;
3732 	unsigned int        len;
3733 	OSObject *          obj;
3734 
3735 	CHECK( IORegistryEntry, registry_entry, entry );
3736 
3737 #if CONFIG_MACF
3738 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3739 		return kIOReturnNotPermitted;
3740 	}
3741 #endif
3742 
3743 	obj = IOCopyPropertyCompatible(entry, property_name);
3744 	if (!obj) {
3745 		return kIOReturnNotFound;
3746 	}
3747 
3748 	OSSerialize * s = OSSerialize::withCapacity(4096);
3749 	if (!s) {
3750 		obj->release();
3751 		return kIOReturnNoMemory;
3752 	}
3753 
3754 	if (obj->serialize( s )) {
3755 		len = s->getLength();
3756 		*propertiesCnt = len;
3757 		err = copyoutkdata( s->text(), len, properties );
3758 	} else {
3759 		err = kIOReturnUnsupported;
3760 	}
3761 
3762 	s->release();
3763 	obj->release();
3764 
3765 	return err;
3766 }
3767 
3768 /* Routine io_registry_entry_get_property_recursively */
3769 kern_return_t
is_io_registry_entry_get_property_recursively(io_object_t registry_entry,io_name_t plane,io_name_t property_name,uint32_t options,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)3770 is_io_registry_entry_get_property_recursively(
3771 	io_object_t registry_entry,
3772 	io_name_t plane,
3773 	io_name_t property_name,
3774 	uint32_t options,
3775 	io_buf_ptr_t *properties,
3776 	mach_msg_type_number_t *propertiesCnt )
3777 {
3778 	kern_return_t       err;
3779 	unsigned int        len;
3780 	OSObject *          obj;
3781 
3782 	CHECK( IORegistryEntry, registry_entry, entry );
3783 
3784 #if CONFIG_MACF
3785 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3786 		return kIOReturnNotPermitted;
3787 	}
3788 #endif
3789 
3790 	obj = entry->copyProperty( property_name,
3791 	    IORegistryEntry::getPlane( plane ), options );
3792 	if (!obj) {
3793 		return kIOReturnNotFound;
3794 	}
3795 
3796 	OSSerialize * s = OSSerialize::withCapacity(4096);
3797 	if (!s) {
3798 		obj->release();
3799 		return kIOReturnNoMemory;
3800 	}
3801 
3802 	if (obj->serialize( s )) {
3803 		len = s->getLength();
3804 		*propertiesCnt = len;
3805 		err = copyoutkdata( s->text(), len, properties );
3806 	} else {
3807 		err = kIOReturnUnsupported;
3808 	}
3809 
3810 	s->release();
3811 	obj->release();
3812 
3813 	return err;
3814 }
3815 
3816 /* Routine io_registry_entry_get_properties */
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// XML-serialization variant: intentionally disabled; superseded by
	// is_io_registry_entry_get_properties_bin below.
	return kIOReturnUnsupported;
}
3825 
3826 #if CONFIG_MACF
3827 
// Context for GetPropertiesEditor(): the caller's credential and the
// registry entry being serialized, plus the outermost container so that
// only top-level properties are subject to the MAC filter.
struct GetPropertiesEditorRef {
	kauth_cred_t      cred;
	IORegistryEntry * entry;
	OSCollection    * root;
};
3833 
// OSSerialize editor callback used while serializing an entry's
// properties: filters out top-level properties that the MAC
// get-property check denies for the requesting credential.
static const LIBKERN_RETURNS_RETAINED OSMetaClassBase *
GetPropertiesEditor(void                  * reference,
    OSSerialize           * s,
    OSCollection          * container,
    const OSSymbol        * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	// Remember the first (outermost) container seen; only members of that
	// top-level collection are MAC-checked.
	if (!ref->root) {
		ref->root = container;
	}
	if (ref->root == container) {
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			// Denied: omit this property from the serialization.
			value = NULL;
		}
	}
	// Per LIBKERN_RETURNS_RETAINED, a non-NULL result is returned retained.
	if (value) {
		value->retain();
	}
	return value;
}
3856 
3857 #endif /* CONFIG_MACF */
3858 
3859 /* Routine io_registry_entry_get_properties_bin_buf */
// Binary-serialize all of an entry's properties. The result goes into the
// caller-supplied buffer ('buf'/'bufsize') when it fits, otherwise it is
// returned as out-of-line data via 'properties'/'propertiesCnt'.
kern_return_t
is_io_registry_entry_get_properties_bin_buf(
	io_object_t registry_entry,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t          err = kIOReturnSuccess;
	unsigned int           len;
	OSObject             * compatProperties;
	OSSerialize          * s;
	OSSerialize::Editor    editor = NULL;
	void                 * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	GetPropertiesEditorRef ref;
	// Install the MAC property filter only when policy requests property
	// filtering for this credential/entry pair.
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor    = &GetPropertiesEditor;
		editRef   = &ref;
		ref.cred  = kauth_cred_get();
		ref.entry = entry;
		ref.root  = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}


	// Entries with user-service (or, for compatibility-mode tasks,
	// compatibility) properties have those merged over their regular
	// properties, with the container keys themselves removed.
	compatProperties = entry->copyProperty(gIOUserServicePropertiesKey);
	if (!compatProperties
	    && IOTaskRegistryCompatibility(current_task())) {
		compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey);
	}

	if (compatProperties) {
		OSDictionary * dict;

		dict = entry->dictionaryWithProperties();
		if (!dict) {
			err = kIOReturnNoMemory;
		} else {
			dict->removeObject(gIOUserServicePropertiesKey);
			dict->removeObject(gIOCompatibilityPropertiesKey);
			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
			if (!dict->serialize(s)) {
				err = kIOReturnUnsupported;
			}
			dict->release();
		}
		compatProperties->release();
	} else if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		// Prefer the caller's buffer when present and large enough;
		// otherwise fall back to out-of-line kernel-allocated data.
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	}
	s->release();

	return err;
}
3943 
3944 /* Routine io_registry_entry_get_properties_bin */
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	// Thin wrapper: no caller buffer is supplied, so the serialized
	// properties are always returned as out-of-line data.
	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
	           0, NULL, properties, propertiesCnt);
}
3954 
/* Routine io_registry_entry_get_property_bin_buf */
/*
 * Look up a single property by name on a registry entry and return it
 * binary-serialized, either into the caller-supplied buffer (buf/bufsize,
 * when it is large enough) or into a newly vm-allocated out-of-line buffer
 * (properties/propertiesCnt).
 *
 * With kIORegistryIterateRecursively set (and a non-empty plane name) the
 * lookup walks the registry from 'entry' in that plane until a value is
 * found; each candidate entry is re-checked against the MAC get-property
 * hook. The special key gIORegistryEntryPropertyKeysKey returns the list
 * of property keys instead of a value.
 */
kern_return_t
is_io_registry_entry_get_property_bin_buf(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t       err;
	unsigned int        len;
	OSObject *          obj;
	const OSSymbol *    sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy check on the starting entry; recursive candidates are
	// checked again individually below.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	err = kIOReturnNotFound;
	if (gIORegistryEntryPropertyKeysKey == sym) {
		// Special request: return the entry's property keys, not a value.
		obj = entry->copyPropertyKeys();
	} else {
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			// Try the starting entry first, then walk the plane.
			obj = IOCopyPropertyCompatible(entry, property_name);
			if (obj == NULL) {
				IORegistryIterator * iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
				if (iter) {
					while ((NULL == obj) && (entry = iter->getNextObject())) {
						OSObject * currentObj = IOCopyPropertyCompatible(entry, property_name);
#if CONFIG_MACF
						if (currentObj != NULL && 0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
							// Record that MAC hook blocked this entry and property, and continue to next entry
							err = kIOReturnNotPermitted;
							OSSafeReleaseNULL(currentObj);
							continue;
						}
#endif
						obj = currentObj;
					}
					iter->release();
				}
			}
		} else {
			obj = IOCopyPropertyCompatible(entry, property_name);
		}
		// Destructive-read properties are removed once fetched.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		// err is kIOReturnNotFound, or kIOReturnNotPermitted if the MAC
		// hook rejected every candidate that had the property.
		return err;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		if (buf && bufsize && len <= *bufsize) {
			// Fits in the caller's buffer: copy out directly, signal via
			// *bufsize and clear the out-of-line outputs.
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			// Too large (or no buffer supplied): hand back vm-allocated
			// out-of-line data instead.
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
4055 
/* Routine io_registry_entry_get_property_bin */
/*
 * Fetch one property, binary-serialized, always via an out-of-line
 * vm-allocated buffer. Thin MIG wrapper around
 * is_io_registry_entry_get_property_bin_buf() with no inline buffer
 * (buf == 0, bufsize == NULL).
 */
kern_return_t
is_io_registry_entry_get_property_bin(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
	           property_name, options, 0, NULL, properties, propertiesCnt);
}
4069 
4070 
/* Routine io_registry_entry_set_properties */
/*
 * Apply caller-supplied, XML-serialized properties to a registry entry.
 *
 * The serialized data arrives as an out-of-line Mach VM copy; it is mapped
 * into the kernel map, unserialized, and released. The set is subject to a
 * MAC policy check and, when the entry carries
 * gIORegistryEntryAllowableSetPropertiesKey, to an allow-list: every key in
 * the incoming dictionary must appear in that array or the whole request is
 * rejected.
 *
 * Returns the IPC-level status; the IOKit-level status is delivered through
 * *result.
 */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject *          obj;
	kern_return_t       err;
	IOReturn            res;
	vm_offset_t         data;
	vm_map_offset_t     map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Bound the unserialize input size to limit kernel memory consumption.
	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			IOService    * service = OSDynamicCast(IOService, entry);
			OSDictionary * props = OSDynamicCast(OSDictionary, obj);
			OSObject     * allowable = entry->copyProperty(gIORegistryEntryAllowableSetPropertiesKey);
			OSArray      * allowableArray;

			if (!allowable) {
				// No allow-list configured: any unserialized object may be
				// handed to setProperties().
				res = kIOReturnSuccess;
			} else {
				// Allow-list present: input must be a dictionary and every
				// key must match an element of the allow-list array.
				if (!props) {
					res = kIOReturnNotPermitted;
				} else if (!(allowableArray = OSDynamicCast(OSArray, allowable))) {
					res = kIOReturnNotPermitted;
				} else {
					bool allFound __block, found __block;

					allFound = true;
					props->iterateObjects(^(const OSSymbol * key, OSObject * value) {
							found = false;
							for (unsigned int idx = 0; !found; idx++) {
							        OSObject * next = allowableArray->getObject(idx);
							        if (!next) {
							                break;
								}
							        found = next->isEqualTo(key);
							}
							allFound &= found;
							if (!found) {
							        IOLog("IORegistryEntrySetProperties(%s, %s) disallowed due to " kIORegistryEntryAllowableSetPropertiesKey "\n",
							        entry->getName(), key->getCStringNoCopy());
							}
							// Stop iterating as soon as one key fails.
							return !allFound;
						});
					res =  allFound ? kIOReturnSuccess : kIOReturnBadArgument;
				}
			}
			if (kIOReturnSuccess == res) {
				IOUserClient *
				    client = OSDynamicCast(IOUserClient, entry);

				// User clients opting into default locking serialize
				// setProperties under their rw-lock; other entries may opt
				// into serialization via the property action block.
				if (client && client->defaultLockingSetProperties) {
					IORWLockWrite(&client->lock);
				}

				if (!client && (kOSBooleanTrue == entry->getProperty(gIORegistryEntryDefaultLockingSetPropertiesKey))) {
					res = entry->runPropertyActionBlock(^IOReturn (void) {
							return entry->setProperties( obj );
						});
				} else {
					res = entry->setProperties( obj );
				}

				if (client && client->defaultLockingSetProperties) {
					IORWLockUnlock(&client->lock);
				}
				// DriverKit services additionally forward the dictionary to
				// the user-space server.
				if (service && props && service->hasUserServer()) {
					res = service->UserSetProperties(props);
				}
			}
			OSSafeReleaseNULL(allowable);
		}
		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	*result = res;
	return err;
}
4184 
4185 /* Routine io_registry_entry_get_child_iterator */
4186 kern_return_t
is_io_registry_entry_get_child_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4187 is_io_registry_entry_get_child_iterator(
4188 	io_object_t registry_entry,
4189 	io_name_t plane,
4190 	io_object_t *iterator )
4191 {
4192 	CHECK( IORegistryEntry, registry_entry, entry );
4193 
4194 	*iterator = IOUserIterator::withIterator(entry->getChildIterator(
4195 		    IORegistryEntry::getPlane( plane )));
4196 
4197 	return kIOReturnSuccess;
4198 }
4199 
4200 /* Routine io_registry_entry_get_parent_iterator */
4201 kern_return_t
is_io_registry_entry_get_parent_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4202 is_io_registry_entry_get_parent_iterator(
4203 	io_object_t registry_entry,
4204 	io_name_t plane,
4205 	io_object_t *iterator)
4206 {
4207 	CHECK( IORegistryEntry, registry_entry, entry );
4208 
4209 	*iterator = IOUserIterator::withIterator(entry->getParentIterator(
4210 		    IORegistryEntry::getPlane( plane )));
4211 
4212 	return kIOReturnSuccess;
4213 }
4214 
4215 /* Routine io_service_get_busy_state */
4216 kern_return_t
is_io_service_get_busy_state(io_object_t _service,uint32_t * busyState)4217 is_io_service_get_busy_state(
4218 	io_object_t _service,
4219 	uint32_t *busyState )
4220 {
4221 	CHECK( IOService, _service, service );
4222 
4223 	*busyState = service->getBusyState();
4224 
4225 	return kIOReturnSuccess;
4226 }
4227 
4228 /* Routine io_service_get_state */
4229 kern_return_t
is_io_service_get_state(io_object_t _service,uint64_t * state,uint32_t * busy_state,uint64_t * accumulated_busy_time)4230 is_io_service_get_state(
4231 	io_object_t _service,
4232 	uint64_t *state,
4233 	uint32_t *busy_state,
4234 	uint64_t *accumulated_busy_time )
4235 {
4236 	CHECK( IOService, _service, service );
4237 
4238 	*state                 = service->getState();
4239 	*busy_state            = service->getBusyState();
4240 	*accumulated_busy_time = service->getAccumulatedBusyTime();
4241 
4242 	return kIOReturnSuccess;
4243 }
4244 
4245 /* Routine io_service_wait_quiet */
4246 kern_return_t
is_io_service_wait_quiet(io_object_t _service,mach_timespec_t wait_time)4247 is_io_service_wait_quiet(
4248 	io_object_t _service,
4249 	mach_timespec_t wait_time )
4250 {
4251 	uint64_t    timeoutNS;
4252 
4253 	CHECK( IOService, _service, service );
4254 
4255 	timeoutNS = wait_time.tv_sec;
4256 	timeoutNS *= kSecondScale;
4257 	timeoutNS += wait_time.tv_nsec;
4258 
4259 	return service->waitQuiet(timeoutNS);
4260 }
4261 
4262 /* Routine io_service_wait_quiet_with_options */
4263 kern_return_t
is_io_service_wait_quiet_with_options(io_object_t _service,mach_timespec_t wait_time,uint32_t options)4264 is_io_service_wait_quiet_with_options(
4265 	io_object_t _service,
4266 	mach_timespec_t wait_time,
4267 	uint32_t options )
4268 {
4269 	uint64_t    timeoutNS;
4270 
4271 	CHECK( IOService, _service, service );
4272 
4273 	timeoutNS = wait_time.tv_sec;
4274 	timeoutNS *= kSecondScale;
4275 	timeoutNS += wait_time.tv_nsec;
4276 
4277 	if ((options & kIOWaitQuietPanicOnFailure) && !IOCurrentTaskHasEntitlement(kIOWaitQuietPanicsEntitlement)) {
4278 		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4279 		IOLog("IOServiceWaitQuietWithOptions(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4280 		OSSafeReleaseNULL(taskName);
4281 
4282 		/* strip this option from the options before calling waitQuietWithOptions */
4283 		options &= ~kIOWaitQuietPanicOnFailure;
4284 	}
4285 
4286 	return service->waitQuietWithOptions(timeoutNS, options);
4287 }
4288 
4289 
/* Routine io_service_request_probe */
/*
 * Ask the service to re-probe for clients/drivers. Thin MIG wrapper around
 * IOService::requestProbe(); the options word is passed through unchanged.
 */
kern_return_t
is_io_service_request_probe(
	io_object_t _service,
	uint32_t options )
{
	CHECK( IOService, _service, service );

	return service->requestProbe( options );
}
4300 
/* Routine io_service_get_authorization_id */
/*
 * Return the service's authorization ID. Requires administrator privilege
 * (checked against the calling task). Only supported on macOS; other
 * platforms get 0 and kIOReturnUnsupported.
 */
kern_return_t
is_io_service_get_authorization_id(
	io_object_t _service,
	uint64_t *authorization_id )
{
	kern_return_t          kr;

	CHECK( IOService, _service, service );

	// Admin-only: reading authorization IDs is a privileged operation.
	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
	    kIOClientPrivilegeAdministrator );
	if (kIOReturnSuccess != kr) {
		return kr;
	}

#if defined(XNU_TARGET_OS_OSX)
	*authorization_id = service->getAuthorizationID();
#else /* defined(XNU_TARGET_OS_OSX) */
	*authorization_id = 0;
	kr = kIOReturnUnsupported;
#endif /* defined(XNU_TARGET_OS_OSX) */

	return kr;
}
4326 
/* Routine io_service_set_authorization_id */
/*
 * Set the service's authorization ID. Only supported on macOS; other
 * platforms return kIOReturnUnsupported. Note: unlike the getter, no
 * privilege check happens here — setAuthorizationID() is responsible for
 * its own access control.
 */
kern_return_t
is_io_service_set_authorization_id(
	io_object_t _service,
	uint64_t authorization_id )
{
	CHECK( IOService, _service, service );

#if defined(XNU_TARGET_OS_OSX)
	return service->setAuthorizationID( authorization_id );
#else /* defined(XNU_TARGET_OS_OSX) */
	return kIOReturnUnsupported;
#endif /* defined(XNU_TARGET_OS_OSX) */
}
4341 
/* Routine io_service_open_ndr */
/*
 * Open a connection to a service: create an IOUserClient of the requested
 * connect_type for the calling task, then vet it through a series of
 * gates before returning it to user space:
 *
 *   1. MAC open-service check on the target service.
 *   2. IOService::newUserClient() to create the client object.
 *   3. One-time per-client setup (locking mode flags, creator name).
 *   4. kIOUserClientEntitlementsKey entitlement enforcement (mandatory
 *      for IOUserClient2022 subclasses).
 *   5. MAC open check on the new client.
 *   6. Optional sandbox filter policy resolution.
 *   7. registerOwner() to tie the client to the owning task.
 *
 * Any failure after creation closes and releases the client. The IPC-level
 * status is the return value; the IOKit-level status goes to *result and
 * the client (or NULL) to *connection.
 */
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = NULL;
	kern_return_t  err = KERN_SUCCESS;
	IOReturn       res = kIOReturnSuccess;
	OSDictionary * propertiesDict = NULL;
	bool           disallowAccess = false;

	CHECK( IOService, _service, service );

	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	// Only the current task may open on its own behalf.
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

#if CONFIG_MACF
	if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
		return kIOReturnNotPermitted;
	}
#endif
	do{
		// Serialized open-properties are not currently supported (the
		// unserialize path below is compiled out).
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		{
			OSObject *      obj;
			vm_offset_t     data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess && OSDynamicCast(IOUserClient, client) == NULL) {
			// client should always be a IOUserClient
			res = kIOReturnError;
		}

		if (res == kIOReturnSuccess) {
			// Ensure the client's expansion data is allocated.
			if (!client->reserved) {
				if (!client->reserve()) {
					client->clientClose();
					OSSafeReleaseNULL(client);
					res = kIOReturnNoMemory;
				}
			}
		}

		if (res == kIOReturnSuccess) {
			// Record which process created this connection (for logging).
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
			}
			const char * creatorNameCStr = creatorName ? creatorName->getCStringNoCopy() : "<unknown>";
			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
			// Shared instances may be opened concurrently; serialize the
			// one-time setup below under the owners lock.
			if (client->sharedInstance) {
				IOLockLock(gIOUserClientOwnersLock);
			}
			if (!client->opened) {
				client->opened = true;

				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
				{
					// Determine the client's locking mode from its
					// properties. IOUserClient2022 subclasses must declare
					// all three locking keys explicitly.
					OSObject * obj;
					extern const OSSymbol * gIOSurfaceIdentifier;
					obj = client->getProperty(kIOUserClientDefaultLockingKey);
					bool hasProps = false;

					client->uc2022 = (NULL != OSDynamicCast(IOUserClient2022, client));
					if (obj) {
						hasProps = true;
						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSetProperties = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSingleThreadExternalMethod = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					if (kIOReturnSuccess != res) {
						IOLog("IOUC %s requires kIOUserClientDefaultLockingKey, kIOUserClientDefaultLockingSetPropertiesKey, kIOUserClientDefaultLockingSingleThreadExternalMethodKey\n",
						    client->getMetaClass()->getClassName());
					}
					if (!hasProps) {
						// Legacy clients with no locking keys default to
						// locked, except those from kexts depending on
						// IOSurface (compatibility carve-out).
						const OSMetaClass * meta;
						OSKext            * kext;
						meta = client->getMetaClass();
						kext = meta->getKext();
						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
							client->defaultLocking = true;
							client->defaultLockingSetProperties = false;
							client->defaultLockingSingleThreadExternalMethod = false;
							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
						}
					}
				}
			}
			if (client->sharedInstance) {
				IOLockUnlock(gIOUserClientOwnersLock);
			}

			OSObject     * requiredEntitlement = client->copyProperty(gIOUserClientEntitlementsKey);
			OSString * requiredEntitlementString = OSDynamicCast(OSString, requiredEntitlement);
			//If this is an IOUserClient2022, having kIOUserClientEntitlementsKey is mandatory.
			//If it has kIOUserClientEntitlementsKey, the value must be either kOSBooleanFalse or an OSString
			//If the value is kOSBooleanFalse, we allow access.
			//If the value is an OSString, we allow access if the task has the named entitlement
			if (client->uc2022) {
				if (!requiredEntitlement) {
					IOLog("IOUC %s missing " kIOUserClientEntitlementsKey " property\n",
					    client->getMetaClass()->getClassName());
					disallowAccess = true;
				} else if (!requiredEntitlementString && requiredEntitlement != kOSBooleanFalse) {
					IOLog("IOUC %s had " kIOUserClientEntitlementsKey "with value not boolean false or string\n", client->getMetaClass()->getClassName());
					disallowAccess = true;
				}
			}

			if (requiredEntitlement && disallowAccess == false) {
				if (kOSBooleanFalse == requiredEntitlement) {
					// allow
					disallowAccess = false;
				} else {
					disallowAccess = !IOTaskHasEntitlement(owningTask, requiredEntitlementString->getCStringNoCopy());
					if (disallowAccess) {
						IOLog("IOUC %s missing entitlement in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			OSSafeReleaseNULL(requiredEntitlement);

			if (disallowAccess) {
				res = kIOReturnNotPrivileged;
			}
#if CONFIG_MACF
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				IOLog("IOUC %s failed MACF in process %s\n",
				    client->getMetaClass()->getClassName(), creatorNameCStr);
				res = kIOReturnNotPermitted;
			}
#endif

			// Resolve a sandbox filter policy for this task, if a resolver
			// is registered and no policy exists yet. "Unsupported" from
			// the resolver means no filtering for this client.
			if ((kIOReturnSuccess == res)
			    && gIOUCFilterCallbacks
			    && gIOUCFilterCallbacks->io_filter_resolver) {
				io_filter_policy_t filterPolicy;
				filterPolicy = client->filterForTask(owningTask, 0);
				if (!filterPolicy) {
					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
					if (kIOReturnUnsupported == res) {
						res = kIOReturnSuccess;
					} else if (kIOReturnSuccess == res) {
						client->filterForTask(owningTask, filterPolicy);
					} else {
						IOLog("IOUC %s failed sandbox in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}
			OSSafeReleaseNULL(creatorName);

			if (kIOReturnSuccess != res) {
				// Vetting failed: close and drop the client; no connection
				// is returned.
				IOStatisticsClientCall();
				client->clientClose();
				client->setTerminateDefer(service, false);
				client->release();
				client = NULL;
				break;
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	*connection = client;
	*result = res;

	return err;
}
4577 
/* Routine io_service_close */
/*
 * Close a connection. The atomic swap on 'closed' ensures clientClose()
 * runs at most once per non-shared client even under racing close calls;
 * shared-instance clients may be closed repeatedly. A port that resolves
 * to the mappings OSSet (a dead-mapping placeholder) is accepted as a
 * no-op success.
 */
kern_return_t
is_io_service_close(
	io_connect_t connection )
{
	OSSet * mappings;
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	// First closer wins: 0 -> 1 transition of the 'closed' flag.
	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		client->ipcEnter(kIPCLockWrite);
		client->clientClose();
		client->ipcExit(kIPCLockWrite);
	} else {
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
4603 
4604 /* Routine io_connect_get_service */
4605 kern_return_t
is_io_connect_get_service(io_connect_t connection,io_object_t * service)4606 is_io_connect_get_service(
4607 	io_connect_t connection,
4608 	io_object_t *service )
4609 {
4610 	IOService * theService;
4611 
4612 	CHECK( IOUserClient, connection, client );
4613 
4614 	client->ipcEnter(kIPCLockNone);
4615 
4616 	theService = client->getService();
4617 	if (theService) {
4618 		theService->retain();
4619 	}
4620 
4621 	client->ipcExit(kIPCLockNone);
4622 
4623 	*service = theService;
4624 
4625 	return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4626 }
4627 
4628 /* Routine io_connect_set_notification_port */
4629 kern_return_t
is_io_connect_set_notification_port(io_connect_t connection,uint32_t notification_type,mach_port_t port,uint32_t reference)4630 is_io_connect_set_notification_port(
4631 	io_connect_t connection,
4632 	uint32_t notification_type,
4633 	mach_port_t port,
4634 	uint32_t reference)
4635 {
4636 	kern_return_t ret;
4637 	CHECK( IOUserClient, connection, client );
4638 
4639 	IOStatisticsClientCall();
4640 
4641 	client->ipcEnter(kIPCLockWrite);
4642 	ret = client->registerNotificationPort( port, notification_type,
4643 	    (io_user_reference_t) reference );
4644 	client->ipcExit(kIPCLockWrite);
4645 
4646 	return ret;
4647 }
4648 
4649 /* Routine io_connect_set_notification_port */
4650 kern_return_t
is_io_connect_set_notification_port_64(io_connect_t connection,uint32_t notification_type,mach_port_t port,io_user_reference_t reference)4651 is_io_connect_set_notification_port_64(
4652 	io_connect_t connection,
4653 	uint32_t notification_type,
4654 	mach_port_t port,
4655 	io_user_reference_t reference)
4656 {
4657 	kern_return_t ret;
4658 	CHECK( IOUserClient, connection, client );
4659 
4660 	IOStatisticsClientCall();
4661 
4662 	client->ipcEnter(kIPCLockWrite);
4663 	ret = client->registerNotificationPort( port, notification_type,
4664 	    reference );
4665 	client->ipcExit(kIPCLockWrite);
4666 
4667 	return ret;
4668 }
4669 
/* Routine io_connect_map_memory_into_task */
/*
 * Map client memory of the given type into a task. On success *address
 * and *size describe the new mapping.
 *
 * Ownership of the resulting IOMemoryMap depends on who maps: for shared
 * instances or cross-task maps, a send right is pushed to the target task
 * so the mapping can be cleaned up when that task dies; otherwise the map
 * is kept in the client's 'mappings' set. In both cases the function's own
 * reference on the map is dropped before returning.
 */
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
)
{
	IOReturn            err;
	IOMemoryMap *       map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();

	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			map->release();
		} else {
			// keep it with the user client
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);

	return err;
}
4731 
4732 /* Routine is_io_connect_map_memory */
4733 kern_return_t
is_io_connect_map_memory(io_object_t connect,uint32_t type,task_t task,uint32_t * mapAddr,uint32_t * mapSize,uint32_t flags)4734 is_io_connect_map_memory(
4735 	io_object_t     connect,
4736 	uint32_t        type,
4737 	task_t          task,
4738 	uint32_t  *     mapAddr,
4739 	uint32_t  *     mapSize,
4740 	uint32_t        flags )
4741 {
4742 	IOReturn          err;
4743 	mach_vm_address_t address;
4744 	mach_vm_size_t    size;
4745 
4746 	address = SCALAR64(*mapAddr);
4747 	size    = SCALAR64(*mapSize);
4748 
4749 	err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4750 
4751 	*mapAddr = SCALAR32(address);
4752 	*mapSize = SCALAR32(size);
4753 
4754 	return err;
4755 }
4756 } /* extern "C" */
4757 
4758 IOMemoryMap *
removeMappingForDescriptor(IOMemoryDescriptor * mem)4759 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4760 {
4761 	OSIterator *  iter;
4762 	IOMemoryMap * map = NULL;
4763 
4764 	IOLockLock(gIOObjectPortLock);
4765 
4766 	iter = OSCollectionIterator::withCollection(mappings);
4767 	if (iter) {
4768 		while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4769 			if (mem == map->getMemoryDescriptor()) {
4770 				map->retain();
4771 				mappings->removeObject(map);
4772 				break;
4773 			}
4774 		}
4775 		iter->release();
4776 	}
4777 
4778 	IOLockUnlock(gIOObjectPortLock);
4779 
4780 	return map;
4781 }
4782 
4783 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
/*
 * Unmap client memory of the given type previously mapped into a task at
 * 'address'. Re-creates the mapping object with kIOMapReference to locate
 * the existing map, removes it from the client's 'mappings' set, and tears
 * down the Mach send right that was pushed to the owning task (shared
 * instance or cross-task case) before unmapping.
 */
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn            err;
	IOOptionBits        options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap *       map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();

	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		// kIOMapReference makes createMappingInTask() look up the existing
		// mapping at 'address' rather than creating a new one.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance;
			if (is_shared_instance_or_from_current_task) {
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				// Unmap, then drop the rights held by the task (the one
				// just made plus the one pushed at map time).
				map->userClientUnmap();
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (!is_shared_instance_or_from_current_task) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);

	return err;
}
4848 
4849 kern_return_t
is_io_connect_unmap_memory(io_object_t connect,uint32_t type,task_t task,uint32_t mapAddr)4850 is_io_connect_unmap_memory(
4851 	io_object_t     connect,
4852 	uint32_t        type,
4853 	task_t          task,
4854 	uint32_t        mapAddr )
4855 {
4856 	IOReturn            err;
4857 	mach_vm_address_t   address;
4858 
4859 	address = SCALAR64(mapAddr);
4860 
4861 	err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4862 
4863 	return err;
4864 }
4865 
4866 
4867 /* Routine io_connect_add_client */
4868 kern_return_t
is_io_connect_add_client(io_connect_t connection,io_object_t connect_to)4869 is_io_connect_add_client(
4870 	io_connect_t connection,
4871 	io_object_t connect_to)
4872 {
4873 	CHECK( IOUserClient, connection, client );
4874 	CHECK( IOUserClient, connect_to, to );
4875 
4876 	IOReturn ret;
4877 
4878 	IOStatisticsClientCall();
4879 
4880 	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4881 	ret = client->connectClient( to );
4882 	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4883 
4884 	return ret;
4885 }
4886 
4887 
4888 /* Routine io_connect_set_properties */
4889 kern_return_t
is_io_connect_set_properties(io_connect_t connection,io_buf_ptr_t properties,mach_msg_type_number_t propertiesCnt,kern_return_t * result)4890 is_io_connect_set_properties(
4891 	io_connect_t connection,
4892 	io_buf_ptr_t properties,
4893 	mach_msg_type_number_t propertiesCnt,
4894 	kern_return_t * result)
4895 {
4896 	return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4897 }
4898 
/* Routine io_user_client_method */
/*
 * External-method entry point for calls whose structure output is
 * variable-length: the method produces an OSSerialize or OSData via
 * args.structureVariableOutputData, which is copied out to the caller
 * as vm-allocated data (var_output/var_outputCnt).
 */
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD  = NULL;
	OSObject *           structureVariableOutputData = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	/* Synchronous call: no async wake port/reference. The method's
	 * variable-length result comes back through
	 * structureVariableOutputData. */
	args.asyncWakePort               = MACH_PORT_NULL;
	args.asyncReference              = NULL;
	args.asyncReferenceCount         = 0;
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/* OOL input no larger than the inband limit should have been sent
	 * inband; treat it as a malformed IPC request. */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		/* Map the caller's OOL input copy-on-write so the method sees a
		 * stable snapshot. */
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Zero output scalars up front so unused slots never leak kernel
	 * stack contents to user space. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;

	/* Let any installed filter policy veto this selector before
	 * dispatching to the user client. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}

	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod(selector, &args);
	}

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData      * data;
		unsigned int  len;

		/* The method must have produced an OSSerialize or an OSData;
		 * anything else (including nothing) is reported as underrun. */
		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			data->clipForCopyout();
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			ret = kIOReturnUnderrun;
		}
	}

	/* Drop the references this routine created. */
	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
5005 
/* Routine io_user_client_method */
/*
 * Main synchronous external-method entry point. Marshals the MIG
 * request (inband/OOL scalars and structures) into an
 * IOExternalMethodArguments, runs the filter policy, dispatches to the
 * user client, and writes the output counts back for the reply.
 */
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD  = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	/* Synchronous variant: no async wake port, no variable-size output. */
	args.asyncWakePort               = MACH_PORT_NULL;
	args.asyncReference              = NULL;
	args.asyncReferenceCount         = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/* OOL buffers must be strictly larger than the inband limit and the
	 * output size must fit the 32-bit descriptor-size field; otherwise
	 * the request is malformed. */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		/* Copy-on-write snapshot of the caller's OOL input. */
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Zero output scalars so unused slots never leak kernel stack data. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size
	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
	    : 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	/* Filter policy may veto the selector before dispatch. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	/* Report back how much the method actually produced. */
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	/* NOTE(review): unconditional dereference, though the guards above
	 * treat ool_output_size as possibly NULL — presumably MIG always
	 * passes a valid pointer here; confirm against the .defs file. */
	*ool_output_size  = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5108 
/* Routine io_async_user_client_method */
/*
 * Asynchronous external-method entry point. Same marshalling as
 * is_io_connect_method, plus a wake port and async reference array
 * used for the later completion notification.
 */
kern_return_t
is_io_connect_async_method
(
	io_connect_t connection,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t * ool_output_size
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD  = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	/* reference[0] is overwritten below, so at least one slot is
	 * required. */
	if (referenceCnt < 1) {
		return kIOReturnBadArgument;
	}

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	/* Stash the wake port in reference[0]; flag 64-bit callers so the
	 * completion path knows how to format the notification. */
	reference[0]             = (io_user_reference_t) wake_port;
	if (vm_map_is_64bit(get_task_map(current_task()))) {
		reference[0]         |= kIOUCAsync64Flag;
	}

	args.selector = selector;

	args.asyncWakePort       = wake_port;
	args.asyncReference      = reference;
	args.asyncReferenceCount = referenceCnt;

	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/* OOL buffers must be strictly larger than the inband limit, and the
	 * output size must fit the 32-bit descriptor-size field. */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		/* Copy-on-write snapshot of the caller's OOL input. */
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Zero output scalars so unused slots never leak kernel stack data. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	/* NOTE(review): unlike is_io_connect_method, this dereference has no
	 * NULL guard — presumably MIG always supplies ool_output_size;
	 * confirm against the .defs file. */
	args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	/* Filter policy may veto the selector before dispatch. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_async_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	/* Report back how much the method actually produced. */
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size  = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5222 
5223 /* Routine io_connect_method_scalarI_scalarO */
5224 kern_return_t
is_io_connect_method_scalarI_scalarO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5225 is_io_connect_method_scalarI_scalarO(
5226 	io_object_t        connect,
5227 	uint32_t           index,
5228 	io_scalar_inband_t       input,
5229 	mach_msg_type_number_t   inputCount,
5230 	io_scalar_inband_t       output,
5231 	mach_msg_type_number_t * outputCount )
5232 {
5233 	IOReturn err;
5234 	uint32_t i;
5235 	io_scalar_inband64_t _input;
5236 	io_scalar_inband64_t _output;
5237 
5238 	mach_msg_type_number_t struct_outputCnt = 0;
5239 	mach_vm_size_t ool_output_size = 0;
5240 
5241 	bzero(&_output[0], sizeof(_output));
5242 	for (i = 0; i < inputCount; i++) {
5243 		_input[i] = SCALAR64(input[i]);
5244 	}
5245 
5246 	err = is_io_connect_method(connect, index,
5247 	    _input, inputCount,
5248 	    NULL, 0,
5249 	    0, 0,
5250 	    NULL, &struct_outputCnt,
5251 	    _output, outputCount,
5252 	    0, &ool_output_size);
5253 
5254 	for (i = 0; i < *outputCount; i++) {
5255 		output[i] = SCALAR32(_output[i]);
5256 	}
5257 
5258 	return err;
5259 }
5260 
/*
 * Shim dispatching a legacy IOExternalMethod (scalar-in/scalar-out) by
 * argument count. The method table's count0/count1 must match the
 * caller's input/output counts exactly. Outputs are staged in a local
 * array and narrowed to 32 bits on the way out.
 */
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod *      method,
	IOService *             object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t   inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOMethod            func;
	io_scalar_inband_t  _output;
	IOReturn            err;
	err = kIOReturnBadArgument;

	/* Zero staging so unused output slots never leak stack contents. */
	bzero(&_output[0], sizeof(_output));
	/* do/while(false) + continue acts as a structured early-exit. */
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* The legacy ABI passes six arguments total: inputCount inputs
		 * followed by pointers to the output staging slots. */
		switch (inputCount) {
		case 6:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)(  ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)(  &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Narrow staged results to the caller's 32-bit output array. */
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5336 
5337 /* Routine io_async_method_scalarI_scalarO */
5338 kern_return_t
is_io_async_method_scalarI_scalarO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5339 is_io_async_method_scalarI_scalarO(
5340 	io_object_t        connect,
5341 	mach_port_t wake_port,
5342 	io_async_ref_t reference,
5343 	mach_msg_type_number_t referenceCnt,
5344 	uint32_t           index,
5345 	io_scalar_inband_t       input,
5346 	mach_msg_type_number_t   inputCount,
5347 	io_scalar_inband_t       output,
5348 	mach_msg_type_number_t * outputCount )
5349 {
5350 	IOReturn err;
5351 	uint32_t i;
5352 	io_scalar_inband64_t _input;
5353 	io_scalar_inband64_t _output;
5354 	io_async_ref64_t _reference;
5355 
5356 	if (referenceCnt > ASYNC_REF64_COUNT) {
5357 		return kIOReturnBadArgument;
5358 	}
5359 	bzero(&_output[0], sizeof(_output));
5360 	for (i = 0; i < referenceCnt; i++) {
5361 		_reference[i] = REF64(reference[i]);
5362 	}
5363 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5364 
5365 	mach_msg_type_number_t struct_outputCnt = 0;
5366 	mach_vm_size_t ool_output_size = 0;
5367 
5368 	for (i = 0; i < inputCount; i++) {
5369 		_input[i] = SCALAR64(input[i]);
5370 	}
5371 
5372 	err = is_io_connect_async_method(connect,
5373 	    wake_port, _reference, referenceCnt,
5374 	    index,
5375 	    _input, inputCount,
5376 	    NULL, 0,
5377 	    0, 0,
5378 	    NULL, &struct_outputCnt,
5379 	    _output, outputCount,
5380 	    0, &ool_output_size);
5381 
5382 	for (i = 0; i < *outputCount; i++) {
5383 		output[i] = SCALAR32(_output[i]);
5384 	}
5385 
5386 	return err;
5387 }
5388 /* Routine io_async_method_scalarI_structureO */
5389 kern_return_t
is_io_async_method_scalarI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5390 is_io_async_method_scalarI_structureO(
5391 	io_object_t     connect,
5392 	mach_port_t wake_port,
5393 	io_async_ref_t reference,
5394 	mach_msg_type_number_t referenceCnt,
5395 	uint32_t        index,
5396 	io_scalar_inband_t input,
5397 	mach_msg_type_number_t  inputCount,
5398 	io_struct_inband_t              output,
5399 	mach_msg_type_number_t *        outputCount )
5400 {
5401 	uint32_t i;
5402 	io_scalar_inband64_t _input;
5403 	io_async_ref64_t _reference;
5404 
5405 	if (referenceCnt > ASYNC_REF64_COUNT) {
5406 		return kIOReturnBadArgument;
5407 	}
5408 	for (i = 0; i < referenceCnt; i++) {
5409 		_reference[i] = REF64(reference[i]);
5410 	}
5411 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5412 
5413 	mach_msg_type_number_t scalar_outputCnt = 0;
5414 	mach_vm_size_t ool_output_size = 0;
5415 
5416 	for (i = 0; i < inputCount; i++) {
5417 		_input[i] = SCALAR64(input[i]);
5418 	}
5419 
5420 	return is_io_connect_async_method(connect,
5421 	           wake_port, _reference, referenceCnt,
5422 	           index,
5423 	           _input, inputCount,
5424 	           NULL, 0,
5425 	           0, 0,
5426 	           output, outputCount,
5427 	           NULL, &scalar_outputCnt,
5428 	           0, &ool_output_size);
5429 }
5430 
5431 /* Routine io_async_method_scalarI_structureI */
5432 kern_return_t
is_io_async_method_scalarI_structureI(io_connect_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5433 is_io_async_method_scalarI_structureI(
5434 	io_connect_t            connect,
5435 	mach_port_t wake_port,
5436 	io_async_ref_t reference,
5437 	mach_msg_type_number_t referenceCnt,
5438 	uint32_t                index,
5439 	io_scalar_inband_t      input,
5440 	mach_msg_type_number_t  inputCount,
5441 	io_struct_inband_t      inputStruct,
5442 	mach_msg_type_number_t  inputStructCount )
5443 {
5444 	uint32_t i;
5445 	io_scalar_inband64_t _input;
5446 	io_async_ref64_t _reference;
5447 
5448 	if (referenceCnt > ASYNC_REF64_COUNT) {
5449 		return kIOReturnBadArgument;
5450 	}
5451 	for (i = 0; i < referenceCnt; i++) {
5452 		_reference[i] = REF64(reference[i]);
5453 	}
5454 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5455 
5456 	mach_msg_type_number_t scalar_outputCnt = 0;
5457 	mach_msg_type_number_t inband_outputCnt = 0;
5458 	mach_vm_size_t ool_output_size = 0;
5459 
5460 	for (i = 0; i < inputCount; i++) {
5461 		_input[i] = SCALAR64(input[i]);
5462 	}
5463 
5464 	return is_io_connect_async_method(connect,
5465 	           wake_port, _reference, referenceCnt,
5466 	           index,
5467 	           _input, inputCount,
5468 	           inputStruct, inputStructCount,
5469 	           0, 0,
5470 	           NULL, &inband_outputCnt,
5471 	           NULL, &scalar_outputCnt,
5472 	           0, &ool_output_size);
5473 }
5474 
5475 /* Routine io_async_method_structureI_structureO */
5476 kern_return_t
is_io_async_method_structureI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5477 is_io_async_method_structureI_structureO(
5478 	io_object_t     connect,
5479 	mach_port_t wake_port,
5480 	io_async_ref_t reference,
5481 	mach_msg_type_number_t referenceCnt,
5482 	uint32_t        index,
5483 	io_struct_inband_t              input,
5484 	mach_msg_type_number_t  inputCount,
5485 	io_struct_inband_t              output,
5486 	mach_msg_type_number_t *        outputCount )
5487 {
5488 	uint32_t i;
5489 	mach_msg_type_number_t scalar_outputCnt = 0;
5490 	mach_vm_size_t ool_output_size = 0;
5491 	io_async_ref64_t _reference;
5492 
5493 	if (referenceCnt > ASYNC_REF64_COUNT) {
5494 		return kIOReturnBadArgument;
5495 	}
5496 	for (i = 0; i < referenceCnt; i++) {
5497 		_reference[i] = REF64(reference[i]);
5498 	}
5499 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5500 
5501 	return is_io_connect_async_method(connect,
5502 	           wake_port, _reference, referenceCnt,
5503 	           index,
5504 	           NULL, 0,
5505 	           input, inputCount,
5506 	           0, 0,
5507 	           output, outputCount,
5508 	           NULL, &scalar_outputCnt,
5509 	           0, &ool_output_size);
5510 }
5511 
5512 
/*
 * Shim dispatching a legacy IOExternalAsyncMethod
 * (scalar-in/scalar-out) by argument count. The async reference words
 * are narrowed to the legacy 32-bit io_async_ref_t before dispatch;
 * outputs are staged locally and narrowed on the way out.
 */
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t   inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	io_scalar_inband_t  _output;
	IOReturn            err;
	io_async_ref_t      reference;

	/* Zero staging so unused output slots never leak stack contents;
	 * narrow the async reference for the legacy ABI. */
	bzero(&_output[0], sizeof(_output));
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	/* do/while(false) + continue acts as a structured early-exit. */
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Async ABI: reference array first, then inputCount inputs,
		 * then pointers to the output staging slots (six args total
		 * after the reference). */
		switch (inputCount) {
		case 6:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)(  reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Narrow staged results to the caller's 32-bit output array. */
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5604 
5605 
5606 /* Routine io_connect_method_scalarI_structureO */
5607 kern_return_t
is_io_connect_method_scalarI_structureO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5608 is_io_connect_method_scalarI_structureO(
5609 	io_object_t     connect,
5610 	uint32_t        index,
5611 	io_scalar_inband_t input,
5612 	mach_msg_type_number_t  inputCount,
5613 	io_struct_inband_t              output,
5614 	mach_msg_type_number_t *        outputCount )
5615 {
5616 	uint32_t i;
5617 	io_scalar_inband64_t _input;
5618 
5619 	mach_msg_type_number_t scalar_outputCnt = 0;
5620 	mach_vm_size_t ool_output_size = 0;
5621 
5622 	for (i = 0; i < inputCount; i++) {
5623 		_input[i] = SCALAR64(input[i]);
5624 	}
5625 
5626 	return is_io_connect_method(connect, index,
5627 	           _input, inputCount,
5628 	           NULL, 0,
5629 	           0, 0,
5630 	           output, outputCount,
5631 	           NULL, &scalar_outputCnt,
5632 	           0, &ool_output_size);
5633 }
5634 
/*
 * Shim dispatching a legacy IOExternalMethod (scalar-in/structure-out)
 * by argument count. count1 may be kIOUCVariableStructureSize, in which
 * case the output length is not validated here and the method receives
 * the outputCount pointer (as void *) to report the actual size.
 */
kern_return_t
shim_io_connect_method_scalarI_structureO(

	IOExternalMethod *      method,
	IOService *             object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	IOByteCount *   outputCount )
{
	IOMethod            func;
	IOReturn            err;

	err = kIOReturnBadArgument;

	/* do/while(false) + continue acts as a structured early-exit. */
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Fixed-size methods must match count1 exactly; variable-size
		 * methods skip the check. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Legacy ABI: inputCount scalars, the output buffer, then the
		 * outputCount pointer smuggled through a void * slot; remaining
		 * slots are NULL. */
		switch (inputCount) {
		case 5:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5699 
5700 
/*
 * Shim dispatching a legacy IOExternalAsyncMethod
 * (scalar-in/structure-out) by argument count. The async reference is
 * narrowed to the legacy 32-bit io_async_ref_t; count1 may be
 * kIOUCVariableStructureSize (output size validated by the method, not
 * here), with outputCount passed through a void * slot.
 */
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	mach_msg_type_number_t *        outputCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err;
	io_async_ref_t      reference;

	/* Narrow the async reference words for the legacy ABI. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	/* do/while(false) + continue acts as a structured early-exit. */
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Fixed-size methods must match count1 exactly; variable-size
		 * methods skip the check. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Async ABI: reference first, then inputCount scalars, the
		 * output buffer, and the outputCount pointer via a void * slot;
		 * remaining slots are NULL. */
		switch (inputCount) {
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  reference,
			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5778 
5779 /* Routine io_connect_method_scalarI_structureI */
5780 kern_return_t
is_io_connect_method_scalarI_structureI(io_connect_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5781 is_io_connect_method_scalarI_structureI(
5782 	io_connect_t            connect,
5783 	uint32_t                index,
5784 	io_scalar_inband_t      input,
5785 	mach_msg_type_number_t  inputCount,
5786 	io_struct_inband_t      inputStruct,
5787 	mach_msg_type_number_t  inputStructCount )
5788 {
5789 	uint32_t i;
5790 	io_scalar_inband64_t _input;
5791 
5792 	mach_msg_type_number_t scalar_outputCnt = 0;
5793 	mach_msg_type_number_t inband_outputCnt = 0;
5794 	mach_vm_size_t ool_output_size = 0;
5795 
5796 	for (i = 0; i < inputCount; i++) {
5797 		_input[i] = SCALAR64(input[i]);
5798 	}
5799 
5800 	return is_io_connect_method(connect, index,
5801 	           _input, inputCount,
5802 	           inputStruct, inputStructCount,
5803 	           0, 0,
5804 	           NULL, &inband_outputCnt,
5805 	           NULL, &scalar_outputCnt,
5806 	           0, &ool_output_size);
5807 }
5808 
/*
 * Shim a legacy synchronous external method of type kIOUCScalarIStructI:
 * up to five scalar inputs followed by an inband input structure.
 * Validates the counts declared in the method table, then invokes the
 * driver's IOMethod with the scalars in the leading argument slots and
 * the struct pointer/size in the remaining ones.
 *
 * Returns kIOReturnBadArgument on a count mismatch or out-of-range
 * inputCount; otherwise the driver method's result.
 */
kern_return_t
shim_io_connect_method_scalarI_structureI(
	IOExternalMethod *  method,
	IOService *         object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              inputStruct,
	mach_msg_type_number_t  inputStructCount )
{
	IOMethod            func;
	IOReturn            err = kIOReturnBadArgument;

	do{
		// Scalar input count must match the method table exactly.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Input struct size must match count1 unless declared variable.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			// NOTE(review): unlike every other case, input[2] here is passed
			// as the raw 64-bit scalar cast to void * instead of being
			// truncated through ARG32 — longstanding upstream behavior;
			// confirm intent before changing.
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *)  input[2],
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			// inputCount matched count0 but count0 itself is out of range.
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5874 
/*
 * Shim a legacy async external method of type kIOUCScalarIStructI:
 * same argument marshalling as the synchronous variant, but the driver's
 * IOAsyncMethod additionally receives the rebuilt 32-bit async reference
 * as its first argument.  asyncWakePort is unused here — the reply port
 * is handled by the caller.
 *
 * Returns kIOReturnBadArgument on a count mismatch or out-of-range
 * inputCount; otherwise the driver method's result.
 */
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              inputStruct,
	mach_msg_type_number_t  inputStructCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err = kIOReturnBadArgument;
	io_async_ref_t      reference;

	// Narrow each 64-bit user async reference word to the legacy 32-bit form.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	do{
		// Scalar input count must match the method table exactly.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Input struct size must match count1 unless declared variable.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			// inputCount matched count0 but count0 itself is out of range.
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5955 
5956 /* Routine io_connect_method_structureI_structureO */
5957 kern_return_t
is_io_connect_method_structureI_structureO(io_object_t connect,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5958 is_io_connect_method_structureI_structureO(
5959 	io_object_t     connect,
5960 	uint32_t        index,
5961 	io_struct_inband_t              input,
5962 	mach_msg_type_number_t  inputCount,
5963 	io_struct_inband_t              output,
5964 	mach_msg_type_number_t *        outputCount )
5965 {
5966 	mach_msg_type_number_t scalar_outputCnt = 0;
5967 	mach_vm_size_t ool_output_size = 0;
5968 
5969 	return is_io_connect_method(connect, index,
5970 	           NULL, 0,
5971 	           input, inputCount,
5972 	           0, 0,
5973 	           output, outputCount,
5974 	           NULL, &scalar_outputCnt,
5975 	           0, &ool_output_size);
5976 }
5977 
/*
 * Shim a legacy synchronous external method of type kIOUCStructIStructO:
 * one inband input structure and one inband output structure.  The method
 * table's count0/count1 select which of three historical argument layouts
 * the driver's IOMethod expects (in+out, out-only, or in-only).
 *
 * Returns kIOReturnBadArgument on a size mismatch (unless the table marks
 * the size kIOUCVariableStructureSize); otherwise the driver's result.
 */
kern_return_t
shim_io_connect_method_structureI_structureO(
	IOExternalMethod *  method,
	IOService *         object,
	io_struct_inband_t              input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	IOByteCount *   outputCount )
{
	IOMethod            func;
	IOReturn            err = kIOReturnBadArgument;

	do{
		// Input struct size must match count0 unless declared variable.
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Output struct size must match count1 unless declared variable.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Argument layout depends on which directions the table declares.
		if (method->count1) {
			if (method->count0) {
				// Both input and output structures.
				err = (object->*func)( input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				// Output-only.
				err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			// Input-only.
			err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);


	return err;
}
6021 
/*
 * Shim a legacy async external method of type kIOUCStructIStructO:
 * same three argument layouts as the synchronous variant, with the
 * rebuilt 32-bit async reference prepended as the IOAsyncMethod's first
 * argument.  asyncWakePort is unused here — the reply port is handled
 * by the caller.
 *
 * Returns kIOReturnBadArgument on a size mismatch (unless the table marks
 * the size kIOUCVariableStructureSize); otherwise the driver's result.
 */
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t           asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t              asyncReferenceCount,
	io_struct_inband_t              input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	mach_msg_type_number_t *        outputCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err;
	io_async_ref_t      reference;

	// Narrow each 64-bit user async reference word to the legacy 32-bit form.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do{
		// Input struct size must match count0 unless declared variable.
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Output struct size must match count1 unless declared variable.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Argument layout depends on which directions the table declares.
		if (method->count1) {
			if (method->count0) {
				// Both input and output structures.
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				// Output-only.
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			// Input-only.
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
6077 
/* Routine io_catalog_send_data */
/*
 * MIG handler: accept serialized driver personalities (or a control flag)
 * from userspace and apply them to the global IOCatalogue.
 *
 * inData, when present, is a vm_map_copy_t of XML-serialized OSArray /
 * OSDictionary data.  The operation's status is reported through *result;
 * the MIG-level return is KERN_SUCCESS unless the request could not be
 * consumed at all (privilege, argument, or VM failures).
 */
kern_return_t
is_io_catalog_send_data(
	mach_port_t             main_port,
	uint32_t                flag,
	io_buf_ptr_t            inData,
	mach_msg_type_number_t  inDataCount,
	kern_return_t *         result)
{
	// Allow sending catalog data if there is no kextd and the kernel is DEVELOPMENT || DEBUG
#if NO_KEXTD && !(DEVELOPMENT || DEBUG)
	return kIOReturnNotPrivileged;
#else /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
	OSObject * obj = NULL;
	vm_offset_t data;
	kern_return_t kr = kIOReturnError;

	//printf("io_catalog_send_data called. flag: %d\n", flag);

	// Only the main device port may modify the catalogue.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	// Every flag except the flag-only control operations requires a payload.
	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
	    flag != kIOCatalogKextdActive &&
	    flag != kIOCatalogKextdFinishedLaunching) &&
	    (!inData || !inDataCount)) {
		return kIOReturnBadArgument;
	}

	if (!IOCurrentTaskHasEntitlement(kIOCatalogManagementEntitlement)) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);
		// For now, fake success to not break applications relying on this function succeeding.
		// See <rdar://problem/32554970> for more details.
		return kIOReturnSuccess;
	}

	if (inData) {
		vm_map_offset_t map_data;

		// Cap the payload to bound kernel-map consumption.
		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
			return kIOReturnMessageTooLarge;
		}

		// Materialize the caller's copy object into the kernel map.
		// On success this consumes inData.
		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		// must return success after vm_map_copyout() succeeds

		if (inDataCount) {
			// Unserialize, then immediately release the kernel mapping;
			// parse failures are reported through *result, not the MIG status.
			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
			vm_deallocate( kernel_map, data, inDataCount );
			if (!obj) {
				*result = kIOReturnNoMemory;
				return KERN_SUCCESS;
			}
		}
	}

	switch (flag) {
	case kIOCatalogResetDrivers:
	case kIOCatalogResetDriversNoMatch: {
		// Replace the catalogue contents; the *NoMatch variant skips rematching.
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->resetAndAddDrivers(array,
			    flag == kIOCatalogResetDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogAddDrivers:
	case kIOCatalogAddDriversNoMatch: {
		// Add personalities; the *NoMatch variant skips rematching.
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->addDrivers( array,
			    flag == kIOCatalogAddDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveDrivers:
	case kIOCatalogRemoveDriversNoMatch: {
		// Remove personalities matching the dictionary.
		OSDictionary * dict;

		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->removeDrivers( dict,
			    flag == kIOCatalogRemoveDrivers )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	// Historical control flags that no longer do anything.
	case kIOCatalogStartMatching__Removed:
	case kIOCatalogRemoveKernelLinker__Removed:
	case kIOCatalogKextdActive:
	case kIOCatalogKextdFinishedLaunching:
		kr = KERN_NOT_SUPPORTED;
		break;

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	if (obj) {
		obj->release();
	}

	*result = kr;
	return KERN_SUCCESS;
#endif /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
}
6212 
6213 /* Routine io_catalog_terminate */
6214 kern_return_t
is_io_catalog_terminate(mach_port_t main_port,uint32_t flag,io_name_t name)6215 is_io_catalog_terminate(
6216 	mach_port_t main_port,
6217 	uint32_t flag,
6218 	io_name_t name )
6219 {
6220 	kern_return_t          kr;
6221 
6222 	if (main_port != main_device_port) {
6223 		return kIOReturnNotPrivileged;
6224 	}
6225 
6226 	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6227 	    kIOClientPrivilegeAdministrator );
6228 	if (kIOReturnSuccess != kr) {
6229 		return kr;
6230 	}
6231 
6232 	switch (flag) {
6233 #if !defined(SECURE_KERNEL)
6234 	case kIOCatalogServiceTerminate:
6235 		kr = gIOCatalogue->terminateDrivers(NULL, name, false);
6236 		break;
6237 
6238 	case kIOCatalogModuleUnload:
6239 	case kIOCatalogModuleTerminate:
6240 		kr = gIOCatalogue->terminateDriversForModule(name,
6241 		    flag == kIOCatalogModuleUnload);
6242 		break;
6243 #endif
6244 
6245 	default:
6246 		kr = kIOReturnBadArgument;
6247 		break;
6248 	}
6249 
6250 	return kr;
6251 }
6252 
/* Routine io_catalog_get_data */
/*
 * MIG handler: serialize catalogue state selected by 'flag' and return it
 * to userspace as an out-of-line vm_map_copy_t in *outData with its byte
 * length in *outDataCount.
 *
 * The serialized bytes are staged in a kernel_map allocation which
 * vm_map_copyin() (src_destroy == true) hands off into the copy object,
 * so no explicit deallocation is needed on the success path.
 */
kern_return_t
is_io_catalog_get_data(
	mach_port_t             main_port,
	uint32_t                flag,
	io_buf_ptr_t            *outData,
	mach_msg_type_number_t  *outDataCount)
{
	kern_return_t kr = kIOReturnSuccess;
	OSSerialize * s;

	// Only the main device port may read the catalogue.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	//printf("io_catalog_get_data called. flag: %d\n", flag);

	// Initial capacity only; the serializer grows as needed.
	s = OSSerialize::withCapacity(4096);
	if (!s) {
		return kIOReturnNoMemory;
	}

	kr = gIOCatalogue->serializeData(flag, s);

	if (kr == kIOReturnSuccess) {
		mach_vm_address_t data;
		vm_map_copy_t copy;
		unsigned int size;

		size = s->getLength();
		kr = mach_vm_allocate_kernel(kernel_map, &data, size,
		    VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = VM_KERN_MEMORY_IOKIT));
		if (kr == kIOReturnSuccess) {
			bcopy(s->text(), (void *)data, size);
			// src_destroy == true: the allocation is consumed by the copy.
			// NOTE(review): *outData is stored even when copyin fails;
			// callers see the non-success kr in that case.
			kr = vm_map_copyin(kernel_map, data, size, true, &copy);
			*outData = (char *)copy;
			*outDataCount = size;
		}
	}

	s->release();

	return kr;
}
6297 
6298 /* Routine io_catalog_get_gen_count */
6299 kern_return_t
is_io_catalog_get_gen_count(mach_port_t main_port,uint32_t * genCount)6300 is_io_catalog_get_gen_count(
6301 	mach_port_t             main_port,
6302 	uint32_t                *genCount)
6303 {
6304 	if (main_port != main_device_port) {
6305 		return kIOReturnNotPrivileged;
6306 	}
6307 
6308 	//printf("io_catalog_get_gen_count called.\n");
6309 
6310 	if (!genCount) {
6311 		return kIOReturnBadArgument;
6312 	}
6313 
6314 	*genCount = gIOCatalogue->getGenerationCount();
6315 
6316 	return kIOReturnSuccess;
6317 }
6318 
6319 /* Routine io_catalog_module_loaded.
6320  * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
6321  */
6322 kern_return_t
is_io_catalog_module_loaded(mach_port_t main_port,io_name_t name)6323 is_io_catalog_module_loaded(
6324 	mach_port_t             main_port,
6325 	io_name_t               name)
6326 {
6327 	if (main_port != main_device_port) {
6328 		return kIOReturnNotPrivileged;
6329 	}
6330 
6331 	//printf("io_catalog_module_loaded called. name %s\n", name);
6332 
6333 	if (!name) {
6334 		return kIOReturnBadArgument;
6335 	}
6336 
6337 	gIOCatalogue->moduleHasLoaded(name);
6338 
6339 	return kIOReturnSuccess;
6340 }
6341 
6342 kern_return_t
is_io_catalog_reset(mach_port_t main_port,uint32_t flag)6343 is_io_catalog_reset(
6344 	mach_port_t             main_port,
6345 	uint32_t                flag)
6346 {
6347 	if (main_port != main_device_port) {
6348 		return kIOReturnNotPrivileged;
6349 	}
6350 
6351 	switch (flag) {
6352 	case kIOCatalogResetDefault:
6353 		gIOCatalogue->reset();
6354 		break;
6355 
6356 	default:
6357 		return kIOReturnBadArgument;
6358 	}
6359 
6360 	return kIOReturnSuccess;
6361 }
6362 
/*
 * Fast-path Mach trap for IOUserClient calls, bypassing MIG.
 *
 * args->userClientRef encodes either a user-client connect port name or a
 * DriverKit user-server reference; the low bits of the name select which.
 * For a classic user client, the index selects an IOExternalTrap (subject
 * to any installed filter policy) and the trap is invoked with p1..p6.
 *
 * Returns kIOReturnBadArgument for dead/null refs, unresolvable names, or
 * missing traps; otherwise the filter's or trap's own result.
 */
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t    result = kIOReturnBadArgument;
	IOUserClient   * userClient;
	OSObject       * object;
	uintptr_t        ref;
	mach_port_name_t portName;

	ref     = (uintptr_t) args->userClientRef;

	if ((ref == MACH_PORT_DEAD) || (ref == (uintptr_t) MACH_PORT_NULL)) {
		return kIOReturnBadArgument;
	}
	// kobject port names always have b0-1 set, so we use these bits as flags to
	// iokit_user_client_trap()
	// keep this up to date with ipc_entry_name_mask();
	portName = (mach_port_name_t) (ref | 3);
	// Bit 32 set or bit 0 clear selects the DriverKit user-server path.
	if (((1ULL << 32) & ref) || !(1 & ref)) {
		object = iokit_lookup_uext_ref_current_task(portName);
		if (object) {
			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
		}
		OSSafeReleaseNULL(object);
	} else {
		// Classic user-client path: resolve the connect reference, then
		// apply the task's filter policy before looking up the trap.
		io_object_t ref_current_task = iokit_lookup_connect_ref_current_task((mach_port_name_t) ref);
		if ((userClient = OSDynamicCast(IOUserClient, ref_current_task))) {
			IOExternalTrap *trap = NULL;
			IOService *target = NULL;

			result = kIOReturnSuccess;
			io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
			if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
				result = gIOUCFilterCallbacks->io_filter_applier(userClient, filterPolicy, io_filter_type_trap, args->index);
			}
			if (kIOReturnSuccess == result) {
				trap = userClient->getTargetAndTrapForIndex(&target, args->index);
			}
			if (trap && target) {
				IOTrap func;

				func = trap->func;

				if (func) {
					result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
				}
			}

			// Drop the reference taken by the connect lookup.
			iokit_remove_connect_reference(userClient);
		} else {
			// Lookup produced something that is not an IOUserClient.
			OSSafeReleaseNULL(ref_current_task);
		}
	}

	return result;
}
6419 
6420 /* Routine io_device_tree_entry_exists_with_name */
6421 kern_return_t
is_io_device_tree_entry_exists_with_name(mach_port_t main_port,io_name_t name,boolean_t * exists)6422 is_io_device_tree_entry_exists_with_name(
6423 	mach_port_t main_port,
6424 	io_name_t name,
6425 	boolean_t *exists )
6426 {
6427 	OSCollectionIterator *iter;
6428 
6429 	if (main_port != main_device_port) {
6430 		return kIOReturnNotPrivileged;
6431 	}
6432 
6433 	iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6434 	*exists = iter && iter->getNextObject();
6435 	OSSafeReleaseNULL(iter);
6436 
6437 	return kIOReturnSuccess;
6438 }
6439 } /* extern "C" */
6440 
6441 IOReturn
callExternalMethod(uint32_t selector,IOExternalMethodArguments * args)6442 IOUserClient::callExternalMethod(uint32_t selector, IOExternalMethodArguments * args)
6443 {
6444 	IOReturn ret;
6445 
6446 	ipcEnter(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6447 	if (uc2022) {
6448 		ret = ((IOUserClient2022 *) this)->externalMethod(selector, (IOExternalMethodArgumentsOpaque *) args);
6449 	} else {
6450 		ret = externalMethod(selector, args);
6451 	}
6452 	ipcExit(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6453 
6454 	return ret;
6455 }
6456 
// IOUserClient2022 subclasses must implement the opaque-arguments overload;
// reaching this legacy entry point indicates a mis-wired subclass, so fail hard.
MIG_SERVER_ROUTINE IOReturn
IOUserClient2022::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
    IOExternalMethodDispatch *dispatch,
    OSObject *target, void *reference)
{
	panic("wrong externalMethod for IOUserClient2022");
}
6464 
6465 IOReturn
dispatchExternalMethod(uint32_t selector,IOExternalMethodArgumentsOpaque * arguments,const IOExternalMethodDispatch2022 dispatchArray[],size_t dispatchArrayCount,OSObject * target,void * reference)6466 IOUserClient2022::dispatchExternalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque *arguments,
6467     const IOExternalMethodDispatch2022 dispatchArray[], size_t dispatchArrayCount,
6468     OSObject * target, void * reference)
6469 {
6470 	IOReturn    err;
6471 	IOExternalMethodArguments * args = (typeof(args))arguments;
6472 	const IOExternalMethodDispatch2022 * dispatch;
6473 
6474 	if (!dispatchArray) {
6475 		return kIOReturnError;
6476 	}
6477 	if (selector >= dispatchArrayCount) {
6478 		return kIOReturnBadArgument;
6479 	}
6480 	dispatch = &dispatchArray[selector];
6481 
6482 	uint32_t count;
6483 	count = dispatch->checkScalarInputCount;
6484 	if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6485 		return kIOReturnBadArgument;
6486 	}
6487 
6488 	count = dispatch->checkStructureInputSize;
6489 	if ((kIOUCVariableStructureSize != count)
6490 	    && (count != ((args->structureInputDescriptor)
6491 	    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6492 		return kIOReturnBadArgument;
6493 	}
6494 
6495 	count = dispatch->checkScalarOutputCount;
6496 	if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6497 		return kIOReturnBadArgument;
6498 	}
6499 
6500 	count = dispatch->checkStructureOutputSize;
6501 	if ((kIOUCVariableStructureSize != count)
6502 	    && (count != ((args->structureOutputDescriptor)
6503 	    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6504 		return kIOReturnBadArgument;
6505 	}
6506 
6507 	if (args->asyncWakePort && !dispatch->allowAsync) {
6508 		return kIOReturnBadArgument;
6509 	}
6510 
6511 	if (dispatch->checkEntitlement) {
6512 		if (!IOCurrentTaskHasEntitlement(dispatch->checkEntitlement)) {
6513 			return kIOReturnNotPrivileged;
6514 		}
6515 	}
6516 
6517 	if (dispatch->function) {
6518 		err = (*dispatch->function)(target, reference, args);
6519 	} else {
6520 		err = kIOReturnNoCompletion; /* implementer can dispatch */
6521 	}
6522 	return err;
6523 }
6524 
6525 IOReturn
externalMethod(uint32_t selector,IOExternalMethodArguments * args,IOExternalMethodDispatch * dispatch,OSObject * target,void * reference)6526 IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
6527     IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
6528 {
6529 	IOReturn    err;
6530 	IOService * object;
6531 	IOByteCount structureOutputSize;
6532 
6533 	if (dispatch) {
6534 		uint32_t count;
6535 		count = dispatch->checkScalarInputCount;
6536 		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6537 			return kIOReturnBadArgument;
6538 		}
6539 
6540 		count = dispatch->checkStructureInputSize;
6541 		if ((kIOUCVariableStructureSize != count)
6542 		    && (count != ((args->structureInputDescriptor)
6543 		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6544 			return kIOReturnBadArgument;
6545 		}
6546 
6547 		count = dispatch->checkScalarOutputCount;
6548 		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6549 			return kIOReturnBadArgument;
6550 		}
6551 
6552 		count = dispatch->checkStructureOutputSize;
6553 		if ((kIOUCVariableStructureSize != count)
6554 		    && (count != ((args->structureOutputDescriptor)
6555 		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6556 			return kIOReturnBadArgument;
6557 		}
6558 
6559 		if (dispatch->function) {
6560 			err = (*dispatch->function)(target, reference, args);
6561 		} else {
6562 			err = kIOReturnNoCompletion; /* implementer can dispatch */
6563 		}
6564 		return err;
6565 	}
6566 
6567 
6568 	// pre-Leopard API's don't do ool structs
6569 	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
6570 		err = kIOReturnIPCError;
6571 		return err;
6572 	}
6573 
6574 	structureOutputSize = args->structureOutputSize;
6575 
6576 	if (args->asyncWakePort) {
6577 		IOExternalAsyncMethod * method;
6578 		object = NULL;
6579 		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
6580 			return kIOReturnUnsupported;
6581 		}
6582 
6583 		if (kIOUCForegroundOnly & method->flags) {
6584 			if (task_is_gpu_denied(current_task())) {
6585 				return kIOReturnNotPermitted;
6586 			}
6587 		}
6588 
6589 		switch (method->flags & kIOUCTypeMask) {
6590 		case kIOUCScalarIStructI:
6591 			err = shim_io_async_method_scalarI_structureI( method, object,
6592 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6593 			    args->scalarInput, args->scalarInputCount,
6594 			    (char *)args->structureInput, args->structureInputSize );
6595 			break;
6596 
6597 		case kIOUCScalarIScalarO:
6598 			err = shim_io_async_method_scalarI_scalarO( method, object,
6599 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6600 			    args->scalarInput, args->scalarInputCount,
6601 			    args->scalarOutput, &args->scalarOutputCount );
6602 			break;
6603 
6604 		case kIOUCScalarIStructO:
6605 			err = shim_io_async_method_scalarI_structureO( method, object,
6606 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6607 			    args->scalarInput, args->scalarInputCount,
6608 			    (char *) args->structureOutput, &args->structureOutputSize );
6609 			break;
6610 
6611 
6612 		case kIOUCStructIStructO:
6613 			err = shim_io_async_method_structureI_structureO( method, object,
6614 			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6615 			    (char *)args->structureInput, args->structureInputSize,
6616 			    (char *) args->structureOutput, &args->structureOutputSize );
6617 			break;
6618 
6619 		default:
6620 			err = kIOReturnBadArgument;
6621 			break;
6622 		}
6623 	} else {
6624 		IOExternalMethod *      method;
6625 		object = NULL;
6626 		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
6627 			return kIOReturnUnsupported;
6628 		}
6629 
6630 		if (kIOUCForegroundOnly & method->flags) {
6631 			if (task_is_gpu_denied(current_task())) {
6632 				return kIOReturnNotPermitted;
6633 			}
6634 		}
6635 
6636 		switch (method->flags & kIOUCTypeMask) {
6637 		case kIOUCScalarIStructI:
6638 			err = shim_io_connect_method_scalarI_structureI( method, object,
6639 			    args->scalarInput, args->scalarInputCount,
6640 			    (char *) args->structureInput, args->structureInputSize );
6641 			break;
6642 
6643 		case kIOUCScalarIScalarO:
6644 			err = shim_io_connect_method_scalarI_scalarO( method, object,
6645 			    args->scalarInput, args->scalarInputCount,
6646 			    args->scalarOutput, &args->scalarOutputCount );
6647 			break;
6648 
6649 		case kIOUCScalarIStructO:
6650 			err = shim_io_connect_method_scalarI_structureO( method, object,
6651 			    args->scalarInput, args->scalarInputCount,
6652 			    (char *) args->structureOutput, &structureOutputSize );
6653 			break;
6654 
6655 
6656 		case kIOUCStructIStructO:
6657 			err = shim_io_connect_method_structureI_structureO( method, object,
6658 			    (char *) args->structureInput, args->structureInputSize,
6659 			    (char *) args->structureOutput, &structureOutputSize );
6660 			break;
6661 
6662 		default:
6663 			err = kIOReturnBadArgument;
6664 			break;
6665 		}
6666 	}
6667 
6668 	if (structureOutputSize > UINT_MAX) {
6669 		structureOutputSize = 0;
6670 		err = kIOReturnBadArgument;
6671 	}
6672 
6673 	args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);
6674 
6675 	return err;
6676 }
6677 
6678 IOReturn
registerFilterCallbacks(const struct io_filter_callbacks * callbacks,size_t size)6679 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6680 {
6681 	if (size < sizeof(*callbacks)) {
6682 		return kIOReturnBadArgument;
6683 	}
6684 	if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6685 		return kIOReturnBusy;
6686 	}
6687 	return kIOReturnSuccess;
6688 }
6689 
6690 
/*
 * Reserved (pad) virtual-method slots.
 *
 * OSMetaClassDefineReservedUnused emits a placeholder definition for a
 * reserved vtable entry, so future releases can add virtual methods to
 * IOUserClient / IOUserClient2022 without changing the vtable layout
 * seen by already-compiled subclasses (kernel binary compatibility).
 * Slot indices and ordering are ABI; do not renumber, remove, or reuse
 * a slot except by converting it with the corresponding
 * OSMetaClassDefineReservedUsed when a new method is introduced.
 */
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);

OSMetaClassDefineReservedUnused(IOUserClient2022, 0);
OSMetaClassDefineReservedUnused(IOUserClient2022, 1);
OSMetaClassDefineReservedUnused(IOUserClient2022, 2);
OSMetaClassDefineReservedUnused(IOUserClient2022, 3);
6712