xref: /xnu-8792.81.2/iokit/Kernel/IOUserClient.cpp (revision 19c3b8c28c31cb8130e034cfb5df6bf9ba342d90)
1 /*
2  * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <libkern/c++/OSKext.h>
30 #include <libkern/c++/OSSharedPtr.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51 
52 #include <mach/sdt.h>
53 #include <os/hash.h>
54 
55 #include <libkern/amfi/amfi.h>
56 
57 #if CONFIG_MACF
58 
59 extern "C" {
60 #include <security/mac_framework.h>
61 };
62 #include <sys/kauth.h>
63 
64 #define IOMACF_LOG 0
65 
66 #endif /* CONFIG_MACF */
67 
68 #include <IOKit/assert.h>
69 
70 #include "IOServicePrivate.h"
71 #include "IOKitKernelInternal.h"
72 
73 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
74 #define SCALAR32(x) ((uint32_t )x)
75 #define ARG32(x)    ((void *)(uintptr_t)SCALAR32(x))
76 #define REF64(x)    ((io_user_reference_t)((UInt64)(x)))
77 #define REF32(x)    ((int)(x))
78 
// Flag bits carried in the low bits of an async-reference kernel value.
// The names suggest: bit 0 = client registered a 64-bit async reference,
// bit 1 = an async error has already been logged once; kIOUCAsync0Flags
// masks both bits. (Usage is outside this chunk — confirm against callers.)
enum{
	kIOUCAsync0Flags          = 3ULL,
	kIOUCAsync64Flag          = 1ULL,
	kIOUCAsyncErrorLoggedFlag = 2ULL
};
84 
#if IOKITSTATS

// Register this user client with IOStatistics and stash the returned counter
// in the expansion data ('reserved' and 'this' must be in scope).
#define IOStatisticsRegisterCounter() \
do { \
	reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

// Unregister the counter; guarded because 'reserved' may be NULL if the
// expansion data was never allocated.
#define IOStatisticsUnregisterCounter() \
do { \
	if (reserved) \
	        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

// Count one user-client call; expects a local named 'client' at the
// expansion site.
#define IOStatisticsClientCall() \
do { \
	IOStatistics::countUserClientCall(client); \
} while (0)

#else

// IOKITSTATS disabled: all statistics hooks compile away to nothing.
#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */
110 
#if DEVELOPMENT || DEBUG

// Temporarily overwrite this frame's saved return address with (a) —
// presumably so backtraces taken during the guarded region attribute the
// call to (a) rather than to this file; TODO confirm against callers.
// Declares locals, so it may appear only once per scope, and MUST be paired
// with FAKE_STACK_FRAME_END() in the same scope before returning.
#define FAKE_STACK_FRAME(a)                                             \
	const void ** __frameptr;                                       \
	const void  * __retaddr;                                        \
	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0);   \
	__retaddr = __frameptr[1];                                      \
	__frameptr[1] = (a);

// Restore the genuine return address saved by FAKE_STACK_FRAME().
#define FAKE_STACK_FRAME_END()                                          \
	__frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

// Release builds: the fake-frame machinery compiles away entirely.
#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */
129 
// Element counts of the 32-bit (natural_t) and 64-bit (io_user_reference_t)
// async reference arrays, derived from the array types themselves.
#define ASYNC_REF_COUNT         (sizeof(io_async_ref_t) / sizeof(natural_t))
#define ASYNC_REF64_COUNT       (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
132 
133 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
134 
135 extern "C" {
136 #include <mach/mach_traps.h>
137 #include <vm/vm_map.h>
138 } /* extern "C" */
139 
140 struct IOMachPortHashList;
141 
142 static_assert(IKOT_MAX_TYPE <= 255);
143 
144 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
145 
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// Instances live in the gIOMachPortHash buckets below and are protected by
// gIOObjectPortLock.
class IOMachPort : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	SLIST_ENTRY(IOMachPort) link;   // linkage within one hash bucket
	ipc_port_t  port;               // kobject port representing 'object'
	OSObject*   object;             // mapped object (tagged-retained)
	UInt32      mscount;            // make-send count handed out for 'port'
	UInt8       holdDestroy;        // set => releasePortForObject() is a no-op
	UInt8       type;               // ipc_kobject_type_t narrowed to 8 bits
	                                // (static_assert: IKOT_MAX_TYPE <= 255)

	// Allocate a mapping for 'obj': creates the kobject port and takes a
	// tagged retain on 'obj'.
	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);

	// Bucket selection; hashes the object pointer only ('type' unused).
	static IOMachPortHashList* bucketForObject(OSObject *obj,
	    ipc_kobject_type_t type);

	// Linear scan of one bucket for the (obj, type) pair. Caller must hold
	// gIOObjectPortLock; the returned entry is not retained.
	static LIBKERN_RETURNS_NOT_RETAINED IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	// React to a no-senders notification; returns true if the mapping was
	// torn down, false (updating *mscount) if newer send rights exist.
	static bool noMoreSendersForObject( OSObject * obj,
	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );
	// Pin the mapping so releasePortForObject() leaves it in place.
	static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;
};
176 
#define super OSObject
OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)

// Protects the gIOMachPortHash buckets and port-related state of the objects
// they reference.
static IOLock *         gIOObjectPortLock;
IOLock *                gIOUserServerLock;

// IOUserClient message filter callbacks; SECURITY_READ_ONLY_LATE means the
// pointer is written once during early boot and immutable afterwards.
SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SLIST_HEAD(IOMachPortHashList, IOMachPort);

#if defined(XNU_TARGET_OS_OSX)
#define PORT_HASH_SIZE 4096
#else /* !defined(XNU_TARGET_OS_OSX) */
#define PORT_HASH_SIZE 256
#endif /* defined(XNU_TARGET_OS_OSX) */

// Object-pointer-hashed buckets of IOMachPort mappings; see bucketForObject().
IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
196 
197 void
IOMachPortInitialize(void)198 IOMachPortInitialize(void)
199 {
200 	for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
201 		SLIST_INIT(&gIOMachPortHash[i]);
202 	}
203 }
204 
205 IOMachPortHashList*
bucketForObject(OSObject * obj,ipc_kobject_type_t type)206 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
207 {
208 	return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
209 }
210 
211 IOMachPort*
portForObjectInBucket(IOMachPortHashList * bucket,OSObject * obj,ipc_kobject_type_t type)212 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
213 {
214 	IOMachPort *machPort;
215 
216 	SLIST_FOREACH(machPort, bucket, link) {
217 		if (machPort->object == obj && machPort->type == type) {
218 			return machPort;
219 		}
220 	}
221 	return NULL;
222 }
223 
224 IOMachPort*
withObjectAndType(OSObject * obj,ipc_kobject_type_t type)225 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
226 {
227 	IOMachPort *machPort = NULL;
228 
229 	machPort = new IOMachPort;
230 	if (__improbable(machPort && !machPort->init())) {
231 		OSSafeReleaseNULL(machPort);
232 		return NULL;
233 	}
234 
235 	machPort->object = obj;
236 	machPort->type = (typeof(machPort->type))type;
237 	machPort->port = iokit_alloc_object_port(obj, type);
238 
239 	obj->taggedRetain(OSTypeID(OSCollection));
240 	machPort->mscount++;
241 
242 	return machPort;
243 }
244 
// Handle a no-senders notification for the (obj, type) port. Returns true if
// the mapping was destroyed; returns false — updating *mscount — when a race
// with iokit_port_for_object() minted newer send rights in the meantime.
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOMachPort *machPort = NULL;
	IOUserClient *uc;
	OSAction *action;
	bool destroyed = true;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Keep 'obj' alive across the teardown and callouts below.
	obj->retain();

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort) {
		// Stale notification if our make-send count is newer than the
		// count the notification was generated with.
		destroyed = (machPort->mscount <= *mscount);
		if (!destroyed) {
			// Report the current count so a fresh no-senders
			// request can be re-armed by the caller.
			*mscount = machPort->mscount;
			lck_mtx_unlock(gIOObjectPortLock);
		} else {
			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
				uc->noMoreSenders();
			}
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);

			lck_mtx_unlock(gIOObjectPortLock);

			// Release outside the lock; IOMachPort::free() destroys
			// the kobject port.
			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

			// Drop the tagged retain taken in withObjectAndType().
			obj->taggedRelease(OSTypeID(OSCollection));
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}

	// DriverKit object ports: tell the OSAction its last user-space sender
	// has gone away.
	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
		action->Aborted();
	}

	if (IKOT_UEXT_OBJECT == type && IOUserServer::shouldLeakObjects()) {
		// Leak object
		obj->retain();
	}

	obj->release();

	return destroyed;
}
296 
// Tear down the (obj, type) port mapping unless holdDestroy was set via
// setHoldDestroy(). Not valid for connect ports (asserted): those are torn
// down via destroyUserReferences()/noMoreSendersForObject().
void
IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
	IOMachPort *machPort;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort && !machPort->holdDestroy) {
		// Extra retain keeps 'obj' alive until after the tagged release.
		obj->retain();
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);

		lck_mtx_unlock(gIOObjectPortLock);

		// Release outside the lock; IOMachPort::free() destroys the port.
		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

		// Drop the tagged retain taken in withObjectAndType(), then the
		// temporary retain above.
		obj->taggedRelease(OSTypeID(OSCollection));
		obj->release();
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}
}
324 
325 void
setHoldDestroy(OSObject * obj,ipc_kobject_type_t type)326 IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
327 {
328 	IOMachPort *        machPort;
329 
330 	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
331 	lck_mtx_lock(gIOObjectPortLock);
332 
333 	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
334 
335 	if (machPort) {
336 		machPort->holdDestroy = true;
337 	}
338 
339 	lck_mtx_unlock(gIOObjectPortLock);
340 }
341 
// C entry point: drop the user-visible port mapping for 'obj' of the given
// kobject type. Must not be used for IKOT_IOKIT_CONNECT (asserted inside
// releasePortForObject).
void
IOMachPortDestroyUserReferences(OSObject * obj, natural_t type)
{
	IOMachPort::releasePortForObject(obj, type);
}
347 
// Destroy the ports user space holds for 'obj'. The plain object port is
// simply released; a connect port belonging to an IOUserClient with live
// memory mappings is instead handed over to the 'mappings' object so those
// mappings keep a valid port identity.
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	IOMachPort *machPort;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	// panther, 3160200
	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

	obj->retain();
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
	IOMachPortHashList *mappingBucket = NULL;

	lck_mtx_lock(gIOObjectPortLock);

	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	if (uc && uc->mappings) {
		// The port will be re-keyed under the mappings object, so it
		// may land in a different hash bucket.
		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
	}

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		lck_mtx_unlock(gIOObjectPortLock);
		goto end;
	}

	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	// Drop the tagged retain taken when the mapping was created.
	obj->taggedRelease(OSTypeID(OSCollection));

	if (uc) {
		uc->noMoreSenders();
		if (uc->mappings) {
			// Transfer the connect port to the mappings object:
			// retag, re-key the hash entry, and switch the kobject
			// the port points at.
			uc->mappings->taggedRetain(OSTypeID(OSCollection));
			machPort->object = uc->mappings;
			SLIST_INSERT_HEAD(mappingBucket, machPort, link);
			iokit_switch_object_port(machPort->port, uc->mappings, IKOT_IOKIT_CONNECT);

			lck_mtx_unlock(gIOObjectPortLock);

			OSSafeReleaseNULL(uc->mappings);
		} else {
			lck_mtx_unlock(gIOObjectPortLock);
			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
	}


end:
	// Balances the retain() at the top of this function.
	OSSafeReleaseNULL(obj);
}
403 
// Thin wrapper over iokit_make_send_right(): inserts a send right for obj's
// kobject port of 'type' into 'task' and returns the resulting port name.
mach_port_name_t
IOMachPort::makeSendRightForTask( task_t task,
    io_object_t obj, ipc_kobject_type_t type )
{
	return iokit_make_send_right( task, obj, type );
}
410 
void
IOMachPort::free( void )
{
	// Destroy the kobject port, if one was ever allocated, before the
	// wrapper object itself is reclaimed.
	if (port) {
		iokit_destroy_object_port( port, type );
	}
	super::free();
}
419 
420 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
421 
422 static bool
IOTaskRegistryCompatibility(task_t task)423 IOTaskRegistryCompatibility(task_t task)
424 {
425 	return false;
426 }
427 
428 static void
IOTaskRegistryCompatibilityMatching(task_t task,OSDictionary * matching)429 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
430 {
431 	matching->setObject(gIOServiceNotificationUserKey, kOSBooleanTrue);
432 	if (!IOTaskRegistryCompatibility(task)) {
433 		return;
434 	}
435 	matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
436 }
437 
438 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
439 
OSDefineMetaClassAndStructors(IOUserIterator,OSIterator)440 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
441 
442 IOUserIterator *
443 IOUserIterator::withIterator(OSIterator * iter)
444 {
445 	IOUserIterator * me;
446 
447 	if (!iter) {
448 		return NULL;
449 	}
450 
451 	me = new IOUserIterator;
452 	if (me && !me->init()) {
453 		me->release();
454 		me = NULL;
455 	}
456 	if (!me) {
457 		iter->release();
458 		return me;
459 	}
460 	me->userIteratorObject = iter;
461 
462 	return me;
463 }
464 
465 bool
init(void)466 IOUserIterator::init( void )
467 {
468 	if (!OSObject::init()) {
469 		return false;
470 	}
471 
472 	lock = IOLockAlloc();
473 	if (!lock) {
474 		return false;
475 	}
476 
477 	return true;
478 }
479 
480 void
free()481 IOUserIterator::free()
482 {
483 	if (userIteratorObject) {
484 		userIteratorObject->release();
485 	}
486 	if (lock) {
487 		IOLockFree(lock);
488 	}
489 	OSObject::free();
490 }
491 
492 void
reset()493 IOUserIterator::reset()
494 {
495 	IOLockLock(lock);
496 	assert(OSDynamicCast(OSIterator, userIteratorObject));
497 	((OSIterator *)userIteratorObject)->reset();
498 	IOLockUnlock(lock);
499 }
500 
501 bool
isValid()502 IOUserIterator::isValid()
503 {
504 	bool ret;
505 
506 	IOLockLock(lock);
507 	assert(OSDynamicCast(OSIterator, userIteratorObject));
508 	ret = ((OSIterator *)userIteratorObject)->isValid();
509 	IOLockUnlock(lock);
510 
511 	return ret;
512 }
513 
OSObject *
IOUserIterator::getNextObject()
{
	// Deliberately unsupported: callers must use copyNextObject(), which
	// takes the lock and returns a retained reference.
	assert(false);
	return NULL;
}
520 
521 OSObject *
copyNextObject()522 IOUserIterator::copyNextObject()
523 {
524 	OSObject * ret = NULL;
525 
526 	IOLockLock(lock);
527 	if (userIteratorObject) {
528 		ret = ((OSIterator *)userIteratorObject)->getNextObject();
529 		if (ret) {
530 			ret->retain();
531 		}
532 	}
533 	IOLockUnlock(lock);
534 
535 	return ret;
536 }
537 
538 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
539 extern "C" {
540 // functions called from osfmk/device/iokit_rpc.c
541 
// Fill 'desc' with a human-readable description of the kobject behind a
// port: class name plus registry entry ID for registry entries, and (on
// DEVELOPMENT/DEBUG) the serialized matching dictionary for notifications.
void
iokit_port_object_description(io_object_t obj, kobject_description_t desc)
{
	IORegistryEntry    * regEntry;
	IOUserNotification * __unused noti;
	_IOServiceNotifier * __unused serviceNoti;
	OSSerialize        * __unused s;
	OSDictionary       * __unused matching = NULL;

	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
#if DEVELOPMENT || DEBUG
	} else if ((noti = OSDynamicCast(IOUserNotification, obj))) {
		// serviceNoti->matching may become NULL if the port gets a no-senders notification, so we have to lock gIOObjectPortLock
		IOLockLock(gIOObjectPortLock);
		serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->userIteratorObject);
		if (serviceNoti && (matching = serviceNoti->matching)) {
			// Retain under the lock so we can serialize after dropping it.
			matching->retain();
		}
		IOLockUnlock(gIOObjectPortLock);

		if (matching) {
			s = OSSerialize::withCapacity((unsigned int) page_size);
			if (s && matching->serialize(s)) {
				snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
			}
			OSSafeReleaseNULL(s);
			OSSafeReleaseNULL(matching);
		}
#endif /* DEVELOPMENT || DEBUG */
	} else {
		// Fallback: class name only.
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
	}
}
576 
577 // FIXME: Implementation of these functions are hidden from the static analyzer.
578 // As for now, the analyzer doesn't consistently support wrapper functions
579 // for retain and release.
580 #ifndef __clang_analyzer__
581 void
iokit_add_reference(io_object_t obj,natural_t type)582 iokit_add_reference( io_object_t obj, natural_t type )
583 {
584 	IOUserClient * uc;
585 
586 	if (!obj) {
587 		return;
588 	}
589 
590 	if ((IKOT_IOKIT_CONNECT == type)
591 	    && (uc = OSDynamicCast(IOUserClient, obj))) {
592 		OSIncrementAtomic(&uc->__ipc);
593 	}
594 
595 	obj->retain();
596 }
597 
598 void
iokit_remove_reference(io_object_t obj)599 iokit_remove_reference( io_object_t obj )
600 {
601 	if (obj) {
602 		obj->release();
603 	}
604 }
605 #endif // __clang_analyzer__
606 
// Drop a connect-port IPC reference. When the last in-flight IPC reference
// on an inactive user client goes away and finalization was deferred (see
// finalizeUserReferences), schedule that finalization now.
void
iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj )
{
	IOUserClient * uc;
	bool           finalize = false;

	if (!obj) {
		return;
	}

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		// OSDecrementAtomic returns the value before the decrement, so
		// '1 ==' means this was the last outstanding IPC reference.
		if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
			IOLockLock(gIOObjectPortLock);
			// Claim the deferred-finalize flag exactly once.
			if ((finalize = uc->__ipcFinal)) {
				uc->__ipcFinal = false;
			}
			IOLockUnlock(gIOObjectPortLock);
		}
		if (finalize) {
			uc->scheduleFinalize(true);
		}
	}

	obj->release();
}
632 
// Decide whether 'obj' can be finalized immediately. If a user client still
// has in-flight IPC references (__ipc != 0), record that finalization is
// pending (__ipcFinal) and return false; iokit_remove_connect_reference()
// will schedule it when the last reference drains.
bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
	IOUserClient * uc;
	bool           ok = true;

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		IOLockLock(gIOObjectPortLock);
		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
			ok = false;
		}
		IOLockUnlock(gIOObjectPortLock);
	}
	return ok;
}
648 
// Return a retained ipc_port for (obj, type), creating the IOMachPort
// mapping on first use. Each call bumps the make-send count so a racing
// no-senders notification is recognized as stale (see
// noMoreSendersForObject). Returns NULL if the mapping cannot be allocated.
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
{
	IOMachPort *machPort = NULL;
	ipc_port_t   port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		machPort = IOMachPort::withObjectAndType(obj, type);
		if (__improbable(machPort == NULL)) {
			goto end;
		}
		// withObjectAndType() started mscount at 1 for this caller.
		SLIST_INSERT_HEAD(bucket, machPort, link);
	} else {
		machPort->mscount++;
	}

	iokit_retain_port(machPort->port);
	port = machPort->port;

end:
	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
679 
// No-senders handler invoked from osfmk/device/iokit_rpc.c when the last
// user-space send right for a kobject port dies. Tears down the port
// mapping, then notifies the object according to its port type. Returns
// kIOReturnNotReady when the notification was stale (newer senders exist).
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOUserClient *      client;
	IOMemoryMap *       map;
	IOUserNotification * notify;
	IOUserServerCheckInToken * token;

	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
		return kIOReturnNotReady;
	}

	switch (type) {
	case IKOT_IOKIT_CONNECT:
		// The owning process of a connection died: run clientDied()
		// under the client's write lock.
		if ((client = OSDynamicCast( IOUserClient, obj ))) {
			IOStatisticsClientCall();
			IORWLockWrite(client->lock);
			client->clientDied();
			IORWLockUnlock(client->lock);
		}
		break;
	case IKOT_IOKIT_OBJECT:
		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
			map->taskDied();
		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
			// Detach and remove the underlying IONotifier.
			notify->setNotification( NULL );
		}
		break;
	case IKOT_IOKIT_IDENT:
		if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
			token->cancel();
		}
		break;
	}

	return kIOReturnSuccess;
}
718 };      /* extern "C" */
719 
720 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
721 
// Delivers service publish/termination notifications to a user-space port.
// Matched services are queued in 'newSet'; a ping message is sent only when
// the queue transitions from empty while 'armed' (re-armed by the client
// draining the queue via copyNextObject()).
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	// Kernel-processed portion of the ping message.
	struct PingMsgKdata {
		mach_msg_header_t               msgHdr;
	};
	// User-data portion: the notification header (+ trailing reference).
	struct PingMsgUdata {
		OSNotificationHeader64          notifyHeader;
	};

	enum { kMaxOutstanding = 1024 };        // cap on queued services

	ipc_port_t          remotePort;         // client's port (send right, owned)
	void                *msgReference;      // copy of the client's async reference
	mach_msg_size_t     msgReferenceSize;   // rounded size of msgReference
	natural_t           msgType;            // notification type sent to the client
	OSArray     *       newSet;             // queued matched services
	bool                armed;              // true => next queued service pings
	bool                ipcLogged;          // one-shot send-failure log guard

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	// Forget remotePort so free() will not release a stale send right.
	void invalidatePort(void);

	// Static trampoline matching IOServiceMatchingNotificationHandler.
	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
758 
// Delivers IOService interest messages (messageClients) to a user-space
// port, forwarding the provider's port and any message argument payload.
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	// Kernel-processed portion: header, body and the provider port
	// descriptor.
	struct PingMsgKdata {
		mach_msg_header_t               msgHdr;
		mach_msg_body_t                 msgBody;
		mach_msg_port_descriptor_t      ports[1];
	};
	// User-data portion; packed so the variable-length payload follows the
	// header without padding.
	struct PingMsgUdata {
		OSNotificationHeader64          notifyHeader __attribute__ ((packed));
	};

	ipc_port_t          remotePort;         // client's port (send right, owned)
	void                *msgReference;      // copy of the client's async reference
	mach_msg_size_t     msgReferenceSize;   // rounded size of msgReference
	mach_msg_size_t     msgExtraSize;       // extra payload bytes beyond the header
	natural_t           msgType;            // notification type sent to the client
	uint8_t             clientIs64;         // client registered a 64-bit reference
	int                 owningPID;          // pid captured at registration time
	bool                ipcLogged;          // one-shot send-failure log guard

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	// Forget remotePort so free() will not release a stale send right.
	void invalidatePort(void);

	// Static trampoline matching IOServiceInterestHandler.
	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
800 
801 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
802 
803 #undef super
804 #define super IOUserIterator
805 OSDefineMetaClass( IOUserNotification, IOUserIterator );
806 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
807 
808 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
809 
void
IOUserNotification::free( void )
{
#if DEVELOPMENT || DEBUG
	// By free() time, setNotification(NULL) must already have detached the
	// IONotifier; verify under the same lock setNotification() uses.
	IOLockLock( gIOObjectPortLock);

	assert(userIteratorObject == NULL);

	IOLockUnlock( gIOObjectPortLock);
#endif /* DEVELOPMENT || DEBUG */

	super::free();
}
823 
824 
// Swap the IONotifier backing this notification. Passing NULL detaches and
// removes the current notifier and drops the self-retain taken when one was
// first attached. The retain/release choreography below is exact — see the
// race comment.
void
IOUserNotification::setNotification( IONotifier * notify )
{
	OSObject * previousNotify;

	/*
	 * We must retain this object here before proceeding.
	 * Two threads may race in setNotification(). If one thread sets a new notifier while the
	 * other thread sets the notifier to NULL, it is possible for the second thread to call release()
	 * before the first thread calls retain(). Without the retain here, this thread interleaving
	 * would cause the object to get released and freed before it is retained by the first thread,
	 * which is a UaF.
	 */
	retain();

	IOLockLock( gIOObjectPortLock);

	previousNotify = userIteratorObject;
	userIteratorObject = notify;

	IOLockUnlock( gIOObjectPortLock);

	if (previousNotify) {
		assert(OSDynamicCast(IONotifier, previousNotify));
		// Detach the old notifier from its provider.
		((IONotifier *)previousNotify)->remove();

		if (notify == NULL) {
			// Detaching entirely: drop the self-retain taken when a
			// notifier was first installed.
			release();
		}
	} else if (notify) {
		// new IONotifier, retain the object. release() will happen in setNotification(NULL)
		retain();
	}

	release(); // paired with retain() at beginning of this method
}
861 
void
IOUserNotification::reset()
{
	// Intentionally a no-op: there is nothing to rewind for a notification.
}
867 
bool
IOUserNotification::isValid()
{
	// Notification objects are always considered valid.
	return true;
}
873 
874 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
875 
876 #undef super
877 #define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification,IOUserNotification)878 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
879 
880 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
881 
882 bool
883 IOServiceUserNotification::init( mach_port_t port, natural_t type,
884     void * reference, vm_size_t referenceSize,
885     bool clientIs64 )
886 {
887 	if (!super::init()) {
888 		return false;
889 	}
890 
891 	newSet = OSArray::withCapacity( 1 );
892 	if (!newSet) {
893 		return false;
894 	}
895 
896 	if (referenceSize > sizeof(OSAsyncReference64)) {
897 		return false;
898 	}
899 
900 	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
901 	msgReference = IOMallocZeroData(msgReferenceSize);
902 	if (!msgReference) {
903 		return false;
904 	}
905 
906 	remotePort = port;
907 	msgType = type;
908 	bcopy( reference, msgReference, referenceSize );
909 
910 	return true;
911 }
912 
// Forget the client's port so free() will not release a stale send right.
void
IOServiceUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
918 
void
IOServiceUserNotification::free( void )
{
	// Release the client's send right unless invalidatePort() ran first.
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);
	OSSafeReleaseNULL(newSet);

	super::free();
}
930 
931 bool
_handler(void * target,void * ref,IOService * newService,IONotifier * notifier)932 IOServiceUserNotification::_handler( void * target,
933     void * ref, IOService * newService, IONotifier * notifier )
934 {
935 	IOServiceUserNotification * targetObj = (IOServiceUserNotification *)target;
936 	bool ret;
937 
938 	targetObj->retain();
939 	ret = targetObj->handler( ref, newService );
940 	targetObj->release();
941 	return ret;
942 }
943 
// Matching callback: queue 'newService' (up to kMaxOutstanding) and, on an
// empty->non-empty transition while armed, send one ping message to the
// client so it drains the queue via copyNextObject().
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	unsigned int        count;
	kern_return_t       kr;
	ipc_port_t          port = NULL;
	bool                sendPing = false;
	mach_msg_size_t     msgSize, payloadSize;

	IOTakeLock( lock );

	count = newSet->getCount();
	if (count < kMaxOutstanding) {
		newSet->setObject( newService );
		// Only the first service queued while armed triggers a ping;
		// disarm until the client drains the queue.
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( lock );

	if (kIOServiceTerminatedNotificationType == msgType) {
		// Keep the terminated service's object port alive until user
		// space has seen the notification.
		IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
	}

	if (sendPing) {
		port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );

		// Payload is the notify header plus the (rounded) registered
		// reference, in place of the full OSAsyncReference64 field.
		payloadSize = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		msgSize = (mach_msg_size_t)(sizeof(PingMsgKdata) + payloadSize);

		kr = kernel_mach_msg_send_with_builder_internal(0, payloadSize,
		    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
		    MACH_MSG_TIMEOUT_NONE, NULL,
		    ^(mach_msg_header_t *hdr, __assert_only mach_msg_descriptor_t *descs, void *payload){
			PingMsgUdata *udata = (PingMsgUdata *)payload;

			hdr->msgh_remote_port    = remotePort;
			hdr->msgh_local_port     = port;
			hdr->msgh_bits           = MACH_MSGH_BITS(
				MACH_MSG_TYPE_COPY_SEND /*remote*/,
				MACH_MSG_TYPE_MAKE_SEND /*local*/);
			hdr->msgh_size           = msgSize;
			hdr->msgh_id             = kOSNotificationMessageID;

			assert(descs == NULL);
			/* End of kernel processed data */

			udata->notifyHeader.size          = 0;
			udata->notifyHeader.type          = msgType;

			assert((char *)udata->notifyHeader.reference + msgReferenceSize <= (char *)payload + payloadSize);
			bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
		});

		if (port) {
			iokit_release_port( port );
		}

		// Log send failures once per notification object only.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
OSObject *
IOServiceUserNotification::getNextObject()
{
	// Deliberately unsupported: callers must use copyNextObject(), which
	// pops under the lock and returns a retained reference.
	assert(false);
	return NULL;
}
1018 
1019 OSObject *
copyNextObject()1020 IOServiceUserNotification::copyNextObject()
1021 {
1022 	unsigned int        count;
1023 	OSObject *          result;
1024 
1025 	IOLockLock(lock);
1026 
1027 	count = newSet->getCount();
1028 	if (count) {
1029 		result = newSet->getObject( count - 1 );
1030 		result->retain();
1031 		newSet->removeObject( count - 1);
1032 	} else {
1033 		result = NULL;
1034 		armed = true;
1035 	}
1036 
1037 	IOLockUnlock(lock);
1038 
1039 	return result;
1040 }
1041 
1042 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1043 
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification,IOUserNotification)1044 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1045 
1046 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1047 
1048 bool
1049 IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
1050     void * reference, vm_size_t referenceSize, bool client64 )
1051 {
1052 	if (!super::init()) {
1053 		return false;
1054 	}
1055 
1056 	if (referenceSize > sizeof(OSAsyncReference64)) {
1057 		return false;
1058 	}
1059 
1060 	clientIs64 = client64;
1061 
1062 	owningPID = proc_selfpid();
1063 
1064 	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
1065 	msgReference = IOMallocZeroData(msgReferenceSize);
1066 	if (!msgReference) {
1067 		return false;
1068 	}
1069 
1070 	remotePort = port;
1071 	msgType = type;
1072 	bcopy( reference, msgReference, referenceSize );
1073 
1074 	return true;
1075 }
1076 
/*
 * Forget the remote send right without releasing it here; after this, free()
 * will not call iokit_release_port_send() on it.
 * NOTE(review): presumably the right's ownership has been transferred or
 * dropped by the caller — confirm at call sites.
 */
void
IOServiceMessageUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
1082 
/*
 * Release the remote send right (unless invalidatePort() already cleared it)
 * and the padded reference copy made in init().
 */
void
IOServiceMessageUserNotification::free( void )
{
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);

	super::free();
}
1093 
1094 IOReturn
_handler(void * target,void * ref,UInt32 messageType,IOService * provider,void * argument,vm_size_t argSize)1095 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1096     UInt32 messageType, IOService * provider,
1097     void * argument, vm_size_t argSize )
1098 {
1099 	IOServiceMessageUserNotification * targetObj = (IOServiceMessageUserNotification *)target;
1100 	IOReturn ret;
1101 
1102 	targetObj->retain();
1103 	ret = targetObj->handler(
1104 		ref, messageType, provider, argument, argSize);
1105 	targetObj->release();
1106 	return ret;
1107 }
1108 
/*
 * Deliver a service-interest message to the user-space client by building
 * and sending a mach message containing the notify header, the saved async
 * reference, and the message argument payload.
 * Always reports success to the notifier except for MACH_SEND_NO_BUFFER;
 * other send failures are logged once (ipcLogged) and swallowed.
 */
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	kern_return_t                kr;
	vm_size_t                    argSize;
	mach_msg_size_t              thisMsgSize;
	ipc_port_t                   thisPort, providerPort;

	// kIOMessageCopyClientID is answered in-kernel: hand back the owning
	// pid as an OSNumber instead of sending a message.
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	if (callerArgSize == 0) {
		// No payload: send messageArgument itself, sized per client ABI.
		if (clientIs64) {
			argSize = sizeof(io_user_reference_t);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		// Clamp oversized payloads rather than failing the notification.
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions
	// Encode the unpadded argument size into the type field so user space
	// can recover the original length, then pad argSize for the message.
	natural_t type = msgType;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	mach_msg_size_t extraSize = kIOUserNotifyMaxMessageSize + sizeof(IOServiceInterestContent64);
	// Base size: kernel-processed part plus the user data with the async
	// reference trimmed to its actual (rounded) size.
	mach_msg_size_t msgSize = (mach_msg_size_t) (sizeof(PingMsgKdata) +
	    sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize);

	// Overflow-checked total; bail rather than sending a truncated message.
	if (os_add3_overflow(msgSize, offsetof(IOServiceInterestContent64, messageArgument), argSize, &thisMsgSize)) {
		return kIOReturnBadArgument;
	}
	mach_msg_size_t payloadSize = thisMsgSize - sizeof(PingMsgKdata);

	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );

	// The builder callback fills header, one port descriptor, and payload
	// in place; descriptor count is finalized by the send primitive.
	kr = kernel_mach_msg_send_with_builder_internal(1, payloadSize,
	    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
	    MACH_MSG_TIMEOUT_NONE, NULL,
	    ^(mach_msg_header_t *hdr, mach_msg_descriptor_t *descs, void *payload){
		mach_msg_port_descriptor_t *port_desc = (mach_msg_port_descriptor_t *)descs;
		PingMsgUdata *udata = (PingMsgUdata *)payload;
		IOServiceInterestContent64 * data;
		mach_msg_size_t dataOffset;

		hdr->msgh_remote_port    = remotePort;
		hdr->msgh_local_port     = thisPort;
		hdr->msgh_bits           = MACH_MSGH_BITS_COMPLEX
		|  MACH_MSGH_BITS(
			MACH_MSG_TYPE_COPY_SEND /*remote*/,
			MACH_MSG_TYPE_MAKE_SEND /*local*/);
		hdr->msgh_size           = thisMsgSize;
		hdr->msgh_id             = kOSNotificationMessageID;

		/* body.msgh_descriptor_count is set automatically after the closure */

		port_desc[0].name              = providerPort;
		port_desc[0].disposition       = MACH_MSG_TYPE_MAKE_SEND;
		port_desc[0].type              = MACH_MSG_PORT_DESCRIPTOR;
		/* End of kernel processed data */

		udata->notifyHeader.size          = extraSize;
		udata->notifyHeader.type          = type;
		bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );

		/* data is after msgReference */
		dataOffset = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		data = (IOServiceInterestContent64 *) (((uint8_t *) udata) + dataOffset);
		data->messageType = messageType;

		if (callerArgSize == 0) {
		        assert((char *)data->messageArgument + argSize <= (char *)payload + payloadSize);
		        data->messageArgument[0] = (io_user_reference_t) messageArgument;
		        if (!clientIs64) {
				// 32-bit clients read the low word; mirror it high.
		                data->messageArgument[0] |= (data->messageArgument[0] << 32);
			}
		} else {
		        assert((char *)data->messageArgument + callerArgSize <= (char *)payload + payloadSize);
		        bcopy(messageArgument, data->messageArgument, callerArgSize);
		}
	});

	if (thisPort) {
		iokit_release_port( thisPort );
	}
	if (providerPort) {
		iokit_release_port( providerPort );
	}

	// No send buffer is the only error surfaced to the caller.
	if (kr == MACH_SEND_NO_BUFFER) {
		return kIOReturnNoMemory;
	}

	// Log other IPC failures once per object to avoid log spam.
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1219 
/* Message-style notifications carry no object queue; always NULL. */
OSObject *
IOServiceMessageUserNotification::getNextObject()
{
	return NULL;
}
1225 
/* Message-style notifications carry no object queue; always NULL. */
OSObject *
IOServiceMessageUserNotification::copyNextObject()
{
	return NULL;
}
1231 
1232 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1233 
#undef super
#define super IOService
// IOUserClient is abstract: only concrete subclasses are instantiated.
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

// Guards the per-task and per-client IOUserClientOwner queues
// (see registerOwner / noMoreSenders / iokit_task_terminate).
IOLock       * gIOUserClientOwnersLock;
1239 
1240 void
initialize(void)1241 IOUserClient::initialize( void )
1242 {
1243 	gIOObjectPortLock       = IOLockAlloc();
1244 	gIOUserClientOwnersLock = IOLockAlloc();
1245 	gIOUserServerLock       = IOLockAlloc();
1246 	assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1247 
1248 #if IOTRACKING
1249 	IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1250 	IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1251 	IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1252 	IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1253 	IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
1254 #endif /* IOTRACKING */
1255 }
1256 
/*
 * Legacy 32-bit async reference setter.  On LP64 kernels this entry point
 * is compiled as noreturn and panics; 32-bit kernels pack the wake port,
 * callback, and refcon into the reference array, preserving flag bits in
 * slot 0.
 */
void
#if __LP64__
__attribute__((__noreturn__))
#endif
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
#if __LP64__
	panic("setAsyncReference not valid for 64b");
#else
	asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
#endif
}
1274 
1275 void
setAsyncReference64(OSAsyncReference64 asyncRef,mach_port_t wakePort,mach_vm_address_t callback,io_user_reference_t refcon)1276 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1277     mach_port_t wakePort,
1278     mach_vm_address_t callback, io_user_reference_t refcon)
1279 {
1280 	asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
1281 	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1282 	asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
1283 	asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1284 }
1285 
1286 void
setAsyncReference64(OSAsyncReference64 asyncRef,mach_port_t wakePort,mach_vm_address_t callback,io_user_reference_t refcon,task_t task)1287 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1288     mach_port_t wakePort,
1289     mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1290 {
1291 	setAsyncReference64(asyncRef, wakePort, callback, refcon);
1292 	if (vm_map_is_64bit(get_task_map(task))) {
1293 		asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1294 	}
1295 }
1296 
/*
 * Return (retained) the console-users entry whose session uid matches
 * `uid`, or NULL when absent.  Caller releases.
 */
static OSDictionary *
CopyConsoleUser(UInt32 uid)
{
	OSArray * array;
	OSDictionary * user = NULL;

	OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
	if ((array = OSDynamicCast(OSArray, ioProperty))) {
		// The loop condition ends iteration when the cast fails — either
		// past the end of the array or at a non-dictionary element.
		for (unsigned int idx = 0;
		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
		    idx++) {
			OSNumber * num;

			if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
			    && (uid == num->unsigned32BitValue())) {
				// Matched: take our own reference before returning.
				user->retain();
				break;
			}
		}
	}
	OSSafeReleaseNULL(ioProperty);
	return user;
}
1320 
/*
 * Return (retained) the first console-users entry marked as being on the
 * console, or NULL when none.  Caller releases.
 */
static OSDictionary *
CopyUserOnConsole(void)
{
	OSArray * array;
	OSDictionary * user = NULL;

	OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
	if ((array = OSDynamicCast(OSArray, ioProperty))) {
		// Iteration ends when the cast fails (end of array / non-dict).
		for (unsigned int idx = 0;
		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
		    idx++) {
			if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
				user->retain();
				break;
			}
		}
	}
	OSSafeReleaseNULL(ioProperty);
	return user;
}
1341 
1342 IOReturn
clientHasAuthorization(task_t task,IOService * service)1343 IOUserClient::clientHasAuthorization( task_t task,
1344     IOService * service )
1345 {
1346 	proc_t p;
1347 
1348 	p = (proc_t) get_bsdtask_info(task);
1349 	if (p) {
1350 		uint64_t authorizationID;
1351 
1352 		authorizationID = proc_uniqueid(p);
1353 		if (authorizationID) {
1354 			if (service->getAuthorizationID() == authorizationID) {
1355 				return kIOReturnSuccess;
1356 			}
1357 		}
1358 	}
1359 
1360 	return kIOReturnNotPermitted;
1361 }
1362 
/*
 * Check whether the caller identified by securityToken holds the named
 * IOKit privilege.  securityToken is a task_t, except for
 * kIOClientPrivilegeSecureConsoleProcess where it is an IOUCProcessToken.
 * Returns kIOReturnSuccess / kIOReturnNotPrivileged, or
 * kIOReturnUnsupported for an unknown privilege name.
 */
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	kern_return_t           kr;
	security_token_t        token;
	mach_msg_type_number_t  count;
	task_t                  task;
	OSDictionary *          user;
	bool                    secureConsole;


	// Foreground: granted unless the current task is GPU-denied.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console session: the caller's audit session id must match the
	// session of the user currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t       p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// Secure-console tokens wrap both a task and the requesting pid.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	// Remaining checks key off the task's security token.
	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator requires token.val[0] == 0 (root credential).
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: uid must appear in the console users list.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: the uid's session must be on the console; for the
		// secure variant the requesting pid must also own secure input.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1457 
1458 OSDictionary *
copyClientEntitlements(task_t task)1459 IOUserClient::copyClientEntitlements(task_t task)
1460 {
1461 	proc_t p = NULL;
1462 	pid_t pid = 0;
1463 	OSDictionary *entitlements = NULL;
1464 
1465 	p = (proc_t)get_bsdtask_info(task);
1466 	if (p == NULL) {
1467 		return NULL;
1468 	}
1469 	pid = proc_pid(p);
1470 
1471 	if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1472 		if (entitlements) {
1473 			return entitlements;
1474 		}
1475 	}
1476 
1477 	// If the above fails, thats it
1478 	return NULL;
1479 }
1480 
1481 OSDictionary *
copyClientEntitlementsVnode(vnode_t vnode,off_t offset)1482 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1483 {
1484 	OSDictionary *entitlements = NULL;
1485 
1486 	if (cs_entitlements_dictionary_copy_vnode(vnode, offset, (void**)&entitlements) != 0) {
1487 		return NULL;
1488 	}
1489 	return entitlements;
1490 }
1491 
/*
 * Copy the value of a single entitlement for `task`.  Returns a retained
 * OSObject (caller releases) or NULL when absent or unreadable.
 */
OSObject *
IOUserClient::copyClientEntitlement( task_t task,
    const char * entitlement )
{
	OSDictionary *entitlements;
	OSObject *value;

	#if PMAP_CS_ENABLE && !CONFIG_X86_64_COMPAT
	// Fast path: query the single key straight from pmap-backed
	// CoreEntitlements data, avoiding a full dictionary copy.
	if (pmap_cs_enabled() && amfi->query_context_to_object) {
		struct CEQueryContext queryCtx = {};
		size_t entlen = strlen(entitlement);
		CEQuery_t query = {
			/*
			 *       We only select the dict value, if it exists this we will get
			 *       a value CEQueryContext back to points to pmap backed memory
			 */
			CESelectDictValueDynamic((const uint8_t*)entitlement, entlen)
		};
		if (task == current_task()) {
			// NULL task means current task, which translated to the current pmap
			if (!pmap_query_entitlements(NULL, query, 1, &queryCtx)) {
				return NULL;
			}
		} else {
			vm_map_t task_map = get_task_map_reference(task);
			if (task_map) {
				pmap_t pmap = vm_map_get_pmap(task_map);
				if (!pmap || !pmap_query_entitlements(pmap, query, 1, &queryCtx)) {
					vm_map_deallocate(task_map);
					return NULL;
				}
				vm_map_deallocate(task_map);
			}
		}
		value = (OSObject*)amfi->query_context_to_object(&queryCtx);
		return value;
	}
	#endif

	// Slow path: copy the whole entitlement dictionary, then look up the key.
	entitlements = copyClientEntitlements(task);
	if (entitlements == NULL) {
		return NULL;
	}

	/* Fetch the entitlement value from the dictionary. */
	value = entitlements->getObject(entitlement);
	if (value != NULL) {
		value->retain();
	}

	entitlements->release();
	return value;
}
1545 
1546 OSObject *
copyClientEntitlementVnode(struct vnode * vnode,off_t offset,const char * entitlement)1547 IOUserClient::copyClientEntitlementVnode(
1548 	struct vnode *vnode,
1549 	off_t offset,
1550 	const char *entitlement)
1551 {
1552 	OSDictionary *entitlements;
1553 	OSObject *value;
1554 
1555 	entitlements = copyClientEntitlementsVnode(vnode, offset);
1556 	if (entitlements == NULL) {
1557 		return NULL;
1558 	}
1559 
1560 	/* Fetch the entitlement value from the dictionary. */
1561 	value = entitlements->getObject(entitlement);
1562 	if (value != NULL) {
1563 		value->retain();
1564 	}
1565 
1566 	entitlements->release();
1567 	return value;
1568 }
1569 
bool
IOUserClient::init()
{
	// NOTE(review): short-circuit means super::init() is skipped when a
	// property table already exists — mirrored by the other init variants,
	// presumably intentional; confirm before changing.
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1579 
bool
IOUserClient::init(OSDictionary * dictionary)
{
	// Same short-circuit pattern as init(): an existing property table
	// bypasses super::init(dictionary).
	if (getPropertyTable() || super::init(dictionary)) {
		return reserve();
	}

	return false;
}
1589 
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type )
{
	// Base implementation ignores owningTask/securityID/type; subclasses
	// typically override and validate them.
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1601 
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type,
    OSDictionary * properties )
{
	bool ok;

	// Non-short-circuit &=: initWithTask() runs even when super::init()
	// failed, so reserve() side effects still occur in that case.
	ok = super::init( properties );
	ok &= initWithTask( owningTask, securityID, type );

	return ok;
}
1615 
bool
IOUserClient::reserve()
{
	// Lazily allocate the expansion data (filter policies etc.).
	if (!reserved) {
		reserved = IOMallocType(ExpansionData);
	}
	setTerminateDefer(NULL, true);
	IOStatisticsRegisterCounter();

	return true;
}
1627 
// Links one (task, user client) ownership pair onto two intrusive queues:
// the task's list of owned user clients and the client's list of owners.
// Protected by gIOUserClientOwnersLock.
struct IOUserClientOwner {
	task_t         task;      // owning task
	queue_chain_t  taskLink;  // linkage on task_io_user_clients(task)
	IOUserClient * uc;        // owned user client
	queue_chain_t  ucLink;    // linkage on uc->owners
};
1634 
/*
 * Record `task` as an owner of this user client (idempotent per task).
 * Owners are linked on both the per-client and per-task queues under
 * gIOUserClientOwnersLock.  Currently always returns kIOReturnSuccess.
 */
IOReturn
IOUserClient::registerOwner(task_t task)
{
	IOUserClientOwner * owner;
	IOReturn            ret;
	bool                newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	// owners.next == NULL marks a never-initialized queue head.
	if (!owners.next) {
		queue_init(&owners);
	} else {
		// Skip if this task is already registered.
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		owner = IOMallocType(IOUserClientOwner);

		owner->task = task;
		owner->uc   = this;
		queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
		queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
		// Propagate this client's interest in app-suspend messages.
		if (messageAppSuspended) {
			task_set_message_app_suspended(task, true);
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1675 
/*
 * Called when no send rights remain: unlink every owner record from both
 * queues, recompute each task's message-app-suspended interest from its
 * remaining clients, and reset the owners queue to the uninitialized
 * (next == NULL) state.
 */
void
IOUserClient::noMoreSenders(void)
{
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t      * taskque;
	bool                hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			// The task keeps its suspended-message flag only if another
			// of its clients still wants it.
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IOFreeType(owner, IOUserClientOwner);
		}
		// Mark the queue uninitialized again (see registerOwner).
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1707 
1708 
/*
 * Notify interested user clients that the task's app-suspended state
 * changed.  Collects the clients into a set under the lock, then delivers
 * kIOMessageTaskAppSuspendedChange outside the lock to avoid calling into
 * client code while holding gIOUserClientOwnersLock.
 */
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	queue_head_t      * taskque;
	IOUserClientOwner * owner;
	OSSet             * set;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		// Only clients that opted into suspend messages.
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		if (!set) {
			set = OSSet::withCapacity(4);
			if (!set) {
				break;
			}
		}
		// setObject retains, keeping the client alive past the unlock.
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient      * uc;

			uc = (typeof(uc))obj;
#if 0
			{
			        OSString          * str;
			        str = IOCopyLogNameForPID(task_pid(task));
			        IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
			        uc->getName(), task_is_app_suspended(task));
			        OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1756 
/*
 * Task-teardown hook: detach every user client owned by the task.  Clients
 * left with no owners are chained into a temporary singly-linked list
 * (reusing their queue head fields) and clientDied() is delivered outside
 * the lock.
 */
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
	IOUserClientOwner * owner;
	IOUserClient      * dead;
	IOUserClient      * uc;
	queue_head_t      * taskque;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			// Last owner gone: retain so the client survives until the
			// clientDied() call below, after the lock is released.
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IOFreeType(owner, IOUserClientOwner);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	// Deliver clientDied() outside the lock, dropping our retain after.
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1800 
// One node of the per-client singly-linked list mapping a task to its
// sandbox filter policy (see filterForTask); protected by filterLock.
struct IOUCFilterPolicy {
	task_t             task;          // task this policy applies to
	io_filter_policy_t filterPolicy;  // opaque policy handle
	IOUCFilterPolicy * next;          // next node, NULL-terminated
};
1806 
/*
 * Look up (and optionally record) the filter policy for `task`.  When
 * addFilterPolicy is nonzero and no entry exists, a new node is prepended;
 * when an entry exists, addFilterPolicy must match it.  Returns the stored
 * policy, or 0 when absent and none was supplied.
 */
io_filter_policy_t
IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
{
	IOUCFilterPolicy * elem;
	io_filter_policy_t filterPolicy;

	filterPolicy = 0;
	IOLockLock(filterLock);

	// Linear scan of the per-client policy list.
	for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
	}

	if (elem) {
		if (addFilterPolicy) {
			assert(addFilterPolicy == elem->filterPolicy);
		}
		filterPolicy = elem->filterPolicy;
	} else if (addFilterPolicy) {
		elem = IOMallocType(IOUCFilterPolicy);
		elem->task               = task;
		elem->filterPolicy       = addFilterPolicy;
		elem->next               = reserved->filterPolicies;
		reserved->filterPolicies = elem;
		filterPolicy = addFilterPolicy;
	}

	IOLockUnlock(filterLock);
	return filterPolicy;
}
1836 
/*
 * Release per-instance resources: mappings set, locks, statistics counter,
 * and the filter-policy list inside the expansion data.  Owner queues must
 * already be empty (see noMoreSenders / iokit_task_terminate).
 */
void
IOUserClient::free()
{
	if (mappings) {
		mappings->release();
	}
	if (lock) {
		IORWLockFree(lock);
	}
	if (filterLock) {
		IOLockFree(filterLock);
	}

	IOStatisticsUnregisterCounter();

	// Both fields NULL means the owners queue was fully torn down.
	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		IOUCFilterPolicy * elem;
		IOUCFilterPolicy * nextElem;
		// Walk and free the filter-policy list, dropping each policy
		// reference through the registered release callback.
		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
			nextElem = elem->next;
			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
			}
			IOFreeType(elem, IOUCFilterPolicy);
		}
		IOFreeType(reserved, ExpansionData);
	}

	super::free();
}
1870 
1871 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1872 
// OSMetaClass boilerplate; IOUserClient2022 is abstract (no direct instances).
OSDefineMetaClassAndAbstractStructors( IOUserClient2022, IOUserClient )
1874 
1875 
1876 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1877 
/*
 * Invoked when the owning client process dies.  The atomic 0->1 swap on
 * `closed` ensures clientClose() runs at most once per instance; shared
 * instances always forward the call.
 */
IOReturn
IOUserClient::clientDied( void )
{
	IOReturn ret = kIOReturnNotReady;

	if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
		ret = clientClose();
	}

	return ret;
}
1889 
/* Default implementation; subclasses override to tear down the connection. */
IOReturn
IOUserClient::clientClose( void )
{
	return kIOReturnUnsupported;
}
1895 
/* Default implementation; subclasses return their provider service. */
IOService *
IOUserClient::getService( void )
{
	return NULL;
}
1901 
/* Default implementation; subclasses override to accept a wake port. */
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t     /* port */,
	UInt32          /* type */,
	UInt32          /* refCon */)
{
	return kIOReturnUnsupported;
}
1910 
/*
 * 64-bit refCon variant; forwards to the legacy overload, truncating refCon
 * to 32 bits in the process.
 */
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t port,
	UInt32          type,
	io_user_reference_t refCon)
{
	return registerNotificationPort(port, type, (UInt32) refCon);
}
1919 
/* Default implementation; subclasses may vend a semaphore per type. */
IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type,
    semaphore_t * semaphore )
{
	return kIOReturnUnsupported;
}
1926 
/* Default implementation; subclasses may link two client connections. */
IOReturn
IOUserClient::connectClient( IOUserClient * /* client */ )
{
	return kIOReturnUnsupported;
}
1932 
/* Default implementation; subclasses vend a descriptor per memory type. */
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    IOMemoryDescriptor ** memory )
{
	return kIOReturnUnsupported;
}
1940 
1941 IOReturn
clientMemoryForType(UInt32 type,IOOptionBits * options,OSSharedPtr<IOMemoryDescriptor> & memory)1942 IOUserClient::clientMemoryForType( UInt32 type,
1943     IOOptionBits * options,
1944     OSSharedPtr<IOMemoryDescriptor>& memory )
1945 {
1946 	IOMemoryDescriptor* memoryRaw = nullptr;
1947 	IOReturn result = clientMemoryForType(type, options, &memoryRaw);
1948 	memory.reset(memoryRaw, OSNoRetain);
1949 	return result;
1950 }
1951 
#if !__LP64__
/*
 * Legacy 32-bit mapping entry point; this base implementation creates no
 * mapping.  mapClientMemory64 is the current path.
 */
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits            type,
	task_t                  task,
	IOOptionBits            mapFlags,
	IOVirtualAddress        atAddress )
{
	return NULL;
}
#endif
1963 
/*
 * Map the client memory of the given type into `task`.  Obtains the
 * descriptor from clientMemoryForType(), merges the caller's user-mapping
 * flags with the type's options, and returns the created mapping (or NULL).
 */
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits            type,
	task_t                  task,
	IOOptionBits            mapFlags,
	mach_vm_address_t       atAddress )
{
	IOReturn            err;
	IOOptionBits        options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap *       map = NULL;

	err = clientMemoryForType((UInt32) type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		FAKE_STACK_FRAME(getMetaClass());

		// Only the user-controllable option bits come from mapFlags.
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		// clientMemoryForType returned a reference; the mapping holds its
		// own, so drop ours.
		memory->release();

		FAKE_STACK_FRAME_END();
	}

	return map;
}
1991 
/*
 * Create a send right to `obj` in `task` and hand back the port name.
 * Consumes the caller's reference on obj (released here after the port
 * takes its own).
 */
IOReturn
IOUserClient::exportObjectToClient(task_t task,
    OSObject *obj, io_object_t *clientObj)
{
	mach_port_name_t    name;

	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );

	*clientObj = (io_object_t)(uintptr_t) name;

	if (obj) {
		obj->release();
	}

	return kIOReturnSuccess;
}
2008 
/*
 * Create an identity (IKOT_IOKIT_IDENT) send right to `obj` in `task` and
 * return its port name.  Unlike exportObjectToClient, the caller keeps its
 * reference on obj.
 */
IOReturn
IOUserClient::copyPortNameForObjectInTask(task_t task,
    OSObject *obj, mach_port_name_t * port_name)
{
	mach_port_name_t    name;

	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );

	*(mach_port_name_t *) port_name = name;

	return kIOReturnSuccess;
}
2021 
/*
 * Resolve an identity port name in `task` back to its object.  On success
 * *obj holds a retained reference (caller releases); on failure returns
 * kIOReturnIPCError with *obj set to NULL.
 */
IOReturn
IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
    OSObject **obj)
{
	OSObject * object;

	object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);

	*obj = object;

	return object ? kIOReturnSuccess : kIOReturnIPCError;
}
2034 
2035 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSSharedPtr<OSObject> & obj)2036 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2037     OSSharedPtr<OSObject>& obj)
2038 {
2039 	OSObject* objRaw = NULL;
2040 	IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2041 	obj.reset(objRaw, OSNoRetain);
2042 	return result;
2043 }
2044 
/* Adjust the user-reference count on a send right held by `task`. */
IOReturn
IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
	return iokit_mod_send_right(task, port_name, delta);
}
2050 
/* Deprecated dispatch hook; legacy subclasses override to vend methods. */
IOExternalMethod *
IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2056 
/* Deprecated async dispatch hook; legacy subclasses override. */
IOExternalAsyncMethod *
IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2062 
IOExternalTrap *
IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
	// Default implementation: no traps are exported; subclasses override.
	return NULL;
}
2069 
2070 #pragma clang diagnostic push
2071 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2072 
2073 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
2074 // functions can break clients of kexts implementing getExternalMethodForIndex()
2075 IOExternalMethod *
2076 IOUserClient::
getTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2077 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
2078 {
2079 	IOExternalMethod *method = getExternalMethodForIndex(index);
2080 
2081 	if (method) {
2082 		*targetP = (IOService *) method->object;
2083 	}
2084 
2085 	return method;
2086 }
2087 
2088 IOExternalMethod *
2089 IOUserClient::
getTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2090 getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2091 {
2092 	IOService* targetPRaw = NULL;
2093 	IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
2094 	targetP.reset(targetPRaw, OSRetain);
2095 	return result;
2096 }
2097 
2098 IOExternalAsyncMethod *
2099 IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2100 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
2101 {
2102 	IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
2103 
2104 	if (method) {
2105 		*targetP = (IOService *) method->object;
2106 	}
2107 
2108 	return method;
2109 }
2110 
2111 IOExternalAsyncMethod *
2112 IOUserClient::
getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2113 getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2114 {
2115 	IOService* targetPRaw = NULL;
2116 	IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
2117 	targetP.reset(targetPRaw, OSRetain);
2118 	return result;
2119 }
2120 
2121 IOExternalTrap *
2122 IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP,UInt32 index)2123 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
2124 {
2125 	IOExternalTrap *trap = getExternalTrapForIndex(index);
2126 
2127 	if (trap) {
2128 		*targetP = trap->object;
2129 	}
2130 
2131 	return trap;
2132 }
2133 #pragma clang diagnostic pop
2134 
2135 IOReturn
releaseAsyncReference64(OSAsyncReference64 reference)2136 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2137 {
2138 	mach_port_t port;
2139 	port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2140 
2141 	if (MACH_PORT_NULL != port) {
2142 		iokit_release_port_send(port);
2143 	}
2144 
2145 	return kIOReturnSuccess;
2146 }
2147 
2148 IOReturn
releaseNotificationPort(mach_port_t port)2149 IOUserClient::releaseNotificationPort(mach_port_t port)
2150 {
2151 	if (MACH_PORT_NULL != port) {
2152 		iokit_release_port_send(port);
2153 	}
2154 
2155 	return kIOReturnSuccess;
2156 }
2157 
2158 IOReturn
sendAsyncResult(OSAsyncReference reference,IOReturn result,void * args[],UInt32 numArgs)2159 IOUserClient::sendAsyncResult(OSAsyncReference reference,
2160     IOReturn result, void *args[], UInt32 numArgs)
2161 {
2162 	OSAsyncReference64  reference64;
2163 	OSBoundedArray<io_user_reference_t, kMaxAsyncArgs> args64;
2164 	unsigned int        idx;
2165 
2166 	if (numArgs > kMaxAsyncArgs) {
2167 		return kIOReturnMessageTooLarge;
2168 	}
2169 
2170 	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2171 		reference64[idx] = REF64(reference[idx]);
2172 	}
2173 
2174 	for (idx = 0; idx < numArgs; idx++) {
2175 		args64[idx] = REF64(args[idx]);
2176 	}
2177 
2178 	return sendAsyncResult64(reference64, result, args64.data(), numArgs);
2179 }
2180 
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Same as sendAsyncResult64(), but lets the caller pass delivery
	// options (e.g. kIOUserNotifyOptionCanDrop) through to the sender.
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2187 
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	// Default (no options) send: per _sendAsyncResult64, fails rather
	// than drops when the client's reply-port queue is full.
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2194 
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Build and send the async-completion Mach message for either a
	// 32-bit or a 64-bit client, selected by kIOUCAsync64Flag in
	// reference[0]. The message size is trimmed to the args actually sent.
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader     notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t                 args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64   notifyHdr;
				IOAsyncCompletionContent asyncContent;
				io_user_reference_t      args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg      replyMsg;
	mach_port_t   replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port  = NULL;
	replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit client: io_user_reference_t-sized reference and args;
		// unused trailing arg slots are subtracted from msgh_size.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		// 32-bit client: reference words and args are truncated to
		// 32 bits via REF32.
		unsigned int idx;

		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		/* Skip reference[0] which is left as 0 from the earlier bzero */
		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		// Best-effort delivery: a zero send timeout drops the message
		// when the receiver's queue is full instead of failing hard.
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	// Log only the first failure per reference; the flag OR'ed into
	// reference[0] suppresses repeats. NOTE(review): the log text names
	// _proper even when the _with_options path was taken.
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2285 
2286 
2287 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2288 
2289 extern "C" {
/*
 * CHECK: dynamic-cast 'obj' to 'cls', declaring 'out' in the enclosing
 * scope; makes the enclosing function return kIOReturnBadArgument on a
 * type mismatch.
 */
#define CHECK(cls, obj, out)                      \
	cls * out;                              \
	if( !(out = OSDynamicCast( cls, obj)))  \
	    return( kIOReturnBadArgument )

/*
 * CHECKLOCKED: like CHECK, but expects 'obj' to be an IOUserIterator
 * wrapper; declares both 'oIter' (the wrapper) and 'out' (the wrapped
 * userIteratorObject cast to 'cls').
 */
#define CHECKLOCKED(cls, obj, out)                                        \
	IOUserIterator * oIter;                                         \
	cls * out;                                                      \
	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))              \
	    return (kIOReturnBadArgument);                              \
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject)))     \
	    return (kIOReturnBadArgument)
2302 
2303 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2304 
2305 // Create a vm_map_copy_t or kalloc'ed data for memory
2306 // to be copied out. ipc will free after the copyout.
2307 
static kern_return_t
copyoutkdata( const void * data, vm_size_t len,
    io_buf_ptr_t * buf )
{
	kern_return_t       err;
	vm_map_copy_t       copy;

	// 'data' is a kernel_map address; CAST_USER_ADDR_T only widens it
	// to the address type vm_map_copyin expects — no user/kernel
	// translation occurs. src_destroy=false leaves the source mapping
	// intact.
	err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
	    false /* src_destroy */, &copy);

	assert( err == KERN_SUCCESS );
	if (err == KERN_SUCCESS) {
		// Hand the copy object back through the io_buf_ptr_t out-param;
		// ipc takes ownership and frees it after the copyout.
		*buf = (char *) copy;
	}

	return err;
}
2325 
2326 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2327 
2328 /* Routine io_server_version */
kern_return_t
is_io_server_version(
	mach_port_t main_port,
	uint64_t *version)
{
	// Report the kernel's IOKit MIG interface version; read-only, so
	// main_port is not validated here.
	*version = IOKIT_SERVER_VERSION;
	return kIOReturnSuccess;
}
2337 
2338 /* Routine io_object_get_class */
2339 kern_return_t
is_io_object_get_class(io_object_t object,io_name_t className)2340 is_io_object_get_class(
2341 	io_object_t object,
2342 	io_name_t className )
2343 {
2344 	const OSMetaClass* my_obj = NULL;
2345 
2346 	if (!object) {
2347 		return kIOReturnBadArgument;
2348 	}
2349 
2350 	my_obj = object->getMetaClass();
2351 	if (!my_obj) {
2352 		return kIOReturnNotFound;
2353 	}
2354 
2355 	strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2356 
2357 	return kIOReturnSuccess;
2358 }
2359 
2360 /* Routine io_object_get_superclass */
2361 kern_return_t
is_io_object_get_superclass(mach_port_t main_port,io_name_t obj_name,io_name_t class_name)2362 is_io_object_get_superclass(
2363 	mach_port_t main_port,
2364 	io_name_t obj_name,
2365 	io_name_t class_name)
2366 {
2367 	IOReturn            ret;
2368 	const OSMetaClass * meta;
2369 	const OSMetaClass * super;
2370 	const OSSymbol    * name;
2371 	const char        * cstr;
2372 
2373 	if (!obj_name || !class_name) {
2374 		return kIOReturnBadArgument;
2375 	}
2376 	if (main_port != main_device_port) {
2377 		return kIOReturnNotPrivileged;
2378 	}
2379 
2380 	ret = kIOReturnNotFound;
2381 	meta = NULL;
2382 	do{
2383 		name = OSSymbol::withCString(obj_name);
2384 		if (!name) {
2385 			break;
2386 		}
2387 		meta = OSMetaClass::copyMetaClassWithName(name);
2388 		if (!meta) {
2389 			break;
2390 		}
2391 		super = meta->getSuperClass();
2392 		if (!super) {
2393 			break;
2394 		}
2395 		cstr = super->getClassName();
2396 		if (!cstr) {
2397 			break;
2398 		}
2399 		strlcpy(class_name, cstr, sizeof(io_name_t));
2400 		ret = kIOReturnSuccess;
2401 	}while (false);
2402 
2403 	OSSafeReleaseNULL(name);
2404 	if (meta) {
2405 		meta->releaseMetaClass();
2406 	}
2407 
2408 	return ret;
2409 }
2410 
2411 /* Routine io_object_get_bundle_identifier */
2412 kern_return_t
is_io_object_get_bundle_identifier(mach_port_t main_port,io_name_t obj_name,io_name_t bundle_name)2413 is_io_object_get_bundle_identifier(
2414 	mach_port_t main_port,
2415 	io_name_t obj_name,
2416 	io_name_t bundle_name)
2417 {
2418 	IOReturn            ret;
2419 	const OSMetaClass * meta;
2420 	const OSSymbol    * name;
2421 	const OSSymbol    * identifier;
2422 	const char        * cstr;
2423 
2424 	if (!obj_name || !bundle_name) {
2425 		return kIOReturnBadArgument;
2426 	}
2427 	if (main_port != main_device_port) {
2428 		return kIOReturnNotPrivileged;
2429 	}
2430 
2431 	ret = kIOReturnNotFound;
2432 	meta = NULL;
2433 	do{
2434 		name = OSSymbol::withCString(obj_name);
2435 		if (!name) {
2436 			break;
2437 		}
2438 		meta = OSMetaClass::copyMetaClassWithName(name);
2439 		if (!meta) {
2440 			break;
2441 		}
2442 		identifier = meta->getKmodName();
2443 		if (!identifier) {
2444 			break;
2445 		}
2446 		cstr = identifier->getCStringNoCopy();
2447 		if (!cstr) {
2448 			break;
2449 		}
2450 		strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2451 		ret = kIOReturnSuccess;
2452 	}while (false);
2453 
2454 	OSSafeReleaseNULL(name);
2455 	if (meta) {
2456 		meta->releaseMetaClass();
2457 	}
2458 
2459 	return ret;
2460 }
2461 
2462 /* Routine io_object_conforms_to */
2463 kern_return_t
is_io_object_conforms_to(io_object_t object,io_name_t className,boolean_t * conforms)2464 is_io_object_conforms_to(
2465 	io_object_t object,
2466 	io_name_t className,
2467 	boolean_t *conforms )
2468 {
2469 	if (!object) {
2470 		return kIOReturnBadArgument;
2471 	}
2472 
2473 	*conforms = (NULL != object->metaCast( className ));
2474 
2475 	return kIOReturnSuccess;
2476 }
2477 
2478 /* Routine io_object_get_retain_count */
2479 kern_return_t
is_io_object_get_retain_count(io_object_t object,uint32_t * retainCount)2480 is_io_object_get_retain_count(
2481 	io_object_t object,
2482 	uint32_t *retainCount )
2483 {
2484 	if (!object) {
2485 		return kIOReturnBadArgument;
2486 	}
2487 
2488 	*retainCount = object->getRetainCount();
2489 	return kIOReturnSuccess;
2490 }
2491 
2492 /* Routine io_iterator_next */
2493 kern_return_t
is_io_iterator_next(io_object_t iterator,io_object_t * object)2494 is_io_iterator_next(
2495 	io_object_t iterator,
2496 	io_object_t *object )
2497 {
2498 	IOReturn    ret;
2499 	OSObject *  obj;
2500 	OSIterator * iter;
2501 	IOUserIterator * uiter;
2502 
2503 	if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2504 		obj = uiter->copyNextObject();
2505 	} else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2506 		obj = iter->getNextObject();
2507 		if (obj) {
2508 			obj->retain();
2509 		}
2510 	} else {
2511 		return kIOReturnBadArgument;
2512 	}
2513 
2514 	if (obj) {
2515 		*object = obj;
2516 		ret = kIOReturnSuccess;
2517 	} else {
2518 		ret = kIOReturnNoDevice;
2519 	}
2520 
2521 	return ret;
2522 }
2523 
2524 /* Routine io_iterator_reset */
2525 kern_return_t
is_io_iterator_reset(io_object_t iterator)2526 is_io_iterator_reset(
2527 	io_object_t iterator )
2528 {
2529 	CHECK( OSIterator, iterator, iter );
2530 
2531 	iter->reset();
2532 
2533 	return kIOReturnSuccess;
2534 }
2535 
2536 /* Routine io_iterator_is_valid */
2537 kern_return_t
is_io_iterator_is_valid(io_object_t iterator,boolean_t * is_valid)2538 is_io_iterator_is_valid(
2539 	io_object_t iterator,
2540 	boolean_t *is_valid )
2541 {
2542 	CHECK( OSIterator, iterator, iter );
2543 
2544 	*is_valid = iter->isValid();
2545 
2546 	return kIOReturnSuccess;
2547 }
2548 
2549 static kern_return_t
internal_io_service_match_property_table(io_service_t _service,const char * matching,mach_msg_type_number_t matching_size,boolean_t * matches)2550 internal_io_service_match_property_table(
2551 	io_service_t _service,
2552 	const char * matching,
2553 	mach_msg_type_number_t matching_size,
2554 	boolean_t *matches)
2555 {
2556 	CHECK( IOService, _service, service );
2557 
2558 	kern_return_t       kr;
2559 	OSObject *          obj;
2560 	OSDictionary *      dict;
2561 
2562 	assert(matching_size);
2563 
2564 
2565 	obj = OSUnserializeXML(matching, matching_size);
2566 
2567 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2568 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2569 		*matches = service->passiveMatch( dict );
2570 		kr = kIOReturnSuccess;
2571 	} else {
2572 		kr = kIOReturnBadArgument;
2573 	}
2574 
2575 	if (obj) {
2576 		obj->release();
2577 	}
2578 
2579 	return kr;
2580 }
2581 
2582 /* Routine io_service_match_property_table */
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	// Legacy in-line io_string_t variant; superseded by the _ool/_bin
	// forms and intentionally left unimplemented.
	return kIOReturnUnsupported;
}
2591 
2592 
2593 /* Routine io_service_match_property_table_ool */
kern_return_t
is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
	kern_return_t         kr;
	vm_offset_t           data;
	vm_map_offset_t       map_data;

	// Map the out-of-line matching buffer into kernel_map; a successful
	// vm_map_copyout consumes the vm_map_copy_t.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// (the real status is reported via *result instead)
		*result = internal_io_service_match_property_table(service,
		    (const char *)data, matchingCnt, matches );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2618 
2619 /* Routine io_service_match_property_table_bin */
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	// In-band variant: matching data arrives inline in the message,
	// so forward it directly.
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2629 
2630 static kern_return_t
internal_io_service_get_matching_services(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_iterator_t * existing)2631 internal_io_service_get_matching_services(
2632 	mach_port_t main_port,
2633 	const char * matching,
2634 	mach_msg_type_number_t matching_size,
2635 	io_iterator_t *existing )
2636 {
2637 	kern_return_t       kr;
2638 	OSObject *          obj;
2639 	OSDictionary *      dict;
2640 
2641 	if (main_port != main_device_port) {
2642 		return kIOReturnNotPrivileged;
2643 	}
2644 
2645 	assert(matching_size);
2646 	obj = OSUnserializeXML(matching, matching_size);
2647 
2648 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2649 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2650 		*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2651 		kr = kIOReturnSuccess;
2652 	} else {
2653 		kr = kIOReturnBadArgument;
2654 	}
2655 
2656 	if (obj) {
2657 		obj->release();
2658 	}
2659 
2660 	return kr;
2661 }
2662 
2663 /* Routine io_service_get_matching_services */
kern_return_t
is_io_service_get_matching_services(
	mach_port_t main_port,
	io_string_t matching,
	io_iterator_t *existing )
{
	// Legacy in-line io_string_t variant; superseded by the _ool/_bin
	// forms and intentionally left unimplemented.
	return kIOReturnUnsupported;
}
2672 
2673 /* Routine io_service_get_matching_services_ool */
kern_return_t
is_io_service_get_matching_services_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
	kern_return_t       kr;
	vm_offset_t         data;
	vm_map_offset_t     map_data;

	// Map the out-of-line matching buffer into kernel_map; a successful
	// vm_map_copyout consumes the vm_map_copy_t.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*existing = NULL;
		*result = internal_io_service_get_matching_services(main_port,
		    (const char *) data, matchingCnt, existing);
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2700 
2701 /* Routine io_service_get_matching_services_bin */
kern_return_t
is_io_service_get_matching_services_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
	// In-band variant: matching data arrives inline in the message,
	// so forward it directly.
	return internal_io_service_get_matching_services(main_port, matching, matchingCnt, existing);
}
2711 
2712 
2713 static kern_return_t
internal_io_service_get_matching_service(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_service_t * service)2714 internal_io_service_get_matching_service(
2715 	mach_port_t main_port,
2716 	const char * matching,
2717 	mach_msg_type_number_t matching_size,
2718 	io_service_t *service )
2719 {
2720 	kern_return_t       kr;
2721 	OSObject *          obj;
2722 	OSDictionary *      dict;
2723 
2724 	if (main_port != main_device_port) {
2725 		return kIOReturnNotPrivileged;
2726 	}
2727 
2728 	assert(matching_size);
2729 	obj = OSUnserializeXML(matching, matching_size);
2730 
2731 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2732 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2733 		*service = IOService::copyMatchingService( dict );
2734 		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2735 	} else {
2736 		kr = kIOReturnBadArgument;
2737 	}
2738 
2739 	if (obj) {
2740 		obj->release();
2741 	}
2742 
2743 	return kr;
2744 }
2745 
2746 /* Routine io_service_get_matching_service */
kern_return_t
is_io_service_get_matching_service(
	mach_port_t main_port,
	io_string_t matching,
	io_service_t *service )
{
	// Legacy in-line io_string_t variant; superseded by the _ool/_bin
	// forms and intentionally left unimplemented.
	return kIOReturnUnsupported;
}
2755 
2756 /* Routine io_service_get_matching_services_ool */
kern_return_t
is_io_service_get_matching_service_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
	kern_return_t       kr;
	vm_offset_t         data;
	vm_map_offset_t     map_data;

	// Map the out-of-line matching buffer into kernel_map; a successful
	// vm_map_copyout consumes the vm_map_copy_t.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*service = NULL;
		*result = internal_io_service_get_matching_service(main_port,
		    (const char *) data, matchingCnt, service );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2783 
2784 /* Routine io_service_get_matching_service_bin */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	// In-band variant: matching data arrives inline in the message,
	// so forward it directly.
	return internal_io_service_get_matching_service(main_port, matching, matchingCnt, service);
}
2794 
2795 static kern_return_t
internal_io_service_add_notification(mach_port_t main_port,io_name_t notification_type,const char * matching,size_t matching_size,mach_port_t port,void * reference,vm_size_t referenceSize,bool client64,io_object_t * notification)2796 internal_io_service_add_notification(
2797 	mach_port_t main_port,
2798 	io_name_t notification_type,
2799 	const char * matching,
2800 	size_t matching_size,
2801 	mach_port_t port,
2802 	void * reference,
2803 	vm_size_t referenceSize,
2804 	bool client64,
2805 	io_object_t * notification )
2806 {
2807 	IOServiceUserNotification * userNotify = NULL;
2808 	IONotifier *                notify = NULL;
2809 	const OSSymbol *            sym;
2810 	OSObject *                  obj;
2811 	OSDictionary *              dict;
2812 	IOReturn                    err;
2813 	natural_t                   userMsgType;
2814 
2815 	if (main_port != main_device_port) {
2816 		return kIOReturnNotPrivileged;
2817 	}
2818 
2819 	do {
2820 		err = kIOReturnNoResources;
2821 
2822 		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
2823 			return kIOReturnMessageTooLarge;
2824 		}
2825 
2826 		if (!(sym = OSSymbol::withCString( notification_type ))) {
2827 			err = kIOReturnNoResources;
2828 		}
2829 
2830 		assert(matching_size);
2831 		obj = OSUnserializeXML(matching, matching_size);
2832 		dict = OSDynamicCast(OSDictionary, obj);
2833 		if (!dict) {
2834 			err = kIOReturnBadArgument;
2835 			continue;
2836 		}
2837 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2838 
2839 		if ((sym == gIOPublishNotification)
2840 		    || (sym == gIOFirstPublishNotification)) {
2841 			userMsgType = kIOServicePublishNotificationType;
2842 		} else if ((sym == gIOMatchedNotification)
2843 		    || (sym == gIOFirstMatchNotification)) {
2844 			userMsgType = kIOServiceMatchedNotificationType;
2845 		} else if ((sym == gIOTerminatedNotification)
2846 		    || (sym == gIOWillTerminateNotification)) {
2847 			userMsgType = kIOServiceTerminatedNotificationType;
2848 		} else {
2849 			userMsgType = kLastIOKitNotificationType;
2850 		}
2851 
2852 		userNotify = new IOServiceUserNotification;
2853 
2854 		if (userNotify && !userNotify->init( port, userMsgType,
2855 		    reference, referenceSize, client64)) {
2856 			userNotify->release();
2857 			userNotify = NULL;
2858 		}
2859 		if (!userNotify) {
2860 			continue;
2861 		}
2862 
2863 		notify = IOService::addMatchingNotification( sym, dict,
2864 		    &userNotify->_handler, userNotify );
2865 		if (notify) {
2866 			*notification = userNotify;
2867 			userNotify->setNotification( notify );
2868 			err = kIOReturnSuccess;
2869 		} else {
2870 			err = kIOReturnUnsupported;
2871 		}
2872 	} while (false);
2873 
2874 	if ((kIOReturnSuccess != err) && userNotify) {
2875 		userNotify->setNotification(NULL);
2876 		userNotify->invalidatePort();
2877 		userNotify->release();
2878 		userNotify = NULL;
2879 	}
2880 
2881 	if (sym) {
2882 		sym->release();
2883 	}
2884 	if (obj) {
2885 		obj->release();
2886 	}
2887 
2888 	return err;
2889 }
2890 
2891 
2892 /* Routine io_service_add_notification */
kern_return_t
is_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	// Legacy in-line io_string_t variant; superseded by the _bin/_ool
	// forms and intentionally left unimplemented.
	return kIOReturnUnsupported;
}
2905 
2906 /* Routine io_service_add_notification_64 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	// Legacy in-line io_string_t variant (64-bit refs); superseded by
	// the _bin_64/_ool_64 forms and intentionally left unimplemented.
	return kIOReturnUnsupported;
}
2919 
2920 /* Routine io_service_add_notification_bin */
2921 kern_return_t
is_io_service_add_notification_bin(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)2922 is_io_service_add_notification_bin
2923 (
2924 	mach_port_t main_port,
2925 	io_name_t notification_type,
2926 	io_struct_inband_t matching,
2927 	mach_msg_type_number_t matchingCnt,
2928 	mach_port_t wake_port,
2929 	io_async_ref_t reference,
2930 	mach_msg_type_number_t referenceCnt,
2931 	io_object_t *notification)
2932 {
2933 	io_async_ref_t zreference;
2934 
2935 	if (referenceCnt > ASYNC_REF_COUNT) {
2936 		return kIOReturnBadArgument;
2937 	}
2938 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2939 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2940 
2941 	return internal_io_service_add_notification(main_port, notification_type,
2942 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2943 	           false, notification);
2944 }
2945 
2946 /* Routine io_service_add_notification_bin_64 */
2947 kern_return_t
is_io_service_add_notification_bin_64(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)2948 is_io_service_add_notification_bin_64
2949 (
2950 	mach_port_t main_port,
2951 	io_name_t notification_type,
2952 	io_struct_inband_t matching,
2953 	mach_msg_type_number_t matchingCnt,
2954 	mach_port_t wake_port,
2955 	io_async_ref64_t reference,
2956 	mach_msg_type_number_t referenceCnt,
2957 	io_object_t *notification)
2958 {
2959 	io_async_ref64_t zreference;
2960 
2961 	if (referenceCnt > ASYNC_REF64_COUNT) {
2962 		return kIOReturnBadArgument;
2963 	}
2964 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2965 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2966 
2967 	return internal_io_service_add_notification(main_port, notification_type,
2968 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2969 	           true, notification);
2970 }
2971 
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t       kr;
	vm_offset_t         data;
	vm_map_offset_t     map_data;

	// Map the out-of-line matching buffer into kernel_map; a successful
	// vm_map_copyout consumes the vm_map_copy_t.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		*result = internal_io_service_add_notification( main_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
3003 
3004 /* Routine io_service_add_notification_ool */
3005 kern_return_t
is_io_service_add_notification_ool(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3006 is_io_service_add_notification_ool(
3007 	mach_port_t main_port,
3008 	io_name_t notification_type,
3009 	io_buf_ptr_t matching,
3010 	mach_msg_type_number_t matchingCnt,
3011 	mach_port_t wake_port,
3012 	io_async_ref_t reference,
3013 	mach_msg_type_number_t referenceCnt,
3014 	kern_return_t *result,
3015 	io_object_t *notification )
3016 {
3017 	io_async_ref_t zreference;
3018 
3019 	if (referenceCnt > ASYNC_REF_COUNT) {
3020 		return kIOReturnBadArgument;
3021 	}
3022 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3023 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3024 
3025 	return internal_io_service_add_notification_ool(main_port, notification_type,
3026 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3027 	           false, result, notification);
3028 }
3029 
3030 /* Routine io_service_add_notification_ool_64 */
3031 kern_return_t
is_io_service_add_notification_ool_64(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3032 is_io_service_add_notification_ool_64(
3033 	mach_port_t main_port,
3034 	io_name_t notification_type,
3035 	io_buf_ptr_t matching,
3036 	mach_msg_type_number_t matchingCnt,
3037 	mach_port_t wake_port,
3038 	io_async_ref64_t reference,
3039 	mach_msg_type_number_t referenceCnt,
3040 	kern_return_t *result,
3041 	io_object_t *notification )
3042 {
3043 	io_async_ref64_t zreference;
3044 
3045 	if (referenceCnt > ASYNC_REF64_COUNT) {
3046 		return kIOReturnBadArgument;
3047 	}
3048 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3049 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3050 
3051 	return internal_io_service_add_notification_ool(main_port, notification_type,
3052 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3053 	           true, result, notification);
3054 }
3055 
/* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	// Legacy entry point: forward to is_io_service_add_notification with
	// the scalar ref presented as a one-element reference array.
	return is_io_service_add_notification( main_port, notification_type,
	           matching, port, &ref, 1, notification );
}
3070 
3071 
/*
 * Shared helper for the interest-notification MIG routines: wraps the
 * caller's port and async reference blob (32- or 64-bit layout per
 * client64) in an IOServiceMessageUserNotification and registers it for
 * the given interest type. On success, *notification carries the object
 * that mig exports to the caller.
 */
static kern_return_t
internal_io_service_add_interest_notification(
	io_object_t _service,
	io_name_t type_of_interest,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceMessageUserNotification *  userNotify = NULL;
	IONotifier *                        notify = NULL;
	const OSSymbol *                    sym;
	IOReturn                            err;

	CHECK( IOService, _service, service );

	err = kIOReturnNoResources;
	if ((sym = OSSymbol::withCString( type_of_interest ))) {
		// One-shot do/while: 'continue' below bails out of the block.
		do {
			userNotify = new IOServiceMessageUserNotification;

			// Discard the half-constructed object if init fails.
			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
			    reference, referenceSize, client64 )) {
				userNotify->release();
				userNotify = NULL;
			}
			if (!userNotify) {
				continue;
			}

			notify = service->registerInterest( sym,
			    &userNotify->_handler, userNotify );
			if (notify) {
				// Success: export the user notification and link it to
				// the IONotifier.
				*notification = userNotify;
				userNotify->setNotification( notify );
				err = kIOReturnSuccess;
			} else {
				err = kIOReturnUnsupported;
			}
		} while (false);

		sym->release();
	}

	// On any failure, unwind the partially constructed notification:
	// clear its notifier link, drop its port, and release it.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	return err;
}
3126 
3127 /* Routine io_service_add_message_notification */
3128 kern_return_t
is_io_service_add_interest_notification(io_object_t service,io_name_t type_of_interest,mach_port_t port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3129 is_io_service_add_interest_notification(
3130 	io_object_t service,
3131 	io_name_t type_of_interest,
3132 	mach_port_t port,
3133 	io_async_ref_t reference,
3134 	mach_msg_type_number_t referenceCnt,
3135 	io_object_t * notification )
3136 {
3137 	io_async_ref_t zreference;
3138 
3139 	if (referenceCnt > ASYNC_REF_COUNT) {
3140 		return kIOReturnBadArgument;
3141 	}
3142 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3143 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3144 
3145 	return internal_io_service_add_interest_notification(service, type_of_interest,
3146 	           port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3147 }
3148 
3149 /* Routine io_service_add_interest_notification_64 */
3150 kern_return_t
is_io_service_add_interest_notification_64(io_object_t service,io_name_t type_of_interest,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3151 is_io_service_add_interest_notification_64(
3152 	io_object_t service,
3153 	io_name_t type_of_interest,
3154 	mach_port_t wake_port,
3155 	io_async_ref64_t reference,
3156 	mach_msg_type_number_t referenceCnt,
3157 	io_object_t *notification )
3158 {
3159 	io_async_ref64_t zreference;
3160 
3161 	if (referenceCnt > ASYNC_REF64_COUNT) {
3162 		return kIOReturnBadArgument;
3163 	}
3164 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3165 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3166 
3167 	return internal_io_service_add_interest_notification(service, type_of_interest,
3168 	           wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3169 }
3170 
3171 
3172 /* Routine io_service_acknowledge_notification */
3173 kern_return_t
is_io_service_acknowledge_notification(io_object_t _service,natural_t notify_ref,natural_t response)3174 is_io_service_acknowledge_notification(
3175 	io_object_t _service,
3176 	natural_t notify_ref,
3177 	natural_t response )
3178 {
3179 	CHECK( IOService, _service, service );
3180 
3181 	return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
3182 	           (IOOptionBits) response );
3183 }
3184 
3185 /* Routine io_connect_get_semaphore */
3186 kern_return_t
is_io_connect_get_notification_semaphore(io_connect_t connection,natural_t notification_type,semaphore_t * semaphore)3187 is_io_connect_get_notification_semaphore(
3188 	io_connect_t connection,
3189 	natural_t notification_type,
3190 	semaphore_t *semaphore )
3191 {
3192 	IOReturn ret;
3193 	CHECK( IOUserClient, connection, client );
3194 
3195 	IOStatisticsClientCall();
3196 	IORWLockWrite(client->lock);
3197 	ret = client->getNotificationSemaphore((UInt32) notification_type,
3198 	    semaphore );
3199 	IORWLockUnlock(client->lock);
3200 
3201 	return ret;
3202 }
3203 
3204 /* Routine io_registry_get_root_entry */
3205 kern_return_t
is_io_registry_get_root_entry(mach_port_t main_port,io_object_t * root)3206 is_io_registry_get_root_entry(
3207 	mach_port_t main_port,
3208 	io_object_t *root )
3209 {
3210 	IORegistryEntry *   entry;
3211 
3212 	if (main_port != main_device_port) {
3213 		return kIOReturnNotPrivileged;
3214 	}
3215 
3216 	entry = IORegistryEntry::getRegistryRoot();
3217 	if (entry) {
3218 		entry->retain();
3219 	}
3220 	*root = entry;
3221 
3222 	return kIOReturnSuccess;
3223 }
3224 
3225 /* Routine io_registry_create_iterator */
3226 kern_return_t
is_io_registry_create_iterator(mach_port_t main_port,io_name_t plane,uint32_t options,io_object_t * iterator)3227 is_io_registry_create_iterator(
3228 	mach_port_t main_port,
3229 	io_name_t plane,
3230 	uint32_t options,
3231 	io_object_t *iterator )
3232 {
3233 	if (main_port != main_device_port) {
3234 		return kIOReturnNotPrivileged;
3235 	}
3236 
3237 	*iterator = IOUserIterator::withIterator(
3238 		IORegistryIterator::iterateOver(
3239 			IORegistryEntry::getPlane( plane ), options ));
3240 
3241 	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3242 }
3243 
3244 /* Routine io_registry_entry_create_iterator */
3245 kern_return_t
is_io_registry_entry_create_iterator(io_object_t registry_entry,io_name_t plane,uint32_t options,io_object_t * iterator)3246 is_io_registry_entry_create_iterator(
3247 	io_object_t registry_entry,
3248 	io_name_t plane,
3249 	uint32_t options,
3250 	io_object_t *iterator )
3251 {
3252 	CHECK( IORegistryEntry, registry_entry, entry );
3253 
3254 	*iterator = IOUserIterator::withIterator(
3255 		IORegistryIterator::iterateOver( entry,
3256 		IORegistryEntry::getPlane( plane ), options ));
3257 
3258 	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3259 }
3260 
/* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// CHECKLOCKED validates the object and binds both the wrapper
	// (oIter, which carries the lock) and the underlying
	// IORegistryIterator (iter).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Recurse into the iterator's current entry under the wrapper lock.
	IOLockLock(oIter->lock);
	iter->enterEntry();
	IOLockUnlock(oIter->lock);

	return kIOReturnSuccess;
}
3274 
/* Routine io_registry_iterator_exit */
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool        didIt;

	// CHECKLOCKED binds the wrapper (oIter, which carries the lock) and
	// the underlying IORegistryIterator (iter).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	IOLockLock(oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(oIter->lock);

	// A false return from exitEntry() is mapped to kIOReturnNoDevice.
	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
3290 
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path(
	mach_port_t main_port,
	io_string_t path,
	io_object_t *registry_entry )
{
	IORegistryEntry *   entry;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	entry = IORegistryEntry::fromPath( path );

	// Compatibility fallback: for tasks opted into registry
	// compatibility, retry the lookup as a service match on
	// gIOPathMatchKey with gIOCompatibilityMatchKey set.
	if (!entry && IOTaskRegistryCompatibility(current_task())) {
		OSDictionary * matching;
		const OSObject * objects[2] = { kOSBooleanTrue, NULL };
		const OSSymbol * keys[2]    = { gIOCompatibilityMatchKey, gIOPathMatchKey };

		objects[1] = OSString::withCStringNoCopy(path);
		matching = OSDictionary::withObjects(objects, keys, 2, 2);
		if (matching) {
			entry = IOService::copyMatchingService(matching);
		}
		OSSafeReleaseNULL(matching);
		OSSafeReleaseNULL(objects[1]);
	}

	// Note: lookup failure is reported via a NULL *registry_entry, not
	// via the return code — this routine always returns success past
	// the privilege check.
	*registry_entry = entry;

	return kIOReturnSuccess;
}
3324 
3325 
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t main_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry *   entry;
	vm_map_offset_t     map_data;
	const char *        cpath;
	IOReturn            res;
	kern_return_t       err;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry    = NULL;
	res = err = KERN_SUCCESS;
	// A non-empty inband path takes precedence; otherwise the path
	// arrives as out-of-line vm data.
	if (path[0]) {
		cpath = path;
	} else {
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		// Reject oversized ool paths before mapping them.
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// The ool path must be NUL-terminated at its last byte.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	// Release the kernel mapping established by vm_map_copyout().
	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	if (KERN_SUCCESS != err) {
		res = err;
	}
	// *result carries the lookup status; the mig-level return reflects
	// only transport/copyout failures.
	*registry_entry = entry;
	*result = res;

	return err;
}
3386 
3387 
3388 /* Routine io_registry_entry_in_plane */
3389 kern_return_t
is_io_registry_entry_in_plane(io_object_t registry_entry,io_name_t plane,boolean_t * inPlane)3390 is_io_registry_entry_in_plane(
3391 	io_object_t registry_entry,
3392 	io_name_t plane,
3393 	boolean_t *inPlane )
3394 {
3395 	CHECK( IORegistryEntry, registry_entry, entry );
3396 
3397 	*inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3398 
3399 	return kIOReturnSuccess;
3400 }
3401 
3402 
3403 /* Routine io_registry_entry_get_path */
3404 kern_return_t
is_io_registry_entry_get_path(io_object_t registry_entry,io_name_t plane,io_string_t path)3405 is_io_registry_entry_get_path(
3406 	io_object_t registry_entry,
3407 	io_name_t plane,
3408 	io_string_t path )
3409 {
3410 	int         length;
3411 	CHECK( IORegistryEntry, registry_entry, entry );
3412 
3413 	length = sizeof(io_string_t);
3414 	if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3415 		return kIOReturnSuccess;
3416 	} else {
3417 		return kIOReturnBadArgument;
3418 	}
3419 }
3420 
/* Routine io_registry_entry_get_path */
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum   { kMaxPath = 16384 };
	IOReturn err;
	int      length;
	char   * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Try the inband buffer first; fall back to an out-of-line buffer
	// (up to kMaxPath bytes) when the rendered path does not fit.
	*path_ool    = NULL;
	*path_oolCnt = 0;
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		err = kIOReturnSuccess;
	} else {
		length = kMaxPath;
		buf = IONewData(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			// Hand the path back to mig as ool data; getPath() updated
			// 'length' to the actual size used.
			*path_oolCnt = length;
			err = copyoutkdata(buf, length, path_ool);
		}
		if (buf) {
			// Free the full kMaxPath allocation regardless of how much
			// of it was used.
			IODeleteData(buf, char, kMaxPath);
		}
	}

	return err;
}
3460 
3461 
3462 /* Routine io_registry_entry_get_name */
3463 kern_return_t
is_io_registry_entry_get_name(io_object_t registry_entry,io_name_t name)3464 is_io_registry_entry_get_name(
3465 	io_object_t registry_entry,
3466 	io_name_t name )
3467 {
3468 	CHECK( IORegistryEntry, registry_entry, entry );
3469 
3470 	strncpy( name, entry->getName(), sizeof(io_name_t));
3471 
3472 	return kIOReturnSuccess;
3473 }
3474 
3475 /* Routine io_registry_entry_get_name_in_plane */
3476 kern_return_t
is_io_registry_entry_get_name_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t name)3477 is_io_registry_entry_get_name_in_plane(
3478 	io_object_t registry_entry,
3479 	io_name_t planeName,
3480 	io_name_t name )
3481 {
3482 	const IORegistryPlane * plane;
3483 	CHECK( IORegistryEntry, registry_entry, entry );
3484 
3485 	if (planeName[0]) {
3486 		plane = IORegistryEntry::getPlane( planeName );
3487 	} else {
3488 		plane = NULL;
3489 	}
3490 
3491 	strncpy( name, entry->getName( plane), sizeof(io_name_t));
3492 
3493 	return kIOReturnSuccess;
3494 }
3495 
3496 /* Routine io_registry_entry_get_location_in_plane */
3497 kern_return_t
is_io_registry_entry_get_location_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t location)3498 is_io_registry_entry_get_location_in_plane(
3499 	io_object_t registry_entry,
3500 	io_name_t planeName,
3501 	io_name_t location )
3502 {
3503 	const IORegistryPlane * plane;
3504 	CHECK( IORegistryEntry, registry_entry, entry );
3505 
3506 	if (planeName[0]) {
3507 		plane = IORegistryEntry::getPlane( planeName );
3508 	} else {
3509 		plane = NULL;
3510 	}
3511 
3512 	const char * cstr = entry->getLocation( plane );
3513 
3514 	if (cstr) {
3515 		strncpy( location, cstr, sizeof(io_name_t));
3516 		return kIOReturnSuccess;
3517 	} else {
3518 		return kIOReturnNotFound;
3519 	}
3520 }
3521 
3522 /* Routine io_registry_entry_get_registry_entry_id */
3523 kern_return_t
is_io_registry_entry_get_registry_entry_id(io_object_t registry_entry,uint64_t * entry_id)3524 is_io_registry_entry_get_registry_entry_id(
3525 	io_object_t registry_entry,
3526 	uint64_t *entry_id )
3527 {
3528 	CHECK( IORegistryEntry, registry_entry, entry );
3529 
3530 	*entry_id = entry->getRegistryEntryID();
3531 
3532 	return kIOReturnSuccess;
3533 }
3534 
3535 
3536 static OSObject *
IOCopyPropertyCompatible(IORegistryEntry * regEntry,const char * name)3537 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3538 {
3539 	OSObject     * obj;
3540 	OSObject     * compatProperties;
3541 	OSDictionary * props;
3542 
3543 	obj = regEntry->copyProperty(name);
3544 	if (obj) {
3545 		return obj;
3546 	}
3547 
3548 	compatProperties = regEntry->copyProperty(gIOUserServicePropertiesKey);
3549 	if (!compatProperties
3550 	    && IOTaskRegistryCompatibility(current_task())) {
3551 		compatProperties = regEntry->copyProperty(gIOCompatibilityPropertiesKey);
3552 	}
3553 	if (compatProperties) {
3554 		props = OSDynamicCast(OSDictionary, compatProperties);
3555 		if (props) {
3556 			obj = props->getObject(name);
3557 			if (obj) {
3558 				obj->retain();
3559 			}
3560 		}
3561 		compatProperties->release();
3562 	}
3563 
3564 	return obj;
3565 }
3566 
/* Routine io_registry_entry_get_property */
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject    *       obj;
	OSData      *       data;
	OSString    *       str;
	OSBoolean   *       boo;
	OSNumber    *       off;
	UInt64              offsetBytes;
	unsigned int        len = 0;
	const void *        bytes = NULL;
	IOReturn            ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may veto access to individual properties.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then...
	// Flatten the property into raw bytes based on its concrete type.
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		// Non-serializable OSData yields a zero-length result.
		if (!data->isSerializable()) {
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		// Include the terminating NUL in the returned length.
		len = str->getLength() + 1;
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		// Booleans are returned as the literal strings "Yes"/"No".
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		// Numbers are returned as their in-memory payload, clamped to
		// 8 bytes.
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		// On big-endian, the significant low-order bytes sit at the end
		// of the UInt64.
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		// Other property types are not representable inband.
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		// Fail rather than truncate when the caller's buffer is small.
		if (*dataCnt < len) {
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3639 
3640 
3641 /* Routine io_registry_entry_get_property */
3642 kern_return_t
is_io_registry_entry_get_property(io_object_t registry_entry,io_name_t property_name,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)3643 is_io_registry_entry_get_property(
3644 	io_object_t registry_entry,
3645 	io_name_t property_name,
3646 	io_buf_ptr_t *properties,
3647 	mach_msg_type_number_t *propertiesCnt )
3648 {
3649 	kern_return_t       err;
3650 	unsigned int        len;
3651 	OSObject *          obj;
3652 
3653 	CHECK( IORegistryEntry, registry_entry, entry );
3654 
3655 #if CONFIG_MACF
3656 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3657 		return kIOReturnNotPermitted;
3658 	}
3659 #endif
3660 
3661 	obj = IOCopyPropertyCompatible(entry, property_name);
3662 	if (!obj) {
3663 		return kIOReturnNotFound;
3664 	}
3665 
3666 	OSSerialize * s = OSSerialize::withCapacity(4096);
3667 	if (!s) {
3668 		obj->release();
3669 		return kIOReturnNoMemory;
3670 	}
3671 
3672 	if (obj->serialize( s )) {
3673 		len = s->getLength();
3674 		*propertiesCnt = len;
3675 		err = copyoutkdata( s->text(), len, properties );
3676 	} else {
3677 		err = kIOReturnUnsupported;
3678 	}
3679 
3680 	s->release();
3681 	obj->release();
3682 
3683 	return err;
3684 }
3685 
3686 /* Routine io_registry_entry_get_property_recursively */
3687 kern_return_t
is_io_registry_entry_get_property_recursively(io_object_t registry_entry,io_name_t plane,io_name_t property_name,uint32_t options,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)3688 is_io_registry_entry_get_property_recursively(
3689 	io_object_t registry_entry,
3690 	io_name_t plane,
3691 	io_name_t property_name,
3692 	uint32_t options,
3693 	io_buf_ptr_t *properties,
3694 	mach_msg_type_number_t *propertiesCnt )
3695 {
3696 	kern_return_t       err;
3697 	unsigned int        len;
3698 	OSObject *          obj;
3699 
3700 	CHECK( IORegistryEntry, registry_entry, entry );
3701 
3702 #if CONFIG_MACF
3703 	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3704 		return kIOReturnNotPermitted;
3705 	}
3706 #endif
3707 
3708 	obj = entry->copyProperty( property_name,
3709 	    IORegistryEntry::getPlane( plane ), options );
3710 	if (!obj) {
3711 		return kIOReturnNotFound;
3712 	}
3713 
3714 	OSSerialize * s = OSSerialize::withCapacity(4096);
3715 	if (!s) {
3716 		obj->release();
3717 		return kIOReturnNoMemory;
3718 	}
3719 
3720 	if (obj->serialize( s )) {
3721 		len = s->getLength();
3722 		*propertiesCnt = len;
3723 		err = copyoutkdata( s->text(), len, properties );
3724 	} else {
3725 		err = kIOReturnUnsupported;
3726 	}
3727 
3728 	s->release();
3729 	obj->release();
3730 
3731 	return err;
3732 }
3733 
/* Routine io_registry_entry_get_properties */
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// Legacy routine, no longer implemented; superseded by the
	// io_registry_entry_get_properties_bin variants below.
	return kIOReturnUnsupported;
}
3743 
3744 #if CONFIG_MACF
3745 
// Context for GetPropertiesEditor(), passed through OSSerialize's
// editor hook while serializing an entry's properties.
struct GetPropertiesEditorRef {
	kauth_cred_t      cred;   // credential each property is checked against
	IORegistryEntry * entry;  // entry whose properties are being serialized
	OSCollection    * root;   // top-level container; only its keys are filtered
};
3751 
/*
 * OSSerialize editor hook: applies the MAC get-property check to each
 * key of the root properties dictionary, dropping vetoed values from
 * the serialization. Per LIBKERN_RETURNS_RETAINED, a non-NULL return
 * carries a reference for the caller.
 */
static const LIBKERN_RETURNS_RETAINED OSMetaClassBase *
GetPropertiesEditor(void                  * reference,
    OSSerialize           * s,
    OSCollection          * container,
    const OSSymbol        * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	// The first container seen is treated as the root; only its
	// immediate keys are subject to the MAC check.
	if (!ref->root) {
		ref->root = container;
	}
	if (ref->root == container) {
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			// Vetoed: serialize nothing for this key.
			value = NULL;
		}
	}
	if (value) {
		value->retain();
	}
	return value;
}
3774 
3775 #endif /* CONFIG_MACF */
3776 
/* Routine io_registry_entry_get_properties_bin_buf */
kern_return_t
is_io_registry_entry_get_properties_bin_buf(
	io_object_t registry_entry,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t          err = kIOReturnSuccess;
	unsigned int           len;
	OSObject             * compatProperties;
	OSSerialize          * s;
	OSSerialize::Editor    editor = NULL;
	void                 * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// When MAC policy requests per-property filtering, install the
	// serializer editor that drops vetoed top-level keys
	// (see GetPropertiesEditor above).
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor    = &GetPropertiesEditor;
		editRef   = &ref;
		ref.cred  = kauth_cred_get();
		ref.entry = entry;
		ref.root  = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}


	// Compatibility: when the entry carries user-server / compatibility
	// property dictionaries, serialize a merged view of the entry's
	// properties with those overlay dictionaries folded in (and the
	// raw overlay keys themselves removed).
	compatProperties = entry->copyProperty(gIOUserServicePropertiesKey);
	if (!compatProperties
	    && IOTaskRegistryCompatibility(current_task())) {
		compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey);
	}

	if (compatProperties) {
		OSDictionary * dict;

		dict = entry->dictionaryWithProperties();
		if (!dict) {
			err = kIOReturnNoMemory;
		} else {
			dict->removeObject(gIOUserServicePropertiesKey);
			dict->removeObject(gIOCompatibilityPropertiesKey);
			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
			if (!dict->serialize(s)) {
				err = kIOReturnUnsupported;
			}
			dict->release();
		}
		compatProperties->release();
	} else if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		// Prefer the caller-supplied buffer when it is large enough;
		// otherwise fall back to out-of-line data via copyoutkdata().
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	}
	s->release();

	return err;
}
3861 
/* Routine io_registry_entry_get_properties_bin */
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	// Thin wrapper: no caller buffer, so the serialized properties are
	// always returned as out-of-line data.
	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
	           0, NULL, properties, propertiesCnt);
}
3872 
/* Routine io_registry_entry_get_property_bin_buf */
kern_return_t
is_io_registry_entry_get_property_bin_buf(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t       err;
	unsigned int        len;
	OSObject *          obj;
	const OSSymbol *    sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may veto access to the property on the root entry.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	err = kIOReturnNotFound;
	if (gIORegistryEntryPropertyKeysKey == sym) {
		// Pseudo-property: a collection of the entry's property keys.
		obj = entry->copyPropertyKeys();
	} else {
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			// Check this entry first, then walk the plane for the first
			// descendant carrying the property. Note: 'entry' is
			// re-pointed by the iterator loop below.
			obj = IOCopyPropertyCompatible(entry, property_name);
			if (obj == NULL) {
				IORegistryIterator * iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
				if (iter) {
					while ((NULL == obj) && (entry = iter->getNextObject())) {
						OSObject * currentObj = IOCopyPropertyCompatible(entry, property_name);
#if CONFIG_MACF
						if (currentObj != NULL && 0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
							// Record that MAC hook blocked this entry and property, and continue to next entry
							err = kIOReturnNotPermitted;
							OSSafeReleaseNULL(currentObj);
							continue;
						}
#endif
						obj = currentObj;
					}
					iter->release();
				}
			}
		} else {
			obj = IOCopyPropertyCompatible(entry, property_name);
		}
		// Remove-on-read properties are deleted from the entry where
		// the property was found once fetched.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		return err;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		// Prefer the caller-supplied buffer when it fits; otherwise
		// return the serialized bytes as ool data via copyoutkdata().
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3973 
3974 /* Routine io_registry_entry_get_property_bin */
3975 kern_return_t
is_io_registry_entry_get_property_bin(io_object_t registry_entry,io_name_t plane,io_name_t property_name,uint32_t options,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)3976 is_io_registry_entry_get_property_bin(
3977 	io_object_t registry_entry,
3978 	io_name_t plane,
3979 	io_name_t property_name,
3980 	uint32_t options,
3981 	io_buf_ptr_t *properties,
3982 	mach_msg_type_number_t *propertiesCnt )
3983 {
3984 	return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
3985 	           property_name, options, 0, NULL, properties, propertiesCnt);
3986 }
3987 
3988 
/* Routine io_registry_entry_set_properties */
/*
 * Unserialize a user-supplied XML property payload and apply it to a
 * registry entry. The MIG-level return (err) reports only IPC/VM failures;
 * the IOKit-level outcome is delivered through *result.
 */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject *          obj;
	kern_return_t       err;
	IOReturn            res;
	vm_offset_t         data;
	vm_map_offset_t     map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Bound the serialized payload (4096 * sizeof(io_struct_inband_t) bytes)
	// before touching the VM copy object.
	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		// (the copy object is consumed; later failures go to *result only)
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		// MAC policy may veto the whole set-properties operation.
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			IOService    * service = OSDynamicCast(IOService, entry);
			OSDictionary * props = OSDynamicCast(OSDictionary, obj);
			OSObject     * allowable = entry->copyProperty(gIORegistryEntryAllowableSetPropertiesKey);
			OSArray      * allowableArray;

			if (!allowable) {
				// No allow-list published by the entry: anything goes.
				res = kIOReturnSuccess;
			} else {
				// An allow-list exists: the payload must be a dictionary and
				// every key must appear in the (OSArray) allow-list.
				if (!props) {
					res = kIOReturnNotPermitted;
				} else if (!(allowableArray = OSDynamicCast(OSArray, allowable))) {
					res = kIOReturnNotPermitted;
				} else {
					bool allFound __block, found __block;

					allFound = true;
					props->iterateObjects(^(const OSSymbol * key, OSObject * value) {
							found = false;
							// Linear scan of the allow-list for this key.
							for (unsigned int idx = 0; !found; idx++) {
							        OSObject * next = allowableArray->getObject(idx);
							        if (!next) {
							                break;
								}
							        found = next->isEqualTo(key);
							}
							allFound &= found;
							if (!found) {
							        IOLog("IORegistryEntrySetProperties(%s, %s) disallowed due to " kIORegistryEntryAllowableSetPropertiesKey "\n",
							        entry->getName(), key->getCStringNoCopy());
							}
							// Returning true stops iteration at first miss.
							return !allFound;
						});
					res =  allFound ? kIOReturnSuccess : kIOReturnBadArgument;
				}
			}
			if (kIOReturnSuccess == res) {
				IOUserClient *
				    client = OSDynamicCast(IOUserClient, entry);

				// User clients opting into default locking serialize
				// setProperties under their rw lock.
				if (client && client->defaultLockingSetProperties) {
					IORWLockWrite(client->lock);
				}

				// Non-user-client entries may request serialization via the
				// registry property action instead.
				if (!client && (kOSBooleanTrue == entry->getProperty(gIORegistryEntryDefaultLockingSetPropertiesKey))) {
					res = entry->runPropertyActionBlock(^IOReturn (void) {
							return entry->setProperties( obj );
						});
				} else {
					res = entry->setProperties( obj );
				}

				if (client && client->defaultLockingSetProperties) {
					IORWLockUnlock(client->lock);
				}
				// DriverKit services additionally receive the dictionary in
				// the user-space driver.
				if (service && props && service->hasUserServer()) {
					res = service->UserSetProperties(props);
				}
			}
			OSSafeReleaseNULL(allowable);
		}
		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	*result = res;
	return err;
}
4102 
4103 /* Routine io_registry_entry_get_child_iterator */
4104 kern_return_t
is_io_registry_entry_get_child_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4105 is_io_registry_entry_get_child_iterator(
4106 	io_object_t registry_entry,
4107 	io_name_t plane,
4108 	io_object_t *iterator )
4109 {
4110 	CHECK( IORegistryEntry, registry_entry, entry );
4111 
4112 	*iterator = IOUserIterator::withIterator(entry->getChildIterator(
4113 		    IORegistryEntry::getPlane( plane )));
4114 
4115 	return kIOReturnSuccess;
4116 }
4117 
4118 /* Routine io_registry_entry_get_parent_iterator */
4119 kern_return_t
is_io_registry_entry_get_parent_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4120 is_io_registry_entry_get_parent_iterator(
4121 	io_object_t registry_entry,
4122 	io_name_t plane,
4123 	io_object_t *iterator)
4124 {
4125 	CHECK( IORegistryEntry, registry_entry, entry );
4126 
4127 	*iterator = IOUserIterator::withIterator(entry->getParentIterator(
4128 		    IORegistryEntry::getPlane( plane )));
4129 
4130 	return kIOReturnSuccess;
4131 }
4132 
4133 /* Routine io_service_get_busy_state */
4134 kern_return_t
is_io_service_get_busy_state(io_object_t _service,uint32_t * busyState)4135 is_io_service_get_busy_state(
4136 	io_object_t _service,
4137 	uint32_t *busyState )
4138 {
4139 	CHECK( IOService, _service, service );
4140 
4141 	*busyState = service->getBusyState();
4142 
4143 	return kIOReturnSuccess;
4144 }
4145 
4146 /* Routine io_service_get_state */
4147 kern_return_t
is_io_service_get_state(io_object_t _service,uint64_t * state,uint32_t * busy_state,uint64_t * accumulated_busy_time)4148 is_io_service_get_state(
4149 	io_object_t _service,
4150 	uint64_t *state,
4151 	uint32_t *busy_state,
4152 	uint64_t *accumulated_busy_time )
4153 {
4154 	CHECK( IOService, _service, service );
4155 
4156 	*state                 = service->getState();
4157 	*busy_state            = service->getBusyState();
4158 	*accumulated_busy_time = service->getAccumulatedBusyTime();
4159 
4160 	return kIOReturnSuccess;
4161 }
4162 
4163 /* Routine io_service_wait_quiet */
4164 kern_return_t
is_io_service_wait_quiet(io_object_t _service,mach_timespec_t wait_time)4165 is_io_service_wait_quiet(
4166 	io_object_t _service,
4167 	mach_timespec_t wait_time )
4168 {
4169 	uint64_t    timeoutNS;
4170 
4171 	CHECK( IOService, _service, service );
4172 
4173 	timeoutNS = wait_time.tv_sec;
4174 	timeoutNS *= kSecondScale;
4175 	timeoutNS += wait_time.tv_nsec;
4176 
4177 	return service->waitQuiet(timeoutNS);
4178 }
4179 
4180 /* Routine io_service_wait_quiet_with_options */
4181 kern_return_t
is_io_service_wait_quiet_with_options(io_object_t _service,mach_timespec_t wait_time,uint32_t options)4182 is_io_service_wait_quiet_with_options(
4183 	io_object_t _service,
4184 	mach_timespec_t wait_time,
4185 	uint32_t options )
4186 {
4187 	uint64_t    timeoutNS;
4188 
4189 	CHECK( IOService, _service, service );
4190 
4191 	timeoutNS = wait_time.tv_sec;
4192 	timeoutNS *= kSecondScale;
4193 	timeoutNS += wait_time.tv_nsec;
4194 
4195 	if ((options & kIOWaitQuietPanicOnFailure) && !IOCurrentTaskHasEntitlement(kIOWaitQuietPanicsEntitlement)) {
4196 		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4197 		IOLog("IOServiceWaitQuietWithOptions(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4198 		OSSafeReleaseNULL(taskName);
4199 
4200 		/* strip this option from the options before calling waitQuietWithOptions */
4201 		options &= ~kIOWaitQuietPanicOnFailure;
4202 	}
4203 
4204 	return service->waitQuietWithOptions(timeoutNS, options);
4205 }
4206 
4207 
4208 /* Routine io_service_request_probe */
4209 kern_return_t
is_io_service_request_probe(io_object_t _service,uint32_t options)4210 is_io_service_request_probe(
4211 	io_object_t _service,
4212 	uint32_t options )
4213 {
4214 	CHECK( IOService, _service, service );
4215 
4216 	return service->requestProbe( options );
4217 }
4218 
4219 /* Routine io_service_get_authorization_id */
4220 kern_return_t
is_io_service_get_authorization_id(io_object_t _service,uint64_t * authorization_id)4221 is_io_service_get_authorization_id(
4222 	io_object_t _service,
4223 	uint64_t *authorization_id )
4224 {
4225 	kern_return_t          kr;
4226 
4227 	CHECK( IOService, _service, service );
4228 
4229 	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
4230 	    kIOClientPrivilegeAdministrator );
4231 	if (kIOReturnSuccess != kr) {
4232 		return kr;
4233 	}
4234 
4235 	*authorization_id = service->getAuthorizationID();
4236 
4237 	return kr;
4238 }
4239 
4240 /* Routine io_service_set_authorization_id */
4241 kern_return_t
is_io_service_set_authorization_id(io_object_t _service,uint64_t authorization_id)4242 is_io_service_set_authorization_id(
4243 	io_object_t _service,
4244 	uint64_t authorization_id )
4245 {
4246 	CHECK( IOService, _service, service );
4247 
4248 	return service->setAuthorizationID( authorization_id );
4249 }
4250 
/* Routine io_service_open_ndr */
/*
 * Open a connection (user client) to a service on behalf of owningTask.
 * The MIG-level return (err) is always KERN_SUCCESS here unless CHECK fails;
 * the IOKit-level outcome is delivered via *result, the new user client via
 * *connection. Performs, in order: argument/task validation, MACF service
 * check, newUserClient creation, per-client lock/property initialization,
 * entitlement enforcement, MACF open check, sandbox filter resolution, and
 * owner registration — unwinding the client on any failure.
 */
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = NULL;
	kern_return_t  err = KERN_SUCCESS;
	IOReturn       res = kIOReturnSuccess;
	OSDictionary * propertiesDict = NULL;
	bool           disallowAccess = false;

	CHECK( IOService, _service, service );

	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	// Only the calling task may open on its own behalf.
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

#if CONFIG_MACF
	// MAC policy gets first refusal on opening this service at all.
	if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
		return kIOReturnNotPermitted;
	}
#endif
	do{
		// Property payloads on open are not currently supported (see the
		// disabled block below for the former implementation).
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		{
			OSObject *      obj;
			vm_offset_t     data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess && OSDynamicCast(IOUserClient, client) == NULL) {
			// client should always be a IOUserClient
			res = kIOReturnError;
		}

		if (res == kIOReturnSuccess) {
			// Ensure the expansion data (reserved) exists before use below.
			if (!client->reserved) {
				if (!client->reserve()) {
					client->clientClose();
					OSSafeReleaseNULL(client);
					res = kIOReturnNoMemory;
				}
			}
		}

		if (res == kIOReturnSuccess) {
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
			}
			const char * creatorNameCStr = creatorName ? creatorName->getCStringNoCopy() : "<unknown>";
			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
			// Shared-instance clients can be opened concurrently; serialize
			// the one-time initialization below under the owners lock.
			if (client->sharedInstance) {
				IOLockLock(gIOUserClientOwnersLock);
			}
			if (!client->lock) {
				// First open of this client: create locks and latch the
				// locking-behavior properties.
				client->lock       = IORWLockAlloc();
				client->filterLock = IOLockAlloc();

				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
				{
					OSObject * obj;
					extern const OSSymbol * gIOSurfaceIdentifier;
					obj = client->getProperty(kIOUserClientDefaultLockingKey);
					bool hasProps = false;

					// IOUserClient2022 subclasses must declare all three
					// locking properties explicitly; older classes may omit
					// them.
					client->uc2022 = (NULL != OSDynamicCast(IOUserClient2022, client));
					if (obj) {
						hasProps = true;
						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSetProperties = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSingleThreadExternalMethod = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					if (kIOReturnSuccess != res) {
						IOLog("IOUC %s requires kIOUserClientDefaultLockingKey, kIOUserClientDefaultLockingSetPropertiesKey, kIOUserClientDefaultLockingSingleThreadExternalMethodKey\n",
						    client->getMetaClass()->getClassName());
					}
					if (!hasProps) {
						// No explicit locking properties: default to locked
						// behavior, except for kexts depending on IOSurface.
						const OSMetaClass * meta;
						OSKext            * kext;
						meta = client->getMetaClass();
						kext = meta->getKext();
						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
							client->defaultLocking = true;
							client->defaultLockingSetProperties = false;
							client->defaultLockingSingleThreadExternalMethod = false;
							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
						}
					}
				}
			}
			if (client->sharedInstance) {
				IOLockUnlock(gIOUserClientOwnersLock);
			}

			OSObject     * requiredEntitlement = client->copyProperty(gIOUserClientEntitlementsKey);
			OSString * requiredEntitlementString = OSDynamicCast(OSString, requiredEntitlement);
			//If this is an IOUserClient2022, having kIOUserClientEntitlementsKey is mandatory.
			//If it has kIOUserClientEntitlementsKey, the value must be either kOSBooleanFalse or an OSString
			//If the value is kOSBooleanFalse, we allow access.
			//If the value is an OSString, we allow access if the task has the named entitlement
			if (client->uc2022) {
				if (!requiredEntitlement) {
					IOLog("IOUC %s missing " kIOUserClientEntitlementsKey " property\n",
					    client->getMetaClass()->getClassName());
					disallowAccess = true;
				} else if (!requiredEntitlementString && requiredEntitlement != kOSBooleanFalse) {
					// NOTE(review): log string lacks a space before "with".
					IOLog("IOUC %s had " kIOUserClientEntitlementsKey "with value not boolean false or string\n", client->getMetaClass()->getClassName());
					disallowAccess = true;
				}
			}

			if (requiredEntitlement && disallowAccess == false) {
				if (kOSBooleanFalse == requiredEntitlement) {
					// allow
					disallowAccess = false;
				} else {
					disallowAccess = !IOTaskHasEntitlement(owningTask, requiredEntitlementString->getCStringNoCopy());
					if (disallowAccess) {
						IOLog("IOUC %s missing entitlement in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			OSSafeReleaseNULL(requiredEntitlement);

			if (disallowAccess) {
				res = kIOReturnNotPrivileged;
			}
#if CONFIG_MACF
			// MAC policy also checks the specific client object being opened.
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				IOLog("IOUC %s failed MACF in process %s\n",
				    client->getMetaClass()->getClassName(), creatorNameCStr);
				res = kIOReturnNotPermitted;
			}
#endif

			// Resolve a sandbox filter policy for this task, once per task;
			// kIOReturnUnsupported from the resolver means "no filter".
			if ((kIOReturnSuccess == res)
			    && gIOUCFilterCallbacks
			    && gIOUCFilterCallbacks->io_filter_resolver) {
				io_filter_policy_t filterPolicy;
				filterPolicy = client->filterForTask(owningTask, 0);
				if (!filterPolicy) {
					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
					if (kIOReturnUnsupported == res) {
						res = kIOReturnSuccess;
					} else if (kIOReturnSuccess == res) {
						client->filterForTask(owningTask, filterPolicy);
					} else {
						IOLog("IOUC %s failed sandbox in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}
			OSSafeReleaseNULL(creatorName);

			if (kIOReturnSuccess != res) {
				// Unwind: close and drop the client; *connection will be NULL.
				IOStatisticsClientCall();
				client->clientClose();
				client->setTerminateDefer(service, false);
				client->release();
				client = NULL;
				break;
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	*connection = client;
	*result = res;

	return err;
}
4487 
/* Routine io_service_close */
/*
 * Close a user-client connection. Non-shared clients are closed at most
 * once (enforced by an atomic 0->1 swap on client->closed); shared-instance
 * clients may be closed repeatedly. clientClose runs under the client's
 * write lock.
 */
kern_return_t
is_io_service_close(
	io_object_t connection )
{
	OSSet * mappings;
	// The port may actually name a mappings OSSet (see the map-memory path,
	// which pushes map names to other tasks); closing that is a no-op.
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	// Shared instances always close; otherwise atomically claim the single
	// allowed close (0 -> 1) and ignore duplicates.
	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		IORWLockWrite(client->lock);
		client->clientClose();
		IORWLockUnlock(client->lock);
	} else {
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
4513 
4514 /* Routine io_connect_get_service */
4515 kern_return_t
is_io_connect_get_service(io_object_t connection,io_object_t * service)4516 is_io_connect_get_service(
4517 	io_object_t connection,
4518 	io_object_t *service )
4519 {
4520 	IOService * theService;
4521 
4522 	CHECK( IOUserClient, connection, client );
4523 
4524 	theService = client->getService();
4525 	if (theService) {
4526 		theService->retain();
4527 	}
4528 
4529 	*service = theService;
4530 
4531 	return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4532 }
4533 
4534 /* Routine io_connect_set_notification_port */
4535 kern_return_t
is_io_connect_set_notification_port(io_object_t connection,uint32_t notification_type,mach_port_t port,uint32_t reference)4536 is_io_connect_set_notification_port(
4537 	io_object_t connection,
4538 	uint32_t notification_type,
4539 	mach_port_t port,
4540 	uint32_t reference)
4541 {
4542 	kern_return_t ret;
4543 	CHECK( IOUserClient, connection, client );
4544 
4545 	IOStatisticsClientCall();
4546 	IORWLockWrite(client->lock);
4547 	ret = client->registerNotificationPort( port, notification_type,
4548 	    (io_user_reference_t) reference );
4549 	IORWLockUnlock(client->lock);
4550 	return ret;
4551 }
4552 
4553 /* Routine io_connect_set_notification_port */
4554 kern_return_t
is_io_connect_set_notification_port_64(io_object_t connection,uint32_t notification_type,mach_port_t port,io_user_reference_t reference)4555 is_io_connect_set_notification_port_64(
4556 	io_object_t connection,
4557 	uint32_t notification_type,
4558 	mach_port_t port,
4559 	io_user_reference_t reference)
4560 {
4561 	kern_return_t ret;
4562 	CHECK( IOUserClient, connection, client );
4563 
4564 	IOStatisticsClientCall();
4565 	IORWLockWrite(client->lock);
4566 	ret = client->registerNotificationPort( port, notification_type,
4567 	    reference );
4568 	IORWLockUnlock(client->lock);
4569 	return ret;
4570 }
4571 
/* Routine io_connect_map_memory_into_task */
/*
 * Map a client memory region (selected by memory_type) into into_task.
 * On success *address / *size describe the mapping. The IOMemoryMap's
 * ownership then depends on who holds it: mappings into other tasks (or by
 * shared-instance clients) get a send right pushed to the owning task so
 * they can be cleaned up; mappings into the current task are tracked in the
 * client's own mappings set.
 */
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
)
{
	IOReturn            err;
	IOMemoryMap *       map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	// Serialize with other client operations when default locking is on.
	if (client->defaultLocking) {
		IORWLockWrite(client->lock);
	}
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );
	if (client->defaultLocking) {
		IORWLockUnlock(client->lock);
	}

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			// The port now holds the reference; drop ours.
			map->release();
		} else {
			// keep it with the user client
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				// Lazily create the set tracking this client's mappings.
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			// The set (if created) retains the map; drop our reference.
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	return err;
}
4635 
4636 /* Routine is_io_connect_map_memory */
4637 kern_return_t
is_io_connect_map_memory(io_object_t connect,uint32_t type,task_t task,uint32_t * mapAddr,uint32_t * mapSize,uint32_t flags)4638 is_io_connect_map_memory(
4639 	io_object_t     connect,
4640 	uint32_t        type,
4641 	task_t          task,
4642 	uint32_t  *     mapAddr,
4643 	uint32_t  *     mapSize,
4644 	uint32_t        flags )
4645 {
4646 	IOReturn          err;
4647 	mach_vm_address_t address;
4648 	mach_vm_size_t    size;
4649 
4650 	address = SCALAR64(*mapAddr);
4651 	size    = SCALAR64(*mapSize);
4652 
4653 	err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4654 
4655 	*mapAddr = SCALAR32(address);
4656 	*mapSize = SCALAR32(size);
4657 
4658 	return err;
4659 }
4660 } /* extern "C" */
4661 
4662 IOMemoryMap *
removeMappingForDescriptor(IOMemoryDescriptor * mem)4663 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4664 {
4665 	OSIterator *  iter;
4666 	IOMemoryMap * map = NULL;
4667 
4668 	IOLockLock(gIOObjectPortLock);
4669 
4670 	iter = OSCollectionIterator::withCollection(mappings);
4671 	if (iter) {
4672 		while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4673 			if (mem == map->getMemoryDescriptor()) {
4674 				map->retain();
4675 				mappings->removeObject(map);
4676 				break;
4677 			}
4678 		}
4679 		iter->release();
4680 	}
4681 
4682 	IOLockUnlock(gIOObjectPortLock);
4683 
4684 	return map;
4685 }
4686 
4687 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
/*
 * Unmap a previously mapped client memory region (selected by memory_type)
 * at 'address' in from_task. Re-creates the IOMemoryMap via a kIOMapReference
 * mapping to locate the existing map, removes it from the client's tracked
 * mappings, then tears down the map and the send right that named it in the
 * owning task.
 */
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn            err;
	IOOptionBits        options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap *       map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	// Serialize with other client operations when default locking is on.
	if (client->defaultLocking) {
		IORWLockWrite(client->lock);
	}
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );
	if (client->defaultLocking) {
		IORWLockUnlock(client->lock);
	}

	if (memory && (kIOReturnSuccess == err)) {
		// kIOMapReference makes createMappingInTask look up the existing
		// mapping at 'address' rather than creating a new one.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			// Drop it from the client's tracked mappings (current-task case).
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance;
			if (is_shared_instance_or_from_current_task) {
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				map->userClientUnmap();
				// Drop the ref we just made plus the one the map port held.
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (!is_shared_instance_or_from_current_task) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	return err;
}
4754 
4755 kern_return_t
is_io_connect_unmap_memory(io_object_t connect,uint32_t type,task_t task,uint32_t mapAddr)4756 is_io_connect_unmap_memory(
4757 	io_object_t     connect,
4758 	uint32_t        type,
4759 	task_t          task,
4760 	uint32_t        mapAddr )
4761 {
4762 	IOReturn            err;
4763 	mach_vm_address_t   address;
4764 
4765 	address = SCALAR64(mapAddr);
4766 
4767 	err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4768 
4769 	return err;
4770 }
4771 
4772 
4773 /* Routine io_connect_add_client */
4774 kern_return_t
is_io_connect_add_client(io_object_t connection,io_object_t connect_to)4775 is_io_connect_add_client(
4776 	io_object_t connection,
4777 	io_object_t connect_to)
4778 {
4779 	CHECK( IOUserClient, connection, client );
4780 	CHECK( IOUserClient, connect_to, to );
4781 
4782 	IOReturn ret;
4783 
4784 	IOStatisticsClientCall();
4785 	if (client->defaultLocking) {
4786 		IORWLockWrite(client->lock);
4787 	}
4788 	ret = client->connectClient( to );
4789 	if (client->defaultLocking) {
4790 		IORWLockUnlock(client->lock);
4791 	}
4792 	return ret;
4793 }
4794 
4795 
4796 /* Routine io_connect_set_properties */
4797 kern_return_t
is_io_connect_set_properties(io_object_t connection,io_buf_ptr_t properties,mach_msg_type_number_t propertiesCnt,kern_return_t * result)4798 is_io_connect_set_properties(
4799 	io_object_t connection,
4800 	io_buf_ptr_t properties,
4801 	mach_msg_type_number_t propertiesCnt,
4802 	kern_return_t * result)
4803 {
4804 	return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4805 }
4806 
/* Routine io_user_client_method */
/*
 * Synchronous external-method call with variable-sized (out-of-line) output.
 * Marshals scalar/inband/ool inputs into IOExternalMethodArguments, applies
 * any sandbox filter for the calling task, invokes the method, then returns
 * scalar/inband output in place and any variable output (OSSerialize or
 * OSData set by the method) via copyoutkdata into *var_output.
 */
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD  = NULL;
	OSObject *           structureVariableOutputData = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Synchronous call: no async wake port / reference.
	args.asyncWakePort               = MACH_PORT_NULL;
	args.asyncReference              = NULL;
	args.asyncReferenceCount         = 0;
	// The method deposits its variable-sized result here.
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line input must be larger than the inband limit, otherwise the
	// caller should have sent it inband.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		// Copy-on-write wrap of the caller's buffer.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero the scalar output area so stale data never leaks back.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;

	// Sandbox filter gets to veto this selector before dispatch.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}

	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod(selector, &args);
	}

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData      * data;
		unsigned int  len;

		// The method may supply either an OSSerialize or an OSData; anything
		// else (including nothing) is an underrun.
		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			data->clipForCopyout();
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
4913 
/* Routine io_user_client_method */
/*
 * MIG server routine for the synchronous external-method call path.
 * Marshals the user-supplied scalar/struct/out-of-line buffers into an
 * IOExternalMethodArguments and invokes the user client's external method,
 * after consulting any per-task message filter policy.
 *
 * Returns kIOReturnIPCError for inconsistent OOL sizes, otherwise the
 * result of the filter or of callExternalMethod().
 */
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
)
{
	/* Verifies `connection` is really an IOUserClient; declares `client`. */
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD  = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	/* Zero the reserved tail so stale stack contents never reach drivers. */
	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	/* Synchronous call: no async wake port or reference. */
	args.asyncWakePort               = MACH_PORT_NULL;
	args.asyncReference              = NULL;
	args.asyncReferenceCount         = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/*
	 * OOL buffers are only legitimate when the payload is too large to have
	 * been sent inband; reject smaller ones as a malformed/forged message.
	 */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		/* structureOutputDescriptorSize is 32-bit; refuse sizes that would truncate. */
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		/* Copy-on-write mapping of the caller's input range. */
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Pre-clear output scalars so no uninitialized data can be returned. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size
	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
	    : 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	/* Give any installed per-task message filter a chance to deny this selector. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	/* Propagate the (possibly driver-adjusted) output sizes back to the caller. */
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size  = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5016 
/* Routine io_async_user_client_method */
/*
 * MIG server routine for the asynchronous external-method call path.
 * Identical marshaling to is_io_connect_method, plus the async wake port
 * and reference words: reference[0] is overwritten with the wake port
 * (tagged with kIOUCAsync64Flag for 64-bit tasks) before dispatch.
 */
kern_return_t
is_io_connect_async_method
(
	io_connect_t connection,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t * ool_output_size
)
{
	/* Verifies `connection` is really an IOUserClient; declares `client`. */
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD  = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	/* reference[0] is about to be overwritten below, so at least one slot is required. */
	if (referenceCnt < 1) {
		return kIOReturnBadArgument;
	}

	/* Zero the reserved tail so stale stack contents never reach drivers. */
	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	/* Stash the wake port in slot 0; flag 64-bit callers for later decode. */
	reference[0]             = (io_user_reference_t) wake_port;
	if (vm_map_is_64bit(get_task_map(current_task()))) {
		reference[0]         |= kIOUCAsync64Flag;
	}

	args.selector = selector;

	args.asyncWakePort       = wake_port;
	args.asyncReference      = reference;
	args.asyncReferenceCount = referenceCnt;

	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/*
	 * OOL buffers are only legitimate when the payload is too large to have
	 * been sent inband; reject smaller ones as a malformed/forged message.
	 */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		/* structureOutputDescriptorSize is 32-bit; refuse sizes that would truncate. */
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		/* Copy-on-write mapping of the caller's input range. */
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Pre-clear output scalars so no uninitialized data can be returned. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	/* NOTE(review): ool_output_size is dereferenced unconditionally here —
	 * presumably MIG guarantees it non-NULL; the sync variant guards it. */
	args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	/* Give any installed per-task message filter a chance to deny this selector. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_async_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	/* Propagate the (possibly driver-adjusted) output sizes back to the caller. */
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size  = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5130 
5131 /* Routine io_connect_method_scalarI_scalarO */
5132 kern_return_t
is_io_connect_method_scalarI_scalarO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5133 is_io_connect_method_scalarI_scalarO(
5134 	io_object_t        connect,
5135 	uint32_t           index,
5136 	io_scalar_inband_t       input,
5137 	mach_msg_type_number_t   inputCount,
5138 	io_scalar_inband_t       output,
5139 	mach_msg_type_number_t * outputCount )
5140 {
5141 	IOReturn err;
5142 	uint32_t i;
5143 	io_scalar_inband64_t _input;
5144 	io_scalar_inband64_t _output;
5145 
5146 	mach_msg_type_number_t struct_outputCnt = 0;
5147 	mach_vm_size_t ool_output_size = 0;
5148 
5149 	bzero(&_output[0], sizeof(_output));
5150 	for (i = 0; i < inputCount; i++) {
5151 		_input[i] = SCALAR64(input[i]);
5152 	}
5153 
5154 	err = is_io_connect_method(connect, index,
5155 	    _input, inputCount,
5156 	    NULL, 0,
5157 	    0, 0,
5158 	    NULL, &struct_outputCnt,
5159 	    _output, outputCount,
5160 	    0, &ool_output_size);
5161 
5162 	for (i = 0; i < *outputCount; i++) {
5163 		output[i] = SCALAR32(_output[i]);
5164 	}
5165 
5166 	return err;
5167 }
5168 
/*
 * Legacy shim: dispatches an old-style IOExternalMethod table entry taking
 * up to six scalar inputs and producing up to six scalar outputs.  The
 * method's count0/count1 are the exact expected input/output scalar counts;
 * a mismatch is rejected with kIOReturnBadArgument (logged and traced).
 */
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod *      method,
	IOService *             object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t   inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOMethod            func;
	io_scalar_inband_t  _output;
	IOReturn            err;
	err = kIOReturnBadArgument;

	/* Clear scratch outputs so unwritten slots copy back as zero. */
	bzero(&_output[0], sizeof(_output));
	do {
		/* Old-style methods demand an exact input-scalar count. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* ...and an exact output-scalar count. */
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * The legacy ABI passes six void*-sized arguments: the first
		 * inputCount are the (narrowed) input scalars, the remainder are
		 * pointers to output slots.  Dispatch on the input count.
		 */
		switch (inputCount) {
		case 6:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)(  ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)(  &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Widen the scratch outputs into the caller's scalar buffer. */
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5244 
5245 /* Routine io_async_method_scalarI_scalarO */
5246 kern_return_t
is_io_async_method_scalarI_scalarO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5247 is_io_async_method_scalarI_scalarO(
5248 	io_object_t        connect,
5249 	mach_port_t wake_port,
5250 	io_async_ref_t reference,
5251 	mach_msg_type_number_t referenceCnt,
5252 	uint32_t           index,
5253 	io_scalar_inband_t       input,
5254 	mach_msg_type_number_t   inputCount,
5255 	io_scalar_inband_t       output,
5256 	mach_msg_type_number_t * outputCount )
5257 {
5258 	IOReturn err;
5259 	uint32_t i;
5260 	io_scalar_inband64_t _input;
5261 	io_scalar_inband64_t _output;
5262 	io_async_ref64_t _reference;
5263 
5264 	if (referenceCnt > ASYNC_REF64_COUNT) {
5265 		return kIOReturnBadArgument;
5266 	}
5267 	bzero(&_output[0], sizeof(_output));
5268 	for (i = 0; i < referenceCnt; i++) {
5269 		_reference[i] = REF64(reference[i]);
5270 	}
5271 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5272 
5273 	mach_msg_type_number_t struct_outputCnt = 0;
5274 	mach_vm_size_t ool_output_size = 0;
5275 
5276 	for (i = 0; i < inputCount; i++) {
5277 		_input[i] = SCALAR64(input[i]);
5278 	}
5279 
5280 	err = is_io_connect_async_method(connect,
5281 	    wake_port, _reference, referenceCnt,
5282 	    index,
5283 	    _input, inputCount,
5284 	    NULL, 0,
5285 	    0, 0,
5286 	    NULL, &struct_outputCnt,
5287 	    _output, outputCount,
5288 	    0, &ool_output_size);
5289 
5290 	for (i = 0; i < *outputCount; i++) {
5291 		output[i] = SCALAR32(_output[i]);
5292 	}
5293 
5294 	return err;
5295 }
5296 /* Routine io_async_method_scalarI_structureO */
5297 kern_return_t
is_io_async_method_scalarI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5298 is_io_async_method_scalarI_structureO(
5299 	io_object_t     connect,
5300 	mach_port_t wake_port,
5301 	io_async_ref_t reference,
5302 	mach_msg_type_number_t referenceCnt,
5303 	uint32_t        index,
5304 	io_scalar_inband_t input,
5305 	mach_msg_type_number_t  inputCount,
5306 	io_struct_inband_t              output,
5307 	mach_msg_type_number_t *        outputCount )
5308 {
5309 	uint32_t i;
5310 	io_scalar_inband64_t _input;
5311 	io_async_ref64_t _reference;
5312 
5313 	if (referenceCnt > ASYNC_REF64_COUNT) {
5314 		return kIOReturnBadArgument;
5315 	}
5316 	for (i = 0; i < referenceCnt; i++) {
5317 		_reference[i] = REF64(reference[i]);
5318 	}
5319 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5320 
5321 	mach_msg_type_number_t scalar_outputCnt = 0;
5322 	mach_vm_size_t ool_output_size = 0;
5323 
5324 	for (i = 0; i < inputCount; i++) {
5325 		_input[i] = SCALAR64(input[i]);
5326 	}
5327 
5328 	return is_io_connect_async_method(connect,
5329 	           wake_port, _reference, referenceCnt,
5330 	           index,
5331 	           _input, inputCount,
5332 	           NULL, 0,
5333 	           0, 0,
5334 	           output, outputCount,
5335 	           NULL, &scalar_outputCnt,
5336 	           0, &ool_output_size);
5337 }
5338 
5339 /* Routine io_async_method_scalarI_structureI */
5340 kern_return_t
is_io_async_method_scalarI_structureI(io_connect_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5341 is_io_async_method_scalarI_structureI(
5342 	io_connect_t            connect,
5343 	mach_port_t wake_port,
5344 	io_async_ref_t reference,
5345 	mach_msg_type_number_t referenceCnt,
5346 	uint32_t                index,
5347 	io_scalar_inband_t      input,
5348 	mach_msg_type_number_t  inputCount,
5349 	io_struct_inband_t      inputStruct,
5350 	mach_msg_type_number_t  inputStructCount )
5351 {
5352 	uint32_t i;
5353 	io_scalar_inband64_t _input;
5354 	io_async_ref64_t _reference;
5355 
5356 	if (referenceCnt > ASYNC_REF64_COUNT) {
5357 		return kIOReturnBadArgument;
5358 	}
5359 	for (i = 0; i < referenceCnt; i++) {
5360 		_reference[i] = REF64(reference[i]);
5361 	}
5362 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5363 
5364 	mach_msg_type_number_t scalar_outputCnt = 0;
5365 	mach_msg_type_number_t inband_outputCnt = 0;
5366 	mach_vm_size_t ool_output_size = 0;
5367 
5368 	for (i = 0; i < inputCount; i++) {
5369 		_input[i] = SCALAR64(input[i]);
5370 	}
5371 
5372 	return is_io_connect_async_method(connect,
5373 	           wake_port, _reference, referenceCnt,
5374 	           index,
5375 	           _input, inputCount,
5376 	           inputStruct, inputStructCount,
5377 	           0, 0,
5378 	           NULL, &inband_outputCnt,
5379 	           NULL, &scalar_outputCnt,
5380 	           0, &ool_output_size);
5381 }
5382 
5383 /* Routine io_async_method_structureI_structureO */
5384 kern_return_t
is_io_async_method_structureI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5385 is_io_async_method_structureI_structureO(
5386 	io_object_t     connect,
5387 	mach_port_t wake_port,
5388 	io_async_ref_t reference,
5389 	mach_msg_type_number_t referenceCnt,
5390 	uint32_t        index,
5391 	io_struct_inband_t              input,
5392 	mach_msg_type_number_t  inputCount,
5393 	io_struct_inband_t              output,
5394 	mach_msg_type_number_t *        outputCount )
5395 {
5396 	uint32_t i;
5397 	mach_msg_type_number_t scalar_outputCnt = 0;
5398 	mach_vm_size_t ool_output_size = 0;
5399 	io_async_ref64_t _reference;
5400 
5401 	if (referenceCnt > ASYNC_REF64_COUNT) {
5402 		return kIOReturnBadArgument;
5403 	}
5404 	for (i = 0; i < referenceCnt; i++) {
5405 		_reference[i] = REF64(reference[i]);
5406 	}
5407 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5408 
5409 	return is_io_connect_async_method(connect,
5410 	           wake_port, _reference, referenceCnt,
5411 	           index,
5412 	           NULL, 0,
5413 	           input, inputCount,
5414 	           0, 0,
5415 	           output, outputCount,
5416 	           NULL, &scalar_outputCnt,
5417 	           0, &ool_output_size);
5418 }
5419 
5420 
/*
 * Legacy async shim: dispatches an old-style IOExternalAsyncMethod table
 * entry taking up to six scalar inputs and producing up to six scalar
 * outputs.  The narrowed async reference is passed as the first argument.
 * count0/count1 are the exact expected input/output scalar counts; a
 * mismatch is rejected with kIOReturnBadArgument (logged and traced).
 */
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t   inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	io_scalar_inband_t  _output;
	IOReturn            err;
	io_async_ref_t      reference;

	/* Clear scratch outputs, then narrow the async reference for the old ABI. */
	bzero(&_output[0], sizeof(_output));
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	do {
		/* Old-style methods demand an exact input-scalar count. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* ...and an exact output-scalar count. */
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * The legacy async ABI takes the reference plus six void*-sized
		 * arguments: the first inputCount are the (narrowed) input scalars,
		 * the remainder are pointers to output slots.
		 */
		switch (inputCount) {
		case 6:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)(  reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Widen the scratch outputs into the caller's scalar buffer. */
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5512 
5513 
5514 /* Routine io_connect_method_scalarI_structureO */
5515 kern_return_t
is_io_connect_method_scalarI_structureO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5516 is_io_connect_method_scalarI_structureO(
5517 	io_object_t     connect,
5518 	uint32_t        index,
5519 	io_scalar_inband_t input,
5520 	mach_msg_type_number_t  inputCount,
5521 	io_struct_inband_t              output,
5522 	mach_msg_type_number_t *        outputCount )
5523 {
5524 	uint32_t i;
5525 	io_scalar_inband64_t _input;
5526 
5527 	mach_msg_type_number_t scalar_outputCnt = 0;
5528 	mach_vm_size_t ool_output_size = 0;
5529 
5530 	for (i = 0; i < inputCount; i++) {
5531 		_input[i] = SCALAR64(input[i]);
5532 	}
5533 
5534 	return is_io_connect_method(connect, index,
5535 	           _input, inputCount,
5536 	           NULL, 0,
5537 	           0, 0,
5538 	           output, outputCount,
5539 	           NULL, &scalar_outputCnt,
5540 	           0, &ool_output_size);
5541 }
5542 
/*
 * Legacy shim: dispatches an old-style IOExternalMethod entry taking up to
 * five scalar inputs and returning a structure.  count0 is the exact
 * input-scalar count; count1 is the expected struct size, or
 * kIOUCVariableStructureSize for variable-length output (in which case the
 * method receives outputCount to update).
 */
kern_return_t
shim_io_connect_method_scalarI_structureO(

	IOExternalMethod *      method,
	IOService *             object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	IOByteCount *   outputCount )
{
	IOMethod            func;
	IOReturn            err;

	err = kIOReturnBadArgument;

	do {
		/* Old-style methods demand an exact input-scalar count. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Output size must match exactly unless the method is variable-size. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Legacy ABI: input scalars first, then the output buffer; when
		 * fewer than five scalars are used, outputCount is passed (as a
		 * void*) so variable-size methods can report the produced length.
		 */
		switch (inputCount) {
		case 5:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5607 
5608 
/*
 * Legacy async shim: dispatches an old-style IOExternalAsyncMethod entry
 * taking up to five scalar inputs and returning a structure.  The narrowed
 * async reference is passed first.  count0 is the exact input-scalar count;
 * count1 is the expected struct size or kIOUCVariableStructureSize.
 */
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	mach_msg_type_number_t *        outputCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err;
	io_async_ref_t      reference;

	/* Narrow the async reference words for the old 32-bit ABI. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do {
		/* Old-style methods demand an exact input-scalar count. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Output size must match exactly unless the method is variable-size. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Legacy ABI: reference, then input scalars, then the output buffer;
		 * when fewer than five scalars are used, outputCount is passed (as a
		 * void*) so variable-size methods can report the produced length.
		 */
		switch (inputCount) {
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  reference,
			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5686 
5687 /* Routine io_connect_method_scalarI_structureI */
5688 kern_return_t
is_io_connect_method_scalarI_structureI(io_connect_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5689 is_io_connect_method_scalarI_structureI(
5690 	io_connect_t            connect,
5691 	uint32_t                index,
5692 	io_scalar_inband_t      input,
5693 	mach_msg_type_number_t  inputCount,
5694 	io_struct_inband_t      inputStruct,
5695 	mach_msg_type_number_t  inputStructCount )
5696 {
5697 	uint32_t i;
5698 	io_scalar_inband64_t _input;
5699 
5700 	mach_msg_type_number_t scalar_outputCnt = 0;
5701 	mach_msg_type_number_t inband_outputCnt = 0;
5702 	mach_vm_size_t ool_output_size = 0;
5703 
5704 	for (i = 0; i < inputCount; i++) {
5705 		_input[i] = SCALAR64(input[i]);
5706 	}
5707 
5708 	return is_io_connect_method(connect, index,
5709 	           _input, inputCount,
5710 	           inputStruct, inputStructCount,
5711 	           0, 0,
5712 	           NULL, &inband_outputCnt,
5713 	           NULL, &scalar_outputCnt,
5714 	           0, &ool_output_size);
5715 }
5716 
// Legacy (pre-Leopard) shim: dispatch a scalar-input + struct-input external
// method through the fixed 6-argument IOMethod calling convention.
// method->count0 declares the expected scalar count; method->count1 the
// expected struct size (kIOUCVariableStructureSize accepts any size).
kern_return_t
shim_io_connect_method_scalarI_structureI(
	IOExternalMethod *  method,
	IOService *         object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              inputStruct,
	mach_msg_type_number_t  inputStructCount )
{
	IOMethod            func;
	IOReturn            err = kIOReturnBadArgument;

	do{
		// Scalar count must match the method table exactly.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;	// err remains kIOReturnBadArgument
		}
		// Struct size must match unless declared variable-size.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;	// err remains kIOReturnBadArgument
		}

		func = method->func;

		// Marshal scalars (narrowed via ARG32) first, then the struct
		// pointer (and, where a slot remains, its size); pad with NULL.
		switch (inputCount) {
		case 5:
			// All six slots consumed: 5 scalars + struct pointer (no size).
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			// NOTE(review): input[2] is passed untruncated here, unlike the
			// ARG32-narrowed scalars in every other case. This matches
			// long-shipped behavior -- confirm intent before changing.
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *)  input[2],
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			// count0 > 5 cannot be marshaled; err stays kIOReturnBadArgument.
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5782 
// Async variant of the scalar-input + struct-input shim: same marshaling as
// shim_io_connect_method_scalarI_structureI, but the IOAsyncMethod receives
// the (32-bit-narrowed) async reference array as its first argument.
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              inputStruct,
	mach_msg_type_number_t  inputStructCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err = kIOReturnBadArgument;
	io_async_ref_t      reference;

	// Narrow each 64-bit user async reference to the legacy 32-bit form.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	do{
		// Scalar count must match the method table exactly.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;	// err remains kIOReturnBadArgument
		}
		// Struct size must match unless declared variable-size.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;	// err remains kIOReturnBadArgument
		}

		func = method->func;

		// Marshal: async reference, then ARG32-narrowed scalars, then the
		// struct pointer (and its size where a slot remains); pad with NULL.
		switch (inputCount) {
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			// count0 > 5 cannot be marshaled; err stays kIOReturnBadArgument.
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5863 
5864 /* Routine io_connect_method_structureI_structureO */
5865 kern_return_t
is_io_connect_method_structureI_structureO(io_object_t connect,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5866 is_io_connect_method_structureI_structureO(
5867 	io_object_t     connect,
5868 	uint32_t        index,
5869 	io_struct_inband_t              input,
5870 	mach_msg_type_number_t  inputCount,
5871 	io_struct_inband_t              output,
5872 	mach_msg_type_number_t *        outputCount )
5873 {
5874 	mach_msg_type_number_t scalar_outputCnt = 0;
5875 	mach_vm_size_t ool_output_size = 0;
5876 
5877 	return is_io_connect_method(connect, index,
5878 	           NULL, 0,
5879 	           input, inputCount,
5880 	           0, 0,
5881 	           output, outputCount,
5882 	           NULL, &scalar_outputCnt,
5883 	           0, &ool_output_size);
5884 }
5885 
5886 kern_return_t
shim_io_connect_method_structureI_structureO(IOExternalMethod * method,IOService * object,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,IOByteCount * outputCount)5887 shim_io_connect_method_structureI_structureO(
5888 	IOExternalMethod *  method,
5889 	IOService *         object,
5890 	io_struct_inband_t              input,
5891 	mach_msg_type_number_t  inputCount,
5892 	io_struct_inband_t              output,
5893 	IOByteCount *   outputCount )
5894 {
5895 	IOMethod            func;
5896 	IOReturn            err = kIOReturnBadArgument;
5897 
5898 	do{
5899 		if ((kIOUCVariableStructureSize != method->count0)
5900 		    && (inputCount != method->count0)) {
5901 			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5902 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5903 			continue;
5904 		}
5905 		if ((kIOUCVariableStructureSize != method->count1)
5906 		    && (*outputCount != method->count1)) {
5907 			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5908 			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5909 			continue;
5910 		}
5911 
5912 		func = method->func;
5913 
5914 		if (method->count1) {
5915 			if (method->count0) {
5916 				err = (object->*func)( input, output,
5917 				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5918 			} else {
5919 				err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
5920 			}
5921 		} else {
5922 			err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5923 		}
5924 	}while (false);
5925 
5926 
5927 	return err;
5928 }
5929 
// Async variant of the struct-input / struct-output shim: identical size
// validation and dispatch as shim_io_connect_method_structureI_structureO,
// but the IOAsyncMethod receives the narrowed async reference array first.
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t           asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t              asyncReferenceCount,
	io_struct_inband_t              input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	mach_msg_type_number_t *        outputCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err;
	io_async_ref_t      reference;

	// Narrow each 64-bit user async reference to the legacy 32-bit form.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do{
		// Input size must match the table (unless variable-size).
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;	// err remains kIOReturnBadArgument
		}
		// Output size must match the table (unless variable-size).
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;	// err remains kIOReturnBadArgument
		}

		func = method->func;

		// Dispatch: convention depends on which sizes are declared non-zero.
		if (method->count1) {
			if (method->count0) {
				// Both input and output present.
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				// Output only.
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			// Input only.
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
5985 
/* Routine io_catalog_send_data */
// Accept serialized driver-personality data from user space and apply the
// operation selected by "flag" (add/remove/reset drivers) to gIOCatalogue.
// *result carries the IOCatalogue status; the MIG return is KERN_SUCCESS
// whenever the message itself was consumed.
kern_return_t
is_io_catalog_send_data(
	mach_port_t             main_port,
	uint32_t                flag,
	io_buf_ptr_t            inData,
	mach_msg_type_number_t  inDataCount,
	kern_return_t *         result)
{
	// Allow sending catalog data if there is no kextd and the kernel is DEVELOPMENT || DEBUG
#if NO_KEXTD && !(DEVELOPMENT || DEBUG)
	return kIOReturnNotPrivileged;
#else /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
	OSObject * obj = NULL;
	vm_offset_t data;
	kern_return_t kr = kIOReturnError;

	//printf("io_catalog_send_data called. flag: %d\n", flag);

	// Only the main device port may modify the catalogue.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	// All flags except the kextd-notification / removed flavors require a
	// non-empty payload.
	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
	    flag != kIOCatalogKextdActive &&
	    flag != kIOCatalogKextdFinishedLaunching) &&
	    (!inData || !inDataCount)) {
		return kIOReturnBadArgument;
	}

	if (!IOCurrentTaskHasEntitlement(kIOCatalogManagementEntitlement)) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);
		// For now, fake success to not break applications relying on this function succeeding.
		// See <rdar://problem/32554970> for more details.
		return kIOReturnSuccess;
	}

	if (inData) {
		vm_map_offset_t map_data;

		// Bound the payload to 1024 inband structs worth of bytes.
		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
			return kIOReturnMessageTooLarge;
		}

		// Materialize the caller's out-of-line copy into the kernel map.
		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		// must return success after vm_map_copyout() succeeds

		if (inDataCount) {
			// Unserialize the XML payload, then free the kernel copy.
			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
			vm_deallocate( kernel_map, data, inDataCount );
			if (!obj) {
				*result = kIOReturnNoMemory;
				return KERN_SUCCESS;
			}
		}
		// NOTE(review): if inData is non-NULL while inDataCount is 0 (only
		// reachable via the kextd-flag path above), the copied-out region is
		// never deallocated -- confirm whether callers can hit this case.
	}

	// Apply the requested catalogue operation; kr carries its status.
	switch (flag) {
	case kIOCatalogResetDrivers:
	case kIOCatalogResetDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->resetAndAddDrivers(array,
			    flag == kIOCatalogResetDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogAddDrivers:
	case kIOCatalogAddDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->addDrivers( array,
			    flag == kIOCatalogAddDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveDrivers:
	case kIOCatalogRemoveDriversNoMatch: {
		OSDictionary * dict;

		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->removeDrivers( dict,
			    flag == kIOCatalogRemoveDrivers )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	// Obsolete / notification-only flavors are acknowledged but unsupported.
	case kIOCatalogStartMatching__Removed:
	case kIOCatalogRemoveKernelLinker__Removed:
	case kIOCatalogKextdActive:
	case kIOCatalogKextdFinishedLaunching:
		kr = KERN_NOT_SUPPORTED;
		break;

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	if (obj) {
		obj->release();
	}

	*result = kr;
	return KERN_SUCCESS;
#endif /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
}
6120 
6121 /* Routine io_catalog_terminate */
6122 kern_return_t
is_io_catalog_terminate(mach_port_t main_port,uint32_t flag,io_name_t name)6123 is_io_catalog_terminate(
6124 	mach_port_t main_port,
6125 	uint32_t flag,
6126 	io_name_t name )
6127 {
6128 	kern_return_t          kr;
6129 
6130 	if (main_port != main_device_port) {
6131 		return kIOReturnNotPrivileged;
6132 	}
6133 
6134 	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6135 	    kIOClientPrivilegeAdministrator );
6136 	if (kIOReturnSuccess != kr) {
6137 		return kr;
6138 	}
6139 
6140 	switch (flag) {
6141 #if !defined(SECURE_KERNEL)
6142 	case kIOCatalogServiceTerminate:
6143 		kr = gIOCatalogue->terminateDrivers(NULL, name, false);
6144 		break;
6145 
6146 	case kIOCatalogModuleUnload:
6147 	case kIOCatalogModuleTerminate:
6148 		kr = gIOCatalogue->terminateDriversForModule(name,
6149 		    flag == kIOCatalogModuleUnload);
6150 		break;
6151 #endif
6152 
6153 	default:
6154 		kr = kIOReturnBadArgument;
6155 		break;
6156 	}
6157 
6158 	return kr;
6159 }
6160 
6161 /* Routine io_catalog_get_data */
6162 kern_return_t
is_io_catalog_get_data(mach_port_t main_port,uint32_t flag,io_buf_ptr_t * outData,mach_msg_type_number_t * outDataCount)6163 is_io_catalog_get_data(
6164 	mach_port_t             main_port,
6165 	uint32_t                flag,
6166 	io_buf_ptr_t            *outData,
6167 	mach_msg_type_number_t  *outDataCount)
6168 {
6169 	kern_return_t kr = kIOReturnSuccess;
6170 	OSSerialize * s;
6171 
6172 	if (main_port != main_device_port) {
6173 		return kIOReturnNotPrivileged;
6174 	}
6175 
6176 	//printf("io_catalog_get_data called. flag: %d\n", flag);
6177 
6178 	s = OSSerialize::withCapacity(4096);
6179 	if (!s) {
6180 		return kIOReturnNoMemory;
6181 	}
6182 
6183 	kr = gIOCatalogue->serializeData(flag, s);
6184 
6185 	if (kr == kIOReturnSuccess) {
6186 		mach_vm_address_t data;
6187 		vm_map_copy_t copy;
6188 		unsigned int size;
6189 
6190 		size = s->getLength();
6191 		kr = mach_vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
6192 		if (kr == kIOReturnSuccess) {
6193 			bcopy(s->text(), (void *)data, size);
6194 			kr = vm_map_copyin(kernel_map, data, size, true, &copy);
6195 			*outData = (char *)copy;
6196 			*outDataCount = size;
6197 		}
6198 	}
6199 
6200 	s->release();
6201 
6202 	return kr;
6203 }
6204 
6205 /* Routine io_catalog_get_gen_count */
6206 kern_return_t
is_io_catalog_get_gen_count(mach_port_t main_port,uint32_t * genCount)6207 is_io_catalog_get_gen_count(
6208 	mach_port_t             main_port,
6209 	uint32_t                *genCount)
6210 {
6211 	if (main_port != main_device_port) {
6212 		return kIOReturnNotPrivileged;
6213 	}
6214 
6215 	//printf("io_catalog_get_gen_count called.\n");
6216 
6217 	if (!genCount) {
6218 		return kIOReturnBadArgument;
6219 	}
6220 
6221 	*genCount = gIOCatalogue->getGenerationCount();
6222 
6223 	return kIOReturnSuccess;
6224 }
6225 
6226 /* Routine io_catalog_module_loaded.
6227  * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
6228  */
6229 kern_return_t
is_io_catalog_module_loaded(mach_port_t main_port,io_name_t name)6230 is_io_catalog_module_loaded(
6231 	mach_port_t             main_port,
6232 	io_name_t               name)
6233 {
6234 	if (main_port != main_device_port) {
6235 		return kIOReturnNotPrivileged;
6236 	}
6237 
6238 	//printf("io_catalog_module_loaded called. name %s\n", name);
6239 
6240 	if (!name) {
6241 		return kIOReturnBadArgument;
6242 	}
6243 
6244 	gIOCatalogue->moduleHasLoaded(name);
6245 
6246 	return kIOReturnSuccess;
6247 }
6248 
6249 kern_return_t
is_io_catalog_reset(mach_port_t main_port,uint32_t flag)6250 is_io_catalog_reset(
6251 	mach_port_t             main_port,
6252 	uint32_t                flag)
6253 {
6254 	if (main_port != main_device_port) {
6255 		return kIOReturnNotPrivileged;
6256 	}
6257 
6258 	switch (flag) {
6259 	case kIOCatalogResetDefault:
6260 		gIOCatalogue->reset();
6261 		break;
6262 
6263 	default:
6264 		return kIOReturnBadArgument;
6265 	}
6266 
6267 	return kIOReturnSuccess;
6268 }
6269 
// Fast-path Mach trap for IOKit user clients: routes either to the DriverKit
// user-server trap handler or to a classic IOUserClient IOExternalTrap,
// depending on bits of the user-supplied reference.
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t    result = kIOReturnBadArgument;
	IOUserClient   * userClient;
	OSObject       * object;
	uintptr_t        ref;
	mach_port_name_t portName;

	ref     = (uintptr_t) args->userClientRef;

	if ((ref == MACH_PORT_DEAD) || (ref == (uintptr_t) MACH_PORT_NULL)) {
		return kIOReturnBadArgument;
	}
	// kobject port names always have b0-1 set, so we use these bits as flags to
	// iokit_user_client_trap()
	// keep this up to date with ipc_entry_name_mask();
	portName = (mach_port_name_t) (ref | 3);
	// Bit 32 set, or bit 0 clear, selects the DriverKit (uext) trap path;
	// otherwise ref is treated as a classic connect port name.
	if (((1ULL << 32) & ref) || !(1 & ref)) {
		object = iokit_lookup_uext_ref_current_task(portName);
		if (object) {
			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
		}
		OSSafeReleaseNULL(object);
	} else {
		io_object_t ref_current_task = iokit_lookup_connect_ref_current_task((mach_port_name_t) ref);
		if ((userClient = OSDynamicCast(IOUserClient, ref_current_task))) {
			IOExternalTrap *trap = NULL;
			IOService *target = NULL;

			result = kIOReturnSuccess;
			// Let any installed filter policy veto this trap index before
			// it is looked up.
			io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
			if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
				result = gIOUCFilterCallbacks->io_filter_applier(userClient, filterPolicy, io_filter_type_trap, args->index);
			}
			if (kIOReturnSuccess == result) {
				trap = userClient->getTargetAndTrapForIndex(&target, args->index);
			}
			if (trap && target) {
				IOTrap func;

				func = trap->func;

				if (func) {
					// Invoke the trap with the six user-supplied words.
					result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
				}
			}

			// Drop the reference taken by the connect lookup.
			iokit_remove_connect_reference(userClient);
		} else {
			// Lookup returned something that is not an IOUserClient.
			OSSafeReleaseNULL(ref_current_task);
		}
	}

	return result;
}
6326 
6327 /* Routine io_device_tree_entry_exists_with_name */
6328 kern_return_t
is_io_device_tree_entry_exists_with_name(mach_port_t main_port,io_name_t name,boolean_t * exists)6329 is_io_device_tree_entry_exists_with_name(
6330 	mach_port_t main_port,
6331 	io_name_t name,
6332 	boolean_t *exists )
6333 {
6334 	OSCollectionIterator *iter;
6335 
6336 	if (main_port != main_device_port) {
6337 		return kIOReturnNotPrivileged;
6338 	}
6339 
6340 	iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6341 	*exists = iter && iter->getNextObject();
6342 	OSSafeReleaseNULL(iter);
6343 
6344 	return kIOReturnSuccess;
6345 }
6346 } /* extern "C" */
6347 
6348 IOReturn
callExternalMethod(uint32_t selector,IOExternalMethodArguments * args)6349 IOUserClient::callExternalMethod(uint32_t selector, IOExternalMethodArguments * args)
6350 {
6351 	IOReturn ret;
6352 
6353 	if (defaultLocking) {
6354 		if (defaultLockingSingleThreadExternalMethod) {
6355 			IORWLockWrite(lock);
6356 		} else {
6357 			IORWLockRead(lock);
6358 		}
6359 	}
6360 	if (uc2022) {
6361 		ret = ((IOUserClient2022 *) this)->externalMethod(selector, (IOExternalMethodArgumentsOpaque *) args);
6362 	} else {
6363 		ret = externalMethod(selector, args);
6364 	}
6365 	if (defaultLocking) {
6366 		IORWLockUnlock(lock);
6367 	}
6368 	return ret;
6369 }
6370 
// Legacy entry point deliberately disabled for IOUserClient2022: subclasses
// must implement the opaque-arguments externalMethod overload instead, so
// landing here indicates a programming error.
MIG_SERVER_ROUTINE IOReturn
IOUserClient2022::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
    IOExternalMethodDispatch *dispatch,
    OSObject *target, void *reference)
{
	panic("wrong externalMethod for IOUserClient2022");
}
6378 
// Validate and dispatch an external method call against a fixed dispatch
// table. Every argument count/size is checked against the table entry
// (kIOUCVariableStructureSize accepts any value), async use and entitlement
// requirements are enforced, then the entry's function is invoked.
// Returns kIOReturnNoCompletion when the entry has no function so the
// subclass may handle the selector itself.
IOReturn
IOUserClient2022::dispatchExternalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque *arguments,
    const IOExternalMethodDispatch2022 dispatchArray[], size_t dispatchArrayCount,
    OSObject * target, void * reference)
{
	IOReturn    err;
	IOExternalMethodArguments * args = (typeof(args))arguments;
	const IOExternalMethodDispatch2022 * dispatch;

	if (!dispatchArray) {
		return kIOReturnError;
	}
	// Bounds-check the user-supplied selector before indexing.
	if (selector >= dispatchArrayCount) {
		return kIOReturnBadArgument;
	}
	dispatch = &dispatchArray[selector];

	uint32_t count;
	// Scalar input count must match the table entry.
	count = dispatch->checkScalarInputCount;
	if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
		return kIOReturnBadArgument;
	}

	// Structure input size: compare against the OOL descriptor's length
	// when one is present, else the inband size.
	count = dispatch->checkStructureInputSize;
	if ((kIOUCVariableStructureSize != count)
	    && (count != ((args->structureInputDescriptor)
	    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
		return kIOReturnBadArgument;
	}

	// Scalar output count must match the table entry.
	count = dispatch->checkScalarOutputCount;
	if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
		return kIOReturnBadArgument;
	}

	// Structure output size, same descriptor-vs-inband rule as input.
	count = dispatch->checkStructureOutputSize;
	if ((kIOUCVariableStructureSize != count)
	    && (count != ((args->structureOutputDescriptor)
	    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
		return kIOReturnBadArgument;
	}

	// Async invocation is only allowed when the entry opts in.
	if (args->asyncWakePort && !dispatch->allowAsync) {
		return kIOReturnBadArgument;
	}

	// Optional per-entry entitlement gate on the calling task.
	if (dispatch->checkEntitlement) {
		if (!IOCurrentTaskHasEntitlement(dispatch->checkEntitlement)) {
			return kIOReturnNotPrivileged;
		}
	}

	if (dispatch->function) {
		err = (*dispatch->function)(target, reference, args);
	} else {
		err = kIOReturnNoCompletion; /* implementer can dispatch */
	}
	return err;
}
6438 
// Default external-method dispatcher. With a dispatch entry supplied, the
// argument counts/sizes are validated against it and its function invoked.
// Without one, the call falls back to the legacy (pre-Leopard) method
// tables, routing through the shim_io_* helpers according to the method
// type bits and whether an async wake port was supplied.
IOReturn
IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
	IOReturn    err;
	IOService * object;
	IOByteCount structureOutputSize;

	if (dispatch) {
		uint32_t count;
		// Scalar input count must match the dispatch entry
		// (kIOUCVariableStructureSize accepts any value).
		count = dispatch->checkScalarInputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
			return kIOReturnBadArgument;
		}

		// Structure input size: OOL descriptor length when present,
		// else the inband size.
		count = dispatch->checkStructureInputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureInputDescriptor)
		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkScalarOutputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureOutputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureOutputDescriptor)
		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
			return kIOReturnBadArgument;
		}

		if (dispatch->function) {
			err = (*dispatch->function)(target, reference, args);
		} else {
			err = kIOReturnNoCompletion; /* implementer can dispatch */
		}
		return err;
	}


	// pre-Leopard API's don't do ool structs
	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
		err = kIOReturnIPCError;
		return err;
	}

	// Local copy used by the synchronous struct-output shims; written back
	// (with overflow check) at the end.
	structureOutputSize = args->structureOutputSize;

	if (args->asyncWakePort) {
		// Async path: look up the legacy async method table entry.
		IOExternalAsyncMethod * method;
		object = NULL;
		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		// Methods marked foreground-only are denied to GPU-denied tasks.
		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		// Route by the method's declared argument style.
		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_async_method_scalarI_structureI( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *)args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_async_method_scalarI_scalarO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_async_method_scalarI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_async_method_structureI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    (char *)args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	} else {
		// Synchronous path: look up the legacy method table entry.
		IOExternalMethod *      method;
		object = NULL;
		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		// Methods marked foreground-only are denied to GPU-denied tasks.
		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		// Route by the method's declared argument style. Struct-output
		// shims write through the local structureOutputSize.
		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_connect_method_scalarI_structureI( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_connect_method_scalarI_scalarO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_connect_method_scalarI_structureO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_connect_method_structureI_structureO( method, object,
			    (char *) args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	}

	// Reject output sizes that would overflow the 32-bit reply field.
	if (structureOutputSize > UINT_MAX) {
		structureOutputSize = 0;
		err = kIOReturnBadArgument;
	}

	args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);

	return err;
}
6591 
6592 IOReturn
registerFilterCallbacks(const struct io_filter_callbacks * callbacks,size_t size)6593 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6594 {
6595 	if (size < sizeof(*callbacks)) {
6596 		return kIOReturnBadArgument;
6597 	}
6598 	if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6599 		return kIOReturnBusy;
6600 	}
6601 	return kIOReturnSuccess;
6602 }
6603 
6604 
/* Reserved vtable padding slots, kept for binary-compatible future expansion. */
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);

OSMetaClassDefineReservedUnused(IOUserClient2022, 0);
OSMetaClassDefineReservedUnused(IOUserClient2022, 1);
OSMetaClassDefineReservedUnused(IOUserClient2022, 2);
OSMetaClassDefineReservedUnused(IOUserClient2022, 3);
6626