xref: /xnu-10002.1.13/iokit/Kernel/IOUserClient.cpp (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <libkern/c++/OSKext.h>
30 #include <libkern/c++/OSSharedPtr.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51 #include <sys/code_signing.h>
52 
53 #include <mach/sdt.h>
54 #include <os/hash.h>
55 
56 #include <libkern/amfi/amfi.h>
57 
58 #if CONFIG_MACF
59 
60 extern "C" {
61 #include <security/mac_framework.h>
62 };
63 #include <sys/kauth.h>
64 
65 #define IOMACF_LOG 0
66 
67 #endif /* CONFIG_MACF */
68 
69 #include <IOKit/assert.h>
70 
71 #include "IOServicePrivate.h"
72 #include "IOKitKernelInternal.h"
73 
74 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
75 #define SCALAR32(x) ((uint32_t )x)
76 #define ARG32(x)    ((void *)(uintptr_t)SCALAR32(x))
77 #define REF64(x)    ((io_user_reference_t)((UInt64)(x)))
78 #define REF32(x)    ((int)(x))
79 
enum{
	// NOTE(review): these look like flag bits carried inside async
	// references (names suggest a 64-bit-client marker and an
	// "error already logged" marker, with kIOUCAsync0Flags as the
	// combined mask) - confirm at the async-reference use sites,
	// which are outside this chunk.
	kIOUCAsync0Flags          = 3ULL,
	kIOUCAsync64Flag          = 1ULL,
	kIOUCAsyncErrorLoggedFlag = 2ULL
};
85 
86 #if IOKITSTATS
87 
88 #define IOStatisticsRegisterCounter() \
89 do { \
90 	reserved->counter = IOStatistics::registerUserClient(this); \
91 } while (0)
92 
93 #define IOStatisticsUnregisterCounter() \
94 do { \
95 	if (reserved) \
96 	        IOStatistics::unregisterUserClient(reserved->counter); \
97 } while (0)
98 
99 #define IOStatisticsClientCall() \
100 do { \
101 	IOStatistics::countUserClientCall(client); \
102 } while (0)
103 
104 #else
105 
106 #define IOStatisticsRegisterCounter()
107 #define IOStatisticsUnregisterCounter()
108 #define IOStatisticsClientCall()
109 
110 #endif /* IOKITSTATS */
111 
112 #if DEVELOPMENT || DEBUG
113 
114 #define FAKE_STACK_FRAME(a)                                             \
115 	const void ** __frameptr;                                       \
116 	const void  * __retaddr;                                        \
117 	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0);   \
118 	__retaddr = __frameptr[1];                                      \
119 	__frameptr[1] = (a);
120 
121 #define FAKE_STACK_FRAME_END()                                          \
122 	__frameptr[1] = __retaddr;
123 
124 #else /* DEVELOPMENT || DEBUG */
125 
126 #define FAKE_STACK_FRAME(a)
127 #define FAKE_STACK_FRAME_END()
128 
129 #endif /* DEVELOPMENT || DEBUG */
130 
131 #define ASYNC_REF_COUNT         (sizeof(io_async_ref_t) / sizeof(natural_t))
132 #define ASYNC_REF64_COUNT       (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
133 
134 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
135 
136 extern "C" {
137 #include <mach/mach_traps.h>
138 #include <vm/vm_map.h>
139 } /* extern "C" */
140 
141 struct IOMachPortHashList;
142 
143 static_assert(IKOT_MAX_TYPE <= 255);
144 
145 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
146 
147 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
class IOMachPort : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	mach_port_mscount_t mscount; // make-send count; compared against the kernel's in noMoreSendersForObject()
	IOLock      lock;            // guards `object` against concurrent readers (see iokit_copy_object_for_consumed_kobject())
	SLIST_ENTRY(IOMachPort) link; // linkage in a gIOMachPortHash bucket; list mutations happen under gIOObjectPortLock
	ipc_port_t  port;            // the kobject port representing `object`
	OSObject*   XNU_PTRAUTH_SIGNED_PTR("IOMachPort.object") object; // tagged-retained target; NULLed when the port is torn down

	// Allocates a wrapper for obj and its new object port; takes a
	// tagged (OSCollection) retain on obj.
	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);

	// Selects the hash bucket for obj (the type argument is not used
	// in the hash).
	static IOMachPortHashList* bucketForObject(OSObject *obj,
	    ipc_kobject_type_t type);

	// Linear scan of a bucket for the (obj, type) pair; callers hold
	// gIOObjectPortLock.
	static LIBKERN_RETURNS_NOT_RETAINED IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	// Processes a no-senders notification; returns true when the port
	// was actually destroyed (false if new send rights raced in).
	static bool noMoreSendersForObject( OSObject * obj,
	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
	// Tears down the (non-connect) port for obj, if eligible.
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );

	// Inserts a send right for obj's port into task's IPC space.
	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;
};
175 
176 #define super OSObject
177 OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)
178 
179 static IOLock *         gIOObjectPortLock;
180 IOLock *                gIOUserServerLock;
181 
182 SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;
183 
184 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
185 
186 SLIST_HEAD(IOMachPortHashList, IOMachPort);
187 
188 #if defined(XNU_TARGET_OS_OSX)
189 #define PORT_HASH_SIZE 4096
#else /* !defined(XNU_TARGET_OS_OSX) */
#define PORT_HASH_SIZE 256
#endif /* defined(XNU_TARGET_OS_OSX) */
193 
194 IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
195 
196 void
IOMachPortInitialize(void)197 IOMachPortInitialize(void)
198 {
199 	for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
200 		SLIST_INIT(&gIOMachPortHash[i]);
201 	}
202 }
203 
204 IOMachPortHashList*
bucketForObject(OSObject * obj,ipc_kobject_type_t type)205 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
206 {
207 	return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
208 }
209 
210 IOMachPort*
portForObjectInBucket(IOMachPortHashList * bucket,OSObject * obj,ipc_kobject_type_t type)211 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
212 {
213 	IOMachPort *machPort;
214 
215 	SLIST_FOREACH(machPort, bucket, link) {
216 		if (machPort->object == obj && iokit_port_type(machPort->port) == type) {
217 			return machPort;
218 		}
219 	}
220 	return NULL;
221 }
222 
223 IOMachPort*
withObjectAndType(OSObject * obj,ipc_kobject_type_t type)224 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
225 {
226 	IOMachPort *machPort = NULL;
227 
228 	machPort = new IOMachPort;
229 	if (__improbable(machPort && !machPort->init())) {
230 		OSSafeReleaseNULL(machPort);
231 		return NULL;
232 	}
233 
234 	machPort->object = obj;
235 	machPort->port = iokit_alloc_object_port(machPort, type);
236 	IOLockInlineInit(&machPort->lock);
237 
238 	obj->taggedRetain(OSTypeID(OSCollection));
239 	machPort->mscount++;
240 
241 	return machPort;
242 }
243 
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOMachPort *machPort = NULL;
	IOUserClient *uc;
	OSAction *action;
	bool destroyed = true;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Hold obj across the whole routine: the taggedRelease() below may
	// otherwise drop its last reference while we still use it.
	obj->retain();

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort) {
		// iokit_port_for_object() bumps mscount under the same lock when
		// it hands out another send right; if that raced ahead of this
		// notification, don't destroy - report the newer count instead.
		destroyed = (machPort->mscount <= *mscount);
		if (!destroyed) {
			*mscount = machPort->mscount;
			lck_mtx_unlock(gIOObjectPortLock);
		} else {
			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
				uc->noMoreSenders();
			}
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);

			// Detach the object under machPort->lock so concurrent
			// iokit_copy_object_for_consumed_kobject() callers see NULL.
			IOLockLock(&machPort->lock);
			iokit_remove_object_port(machPort->port, type);
			machPort->object = NULL;
			IOLockUnlock(&machPort->lock);

			lck_mtx_unlock(gIOObjectPortLock);

			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

			// Drop the tagged retain taken in withObjectAndType().
			obj->taggedRelease(OSTypeID(OSCollection));
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}

	// DriverKit objects: losing all senders aborts any pending OSAction.
	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
		action->Aborted();
	}

	if (IKOT_UEXT_OBJECT == type && IOUserServer::shouldLeakObjects()) {
		// Leak object (debug behavior gated by IOUserServer::shouldLeakObjects())
		obj->retain();
	}

	obj->release();

	return destroyed;
}
300 
void
IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
	IOMachPort *machPort;
	IOService  *service;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Connect ports are never torn down here; they go through the
	// no-senders path instead.
	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	// Only destroy the port for IOService objects, and only when the
	// service hasn't asked to hold destruction (set via
	// setMachPortHoldDestroy(), e.g. for pending terminated notifications).
	if (machPort
	    && (type == IKOT_IOKIT_OBJECT)
	    && (service = OSDynamicCast(IOService, obj))
	    && !service->machPortHoldDestroy()) {
		// Temporary retain: the taggedRelease() below may drop the last
		// reference on obj.
		obj->retain();
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);

		// Detach under machPort->lock so concurrent lookups observe NULL.
		IOLockLock(&machPort->lock);
		iokit_remove_object_port(machPort->port, type);
		machPort->object = NULL;
		IOLockUnlock(&machPort->lock);

		lck_mtx_unlock(gIOObjectPortLock);

		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

		// Drop the tagged retain taken in withObjectAndType().
		obj->taggedRelease(OSTypeID(OSCollection));
		obj->release();
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}
}
337 
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	IOMachPort *machPort;
	bool        destroyPort;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	// panther, 3160200
	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

	obj->retain();
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
	IOMachPortHashList *mappingBucket = NULL;

	lck_mtx_lock(gIOObjectPortLock);

	// If a user client still has a `mappings` object, its connect port
	// is re-pointed at that object below instead of being destroyed
	// (presumably so outstanding mappings keep a live port - confirm
	// against the mappings lifecycle elsewhere in this file).
	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	if (uc && uc->mappings) {
		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
	}

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		lck_mtx_unlock(gIOObjectPortLock);
		goto end;
	}

	// Unhook the wrapper from this object and drop the tagged retain
	// that withObjectAndType() took on obj.
	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	obj->taggedRelease(OSTypeID(OSCollection));

	destroyPort = true;
	if (uc) {
		uc->noMoreSenders();
		if (uc->mappings) {
			// Transfer the machPort to the mappings object: retag, move
			// to that object's bucket, and swap the target under
			// machPort->lock.
			uc->mappings->taggedRetain(OSTypeID(OSCollection));
			SLIST_INSERT_HEAD(mappingBucket, machPort, link);

			IOLockLock(&machPort->lock);
			machPort->object = uc->mappings;
			IOLockUnlock(&machPort->lock);

			lck_mtx_unlock(gIOObjectPortLock);

			OSSafeReleaseNULL(uc->mappings);
			destroyPort = false;
		}
	}

	if (destroyPort) {
		IOLockLock(&machPort->lock);
		iokit_remove_object_port(machPort->port, IKOT_IOKIT_CONNECT);
		machPort->object = NULL;
		IOLockUnlock(&machPort->lock);

		lck_mtx_unlock(gIOObjectPortLock);
		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
	}

end:
	OSSafeReleaseNULL(obj);
}
401 
402 mach_port_name_t
makeSendRightForTask(task_t task,io_object_t obj,ipc_kobject_type_t type)403 IOMachPort::makeSendRightForTask( task_t task,
404     io_object_t obj, ipc_kobject_type_t type )
405 {
406 	return iokit_make_send_right( task, obj, type );
407 }
408 
void
IOMachPort::free( void )
{
	// port may be NULL if iokit_alloc_object_port() failed in
	// withObjectAndType() - TODO confirm that allocation can fail.
	if (port) {
		iokit_destroy_object_port(port, iokit_port_type(port));
	}
	IOLockInlineDestroy(&lock);
	super::free();
}
418 
419 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
420 
static bool
IOTaskRegistryCompatibility(task_t task)
{
	// Registry-compatibility matching is unconditionally disabled in
	// this build; the task argument is intentionally ignored.
	return false;
}
426 
427 static void
IOTaskRegistryCompatibilityMatching(task_t task,OSDictionary * matching)428 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
429 {
430 	matching->setObject(gIOServiceNotificationUserKey, kOSBooleanTrue);
431 	if (!IOTaskRegistryCompatibility(task)) {
432 		return;
433 	}
434 	matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
435 }
436 
437 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
438 
OSDefineMetaClassAndStructors(IOUserIterator,OSIterator)439 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
440 
441 IOUserIterator *
442 IOUserIterator::withIterator(OSIterator * iter)
443 {
444 	IOUserIterator * me;
445 
446 	if (!iter) {
447 		return NULL;
448 	}
449 
450 	me = new IOUserIterator;
451 	if (me && !me->init()) {
452 		me->release();
453 		me = NULL;
454 	}
455 	if (!me) {
456 		iter->release();
457 		return me;
458 	}
459 	me->userIteratorObject = iter;
460 
461 	return me;
462 }
463 
464 bool
init(void)465 IOUserIterator::init( void )
466 {
467 	if (!OSObject::init()) {
468 		return false;
469 	}
470 
471 	IOLockInlineInit(&lock);
472 	return true;
473 }
474 
void
IOUserIterator::free()
{
	// Drop the iterator handed over in withIterator(), if any.
	if (userIteratorObject) {
		userIteratorObject->release();
	}
	IOLockInlineDestroy(&lock);
	OSObject::free();
}
484 
485 void
reset()486 IOUserIterator::reset()
487 {
488 	IOLockLock(&lock);
489 	assert(OSDynamicCast(OSIterator, userIteratorObject));
490 	((OSIterator *)userIteratorObject)->reset();
491 	IOLockUnlock(&lock);
492 }
493 
494 bool
isValid()495 IOUserIterator::isValid()
496 {
497 	bool ret;
498 
499 	IOLockLock(&lock);
500 	assert(OSDynamicCast(OSIterator, userIteratorObject));
501 	ret = ((OSIterator *)userIteratorObject)->isValid();
502 	IOLockUnlock(&lock);
503 
504 	return ret;
505 }
506 
507 OSObject *
getNextObject()508 IOUserIterator::getNextObject()
509 {
510 	assert(false);
511 	return NULL;
512 }
513 
514 OSObject *
copyNextObject()515 IOUserIterator::copyNextObject()
516 {
517 	OSObject * ret = NULL;
518 
519 	IOLockLock(&lock);
520 	if (userIteratorObject) {
521 		ret = ((OSIterator *)userIteratorObject)->getNextObject();
522 		if (ret) {
523 			ret->retain();
524 		}
525 	}
526 	IOLockUnlock(&lock);
527 
528 	return ret;
529 }
530 
531 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
532 extern "C" {
533 // functions called from osfmk/device/iokit_rpc.c
534 
void
iokit_port_object_description(io_object_t obj, kobject_description_t desc)
{
	IORegistryEntry    * regEntry;
	IOUserNotification * __unused noti;
	_IOServiceNotifier * __unused serviceNoti;
	OSSerialize        * __unused s;
	OSDictionary       * __unused matching = NULL;

	// Registry entries are described as "ClassName(0xRegistryEntryID)".
	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
#if DEVELOPMENT || DEBUG
	} else if ((noti = OSDynamicCast(IOUserNotification, obj))) {
		// serviceNoti->matching may become NULL if the port gets a no-senders notification, so we have to lock gIOObjectPortLock
		IOLockLock(gIOObjectPortLock);
		serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->userIteratorObject);
		if (serviceNoti && (matching = serviceNoti->matching)) {
			matching->retain();
		}
		IOLockUnlock(gIOObjectPortLock);

		// Describe the notification by its serialized matching dictionary.
		if (matching) {
			s = OSSerialize::withCapacity((unsigned int) page_size);
			if (s && matching->serialize(s)) {
				snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
			}
			OSSafeReleaseNULL(s);
			OSSafeReleaseNULL(matching);
		}
#endif /* DEVELOPMENT || DEBUG */
	} else {
		// Fallback: class name only.
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
	}
}
569 
570 // FIXME: Implementation of these functions are hidden from the static analyzer.
571 // As for now, the analyzer doesn't consistently support wrapper functions
572 // for retain and release.
573 #ifndef __clang_analyzer__
void
iokit_add_reference( io_object_t obj, natural_t type )
{
	IOUserClient * uc;

	if (!obj) {
		return;
	}

	// Connect-type references additionally count in-flight IPC uses of
	// the user client (__ipc); balanced in iokit_remove_connect_reference().
	if ((IKOT_IOKIT_CONNECT == type)
	    && (uc = OSDynamicCast(IOUserClient, obj))) {
		OSIncrementAtomic(&uc->__ipc);
	}

	obj->retain();
}
590 
591 void
iokit_remove_reference(io_object_t obj)592 iokit_remove_reference( io_object_t obj )
593 {
594 	if (obj) {
595 		obj->release();
596 	}
597 }
598 #endif // __clang_analyzer__
599 
void
iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj )
{
	IOUserClient * uc;
	bool           finalize = false;

	if (!obj) {
		return;
	}

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		assert(uc->__ipc);
		// When the last in-flight IPC use drains on an inactive client,
		// pick up the finalize that finalizeUserReferences() deferred.
		if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
			IOLockLock(gIOObjectPortLock);
			// __ipcFinal is read-and-cleared under gIOObjectPortLock so
			// only one thread runs the deferred finalize.
			if ((finalize = uc->__ipcFinal)) {
				uc->__ipcFinal = false;
			}
			IOLockUnlock(gIOObjectPortLock);
		}
		if (finalize) {
			uc->scheduleFinalize(true);
		}
	}

	// Consume the caller's reference (LIBKERN_CONSUMED).
	obj->release();
}
626 
627 void
iokit_kobject_retain(io_kobject_t machPort)628 iokit_kobject_retain(io_kobject_t machPort)
629 {
630 	assert(OSDynamicCast(IOMachPort, machPort));
631 	machPort->retain();
632 }
633 
io_object_t
iokit_copy_object_for_consumed_kobject(LIBKERN_CONSUMED io_kobject_t machPort, natural_t type)
{
	io_object_t  result;

	assert(OSDynamicCast(IOMachPort, machPort));

	// machPort->object can be cleared concurrently by port teardown
	// (noMoreSendersForObject / releasePortForObject), so the read and
	// the reference-add must both happen under machPort->lock.
	IOLockLock(&machPort->lock);
	result = machPort->object;
	if (result) {
		iokit_add_reference(result, type);
	}
	IOLockUnlock(&machPort->lock);
	// Consume the caller's reference on the IOMachPort (LIBKERN_CONSUMED).
	machPort->release();
	return result;
}
650 
bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
	IOUserClient * uc;
	bool           ok = true;

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		IOLockLock(gIOObjectPortLock);
		// If IPC references (__ipc) are still outstanding, mark the
		// finalize as deferred (__ipcFinal) and report false;
		// iokit_remove_connect_reference() finishes it when the last
		// reference drains.
		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
			ok = false;
		}
		IOLockUnlock(gIOObjectPortLock);
	}
	return ok;
}
666 
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type, ipc_kobject_t * kobj )
{
	IOMachPort *machPort = NULL;
	ipc_port_t   port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		// First request for this (obj, type): create the wrapper and
		// publish it in the hash bucket.
		machPort = IOMachPort::withObjectAndType(obj, type);
		if (__improbable(machPort == NULL)) {
			goto end;
		}
		SLIST_INSERT_HEAD(bucket, machPort, link);
	} else {
		// Another send right is being made: bump the make-send count so
		// noMoreSendersForObject() can detect this race against a
		// no-senders notification.
		machPort->mscount++;
	}

	// Return the port with an extra port reference for the caller.
	iokit_retain_port(machPort->port);
	port = machPort->port;

end:
	// *kobj is set even on failure (to NULL in that case).
	if (kobj) {
		*kobj = machPort;
	}
	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
700 
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOUserClient *      client;
	IOMemoryMap *       map;
	IOUserNotification * notify;
	IOUserServerCheckInToken * token;

	// If new send rights raced in, the port was not destroyed; the
	// updated make-send count is returned through mscount and the
	// caller gets kIOReturnNotReady.
	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
		return kIOReturnNotReady;
	}

	// The port is gone: run the type-specific death action.
	switch (type) {
	case IKOT_IOKIT_CONNECT:
		if ((client = OSDynamicCast( IOUserClient, obj ))) {
			IOStatisticsClientCall();
			// clientDied() runs under the client's write lock.
			IORWLockWrite(&client->lock);
			client->clientDied();
			IORWLockUnlock(&client->lock);
		}
		break;
	case IKOT_IOKIT_OBJECT:
		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
			map->taskDied();
		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
			// Detach the notifier; also drops the retain taken when the
			// notifier was installed (see setNotification()).
			notify->setNotification( NULL );
		}
		break;
	case IKOT_IOKIT_IDENT:
		if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
			token->cancel();
		}
		break;
	}

	return kIOReturnSuccess;
}
739 };      /* extern "C" */
740 
741 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
742 
// Delivers service-matched notifications: queues matched services and
// pings the client's port so it can drain them via copyNextObject().
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	// Kernel-processed portion of the ping message (header only,
	// no descriptors).
	struct PingMsgKdata {
		mach_msg_header_t               msgHdr;
	};
	// User payload: notification header with the client's reference
	// appended.
	struct PingMsgUdata {
		OSNotificationHeader64          notifyHeader;
	};

	// Cap on services queued in newSet; further arrivals are not queued.
	enum { kMaxOutstanding = 1024 };

	ipc_port_t          remotePort;       // client port; send right owned here, released in free()
	void                *msgReference;    // padded copy of the client's async reference
	mach_msg_size_t     msgReferenceSize; // mach_round_msg()'d size of msgReference
	natural_t           msgType;          // notification type stamped on outgoing messages
	OSArray     *       newSet;           // matched services awaiting pickup
	bool                armed;            // true when the next queued service should trigger a ping
	bool                ipcLogged;        // one-shot: log send failures only once

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	// Clears remotePort without releasing the send right.
	void invalidatePort(void);

	// Static trampoline for the notification machinery; forwards to
	// handler() with a retain across the call.
	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
779 
// Delivers IOService message notifications (messageClients-style) to a
// user-space port, forwarding the message type and argument payload.
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	// Kernel-processed portion: header, body, and one port descriptor.
	struct PingMsgKdata {
		mach_msg_header_t               msgHdr;
		mach_msg_body_t                 msgBody;
		mach_msg_port_descriptor_t      ports[1];
	};
	// User payload: packed notification header (variable-size content
	// follows it in the message).
	struct PingMsgUdata {
		OSNotificationHeader64          notifyHeader __attribute__ ((packed));
	};

	ipc_port_t          remotePort;       // client port; send right owned here, released in free()
	void                *msgReference;    // padded copy of the client's async reference
	mach_msg_size_t     msgReferenceSize; // mach_round_msg()'d size of msgReference
	mach_msg_size_t     msgExtraSize;     // NOTE(review): set/used outside this chunk - confirm semantics there
	natural_t           msgType;          // notification type stamped on outgoing messages
	uint8_t             clientIs64;       // nonzero when the registering client is 64-bit
	int                 owningPID;        // pid of the registering process (proc_selfpid() at init)
	bool                ipcLogged;        // one-shot: log send failures only once

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	// Clears remotePort without releasing the send right.
	void invalidatePort(void);

	// Static trampoline for the message machinery; forwards to handler()
	// with a retain across the call.
	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
821 
822 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
823 
824 #undef super
825 #define super IOUserIterator
826 OSDefineMetaClass( IOUserNotification, IOUserIterator );
827 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
828 
829 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
830 
void
IOUserNotification::free( void )
{
#if DEVELOPMENT || DEBUG
	// Debug builds verify the notifier was detached via
	// setNotification(NULL) before the last reference dropped.
	IOLockLock( gIOObjectPortLock);

	assert(userIteratorObject == NULL);

	IOLockUnlock( gIOObjectPortLock);
#endif /* DEVELOPMENT || DEBUG */

	super::free();
}
844 
845 
void
IOUserNotification::setNotification( IONotifier * notify )
{
	OSObject * previousNotify;

	/*
	 * We must retain this object here before proceeding.
	 * Two threads may race in setNotification(). If one thread sets a new notifier while the
	 * other thread sets the notifier to NULL, it is possible for the second thread to call release()
	 * before the first thread calls retain(). Without the retain here, this thread interleaving
	 * would cause the object to get released and freed before it is retained by the first thread,
	 * which is a UaF.
	 */
	retain();

	IOLockLock( gIOObjectPortLock);

	// Swap in the new notifier under the port lock; readers such as
	// iokit_port_object_description() take the same lock.
	previousNotify = userIteratorObject;
	userIteratorObject = notify;

	IOLockUnlock( gIOObjectPortLock);

	if (previousNotify) {
		assert(OSDynamicCast(IONotifier, previousNotify));
		// Stop deliveries from the notifier being replaced.
		((IONotifier *)previousNotify)->remove();

		if (notify == NULL) {
			// Balances the retain taken when the first notifier was set.
			release();
		}
	} else if (notify) {
		// new IONotifier, retain the object. release() will happen in setNotification(NULL)
		retain();
	}

	release(); // paired with retain() at beginning of this method
}
882 
883 void
reset()884 IOUserNotification::reset()
885 {
886 	// ?
887 }
888 
889 bool
isValid()890 IOUserNotification::isValid()
891 {
892 	return true;
893 }
894 
895 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
896 
897 #undef super
898 #define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification,IOUserNotification)899 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
900 
901 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
902 
bool
IOServiceUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize,
    bool clientIs64 )
{
	if (!super::init()) {
		return false;
	}

	newSet = OSArray::withCapacity( 1 );
	if (!newSet) {
		return false;
	}

	// The client's async reference may not exceed the 64-bit reference size.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	// Keep a zero-padded, mach-rounded copy of the reference for pings.
	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	// Takes over the caller's send right; released in free().
	// NOTE(review): clientIs64 is accepted but unused by this subclass.
	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
933 
934 void
invalidatePort(void)935 IOServiceUserNotification::invalidatePort(void)
936 {
937 	remotePort = MACH_PORT_NULL;
938 }
939 
void
IOServiceUserNotification::free( void )
{
	// Release the send right taken over in init(), unless invalidatePort()
	// cleared it.
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);
	OSSafeReleaseNULL(newSet);

	super::free();
}
951 
952 bool
_handler(void * target,void * ref,IOService * newService,IONotifier * notifier)953 IOServiceUserNotification::_handler( void * target,
954     void * ref, IOService * newService, IONotifier * notifier )
955 {
956 	IOServiceUserNotification * targetObj = (IOServiceUserNotification *)target;
957 	bool ret;
958 
959 	targetObj->retain();
960 	ret = targetObj->handler( ref, newService );
961 	targetObj->release();
962 	return ret;
963 }
964 
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	unsigned int        count;
	kern_return_t       kr;
	ipc_port_t          port = NULL;
	bool                sendPing = false;
	mach_msg_size_t     msgSize, payloadSize;

	IOTakeLock( &lock );

	// Queue the service (bounded by kMaxOutstanding). A ping is sent only
	// for the first arrival while armed; the client re-arms by draining
	// the queue through copyNextObject().
	count = newSet->getCount();
	if (count < kMaxOutstanding) {
		newSet->setObject( newService );
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( &lock );

	if (kIOServiceTerminatedNotificationType == msgType) {
		// Hold the service's object port so it survives until user space
		// has observed the termination (checked by releasePortForObject()).
		lck_mtx_lock(gIOObjectPortLock);
		newService->setMachPortHoldDestroy(true);
		lck_mtx_unlock(gIOObjectPortLock);
	}

	if (sendPing) {
		port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );

		// Payload: notification header plus the client's reference; init()
		// guarantees msgReferenceSize fits within OSAsyncReference64.
		payloadSize = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		msgSize = (mach_msg_size_t)(sizeof(PingMsgKdata) + payloadSize);

		kr = kernel_mach_msg_send_with_builder_internal(0, payloadSize,
		    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
		    MACH_MSG_TIMEOUT_NONE, NULL,
		    ^(mach_msg_header_t *hdr, __assert_only mach_msg_descriptor_t *descs, void *payload){
			PingMsgUdata *udata = (PingMsgUdata *)payload;

			hdr->msgh_remote_port    = remotePort;
			hdr->msgh_local_port     = port;
			hdr->msgh_bits           = MACH_MSGH_BITS(
				MACH_MSG_TYPE_COPY_SEND /*remote*/,
				MACH_MSG_TYPE_MAKE_SEND /*local*/);
			hdr->msgh_size           = msgSize;
			hdr->msgh_id             = kOSNotificationMessageID;

			assert(descs == NULL);
			/* End of kernel processed data */

			udata->notifyHeader.size          = 0;
			udata->notifyHeader.type          = msgType;

			// Bounds check before appending the client's reference blob.
			assert((char *)udata->notifyHeader.reference + msgReferenceSize <= (char *)payload + payloadSize);
			bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
		});

		if (port) {
			iokit_release_port( port );
		}

		// Log a send failure only once per notification object.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
1035 OSObject *
getNextObject()1036 IOServiceUserNotification::getNextObject()
1037 {
1038 	assert(false);
1039 	return NULL;
1040 }
1041 
OSObject *
IOServiceUserNotification::copyNextObject()
{
	unsigned int        count;
	OSObject *          result;

	IOLockLock(&lock);

	count = newSet->getCount();
	if (count) {
		// Pop the most recently queued service (LIFO), retained before
		// removal so the array's reference drop cannot free it.
		result = newSet->getObject( count - 1 );
		result->retain();
		newSet->removeObject( count - 1);
	} else {
		// Queue drained: re-arm so handler() sends a ping on the next
		// arrival.
		result = NULL;
		armed = true;
	}

	IOLockUnlock(&lock);

	return result;
}
1064 
1065 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1066 
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification,IOUserNotification)1067 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1068 
1069 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1070 
1071 bool
1072 IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
1073     void * reference, vm_size_t referenceSize, bool client64 )
1074 {
1075 	if (!super::init()) {
1076 		return false;
1077 	}
1078 
1079 	if (referenceSize > sizeof(OSAsyncReference64)) {
1080 		return false;
1081 	}
1082 
1083 	clientIs64 = client64;
1084 
1085 	owningPID = proc_selfpid();
1086 
1087 	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
1088 	msgReference = IOMallocZeroData(msgReferenceSize);
1089 	if (!msgReference) {
1090 		return false;
1091 	}
1092 
1093 	remotePort = port;
1094 	msgType = type;
1095 	bcopy( reference, msgReference, referenceSize );
1096 
1097 	return true;
1098 }
1099 
void
IOServiceMessageUserNotification::invalidatePort(void)
{
	/* Clear the remote port so free() will not release the send right. */
	remotePort = MACH_PORT_NULL;
}
1105 
void
IOServiceMessageUserNotification::free( void )
{
	/* Release the client's send right unless invalidatePort() already
	 * cleared it, then free the stored reference buffer. */
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);

	super::free();
}
1116 
1117 IOReturn
_handler(void * target,void * ref,UInt32 messageType,IOService * provider,void * argument,vm_size_t argSize)1118 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1119     UInt32 messageType, IOService * provider,
1120     void * argument, vm_size_t argSize )
1121 {
1122 	IOServiceMessageUserNotification * targetObj = (IOServiceMessageUserNotification *)target;
1123 	IOReturn ret;
1124 
1125 	targetObj->retain();
1126 	ret = targetObj->handler(
1127 		ref, messageType, provider, argument, argSize);
1128 	targetObj->release();
1129 	return ret;
1130 }
1131 
/*
 * Deliver an IOService interest message to the client as a mach message.
 * Builds the message in-place via kernel_mach_msg_send_with_builder_internal:
 * kernel-processed header + one port descriptor (the provider), followed by
 * the notification header, the stored async reference, and an
 * IOServiceInterestContent64 payload carrying messageArgument.
 */
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	kern_return_t                kr;
	vm_size_t                    argSize;
	mach_msg_size_t              thisMsgSize;
	ipc_port_t                   thisPort, providerPort;

	/* Special in-kernel query: hand back the owning pid as an OSNumber
	 * instead of sending a message. */
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	if (callerArgSize == 0) {
		/* No inline payload: the argument is the pointer value itself,
		 * sized for the client's pointer width. */
		if (clientIs64) {
			argSize = sizeof(io_user_reference_t);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		/* Clamp inline payloads to the notification maximum. */
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions
	/* Encode the unrounded payload size into the type field, then round
	 * argSize up to the mach alignment granule. */
	natural_t type = msgType;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	mach_msg_size_t extraSize = kIOUserNotifyMaxMessageSize + sizeof(IOServiceInterestContent64);
	mach_msg_size_t msgSize = (mach_msg_size_t) (sizeof(PingMsgKdata) +
	    sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize);

	/* Overflow-checked total size; payloadSize excludes the kernel-built
	 * header portion. */
	if (os_add3_overflow(msgSize, offsetof(IOServiceInterestContent64, messageArgument), argSize, &thisMsgSize)) {
		return kIOReturnBadArgument;
	}
	mach_msg_size_t payloadSize = thisMsgSize - sizeof(PingMsgKdata);

	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT, NULL );
	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );

	kr = kernel_mach_msg_send_with_builder_internal(1, payloadSize,
	    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
	    MACH_MSG_TIMEOUT_NONE, NULL,
	    ^(mach_msg_header_t *hdr, mach_msg_descriptor_t *descs, void *payload){
		mach_msg_port_descriptor_t *port_desc = (mach_msg_port_descriptor_t *)descs;
		PingMsgUdata *udata = (PingMsgUdata *)payload;
		IOServiceInterestContent64 * data;
		mach_msg_size_t dataOffset;

		hdr->msgh_remote_port    = remotePort;
		hdr->msgh_local_port     = thisPort;
		hdr->msgh_bits           = MACH_MSGH_BITS_COMPLEX
		|  MACH_MSGH_BITS(
			MACH_MSG_TYPE_COPY_SEND /*remote*/,
			MACH_MSG_TYPE_MAKE_SEND /*local*/);
		hdr->msgh_size           = thisMsgSize;
		hdr->msgh_id             = kOSNotificationMessageID;

		/* body.msgh_descriptor_count is set automatically after the closure */

		port_desc[0].name              = providerPort;
		port_desc[0].disposition       = MACH_MSG_TYPE_MAKE_SEND;
		port_desc[0].type              = MACH_MSG_PORT_DESCRIPTOR;
		/* End of kernel processed data */

		udata->notifyHeader.size          = extraSize;
		udata->notifyHeader.type          = type;
		bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );

		/* data is after msgReference */
		dataOffset = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		data = (IOServiceInterestContent64 *) (((uint8_t *) udata) + dataOffset);
		data->messageType = messageType;

		if (callerArgSize == 0) {
		        assert((char *)data->messageArgument + argSize <= (char *)payload + payloadSize);
		        data->messageArgument[0] = (io_user_reference_t) messageArgument;
		        if (!clientIs64) {
			        /* 32-bit clients read the low word; mirror it high. */
		                data->messageArgument[0] |= (data->messageArgument[0] << 32);
			}
		} else {
		        assert((char *)data->messageArgument + callerArgSize <= (char *)payload + payloadSize);
		        bcopy(messageArgument, data->messageArgument, callerArgSize);
		}
	});

	/* Drop the port references taken above. */
	if (thisPort) {
		iokit_release_port( thisPort );
	}
	if (providerPort) {
		iokit_release_port( providerPort );
	}

	if (kr == MACH_SEND_NO_BUFFER) {
		return kIOReturnNoMemory;
	}

	/* Log the first IPC failure only. */
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1242 
OSObject *
IOServiceMessageUserNotification::getNextObject()
{
	/* Message notifications carry no object queue; always empty. */
	return NULL;
}
1248 
OSObject *
IOServiceMessageUserNotification::copyNextObject()
{
	/* Message notifications carry no object queue; always empty. */
	return NULL;
}
1254 
1255 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1256 
1257 #undef super
1258 #define super IOService
1259 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1260 
1261 IOLock       * gIOUserClientOwnersLock;
1262 
1263 static_assert(offsetof(IOUserClient, __opaque_end) -
1264     offsetof(IOUserClient, __opaque_start) == sizeof(void *) * 9,
1265     "ABI check: Opaque ivars for IOUserClient must be 9 void * big");
1266 
1267 void
initialize(void)1268 IOUserClient::initialize( void )
1269 {
1270 	gIOObjectPortLock       = IOLockAlloc();
1271 	gIOUserClientOwnersLock = IOLockAlloc();
1272 	gIOUserServerLock       = IOLockAlloc();
1273 	assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1274 
1275 #if IOTRACKING
1276 	IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1277 	IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1278 	IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1279 	IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1280 	IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
1281 #endif /* IOTRACKING */
1282 }
1283 
/*
 * Legacy 32-bit async-reference setter. Invalid (panics) on LP64 kernels;
 * 64-bit callers must use setAsyncReference64.
 */
void
#if __LP64__
__attribute__((__noreturn__))
#endif
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
#if __LP64__
	panic("setAsyncReference not valid for 64b");
#else
	/* Preserve the low flag bits already stored with the wake port. */
	asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
#endif
}
1301 
1302 void
setAsyncReference64(OSAsyncReference64 asyncRef,mach_port_t wakePort,mach_vm_address_t callback,io_user_reference_t refcon)1303 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1304     mach_port_t wakePort,
1305     mach_vm_address_t callback, io_user_reference_t refcon)
1306 {
1307 	asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
1308 	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1309 	asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
1310 	asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1311 }
1312 
/*
 * Task-aware variant: additionally tags the reference with kIOUCAsync64Flag
 * when the target task has a 64-bit address map.
 */
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
{
	setAsyncReference64(asyncRef, wakePort, callback, refcon);
	if (vm_map_is_64bit(get_task_map(task))) {
		asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
	}
}
1323 
/*
 * Return (retained) the console-users session dictionary whose session UID
 * matches 'uid', or NULL if none. Caller releases.
 */
static OSDictionary *
CopyConsoleUser(UInt32 uid)
{
	OSArray * array;
	OSDictionary * user = NULL;

	OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
	if ((array = OSDynamicCast(OSArray, ioProperty))) {
		/* Loop ends with user == NULL when the array is exhausted. */
		for (unsigned int idx = 0;
		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
		    idx++) {
			OSNumber * num;

			if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
			    && (uid == num->unsigned32BitValue())) {
				user->retain();
				break;
			}
		}
	}
	OSSafeReleaseNULL(ioProperty);
	return user;
}
1347 
/*
 * Return (retained) the first console-users session dictionary marked as
 * on-console, or NULL if none. Caller releases.
 */
static OSDictionary *
CopyUserOnConsole(void)
{
	OSArray * array;
	OSDictionary * user = NULL;

	OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
	if ((array = OSDynamicCast(OSArray, ioProperty))) {
		/* Loop ends with user == NULL when the array is exhausted. */
		for (unsigned int idx = 0;
		    (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
		    idx++) {
			if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
				user->retain();
				break;
			}
		}
	}
	OSSafeReleaseNULL(ioProperty);
	return user;
}
1368 
1369 IOReturn
clientHasAuthorization(task_t task,IOService * service)1370 IOUserClient::clientHasAuthorization( task_t task,
1371     IOService * service )
1372 {
1373 	proc_t p;
1374 
1375 	p = (proc_t) get_bsdtask_info(task);
1376 	if (p) {
1377 		uint64_t authorizationID;
1378 
1379 		authorizationID = proc_uniqueid(p);
1380 		if (authorizationID) {
1381 			if (service->getAuthorizationID() == authorizationID) {
1382 				return kIOReturnSuccess;
1383 			}
1384 		}
1385 	}
1386 
1387 	return kIOReturnNotPermitted;
1388 }
1389 
/*
 * Check a named privilege for the caller identified by securityToken
 * (a task_t, or an IOUCProcessToken for the secure-console check).
 * Returns kIOReturnSuccess, kIOReturnNotPrivileged, or
 * kIOReturnUnsupported for unknown privilege names.
 */
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	kern_return_t           kr;
	security_token_t        token;
	mach_msg_type_number_t  count;
	task_t                  task;
	OSDictionary *          user;
	bool                    secureConsole;


	/* Foreground privilege: denied only for GPU-denied tasks. */
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	/* Console session: the caller's audit session must match the
	 * on-console user's session. */
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t       p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	/* For the secure-console check the token wraps a task and a pid. */
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
		/* Propagate the task_info failure as-is. */
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		/* Administrator == effective uid 0. */
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		/* Local user: uid appears in the console-users array. */
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		/* Console user: uid must be the on-console session; for
		 * secure console the secure-input pid must also match. */
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1484 
1485 OSDictionary *
copyClientEntitlements(task_t task)1486 IOUserClient::copyClientEntitlements(task_t task)
1487 {
1488 	proc_t p = NULL;
1489 	pid_t pid = 0;
1490 	OSDictionary *entitlements = NULL;
1491 
1492 	p = (proc_t)get_bsdtask_info(task);
1493 	if (p == NULL) {
1494 		return NULL;
1495 	}
1496 	pid = proc_pid(p);
1497 
1498 	if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1499 		if (entitlements) {
1500 			return entitlements;
1501 		}
1502 	}
1503 
1504 	// If the above fails, thats it
1505 	return NULL;
1506 }
1507 
1508 OSDictionary *
copyClientEntitlementsVnode(vnode_t vnode,off_t offset)1509 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1510 {
1511 	OSDictionary *entitlements = NULL;
1512 
1513 	if (cs_entitlements_dictionary_copy_vnode(vnode, offset, (void**)&entitlements) != 0) {
1514 		return NULL;
1515 	}
1516 	return entitlements;
1517 }
1518 
1519 OSObject *
copyClientEntitlement(task_t task,const char * entitlement)1520 IOUserClient::copyClientEntitlement( task_t task,
1521     const char * entitlement )
1522 {
1523 	void *entitlement_object = NULL;
1524 
1525 	if (task == NULL) {
1526 		task = current_task();
1527 	}
1528 
1529 	/* Validate input arguments */
1530 	if (task == kernel_task || entitlement == NULL) {
1531 		return NULL;
1532 	}
1533 	proc_t proc = (proc_t)get_bsdtask_info(task);
1534 
1535 	kern_return_t ret = amfi->OSEntitlements.copyEntitlementAsOSObjectWithProc(
1536 		proc,
1537 		entitlement,
1538 		&entitlement_object);
1539 
1540 	if (ret != KERN_SUCCESS) {
1541 		return NULL;
1542 	}
1543 	assert(entitlement_object != NULL);
1544 
1545 	return (OSObject*)entitlement_object;
1546 }
1547 
1548 OSObject *
copyClientEntitlementVnode(struct vnode * vnode,off_t offset,const char * entitlement)1549 IOUserClient::copyClientEntitlementVnode(
1550 	struct vnode *vnode,
1551 	off_t offset,
1552 	const char *entitlement)
1553 {
1554 	OSDictionary *entitlements;
1555 	OSObject *value;
1556 
1557 	entitlements = copyClientEntitlementsVnode(vnode, offset);
1558 	if (entitlements == NULL) {
1559 		return NULL;
1560 	}
1561 
1562 	/* Fetch the entitlement value from the dictionary. */
1563 	value = entitlements->getObject(entitlement);
1564 	if (value != NULL) {
1565 		value->retain();
1566 	}
1567 
1568 	entitlements->release();
1569 	return value;
1570 }
1571 
bool
IOUserClient::init()
{
	/* If a property table already exists, skip super::init() (the ||
	 * short-circuits) and only reserve expansion data. */
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1581 
bool
IOUserClient::init(OSDictionary * dictionary)
{
	/* If a property table already exists, skip super::init(dictionary)
	 * (the || short-circuits) and only reserve expansion data. */
	if (getPropertyTable() || super::init(dictionary)) {
		return reserve();
	}

	return false;
}
1591 
/* Base implementation ignores the owning task, security id and type;
 * subclasses override to validate the caller. */
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type )
{
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1603 
1604 bool
initWithTask(task_t owningTask,void * securityID,UInt32 type,OSDictionary * properties)1605 IOUserClient::initWithTask(task_t owningTask,
1606     void * securityID,
1607     UInt32 type,
1608     OSDictionary * properties )
1609 {
1610 	bool ok;
1611 
1612 	ok = super::init( properties );
1613 	ok &= initWithTask( owningTask, securityID, type );
1614 
1615 	return ok;
1616 }
1617 
/* Allocate the expansion data and inline locks; always returns true. */
bool
IOUserClient::reserve()
{
	if (!reserved) {
		reserved = IOMallocType(ExpansionData);
	}
	setTerminateDefer(NULL, true);
	IOStatisticsRegisterCounter();
	IORWLockInlineInit(&lock);
	IOLockInlineInit(&filterLock);

	return true;
}
1631 
/* Links one owning task to one user client; threaded on both the task's
 * io_user_clients queue (taskLink) and the client's owners queue (ucLink). */
struct IOUserClientOwner {
	task_t         task;     /* owning task */
	queue_chain_t  taskLink; /* link on task_io_user_clients(task) */
	IOUserClient * uc;       /* owned user client */
	queue_chain_t  ucLink;   /* link on uc->owners */
};
1638 
/*
 * Record 'task' as an owner of this user client (idempotent). Lazily
 * initializes the owners queue, links a new IOUserClientOwner on both the
 * client's and the task's lists, and propagates the messageAppSuspended
 * flag to the task. Always returns kIOReturnSuccess.
 */
IOReturn
IOUserClient::registerOwner(task_t task)
{
	IOUserClientOwner * owner;
	IOReturn            ret;
	bool                newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	if (!owners.next) {
		/* First owner: the queue head is lazily initialized. */
		queue_init(&owners);
	} else {
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		owner = IOMallocType(IOUserClientOwner);

		owner->task = task;
		owner->uc   = this;
		queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
		queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
		if (messageAppSuspended) {
			task_set_message_app_suspended(task, true);
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1679 
/*
 * Tear down all owner links for this client. For each owning task,
 * recompute whether any of its remaining clients still wants
 * app-suspended messages and update the task flag accordingly.
 * Leaves the owners queue head NULLed (back to uninitialized state).
 */
void
IOUserClient::noMoreSenders(void)
{
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t      * taskque;
	bool                hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			/* Does any other client of this task still want
			 * app-suspended messages? */
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IOFreeType(owner, IOUserClientOwner);
		}
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1711 
1712 
/*
 * Called when a task's app-suspended state changes. Collects (under the
 * owners lock) the task's user clients that opted into these messages,
 * then delivers kIOMessageTaskAppSuspendedChange to each outside the lock.
 */
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	queue_head_t      * taskque;
	IOUserClientOwner * owner;
	OSSet             * set;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		/* Lazily allocate the set on first interested client. */
		if (!set) {
			set = OSSet::withCapacity(4);
			if (!set) {
				break;
			}
		}
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	/* Message delivery happens without holding the owners lock. */
	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient      * uc;

			uc = (typeof(uc))obj;
#if 0
			{
			        OSString          * str;
			        str = IOCopyLogNameForPID(task_pid(task));
			        IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
			        uc->getName(), task_is_app_suspended(task));
			        OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1760 
/*
 * Task-termination hook: unlink every IOUserClientOwner for the dying
 * task. Clients left with no owners are chained into a singly linked
 * "dead" list (reusing their queue head) and, outside the lock, have
 * clientDied() invoked unless already closed.
 */
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
	IOUserClientOwner * owner;
	IOUserClient      * dead;
	IOUserClient      * uc;
	queue_head_t      * taskque;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IOFreeType(owner, IOUserClientOwner);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	/* Notify and drop the retains taken above, outside the lock. */
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1804 
/* Per-task sandbox filter policy, kept on a singly linked list hanging off
 * the client's ExpansionData (reserved->filterPolicies). */
struct IOUCFilterPolicy {
	task_t             task;         /* task this policy applies to */
	io_filter_policy_t filterPolicy; /* opaque policy handle */
	IOUCFilterPolicy * next;         /* next list entry */
};
1810 
/*
 * Look up the filter policy recorded for 'task'. If none exists and
 * 'addFilterPolicy' is non-zero, record it. Returns the effective policy,
 * or 0 when the task has none and nothing was added. An existing entry
 * must match a non-zero addFilterPolicy (asserted).
 */
io_filter_policy_t
IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
{
	IOUCFilterPolicy * elem;
	io_filter_policy_t filterPolicy;

	filterPolicy = 0;
	IOLockLock(&filterLock);

	/* Linear scan of the per-client policy list for this task. */
	for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
	}

	if (elem) {
		if (addFilterPolicy) {
			assert(addFilterPolicy == elem->filterPolicy);
		}
		filterPolicy = elem->filterPolicy;
	} else if (addFilterPolicy) {
		/* Push a new entry at the head of the list. */
		elem = IOMallocType(IOUCFilterPolicy);
		elem->task               = task;
		elem->filterPolicy       = addFilterPolicy;
		elem->next               = reserved->filterPolicies;
		reserved->filterPolicies = elem;
		filterPolicy = addFilterPolicy;
	}

	IOLockUnlock(&filterLock);
	return filterPolicy;
}
1840 
/*
 * Destructor: release mappings, unregister statistics, free the per-task
 * filter-policy list and expansion data, and destroy the inline locks.
 * By this point all owner links must already be gone (asserted).
 */
void
IOUserClient::free()
{
	if (mappings) {
		mappings->release();
	}

	IOStatisticsUnregisterCounter();

	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		IOUCFilterPolicy * elem;
		IOUCFilterPolicy * nextElem;
		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
			nextElem = elem->next;
			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
			}
			IOFreeType(elem, IOUCFilterPolicy);
		}
		IOFreeType(reserved, ExpansionData);
		IORWLockInlineDestroy(&lock);
		IOLockInlineDestroy(&filterLock);
	}

	super::free();
}
1870 
1871 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1872 
OSDefineMetaClassAndAbstractStructors(IOUserClient2022,IOUserClient)1873 OSDefineMetaClassAndAbstractStructors( IOUserClient2022, IOUserClient )
1874 
1875 
1876 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1877 
/*
 * Invoked when the owning client process dies. Calls clientClose() at
 * most once per client (guarded by an atomic swap on 'closed'), except
 * for shared instances which always get the call.
 */
IOReturn
IOUserClient::clientDied( void )
{
	IOReturn ret = kIOReturnNotReady;

	if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
		ret = clientClose();
	}

	return ret;
}
1889 
/* Default implementation; subclasses override to tear down the connection. */
IOReturn
IOUserClient::clientClose( void )
{
	return kIOReturnUnsupported;
}
1895 
/* Default implementation; subclasses override to expose their provider. */
IOService *
IOUserClient::getService( void )
{
	return NULL;
}
1901 
/* Default implementation; subclasses override to accept a wake port. */
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t     /* port */,
	UInt32          /* type */,
	UInt32          /* refCon */)
{
	return kIOReturnUnsupported;
}
1910 
/* 64-bit refCon shim: forwards to the legacy overload, truncating refCon
 * to 32 bits. */
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t port,
	UInt32          type,
	io_user_reference_t refCon)
{
	return registerNotificationPort(port, type, (UInt32) refCon);
}
1919 
/* Default implementation; subclasses override to supply a semaphore. */
IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type,
    semaphore_t * semaphore )
{
	return kIOReturnUnsupported;
}
1926 
/* Default implementation; subclasses override to link client connections. */
IOReturn
IOUserClient::connectClient( IOUserClient * /* client */ )
{
	return kIOReturnUnsupported;
}
1932 
/* Default implementation; subclasses override to supply shared memory. */
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    IOMemoryDescriptor ** memory )
{
	return kIOReturnUnsupported;
}
1940 
1941 IOReturn
clientMemoryForType(UInt32 type,IOOptionBits * options,OSSharedPtr<IOMemoryDescriptor> & memory)1942 IOUserClient::clientMemoryForType( UInt32 type,
1943     IOOptionBits * options,
1944     OSSharedPtr<IOMemoryDescriptor>& memory )
1945 {
1946 	IOMemoryDescriptor* memoryRaw = nullptr;
1947 	IOReturn result = clientMemoryForType(type, options, &memoryRaw);
1948 	memory.reset(memoryRaw, OSNoRetain);
1949 	return result;
1950 }
1951 
#if !__LP64__
/* Legacy 32-bit entry point; unimplemented (use mapClientMemory64). */
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits            type,
	task_t                  task,
	IOOptionBits            mapFlags,
	IOVirtualAddress        atAddress )
{
	return NULL;
}
#endif
1963 
/*
 * Map the client memory for 'type' into 'task'. Asks the subclass for a
 * descriptor via clientMemoryForType(), then creates the mapping with the
 * caller's user-option flags merged over the subclass options. Returns
 * the mapping (caller owns it) or NULL on failure.
 */
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits            type,
	task_t                  task,
	IOOptionBits            mapFlags,
	mach_vm_address_t       atAddress )
{
	IOReturn            err;
	IOOptionBits        options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap *       map = NULL;

	err = clientMemoryForType((UInt32) type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		FAKE_STACK_FRAME(getMetaClass());

		/* Caller-supplied user options override the subclass's. */
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		/* The mapping holds its own reference to the descriptor. */
		memory->release();

		FAKE_STACK_FRAME_END();
	}

	return map;
}
1991 
/*
 * Create a send right for 'obj' in 'task' and return its port name through
 * clientObj. Consumes the caller's reference on 'obj'.
 */
IOReturn
IOUserClient::exportObjectToClient(task_t task,
    OSObject *obj, io_object_t *clientObj)
{
	mach_port_name_t    name;

	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );

	*clientObj = (io_object_t)(uintptr_t) name;

	if (obj) {
		obj->release();
	}

	return kIOReturnSuccess;
}
2008 
2009 IOReturn
copyPortNameForObjectInTask(task_t task,OSObject * obj,mach_port_name_t * port_name)2010 IOUserClient::copyPortNameForObjectInTask(task_t task,
2011     OSObject *obj, mach_port_name_t * port_name)
2012 {
2013 	mach_port_name_t    name;
2014 
2015 	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
2016 
2017 	*(mach_port_name_t *) port_name = name;
2018 
2019 	return kIOReturnSuccess;
2020 }
2021 
/*
 * Resolve an identity port name in 'task' back to its object (retained).
 * Returns kIOReturnIPCError when the name does not resolve.
 */
IOReturn
IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
    OSObject **obj)
{
	OSObject * object;

	object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);

	*obj = object;

	return object ? kIOReturnSuccess : kIOReturnIPCError;
}
2034 
2035 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSSharedPtr<OSObject> & obj)2036 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2037     OSSharedPtr<OSObject>& obj)
2038 {
2039 	OSObject* objRaw = NULL;
2040 	IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2041 	obj.reset(objRaw, OSNoRetain);
2042 	return result;
2043 }
2044 
/* Adjust the user-reference count of a send right in 'task' by 'delta'. */
IOReturn
IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
	return iokit_mod_send_right(task, port_name, delta);
}
2050 
/* Default implementation; legacy subclasses override to publish methods. */
IOExternalMethod *
IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2056 
/* Default implementation; legacy subclasses override to publish methods. */
IOExternalAsyncMethod *
IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2062 
IOExternalTrap *
IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
	// User-trap lookup hook; subclasses override to expose traps.
	// The base class exposes none.
	return NULL;
}
2069 
2070 #pragma clang diagnostic push
2071 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2072 
2073 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
2074 // functions can break clients of kexts implementing getExternalMethodForIndex()
2075 IOExternalMethod *
2076 IOUserClient::
getTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2077 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
2078 {
2079 	IOExternalMethod *method = getExternalMethodForIndex(index);
2080 
2081 	if (method) {
2082 		*targetP = (IOService *) method->object;
2083 	}
2084 
2085 	return method;
2086 }
2087 
2088 IOExternalMethod *
2089 IOUserClient::
getTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2090 getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2091 {
2092 	IOService* targetPRaw = NULL;
2093 	IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
2094 	targetP.reset(targetPRaw, OSRetain);
2095 	return result;
2096 }
2097 
2098 IOExternalAsyncMethod *
2099 IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2100 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
2101 {
2102 	IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
2103 
2104 	if (method) {
2105 		*targetP = (IOService *) method->object;
2106 	}
2107 
2108 	return method;
2109 }
2110 
2111 IOExternalAsyncMethod *
2112 IOUserClient::
getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2113 getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2114 {
2115 	IOService* targetPRaw = NULL;
2116 	IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
2117 	targetP.reset(targetPRaw, OSRetain);
2118 	return result;
2119 }
2120 
2121 IOExternalTrap *
2122 IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP,UInt32 index)2123 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
2124 {
2125 	IOExternalTrap *trap = getExternalTrapForIndex(index);
2126 
2127 	if (trap) {
2128 		*targetP = trap->object;
2129 	}
2130 
2131 	return trap;
2132 }
2133 #pragma clang diagnostic pop
2134 
2135 IOReturn
releaseAsyncReference64(OSAsyncReference64 reference)2136 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2137 {
2138 	mach_port_t port;
2139 	port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2140 
2141 	if (MACH_PORT_NULL != port) {
2142 		iokit_release_port_send(port);
2143 	}
2144 
2145 	return kIOReturnSuccess;
2146 }
2147 
2148 IOReturn
releaseNotificationPort(mach_port_t port)2149 IOUserClient::releaseNotificationPort(mach_port_t port)
2150 {
2151 	if (MACH_PORT_NULL != port) {
2152 		iokit_release_port_send(port);
2153 	}
2154 
2155 	return kIOReturnSuccess;
2156 }
2157 
2158 IOReturn
sendAsyncResult(OSAsyncReference reference,IOReturn result,void * args[],UInt32 numArgs)2159 IOUserClient::sendAsyncResult(OSAsyncReference reference,
2160     IOReturn result, void *args[], UInt32 numArgs)
2161 {
2162 	OSAsyncReference64  reference64;
2163 	OSBoundedArray<io_user_reference_t, kMaxAsyncArgs> args64;
2164 	unsigned int        idx;
2165 
2166 	if (numArgs > kMaxAsyncArgs) {
2167 		return kIOReturnMessageTooLarge;
2168 	}
2169 
2170 	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2171 		reference64[idx] = REF64(reference[idx]);
2172 	}
2173 
2174 	for (idx = 0; idx < numArgs; idx++) {
2175 		args64[idx] = REF64(args[idx]);
2176 	}
2177 
2178 	return sendAsyncResult64(reference64, result, args64.data(), numArgs);
2179 }
2180 
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Forward to the common implementation, honoring caller-supplied
	// options (e.g. kIOUserNotifyOptionCanDrop).
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2187 
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	// Forward to the common implementation with no options (the send
	// fails, rather than drops, when the reply queue is full).
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2194 
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Deliver an async completion to user space by sending a kernel-built
	// Mach message to the reply port packed into reference[0]. The wire
	// format differs for 32- and 64-bit clients; kIOUCAsync64Flag in
	// reference[0] selects which union member is populated.
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader     notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t                 args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64   notifyHdr;
				IOAsyncCompletionContent asyncContent;
				// packed: the 64-bit args follow the content with no padding
				io_user_reference_t      args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg      replyMsg;
	mach_port_t   replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port  = NULL;
	replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit client: msgh_size is trimmed so only numArgs argument
		// slots are actually transmitted.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		// 32-bit client: references and arguments are truncated to
		// 32 bits via REF32 before being placed in the message.
		unsigned int idx;

		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		/* Skip reference[0] which is left as 0 from the earlier bzero */
		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		// Caller tolerates drops: a zero send timeout makes a full queue
		// return MACH_SEND_TIMED_OUT instead of blocking.
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	// Log only the first failure per reference; the flag set below
	// suppresses repeats. NOTE(review): the log text names _proper even
	// when the _with_options path above was taken.
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2285 
2286 
2287 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2288 
2289 extern "C" {
/*
 * CHECK(cls, obj, out): declare `out`, downcast `obj` to `cls` with
 * OSDynamicCast, and make the enclosing MIG routine return
 * kIOReturnBadArgument when the object is not of the expected class.
 */
#define CHECK(cls, obj, out)                      \
	cls * out;                              \
	if( !(out = OSDynamicCast( cls, obj)))  \
	    return( kIOReturnBadArgument )

/*
 * CHECKLOCKED(cls, obj, out): like CHECK, but `obj` must be an
 * IOUserIterator wrapper (bound to `oIter`); `out` is the wrapped
 * userIteratorObject downcast to `cls`.
 */
#define CHECKLOCKED(cls, obj, out)                                        \
	IOUserIterator * oIter;                                         \
	cls * out;                                                      \
	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))              \
	    return (kIOReturnBadArgument);                              \
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject)))     \
	    return (kIOReturnBadArgument)
2302 
2303 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2304 
2305 // Create a vm_map_copy_t or kalloc'ed data for memory
2306 // to be copied out. ipc will free after the copyout.
2307 
2308 static kern_return_t
copyoutkdata(const void * data,vm_size_t len,io_buf_ptr_t * buf)2309 copyoutkdata( const void * data, vm_size_t len,
2310     io_buf_ptr_t * buf )
2311 {
2312 	kern_return_t       err;
2313 	vm_map_copy_t       copy;
2314 
2315 	err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2316 	    false /* src_destroy */, &copy);
2317 
2318 	assert( err == KERN_SUCCESS );
2319 	if (err == KERN_SUCCESS) {
2320 		*buf = (char *) copy;
2321 	}
2322 
2323 	return err;
2324 }
2325 
2326 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2327 
/* Routine io_server_version */
kern_return_t
is_io_server_version(
	mach_port_t main_port,
	uint64_t *version)
{
	// Report the IOKit MIG interface version this kernel was built with.
	*version = IOKIT_SERVER_VERSION;
	return kIOReturnSuccess;
}
2337 
2338 /* Routine io_object_get_class */
2339 kern_return_t
is_io_object_get_class(io_object_t object,io_name_t className)2340 is_io_object_get_class(
2341 	io_object_t object,
2342 	io_name_t className )
2343 {
2344 	const OSMetaClass* my_obj = NULL;
2345 
2346 	if (!object) {
2347 		return kIOReturnBadArgument;
2348 	}
2349 
2350 	my_obj = object->getMetaClass();
2351 	if (!my_obj) {
2352 		return kIOReturnNotFound;
2353 	}
2354 
2355 	strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2356 
2357 	return kIOReturnSuccess;
2358 }
2359 
2360 /* Routine io_object_get_superclass */
2361 kern_return_t
is_io_object_get_superclass(mach_port_t main_port,io_name_t obj_name,io_name_t class_name)2362 is_io_object_get_superclass(
2363 	mach_port_t main_port,
2364 	io_name_t obj_name,
2365 	io_name_t class_name)
2366 {
2367 	IOReturn            ret;
2368 	const OSMetaClass * meta;
2369 	const OSMetaClass * super;
2370 	const OSSymbol    * name;
2371 	const char        * cstr;
2372 
2373 	if (!obj_name || !class_name) {
2374 		return kIOReturnBadArgument;
2375 	}
2376 	if (main_port != main_device_port) {
2377 		return kIOReturnNotPrivileged;
2378 	}
2379 
2380 	ret = kIOReturnNotFound;
2381 	meta = NULL;
2382 	do{
2383 		name = OSSymbol::withCString(obj_name);
2384 		if (!name) {
2385 			break;
2386 		}
2387 		meta = OSMetaClass::copyMetaClassWithName(name);
2388 		if (!meta) {
2389 			break;
2390 		}
2391 		super = meta->getSuperClass();
2392 		if (!super) {
2393 			break;
2394 		}
2395 		cstr = super->getClassName();
2396 		if (!cstr) {
2397 			break;
2398 		}
2399 		strlcpy(class_name, cstr, sizeof(io_name_t));
2400 		ret = kIOReturnSuccess;
2401 	}while (false);
2402 
2403 	OSSafeReleaseNULL(name);
2404 	if (meta) {
2405 		meta->releaseMetaClass();
2406 	}
2407 
2408 	return ret;
2409 }
2410 
2411 /* Routine io_object_get_bundle_identifier */
2412 kern_return_t
is_io_object_get_bundle_identifier(mach_port_t main_port,io_name_t obj_name,io_name_t bundle_name)2413 is_io_object_get_bundle_identifier(
2414 	mach_port_t main_port,
2415 	io_name_t obj_name,
2416 	io_name_t bundle_name)
2417 {
2418 	IOReturn            ret;
2419 	const OSMetaClass * meta;
2420 	const OSSymbol    * name;
2421 	const OSSymbol    * identifier;
2422 	const char        * cstr;
2423 
2424 	if (!obj_name || !bundle_name) {
2425 		return kIOReturnBadArgument;
2426 	}
2427 	if (main_port != main_device_port) {
2428 		return kIOReturnNotPrivileged;
2429 	}
2430 
2431 	ret = kIOReturnNotFound;
2432 	meta = NULL;
2433 	do{
2434 		name = OSSymbol::withCString(obj_name);
2435 		if (!name) {
2436 			break;
2437 		}
2438 		meta = OSMetaClass::copyMetaClassWithName(name);
2439 		if (!meta) {
2440 			break;
2441 		}
2442 		identifier = meta->getKmodName();
2443 		if (!identifier) {
2444 			break;
2445 		}
2446 		cstr = identifier->getCStringNoCopy();
2447 		if (!cstr) {
2448 			break;
2449 		}
2450 		strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2451 		ret = kIOReturnSuccess;
2452 	}while (false);
2453 
2454 	OSSafeReleaseNULL(name);
2455 	if (meta) {
2456 		meta->releaseMetaClass();
2457 	}
2458 
2459 	return ret;
2460 }
2461 
2462 /* Routine io_object_conforms_to */
2463 kern_return_t
is_io_object_conforms_to(io_object_t object,io_name_t className,boolean_t * conforms)2464 is_io_object_conforms_to(
2465 	io_object_t object,
2466 	io_name_t className,
2467 	boolean_t *conforms )
2468 {
2469 	if (!object) {
2470 		return kIOReturnBadArgument;
2471 	}
2472 
2473 	*conforms = (NULL != object->metaCast( className ));
2474 
2475 	return kIOReturnSuccess;
2476 }
2477 
2478 /* Routine io_object_get_retain_count */
2479 kern_return_t
is_io_object_get_retain_count(io_object_t object,uint32_t * retainCount)2480 is_io_object_get_retain_count(
2481 	io_object_t object,
2482 	uint32_t *retainCount )
2483 {
2484 	if (!object) {
2485 		return kIOReturnBadArgument;
2486 	}
2487 
2488 	*retainCount = object->getRetainCount();
2489 	return kIOReturnSuccess;
2490 }
2491 
2492 /* Routine io_iterator_next */
2493 kern_return_t
is_io_iterator_next(io_object_t iterator,io_object_t * object)2494 is_io_iterator_next(
2495 	io_object_t iterator,
2496 	io_object_t *object )
2497 {
2498 	IOReturn    ret;
2499 	OSObject *  obj;
2500 	OSIterator * iter;
2501 	IOUserIterator * uiter;
2502 
2503 	if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2504 		obj = uiter->copyNextObject();
2505 	} else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2506 		obj = iter->getNextObject();
2507 		if (obj) {
2508 			obj->retain();
2509 		}
2510 	} else {
2511 		return kIOReturnBadArgument;
2512 	}
2513 
2514 	if (obj) {
2515 		*object = obj;
2516 		ret = kIOReturnSuccess;
2517 	} else {
2518 		ret = kIOReturnNoDevice;
2519 	}
2520 
2521 	return ret;
2522 }
2523 
2524 /* Routine io_iterator_reset */
2525 kern_return_t
is_io_iterator_reset(io_object_t iterator)2526 is_io_iterator_reset(
2527 	io_object_t iterator )
2528 {
2529 	CHECK( OSIterator, iterator, iter );
2530 
2531 	iter->reset();
2532 
2533 	return kIOReturnSuccess;
2534 }
2535 
2536 /* Routine io_iterator_is_valid */
2537 kern_return_t
is_io_iterator_is_valid(io_object_t iterator,boolean_t * is_valid)2538 is_io_iterator_is_valid(
2539 	io_object_t iterator,
2540 	boolean_t *is_valid )
2541 {
2542 	CHECK( OSIterator, iterator, iter );
2543 
2544 	*is_valid = iter->isValid();
2545 
2546 	return kIOReturnSuccess;
2547 }
2548 
2549 static kern_return_t
internal_io_service_match_property_table(io_service_t _service,const char * matching,mach_msg_type_number_t matching_size,boolean_t * matches)2550 internal_io_service_match_property_table(
2551 	io_service_t _service,
2552 	const char * matching,
2553 	mach_msg_type_number_t matching_size,
2554 	boolean_t *matches)
2555 {
2556 	CHECK( IOService, _service, service );
2557 
2558 	kern_return_t       kr;
2559 	OSObject *          obj;
2560 	OSDictionary *      dict;
2561 
2562 	assert(matching_size);
2563 
2564 
2565 	obj = OSUnserializeXML(matching, matching_size);
2566 
2567 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2568 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2569 		*matches = service->passiveMatch( dict );
2570 		kr = kIOReturnSuccess;
2571 	} else {
2572 		kr = kIOReturnBadArgument;
2573 	}
2574 
2575 	if (obj) {
2576 		obj->release();
2577 	}
2578 
2579 	return kr;
2580 }
2581 
/* Routine io_service_match_property_table */
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	// Legacy in-line string variant; superseded by the _ool/_bin forms.
	return kIOReturnUnsupported;
}
2591 
2592 
2593 /* Routine io_service_match_property_table_ool */
2594 kern_return_t
is_io_service_match_property_table_ool(io_object_t service,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,kern_return_t * result,boolean_t * matches)2595 is_io_service_match_property_table_ool(
2596 	io_object_t service,
2597 	io_buf_ptr_t matching,
2598 	mach_msg_type_number_t matchingCnt,
2599 	kern_return_t *result,
2600 	boolean_t *matches )
2601 {
2602 	kern_return_t         kr;
2603 	vm_offset_t           data;
2604 	vm_map_offset_t       map_data;
2605 
2606 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2607 	data = CAST_DOWN(vm_offset_t, map_data);
2608 
2609 	if (KERN_SUCCESS == kr) {
2610 		// must return success after vm_map_copyout() succeeds
2611 		*result = internal_io_service_match_property_table(service,
2612 		    (const char *)data, matchingCnt, matches );
2613 		vm_deallocate( kernel_map, data, matchingCnt );
2614 	}
2615 
2616 	return kr;
2617 }
2618 
/* Routine io_service_match_property_table_bin */
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	// In-band variant; shares the common matching helper directly.
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2629 
2630 static kern_return_t
internal_io_service_get_matching_services(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_iterator_t * existing)2631 internal_io_service_get_matching_services(
2632 	mach_port_t main_port,
2633 	const char * matching,
2634 	mach_msg_type_number_t matching_size,
2635 	io_iterator_t *existing )
2636 {
2637 	kern_return_t       kr;
2638 	OSObject *          obj;
2639 	OSDictionary *      dict;
2640 
2641 	if (main_port != main_device_port) {
2642 		return kIOReturnNotPrivileged;
2643 	}
2644 
2645 	assert(matching_size);
2646 	obj = OSUnserializeXML(matching, matching_size);
2647 
2648 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2649 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2650 		*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2651 		kr = kIOReturnSuccess;
2652 	} else {
2653 		kr = kIOReturnBadArgument;
2654 	}
2655 
2656 	if (obj) {
2657 		obj->release();
2658 	}
2659 
2660 	return kr;
2661 }
2662 
/* Routine io_service_get_matching_services */
kern_return_t
is_io_service_get_matching_services(
	mach_port_t main_port,
	io_string_t matching,
	io_iterator_t *existing )
{
	// Legacy in-line string variant; superseded by the _ool/_bin forms.
	return kIOReturnUnsupported;
}
2672 
2673 /* Routine io_service_get_matching_services_ool */
2674 kern_return_t
is_io_service_get_matching_services_ool(mach_port_t main_port,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,kern_return_t * result,io_object_t * existing)2675 is_io_service_get_matching_services_ool(
2676 	mach_port_t main_port,
2677 	io_buf_ptr_t matching,
2678 	mach_msg_type_number_t matchingCnt,
2679 	kern_return_t *result,
2680 	io_object_t *existing )
2681 {
2682 	kern_return_t       kr;
2683 	vm_offset_t         data;
2684 	vm_map_offset_t     map_data;
2685 
2686 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2687 	data = CAST_DOWN(vm_offset_t, map_data);
2688 
2689 	if (KERN_SUCCESS == kr) {
2690 		// must return success after vm_map_copyout() succeeds
2691 		// and mig will copy out objects on success
2692 		*existing = NULL;
2693 		*result = internal_io_service_get_matching_services(main_port,
2694 		    (const char *) data, matchingCnt, existing);
2695 		vm_deallocate( kernel_map, data, matchingCnt );
2696 	}
2697 
2698 	return kr;
2699 }
2700 
/* Routine io_service_get_matching_services_bin */
kern_return_t
is_io_service_get_matching_services_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
	// In-band variant; shares the common matching helper directly.
	return internal_io_service_get_matching_services(main_port, matching, matchingCnt, existing);
}
2711 
2712 
2713 static kern_return_t
internal_io_service_get_matching_service(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_service_t * service)2714 internal_io_service_get_matching_service(
2715 	mach_port_t main_port,
2716 	const char * matching,
2717 	mach_msg_type_number_t matching_size,
2718 	io_service_t *service )
2719 {
2720 	kern_return_t       kr;
2721 	OSObject *          obj;
2722 	OSDictionary *      dict;
2723 
2724 	if (main_port != main_device_port) {
2725 		return kIOReturnNotPrivileged;
2726 	}
2727 
2728 	assert(matching_size);
2729 	obj = OSUnserializeXML(matching, matching_size);
2730 
2731 	if ((dict = OSDynamicCast( OSDictionary, obj))) {
2732 		IOTaskRegistryCompatibilityMatching(current_task(), dict);
2733 		*service = IOService::copyMatchingService( dict );
2734 		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2735 	} else {
2736 		kr = kIOReturnBadArgument;
2737 	}
2738 
2739 	if (obj) {
2740 		obj->release();
2741 	}
2742 
2743 	return kr;
2744 }
2745 
/* Routine io_service_get_matching_service */
kern_return_t
is_io_service_get_matching_service(
	mach_port_t main_port,
	io_string_t matching,
	io_service_t *service )
{
	// Legacy in-line string variant; superseded by the _ool/_bin forms.
	return kIOReturnUnsupported;
}
2755 
2756 /* Routine io_service_get_matching_services_ool */
2757 kern_return_t
is_io_service_get_matching_service_ool(mach_port_t main_port,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,kern_return_t * result,io_object_t * service)2758 is_io_service_get_matching_service_ool(
2759 	mach_port_t main_port,
2760 	io_buf_ptr_t matching,
2761 	mach_msg_type_number_t matchingCnt,
2762 	kern_return_t *result,
2763 	io_object_t *service )
2764 {
2765 	kern_return_t       kr;
2766 	vm_offset_t         data;
2767 	vm_map_offset_t     map_data;
2768 
2769 	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2770 	data = CAST_DOWN(vm_offset_t, map_data);
2771 
2772 	if (KERN_SUCCESS == kr) {
2773 		// must return success after vm_map_copyout() succeeds
2774 		// and mig will copy out objects on success
2775 		*service = NULL;
2776 		*result = internal_io_service_get_matching_service(main_port,
2777 		    (const char *) data, matchingCnt, service );
2778 		vm_deallocate( kernel_map, data, matchingCnt );
2779 	}
2780 
2781 	return kr;
2782 }
2783 
/* Routine io_service_get_matching_service_bin */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	// In-band variant; shares the common matching helper directly.
	return internal_io_service_get_matching_service(main_port, matching, matchingCnt, service);
}
2794 
/*
 * Common implementation behind the io_service_add_notification* MIG
 * routines. Unserializes the matching dictionary, wraps the caller's
 * wake port and async reference in an IOServiceUserNotification, and
 * arms it via IOService::addMatchingNotification(). On success,
 * *notification returns the user-notification object; on failure,
 * everything created here is torn down.
 */
static kern_return_t
internal_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	const char * matching,
	size_t matching_size,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceUserNotification * userNotify = NULL;
	IONotifier *                notify = NULL;
	const OSSymbol *            sym;
	OSObject *                  obj;
	OSDictionary *              dict;
	IOReturn                    err;
	natural_t                   userMsgType;

	// Notification registration is only allowed via the main device port.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	// Single-pass loop: "continue" below acts as a structured jump to
	// the shared cleanup that follows the loop.
	do {
		err = kIOReturnNoResources;

		// Cap serialized matching data; the early return here happens
		// before sym/obj are created, so nothing leaks.
		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		// NOTE(review): on OSSymbol failure err is set but there is no
		// continue; execution proceeds with sym == NULL and falls into
		// the kLastIOKitNotificationType branch below — confirm intended.
		if (!(sym = OSSymbol::withCString( notification_type ))) {
			err = kIOReturnNoResources;
		}

		assert(matching_size);
		obj = OSUnserializeXML(matching, matching_size);
		dict = OSDynamicCast(OSDictionary, obj);
		if (!dict) {
			err = kIOReturnBadArgument;
			continue;
		}
		IOTaskRegistryCompatibilityMatching(current_task(), dict);

		// Map the notification-type symbol to the message type user
		// space will receive.
		if ((sym == gIOPublishNotification)
		    || (sym == gIOFirstPublishNotification)) {
			userMsgType = kIOServicePublishNotificationType;
		} else if ((sym == gIOMatchedNotification)
		    || (sym == gIOFirstMatchNotification)) {
			userMsgType = kIOServiceMatchedNotificationType;
		} else if ((sym == gIOTerminatedNotification)
		    || (sym == gIOWillTerminateNotification)) {
			userMsgType = kIOServiceTerminatedNotificationType;
		} else {
			userMsgType = kLastIOKitNotificationType;
		}

		userNotify = new IOServiceUserNotification;

		if (userNotify && !userNotify->init( port, userMsgType,
		    reference, referenceSize, client64)) {
			userNotify->release();
			userNotify = NULL;
		}
		if (!userNotify) {
			continue;
		}

		notify = IOService::addMatchingNotification( sym, dict,
		    &userNotify->_handler, userNotify );
		if (notify) {
			*notification = userNotify;
			userNotify->setNotification( notify );
			err = kIOReturnSuccess;
		} else {
			err = kIOReturnUnsupported;
		}
	} while (false);

	// Failure path: unhook and destroy the half-built notification.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	if (sym) {
		sym->release();
	}
	if (obj) {
		obj->release();
	}

	return err;
}
2890 
2891 
/* Routine io_service_add_notification */
kern_return_t
is_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	// Legacy in-line string variant; superseded by the _bin/_ool forms.
	return kIOReturnUnsupported;
}
2905 
/* Routine io_service_add_notification_64 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	// Legacy in-line string variant; superseded by the _bin/_ool forms.
	return kIOReturnUnsupported;
}
2919 
2920 /* Routine io_service_add_notification_bin */
2921 kern_return_t
is_io_service_add_notification_bin(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)2922 is_io_service_add_notification_bin
2923 (
2924 	mach_port_t main_port,
2925 	io_name_t notification_type,
2926 	io_struct_inband_t matching,
2927 	mach_msg_type_number_t matchingCnt,
2928 	mach_port_t wake_port,
2929 	io_async_ref_t reference,
2930 	mach_msg_type_number_t referenceCnt,
2931 	io_object_t *notification)
2932 {
2933 	io_async_ref_t zreference;
2934 
2935 	if (referenceCnt > ASYNC_REF_COUNT) {
2936 		return kIOReturnBadArgument;
2937 	}
2938 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2939 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2940 
2941 	return internal_io_service_add_notification(main_port, notification_type,
2942 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2943 	           false, notification);
2944 }
2945 
2946 /* Routine io_service_add_notification_bin_64 */
2947 kern_return_t
is_io_service_add_notification_bin_64(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)2948 is_io_service_add_notification_bin_64
2949 (
2950 	mach_port_t main_port,
2951 	io_name_t notification_type,
2952 	io_struct_inband_t matching,
2953 	mach_msg_type_number_t matchingCnt,
2954 	mach_port_t wake_port,
2955 	io_async_ref64_t reference,
2956 	mach_msg_type_number_t referenceCnt,
2957 	io_object_t *notification)
2958 {
2959 	io_async_ref64_t zreference;
2960 
2961 	if (referenceCnt > ASYNC_REF64_COUNT) {
2962 		return kIOReturnBadArgument;
2963 	}
2964 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2965 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2966 
2967 	return internal_io_service_add_notification(main_port, notification_type,
2968 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2969 	           true, notification);
2970 }
2971 
/*
 * Common helper for the out-of-line (OOL) add_notification MIG routines.
 * The matching dictionary arrives as a vm_map_copy_t; it is copied out
 * into the kernel map, passed to internal_io_service_add_notification(),
 * and then deallocated.  The MIG-level status is the copyout result (kr);
 * the registration status itself is reported through *result.
 */
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t       kr;
	vm_offset_t         data;
	vm_map_offset_t     map_data;

	// Materialize the caller's OOL matching data in the kernel map.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		*result = internal_io_service_add_notification( main_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		// The copied-out buffer is no longer needed after registration.
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
3003 
3004 /* Routine io_service_add_notification_ool */
3005 kern_return_t
is_io_service_add_notification_ool(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3006 is_io_service_add_notification_ool(
3007 	mach_port_t main_port,
3008 	io_name_t notification_type,
3009 	io_buf_ptr_t matching,
3010 	mach_msg_type_number_t matchingCnt,
3011 	mach_port_t wake_port,
3012 	io_async_ref_t reference,
3013 	mach_msg_type_number_t referenceCnt,
3014 	kern_return_t *result,
3015 	io_object_t *notification )
3016 {
3017 	io_async_ref_t zreference;
3018 
3019 	if (referenceCnt > ASYNC_REF_COUNT) {
3020 		return kIOReturnBadArgument;
3021 	}
3022 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3023 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3024 
3025 	return internal_io_service_add_notification_ool(main_port, notification_type,
3026 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3027 	           false, result, notification);
3028 }
3029 
3030 /* Routine io_service_add_notification_ool_64 */
3031 kern_return_t
is_io_service_add_notification_ool_64(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3032 is_io_service_add_notification_ool_64(
3033 	mach_port_t main_port,
3034 	io_name_t notification_type,
3035 	io_buf_ptr_t matching,
3036 	mach_msg_type_number_t matchingCnt,
3037 	mach_port_t wake_port,
3038 	io_async_ref64_t reference,
3039 	mach_msg_type_number_t referenceCnt,
3040 	kern_return_t *result,
3041 	io_object_t *notification )
3042 {
3043 	io_async_ref64_t zreference;
3044 
3045 	if (referenceCnt > ASYNC_REF64_COUNT) {
3046 		return kIOReturnBadArgument;
3047 	}
3048 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3049 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3050 
3051 	return internal_io_service_add_notification_ool(main_port, notification_type,
3052 	           matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3053 	           true, result, notification);
3054 }
3055 
/* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	// Legacy entry point: forwards to is_io_service_add_notification()
	// with a single reference word.
	return is_io_service_add_notification( main_port, notification_type,
	           matching, port, &ref, 1, notification );
}
3070 
3071 
/*
 * Shared implementation for the add_interest_notification MIG routines.
 * Registers 'port' to receive interest messages of 'type_of_interest'
 * for '_service'.  'reference'/'referenceSize' are the caller's opaque
 * async reference words; 'client64' is forwarded to the notification's
 * init (distinguishes 64-bit clients).
 */
static kern_return_t
internal_io_service_add_interest_notification(
	io_object_t _service,
	io_name_t type_of_interest,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceMessageUserNotification *  userNotify = NULL;
	IONotifier *                        notify = NULL;
	const OSSymbol *                    sym;
	IOReturn                            err;

	// Validate that _service is an IOService (declares 'service').
	CHECK( IOService, _service, service );

	err = kIOReturnNoResources;
	if ((sym = OSSymbol::withCString( type_of_interest ))) {
		do {
			// Build the user-notification wrapper that delivers
			// kIOServiceMessageNotificationType messages to 'port'.
			userNotify = new IOServiceMessageUserNotification;

			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
			    reference, referenceSize, client64 )) {
				userNotify->release();
				userNotify = NULL;
			}
			if (!userNotify) {
				continue;
			}

			// Hook the wrapper into the service's interest machinery.
			notify = service->registerInterest( sym,
			    &userNotify->_handler, userNotify );
			if (notify) {
				*notification = userNotify;
				userNotify->setNotification( notify );
				err = kIOReturnSuccess;
			} else {
				err = kIOReturnUnsupported;
			}
		} while (false);

		sym->release();
	}

	// On failure, tear the wrapper down so no port/notifier refs leak.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	return err;
}
3126 
3127 /* Routine io_service_add_message_notification */
3128 kern_return_t
is_io_service_add_interest_notification(io_object_t service,io_name_t type_of_interest,mach_port_t port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3129 is_io_service_add_interest_notification(
3130 	io_object_t service,
3131 	io_name_t type_of_interest,
3132 	mach_port_t port,
3133 	io_async_ref_t reference,
3134 	mach_msg_type_number_t referenceCnt,
3135 	io_object_t * notification )
3136 {
3137 	io_async_ref_t zreference;
3138 
3139 	if (referenceCnt > ASYNC_REF_COUNT) {
3140 		return kIOReturnBadArgument;
3141 	}
3142 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3143 	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3144 
3145 	return internal_io_service_add_interest_notification(service, type_of_interest,
3146 	           port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3147 }
3148 
3149 /* Routine io_service_add_interest_notification_64 */
3150 kern_return_t
is_io_service_add_interest_notification_64(io_object_t service,io_name_t type_of_interest,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3151 is_io_service_add_interest_notification_64(
3152 	io_object_t service,
3153 	io_name_t type_of_interest,
3154 	mach_port_t wake_port,
3155 	io_async_ref64_t reference,
3156 	mach_msg_type_number_t referenceCnt,
3157 	io_object_t *notification )
3158 {
3159 	io_async_ref64_t zreference;
3160 
3161 	if (referenceCnt > ASYNC_REF64_COUNT) {
3162 		return kIOReturnBadArgument;
3163 	}
3164 	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3165 	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3166 
3167 	return internal_io_service_add_interest_notification(service, type_of_interest,
3168 	           wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3169 }
3170 
3171 
/* Routine io_service_acknowledge_notification */
kern_return_t
is_io_service_acknowledge_notification(
	io_object_t _service,
	natural_t notify_ref,
	natural_t response )
{
	// Validate that _service is an IOService (declares 'service').
	CHECK( IOService, _service, service );

	// 'notify_ref' is the opaque notification token previously given to
	// the client; 'response' carries the client's IOOptionBits reply.
	return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
	           (IOOptionBits) response );
}
3184 
/* Routine io_connect_get_semaphore */
kern_return_t
is_io_connect_get_notification_semaphore(
	io_connect_t connection,
	natural_t notification_type,
	semaphore_t *semaphore )
{
	IOReturn ret;
	// Validate that 'connection' is an IOUserClient (declares 'client').
	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();
	// Take the client's rwlock exclusively while fetching the semaphore.
	IORWLockWrite(&client->lock);
	ret = client->getNotificationSemaphore((UInt32) notification_type,
	    semaphore );
	IORWLockUnlock(&client->lock);

	return ret;
}
3203 
/* Routine io_registry_get_root_entry */
kern_return_t
is_io_registry_get_root_entry(
	mach_port_t main_port,
	io_object_t *root )
{
	IORegistryEntry *   entry;

	// Only the main device port may be used with this routine.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	entry = IORegistryEntry::getRegistryRoot();
	if (entry) {
		// The returned object carries a reference for the caller.
		entry->retain();
	}
	*root = entry;

	return kIOReturnSuccess;
}
3224 
/* Routine io_registry_create_iterator */
kern_return_t
is_io_registry_create_iterator(
	mach_port_t main_port,
	io_name_t plane,
	uint32_t options,
	io_object_t *iterator )
{
	// Only the main device port may create a whole-registry iterator.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	// Wrap a kernel IORegistryIterator in an IOUserIterator for export
	// to user space; NULL plane or failed allocation yields NULL here.
	*iterator = IOUserIterator::withIterator(
		IORegistryIterator::iterateOver(
			IORegistryEntry::getPlane( plane ), options ));

	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
}
3243 
/* Routine io_registry_entry_create_iterator */
kern_return_t
is_io_registry_entry_create_iterator(
	io_object_t registry_entry,
	io_name_t plane,
	uint32_t options,
	io_object_t *iterator )
{
	// Validate that registry_entry is an IORegistryEntry (declares 'entry').
	CHECK( IORegistryEntry, registry_entry, entry );

	// Iterate the named plane rooted at 'entry', wrapped for user export.
	*iterator = IOUserIterator::withIterator(
		IORegistryIterator::iterateOver( entry,
		IORegistryEntry::getPlane( plane ), options ));

	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
}
3260 
/* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// CHECKLOCKED validates 'iterator' and provides both the user-level
	// wrapper ('oIter') and the wrapped IORegistryIterator ('iter').
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// The wrapper's lock serializes concurrent MIG calls on this iterator.
	IOLockLock(&oIter->lock);
	iter->enterEntry();
	IOLockUnlock(&oIter->lock);

	return kIOReturnSuccess;
}
3274 
/* Routine io_registry_iterator_exit */
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool        didIt;

	// CHECKLOCKED validates 'iterator' and provides both the user-level
	// wrapper ('oIter') and the wrapped IORegistryIterator ('iter').
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	IOLockLock(&oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(&oIter->lock);

	// exitEntry() reports false when there was no level left to pop.
	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
3290 
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path(
	mach_port_t main_port,
	io_string_t path,
	io_object_t *registry_entry )
{
	IORegistryEntry *   entry;

	// Only the main device port may be used with this routine.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	entry = IORegistryEntry::fromPath( path );

	// Fallback for tasks in registry-compatibility mode: retry the lookup
	// as a service match on gIOCompatibilityMatchKey plus the same path.
	if (!entry && IOTaskRegistryCompatibility(current_task())) {
		OSDictionary * matching;
		const OSObject * objects[2] = { kOSBooleanTrue, NULL };
		const OSSymbol * keys[2]    = { gIOCompatibilityMatchKey, gIOPathMatchKey };

		objects[1] = OSString::withCStringNoCopy(path);
		matching = OSDictionary::withObjects(objects, keys, 2, 2);
		if (matching) {
			entry = IOService::copyMatchingService(matching);
		}
		OSSafeReleaseNULL(matching);
		OSSafeReleaseNULL(objects[1]);
	}

	// A NULL entry is a valid result; the call itself still succeeds.
	*registry_entry = entry;

	return kIOReturnSuccess;
}
3324 
3325 
/*
 * Routine io_registry_entry_from_path (OOL variant).
 * The path arrives either inband ('path', when non-empty) or as
 * out-of-line data ('path_ool'/'path_oolCnt').  The MIG-level status is
 * the returned 'err'; the lookup status is reported through *result.
 */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t main_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry *   entry;
	vm_map_offset_t     map_data;
	const char *        cpath;
	IOReturn            res;
	kern_return_t       err;

	// Only the main device port may be used with this routine.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry    = NULL;
	res = err = KERN_SUCCESS;
	if (path[0]) {
		// Non-empty inband path takes precedence over the OOL buffer.
		cpath = path;
	} else {
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		// Cap the OOL path size before copying it into the kernel map.
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// The OOL path must be NUL-terminated at its last byte.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	// Release the copied-out OOL buffer, if one was materialized.
	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	if (KERN_SUCCESS != err) {
		res = err;
	}
	*registry_entry = entry;
	*result = res;

	return err;
}
3386 
3387 
/* Routine io_registry_entry_in_plane */
kern_return_t
is_io_registry_entry_in_plane(
	io_object_t registry_entry,
	io_name_t plane,
	boolean_t *inPlane )
{
	// Validate that registry_entry is an IORegistryEntry (declares 'entry').
	CHECK( IORegistryEntry, registry_entry, entry );

	// True when the entry is attached in the named plane.
	*inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));

	return kIOReturnSuccess;
}
3401 
3402 
3403 /* Routine io_registry_entry_get_path */
3404 kern_return_t
is_io_registry_entry_get_path(io_object_t registry_entry,io_name_t plane,io_string_t path)3405 is_io_registry_entry_get_path(
3406 	io_object_t registry_entry,
3407 	io_name_t plane,
3408 	io_string_t path )
3409 {
3410 	int         length;
3411 	CHECK( IORegistryEntry, registry_entry, entry );
3412 
3413 	length = sizeof(io_string_t);
3414 	if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3415 		return kIOReturnSuccess;
3416 	} else {
3417 		return kIOReturnBadArgument;
3418 	}
3419 }
3420 
/*
 * Routine io_registry_entry_get_path (OOL variant).
 * Short paths are returned inband in 'path'; longer paths are returned
 * as out-of-line data via 'path_ool'/'path_oolCnt'.
 */
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum   { kMaxPath = 16384 };
	IOReturn err;
	int      length;
	char   * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	*path_ool    = NULL;
	*path_oolCnt = 0;
	// First try the small inband buffer.
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		err = kIOReturnSuccess;
	} else {
		// Didn't fit: retry with a kMaxPath-byte kernel buffer and hand
		// the result back as OOL data.
		length = kMaxPath;
		buf = IONewData(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			*path_oolCnt = length;
			err = copyoutkdata(buf, length, path_ool);
		}
		if (buf) {
			// Free with the original allocation size (kMaxPath), not the
			// 'length' that getPath() may have shrunk.
			IODeleteData(buf, char, kMaxPath);
		}
	}

	return err;
}
3460 
3461 
/* Routine io_registry_entry_get_name */
kern_return_t
is_io_registry_entry_get_name(
	io_object_t registry_entry,
	io_name_t name )
{
	// Validate that registry_entry is an IORegistryEntry (declares 'entry').
	CHECK( IORegistryEntry, registry_entry, entry );

	// NOTE(review): strncpy does not NUL-terminate if the name exactly
	// fills io_name_t — presumably registry names are bounded; verify.
	strncpy( name, entry->getName(), sizeof(io_name_t));

	return kIOReturnSuccess;
}
3474 
3475 /* Routine io_registry_entry_get_name_in_plane */
3476 kern_return_t
is_io_registry_entry_get_name_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t name)3477 is_io_registry_entry_get_name_in_plane(
3478 	io_object_t registry_entry,
3479 	io_name_t planeName,
3480 	io_name_t name )
3481 {
3482 	const IORegistryPlane * plane;
3483 	CHECK( IORegistryEntry, registry_entry, entry );
3484 
3485 	if (planeName[0]) {
3486 		plane = IORegistryEntry::getPlane( planeName );
3487 	} else {
3488 		plane = NULL;
3489 	}
3490 
3491 	strncpy( name, entry->getName( plane), sizeof(io_name_t));
3492 
3493 	return kIOReturnSuccess;
3494 }
3495 
3496 /* Routine io_registry_entry_get_location_in_plane */
3497 kern_return_t
is_io_registry_entry_get_location_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t location)3498 is_io_registry_entry_get_location_in_plane(
3499 	io_object_t registry_entry,
3500 	io_name_t planeName,
3501 	io_name_t location )
3502 {
3503 	const IORegistryPlane * plane;
3504 	CHECK( IORegistryEntry, registry_entry, entry );
3505 
3506 	if (planeName[0]) {
3507 		plane = IORegistryEntry::getPlane( planeName );
3508 	} else {
3509 		plane = NULL;
3510 	}
3511 
3512 	const char * cstr = entry->getLocation( plane );
3513 
3514 	if (cstr) {
3515 		strncpy( location, cstr, sizeof(io_name_t));
3516 		return kIOReturnSuccess;
3517 	} else {
3518 		return kIOReturnNotFound;
3519 	}
3520 }
3521 
/* Routine io_registry_entry_get_registry_entry_id */
kern_return_t
is_io_registry_entry_get_registry_entry_id(
	io_object_t registry_entry,
	uint64_t *entry_id )
{
	// Validate that registry_entry is an IORegistryEntry (declares 'entry').
	CHECK( IORegistryEntry, registry_entry, entry );

	// Return the entry's 64-bit registry entry ID.
	*entry_id = entry->getRegistryEntryID();

	return kIOReturnSuccess;
}
3534 
3535 
3536 static OSObject *
IOCopyPropertyCompatible(IORegistryEntry * regEntry,const char * name)3537 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3538 {
3539 	OSObject     * obj;
3540 	OSObject     * compatProperties;
3541 	OSDictionary * props;
3542 
3543 	obj = regEntry->copyProperty(name);
3544 	if (obj) {
3545 		return obj;
3546 	}
3547 
3548 	compatProperties = regEntry->copyProperty(gIOUserServicePropertiesKey);
3549 	if (!compatProperties
3550 	    && IOTaskRegistryCompatibility(current_task())) {
3551 		compatProperties = regEntry->copyProperty(gIOCompatibilityPropertiesKey);
3552 	}
3553 	if (compatProperties) {
3554 		props = OSDynamicCast(OSDictionary, compatProperties);
3555 		if (props) {
3556 			obj = props->getObject(name);
3557 			if (obj) {
3558 				obj->retain();
3559 			}
3560 		}
3561 		compatProperties->release();
3562 	}
3563 
3564 	return obj;
3565 }
3566 
/*
 * Routine io_registry_entry_get_property.
 * Flattens a single property into the caller's inband byte buffer.
 * Only OSData, OSString, OSBoolean and OSNumber can be flattened.
 */
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject    *       obj;
	OSData      *       data;
	OSString    *       str;
	OSBoolean   *       boo;
	OSNumber    *       off;
	UInt64              offsetBytes;
	unsigned int        len = 0;
	const void *        bytes = NULL;
	IOReturn            ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// Let the MAC policy veto access to this property.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then...
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		// Non-serializable OSData is reported as zero-length.
		if (!data->isSerializable()) {
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		// Strings include their terminating NUL.
		len = str->getLength() + 1;
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		// Booleans are returned as the C strings "Yes" / "No".
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		// Numbers are returned in host byte order, clamped to 8 bytes.
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		// On big-endian hosts, point at the low-order bytes of the value.
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		// Other property types cannot be flattened to inline bytes.
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		// Fail rather than truncate when the caller's buffer is too small.
		if (*dataCnt < len) {
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3639 
3640 
/*
 * Routine io_registry_entry_get_property.
 * Serializes a single property and returns it as OOL kernel data.
 */
kern_return_t
is_io_registry_entry_get_property(
	io_object_t registry_entry,
	io_name_t property_name,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t       err;
	unsigned int        len;
	OSObject *          obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// Let the MAC policy veto access to this property.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	// Serialize the property and copy it out as OOL kernel data.
	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3685 
/*
 * Routine io_registry_entry_get_property_recursively.
 * Like io_registry_entry_get_property, but searches via
 * copyProperty(name, plane, options) so the lookup can traverse the
 * named plane according to 'options'.
 */
kern_return_t
is_io_registry_entry_get_property_recursively(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t       err;
	unsigned int        len;
	OSObject *          obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// Let the MAC policy veto access to this property.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = entry->copyProperty( property_name,
	    IORegistryEntry::getPlane( plane ), options );
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	// Serialize the property and copy it out as OOL kernel data.
	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3733 
/* Routine io_registry_entry_get_properties */
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// Always unsupported; the binary variants below
	// (io_registry_entry_get_properties_bin*) are the supported path.
	return kIOReturnUnsupported;
}
3743 
3744 #if CONFIG_MACF
3745 
// Context passed to the GetPropertiesEditor serializer callback.
struct GetPropertiesEditorRef {
	kauth_cred_t      cred;   // credential used for per-property MAC checks
	IORegistryEntry * entry;  // entry whose properties are being serialized
	OSCollection    * root;   // outermost collection, captured on first call
};
3751 
/*
 * OSSerialize editor callback: filters properties during serialization.
 * Only direct members of the outermost container are checked against the
 * MAC get-property hook; blocked values are replaced with NULL (dropped).
 * Per the editor contract, a non-NULL return is retained for the caller.
 */
static const LIBKERN_RETURNS_RETAINED OSMetaClassBase *
GetPropertiesEditor(void                  * reference,
    OSSerialize           * s,
    OSCollection          * container,
    const OSSymbol        * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	// Remember the first (outermost) container seen.
	if (!ref->root) {
		ref->root = container;
	}
	if (ref->root == container) {
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			// Censor this property.
			value = NULL;
		}
	}
	if (value) {
		value->retain();
	}
	return value;
}
3774 
3775 #endif /* CONFIG_MACF */
3776 
/*
 * Routine io_registry_entry_get_properties_bin_buf.
 * Binary-serializes all of an entry's properties.  The result goes into
 * the caller-supplied user buffer ('buf'/'bufsize') when it fits,
 * otherwise out-of-line via 'properties'/'propertiesCnt'.
 */
kern_return_t
is_io_registry_entry_get_properties_bin_buf(
	io_object_t registry_entry,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t          err = kIOReturnSuccess;
	unsigned int           len;
	OSObject             * compatProperties;
	OSSerialize          * s;
	OSSerialize::Editor    editor = NULL;
	void                 * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// If the MAC policy wants filtering, install GetPropertiesEditor so
	// it can censor individual top-level properties as they serialize.
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor    = &GetPropertiesEditor;
		editRef   = &ref;
		ref.cred  = kauth_cred_get();
		ref.entry = entry;
		ref.root  = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}


	// Tasks with a user-service or compatibility property overlay get a
	// merged view: real properties minus the overlay keys, then merged
	// with the overlay dictionary.
	compatProperties = entry->copyProperty(gIOUserServicePropertiesKey);
	if (!compatProperties
	    && IOTaskRegistryCompatibility(current_task())) {
		compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey);
	}

	if (compatProperties) {
		OSDictionary * dict;

		dict = entry->dictionaryWithProperties();
		if (!dict) {
			err = kIOReturnNoMemory;
		} else {
			dict->removeObject(gIOUserServicePropertiesKey);
			dict->removeObject(gIOCompatibilityPropertiesKey);
			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
			if (!dict->serialize(s)) {
				err = kIOReturnUnsupported;
			}
			dict->release();
		}
		compatProperties->release();
	} else if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		// Use the caller's buffer when supplied and large enough;
		// otherwise fall back to an OOL kernel-data copyout.
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	}
	s->release();

	return err;
}
3861 
/* Routine io_registry_entry_get_properties_bin */
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	// OOL-only variant: forwards with no caller-supplied buffer.
	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
	           0, NULL, properties, propertiesCnt);
}
3872 
/*
 * Routine io_registry_entry_get_property_bin_buf.
 * Binary-serializes one property, optionally searching descendants in a
 * plane (kIORegistryIterateRecursively).  The result goes into the
 * caller-supplied buffer ('buf'/'bufsize') when it fits, otherwise
 * out-of-line via 'properties'/'propertiesCnt'.
 */
kern_return_t
is_io_registry_entry_get_property_bin_buf(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t       err;
	unsigned int        len;
	OSObject *          obj;
	const OSSymbol *    sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC check for the starting entry; entries visited during a
	// recursive search are checked individually in the loop below.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	err = kIOReturnNotFound;
	if (gIORegistryEntryPropertyKeysKey == sym) {
		// Special key: return the entry's list of property keys.
		obj = entry->copyPropertyKeys();
	} else {
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			obj = IOCopyPropertyCompatible(entry, property_name);
			if (obj == NULL) {
				// Walk the plane until some visited entry has the property.
				// Note 'entry' is reassigned to the entry that supplied it.
				IORegistryIterator * iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
				if (iter) {
					while ((NULL == obj) && (entry = iter->getNextObject())) {
						OSObject * currentObj = IOCopyPropertyCompatible(entry, property_name);
#if CONFIG_MACF
						if (currentObj != NULL && 0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
							// Record that MAC hook blocked this entry and property, and continue to next entry
							err = kIOReturnNotPermitted;
							OSSafeReleaseNULL(currentObj);
							continue;
						}
#endif
						obj = currentObj;
					}
					iter->release();
				}
			}
		} else {
			obj = IOCopyPropertyCompatible(entry, property_name);
		}
		// Some properties are destructive reads: remove after fetching.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		return err;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		// Use the caller's buffer when supplied and large enough;
		// otherwise fall back to an OOL kernel-data copyout.
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3973 
3974 /* Routine io_registry_entry_get_property_bin */
3975 kern_return_t
is_io_registry_entry_get_property_bin(io_object_t registry_entry,io_name_t plane,io_name_t property_name,uint32_t options,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)3976 is_io_registry_entry_get_property_bin(
3977 	io_object_t registry_entry,
3978 	io_name_t plane,
3979 	io_name_t property_name,
3980 	uint32_t options,
3981 	io_buf_ptr_t *properties,
3982 	mach_msg_type_number_t *propertiesCnt )
3983 {
3984 	return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
3985 	           property_name, options, 0, NULL, properties, propertiesCnt);
3986 }
3987 
3988 
/* Routine io_registry_entry_set_properties */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject *          obj;
	kern_return_t       err;
	IOReturn            res;
	vm_offset_t         data;
	vm_map_offset_t     map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Bound the serialized payload (1024 * sizeof(io_struct_inband_t)).
	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			IOService    * service = OSDynamicCast(IOService, entry);
			OSDictionary * props = OSDynamicCast(OSDictionary, obj);
			OSObject     * allowable = entry->copyProperty(gIORegistryEntryAllowableSetPropertiesKey);
			OSArray      * allowableArray;

			if (!allowable) {
				// No allow-list published by the entry: any property may be set.
				res = kIOReturnSuccess;
			} else {
				// An allow-list exists: the payload must be a dictionary, the
				// list must be an array, and every key must appear in it.
				if (!props) {
					res = kIOReturnNotPermitted;
				} else if (!(allowableArray = OSDynamicCast(OSArray, allowable))) {
					res = kIOReturnNotPermitted;
				} else {
					bool allFound __block, found __block;

					allFound = true;
					props->iterateObjects(^(const OSSymbol * key, OSObject * value) {
						found = false;
						for (unsigned int idx = 0; !found; idx++) {
							OSObject * next = allowableArray->getObject(idx);
							if (!next) {
								break;
							}
							found = next->isEqualTo(key);
						}
						allFound &= found;
						if (!found) {
							IOLog("IORegistryEntrySetProperties(%s, %s) disallowed due to " kIORegistryEntryAllowableSetPropertiesKey "\n",
							entry->getName(), key->getCStringNoCopy());
						}
						// Returning true stops iteration at the first disallowed key.
						return !allFound;
					});
					res =  allFound ? kIOReturnSuccess : kIOReturnBadArgument;
				}
			}
			if (kIOReturnSuccess == res) {
				IOUserClient *
				    client = OSDynamicCast(IOUserClient, entry);

				// User clients that opted in get setProperties serialized
				// under their writer lock.
				if (client && client->defaultLockingSetProperties) {
					IORWLockWrite(&client->lock);
				}

				// Other registry entries may request serialization via the
				// registry property-action mechanism instead.
				if (!client && (kOSBooleanTrue == entry->getProperty(gIORegistryEntryDefaultLockingSetPropertiesKey))) {
					res = entry->runPropertyActionBlock(^IOReturn (void) {
						return entry->setProperties( obj );
					});
				} else {
					res = entry->setProperties( obj );
				}

				if (client && client->defaultLockingSetProperties) {
					IORWLockUnlock(&client->lock);
				}
				// DriverKit-backed services additionally receive the
				// dictionary through their user-space entry point.
				if (service && props && service->hasUserServer()) {
					res = service->UserSetProperties(props);
				}
			}
			OSSafeReleaseNULL(allowable);
		}
		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	// MIG-style: the operation status goes in *result; the return value is
	// the IPC-level status from vm_map_copyout.
	*result = res;
	return err;
}
4102 
4103 /* Routine io_registry_entry_get_child_iterator */
4104 kern_return_t
is_io_registry_entry_get_child_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4105 is_io_registry_entry_get_child_iterator(
4106 	io_object_t registry_entry,
4107 	io_name_t plane,
4108 	io_object_t *iterator )
4109 {
4110 	CHECK( IORegistryEntry, registry_entry, entry );
4111 
4112 	*iterator = IOUserIterator::withIterator(entry->getChildIterator(
4113 		    IORegistryEntry::getPlane( plane )));
4114 
4115 	return kIOReturnSuccess;
4116 }
4117 
4118 /* Routine io_registry_entry_get_parent_iterator */
4119 kern_return_t
is_io_registry_entry_get_parent_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4120 is_io_registry_entry_get_parent_iterator(
4121 	io_object_t registry_entry,
4122 	io_name_t plane,
4123 	io_object_t *iterator)
4124 {
4125 	CHECK( IORegistryEntry, registry_entry, entry );
4126 
4127 	*iterator = IOUserIterator::withIterator(entry->getParentIterator(
4128 		    IORegistryEntry::getPlane( plane )));
4129 
4130 	return kIOReturnSuccess;
4131 }
4132 
4133 /* Routine io_service_get_busy_state */
4134 kern_return_t
is_io_service_get_busy_state(io_object_t _service,uint32_t * busyState)4135 is_io_service_get_busy_state(
4136 	io_object_t _service,
4137 	uint32_t *busyState )
4138 {
4139 	CHECK( IOService, _service, service );
4140 
4141 	*busyState = service->getBusyState();
4142 
4143 	return kIOReturnSuccess;
4144 }
4145 
4146 /* Routine io_service_get_state */
4147 kern_return_t
is_io_service_get_state(io_object_t _service,uint64_t * state,uint32_t * busy_state,uint64_t * accumulated_busy_time)4148 is_io_service_get_state(
4149 	io_object_t _service,
4150 	uint64_t *state,
4151 	uint32_t *busy_state,
4152 	uint64_t *accumulated_busy_time )
4153 {
4154 	CHECK( IOService, _service, service );
4155 
4156 	*state                 = service->getState();
4157 	*busy_state            = service->getBusyState();
4158 	*accumulated_busy_time = service->getAccumulatedBusyTime();
4159 
4160 	return kIOReturnSuccess;
4161 }
4162 
4163 /* Routine io_service_wait_quiet */
4164 kern_return_t
is_io_service_wait_quiet(io_object_t _service,mach_timespec_t wait_time)4165 is_io_service_wait_quiet(
4166 	io_object_t _service,
4167 	mach_timespec_t wait_time )
4168 {
4169 	uint64_t    timeoutNS;
4170 
4171 	CHECK( IOService, _service, service );
4172 
4173 	timeoutNS = wait_time.tv_sec;
4174 	timeoutNS *= kSecondScale;
4175 	timeoutNS += wait_time.tv_nsec;
4176 
4177 	return service->waitQuiet(timeoutNS);
4178 }
4179 
4180 /* Routine io_service_wait_quiet_with_options */
4181 kern_return_t
is_io_service_wait_quiet_with_options(io_object_t _service,mach_timespec_t wait_time,uint32_t options)4182 is_io_service_wait_quiet_with_options(
4183 	io_object_t _service,
4184 	mach_timespec_t wait_time,
4185 	uint32_t options )
4186 {
4187 	uint64_t    timeoutNS;
4188 
4189 	CHECK( IOService, _service, service );
4190 
4191 	timeoutNS = wait_time.tv_sec;
4192 	timeoutNS *= kSecondScale;
4193 	timeoutNS += wait_time.tv_nsec;
4194 
4195 	if ((options & kIOWaitQuietPanicOnFailure) && !IOCurrentTaskHasEntitlement(kIOWaitQuietPanicsEntitlement)) {
4196 		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4197 		IOLog("IOServiceWaitQuietWithOptions(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4198 		OSSafeReleaseNULL(taskName);
4199 
4200 		/* strip this option from the options before calling waitQuietWithOptions */
4201 		options &= ~kIOWaitQuietPanicOnFailure;
4202 	}
4203 
4204 	return service->waitQuietWithOptions(timeoutNS, options);
4205 }
4206 
4207 
4208 /* Routine io_service_request_probe */
4209 kern_return_t
is_io_service_request_probe(io_object_t _service,uint32_t options)4210 is_io_service_request_probe(
4211 	io_object_t _service,
4212 	uint32_t options )
4213 {
4214 	CHECK( IOService, _service, service );
4215 
4216 	return service->requestProbe( options );
4217 }
4218 
4219 /* Routine io_service_get_authorization_id */
4220 kern_return_t
is_io_service_get_authorization_id(io_object_t _service,uint64_t * authorization_id)4221 is_io_service_get_authorization_id(
4222 	io_object_t _service,
4223 	uint64_t *authorization_id )
4224 {
4225 	kern_return_t          kr;
4226 
4227 	CHECK( IOService, _service, service );
4228 
4229 	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
4230 	    kIOClientPrivilegeAdministrator );
4231 	if (kIOReturnSuccess != kr) {
4232 		return kr;
4233 	}
4234 
4235 #if defined(XNU_TARGET_OS_OSX)
4236 	*authorization_id = service->getAuthorizationID();
4237 #else /* defined(XNU_TARGET_OS_OSX) */
4238 	*authorization_id = 0;
4239 	kr = kIOReturnUnsupported;
4240 #endif /* defined(XNU_TARGET_OS_OSX) */
4241 
4242 	return kr;
4243 }
4244 
4245 /* Routine io_service_set_authorization_id */
4246 kern_return_t
is_io_service_set_authorization_id(io_object_t _service,uint64_t authorization_id)4247 is_io_service_set_authorization_id(
4248 	io_object_t _service,
4249 	uint64_t authorization_id )
4250 {
4251 	CHECK( IOService, _service, service );
4252 
4253 #if defined(XNU_TARGET_OS_OSX)
4254 	return service->setAuthorizationID( authorization_id );
4255 #else /* defined(XNU_TARGET_OS_OSX) */
4256 	return kIOReturnUnsupported;
4257 #endif /* defined(XNU_TARGET_OS_OSX) */
4258 }
4259 
/* Routine io_service_open_ndr */
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = NULL;
	kern_return_t  err = KERN_SUCCESS;
	IOReturn       res = kIOReturnSuccess;
	OSDictionary * propertiesDict = NULL;
	bool           disallowAccess = false;

	CHECK( IOService, _service, service );

	// The open must be performed by the task that will own the connection.
	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

#if CONFIG_MACF
	// MAC policy gate on opening this service with this connect type.
	if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
		return kIOReturnNotPermitted;
	}
#endif
	do{
		// Caller-supplied open-time properties are not currently accepted;
		// the #if 0 block below preserves the previously intended path.
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		{
			OSObject *      obj;
			vm_offset_t     data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		// Ask the service to create (or share) a user client for this task.
		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess && OSDynamicCast(IOUserClient, client) == NULL) {
			// client should always be a IOUserClient
			res = kIOReturnError;
		}

		if (res == kIOReturnSuccess) {
			// Ensure the expansion data (reserved) is allocated before use.
			if (!client->reserved) {
				if (!client->reserve()) {
					client->clientClose();
					OSSafeReleaseNULL(client);
					res = kIOReturnNoMemory;
				}
			}
		}

		if (res == kIOReturnSuccess) {
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
			}
			const char * creatorNameCStr = creatorName ? creatorName->getCStringNoCopy() : "<unknown>";
			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
			// Shared instances can be opened concurrently, so the one-time
			// setup below is serialized under the owners lock for them.
			if (client->sharedInstance) {
				IOLockLock(gIOUserClientOwnersLock);
			}
			if (!client->opened) {
				client->opened = true;

				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
				{
					OSObject * obj;
					extern const OSSymbol * gIOSurfaceIdentifier;
					obj = client->getProperty(kIOUserClientDefaultLockingKey);
					bool hasProps = false;

					// IOUserClient2022 subclasses must declare all three
					// default-locking properties explicitly.
					client->uc2022 = (NULL != OSDynamicCast(IOUserClient2022, client));
					if (obj) {
						hasProps = true;
						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSetProperties = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSingleThreadExternalMethod = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					if (kIOReturnSuccess != res) {
						IOLog("IOUC %s requires kIOUserClientDefaultLockingKey, kIOUserClientDefaultLockingSetPropertiesKey, kIOUserClientDefaultLockingSingleThreadExternalMethodKey\n",
						    client->getMetaClass()->getClassName());
					}
					// Legacy clients that declared nothing get conservative
					// defaults, except kexts depending on IOSurface
					// (compatibility carve-out).
					if (!hasProps) {
						const OSMetaClass * meta;
						OSKext            * kext;
						meta = client->getMetaClass();
						kext = meta->getKext();
						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
							client->defaultLocking = true;
							client->defaultLockingSetProperties = false;
							client->defaultLockingSingleThreadExternalMethod = false;
							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
						}
					}
				}
			}
			if (client->sharedInstance) {
				IOLockUnlock(gIOUserClientOwnersLock);
			}

			OSObject     * requiredEntitlement = client->copyProperty(gIOUserClientEntitlementsKey);
			OSString * requiredEntitlementString = OSDynamicCast(OSString, requiredEntitlement);
			//If this is an IOUserClient2022, having kIOUserClientEntitlementsKey is mandatory.
			//If it has kIOUserClientEntitlementsKey, the value must be either kOSBooleanFalse or an OSString
			//If the value is kOSBooleanFalse, we allow access.
			//If the value is an OSString, we allow access if the task has the named entitlement
			if (client->uc2022) {
				if (!requiredEntitlement) {
					IOLog("IOUC %s missing " kIOUserClientEntitlementsKey " property\n",
					    client->getMetaClass()->getClassName());
					disallowAccess = true;
				} else if (!requiredEntitlementString && requiredEntitlement != kOSBooleanFalse) {
					IOLog("IOUC %s had " kIOUserClientEntitlementsKey "with value not boolean false or string\n", client->getMetaClass()->getClassName());
					disallowAccess = true;
				}
			}

			if (requiredEntitlement && disallowAccess == false) {
				if (kOSBooleanFalse == requiredEntitlement) {
					// allow
					disallowAccess = false;
				} else {
					disallowAccess = !IOTaskHasEntitlement(owningTask, requiredEntitlementString->getCStringNoCopy());
					if (disallowAccess) {
						IOLog("IOUC %s missing entitlement in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			OSSafeReleaseNULL(requiredEntitlement);

			if (disallowAccess) {
				res = kIOReturnNotPrivileged;
			}
#if CONFIG_MACF
			// MAC policy gate on the concrete user client object.
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				IOLog("IOUC %s failed MACF in process %s\n",
				    client->getMetaClass()->getClassName(), creatorNameCStr);
				res = kIOReturnNotPermitted;
			}
#endif

			// Sandbox filtering: resolve and attach a filter policy for this
			// task if the platform registered a resolver and none exists yet.
			if ((kIOReturnSuccess == res)
			    && gIOUCFilterCallbacks
			    && gIOUCFilterCallbacks->io_filter_resolver) {
				io_filter_policy_t filterPolicy;
				filterPolicy = client->filterForTask(owningTask, 0);
				if (!filterPolicy) {
					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
					if (kIOReturnUnsupported == res) {
						res = kIOReturnSuccess;
					} else if (kIOReturnSuccess == res) {
						client->filterForTask(owningTask, filterPolicy);
					} else {
						IOLog("IOUC %s failed sandbox in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}
			OSSafeReleaseNULL(creatorName);

			if (kIOReturnSuccess != res) {
				// Failed open: close and drop the client. The MIG call still
				// returns KERN_SUCCESS with the failure in *result.
				IOStatisticsClientCall();
				client->clientClose();
				client->setTerminateDefer(service, false);
				client->release();
				client = NULL;
				break;
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	*connection = client;
	*result = res;

	return err;
}
4495 
/* Routine io_service_close */
kern_return_t
is_io_service_close(
	io_object_t connection )
{
	OSSet * mappings;
	// NOTE(review): the connection handle may resolve to the leftover
	// mappings OSSet rather than a user client (see the mappings handling
	// in the map/unmap paths); closing that is treated as a benign no-op.
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	// Shared-instance clients may be closed repeatedly; otherwise the
	// atomic 0->1 swap of 'closed' ensures clientClose() runs exactly once.
	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		IORWLockWrite(&client->lock);
		client->clientClose();
		IORWLockUnlock(&client->lock);
	} else {
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
4521 
4522 /* Routine io_connect_get_service */
4523 kern_return_t
is_io_connect_get_service(io_object_t connection,io_object_t * service)4524 is_io_connect_get_service(
4525 	io_object_t connection,
4526 	io_object_t *service )
4527 {
4528 	IOService * theService;
4529 
4530 	CHECK( IOUserClient, connection, client );
4531 
4532 	theService = client->getService();
4533 	if (theService) {
4534 		theService->retain();
4535 	}
4536 
4537 	*service = theService;
4538 
4539 	return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4540 }
4541 
4542 /* Routine io_connect_set_notification_port */
4543 kern_return_t
is_io_connect_set_notification_port(io_object_t connection,uint32_t notification_type,mach_port_t port,uint32_t reference)4544 is_io_connect_set_notification_port(
4545 	io_object_t connection,
4546 	uint32_t notification_type,
4547 	mach_port_t port,
4548 	uint32_t reference)
4549 {
4550 	kern_return_t ret;
4551 	CHECK( IOUserClient, connection, client );
4552 
4553 	IOStatisticsClientCall();
4554 	IORWLockWrite(&client->lock);
4555 	ret = client->registerNotificationPort( port, notification_type,
4556 	    (io_user_reference_t) reference );
4557 	IORWLockUnlock(&client->lock);
4558 	return ret;
4559 }
4560 
4561 /* Routine io_connect_set_notification_port */
4562 kern_return_t
is_io_connect_set_notification_port_64(io_object_t connection,uint32_t notification_type,mach_port_t port,io_user_reference_t reference)4563 is_io_connect_set_notification_port_64(
4564 	io_object_t connection,
4565 	uint32_t notification_type,
4566 	mach_port_t port,
4567 	io_user_reference_t reference)
4568 {
4569 	kern_return_t ret;
4570 	CHECK( IOUserClient, connection, client );
4571 
4572 	IOStatisticsClientCall();
4573 	IORWLockWrite(&client->lock);
4574 	ret = client->registerNotificationPort( port, notification_type,
4575 	    reference );
4576 	IORWLockUnlock(&client->lock);
4577 	return ret;
4578 }
4579 
/* Routine io_connect_map_memory_into_task */
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
)
{
	IOReturn            err;
	IOMemoryMap *       map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	// Serialize with other client calls when the client opted into the
	// default locking model.
	if (client->defaultLocking) {
		IORWLockWrite(&client->lock);
	}
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );
	if (client->defaultLocking) {
		IORWLockUnlock(&client->lock);
	}

	if (map) {
		// Report where the mapping landed (and how large it is).
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			map->release();
		} else {
			// keep it with the user client
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	return err;
}
4643 
4644 /* Routine is_io_connect_map_memory */
4645 kern_return_t
is_io_connect_map_memory(io_object_t connect,uint32_t type,task_t task,uint32_t * mapAddr,uint32_t * mapSize,uint32_t flags)4646 is_io_connect_map_memory(
4647 	io_object_t     connect,
4648 	uint32_t        type,
4649 	task_t          task,
4650 	uint32_t  *     mapAddr,
4651 	uint32_t  *     mapSize,
4652 	uint32_t        flags )
4653 {
4654 	IOReturn          err;
4655 	mach_vm_address_t address;
4656 	mach_vm_size_t    size;
4657 
4658 	address = SCALAR64(*mapAddr);
4659 	size    = SCALAR64(*mapSize);
4660 
4661 	err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4662 
4663 	*mapAddr = SCALAR32(address);
4664 	*mapSize = SCALAR32(size);
4665 
4666 	return err;
4667 }
4668 } /* extern "C" */
4669 
4670 IOMemoryMap *
removeMappingForDescriptor(IOMemoryDescriptor * mem)4671 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4672 {
4673 	OSIterator *  iter;
4674 	IOMemoryMap * map = NULL;
4675 
4676 	IOLockLock(gIOObjectPortLock);
4677 
4678 	iter = OSCollectionIterator::withCollection(mappings);
4679 	if (iter) {
4680 		while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4681 			if (mem == map->getMemoryDescriptor()) {
4682 				map->retain();
4683 				mappings->removeObject(map);
4684 				break;
4685 			}
4686 		}
4687 		iter->release();
4688 	}
4689 
4690 	IOLockUnlock(gIOObjectPortLock);
4691 
4692 	return map;
4693 }
4694 
4695 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn            err;
	IOOptionBits        options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap *       map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	// Look up the memory descriptor for this type under the client lock
	// (when the client opted into default locking).
	if (client->defaultLocking) {
		IORWLockWrite(&client->lock);
	}
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );
	if (client->defaultLocking) {
		IORWLockUnlock(&client->lock);
	}

	if (memory && (kIOReturnSuccess == err)) {
		// kIOMapReference re-creates the mapping object covering 'address'
		// in the target task instead of making a new mapping.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			// Drop it from the per-client retained mappings set, if present.
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			// NOTE(review): despite the name, this flag is true when the map
			// was published to the task as a port name (cross-task unmap, or
			// a shared-instance client) — confirm against the map path above.
			bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance;
			if (is_shared_instance_or_from_current_task) {
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				map->userClientUnmap();
				// The result of dropping the send rights is discarded; the
				// unmap reports success once userClientUnmap has run.
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (!is_shared_instance_or_from_current_task) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	return err;
}
4762 
4763 kern_return_t
is_io_connect_unmap_memory(io_object_t connect,uint32_t type,task_t task,uint32_t mapAddr)4764 is_io_connect_unmap_memory(
4765 	io_object_t     connect,
4766 	uint32_t        type,
4767 	task_t          task,
4768 	uint32_t        mapAddr )
4769 {
4770 	IOReturn            err;
4771 	mach_vm_address_t   address;
4772 
4773 	address = SCALAR64(mapAddr);
4774 
4775 	err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4776 
4777 	return err;
4778 }
4779 
4780 
4781 /* Routine io_connect_add_client */
4782 kern_return_t
is_io_connect_add_client(io_object_t connection,io_object_t connect_to)4783 is_io_connect_add_client(
4784 	io_object_t connection,
4785 	io_object_t connect_to)
4786 {
4787 	CHECK( IOUserClient, connection, client );
4788 	CHECK( IOUserClient, connect_to, to );
4789 
4790 	IOReturn ret;
4791 
4792 	IOStatisticsClientCall();
4793 	if (client->defaultLocking) {
4794 		IORWLockWrite(&client->lock);
4795 	}
4796 	ret = client->connectClient( to );
4797 	if (client->defaultLocking) {
4798 		IORWLockUnlock(&client->lock);
4799 	}
4800 	return ret;
4801 }
4802 
4803 
4804 /* Routine io_connect_set_properties */
4805 kern_return_t
is_io_connect_set_properties(io_object_t connection,io_buf_ptr_t properties,mach_msg_type_number_t propertiesCnt,kern_return_t * result)4806 is_io_connect_set_properties(
4807 	io_object_t connection,
4808 	io_buf_ptr_t properties,
4809 	mach_msg_type_number_t propertiesCnt,
4810 	kern_return_t * result)
4811 {
4812 	return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4813 }
4814 
/* Routine io_user_client_method */
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD  = NULL;
	OSObject *           structureVariableOutputData = NULL;

	// Build the external-method argument block from the MIG parameters.
	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// This entry point is synchronous: no async wake port or reference.
	args.asyncWakePort               = MACH_PORT_NULL;
	args.asyncReference              = NULL;
	args.asyncReferenceCount         = 0;
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line input must be strictly larger than the inband limit;
	// anything smaller should have been sent inband.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		// Wrap the caller's OOL buffer; copy-on-write protects against the
		// caller mutating it while the method runs.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Pre-zero the scalar output area so short outputs return no stale data.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;

	// Apply any sandbox filter policy installed for this task before
	// dispatching the external method.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}

	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod(selector, &args);
	}

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	// Variable-length output: the method deposited an OSSerialize or an
	// OSData in structureVariableOutputData; copy it out-of-line. Anything
	// else (including nothing) is an underrun.
	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData      * data;
		unsigned int  len;

		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			data->clipForCopyout();
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
4921 
/* Routine io_user_client_method */
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
)
{
	/* Verify 'connection' is a live IOUserClient and bind it to 'client';
	 * the macro returns an error from this function on failure. */
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD  = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	/* Clear the reserved area of the stack-allocated args so the callee
	 * never observes stale kernel stack contents. */
	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	/* Synchronous variant: no async wake port, no variable-size output. */
	args.asyncWakePort               = MACH_PORT_NULL;
	args.asyncReference              = NULL;
	args.asyncReferenceCount         = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/* Structure I/O that would fit in the inband buffer must be sent
	 * inband; reject undersized out-of-line regions. */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		/* structureOutputDescriptorSize is 32-bit (see the cast below),
		 * so an output region larger than UINT_MAX cannot be expressed. */
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		/* Wrap the sender's out-of-line input region; mapped
		 * copy-on-write (kIOMemoryMapCopyOnWrite). */
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Pre-zero the scalar output array so slots the method does not
	 * write cannot leak kernel data back to userspace. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		/* Out-of-line output is written directly into the sender's
		 * address space (kIODirectionIn = data flows into the buffer). */
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size
	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
	    : 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	/* Consult the per-task message filter (if any) before dispatch. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	/* Report how much output the method actually produced. */
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size  = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5024 
/* Routine io_async_user_client_method */
kern_return_t
is_io_connect_async_method
(
	io_connect_t connection,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t * ool_output_size
)
{
	/* Verify 'connection' is a live IOUserClient and bind it to 'client';
	 * the macro returns an error from this function on failure. */
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD  = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	/* reference[0] is overwritten below to carry the wake port, so at
	 * least one reference slot must exist. */
	if (referenceCnt < 1) {
		return kIOReturnBadArgument;
	}

	/* Clear the reserved area of the stack-allocated args so the callee
	 * never observes stale kernel stack contents. */
	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	/* Stash the wake port in reference[0]; tag it with kIOUCAsync64Flag
	 * when the sender has a 64-bit address space. */
	reference[0]             = (io_user_reference_t) wake_port;
	if (vm_map_is_64bit(get_task_map(current_task()))) {
		reference[0]         |= kIOUCAsync64Flag;
	}

	args.selector = selector;

	args.asyncWakePort       = wake_port;
	args.asyncReference      = reference;
	args.asyncReferenceCount = referenceCnt;

	/* Variable-size output is not supported on the async path. */
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/* Structure I/O that would fit in the inband buffer must be sent
	 * inband; reject undersized out-of-line regions. */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		/* structureOutputDescriptorSize is 32-bit (see the cast below). */
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		/* Wrap the sender's out-of-line input region; mapped
		 * copy-on-write (kIOMemoryMapCopyOnWrite). */
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Pre-zero the scalar output array so slots the method does not
	 * write cannot leak kernel data back to userspace. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	/* NOTE(review): unlike is_io_connect_method, ool_output_size is
	 * dereferenced unconditionally here — callers are MIG-generated or
	 * the in-file shims, which always pass a valid pointer; confirm. */
	args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	/* Consult the per-task message filter (if any) before dispatch. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_async_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	/* Report how much output the method actually produced. */
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size  = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5138 
5139 /* Routine io_connect_method_scalarI_scalarO */
5140 kern_return_t
is_io_connect_method_scalarI_scalarO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5141 is_io_connect_method_scalarI_scalarO(
5142 	io_object_t        connect,
5143 	uint32_t           index,
5144 	io_scalar_inband_t       input,
5145 	mach_msg_type_number_t   inputCount,
5146 	io_scalar_inband_t       output,
5147 	mach_msg_type_number_t * outputCount )
5148 {
5149 	IOReturn err;
5150 	uint32_t i;
5151 	io_scalar_inband64_t _input;
5152 	io_scalar_inband64_t _output;
5153 
5154 	mach_msg_type_number_t struct_outputCnt = 0;
5155 	mach_vm_size_t ool_output_size = 0;
5156 
5157 	bzero(&_output[0], sizeof(_output));
5158 	for (i = 0; i < inputCount; i++) {
5159 		_input[i] = SCALAR64(input[i]);
5160 	}
5161 
5162 	err = is_io_connect_method(connect, index,
5163 	    _input, inputCount,
5164 	    NULL, 0,
5165 	    0, 0,
5166 	    NULL, &struct_outputCnt,
5167 	    _output, outputCount,
5168 	    0, &ool_output_size);
5169 
5170 	for (i = 0; i < *outputCount; i++) {
5171 		output[i] = SCALAR32(_output[i]);
5172 	}
5173 
5174 	return err;
5175 }
5176 
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod *      method,
	IOService *             object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t   inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	/* Shim that dispatches a legacy IOExternalMethod (scalar-in /
	 * scalar-out) through a member-function pointer.  The call shape is
	 * selected by the number of scalar inputs; the remaining argument
	 * slots receive pointers into the local _output array. */
	IOMethod            func;
	io_scalar_inband_t  _output;
	IOReturn            err;
	err = kIOReturnBadArgument;

	/* Pre-zero so unused output slots are never stale stack data. */
	bzero(&_output[0], sizeof(_output));
	/* do{...}while(false): single structured exit for the guard checks. */
	do {
		/* Counts must match the method-table entry exactly. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* ARG32 narrows each 64-bit user scalar to the 32-bit value the
		 * legacy IOMethod signature expects. */
		switch (inputCount) {
		case 6:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)(  ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)(  &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Widen the 32-bit results back to user scalars. */
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5252 
5253 /* Routine io_async_method_scalarI_scalarO */
5254 kern_return_t
is_io_async_method_scalarI_scalarO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5255 is_io_async_method_scalarI_scalarO(
5256 	io_object_t        connect,
5257 	mach_port_t wake_port,
5258 	io_async_ref_t reference,
5259 	mach_msg_type_number_t referenceCnt,
5260 	uint32_t           index,
5261 	io_scalar_inband_t       input,
5262 	mach_msg_type_number_t   inputCount,
5263 	io_scalar_inband_t       output,
5264 	mach_msg_type_number_t * outputCount )
5265 {
5266 	IOReturn err;
5267 	uint32_t i;
5268 	io_scalar_inband64_t _input;
5269 	io_scalar_inband64_t _output;
5270 	io_async_ref64_t _reference;
5271 
5272 	if (referenceCnt > ASYNC_REF64_COUNT) {
5273 		return kIOReturnBadArgument;
5274 	}
5275 	bzero(&_output[0], sizeof(_output));
5276 	for (i = 0; i < referenceCnt; i++) {
5277 		_reference[i] = REF64(reference[i]);
5278 	}
5279 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5280 
5281 	mach_msg_type_number_t struct_outputCnt = 0;
5282 	mach_vm_size_t ool_output_size = 0;
5283 
5284 	for (i = 0; i < inputCount; i++) {
5285 		_input[i] = SCALAR64(input[i]);
5286 	}
5287 
5288 	err = is_io_connect_async_method(connect,
5289 	    wake_port, _reference, referenceCnt,
5290 	    index,
5291 	    _input, inputCount,
5292 	    NULL, 0,
5293 	    0, 0,
5294 	    NULL, &struct_outputCnt,
5295 	    _output, outputCount,
5296 	    0, &ool_output_size);
5297 
5298 	for (i = 0; i < *outputCount; i++) {
5299 		output[i] = SCALAR32(_output[i]);
5300 	}
5301 
5302 	return err;
5303 }
5304 /* Routine io_async_method_scalarI_structureO */
5305 kern_return_t
is_io_async_method_scalarI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5306 is_io_async_method_scalarI_structureO(
5307 	io_object_t     connect,
5308 	mach_port_t wake_port,
5309 	io_async_ref_t reference,
5310 	mach_msg_type_number_t referenceCnt,
5311 	uint32_t        index,
5312 	io_scalar_inband_t input,
5313 	mach_msg_type_number_t  inputCount,
5314 	io_struct_inband_t              output,
5315 	mach_msg_type_number_t *        outputCount )
5316 {
5317 	uint32_t i;
5318 	io_scalar_inband64_t _input;
5319 	io_async_ref64_t _reference;
5320 
5321 	if (referenceCnt > ASYNC_REF64_COUNT) {
5322 		return kIOReturnBadArgument;
5323 	}
5324 	for (i = 0; i < referenceCnt; i++) {
5325 		_reference[i] = REF64(reference[i]);
5326 	}
5327 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5328 
5329 	mach_msg_type_number_t scalar_outputCnt = 0;
5330 	mach_vm_size_t ool_output_size = 0;
5331 
5332 	for (i = 0; i < inputCount; i++) {
5333 		_input[i] = SCALAR64(input[i]);
5334 	}
5335 
5336 	return is_io_connect_async_method(connect,
5337 	           wake_port, _reference, referenceCnt,
5338 	           index,
5339 	           _input, inputCount,
5340 	           NULL, 0,
5341 	           0, 0,
5342 	           output, outputCount,
5343 	           NULL, &scalar_outputCnt,
5344 	           0, &ool_output_size);
5345 }
5346 
5347 /* Routine io_async_method_scalarI_structureI */
5348 kern_return_t
is_io_async_method_scalarI_structureI(io_connect_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5349 is_io_async_method_scalarI_structureI(
5350 	io_connect_t            connect,
5351 	mach_port_t wake_port,
5352 	io_async_ref_t reference,
5353 	mach_msg_type_number_t referenceCnt,
5354 	uint32_t                index,
5355 	io_scalar_inband_t      input,
5356 	mach_msg_type_number_t  inputCount,
5357 	io_struct_inband_t      inputStruct,
5358 	mach_msg_type_number_t  inputStructCount )
5359 {
5360 	uint32_t i;
5361 	io_scalar_inband64_t _input;
5362 	io_async_ref64_t _reference;
5363 
5364 	if (referenceCnt > ASYNC_REF64_COUNT) {
5365 		return kIOReturnBadArgument;
5366 	}
5367 	for (i = 0; i < referenceCnt; i++) {
5368 		_reference[i] = REF64(reference[i]);
5369 	}
5370 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5371 
5372 	mach_msg_type_number_t scalar_outputCnt = 0;
5373 	mach_msg_type_number_t inband_outputCnt = 0;
5374 	mach_vm_size_t ool_output_size = 0;
5375 
5376 	for (i = 0; i < inputCount; i++) {
5377 		_input[i] = SCALAR64(input[i]);
5378 	}
5379 
5380 	return is_io_connect_async_method(connect,
5381 	           wake_port, _reference, referenceCnt,
5382 	           index,
5383 	           _input, inputCount,
5384 	           inputStruct, inputStructCount,
5385 	           0, 0,
5386 	           NULL, &inband_outputCnt,
5387 	           NULL, &scalar_outputCnt,
5388 	           0, &ool_output_size);
5389 }
5390 
5391 /* Routine io_async_method_structureI_structureO */
5392 kern_return_t
is_io_async_method_structureI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5393 is_io_async_method_structureI_structureO(
5394 	io_object_t     connect,
5395 	mach_port_t wake_port,
5396 	io_async_ref_t reference,
5397 	mach_msg_type_number_t referenceCnt,
5398 	uint32_t        index,
5399 	io_struct_inband_t              input,
5400 	mach_msg_type_number_t  inputCount,
5401 	io_struct_inband_t              output,
5402 	mach_msg_type_number_t *        outputCount )
5403 {
5404 	uint32_t i;
5405 	mach_msg_type_number_t scalar_outputCnt = 0;
5406 	mach_vm_size_t ool_output_size = 0;
5407 	io_async_ref64_t _reference;
5408 
5409 	if (referenceCnt > ASYNC_REF64_COUNT) {
5410 		return kIOReturnBadArgument;
5411 	}
5412 	for (i = 0; i < referenceCnt; i++) {
5413 		_reference[i] = REF64(reference[i]);
5414 	}
5415 	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5416 
5417 	return is_io_connect_async_method(connect,
5418 	           wake_port, _reference, referenceCnt,
5419 	           index,
5420 	           NULL, 0,
5421 	           input, inputCount,
5422 	           0, 0,
5423 	           output, outputCount,
5424 	           NULL, &scalar_outputCnt,
5425 	           0, &ool_output_size);
5426 }
5427 
5428 
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t   inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	/* Shim that dispatches a legacy IOExternalAsyncMethod (scalar-in /
	 * scalar-out) through a member-function pointer.  The 64-bit async
	 * references are narrowed to the legacy 32-bit io_async_ref_t, and
	 * the call shape is selected by the number of scalar inputs. */
	IOAsyncMethod       func;
	uint32_t            i;
	io_scalar_inband_t  _output;
	IOReturn            err;
	io_async_ref_t      reference;

	/* Pre-zero so unused output slots are never stale stack data. */
	bzero(&_output[0], sizeof(_output));
	/* REF32 narrows each 64-bit async reference for the legacy method. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	/* do{...}while(false): single structured exit for the guard checks. */
	do {
		/* Counts must match the method-table entry exactly. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* ARG32 narrows each 64-bit user scalar; unused trailing slots
		 * are filled with pointers into _output. */
		switch (inputCount) {
		case 6:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)(  reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Widen the 32-bit results back to user scalars. */
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5520 
5521 
5522 /* Routine io_connect_method_scalarI_structureO */
5523 kern_return_t
is_io_connect_method_scalarI_structureO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5524 is_io_connect_method_scalarI_structureO(
5525 	io_object_t     connect,
5526 	uint32_t        index,
5527 	io_scalar_inband_t input,
5528 	mach_msg_type_number_t  inputCount,
5529 	io_struct_inband_t              output,
5530 	mach_msg_type_number_t *        outputCount )
5531 {
5532 	uint32_t i;
5533 	io_scalar_inband64_t _input;
5534 
5535 	mach_msg_type_number_t scalar_outputCnt = 0;
5536 	mach_vm_size_t ool_output_size = 0;
5537 
5538 	for (i = 0; i < inputCount; i++) {
5539 		_input[i] = SCALAR64(input[i]);
5540 	}
5541 
5542 	return is_io_connect_method(connect, index,
5543 	           _input, inputCount,
5544 	           NULL, 0,
5545 	           0, 0,
5546 	           output, outputCount,
5547 	           NULL, &scalar_outputCnt,
5548 	           0, &ool_output_size);
5549 }
5550 
kern_return_t
shim_io_connect_method_scalarI_structureO(

	IOExternalMethod *      method,
	IOService *             object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	IOByteCount *   outputCount )
{
	/* Shim that dispatches a legacy IOExternalMethod (scalar-in /
	 * structure-out) through a member-function pointer.  The call shape
	 * is selected by the number of scalar inputs; the structure buffer
	 * and its size pointer fill the remaining argument slots. */
	IOMethod            func;
	IOReturn            err;

	err = kIOReturnBadArgument;

	/* do{...}while(false): single structured exit for the guard checks. */
	do {
		/* Input count must match the method-table entry exactly. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Output size must match unless the table declares the method's
		 * structure output as variable-sized. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* ARG32 narrows each 64-bit user scalar.  outputCount is passed
		 * as a void* for legacy signatures that take an IOByteCount*. */
		switch (inputCount) {
		case 5:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5615 
5616 
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	mach_msg_type_number_t *        outputCount )
{
	/* Shim that dispatches a legacy IOExternalAsyncMethod (scalar-in /
	 * structure-out) through a member-function pointer.  The 64-bit
	 * async references are narrowed to the legacy 32-bit io_async_ref_t
	 * and the call shape is selected by the number of scalar inputs. */
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err;
	io_async_ref_t      reference;

	/* REF32 narrows each 64-bit async reference for the legacy method. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	/* do{...}while(false): single structured exit for the guard checks. */
	do {
		/* Input count must match the method-table entry exactly. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Output size must match unless the table declares the method's
		 * structure output as variable-sized. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* ARG32 narrows each 64-bit user scalar.  outputCount is passed
		 * as a void* for legacy signatures that take a size pointer. */
		switch (inputCount) {
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  reference,
			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5694 
5695 /* Routine io_connect_method_scalarI_structureI */
5696 kern_return_t
is_io_connect_method_scalarI_structureI(io_connect_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5697 is_io_connect_method_scalarI_structureI(
5698 	io_connect_t            connect,
5699 	uint32_t                index,
5700 	io_scalar_inband_t      input,
5701 	mach_msg_type_number_t  inputCount,
5702 	io_struct_inband_t      inputStruct,
5703 	mach_msg_type_number_t  inputStructCount )
5704 {
5705 	uint32_t i;
5706 	io_scalar_inband64_t _input;
5707 
5708 	mach_msg_type_number_t scalar_outputCnt = 0;
5709 	mach_msg_type_number_t inband_outputCnt = 0;
5710 	mach_vm_size_t ool_output_size = 0;
5711 
5712 	for (i = 0; i < inputCount; i++) {
5713 		_input[i] = SCALAR64(input[i]);
5714 	}
5715 
5716 	return is_io_connect_method(connect, index,
5717 	           _input, inputCount,
5718 	           inputStruct, inputStructCount,
5719 	           0, 0,
5720 	           NULL, &inband_outputCnt,
5721 	           NULL, &scalar_outputCnt,
5722 	           0, &ool_output_size);
5723 }
5724 
kern_return_t
shim_io_connect_method_scalarI_structureI(
	IOExternalMethod *  method,
	IOService *         object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              inputStruct,
	mach_msg_type_number_t  inputStructCount )
{
	IOMethod            func;
	IOReturn            err = kIOReturnBadArgument;

	// Shim adapting a legacy kIOUCScalarIStructI IOExternalMethod table
	// entry to a direct member-function call: the scalar inputs become
	// the leading arguments, followed by the struct pointer and (when
	// fewer than 5 scalars are used) its byte count; remaining argument
	// slots are filled with NULL.
	do{
		// count0 is the exact number of scalar inputs the method expects.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; // err remains kIOReturnBadArgument
		}
		// count1 is the expected struct input size;
		// kIOUCVariableStructureSize accepts any size.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			// 5 scalars leave no slot for the struct size argument.
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			// NOTE(review): input[2] is passed as a raw pointer-sized
			// value here, unlike the ARG32() truncation used in every
			// other case — long-standing in this shim; confirm intended.
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *)  input[2],
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			// Unreachable unless the driver's method table is corrupt
			// (count0 > 5); err remains kIOReturnBadArgument.
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5790 
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t             asyncWakePort,
	io_user_reference_t *   asyncReference,
	uint32_t                asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              inputStruct,
	mach_msg_type_number_t  inputStructCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err = kIOReturnBadArgument;
	io_async_ref_t      reference;

	// Narrow the 64-bit async reference words to the legacy 32-bit
	// io_async_ref_t layout expected by IOAsyncMethod implementations.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	// Async variant of shim_io_connect_method_scalarI_structureI: the
	// narrowed reference block is always passed as the first argument.
	do{
		// count0 is the exact number of scalar inputs expected.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; // err remains kIOReturnBadArgument
		}
		// count1 is the expected struct input size;
		// kIOUCVariableStructureSize accepts any size.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)(  reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)(  reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)(  reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			// Unreachable unless the method table is corrupt (count0 > 5).
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5871 
5872 /* Routine io_connect_method_structureI_structureO */
5873 kern_return_t
is_io_connect_method_structureI_structureO(io_object_t connect,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5874 is_io_connect_method_structureI_structureO(
5875 	io_object_t     connect,
5876 	uint32_t        index,
5877 	io_struct_inband_t              input,
5878 	mach_msg_type_number_t  inputCount,
5879 	io_struct_inband_t              output,
5880 	mach_msg_type_number_t *        outputCount )
5881 {
5882 	mach_msg_type_number_t scalar_outputCnt = 0;
5883 	mach_vm_size_t ool_output_size = 0;
5884 
5885 	return is_io_connect_method(connect, index,
5886 	           NULL, 0,
5887 	           input, inputCount,
5888 	           0, 0,
5889 	           output, outputCount,
5890 	           NULL, &scalar_outputCnt,
5891 	           0, &ool_output_size);
5892 }
5893 
kern_return_t
shim_io_connect_method_structureI_structureO(
	IOExternalMethod *  method,
	IOService *         object,
	io_struct_inband_t              input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	IOByteCount *   outputCount )
{
	IOMethod            func;
	IOReturn            err = kIOReturnBadArgument;

	// Shim adapting a legacy kIOUCStructIStructO IOExternalMethod table
	// entry: the table's count0/count1 decide which of input/output are
	// actually passed to the member function.
	do{
		// count0 is the expected input struct size;
		// kIOUCVariableStructureSize accepts any size.
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; // err remains kIOReturnBadArgument
		}
		// count1 is the expected output struct size, likewise variable.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		if (method->count1) {
			if (method->count0) {
				// Both input and output structs are used.
				err = (object->*func)( input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				// Output-only method: no input struct is passed.
				err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			// Input-only method: no output struct is passed.
			err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);


	return err;
}
5937 
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService *             object,
	mach_port_t           asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t              asyncReferenceCount,
	io_struct_inband_t              input,
	mach_msg_type_number_t  inputCount,
	io_struct_inband_t              output,
	mach_msg_type_number_t *        outputCount )
{
	IOAsyncMethod       func;
	uint32_t            i;
	IOReturn            err;
	io_async_ref_t      reference;

	// Narrow the 64-bit async reference words to the legacy 32-bit
	// io_async_ref_t layout expected by IOAsyncMethod implementations.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	// Async variant of shim_io_connect_method_structureI_structureO:
	// count0/count1 select which of input/output are actually passed,
	// with the narrowed reference block always first.
	do{
		// count0 is the expected input struct size;
		// kIOUCVariableStructureSize accepts any size.
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; // err remains kIOReturnBadArgument
		}
		// count1 is the expected output struct size, likewise variable.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		if (method->count1) {
			if (method->count0) {
				// Both input and output structs are used.
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				// Output-only method.
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			// Input-only method.
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
5993 
/* Routine io_catalog_send_data */
kern_return_t
is_io_catalog_send_data(
	mach_port_t             main_port,
	uint32_t                flag,
	io_buf_ptr_t            inData,
	mach_msg_type_number_t  inDataCount,
	kern_return_t *         result)
{
	// Allow sending catalog data if there is no kextd and the kernel is DEVELOPMENT || DEBUG
#if NO_KEXTD && !(DEVELOPMENT || DEBUG)
	return kIOReturnNotPrivileged;
#else /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
	OSObject * obj = NULL;
	vm_offset_t data;
	kern_return_t kr = kIOReturnError;

	//printf("io_catalog_send_data called. flag: %d\n", flag);

	// Only the holder of the main device port may modify the catalogue.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	// Every flag except the three payload-less notifications requires a
	// non-empty data buffer.
	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
	    flag != kIOCatalogKextdActive &&
	    flag != kIOCatalogKextdFinishedLaunching) &&
	    (!inData || !inDataCount)) {
		return kIOReturnBadArgument;
	}

	if (!IOCurrentTaskHasEntitlement(kIOCatalogManagementEntitlement)) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);
		// For now, fake success to not break applications relying on this function succeeding.
		// See <rdar://problem/32554970> for more details.
		return kIOReturnSuccess;
	}

	if (inData) {
		vm_map_offset_t map_data;

		// Cap the payload (sizeof(io_struct_inband_t) * 1024 bytes).
		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
			return kIOReturnMessageTooLarge;
		}

		// vm_map_copyout consumes the copy object on success.
		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		// must return success after vm_map_copyout() succeeds
		// (subsequent errors are reported through *result instead)

		if (inDataCount) {
			// Parse the serialized personalities; the mapping is no
			// longer needed once unserialized.
			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
			vm_deallocate( kernel_map, data, inDataCount );
			if (!obj) {
				*result = kIOReturnNoMemory;
				return KERN_SUCCESS;
			}
		}
	}

	switch (flag) {
	case kIOCatalogResetDrivers:
	case kIOCatalogResetDriversNoMatch: {
		// Replace the entire driver list with the supplied array; the
		// plain variant also triggers rematching.
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->resetAndAddDrivers(array,
			    flag == kIOCatalogResetDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogAddDrivers:
	case kIOCatalogAddDriversNoMatch: {
		// Add personalities; the "NoMatch" variant skips rematching.
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->addDrivers( array,
			    flag == kIOCatalogAddDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveDrivers:
	case kIOCatalogRemoveDriversNoMatch: {
		// Remove personalities matching the supplied dictionary.
		OSDictionary * dict;

		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->removeDrivers( dict,
			    flag == kIOCatalogRemoveDrivers )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogStartMatching__Removed:
	case kIOCatalogRemoveKernelLinker__Removed:
	case kIOCatalogKextdActive:
	case kIOCatalogKextdFinishedLaunching:
		// Retired operations; kept so callers get a stable error code.
		kr = KERN_NOT_SUPPORTED;
		break;

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	if (obj) {
		obj->release();
	}

	// MIG convention: the operation's status travels in *result while
	// the routine itself reports successful delivery.
	*result = kr;
	return KERN_SUCCESS;
#endif /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
}
6128 
6129 /* Routine io_catalog_terminate */
6130 kern_return_t
is_io_catalog_terminate(mach_port_t main_port,uint32_t flag,io_name_t name)6131 is_io_catalog_terminate(
6132 	mach_port_t main_port,
6133 	uint32_t flag,
6134 	io_name_t name )
6135 {
6136 	kern_return_t          kr;
6137 
6138 	if (main_port != main_device_port) {
6139 		return kIOReturnNotPrivileged;
6140 	}
6141 
6142 	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6143 	    kIOClientPrivilegeAdministrator );
6144 	if (kIOReturnSuccess != kr) {
6145 		return kr;
6146 	}
6147 
6148 	switch (flag) {
6149 #if !defined(SECURE_KERNEL)
6150 	case kIOCatalogServiceTerminate:
6151 		kr = gIOCatalogue->terminateDrivers(NULL, name, false);
6152 		break;
6153 
6154 	case kIOCatalogModuleUnload:
6155 	case kIOCatalogModuleTerminate:
6156 		kr = gIOCatalogue->terminateDriversForModule(name,
6157 		    flag == kIOCatalogModuleUnload);
6158 		break;
6159 #endif
6160 
6161 	default:
6162 		kr = kIOReturnBadArgument;
6163 		break;
6164 	}
6165 
6166 	return kr;
6167 }
6168 
6169 /* Routine io_catalog_get_data */
6170 kern_return_t
is_io_catalog_get_data(mach_port_t main_port,uint32_t flag,io_buf_ptr_t * outData,mach_msg_type_number_t * outDataCount)6171 is_io_catalog_get_data(
6172 	mach_port_t             main_port,
6173 	uint32_t                flag,
6174 	io_buf_ptr_t            *outData,
6175 	mach_msg_type_number_t  *outDataCount)
6176 {
6177 	kern_return_t kr = kIOReturnSuccess;
6178 	OSSerialize * s;
6179 
6180 	if (main_port != main_device_port) {
6181 		return kIOReturnNotPrivileged;
6182 	}
6183 
6184 	//printf("io_catalog_get_data called. flag: %d\n", flag);
6185 
6186 	s = OSSerialize::withCapacity(4096);
6187 	if (!s) {
6188 		return kIOReturnNoMemory;
6189 	}
6190 
6191 	kr = gIOCatalogue->serializeData(flag, s);
6192 
6193 	if (kr == kIOReturnSuccess) {
6194 		mach_vm_address_t data;
6195 		vm_map_copy_t copy;
6196 		unsigned int size;
6197 
6198 		size = s->getLength();
6199 		kr = mach_vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
6200 		if (kr == kIOReturnSuccess) {
6201 			bcopy(s->text(), (void *)data, size);
6202 			kr = vm_map_copyin(kernel_map, data, size, true, &copy);
6203 			*outData = (char *)copy;
6204 			*outDataCount = size;
6205 		}
6206 	}
6207 
6208 	s->release();
6209 
6210 	return kr;
6211 }
6212 
6213 /* Routine io_catalog_get_gen_count */
6214 kern_return_t
is_io_catalog_get_gen_count(mach_port_t main_port,uint32_t * genCount)6215 is_io_catalog_get_gen_count(
6216 	mach_port_t             main_port,
6217 	uint32_t                *genCount)
6218 {
6219 	if (main_port != main_device_port) {
6220 		return kIOReturnNotPrivileged;
6221 	}
6222 
6223 	//printf("io_catalog_get_gen_count called.\n");
6224 
6225 	if (!genCount) {
6226 		return kIOReturnBadArgument;
6227 	}
6228 
6229 	*genCount = gIOCatalogue->getGenerationCount();
6230 
6231 	return kIOReturnSuccess;
6232 }
6233 
6234 /* Routine io_catalog_module_loaded.
6235  * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
6236  */
6237 kern_return_t
is_io_catalog_module_loaded(mach_port_t main_port,io_name_t name)6238 is_io_catalog_module_loaded(
6239 	mach_port_t             main_port,
6240 	io_name_t               name)
6241 {
6242 	if (main_port != main_device_port) {
6243 		return kIOReturnNotPrivileged;
6244 	}
6245 
6246 	//printf("io_catalog_module_loaded called. name %s\n", name);
6247 
6248 	if (!name) {
6249 		return kIOReturnBadArgument;
6250 	}
6251 
6252 	gIOCatalogue->moduleHasLoaded(name);
6253 
6254 	return kIOReturnSuccess;
6255 }
6256 
6257 kern_return_t
is_io_catalog_reset(mach_port_t main_port,uint32_t flag)6258 is_io_catalog_reset(
6259 	mach_port_t             main_port,
6260 	uint32_t                flag)
6261 {
6262 	if (main_port != main_device_port) {
6263 		return kIOReturnNotPrivileged;
6264 	}
6265 
6266 	switch (flag) {
6267 	case kIOCatalogResetDefault:
6268 		gIOCatalogue->reset();
6269 		break;
6270 
6271 	default:
6272 		return kIOReturnBadArgument;
6273 	}
6274 
6275 	return kIOReturnSuccess;
6276 }
6277 
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t    result = kIOReturnBadArgument;
	IOUserClient   * userClient;
	OSObject       * object;
	uintptr_t        ref;
	mach_port_name_t portName;

	// userClientRef carries a mach port name whose low bits double as
	// routing flags (see comment below).
	ref     = (uintptr_t) args->userClientRef;

	if ((ref == MACH_PORT_DEAD) || (ref == (uintptr_t) MACH_PORT_NULL)) {
		return kIOReturnBadArgument;
	}
	// kobject port names always have b0-1 set, so we use these bits as flags to
	// iokit_user_client_trap()
	// keep this up to date with ipc_entry_name_mask();
	portName = (mach_port_name_t) (ref | 3);
	// Bit 32 set, or bit 0 clear, selects the DriverKit (UEXT) trap path.
	if (((1ULL << 32) & ref) || !(1 & ref)) {
		object = iokit_lookup_uext_ref_current_task(portName);
		if (object) {
			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
		}
		OSSafeReleaseNULL(object);
	} else {
		// Classic IOUserClient trap: resolve the connection, apply any
		// registered filter policy, then invoke the indexed trap.
		io_object_t ref_current_task = iokit_lookup_connect_ref_current_task((mach_port_name_t) ref);
		if ((userClient = OSDynamicCast(IOUserClient, ref_current_task))) {
			IOExternalTrap *trap = NULL;
			IOService *target = NULL;

			result = kIOReturnSuccess;
			io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
			if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
				// The sandbox filter may veto this trap index.
				result = gIOUCFilterCallbacks->io_filter_applier(userClient, filterPolicy, io_filter_type_trap, args->index);
			}
			if (kIOReturnSuccess == result) {
				trap = userClient->getTargetAndTrapForIndex(&target, args->index);
			}
			if (trap && target) {
				IOTrap func;

				func = trap->func;

				if (func) {
					result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
				}
			}

			// Drop the reference taken by the connect-ref lookup.
			iokit_remove_connect_reference(userClient);
		} else {
			// Lookup returned something that is not an IOUserClient.
			OSSafeReleaseNULL(ref_current_task);
		}
	}

	return result;
}
6334 
6335 /* Routine io_device_tree_entry_exists_with_name */
6336 kern_return_t
is_io_device_tree_entry_exists_with_name(mach_port_t main_port,io_name_t name,boolean_t * exists)6337 is_io_device_tree_entry_exists_with_name(
6338 	mach_port_t main_port,
6339 	io_name_t name,
6340 	boolean_t *exists )
6341 {
6342 	OSCollectionIterator *iter;
6343 
6344 	if (main_port != main_device_port) {
6345 		return kIOReturnNotPrivileged;
6346 	}
6347 
6348 	iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6349 	*exists = iter && iter->getNextObject();
6350 	OSSafeReleaseNULL(iter);
6351 
6352 	return kIOReturnSuccess;
6353 }
6354 } /* extern "C" */
6355 
6356 IOReturn
callExternalMethod(uint32_t selector,IOExternalMethodArguments * args)6357 IOUserClient::callExternalMethod(uint32_t selector, IOExternalMethodArguments * args)
6358 {
6359 	IOReturn ret;
6360 
6361 	if (defaultLocking) {
6362 		if (defaultLockingSingleThreadExternalMethod) {
6363 			IORWLockWrite(&lock);
6364 		} else {
6365 			IORWLockRead(&lock);
6366 		}
6367 	}
6368 	if (uc2022) {
6369 		ret = ((IOUserClient2022 *) this)->externalMethod(selector, (IOExternalMethodArgumentsOpaque *) args);
6370 	} else {
6371 		ret = externalMethod(selector, args);
6372 	}
6373 	if (defaultLocking) {
6374 		IORWLockUnlock(&lock);
6375 	}
6376 	return ret;
6377 }
6378 
// IOUserClient2022 subclasses must implement the opaque-arguments
// overload of externalMethod(); reaching this legacy entry point is a
// programming error, so fail hard.
MIG_SERVER_ROUTINE IOReturn
IOUserClient2022::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
    IOExternalMethodDispatch *dispatch,
    OSObject *target, void *reference)
{
	panic("wrong externalMethod for IOUserClient2022");
}
6386 
6387 IOReturn
dispatchExternalMethod(uint32_t selector,IOExternalMethodArgumentsOpaque * arguments,const IOExternalMethodDispatch2022 dispatchArray[],size_t dispatchArrayCount,OSObject * target,void * reference)6388 IOUserClient2022::dispatchExternalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque *arguments,
6389     const IOExternalMethodDispatch2022 dispatchArray[], size_t dispatchArrayCount,
6390     OSObject * target, void * reference)
6391 {
6392 	IOReturn    err;
6393 	IOExternalMethodArguments * args = (typeof(args))arguments;
6394 	const IOExternalMethodDispatch2022 * dispatch;
6395 
6396 	if (!dispatchArray) {
6397 		return kIOReturnError;
6398 	}
6399 	if (selector >= dispatchArrayCount) {
6400 		return kIOReturnBadArgument;
6401 	}
6402 	dispatch = &dispatchArray[selector];
6403 
6404 	uint32_t count;
6405 	count = dispatch->checkScalarInputCount;
6406 	if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6407 		return kIOReturnBadArgument;
6408 	}
6409 
6410 	count = dispatch->checkStructureInputSize;
6411 	if ((kIOUCVariableStructureSize != count)
6412 	    && (count != ((args->structureInputDescriptor)
6413 	    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6414 		return kIOReturnBadArgument;
6415 	}
6416 
6417 	count = dispatch->checkScalarOutputCount;
6418 	if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6419 		return kIOReturnBadArgument;
6420 	}
6421 
6422 	count = dispatch->checkStructureOutputSize;
6423 	if ((kIOUCVariableStructureSize != count)
6424 	    && (count != ((args->structureOutputDescriptor)
6425 	    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6426 		return kIOReturnBadArgument;
6427 	}
6428 
6429 	if (args->asyncWakePort && !dispatch->allowAsync) {
6430 		return kIOReturnBadArgument;
6431 	}
6432 
6433 	if (dispatch->checkEntitlement) {
6434 		if (!IOCurrentTaskHasEntitlement(dispatch->checkEntitlement)) {
6435 			return kIOReturnNotPrivileged;
6436 		}
6437 	}
6438 
6439 	if (dispatch->function) {
6440 		err = (*dispatch->function)(target, reference, args);
6441 	} else {
6442 		err = kIOReturnNoCompletion; /* implementer can dispatch */
6443 	}
6444 	return err;
6445 }
6446 
IOReturn
IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
	IOReturn    err;
	IOService * object;
	IOByteCount structureOutputSize;

	// Dispatch-table path: validate marshalled counts/sizes against the
	// supplied entry, then call it. kIOUCVariableStructureSize in a
	// check field means "any count/size accepted".
	if (dispatch) {
		uint32_t count;
		count = dispatch->checkScalarInputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
			return kIOReturnBadArgument;
		}

		// Structure input may arrive inline or as a memory descriptor.
		count = dispatch->checkStructureInputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureInputDescriptor)
		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkScalarOutputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureOutputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureOutputDescriptor)
		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
			return kIOReturnBadArgument;
		}

		if (dispatch->function) {
			err = (*dispatch->function)(target, reference, args);
		} else {
			err = kIOReturnNoCompletion; /* implementer can dispatch */
		}
		return err;
	}


	// pre-Leopard API's don't do ool structs
	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
		err = kIOReturnIPCError;
		return err;
	}

	structureOutputSize = args->structureOutputSize;

	if (args->asyncWakePort) {
		// Legacy async path: route through the shim matching the method
		// type recorded in the IOExternalAsyncMethod flags.
		IOExternalAsyncMethod * method;
		object = NULL;
		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		// Foreground-only methods are refused for GPU-denied tasks.
		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_async_method_scalarI_structureI( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *)args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_async_method_scalarI_scalarO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_async_method_scalarI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_async_method_structureI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    (char *)args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	} else {
		// Legacy synchronous path: identical structure, but the struct
		// output size flows through the local structureOutputSize.
		IOExternalMethod *      method;
		object = NULL;
		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_connect_method_scalarI_structureI( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_connect_method_scalarI_scalarO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_connect_method_scalarI_structureO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_connect_method_structureI_structureO( method, object,
			    (char *) args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	}

	// Clamp the (wider) IOByteCount before narrowing it back into args.
	// NOTE(review): on the async path the shims write
	// args->structureOutputSize directly and this assignment restores
	// the pre-call value — presumably async output travels via the
	// completion instead of the reply; confirm intentional.
	if (structureOutputSize > UINT_MAX) {
		structureOutputSize = 0;
		err = kIOReturnBadArgument;
	}

	args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);

	return err;
}
6599 
6600 IOReturn
registerFilterCallbacks(const struct io_filter_callbacks * callbacks,size_t size)6601 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6602 {
6603 	if (size < sizeof(*callbacks)) {
6604 		return kIOReturnBadArgument;
6605 	}
6606 	if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6607 		return kIOReturnBusy;
6608 	}
6609 	return kIOReturnSuccess;
6610 }
6611 
6612 
/*
 * Reserved (unused) virtual-method padding slots for IOUserClient.
 * These keep the class's vtable layout stable so future releases can add
 * virtual methods without breaking binary compatibility with existing
 * kexts; each slot is consumed via OSMetaClassDefineReservedUsed when a
 * new method is introduced.
 */
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);
6629 
/*
 * Reserved (unused) virtual-method padding slots for IOUserClient2022,
 * maintaining binary compatibility headroom for that subclass's vtable.
 */
OSMetaClassDefineReservedUnused(IOUserClient2022, 0);
OSMetaClassDefineReservedUnused(IOUserClient2022, 1);
OSMetaClassDefineReservedUnused(IOUserClient2022, 2);
OSMetaClassDefineReservedUnused(IOUserClient2022, 3);
6634