1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <libkern/c++/OSKext.h>
30 #include <libkern/c++/OSSharedPtr.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51
52 #include <mach/sdt.h>
53 #include <os/hash.h>
54
55 #include <libkern/amfi/amfi.h>
56
57 #if CONFIG_MACF
58
59 extern "C" {
60 #include <security/mac_framework.h>
61 };
62 #include <sys/kauth.h>
63
64 #define IOMACF_LOG 0
65
66 #endif /* CONFIG_MACF */
67
68 #include <IOKit/assert.h>
69
70 #include "IOServicePrivate.h"
71 #include "IOKitKernelInternal.h"
72
/* Conversion helpers between user-visible scalar/reference types and
 * kernel integer representations. */
#define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
#define SCALAR32(x) ((uint32_t )x)
#define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
#define REF64(x) ((io_user_reference_t)((UInt64)(x)))
#define REF32(x) ((int)(x))

/* Flag bits carried in the low bits of 64-bit async wake port references. */
enum{
	kIOUCAsync0Flags = 3ULL,
	kIOUCAsync64Flag = 1ULL,
	kIOUCAsyncErrorLoggedFlag = 2ULL
};

#if IOKITSTATS

/* Register this user client instance with the IOStatistics subsystem;
 * stores the returned counter in the expansion (reserved) area. */
#define IOStatisticsRegisterCounter() \
do { \
	reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

/* Unregister the counter; tolerates a NULL reserved area from a
 * partially-constructed instance. */
#define IOStatisticsUnregisterCounter() \
do { \
	if (reserved) \
	        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

/* Count one user-client call against `client` (a local in the caller). */
#define IOStatisticsClientCall() \
do { \
	IOStatistics::countUserClientCall(client); \
} while (0)

#else

#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */

#if DEVELOPMENT || DEBUG

/* Temporarily overwrite the saved return address in the current stack
 * frame with `a`, so that backtraces taken inside the callee attribute
 * the call to the user client entry point rather than this dispatcher.
 * Must be paired with FAKE_STACK_FRAME_END() before returning.
 * NOTE(review): relies on the frame layout ([fp, ra]) of the target
 * architectures — not portable C. */
#define FAKE_STACK_FRAME(a) \
	const void ** __frameptr; \
	const void * __retaddr; \
	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
	__retaddr = __frameptr[1]; \
	__frameptr[1] = (a);

/* Restore the genuine return address saved by FAKE_STACK_FRAME(). */
#define FAKE_STACK_FRAME_END() \
	__frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */

/* Element counts of the 32-bit and 64-bit async reference arrays. */
#define ASYNC_REF_COUNT         (sizeof(io_async_ref_t) / sizeof(natural_t))
#define ASYNC_REF64_COUNT       (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
132
133 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
134
135 extern "C" {
136 #include <mach/mach_traps.h>
137 #include <vm/vm_map.h>
138 } /* extern "C" */
139
140 struct IOMachPortHashList;
141
142 static_assert(IKOT_MAX_TYPE <= 255);
143
144 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
145
146 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// Instances live in gIOMachPortHash buckets, keyed by object pointer, and
// are created/looked up/torn down under gIOObjectPortLock.
class IOMachPort : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	SLIST_ENTRY(IOMachPort) link;   // hash-bucket linkage
	ipc_port_t      port;           // kobject port representing `object`
	OSObject*       object;         // object this port stands for (tagged-retained)
	UInt32          mscount;        // make-send count; compared on no-senders
	UInt8           holdDestroy;    // when set, releasePortForObject() is a no-op
	UInt8           type;           // ipc_kobject_type_t, narrowed (IKOT_MAX_TYPE <= 255)

	// Allocate an IOMachPort and its kobject port for obj; takes a tagged
	// retain on obj. Caller must insert it into the bucket under the lock.
	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);

	// Hash bucket for obj (type does not participate in the hash).
	static IOMachPortHashList* bucketForObject(OSObject *obj,
	    ipc_kobject_type_t type);

	// Linear search of a bucket for the (obj, type) entry; caller holds
	// gIOObjectPortLock.
	static LIBKERN_RETURNS_NOT_RETAINED IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	// Handle a Mach no-senders notification; returns true if the port
	// was actually torn down (see definition for mscount semantics).
	static bool noMoreSendersForObject( OSObject * obj,
	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
	// Drop the port for (obj, type) unless holdDestroy is set.
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );
	// Mark the port so releasePortForObject() will not destroy it.
	static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

	// Create a send right to obj's port in `task`'s IPC space.
	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;
};
176
#define super OSObject
OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)

// Guards gIOMachPortHash and all IOMachPort lookup/insert/teardown, plus
// IOUserClient __ipc/__ipcFinal state and IOUserNotification notifier swaps.
static IOLock * gIOObjectPortLock;
IOLock * gIOUserServerLock;

// Optional external-method filter callbacks, registered once early in boot.
SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SLIST_HEAD(IOMachPortHashList, IOMachPort);

#if defined(XNU_TARGET_OS_OSX)
#define PORT_HASH_SIZE 4096
#else /* !defined(XNU_TARGET_OS_OSX) */
#define PORT_HASH_SIZE 256
#endif /* !defined(XNU_TARGET_OS_OSX) */

// Object-pointer-hashed buckets mapping OSObject -> IOMachPort.
IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
196
197 void
IOMachPortInitialize(void)198 IOMachPortInitialize(void)
199 {
200 for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
201 SLIST_INIT(&gIOMachPortHash[i]);
202 }
203 }
204
/*
 * Returns the hash bucket for obj. Only the object pointer participates
 * in the hash; `type` is unused here and is only matched during the
 * per-bucket search in portForObjectInBucket().
 */
IOMachPortHashList*
IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
{
	return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
}
210
211 IOMachPort*
portForObjectInBucket(IOMachPortHashList * bucket,OSObject * obj,ipc_kobject_type_t type)212 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
213 {
214 IOMachPort *machPort;
215
216 SLIST_FOREACH(machPort, bucket, link) {
217 if (machPort->object == obj && machPort->type == type) {
218 return machPort;
219 }
220 }
221 return NULL;
222 }
223
224 IOMachPort*
withObjectAndType(OSObject * obj,ipc_kobject_type_t type)225 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
226 {
227 IOMachPort *machPort = NULL;
228
229 machPort = new IOMachPort;
230 if (__improbable(machPort && !machPort->init())) {
231 OSSafeReleaseNULL(machPort);
232 return NULL;
233 }
234
235 machPort->object = obj;
236 machPort->type = (typeof(machPort->type))type;
237 machPort->port = iokit_alloc_object_port(obj, type);
238
239 obj->taggedRetain(OSTypeID(OSCollection));
240 machPort->mscount++;
241
242 return machPort;
243 }
244
/*
 * Handles a Mach no-senders notification for the port backing (obj, type).
 * Returns true when the notification was current (no send rights created
 * since it fired) and the IOMachPort was torn down; otherwise writes the
 * up-to-date make-send count into *mscount and returns false so the
 * caller can re-arm the notification.
 */
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOMachPort *machPort = NULL;
	IOUserClient *uc;
	OSAction *action;
	bool destroyed = true;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Keep obj alive across the taggedRelease below.
	obj->retain();

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort) {
		// Stale notification: more send rights were made after it fired.
		destroyed = (machPort->mscount <= *mscount);
		if (!destroyed) {
			*mscount = machPort->mscount;
			lck_mtx_unlock(gIOObjectPortLock);
		} else {
			// User clients get notified before their port goes away.
			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
				uc->noMoreSenders();
			}
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);

			lck_mtx_unlock(gIOObjectPortLock);

			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

			// Drop the reference the IOMachPort held on obj.
			obj->taggedRelease(OSTypeID(OSCollection));
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}

	// DriverKit object ports: tell the OSAction its last client went away.
	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
		action->Aborted();
	}

	if (IKOT_UEXT_OBJECT == type && IOUserServer::shouldLeakObjects()) {
		// Leak object
		obj->retain();
	}

	obj->release();

	return destroyed;
}
296
/*
 * Removes and releases the IOMachPort for (obj, type), unless the entry
 * is marked holdDestroy. Never used for IKOT_IOKIT_CONNECT ports (those
 * are torn down via the no-senders path; see the assert).
 */
void
IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
	IOMachPort *machPort;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort && !machPort->holdDestroy) {
		// Keep obj alive until after the taggedRelease below.
		obj->retain();
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);

		lck_mtx_unlock(gIOObjectPortLock);

		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

		// Drop the reference the IOMachPort held on obj.
		obj->taggedRelease(OSTypeID(OSCollection));
		obj->release();
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}
}
324
325 void
setHoldDestroy(OSObject * obj,ipc_kobject_type_t type)326 IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
327 {
328 IOMachPort * machPort;
329
330 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
331 lck_mtx_lock(gIOObjectPortLock);
332
333 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
334
335 if (machPort) {
336 machPort->holdDestroy = true;
337 }
338
339 lck_mtx_unlock(gIOObjectPortLock);
340 }
341
/*
 * C-callable wrapper: drops the user-visible port for (obj, type).
 * Thin shim over IOMachPort::releasePortForObject().
 */
void
IOMachPortDestroyUserReferences(OSObject * obj, natural_t type)
{
	IOMachPort::releasePortForObject(obj, type);
}
347
/*
 * Tears down user-space references to obj: drops its IKOT_IOKIT_OBJECT
 * port, then detaches its IKOT_IOKIT_CONNECT port. For an IOUserClient
 * with outstanding memory mappings, the connect port is not destroyed
 * but re-pointed at the mappings object, so existing map handles held by
 * the client stay valid after the user client itself goes away.
 */
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	IOMachPort *machPort;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	// panther, 3160200
	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

	// Keep obj alive across the taggedRelease below.
	obj->retain();
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
	IOMachPortHashList *mappingBucket = NULL;

	lck_mtx_lock(gIOObjectPortLock);

	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	if (uc && uc->mappings) {
		// The port will be re-keyed to the mappings object; find its bucket.
		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
	}

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		lck_mtx_unlock(gIOObjectPortLock);
		goto end;
	}

	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	obj->taggedRelease(OSTypeID(OSCollection));

	if (uc) {
		uc->noMoreSenders();
		if (uc->mappings) {
			// Transfer the port to the mappings object: it takes the
			// tagged reference, becomes the port's kobject, and moves
			// into its own hash bucket.
			uc->mappings->taggedRetain(OSTypeID(OSCollection));
			machPort->object = uc->mappings;
			SLIST_INSERT_HEAD(mappingBucket, machPort, link);
			iokit_switch_object_port(machPort->port, uc->mappings, IKOT_IOKIT_CONNECT);

			lck_mtx_unlock(gIOObjectPortLock);

			OSSafeReleaseNULL(uc->mappings);
		} else {
			lck_mtx_unlock(gIOObjectPortLock);
			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
	}


end:
	OSSafeReleaseNULL(obj);
}
403
404 mach_port_name_t
makeSendRightForTask(task_t task,io_object_t obj,ipc_kobject_type_t type)405 IOMachPort::makeSendRightForTask( task_t task,
406 io_object_t obj, ipc_kobject_type_t type )
407 {
408 return iokit_make_send_right( task, obj, type );
409 }
410
411 void
free(void)412 IOMachPort::free( void )
413 {
414 if (port) {
415 iokit_destroy_object_port( port, type );
416 }
417 super::free();
418 }
419
420 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
421
/*
 * Whether `task` should see registry-compatibility matching behavior.
 * Currently a stub that always reports false; kept as a function so the
 * policy can be reintroduced without touching callers.
 */
static bool
IOTaskRegistryCompatibility(task_t task)
{
	return false;
}
427
428 static void
IOTaskRegistryCompatibilityMatching(task_t task,OSDictionary * matching)429 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
430 {
431 matching->setObject(gIOServiceNotificationUserKey, kOSBooleanTrue);
432 if (!IOTaskRegistryCompatibility(task)) {
433 return;
434 }
435 matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
436 }
437
438 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
439
OSDefineMetaClassAndStructors(IOUserIterator,OSIterator)440 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
441
442 IOUserIterator *
443 IOUserIterator::withIterator(OSIterator * iter)
444 {
445 IOUserIterator * me;
446
447 if (!iter) {
448 return NULL;
449 }
450
451 me = new IOUserIterator;
452 if (me && !me->init()) {
453 me->release();
454 me = NULL;
455 }
456 if (!me) {
457 iter->release();
458 return me;
459 }
460 me->userIteratorObject = iter;
461
462 return me;
463 }
464
465 bool
init(void)466 IOUserIterator::init( void )
467 {
468 if (!OSObject::init()) {
469 return false;
470 }
471
472 lock = IOLockAlloc();
473 if (!lock) {
474 return false;
475 }
476
477 return true;
478 }
479
/*
 * Releases the wrapped iterator/notifier and the iteration lock.
 * Both members are guarded because init() can fail part-way through.
 */
void
IOUserIterator::free()
{
	if (userIteratorObject) {
		userIteratorObject->release();
	}
	if (lock) {
		IOLockFree(lock);
	}
	OSObject::free();
}
491
/*
 * Restarts the wrapped OSIterator from the beginning, under the lock
 * so it cannot race a concurrent copyNextObject().
 */
void
IOUserIterator::reset()
{
	IOLockLock(lock);
	assert(OSDynamicCast(OSIterator, userIteratorObject));
	((OSIterator *)userIteratorObject)->reset();
	IOLockUnlock(lock);
}
500
501 bool
isValid()502 IOUserIterator::isValid()
503 {
504 bool ret;
505
506 IOLockLock(lock);
507 assert(OSDynamicCast(OSIterator, userIteratorObject));
508 ret = ((OSIterator *)userIteratorObject)->isValid();
509 IOLockUnlock(lock);
510
511 return ret;
512 }
513
/*
 * Deliberately unimplemented: an unretained next-object is unsafe across
 * the user boundary. Callers must use copyNextObject(), which returns a
 * retained reference.
 */
OSObject *
IOUserIterator::getNextObject()
{
	assert(false);
	return NULL;
}
520
521 OSObject *
copyNextObject()522 IOUserIterator::copyNextObject()
523 {
524 OSObject * ret = NULL;
525
526 IOLockLock(lock);
527 if (userIteratorObject) {
528 ret = ((OSIterator *)userIteratorObject)->getNextObject();
529 if (ret) {
530 ret->retain();
531 }
532 }
533 IOLockUnlock(lock);
534
535 return ret;
536 }
537
538 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
539 extern "C" {
540 // functions called from osfmk/device/iokit_rpc.c
541
/*
 * Fills `desc` with a human-readable description of the kobject behind a
 * port, for IPC debugging tools. Registry entries include their entry ID;
 * on DEVELOPMENT/DEBUG builds, service notifications include their
 * serialized matching dictionary; everything else gets its class name.
 */
void
iokit_port_object_description(io_object_t obj, kobject_description_t desc)
{
	IORegistryEntry    * regEntry;
	IOUserNotification * __unused noti;
	_IOServiceNotifier * __unused serviceNoti;
	OSSerialize        * __unused s;
	OSDictionary       * __unused matching = NULL;

	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
#if DEVELOPMENT || DEBUG
	} else if ((noti = OSDynamicCast(IOUserNotification, obj))) {
		// serviceNoti->matching may become NULL if the port gets a no-senders notification, so we have to lock gIOObjectPortLock
		IOLockLock(gIOObjectPortLock);
		serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->userIteratorObject);
		if (serviceNoti && (matching = serviceNoti->matching)) {
			// Retain so the dictionary survives after dropping the lock.
			matching->retain();
		}
		IOLockUnlock(gIOObjectPortLock);

		if (matching) {
			s = OSSerialize::withCapacity((unsigned int) page_size);
			if (s && matching->serialize(s)) {
				snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
			}
			OSSafeReleaseNULL(s);
			OSSafeReleaseNULL(matching);
		}
#endif /* DEVELOPMENT || DEBUG */
	} else {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
	}
}
576
577 // FIXME: Implementation of these functions are hidden from the static analyzer.
578 // As for now, the analyzer doesn't consistently support wrapper functions
579 // for retain and release.
580 #ifndef __clang_analyzer__
581 void
iokit_add_reference(io_object_t obj,natural_t type)582 iokit_add_reference( io_object_t obj, natural_t type )
583 {
584 IOUserClient * uc;
585
586 if (!obj) {
587 return;
588 }
589
590 if ((IKOT_IOKIT_CONNECT == type)
591 && (uc = OSDynamicCast(IOUserClient, obj))) {
592 OSIncrementAtomic(&uc->__ipc);
593 }
594
595 obj->retain();
596 }
597
/*
 * Drops the reference taken by iokit_add_reference(); NULL-tolerant.
 */
void
iokit_remove_reference( io_object_t obj )
{
	if (obj) {
		obj->release();
	}
}
605 #endif // __clang_analyzer__
606
/*
 * Drops an IPC reference on a connect-port object. For IOUserClients it
 * also decrements the in-flight IPC counter (__ipc); when the last
 * in-flight message drains on an inactive client whose finalization was
 * deferred (__ipcFinal set by finalizeUserReferences), it schedules the
 * pending finalize here.
 */
void
iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj )
{
	IOUserClient * uc;
	bool finalize = false;

	if (!obj) {
		return;
	}

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		// 1 == previous value means this was the last in-flight message.
		if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
			IOLockLock(gIOObjectPortLock);
			if ((finalize = uc->__ipcFinal)) {
				uc->__ipcFinal = false;
			}
			IOLockUnlock(gIOObjectPortLock);
		}
		if (finalize) {
			uc->scheduleFinalize(true);
		}
	}

	obj->release();
}
632
/*
 * Called during termination to decide whether a user client can be
 * finalized now. If IPC messages are still in flight (__ipc != 0), marks
 * __ipcFinal so iokit_remove_connect_reference() finalizes later, and
 * returns false; otherwise returns true.
 */
bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
	IOUserClient * uc;
	bool ok = true;

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		IOLockLock(gIOObjectPortLock);
		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
			ok = false;
		}
		IOLockUnlock(gIOObjectPortLock);
	}
	return ok;
}
648
/*
 * Returns (with a port reference) the kobject port for (obj, type),
 * lazily creating the IOMachPort on first use. Each call on an existing
 * entry bumps its make-send count, which the no-senders path compares
 * against to detect stale notifications. Returns NULL on allocation
 * failure.
 */
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
{
	IOMachPort *machPort = NULL;
	ipc_port_t  port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		machPort = IOMachPort::withObjectAndType(obj, type);
		if (__improbable(machPort == NULL)) {
			goto end;
		}
		SLIST_INSERT_HEAD(bucket, machPort, link);
	} else {
		machPort->mscount++;
	}

	// Caller receives its own reference on the port.
	iokit_retain_port(machPort->port);
	port = machPort->port;

end:
	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
679
/*
 * No-senders handler invoked from the IPC layer when the last send right
 * to an IOKit port dies. Returns kIOReturnNotReady when the notification
 * is stale (send rights were created since it fired); otherwise tears the
 * port down and dispatches type-specific cleanup: clientDied() for user
 * clients, taskDied()/notification teardown for plain objects, and
 * cancellation for server check-in tokens.
 */
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOUserClient *      client;
	IOMemoryMap *       map;
	IOUserNotification * notify;
	IOUserServerCheckInToken * token;

	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
		return kIOReturnNotReady;
	}

	switch (type) {
	case IKOT_IOKIT_CONNECT:
		if ((client = OSDynamicCast( IOUserClient, obj ))) {
			IOStatisticsClientCall();
			// clientDied() runs under the client's write lock to
			// exclude concurrent external method dispatch.
			IORWLockWrite(client->lock);
			client->clientDied();
			IORWLockUnlock(client->lock);
		}
		break;
	case IKOT_IOKIT_OBJECT:
		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
			map->taskDied();
		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
			notify->setNotification( NULL );
		}
		break;
	case IKOT_IOKIT_IDENT:
		if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
			token->cancel();
		}
		break;
	}

	return kIOReturnSuccess;
}
718 }; /* extern "C" */
719
720 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
721
/*
 * Delivers IOService matching notifications to a user process. Matched
 * services are queued in newSet; a single "ping" Mach message wakes the
 * client, which then drains the queue via copyNextObject().
 */
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	// Wire format of the wakeup message sent to remotePort.
	struct PingMsg {
		mach_msg_header_t       msgHdr;
		OSNotificationHeader64  notifyHeader;
	};

	enum { kMaxOutstanding = 1024 };        // cap on services queued in newSet

	ipc_port_t      remotePort;             // client's wakeup port (send right, owned)
	void            *msgReference;          // async reference copied into each ping
	mach_msg_size_t msgReferenceSize;       // mach_round_msg'd size of msgReference
	natural_t       msgType;                // notification type for the header
	OSArray *       newSet;                 // matched services not yet collected
	bool            armed;                  // send a ping on the next match
	bool            ipcLogged;              // rate-limits send-failure logging

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	// Drop the cached remotePort without releasing it (caller owns it).
	void invalidatePort(void);

	// Matching-notification callback thunk (target = this).
	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
756
/*
 * Delivers IOService interest (general-interest message) notifications
 * to a user process. Unlike IOServiceUserNotification, each message is
 * sent directly with its payload; the message carries a port descriptor
 * for the provider.
 */
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	// Wire format of the interest message; notifyHeader is packed because
	// it follows a variable descriptor area.
	struct PingMsg {
		mach_msg_header_t             msgHdr;
		mach_msg_body_t               msgBody;
		mach_msg_port_descriptor_t    ports[1];
		OSNotificationHeader64        notifyHeader __attribute__ ((packed));
	};

	ipc_port_t      remotePort;             // client's port (send right, owned)
	void            *msgReference;          // async reference copied into each message
	mach_msg_size_t msgReferenceSize;       // mach_round_msg'd size of msgReference
	mach_msg_size_t msgExtraSize;           // extra payload bytes past the header
	natural_t       msgType;                // notification type for the header
	uint8_t         clientIs64;             // client used the 64-bit interface
	int             owningPID;              // pid that registered, for kIOMessageCopyClientID
	bool            ipcLogged;              // rate-limits send-failure logging

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	// Drop the cached remotePort without releasing it (caller owns it).
	void invalidatePort(void);

	// Interest-notification callback thunk (target = this).
	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
796
797 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
798
799 #undef super
800 #define super IOUserIterator
801 OSDefineMetaClass( IOUserNotification, IOUserIterator );
802 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
803
804 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
805
/*
 * Destructor. By this point setNotification(NULL) must already have
 * detached the IONotifier (asserted on DEVELOPMENT/DEBUG builds, under
 * the same lock that guards notifier swaps).
 */
void
IOUserNotification::free( void )
{
#if DEVELOPMENT || DEBUG
	IOLockLock( gIOObjectPortLock);

	assert(userIteratorObject == NULL);

	IOLockUnlock( gIOObjectPortLock);
#endif /* DEVELOPMENT || DEBUG */

	super::free();
}
819
820
/*
 * Atomically swaps the attached IONotifier. Installing a notifier takes
 * a self-retain (the notifier's callback may reference this object);
 * setNotification(NULL) removes the previous notifier and drops that
 * retain. The temporary retain below protects against a race between
 * two concurrent swap/clear callers.
 */
void
IOUserNotification::setNotification( IONotifier * notify )
{
	OSObject * previousNotify;

	/*
	 * We must retain this object here before proceeding.
	 * Two threads may race in setNotification(). If one thread sets a new notifier while the
	 * other thread sets the notifier to NULL, it is possible for the second thread to call release()
	 * before the first thread calls retain(). Without the retain here, this thread interleaving
	 * would cause the object to get released and freed before it is retained by the first thread,
	 * which is a UaF.
	 */
	retain();

	IOLockLock( gIOObjectPortLock);

	previousNotify = userIteratorObject;
	userIteratorObject = notify;

	IOLockUnlock( gIOObjectPortLock);

	if (previousNotify) {
		assert(OSDynamicCast(IONotifier, previousNotify));
		// Detach the old notifier from its notification chain.
		((IONotifier *)previousNotify)->remove();

		if (notify == NULL) {
			// Drop the self-retain taken when the notifier was installed.
			release();
		}
	} else if (notify) {
		// new IONotifier, retain the object. release() will happen in setNotification(NULL)
		retain();
	}

	release(); // paired with retain() at beginning of this method
}
857
/*
 * Intentionally a no-op: a notification object has no iteration cursor
 * to rewind, but must satisfy the IOUserIterator interface.
 */
void
IOUserNotification::reset()
{
}
863
/*
 * Notification objects are always reported valid; there is no backing
 * collection that could be invalidated by mutation.
 */
bool
IOUserNotification::isValid()
{
	return true;
}
869
870 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
871
872 #undef super
873 #define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification,IOUserNotification)874 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
875
876 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
877
878 bool
879 IOServiceUserNotification::init( mach_port_t port, natural_t type,
880 void * reference, vm_size_t referenceSize,
881 bool clientIs64 )
882 {
883 if (!super::init()) {
884 return false;
885 }
886
887 newSet = OSArray::withCapacity( 1 );
888 if (!newSet) {
889 return false;
890 }
891
892 if (referenceSize > sizeof(OSAsyncReference64)) {
893 return false;
894 }
895
896 msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
897 msgReference = IOMallocZeroData(msgReferenceSize);
898 if (!msgReference) {
899 return false;
900 }
901
902 remotePort = port;
903 msgType = type;
904 bcopy( reference, msgReference, referenceSize );
905
906 return true;
907 }
908
/*
 * Clears the cached client port so free() will not call
 * iokit_release_port_send() on it; used when the caller has already
 * disposed of the send right.
 */
void
IOServiceUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
914
/*
 * Destructor: releases the client send right (unless invalidatePort()
 * cleared it), the copied async reference, and the pending-service queue.
 * Tolerates partially-constructed instances from init() failure paths.
 */
void
IOServiceUserNotification::free( void )
{
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);
	OSSafeReleaseNULL(newSet);

	super::free();
}
926
927 bool
_handler(void * target,void * ref,IOService * newService,IONotifier * notifier)928 IOServiceUserNotification::_handler( void * target,
929 void * ref, IOService * newService, IONotifier * notifier )
930 {
931 IOServiceUserNotification * targetObj = (IOServiceUserNotification *)target;
932 bool ret;
933
934 targetObj->retain();
935 ret = targetObj->handler( ref, newService );
936 targetObj->release();
937 return ret;
938 }
939
/*
 * Per-match callback: queues newService (up to kMaxOutstanding) and, if
 * this is the first entry since the client last drained the queue
 * (armed && count == 0), sends one wakeup ping to the client's port.
 * Always returns true to keep the notification installed.
 */
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	unsigned int    count;
	kern_return_t   kr;
	ipc_port_t      port = NULL;
	bool            sendPing = false;
	mach_msg_size_t msgSize;

	IOTakeLock( lock );

	count = newSet->getCount();
	if (count < kMaxOutstanding) {
		newSet->setObject( newService );
		// Only the first queued service after a drain triggers a ping.
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( lock );

	if (kIOServiceTerminatedNotificationType == msgType) {
		// Keep the terminated service's port alive until the client
		// has had a chance to collect it.
		IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
	}

	if (sendPing) {
		port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );

		// Message carries only msgReferenceSize of the reference area.
		msgSize = (mach_msg_size_t)(sizeof(PingMsg) - sizeof(OSAsyncReference64) + msgReferenceSize);
		kr = kernel_mach_msg_send_with_builder(msgSize,
		    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
		    MACH_MSG_TIMEOUT_NONE, NULL,
		    ^(mach_msg_header_t *hdr, mach_msg_size_t size){
			PingMsg *thisMsg = (PingMsg *)hdr;

			thisMsg->msgHdr.msgh_remote_port = remotePort;
			thisMsg->msgHdr.msgh_local_port = port;
			thisMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
				MACH_MSG_TYPE_COPY_SEND /*remote*/,
				MACH_MSG_TYPE_MAKE_SEND /*local*/);
			thisMsg->msgHdr.msgh_size = msgSize;
			thisMsg->msgHdr.msgh_id = kOSNotificationMessageID;

			thisMsg->notifyHeader.size = 0;
			thisMsg->notifyHeader.type = msgType;

			bcopy( msgReference, thisMsg->notifyHeader.reference, msgReferenceSize );
		});

		if (port) {
			iokit_release_port( port );
		}

		// Log the first send failure only, to avoid log spam.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
/*
 * Deliberately unimplemented (see IOUserIterator::getNextObject):
 * callers must use copyNextObject(), which returns a retained reference.
 */
OSObject *
IOServiceUserNotification::getNextObject()
{
	assert(false);
	return NULL;
}
1008
1009 OSObject *
copyNextObject()1010 IOServiceUserNotification::copyNextObject()
1011 {
1012 unsigned int count;
1013 OSObject * result;
1014
1015 IOLockLock(lock);
1016
1017 count = newSet->getCount();
1018 if (count) {
1019 result = newSet->getObject( count - 1 );
1020 result->retain();
1021 newSet->removeObject( count - 1);
1022 } else {
1023 result = NULL;
1024 armed = true;
1025 }
1026
1027 IOLockUnlock(lock);
1028
1029 return result;
1030 }
1031
1032 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1033
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification,IOUserNotification)1034 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1035
1036 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1037
/*
 * Initializes an interest-notification object. Takes ownership of `port`
 * (a send right, released in free()), records the registering process's
 * pid (reported for kIOMessageCopyClientID), and copies `reference`
 * (at most sizeof(OSAsyncReference64) bytes) into a mach_round_msg-sized
 * buffer. Returns false on invalid size or allocation failure.
 */
bool
IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, bool client64 )
{
	if (!super::init()) {
		return false;
	}

	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	clientIs64 = client64;

	owningPID = proc_selfpid();

	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
1066
/*
 * Clears the cached client port so free() will not call
 * iokit_release_port_send() on it; used when the caller has already
 * disposed of the send right.
 */
void
IOServiceMessageUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
1072
/*
 * Destructor: releases the client send right (unless invalidatePort()
 * cleared it) and the copied async reference. Tolerates
 * partially-constructed instances from init() failure paths.
 */
void
IOServiceMessageUserNotification::free( void )
{
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);

	super::free();
}
1083
1084 IOReturn
_handler(void * target,void * ref,UInt32 messageType,IOService * provider,void * argument,vm_size_t argSize)1085 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1086 UInt32 messageType, IOService * provider,
1087 void * argument, vm_size_t argSize )
1088 {
1089 IOServiceMessageUserNotification * targetObj = (IOServiceMessageUserNotification *)target;
1090 IOReturn ret;
1091
1092 targetObj->retain();
1093 ret = targetObj->handler(
1094 ref, messageType, provider, argument, argSize);
1095 targetObj->release();
1096 return ret;
1097 }
1098
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	kern_return_t kr;
	vm_size_t argSize;
	mach_msg_size_t thisMsgSize;
	ipc_port_t thisPort, providerPort;

	// kIOMessageCopyClientID is answered in place: hand back the owning
	// PID as an OSNumber instead of sending a mach message.
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	if (callerArgSize == 0) {
		// Scalar argument: width follows the client's pointer size.
		if (clientIs64) {
			argSize = sizeof(io_user_reference_t);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		// Struct argument: silently clamp to the maximum payload size.
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions
	// The unpadded payload size is encoded into the notification type
	// field; argSize is then rounded up to the IPC size granule.
	natural_t type = msgType;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	mach_msg_size_t extraSize = kIOUserNotifyMaxMessageSize + sizeof(IOServiceInterestContent64);
	// Fixed portion of the message: PingMsg with the full-size async
	// reference field replaced by the actual (variable) reference size.
	mach_msg_size_t msgSize = (mach_msg_size_t) (sizeof(PingMsg) - sizeof(OSAsyncReference64) + msgReferenceSize);

	// Overflow-checked total: header + content offset + padded argument.
	if (os_add3_overflow(msgSize, offsetof(IOServiceInterestContent64, messageArgument), argSize, &thisMsgSize)) {
		return kIOReturnBadArgument;
	}

	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );

	// Build and send in one step; the block fills the kernel-allocated
	// buffer of `size` bytes with the notification message.
	kr = kernel_mach_msg_send_with_builder(thisMsgSize,
	    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
	    MACH_MSG_TIMEOUT_NONE, NULL,
	    ^(mach_msg_header_t *hdr, mach_msg_size_t size) {
		PingMsg *thisMsg = (PingMsg *)hdr;
		IOServiceInterestContent64 * data;

		thisMsg->msgHdr.msgh_remote_port = remotePort;
		thisMsg->msgHdr.msgh_local_port = thisPort;
		thisMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
		| MACH_MSGH_BITS(
			MACH_MSG_TYPE_COPY_SEND /*remote*/,
			MACH_MSG_TYPE_MAKE_SEND /*local*/);
		thisMsg->msgHdr.msgh_size = size;
		thisMsg->msgHdr.msgh_id = kOSNotificationMessageID;

		// A single OOL port descriptor carrying the provider's port.
		thisMsg->msgBody.msgh_descriptor_count = 1;

		thisMsg->ports[0].name = providerPort;
		thisMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
		thisMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;

		thisMsg->notifyHeader.size = extraSize;
		thisMsg->notifyHeader.type = type;
		bcopy( msgReference, thisMsg->notifyHeader.reference, msgReferenceSize );


		// Content sits after the variable-length header, at offset msgSize.
		data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
		// == thisMsg->notifyHeader.content;
		data->messageType = messageType;

		if (callerArgSize == 0) {
		        // Scalar: for 32-bit clients, mirror the value into the
		        // high 32 bits as well.
		        data->messageArgument[0] = (io_user_reference_t) messageArgument;
		        if (!clientIs64) {
		                data->messageArgument[0] |= (data->messageArgument[0] << 32);
		        }
		} else {
		        bcopy( messageArgument, data->messageArgument, callerArgSize );
		}
	});

	if (thisPort) {
		iokit_release_port( thisPort );
	}
	if (providerPort) {
		iokit_release_port( providerPort );
	}

	// Message dropped for lack of buffers: surface as a memory error.
	if (kr == MACH_SEND_NO_BUFFER) {
		return kIOReturnNoMemory;
	}

	// Other send failures are logged once per object, then ignored.
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1202
OSObject *
IOServiceMessageUserNotification::getNextObject()
{
	// Message notifications carry no iterable object list.
	return NULL;
}
1208
OSObject *
IOServiceMessageUserNotification::copyNextObject()
{
	// Message notifications carry no iterable object list.
	return NULL;
}
1214
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOService
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

// Guards the per-task and per-user-client owner lists (IOUserClientOwner).
IOLock * gIOUserClientOwnersLock;
1222
1223 void
initialize(void)1224 IOUserClient::initialize( void )
1225 {
1226 gIOObjectPortLock = IOLockAlloc();
1227 gIOUserClientOwnersLock = IOLockAlloc();
1228 gIOUserServerLock = IOLockAlloc();
1229 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1230
1231 #if IOTRACKING
1232 IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1233 IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1234 IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1235 IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1236 IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
1237 #endif /* IOTRACKING */
1238 }
1239
// Legacy 32-bit async reference setter.  On LP64 kernels this entry point
// is invalid and panics; 64-bit callers must use setAsyncReference64.
void
#if __LP64__
__attribute__((__noreturn__))
#endif
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
#if __LP64__
	panic("setAsyncReference not valid for 64b");
#else
	// Slot 0 packs the wake port with any pre-existing flag bits.
	asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
#endif
}
1257
1258 void
setAsyncReference64(OSAsyncReference64 asyncRef,mach_port_t wakePort,mach_vm_address_t callback,io_user_reference_t refcon)1259 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1260 mach_port_t wakePort,
1261 mach_vm_address_t callback, io_user_reference_t refcon)
1262 {
1263 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1264 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1265 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1266 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1267 }
1268
1269 void
setAsyncReference64(OSAsyncReference64 asyncRef,mach_port_t wakePort,mach_vm_address_t callback,io_user_reference_t refcon,task_t task)1270 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1271 mach_port_t wakePort,
1272 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1273 {
1274 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1275 if (vm_map_is_64bit(get_task_map(task))) {
1276 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1277 }
1278 }
1279
1280 static OSDictionary *
CopyConsoleUser(UInt32 uid)1281 CopyConsoleUser(UInt32 uid)
1282 {
1283 OSArray * array;
1284 OSDictionary * user = NULL;
1285
1286 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1287 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1288 for (unsigned int idx = 0;
1289 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1290 idx++) {
1291 OSNumber * num;
1292
1293 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1294 && (uid == num->unsigned32BitValue())) {
1295 user->retain();
1296 break;
1297 }
1298 }
1299 }
1300 OSSafeReleaseNULL(ioProperty);
1301 return user;
1302 }
1303
1304 static OSDictionary *
CopyUserOnConsole(void)1305 CopyUserOnConsole(void)
1306 {
1307 OSArray * array;
1308 OSDictionary * user = NULL;
1309
1310 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1311 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1312 for (unsigned int idx = 0;
1313 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1314 idx++) {
1315 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1316 user->retain();
1317 break;
1318 }
1319 }
1320 }
1321 OSSafeReleaseNULL(ioProperty);
1322 return user;
1323 }
1324
1325 IOReturn
clientHasAuthorization(task_t task,IOService * service)1326 IOUserClient::clientHasAuthorization( task_t task,
1327 IOService * service )
1328 {
1329 proc_t p;
1330
1331 p = (proc_t) get_bsdtask_info(task);
1332 if (p) {
1333 uint64_t authorizationID;
1334
1335 authorizationID = proc_uniqueid(p);
1336 if (authorizationID) {
1337 if (service->getAuthorizationID() == authorizationID) {
1338 return kIOReturnSuccess;
1339 }
1340 }
1341 }
1342
1343 return kIOReturnNotPermitted;
1344 }
1345
// Check whether the caller identified by securityToken holds the named
// privilege.  securityToken is a task_t for most privilege names, but an
// IOUCProcessToken* for kIOClientPrivilegeSecureConsoleProcess.
// Returns kIOReturnSuccess, kIOReturnNotPrivileged, or kIOReturnUnsupported
// for unknown privilege names.
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	kern_return_t kr;
	security_token_t token;
	mach_msg_type_number_t count;
	task_t task;
	OSDictionary * user;
	bool secureConsole;


	// Foreground privilege is a property of the *current* task only.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console-session privilege: compare the caller's audit session ID
	// against the session of the user currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// Secure-console checks wrap the task in an IOUCProcessToken that also
	// carries the PID; all other names pass the task directly.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
		// Couldn't fetch the security token: fall through returning kr.
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator == effective UID 0 (token.val[0]).
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: UID appears in the console users array at all.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: UID must be on-console; for secure console the
		// caller must also own secure input (PID match).
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1440
1441 OSDictionary *
copyClientEntitlements(task_t task)1442 IOUserClient::copyClientEntitlements(task_t task)
1443 {
1444 proc_t p = NULL;
1445 pid_t pid = 0;
1446 OSDictionary *entitlements = NULL;
1447
1448 p = (proc_t)get_bsdtask_info(task);
1449 if (p == NULL) {
1450 return NULL;
1451 }
1452 pid = proc_pid(p);
1453
1454 if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1455 if (entitlements) {
1456 return entitlements;
1457 }
1458 }
1459
1460 // If the above fails, thats it
1461 return NULL;
1462 }
1463
1464 OSDictionary *
copyClientEntitlementsVnode(vnode_t vnode,off_t offset)1465 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1466 {
1467 OSDictionary *entitlements = NULL;
1468
1469 if (cs_entitlements_dictionary_copy_vnode(vnode, offset, (void**)&entitlements) != 0) {
1470 return NULL;
1471 }
1472 return entitlements;
1473 }
1474
// Copy a single entitlement value for `task`.  Returns a retained object
// (caller releases) or NULL when absent / on failure.
OSObject *
IOUserClient::copyClientEntitlement( task_t task,
    const char * entitlement )
{
	OSDictionary *entitlements;
	OSObject *value;

#if PMAP_CS_ENABLE && !CONFIG_X86_64_COMPAT
	// Fast path: query the single entitlement directly from pmap-backed
	// entitlement storage instead of copying the whole dictionary.
	if (pmap_cs_enabled() && amfi->query_context_to_object) {
		struct CEQueryContext queryCtx = {};
		size_t entlen = strlen(entitlement);
		CEQuery_t query = {
			/*
			 * We only select the dict value, if it exists this we will get
			 * a value CEQueryContext back to points to pmap backed memory
			 */
			CESelectDictValueDynamic((const uint8_t*)entitlement, entlen)
		};
		if (task == current_task()) {
			// NULL task means current task, which translated to the current pmap
			if (!pmap_query_entitlements(NULL, query, 1, &queryCtx)) {
				return NULL;
			}
		} else {
			// Foreign task: take a map reference so the pmap stays
			// valid across the query, then drop it on both paths.
			vm_map_t task_map = get_task_map_reference(task);
			if (task_map) {
				pmap_t pmap = vm_map_get_pmap(task_map);
				if (!pmap || !pmap_query_entitlements(pmap, query, 1, &queryCtx)) {
					vm_map_deallocate(task_map);
					return NULL;
				}
				vm_map_deallocate(task_map);
			}
		}
		// Materialize the queried context as a retained OSObject.
		// NOTE(review): when task_map is NULL above, queryCtx stays
		// zero-initialized and is still passed here — presumably the
		// AMFI call handles an empty context; confirm against AMFI.
		value = (OSObject*)amfi->query_context_to_object(&queryCtx);
		return value;
	}
#endif

	// Slow path: copy the full dictionary and pick out the one key.
	entitlements = copyClientEntitlements(task);
	if (entitlements == NULL) {
		return NULL;
	}

	/* Fetch the entitlement value from the dictionary. */
	value = entitlements->getObject(entitlement);
	if (value != NULL) {
		value->retain();
	}

	entitlements->release();
	return value;
}
1528
1529 OSObject *
copyClientEntitlementVnode(struct vnode * vnode,off_t offset,const char * entitlement)1530 IOUserClient::copyClientEntitlementVnode(
1531 struct vnode *vnode,
1532 off_t offset,
1533 const char *entitlement)
1534 {
1535 OSDictionary *entitlements;
1536 OSObject *value;
1537
1538 entitlements = copyClientEntitlementsVnode(vnode, offset);
1539 if (entitlements == NULL) {
1540 return NULL;
1541 }
1542
1543 /* Fetch the entitlement value from the dictionary. */
1544 value = entitlements->getObject(entitlement);
1545 if (value != NULL) {
1546 value->retain();
1547 }
1548
1549 entitlements->release();
1550 return value;
1551 }
1552
1553 bool
init()1554 IOUserClient::init()
1555 {
1556 if (getPropertyTable() || super::init()) {
1557 return reserve();
1558 }
1559
1560 return false;
1561 }
1562
1563 bool
init(OSDictionary * dictionary)1564 IOUserClient::init(OSDictionary * dictionary)
1565 {
1566 if (getPropertyTable() || super::init(dictionary)) {
1567 return reserve();
1568 }
1569
1570 return false;
1571 }
1572
1573 bool
initWithTask(task_t owningTask,void * securityID,UInt32 type)1574 IOUserClient::initWithTask(task_t owningTask,
1575 void * securityID,
1576 UInt32 type )
1577 {
1578 if (getPropertyTable() || super::init()) {
1579 return reserve();
1580 }
1581
1582 return false;
1583 }
1584
1585 bool
initWithTask(task_t owningTask,void * securityID,UInt32 type,OSDictionary * properties)1586 IOUserClient::initWithTask(task_t owningTask,
1587 void * securityID,
1588 UInt32 type,
1589 OSDictionary * properties )
1590 {
1591 bool ok;
1592
1593 ok = super::init( properties );
1594 ok &= initWithTask( owningTask, securityID, type );
1595
1596 return ok;
1597 }
1598
bool
IOUserClient::reserve()
{
	// Lazily allocate the expansion data, enable terminate-defer, and
	// register with IOStatistics.  Always returns true.
	if (!reserved) {
		reserved = IOMallocType(ExpansionData);
	}
	setTerminateDefer(NULL, true);
	IOStatisticsRegisterCounter();

	return true;
}
1610
// Links one (task, user client) ownership pair onto two lists: the task's
// list of owned user clients and the user client's list of owning tasks.
struct IOUserClientOwner {
	task_t task;            // owning task
	queue_chain_t taskLink; // link on task_io_user_clients(task)
	IOUserClient * uc;      // owned user client
	queue_chain_t ucLink;   // link on uc->owners
};
1617
// Record `task` as an owner of this user client (idempotent per task).
// Both the client's owner list and the task's client list are updated
// under gIOUserClientOwnersLock.
IOReturn
IOUserClient::registerOwner(task_t task)
{
	IOUserClientOwner * owner;
	IOReturn ret;
	bool newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	// owners.next == NULL marks a never-initialized queue head.
	if (!owners.next) {
		queue_init(&owners);
	} else {
		// Already initialized: scan for an existing entry for this task.
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		owner = IOMallocType(IOUserClientOwner);

		owner->task = task;
		owner->uc = this;
		// Enter the pair on both lists.
		queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
		queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
		// Propagate the app-suspended-notification interest to the task.
		if (messageAppSuspended) {
			task_set_message_app_suspended(task, true);
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1658
// All send rights to this client's port are gone: detach every owner link
// and recompute each task's app-suspended-notification interest from its
// remaining user clients.
void
IOUserClient::noMoreSenders(void)
{
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t * taskque;
	bool hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			// Any *other* client of this task still interested?
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IOFreeType(owner, IOUserClientOwner);
		}
		// Reset to the never-initialized state (see registerOwner).
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1690
1691
// Called when a task's app-suspended state flips: deliver
// kIOMessageTaskAppSuspendedChange to every interested user client of the
// task.  Clients are collected into a set under the lock, then messaged
// outside it to avoid calling driver code with the lock held.
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	queue_head_t * taskque;
	IOUserClientOwner * owner;
	OSSet * set;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		if (!set) {
			set = OSSet::withCapacity(4);
			if (!set) {
				// Allocation failed: deliver to none rather than some.
				break;
			}
		}
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient * uc;

			uc = (typeof(uc))obj;
#if 0
			{
			        OSString * str;
			        str = IOCopyLogNameForPID(task_pid(task));
			        IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
			        uc->getName(), task_is_app_suspended(task));
			        OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1739
// Task teardown hook: detach every user-client ownership record for the
// dying task.  Clients left with no owners are collected and clientDied()
// is invoked on them after the lock is dropped.
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
	IOUserClientOwner * owner;
	IOUserClient * dead;
	IOUserClient * uc;
	queue_head_t * taskque;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IOFreeType(owner, IOUserClientOwner);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	// Walk the intrusive "dead" list built above, outside the lock.
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		// Shared clients always get clientDied; exclusive ones only if
		// not already closed.
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1783
// Singly-linked list node mapping a task to its IPC filter policy for this
// user client (list head: reserved->filterPolicies).
struct IOUCFilterPolicy {
	task_t task;                      // task the policy applies to
	io_filter_policy_t filterPolicy;  // policy handle for that task
	IOUCFilterPolicy * next;          // next node in the list
};
1789
// Look up (and optionally install) the IPC filter policy for `task`.
// With addFilterPolicy == 0 this is a pure lookup returning 0 when absent;
// otherwise the policy is installed for the task if not already present
// (and asserted equal if it is).
io_filter_policy_t
IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
{
	IOUCFilterPolicy * elem;
	io_filter_policy_t filterPolicy;

	filterPolicy = 0;
	IOLockLock(filterLock);

	// Linear scan for an existing entry for this task.
	for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
	}

	if (elem) {
		if (addFilterPolicy) {
			assert(addFilterPolicy == elem->filterPolicy);
		}
		filterPolicy = elem->filterPolicy;
	} else if (addFilterPolicy) {
		// Not found: prepend a new node under the lock.
		elem = IOMallocType(IOUCFilterPolicy);
		elem->task = task;
		elem->filterPolicy = addFilterPolicy;
		elem->next = reserved->filterPolicies;
		reserved->filterPolicies = elem;
		filterPolicy = addFilterPolicy;
	}

	IOLockUnlock(filterLock);
	return filterPolicy;
}
1819
void
IOUserClient::free()
{
	// Release retained collections and locks owned by this client.
	if (mappings) {
		mappings->release();
	}
	if (lock) {
		IORWLockFree(lock);
	}
	if (filterLock) {
		IOLockFree(filterLock);
	}

	IOStatisticsUnregisterCounter();

	// All owner links must have been detached (noMoreSenders /
	// iokit_task_terminate) before the client is freed.
	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		// Release each per-task filter policy, then the list nodes.
		IOUCFilterPolicy * elem;
		IOUCFilterPolicy * nextElem;
		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
			nextElem = elem->next;
			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
			}
			IOFreeType(elem, IOUCFilterPolicy);
		}
		IOFreeType(reserved, ExpansionData);
	}

	super::free();
}
1853
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Abstract subclass of IOUserClient; never instantiated directly.
OSDefineMetaClassAndAbstractStructors( IOUserClient2022, IOUserClient )


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1860
1861 IOReturn
1862 IOUserClient::clientDied( void )
1863 {
1864 IOReturn ret = kIOReturnNotReady;
1865
1866 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
1867 ret = clientClose();
1868 }
1869
1870 return ret;
1871 }
1872
IOReturn
IOUserClient::clientClose( void )
{
	// Default: subclasses override to tear down their connection state.
	return kIOReturnUnsupported;
}
1878
IOService *
IOUserClient::getService( void )
{
	// Default: subclasses override to report their provider service.
	return NULL;
}
1884
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t /* port */,
	UInt32 /* type */,
	UInt32 /* refCon */)
{
	// Default: subclasses override to accept a client notification port.
	return kIOReturnUnsupported;
}
1893
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t port,
	UInt32 type,
	io_user_reference_t refCon)
{
	// Bridges to the legacy overload; note refCon is truncated to 32 bits.
	return registerNotificationPort(port, type, (UInt32) refCon);
}
1902
IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type,
    semaphore_t * semaphore )
{
	// Default: subclasses override to expose a notification semaphore.
	return kIOReturnUnsupported;
}
1909
IOReturn
IOUserClient::connectClient( IOUserClient * /* client */ )
{
	// Default: subclasses override to support client-to-client connection.
	return kIOReturnUnsupported;
}
1915
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    IOMemoryDescriptor ** memory )
{
	// Default: subclasses override to vend shared memory by type.
	return kIOReturnUnsupported;
}
1923
1924 IOReturn
clientMemoryForType(UInt32 type,IOOptionBits * options,OSSharedPtr<IOMemoryDescriptor> & memory)1925 IOUserClient::clientMemoryForType( UInt32 type,
1926 IOOptionBits * options,
1927 OSSharedPtr<IOMemoryDescriptor>& memory )
1928 {
1929 IOMemoryDescriptor* memoryRaw = nullptr;
1930 IOReturn result = clientMemoryForType(type, options, &memoryRaw);
1931 memory.reset(memoryRaw, OSNoRetain);
1932 return result;
1933 }
1934
#if !__LP64__
// Legacy 32-bit entry point; not implemented (use mapClientMemory64).
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	IOVirtualAddress atAddress )
{
	return NULL;
}
#endif
1946
// Map the memory the subclass vends for `type` into `task`.
// Returns a retained IOMemoryMap (caller releases), or NULL on failure.
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	mach_vm_address_t atAddress )
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map = NULL;

	err = clientMemoryForType((UInt32) type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		FAKE_STACK_FRAME(getMetaClass());

		// Caller-supplied mapping flags override only the user-settable
		// bits of the options the subclass returned.
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		// Descriptor reference from clientMemoryForType is consumed here.
		memory->release();

		FAKE_STACK_FRAME_END();
	}

	return map;
}
1974
1975 IOReturn
exportObjectToClient(task_t task,OSObject * obj,io_object_t * clientObj)1976 IOUserClient::exportObjectToClient(task_t task,
1977 OSObject *obj, io_object_t *clientObj)
1978 {
1979 mach_port_name_t name;
1980
1981 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1982
1983 *clientObj = (io_object_t)(uintptr_t) name;
1984
1985 if (obj) {
1986 obj->release();
1987 }
1988
1989 return kIOReturnSuccess;
1990 }
1991
1992 IOReturn
copyPortNameForObjectInTask(task_t task,OSObject * obj,mach_port_name_t * port_name)1993 IOUserClient::copyPortNameForObjectInTask(task_t task,
1994 OSObject *obj, mach_port_name_t * port_name)
1995 {
1996 mach_port_name_t name;
1997
1998 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
1999
2000 *(mach_port_name_t *) port_name = name;
2001
2002 return kIOReturnSuccess;
2003 }
2004
2005 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSObject ** obj)2006 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2007 OSObject **obj)
2008 {
2009 OSObject * object;
2010
2011 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
2012
2013 *obj = object;
2014
2015 return object ? kIOReturnSuccess : kIOReturnIPCError;
2016 }
2017
2018 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSSharedPtr<OSObject> & obj)2019 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2020 OSSharedPtr<OSObject>& obj)
2021 {
2022 OSObject* objRaw = NULL;
2023 IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2024 obj.reset(objRaw, OSNoRetain);
2025 return result;
2026 }
2027
IOReturn
IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
	// Adjust the send-right user-reference count on a port name in `task`.
	return iokit_mod_send_right(task, port_name, delta);
}
2033
IOExternalMethod *
IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
	// Legacy dispatch default: no external methods.
	return NULL;
}
2039
IOExternalAsyncMethod *
IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
	// Legacy dispatch default: no external async methods.
	return NULL;
}
2045
IOExternalTrap *
IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
	// Legacy dispatch default: no external traps.
	return NULL;
}
2052
2053 #pragma clang diagnostic push
2054 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2055
2056 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
2057 // functions can break clients of kexts implementing getExternalMethodForIndex()
2058 IOExternalMethod *
2059 IOUserClient::
getTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2060 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
2061 {
2062 IOExternalMethod *method = getExternalMethodForIndex(index);
2063
2064 if (method) {
2065 *targetP = (IOService *) method->object;
2066 }
2067
2068 return method;
2069 }
2070
2071 IOExternalMethod *
2072 IOUserClient::
getTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2073 getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2074 {
2075 IOService* targetPRaw = NULL;
2076 IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
2077 targetP.reset(targetPRaw, OSRetain);
2078 return result;
2079 }
2080
2081 IOExternalAsyncMethod *
2082 IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2083 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
2084 {
2085 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
2086
2087 if (method) {
2088 *targetP = (IOService *) method->object;
2089 }
2090
2091 return method;
2092 }
2093
2094 IOExternalAsyncMethod *
2095 IOUserClient::
getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2096 getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2097 {
2098 IOService* targetPRaw = NULL;
2099 IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
2100 targetP.reset(targetPRaw, OSRetain);
2101 return result;
2102 }
2103
2104 IOExternalTrap *
2105 IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP,UInt32 index)2106 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
2107 {
2108 IOExternalTrap *trap = getExternalTrapForIndex(index);
2109
2110 if (trap) {
2111 *targetP = trap->object;
2112 }
2113
2114 return trap;
2115 }
2116 #pragma clang diagnostic pop
2117
2118 IOReturn
releaseAsyncReference64(OSAsyncReference64 reference)2119 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2120 {
2121 mach_port_t port;
2122 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2123
2124 if (MACH_PORT_NULL != port) {
2125 iokit_release_port_send(port);
2126 }
2127
2128 return kIOReturnSuccess;
2129 }
2130
2131 IOReturn
releaseNotificationPort(mach_port_t port)2132 IOUserClient::releaseNotificationPort(mach_port_t port)
2133 {
2134 if (MACH_PORT_NULL != port) {
2135 iokit_release_port_send(port);
2136 }
2137
2138 return kIOReturnSuccess;
2139 }
2140
// Legacy 32-bit async completion: widen the reference and arguments to the
// 64-bit representation and forward to sendAsyncResult64.
IOReturn
IOUserClient::sendAsyncResult(OSAsyncReference reference,
    IOReturn result, void *args[], UInt32 numArgs)
{
	OSAsyncReference64 reference64;
	OSBoundedArray<io_user_reference_t, kMaxAsyncArgs> args64;
	unsigned int idx;

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	// Widen each slot of the async reference.
	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
		reference64[idx] = REF64(reference[idx]);
	}

	// Widen each caller argument.
	for (idx = 0; idx < numArgs; idx++) {
		args64[idx] = REF64(args[idx]);
	}

	return sendAsyncResult64(reference64, result, args64.data(), numArgs);
}
2163
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Public wrapper that forwards the caller-supplied send options.
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2170
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	// Public wrapper using default (zero) send options.
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2177
/*
 * Deliver an async completion message to the reply port packed into
 * reference[0] (low bits hold the send right; kIOUCAsync0Flags bits are
 * flags).  Builds the 64-bit wire format when kIOUCAsync64Flag is set in
 * reference[0], otherwise the 32-bit format, and sends the message from
 * the kernel.  May mutate reference[0] to record that a send failure has
 * already been logged (kIOUCAsyncErrorLoggedFlag).
 */
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Stack-built reply message; large enough for either wire format.
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64 notifyHdr;
				IOAsyncCompletionContent asyncContent;
				// packed: keeps the 64-bit args contiguous after the
				// variable-size header content
				io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg replyMsg;
	mach_port_t replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	// Zero the whole message first; later code relies on this for the
	// unsent bytes and for notifyHdr.reference[0].
	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port = NULL;
	replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit client: msgh_size is trimmed to only the args actually sent.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		// 32-bit client: narrow each reference and argument with REF32.
		unsigned int idx;

		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		/* Skip reference[0] which is left as 0 from the earlier bzero */
		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		// Best-effort delivery: zero timeout means a full queue drops the
		// message (send returns MACH_SEND_TIMED_OUT, filtered out below).
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	// Log only the first real failure per reference; the flag is sticky.
	// (The log text names _proper regardless of which send path was used.)
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2268
2269
2270 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2271
2272 extern "C" {
/*
 * CHECK(cls, obj, out): declare a local `out` of type `cls *` and return
 * kIOReturnBadArgument from the enclosing MIG routine unless `obj`
 * dynamic-casts to `cls`.  Expands to a declaration plus an early
 * return, so it is usable only at the top of a function body.
 */
#define CHECK(cls, obj, out) \
	cls * out; \
	if( !(out = OSDynamicCast( cls, obj))) \
	        return( kIOReturnBadArgument )

/*
 * CHECKLOCKED(cls, obj, out): like CHECK, but `obj` must be an
 * IOUserIterator whose wrapped userIteratorObject dynamic-casts to
 * `cls`.  Also declares `oIter` referring to the iterator wrapper.
 */
#define CHECKLOCKED(cls, obj, out) \
	IOUserIterator * oIter; \
	cls * out; \
	if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
	        return (kIOReturnBadArgument); \
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
	        return (kIOReturnBadArgument)
2285
2286 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2287
2288 // Create a vm_map_copy_t or kalloc'ed data for memory
2289 // to be copied out. ipc will free after the copyout.
2290
2291 static kern_return_t
copyoutkdata(const void * data,vm_size_t len,io_buf_ptr_t * buf)2292 copyoutkdata( const void * data, vm_size_t len,
2293 io_buf_ptr_t * buf )
2294 {
2295 kern_return_t err;
2296 vm_map_copy_t copy;
2297
2298 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2299 false /* src_destroy */, ©);
2300
2301 assert( err == KERN_SUCCESS );
2302 if (err == KERN_SUCCESS) {
2303 *buf = (char *) copy;
2304 }
2305
2306 return err;
2307 }
2308
2309 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2310
2311 /* Routine io_server_version */
2312 kern_return_t
is_io_server_version(mach_port_t main_port,uint64_t * version)2313 is_io_server_version(
2314 mach_port_t main_port,
2315 uint64_t *version)
2316 {
2317 *version = IOKIT_SERVER_VERSION;
2318 return kIOReturnSuccess;
2319 }
2320
2321 /* Routine io_object_get_class */
2322 kern_return_t
is_io_object_get_class(io_object_t object,io_name_t className)2323 is_io_object_get_class(
2324 io_object_t object,
2325 io_name_t className )
2326 {
2327 const OSMetaClass* my_obj = NULL;
2328
2329 if (!object) {
2330 return kIOReturnBadArgument;
2331 }
2332
2333 my_obj = object->getMetaClass();
2334 if (!my_obj) {
2335 return kIOReturnNotFound;
2336 }
2337
2338 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2339
2340 return kIOReturnSuccess;
2341 }
2342
2343 /* Routine io_object_get_superclass */
2344 kern_return_t
is_io_object_get_superclass(mach_port_t main_port,io_name_t obj_name,io_name_t class_name)2345 is_io_object_get_superclass(
2346 mach_port_t main_port,
2347 io_name_t obj_name,
2348 io_name_t class_name)
2349 {
2350 IOReturn ret;
2351 const OSMetaClass * meta;
2352 const OSMetaClass * super;
2353 const OSSymbol * name;
2354 const char * cstr;
2355
2356 if (!obj_name || !class_name) {
2357 return kIOReturnBadArgument;
2358 }
2359 if (main_port != main_device_port) {
2360 return kIOReturnNotPrivileged;
2361 }
2362
2363 ret = kIOReturnNotFound;
2364 meta = NULL;
2365 do{
2366 name = OSSymbol::withCString(obj_name);
2367 if (!name) {
2368 break;
2369 }
2370 meta = OSMetaClass::copyMetaClassWithName(name);
2371 if (!meta) {
2372 break;
2373 }
2374 super = meta->getSuperClass();
2375 if (!super) {
2376 break;
2377 }
2378 cstr = super->getClassName();
2379 if (!cstr) {
2380 break;
2381 }
2382 strlcpy(class_name, cstr, sizeof(io_name_t));
2383 ret = kIOReturnSuccess;
2384 }while (false);
2385
2386 OSSafeReleaseNULL(name);
2387 if (meta) {
2388 meta->releaseMetaClass();
2389 }
2390
2391 return ret;
2392 }
2393
2394 /* Routine io_object_get_bundle_identifier */
2395 kern_return_t
is_io_object_get_bundle_identifier(mach_port_t main_port,io_name_t obj_name,io_name_t bundle_name)2396 is_io_object_get_bundle_identifier(
2397 mach_port_t main_port,
2398 io_name_t obj_name,
2399 io_name_t bundle_name)
2400 {
2401 IOReturn ret;
2402 const OSMetaClass * meta;
2403 const OSSymbol * name;
2404 const OSSymbol * identifier;
2405 const char * cstr;
2406
2407 if (!obj_name || !bundle_name) {
2408 return kIOReturnBadArgument;
2409 }
2410 if (main_port != main_device_port) {
2411 return kIOReturnNotPrivileged;
2412 }
2413
2414 ret = kIOReturnNotFound;
2415 meta = NULL;
2416 do{
2417 name = OSSymbol::withCString(obj_name);
2418 if (!name) {
2419 break;
2420 }
2421 meta = OSMetaClass::copyMetaClassWithName(name);
2422 if (!meta) {
2423 break;
2424 }
2425 identifier = meta->getKmodName();
2426 if (!identifier) {
2427 break;
2428 }
2429 cstr = identifier->getCStringNoCopy();
2430 if (!cstr) {
2431 break;
2432 }
2433 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2434 ret = kIOReturnSuccess;
2435 }while (false);
2436
2437 OSSafeReleaseNULL(name);
2438 if (meta) {
2439 meta->releaseMetaClass();
2440 }
2441
2442 return ret;
2443 }
2444
2445 /* Routine io_object_conforms_to */
2446 kern_return_t
is_io_object_conforms_to(io_object_t object,io_name_t className,boolean_t * conforms)2447 is_io_object_conforms_to(
2448 io_object_t object,
2449 io_name_t className,
2450 boolean_t *conforms )
2451 {
2452 if (!object) {
2453 return kIOReturnBadArgument;
2454 }
2455
2456 *conforms = (NULL != object->metaCast( className ));
2457
2458 return kIOReturnSuccess;
2459 }
2460
2461 /* Routine io_object_get_retain_count */
2462 kern_return_t
is_io_object_get_retain_count(io_object_t object,uint32_t * retainCount)2463 is_io_object_get_retain_count(
2464 io_object_t object,
2465 uint32_t *retainCount )
2466 {
2467 if (!object) {
2468 return kIOReturnBadArgument;
2469 }
2470
2471 *retainCount = object->getRetainCount();
2472 return kIOReturnSuccess;
2473 }
2474
2475 /* Routine io_iterator_next */
2476 kern_return_t
is_io_iterator_next(io_object_t iterator,io_object_t * object)2477 is_io_iterator_next(
2478 io_object_t iterator,
2479 io_object_t *object )
2480 {
2481 IOReturn ret;
2482 OSObject * obj;
2483 OSIterator * iter;
2484 IOUserIterator * uiter;
2485
2486 if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2487 obj = uiter->copyNextObject();
2488 } else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2489 obj = iter->getNextObject();
2490 if (obj) {
2491 obj->retain();
2492 }
2493 } else {
2494 return kIOReturnBadArgument;
2495 }
2496
2497 if (obj) {
2498 *object = obj;
2499 ret = kIOReturnSuccess;
2500 } else {
2501 ret = kIOReturnNoDevice;
2502 }
2503
2504 return ret;
2505 }
2506
2507 /* Routine io_iterator_reset */
2508 kern_return_t
is_io_iterator_reset(io_object_t iterator)2509 is_io_iterator_reset(
2510 io_object_t iterator )
2511 {
2512 CHECK( OSIterator, iterator, iter );
2513
2514 iter->reset();
2515
2516 return kIOReturnSuccess;
2517 }
2518
2519 /* Routine io_iterator_is_valid */
2520 kern_return_t
is_io_iterator_is_valid(io_object_t iterator,boolean_t * is_valid)2521 is_io_iterator_is_valid(
2522 io_object_t iterator,
2523 boolean_t *is_valid )
2524 {
2525 CHECK( OSIterator, iterator, iter );
2526
2527 *is_valid = iter->isValid();
2528
2529 return kIOReturnSuccess;
2530 }
2531
2532 static kern_return_t
internal_io_service_match_property_table(io_service_t _service,const char * matching,mach_msg_type_number_t matching_size,boolean_t * matches)2533 internal_io_service_match_property_table(
2534 io_service_t _service,
2535 const char * matching,
2536 mach_msg_type_number_t matching_size,
2537 boolean_t *matches)
2538 {
2539 CHECK( IOService, _service, service );
2540
2541 kern_return_t kr;
2542 OSObject * obj;
2543 OSDictionary * dict;
2544
2545 assert(matching_size);
2546
2547
2548 obj = OSUnserializeXML(matching, matching_size);
2549
2550 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2551 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2552 *matches = service->passiveMatch( dict );
2553 kr = kIOReturnSuccess;
2554 } else {
2555 kr = kIOReturnBadArgument;
2556 }
2557
2558 if (obj) {
2559 obj->release();
2560 }
2561
2562 return kr;
2563 }
2564
2565 /* Routine io_service_match_property_table */
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	// Unimplemented legacy in-band string interface: always returns
	// kIOReturnUnsupported.  Callers use the _ool or _bin variants below.
	return kIOReturnUnsupported;
}
2574
2575
2576 /* Routine io_service_match_property_table_ool */
kern_return_t
is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
	// Out-of-line variant: `matching` is a vm_map_copy_t from MIG.  Map
	// it into the kernel map, run the in-band implementation on it, then
	// unmap.  The MIG return value reflects only the copyout; the match
	// outcome is reported through *result.
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// (the copy object has been consumed and must not be re-freed)
		*result = internal_io_service_match_property_table(service,
		    (const char *)data, matchingCnt, matches );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2601
2602 /* Routine io_service_match_property_table_bin */
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	// In-band variant: the serialized matching dictionary arrives inline
	// in the MIG message, so no vm copyout/deallocate dance is needed.
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2612
2613 static kern_return_t
internal_io_service_get_matching_services(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_iterator_t * existing)2614 internal_io_service_get_matching_services(
2615 mach_port_t main_port,
2616 const char * matching,
2617 mach_msg_type_number_t matching_size,
2618 io_iterator_t *existing )
2619 {
2620 kern_return_t kr;
2621 OSObject * obj;
2622 OSDictionary * dict;
2623
2624 if (main_port != main_device_port) {
2625 return kIOReturnNotPrivileged;
2626 }
2627
2628 assert(matching_size);
2629 obj = OSUnserializeXML(matching, matching_size);
2630
2631 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2632 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2633 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2634 kr = kIOReturnSuccess;
2635 } else {
2636 kr = kIOReturnBadArgument;
2637 }
2638
2639 if (obj) {
2640 obj->release();
2641 }
2642
2643 return kr;
2644 }
2645
2646 /* Routine io_service_get_matching_services */
kern_return_t
is_io_service_get_matching_services(
	mach_port_t main_port,
	io_string_t matching,
	io_iterator_t *existing )
{
	// Unimplemented legacy in-band string interface: always returns
	// kIOReturnUnsupported.  Callers use the _ool or _bin variants below.
	return kIOReturnUnsupported;
}
2655
2656 /* Routine io_service_get_matching_services_ool */
kern_return_t
is_io_service_get_matching_services_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
	// Out-of-line variant: map the caller's vm_map_copy_t matching
	// buffer into the kernel map, run the in-band implementation, then
	// unmap.  *result carries the matching outcome; the MIG return value
	// reflects only the copyout.
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*existing = NULL;
		*result = internal_io_service_get_matching_services(main_port,
		    (const char *) data, matchingCnt, existing);
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2683
2684 /* Routine io_service_get_matching_services_bin */
kern_return_t
is_io_service_get_matching_services_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
	// In-band variant: the serialized matching dictionary arrives inline
	// in the MIG message; forward directly to the internal helper.
	return internal_io_service_get_matching_services(main_port, matching, matchingCnt, existing);
}
2694
2695
2696 static kern_return_t
internal_io_service_get_matching_service(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_service_t * service)2697 internal_io_service_get_matching_service(
2698 mach_port_t main_port,
2699 const char * matching,
2700 mach_msg_type_number_t matching_size,
2701 io_service_t *service )
2702 {
2703 kern_return_t kr;
2704 OSObject * obj;
2705 OSDictionary * dict;
2706
2707 if (main_port != main_device_port) {
2708 return kIOReturnNotPrivileged;
2709 }
2710
2711 assert(matching_size);
2712 obj = OSUnserializeXML(matching, matching_size);
2713
2714 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2715 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2716 *service = IOService::copyMatchingService( dict );
2717 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2718 } else {
2719 kr = kIOReturnBadArgument;
2720 }
2721
2722 if (obj) {
2723 obj->release();
2724 }
2725
2726 return kr;
2727 }
2728
2729 /* Routine io_service_get_matching_service */
kern_return_t
is_io_service_get_matching_service(
	mach_port_t main_port,
	io_string_t matching,
	io_service_t *service )
{
	// Unimplemented legacy in-band string interface: always returns
	// kIOReturnUnsupported.  Callers use the _ool or _bin variants below.
	return kIOReturnUnsupported;
}
2738
2739 /* Routine io_service_get_matching_services_ool */
kern_return_t
is_io_service_get_matching_service_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
	// Out-of-line variant: map the caller's vm_map_copy_t matching
	// buffer into the kernel map, run the in-band implementation, then
	// unmap.  *result carries the matching outcome; the MIG return value
	// reflects only the copyout.
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*service = NULL;
		*result = internal_io_service_get_matching_service(main_port,
		    (const char *) data, matchingCnt, service );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2766
2767 /* Routine io_service_get_matching_service_bin */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	// In-band variant: the serialized matching dictionary arrives inline
	// in the MIG message; forward directly to the internal helper.
	return internal_io_service_get_matching_service(main_port, matching, matchingCnt, service);
}
2777
/*
 * Common implementation for the add_notification MIG routines.  Creates
 * an IOServiceUserNotification that forwards the requested service
 * lifecycle events (publish/matched/terminated) to `port`, tagged with
 * the caller's async reference.  `client64` selects the 32- vs 64-bit
 * reference wire format.  On success *notification holds a retained
 * reference to the user-notification object.
 */
static kern_return_t
internal_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	const char * matching,
	size_t matching_size,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceUserNotification * userNotify = NULL;
	IONotifier * notify = NULL;
	const OSSymbol * sym;
	OSObject * obj;
	OSDictionary * dict;
	IOReturn err;
	natural_t userMsgType;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	do {
		err = kIOReturnNoResources;

		// Reject absurdly large matching payloads before parsing.
		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		// NOTE(review): on symbol-allocation failure execution falls
		// through (no continue/break here), so a NULL sym proceeds into
		// the type comparisons below — confirm this is intentional.
		if (!(sym = OSSymbol::withCString( notification_type ))) {
			err = kIOReturnNoResources;
		}

		assert(matching_size);
		obj = OSUnserializeXML(matching, matching_size);
		dict = OSDynamicCast(OSDictionary, obj);
		if (!dict) {
			err = kIOReturnBadArgument;
			continue;
		}
		IOTaskRegistryCompatibilityMatching(current_task(), dict);

		// Map the notification-type symbol to the message type delivered
		// to user space; unrecognized types get kLastIOKitNotificationType.
		if ((sym == gIOPublishNotification)
		    || (sym == gIOFirstPublishNotification)) {
			userMsgType = kIOServicePublishNotificationType;
		} else if ((sym == gIOMatchedNotification)
		    || (sym == gIOFirstMatchNotification)) {
			userMsgType = kIOServiceMatchedNotificationType;
		} else if ((sym == gIOTerminatedNotification)
		    || (sym == gIOWillTerminateNotification)) {
			userMsgType = kIOServiceTerminatedNotificationType;
		} else {
			userMsgType = kLastIOKitNotificationType;
		}

		userNotify = new IOServiceUserNotification;

		if (userNotify && !userNotify->init( port, userMsgType,
		    reference, referenceSize, client64)) {
			userNotify->release();
			userNotify = NULL;
		}
		if (!userNotify) {
			continue;
		}

		// Hook the user notification object into the matching machinery;
		// its _handler will fire for each matching service event.
		notify = IOService::addMatchingNotification( sym, dict,
		    &userNotify->_handler, userNotify );
		if (notify) {
			*notification = userNotify;
			userNotify->setNotification( notify );
			err = kIOReturnSuccess;
		} else {
			err = kIOReturnUnsupported;
		}
	} while (false);

	// On any failure after the user-notify object was created, tear it
	// down so its port reference and notifier are released.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	if (sym) {
		sym->release();
	}
	if (obj) {
		obj->release();
	}

	return err;
}
2873
2874
2875 /* Routine io_service_add_notification */
kern_return_t
is_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	// Unimplemented legacy in-band string interface: always returns
	// kIOReturnUnsupported (also reached via the _old compatibility
	// routine).  Callers use the _bin/_ool variants below.
	return kIOReturnUnsupported;
}
2888
2889 /* Routine io_service_add_notification_64 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	// Unimplemented legacy in-band string interface (64-bit reference
	// flavor): always returns kIOReturnUnsupported.
	return kIOReturnUnsupported;
}
2902
2903 /* Routine io_service_add_notification_bin */
2904 kern_return_t
is_io_service_add_notification_bin(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)2905 is_io_service_add_notification_bin
2906 (
2907 mach_port_t main_port,
2908 io_name_t notification_type,
2909 io_struct_inband_t matching,
2910 mach_msg_type_number_t matchingCnt,
2911 mach_port_t wake_port,
2912 io_async_ref_t reference,
2913 mach_msg_type_number_t referenceCnt,
2914 io_object_t *notification)
2915 {
2916 io_async_ref_t zreference;
2917
2918 if (referenceCnt > ASYNC_REF_COUNT) {
2919 return kIOReturnBadArgument;
2920 }
2921 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2922 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2923
2924 return internal_io_service_add_notification(main_port, notification_type,
2925 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2926 false, notification);
2927 }
2928
2929 /* Routine io_service_add_notification_bin_64 */
2930 kern_return_t
is_io_service_add_notification_bin_64(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)2931 is_io_service_add_notification_bin_64
2932 (
2933 mach_port_t main_port,
2934 io_name_t notification_type,
2935 io_struct_inband_t matching,
2936 mach_msg_type_number_t matchingCnt,
2937 mach_port_t wake_port,
2938 io_async_ref64_t reference,
2939 mach_msg_type_number_t referenceCnt,
2940 io_object_t *notification)
2941 {
2942 io_async_ref64_t zreference;
2943
2944 if (referenceCnt > ASYNC_REF64_COUNT) {
2945 return kIOReturnBadArgument;
2946 }
2947 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2948 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2949
2950 return internal_io_service_add_notification(main_port, notification_type,
2951 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2952 true, notification);
2953 }
2954
/*
 * Common implementation for the out-of-line add_notification routines.
 * Maps the caller's vm_map_copy_t matching buffer into the kernel map,
 * registers the notification via internal_io_service_add_notification(),
 * then unmaps.  The MIG return value reflects only the copyout; the
 * notification result is reported through *result.
 */
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		*result = internal_io_service_add_notification( main_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2986
2987 /* Routine io_service_add_notification_ool */
kern_return_t
is_io_service_add_notification_ool(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	kern_return_t *result,
	io_object_t *notification )
{
	// Zero-pad the caller's 32-bit async reference to its full size,
	// then register via the out-of-line internal helper.
	io_async_ref_t zreference;

	if (referenceCnt > ASYNC_REF_COUNT) {
		return kIOReturnBadArgument;
	}
	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));

	return internal_io_service_add_notification_ool(main_port, notification_type,
	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
	    false, result, notification);
}
3012
3013 /* Routine io_service_add_notification_ool_64 */
kern_return_t
is_io_service_add_notification_ool_64(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	kern_return_t *result,
	io_object_t *notification )
{
	// Zero-pad the caller's 64-bit async reference to its full size,
	// then register via the out-of-line internal helper.
	io_async_ref64_t zreference;

	if (referenceCnt > ASYNC_REF64_COUNT) {
		return kIOReturnBadArgument;
	}
	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));

	return internal_io_service_add_notification_ool(main_port, notification_type,
	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
	    true, result, notification);
}
3038
3039 /* Routine io_service_add_notification_old */
/*
 * Pre-async-reference compatibility entry point: forwards to
 * is_io_service_add_notification() with a single-element reference.
 * That routine is itself unimplemented and returns kIOReturnUnsupported.
 */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	return is_io_service_add_notification( main_port, notification_type,
	    matching, port, &ref, 1, notification );
}
3053
3054
/*
 * Common implementation for the add_interest_notification MIG routines.
 * Registers an IOServiceMessageUserNotification on the service for the
 * named interest type; its _handler forwards interest messages to `port`
 * tagged with the caller's async reference.  `client64` selects the 32-
 * vs 64-bit reference wire format.  On success *notification holds a
 * retained reference to the user-notification object.
 */
static kern_return_t
internal_io_service_add_interest_notification(
	io_object_t _service,
	io_name_t type_of_interest,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceMessageUserNotification * userNotify = NULL;
	IONotifier * notify = NULL;
	const OSSymbol * sym;
	IOReturn err;

	CHECK( IOService, _service, service );

	err = kIOReturnNoResources;
	if ((sym = OSSymbol::withCString( type_of_interest ))) {
		do {
			userNotify = new IOServiceMessageUserNotification;

			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
			    reference, referenceSize, client64 )) {
				userNotify->release();
				userNotify = NULL;
			}
			if (!userNotify) {
				// err stays kIOReturnNoResources; the do/while(false)
				// "continue" exits the block.
				continue;
			}

			notify = service->registerInterest( sym,
			    &userNotify->_handler, userNotify );
			if (notify) {
				*notification = userNotify;
				userNotify->setNotification( notify );
				err = kIOReturnSuccess;
			} else {
				err = kIOReturnUnsupported;
			}
		} while (false);

		sym->release();
	}

	// On any failure after the user-notify object was created, tear it
	// down so its port reference and notifier are released.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	return err;
}
3109
3110 /* Routine io_service_add_message_notification */
3111 kern_return_t
is_io_service_add_interest_notification(io_object_t service,io_name_t type_of_interest,mach_port_t port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3112 is_io_service_add_interest_notification(
3113 io_object_t service,
3114 io_name_t type_of_interest,
3115 mach_port_t port,
3116 io_async_ref_t reference,
3117 mach_msg_type_number_t referenceCnt,
3118 io_object_t * notification )
3119 {
3120 io_async_ref_t zreference;
3121
3122 if (referenceCnt > ASYNC_REF_COUNT) {
3123 return kIOReturnBadArgument;
3124 }
3125 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3126 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3127
3128 return internal_io_service_add_interest_notification(service, type_of_interest,
3129 port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3130 }
3131
3132 /* Routine io_service_add_interest_notification_64 */
3133 kern_return_t
is_io_service_add_interest_notification_64(io_object_t service,io_name_t type_of_interest,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3134 is_io_service_add_interest_notification_64(
3135 io_object_t service,
3136 io_name_t type_of_interest,
3137 mach_port_t wake_port,
3138 io_async_ref64_t reference,
3139 mach_msg_type_number_t referenceCnt,
3140 io_object_t *notification )
3141 {
3142 io_async_ref64_t zreference;
3143
3144 if (referenceCnt > ASYNC_REF64_COUNT) {
3145 return kIOReturnBadArgument;
3146 }
3147 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3148 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3149
3150 return internal_io_service_add_interest_notification(service, type_of_interest,
3151 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3152 }
3153
3154
3155 /* Routine io_service_acknowledge_notification */
kern_return_t
is_io_service_acknowledge_notification(
	io_object_t _service,
	natural_t notify_ref,
	natural_t response )
{
	// CHECK casts the MIG object to IOService (failing the call on type
	// mismatch) and binds it to 'service'.
	CHECK( IOService, _service, service );

	// Forward the client's acknowledgement; notify_ref round-trips the
	// opaque IONotificationRef handed out with the original notification.
	return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
	           (IOOptionBits) response );
}
3167
/* Routine io_connect_get_notification_semaphore */
3169 kern_return_t
is_io_connect_get_notification_semaphore(io_connect_t connection,natural_t notification_type,semaphore_t * semaphore)3170 is_io_connect_get_notification_semaphore(
3171 io_connect_t connection,
3172 natural_t notification_type,
3173 semaphore_t *semaphore )
3174 {
3175 IOReturn ret;
3176 CHECK( IOUserClient, connection, client );
3177
3178 IOStatisticsClientCall();
3179 IORWLockWrite(client->lock);
3180 ret = client->getNotificationSemaphore((UInt32) notification_type,
3181 semaphore );
3182 IORWLockUnlock(client->lock);
3183
3184 return ret;
3185 }
3186
3187 /* Routine io_registry_get_root_entry */
3188 kern_return_t
is_io_registry_get_root_entry(mach_port_t main_port,io_object_t * root)3189 is_io_registry_get_root_entry(
3190 mach_port_t main_port,
3191 io_object_t *root )
3192 {
3193 IORegistryEntry * entry;
3194
3195 if (main_port != main_device_port) {
3196 return kIOReturnNotPrivileged;
3197 }
3198
3199 entry = IORegistryEntry::getRegistryRoot();
3200 if (entry) {
3201 entry->retain();
3202 }
3203 *root = entry;
3204
3205 return kIOReturnSuccess;
3206 }
3207
3208 /* Routine io_registry_create_iterator */
3209 kern_return_t
is_io_registry_create_iterator(mach_port_t main_port,io_name_t plane,uint32_t options,io_object_t * iterator)3210 is_io_registry_create_iterator(
3211 mach_port_t main_port,
3212 io_name_t plane,
3213 uint32_t options,
3214 io_object_t *iterator )
3215 {
3216 if (main_port != main_device_port) {
3217 return kIOReturnNotPrivileged;
3218 }
3219
3220 *iterator = IOUserIterator::withIterator(
3221 IORegistryIterator::iterateOver(
3222 IORegistryEntry::getPlane( plane ), options ));
3223
3224 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3225 }
3226
3227 /* Routine io_registry_entry_create_iterator */
3228 kern_return_t
is_io_registry_entry_create_iterator(io_object_t registry_entry,io_name_t plane,uint32_t options,io_object_t * iterator)3229 is_io_registry_entry_create_iterator(
3230 io_object_t registry_entry,
3231 io_name_t plane,
3232 uint32_t options,
3233 io_object_t *iterator )
3234 {
3235 CHECK( IORegistryEntry, registry_entry, entry );
3236
3237 *iterator = IOUserIterator::withIterator(
3238 IORegistryIterator::iterateOver( entry,
3239 IORegistryEntry::getPlane( plane ), options ));
3240
3241 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3242 }
3243
3244 /* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// CHECKLOCKED binds both the wrapping IOUserIterator ('oIter', which
	// owns the lock) and the underlying IORegistryIterator ('iter').
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Serialize recursion-depth changes against concurrent iterator calls.
	IOLockLock(oIter->lock);
	iter->enterEntry();
	IOLockUnlock(oIter->lock);

	return kIOReturnSuccess;
}
3257
3258 /* Routine io_registry_iterator_exit */
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool didIt;

	// CHECKLOCKED binds both the wrapping IOUserIterator ('oIter', which
	// owns the lock) and the underlying IORegistryIterator ('iter').
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	IOLockLock(oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(oIter->lock);

	// A false return from exitEntry() (nothing to exit) maps to
	// kIOReturnNoDevice for the user-space caller.
	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
3273
3274 /* Routine io_registry_entry_from_path */
3275 kern_return_t
is_io_registry_entry_from_path(mach_port_t main_port,io_string_t path,io_object_t * registry_entry)3276 is_io_registry_entry_from_path(
3277 mach_port_t main_port,
3278 io_string_t path,
3279 io_object_t *registry_entry )
3280 {
3281 IORegistryEntry * entry;
3282
3283 if (main_port != main_device_port) {
3284 return kIOReturnNotPrivileged;
3285 }
3286
3287 entry = IORegistryEntry::fromPath( path );
3288
3289 if (!entry && IOTaskRegistryCompatibility(current_task())) {
3290 OSDictionary * matching;
3291 const OSObject * objects[2] = { kOSBooleanTrue, NULL };
3292 const OSSymbol * keys[2] = { gIOCompatibilityMatchKey, gIOPathMatchKey };
3293
3294 objects[1] = OSString::withCStringNoCopy(path);
3295 matching = OSDictionary::withObjects(objects, keys, 2, 2);
3296 if (matching) {
3297 entry = IOService::copyMatchingService(matching);
3298 }
3299 OSSafeReleaseNULL(matching);
3300 OSSafeReleaseNULL(objects[1]);
3301 }
3302
3303 *registry_entry = entry;
3304
3305 return kIOReturnSuccess;
3306 }
3307
3308
/* Routine io_registry_entry_from_path_ool */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t main_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry * entry;
	vm_map_offset_t map_data;
	const char * cpath;
	IOReturn res;
	kern_return_t err;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry = NULL;
	res = err = KERN_SUCCESS;
	// A non-empty in-band path takes precedence; otherwise the path arrives
	// out-of-line as a vm_map_copy_t that we must consume.
	if (path[0]) {
		cpath = path;
	} else {
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		// Cap the out-of-line path to a sane maximum.
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// The out-of-line path must be NUL terminated.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	// Release the kernel mapping created by vm_map_copyout().
	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	if (KERN_SUCCESS != err) {
		res = err;
	}
	// 'err' is the MIG-level status; the lookup status travels in *result.
	*registry_entry = entry;
	*result = res;

	return err;
}
3369
3370
3371 /* Routine io_registry_entry_in_plane */
3372 kern_return_t
is_io_registry_entry_in_plane(io_object_t registry_entry,io_name_t plane,boolean_t * inPlane)3373 is_io_registry_entry_in_plane(
3374 io_object_t registry_entry,
3375 io_name_t plane,
3376 boolean_t *inPlane )
3377 {
3378 CHECK( IORegistryEntry, registry_entry, entry );
3379
3380 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3381
3382 return kIOReturnSuccess;
3383 }
3384
3385
3386 /* Routine io_registry_entry_get_path */
3387 kern_return_t
is_io_registry_entry_get_path(io_object_t registry_entry,io_name_t plane,io_string_t path)3388 is_io_registry_entry_get_path(
3389 io_object_t registry_entry,
3390 io_name_t plane,
3391 io_string_t path )
3392 {
3393 int length;
3394 CHECK( IORegistryEntry, registry_entry, entry );
3395
3396 length = sizeof(io_string_t);
3397 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3398 return kIOReturnSuccess;
3399 } else {
3400 return kIOReturnBadArgument;
3401 }
3402 }
3403
/* Routine io_registry_entry_get_path_ool */
3405 kern_return_t
is_io_registry_entry_get_path_ool(io_object_t registry_entry,io_name_t plane,io_string_inband_t path,io_buf_ptr_t * path_ool,mach_msg_type_number_t * path_oolCnt)3406 is_io_registry_entry_get_path_ool(
3407 io_object_t registry_entry,
3408 io_name_t plane,
3409 io_string_inband_t path,
3410 io_buf_ptr_t *path_ool,
3411 mach_msg_type_number_t *path_oolCnt)
3412 {
3413 enum { kMaxPath = 16384 };
3414 IOReturn err;
3415 int length;
3416 char * buf;
3417
3418 CHECK( IORegistryEntry, registry_entry, entry );
3419
3420 *path_ool = NULL;
3421 *path_oolCnt = 0;
3422 length = sizeof(io_string_inband_t);
3423 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
3424 err = kIOReturnSuccess;
3425 } else {
3426 length = kMaxPath;
3427 buf = IONewData(char, length);
3428 if (!buf) {
3429 err = kIOReturnNoMemory;
3430 } else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
3431 err = kIOReturnError;
3432 } else {
3433 *path_oolCnt = length;
3434 err = copyoutkdata(buf, length, path_ool);
3435 }
3436 if (buf) {
3437 IODeleteData(buf, char, kMaxPath);
3438 }
3439 }
3440
3441 return err;
3442 }
3443
3444
3445 /* Routine io_registry_entry_get_name */
3446 kern_return_t
is_io_registry_entry_get_name(io_object_t registry_entry,io_name_t name)3447 is_io_registry_entry_get_name(
3448 io_object_t registry_entry,
3449 io_name_t name )
3450 {
3451 CHECK( IORegistryEntry, registry_entry, entry );
3452
3453 strncpy( name, entry->getName(), sizeof(io_name_t));
3454
3455 return kIOReturnSuccess;
3456 }
3457
3458 /* Routine io_registry_entry_get_name_in_plane */
3459 kern_return_t
is_io_registry_entry_get_name_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t name)3460 is_io_registry_entry_get_name_in_plane(
3461 io_object_t registry_entry,
3462 io_name_t planeName,
3463 io_name_t name )
3464 {
3465 const IORegistryPlane * plane;
3466 CHECK( IORegistryEntry, registry_entry, entry );
3467
3468 if (planeName[0]) {
3469 plane = IORegistryEntry::getPlane( planeName );
3470 } else {
3471 plane = NULL;
3472 }
3473
3474 strncpy( name, entry->getName( plane), sizeof(io_name_t));
3475
3476 return kIOReturnSuccess;
3477 }
3478
3479 /* Routine io_registry_entry_get_location_in_plane */
3480 kern_return_t
is_io_registry_entry_get_location_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t location)3481 is_io_registry_entry_get_location_in_plane(
3482 io_object_t registry_entry,
3483 io_name_t planeName,
3484 io_name_t location )
3485 {
3486 const IORegistryPlane * plane;
3487 CHECK( IORegistryEntry, registry_entry, entry );
3488
3489 if (planeName[0]) {
3490 plane = IORegistryEntry::getPlane( planeName );
3491 } else {
3492 plane = NULL;
3493 }
3494
3495 const char * cstr = entry->getLocation( plane );
3496
3497 if (cstr) {
3498 strncpy( location, cstr, sizeof(io_name_t));
3499 return kIOReturnSuccess;
3500 } else {
3501 return kIOReturnNotFound;
3502 }
3503 }
3504
3505 /* Routine io_registry_entry_get_registry_entry_id */
3506 kern_return_t
is_io_registry_entry_get_registry_entry_id(io_object_t registry_entry,uint64_t * entry_id)3507 is_io_registry_entry_get_registry_entry_id(
3508 io_object_t registry_entry,
3509 uint64_t *entry_id )
3510 {
3511 CHECK( IORegistryEntry, registry_entry, entry );
3512
3513 *entry_id = entry->getRegistryEntryID();
3514
3515 return kIOReturnSuccess;
3516 }
3517
3518
3519 static OSObject *
IOCopyPropertyCompatible(IORegistryEntry * regEntry,const char * name)3520 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3521 {
3522 OSObject * obj;
3523 OSObject * compatProperties;
3524 OSDictionary * props;
3525
3526 obj = regEntry->copyProperty(name);
3527 if (obj) {
3528 return obj;
3529 }
3530
3531 compatProperties = regEntry->copyProperty(gIOUserServicePropertiesKey);
3532 if (!compatProperties
3533 && IOTaskRegistryCompatibility(current_task())) {
3534 compatProperties = regEntry->copyProperty(gIOCompatibilityPropertiesKey);
3535 }
3536 if (compatProperties) {
3537 props = OSDynamicCast(OSDictionary, compatProperties);
3538 if (props) {
3539 obj = props->getObject(name);
3540 if (obj) {
3541 obj->retain();
3542 }
3543 }
3544 compatProperties->release();
3545 }
3546
3547 return obj;
3548 }
3549
/* Routine io_registry_entry_get_property_bytes */
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject * obj;
	OSData * data;
	OSString * str;
	OSBoolean * boo;
	OSNumber * off;
	UInt64 offsetBytes;
	unsigned int len = 0;
	const void * bytes = NULL;
	IOReturn ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MACF may deny this credential access to the named property.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then...
	// Extract a raw byte view depending on the property's concrete type.
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		// Non-serializable OSData deliberately reports zero length.
		if (!data->isSerializable()) {
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		len = str->getLength() + 1; // include the NUL terminator
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		// On big-endian, the significant bytes sit at the end of the word.
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		// The caller's buffer must be large enough; report actual size back.
		if (*dataCnt < len) {
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3622
3623
3624 /* Routine io_registry_entry_get_property */
3625 kern_return_t
is_io_registry_entry_get_property(io_object_t registry_entry,io_name_t property_name,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)3626 is_io_registry_entry_get_property(
3627 io_object_t registry_entry,
3628 io_name_t property_name,
3629 io_buf_ptr_t *properties,
3630 mach_msg_type_number_t *propertiesCnt )
3631 {
3632 kern_return_t err;
3633 unsigned int len;
3634 OSObject * obj;
3635
3636 CHECK( IORegistryEntry, registry_entry, entry );
3637
3638 #if CONFIG_MACF
3639 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3640 return kIOReturnNotPermitted;
3641 }
3642 #endif
3643
3644 obj = IOCopyPropertyCompatible(entry, property_name);
3645 if (!obj) {
3646 return kIOReturnNotFound;
3647 }
3648
3649 OSSerialize * s = OSSerialize::withCapacity(4096);
3650 if (!s) {
3651 obj->release();
3652 return kIOReturnNoMemory;
3653 }
3654
3655 if (obj->serialize( s )) {
3656 len = s->getLength();
3657 *propertiesCnt = len;
3658 err = copyoutkdata( s->text(), len, properties );
3659 } else {
3660 err = kIOReturnUnsupported;
3661 }
3662
3663 s->release();
3664 obj->release();
3665
3666 return err;
3667 }
3668
3669 /* Routine io_registry_entry_get_property_recursively */
3670 kern_return_t
is_io_registry_entry_get_property_recursively(io_object_t registry_entry,io_name_t plane,io_name_t property_name,uint32_t options,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)3671 is_io_registry_entry_get_property_recursively(
3672 io_object_t registry_entry,
3673 io_name_t plane,
3674 io_name_t property_name,
3675 uint32_t options,
3676 io_buf_ptr_t *properties,
3677 mach_msg_type_number_t *propertiesCnt )
3678 {
3679 kern_return_t err;
3680 unsigned int len;
3681 OSObject * obj;
3682
3683 CHECK( IORegistryEntry, registry_entry, entry );
3684
3685 #if CONFIG_MACF
3686 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3687 return kIOReturnNotPermitted;
3688 }
3689 #endif
3690
3691 obj = entry->copyProperty( property_name,
3692 IORegistryEntry::getPlane( plane ), options );
3693 if (!obj) {
3694 return kIOReturnNotFound;
3695 }
3696
3697 OSSerialize * s = OSSerialize::withCapacity(4096);
3698 if (!s) {
3699 obj->release();
3700 return kIOReturnNoMemory;
3701 }
3702
3703 if (obj->serialize( s )) {
3704 len = s->getLength();
3705 *propertiesCnt = len;
3706 err = copyoutkdata( s->text(), len, properties );
3707 } else {
3708 err = kIOReturnUnsupported;
3709 }
3710
3711 s->release();
3712 obj->release();
3713
3714 return err;
3715 }
3716
3717 /* Routine io_registry_entry_get_properties */
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// Legacy interface, intentionally disabled; callers use the binary
	// serialization variants (io_registry_entry_get_properties_bin*).
	return kIOReturnUnsupported;
}
3726
3727 #if CONFIG_MACF
3728
// State shared with GetPropertiesEditor() while serializing a registry
// entry's property table under MACF filtering.
struct GetPropertiesEditorRef {
	kauth_cred_t cred;        // credential of the requesting task
	IORegistryEntry * entry;  // entry whose properties are being serialized
	OSCollection * root;      // top-level container; only its keys are filtered
};
3734
static const LIBKERN_RETURNS_RETAINED OSMetaClassBase *
GetPropertiesEditor(void * reference,
    OSSerialize * s,
    OSCollection * container,
    const OSSymbol * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	// The first container seen is the property table itself; remember it so
	// only its top-level keys are subject to MACF filtering.
	if (!ref->root) {
		ref->root = container;
	}
	if (ref->root == container) {
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			value = NULL; // suppress this key for the requesting credential
		}
	}
	// Per the editor contract (LIBKERN_RETURNS_RETAINED), return a retained
	// object, or NULL to drop the entry from the serialized output.
	if (value) {
		value->retain();
	}
	return value;
}
3757
3758 #endif /* CONFIG_MACF */
3759
3760 /* Routine io_registry_entry_get_properties_bin_buf */
kern_return_t
is_io_registry_entry_get_properties_bin_buf(
	io_object_t registry_entry,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t err = kIOReturnSuccess;
	unsigned int len;
	OSObject * compatProperties;
	OSSerialize * s;
	OSSerialize::Editor editor = NULL;
	void * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// When MACF wants to filter this entry, route serialization through
	// GetPropertiesEditor so individual top-level keys can be suppressed.
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor = &GetPropertiesEditor;
		editRef = &ref;
		ref.cred = kauth_cred_get();
		ref.entry = entry;
		ref.root = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}


	// If the entry carries user-server (or, for compatibility-mode tasks,
	// compatibility) properties, merge them into a copy of the property
	// table with the raw container keys removed.
	compatProperties = entry->copyProperty(gIOUserServicePropertiesKey);
	if (!compatProperties
	    && IOTaskRegistryCompatibility(current_task())) {
		compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey);
	}

	if (compatProperties) {
		OSDictionary * dict;

		dict = entry->dictionaryWithProperties();
		if (!dict) {
			err = kIOReturnNoMemory;
		} else {
			dict->removeObject(gIOUserServicePropertiesKey);
			dict->removeObject(gIOCompatibilityPropertiesKey);
			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
			if (!dict->serialize(s)) {
				err = kIOReturnUnsupported;
			}
			dict->release();
		}
		compatProperties->release();
	} else if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		// Prefer copying into the caller-supplied buffer; fall back to an
		// out-of-line kernel buffer when absent or too small.
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	}
	s->release();

	return err;
}
3844
3845 /* Routine io_registry_entry_get_properties_bin */
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	// Out-of-line-only variant: no caller buffer (buf = 0, bufsize = NULL).
	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
	           0, NULL, properties, propertiesCnt);
}
3855
3856 /* Routine io_registry_entry_get_property_bin_buf */
kern_return_t
is_io_registry_entry_get_property_bin_buf(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	unsigned int len;
	OSObject * obj;
	const OSSymbol * sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MACF may deny this credential access to the named property.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	err = kIOReturnNotFound;
	if (gIORegistryEntryPropertyKeysKey == sym) {
		// Special key: return the list of property names, not a value.
		obj = entry->copyPropertyKeys();
	} else {
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			obj = IOCopyPropertyCompatible(entry, property_name);
			if (obj == NULL) {
				// Not on this entry: walk the plane (per 'options') for
				// the first entry carrying the property. Note 'entry' is
				// rebound to each visited entry during the walk.
				IORegistryIterator * iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
				if (iter) {
					while ((NULL == obj) && (entry = iter->getNextObject())) {
						OSObject * currentObj = IOCopyPropertyCompatible(entry, property_name);
#if CONFIG_MACF
						if (currentObj != NULL && 0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
							// Record that MAC hook blocked this entry and property, and continue to next entry
							err = kIOReturnNotPermitted;
							OSSafeReleaseNULL(currentObj);
							continue;
						}
#endif
						obj = currentObj;
					}
					iter->release();
				}
			}
		} else {
			obj = IOCopyPropertyCompatible(entry, property_name);
		}
		// Some properties are destructive reads: consumed when fetched.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		// 'err' is kIOReturnNotFound, or kIOReturnNotPermitted if a MACF
		// hook blocked every candidate during the walk.
		return err;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		// Prefer the caller-supplied buffer; otherwise hand back an
		// out-of-line kernel allocation via copyoutkdata().
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3956
3957 /* Routine io_registry_entry_get_property_bin */
kern_return_t
is_io_registry_entry_get_property_bin(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// Out-of-line-only variant: no caller buffer (buf = 0, bufsize = NULL).
	return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
	           property_name, options, 0, NULL, properties, propertiesCnt);
}
3970
3971
3972 /* Routine io_registry_entry_set_properties */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject * obj;
	kern_return_t err;
	IOReturn res;
	vm_offset_t data;
	vm_map_offset_t map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Cap the serialized property blob to a sane maximum.
	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			IOService * service = OSDynamicCast(IOService, entry);
			OSDictionary * props = OSDynamicCast(OSDictionary, obj);
			OSObject * allowable = entry->copyProperty(gIORegistryEntryAllowableSetPropertiesKey);
			OSArray * allowableArray;

			// If the entry publishes an allow-list, every key being set
			// must appear in it; otherwise the request is rejected.
			if (!allowable) {
				res = kIOReturnSuccess;
			} else {
				if (!props) {
					res = kIOReturnNotPermitted;
				} else if (!(allowableArray = OSDynamicCast(OSArray, allowable))) {
					res = kIOReturnNotPermitted;
				} else {
					bool allFound __block, found __block;

					allFound = true;
					props->iterateObjects(^(const OSSymbol * key, OSObject * value) {
						// Linear scan of the allow-list for this key.
						found = false;
						for (unsigned int idx = 0; !found; idx++) {
							OSObject * next = allowableArray->getObject(idx);
							if (!next) {
								break;
							}
							found = next->isEqualTo(key);
						}
						allFound &= found;
						if (!found) {
							IOLog("IORegistryEntrySetProperties(%s, %s) disallowed due to " kIORegistryEntryAllowableSetPropertiesKey "\n",
							    entry->getName(), key->getCStringNoCopy());
						}
						// Stop iterating on the first disallowed key.
						return !allFound;
					});
					res = allFound ? kIOReturnSuccess : kIOReturnBadArgument;
				}
			}
			if (kIOReturnSuccess == res) {
				IOUserClient *
				    client = OSDynamicCast(IOUserClient, entry);

				// User clients may opt into exclusive locking around
				// setProperties.
				if (client && client->defaultLockingSetProperties) {
					IORWLockWrite(client->lock);
				}

				// Non-client entries may instead request serialization
				// via the registry property action lock.
				if (!client && (kOSBooleanTrue == entry->getProperty(gIORegistryEntryDefaultLockingSetPropertiesKey))) {
					res = entry->runPropertyActionBlock(^IOReturn (void) {
						return entry->setProperties( obj );
					});
				} else {
					res = entry->setProperties( obj );
				}

				if (client && client->defaultLockingSetProperties) {
					IORWLockUnlock(client->lock);
				}
				// Services with a user-space (DriverKit) server also get
				// the properties delivered there.
				if (service && props && service->hasUserServer()) {
					res = service->UserSetProperties(props);
				}
			}
			OSSafeReleaseNULL(allowable);
		}
		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	// 'err' is the MIG-level status; the operation status travels in *result.
	*result = res;
	return err;
}
4085
4086 /* Routine io_registry_entry_get_child_iterator */
4087 kern_return_t
is_io_registry_entry_get_child_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4088 is_io_registry_entry_get_child_iterator(
4089 io_object_t registry_entry,
4090 io_name_t plane,
4091 io_object_t *iterator )
4092 {
4093 CHECK( IORegistryEntry, registry_entry, entry );
4094
4095 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
4096 IORegistryEntry::getPlane( plane )));
4097
4098 return kIOReturnSuccess;
4099 }
4100
4101 /* Routine io_registry_entry_get_parent_iterator */
4102 kern_return_t
is_io_registry_entry_get_parent_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4103 is_io_registry_entry_get_parent_iterator(
4104 io_object_t registry_entry,
4105 io_name_t plane,
4106 io_object_t *iterator)
4107 {
4108 CHECK( IORegistryEntry, registry_entry, entry );
4109
4110 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
4111 IORegistryEntry::getPlane( plane )));
4112
4113 return kIOReturnSuccess;
4114 }
4115
4116 /* Routine io_service_get_busy_state */
4117 kern_return_t
is_io_service_get_busy_state(io_object_t _service,uint32_t * busyState)4118 is_io_service_get_busy_state(
4119 io_object_t _service,
4120 uint32_t *busyState )
4121 {
4122 CHECK( IOService, _service, service );
4123
4124 *busyState = service->getBusyState();
4125
4126 return kIOReturnSuccess;
4127 }
4128
4129 /* Routine io_service_get_state */
4130 kern_return_t
is_io_service_get_state(io_object_t _service,uint64_t * state,uint32_t * busy_state,uint64_t * accumulated_busy_time)4131 is_io_service_get_state(
4132 io_object_t _service,
4133 uint64_t *state,
4134 uint32_t *busy_state,
4135 uint64_t *accumulated_busy_time )
4136 {
4137 CHECK( IOService, _service, service );
4138
4139 *state = service->getState();
4140 *busy_state = service->getBusyState();
4141 *accumulated_busy_time = service->getAccumulatedBusyTime();
4142
4143 return kIOReturnSuccess;
4144 }
4145
4146 /* Routine io_service_wait_quiet */
4147 kern_return_t
is_io_service_wait_quiet(io_object_t _service,mach_timespec_t wait_time)4148 is_io_service_wait_quiet(
4149 io_object_t _service,
4150 mach_timespec_t wait_time )
4151 {
4152 uint64_t timeoutNS;
4153
4154 CHECK( IOService, _service, service );
4155
4156 timeoutNS = wait_time.tv_sec;
4157 timeoutNS *= kSecondScale;
4158 timeoutNS += wait_time.tv_nsec;
4159
4160 return service->waitQuiet(timeoutNS);
4161 }
4162
4163 /* Routine io_service_wait_quiet_with_options */
4164 kern_return_t
is_io_service_wait_quiet_with_options(io_object_t _service,mach_timespec_t wait_time,uint32_t options)4165 is_io_service_wait_quiet_with_options(
4166 io_object_t _service,
4167 mach_timespec_t wait_time,
4168 uint32_t options )
4169 {
4170 uint64_t timeoutNS;
4171
4172 CHECK( IOService, _service, service );
4173
4174 timeoutNS = wait_time.tv_sec;
4175 timeoutNS *= kSecondScale;
4176 timeoutNS += wait_time.tv_nsec;
4177
4178 if ((options & kIOWaitQuietPanicOnFailure) && !IOCurrentTaskHasEntitlement(kIOWaitQuietPanicsEntitlement)) {
4179 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4180 IOLog("IOServiceWaitQuietWithOptions(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4181 OSSafeReleaseNULL(taskName);
4182
4183 /* strip this option from the options before calling waitQuietWithOptions */
4184 options &= ~kIOWaitQuietPanicOnFailure;
4185 }
4186
4187 return service->waitQuietWithOptions(timeoutNS, options);
4188 }
4189
4190
4191 /* Routine io_service_request_probe */
4192 kern_return_t
is_io_service_request_probe(io_object_t _service,uint32_t options)4193 is_io_service_request_probe(
4194 io_object_t _service,
4195 uint32_t options )
4196 {
4197 CHECK( IOService, _service, service );
4198
4199 return service->requestProbe( options );
4200 }
4201
4202 /* Routine io_service_get_authorization_id */
4203 kern_return_t
is_io_service_get_authorization_id(io_object_t _service,uint64_t * authorization_id)4204 is_io_service_get_authorization_id(
4205 io_object_t _service,
4206 uint64_t *authorization_id )
4207 {
4208 kern_return_t kr;
4209
4210 CHECK( IOService, _service, service );
4211
4212 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
4213 kIOClientPrivilegeAdministrator );
4214 if (kIOReturnSuccess != kr) {
4215 return kr;
4216 }
4217
4218 *authorization_id = service->getAuthorizationID();
4219
4220 return kr;
4221 }
4222
4223 /* Routine io_service_set_authorization_id */
4224 kern_return_t
is_io_service_set_authorization_id(io_object_t _service,uint64_t authorization_id)4225 is_io_service_set_authorization_id(
4226 io_object_t _service,
4227 uint64_t authorization_id )
4228 {
4229 CHECK( IOService, _service, service );
4230
4231 return service->setAuthorizationID( authorization_id );
4232 }
4233
/* Routine io_service_open_ndr */
/*
 * Open a user-client connection on a service for the calling task.
 * Creates the IOUserClient via newUserClient(), configures its locking
 * mode, enforces entitlement / MACF / sandbox-filter policy, and returns
 * the connection object. `err` is the MIG transport status; the logical
 * open status is delivered through *result.
 */
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,                       // part of the MIG signature; unused here
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = NULL;
	kern_return_t err = KERN_SUCCESS;
	IOReturn res = kIOReturnSuccess;
	OSDictionary * propertiesDict = NULL;
	bool disallowAccess = false;

	CHECK( IOService, _service, service );

	// The connection must be opened on behalf of the calling task.
	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

#if CONFIG_MACF
	// MAC policy check on the service open itself (a second check on the
	// created client object happens further down).
	if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
		return kIOReturnNotPermitted;
	}
#endif
	do{
		// Out-of-line creation properties are not currently accepted.
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		// Disabled: would unserialize caller-supplied properties into
		// propertiesDict for newUserClient().
		{
			OSObject * obj;
			vm_offset_t data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess && OSDynamicCast(IOUserClient, client) == NULL) {
			// client should always be a IOUserClient
			res = kIOReturnError;
		}

		if (res == kIOReturnSuccess) {
			// Make sure the client's expansion data is allocated;
			// on failure the client is closed and dropped.
			if (!client->reserved) {
				if (!client->reserve()) {
					client->clientClose();
					OSSafeReleaseNULL(client);
					res = kIOReturnNoMemory;
				}
			}
		}

		if (res == kIOReturnSuccess) {
			// Tag the client with the creating process for logging.
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
			}
			const char * creatorNameCStr = creatorName ? creatorName->getCStringNoCopy() : "<unknown>";
			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
			// Shared-instance clients can be opened concurrently, so
			// the one-time setup below is serialized under
			// gIOUserClientOwnersLock.
			if (client->sharedInstance) {
				IOLockLock(gIOUserClientOwnersLock);
			}
			// First open of this client: allocate its locks and
			// resolve its locking/suspend configuration.
			if (!client->lock) {
				client->lock = IORWLockAlloc();
				client->filterLock = IOLockAlloc();

				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
				{
					OSObject * obj;
					extern const OSSymbol * gIOSurfaceIdentifier;
					obj = client->getProperty(kIOUserClientDefaultLockingKey);
					bool hasProps = false;

					// IOUserClient2022 subclasses must declare all three
					// locking properties explicitly; missing any is an error.
					client->uc2022 = (NULL != OSDynamicCast(IOUserClient2022, client));
					if (obj) {
						hasProps = true;
						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSetProperties = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSingleThreadExternalMethod = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					if (kIOReturnSuccess != res) {
						IOLog("IOUC %s requires kIOUserClientDefaultLockingKey, kIOUserClientDefaultLockingSetPropertiesKey, kIOUserClientDefaultLockingSingleThreadExternalMethodKey\n",
						    client->getMetaClass()->getClassName());
					}
					// Legacy clients that declared nothing: default to
					// locked external methods, unless the kext depends
					// on IOSurface (which keeps legacy behavior).
					if (!hasProps) {
						const OSMetaClass * meta;
						OSKext * kext;
						meta = client->getMetaClass();
						kext = meta->getKext();
						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
							client->defaultLocking = true;
							client->defaultLockingSetProperties = false;
							client->defaultLockingSingleThreadExternalMethod = false;
							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
						}
					}
				}
			}
			if (client->sharedInstance) {
				IOLockUnlock(gIOUserClientOwnersLock);
			}

			OSObject * requiredEntitlement = client->copyProperty(gIOUserClientEntitlementsKey);
			OSString * requiredEntitlementString = OSDynamicCast(OSString, requiredEntitlement);
			//If this is an IOUserClient2022, having kIOUserClientEntitlementsKey is mandatory.
			//If it has kIOUserClientEntitlementsKey, the value must be either kOSBooleanFalse or an OSString
			//If the value is kOSBooleanFalse, we allow access.
			//If the value is an OSString, we allow access if the task has the named entitlement
			if (client->uc2022) {
				if (!requiredEntitlement) {
					IOLog("IOUC %s missing " kIOUserClientEntitlementsKey " property\n",
					    client->getMetaClass()->getClassName());
					disallowAccess = true;
				} else if (!requiredEntitlementString && requiredEntitlement != kOSBooleanFalse) {
					IOLog("IOUC %s had " kIOUserClientEntitlementsKey "with value not boolean false or string\n", client->getMetaClass()->getClassName());
					disallowAccess = true;
				}
			}

			if (requiredEntitlement && disallowAccess == false) {
				if (kOSBooleanFalse == requiredEntitlement) {
					// allow
					disallowAccess = false;
				} else {
					disallowAccess = !IOTaskHasEntitlement(owningTask, requiredEntitlementString->getCStringNoCopy());
					if (disallowAccess) {
						IOLog("IOUC %s missing entitlement in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			OSSafeReleaseNULL(requiredEntitlement);

			if (disallowAccess) {
				res = kIOReturnNotPrivileged;
			}
#if CONFIG_MACF
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				IOLog("IOUC %s failed MACF in process %s\n",
				    client->getMetaClass()->getClassName(), creatorNameCStr);
				res = kIOReturnNotPermitted;
			}
#endif

			// Sandbox filter: if no filter policy is installed yet for
			// this task, ask the resolver for one. kIOReturnUnsupported
			// from the resolver means no filtering is required.
			if ((kIOReturnSuccess == res)
			    && gIOUCFilterCallbacks
			    && gIOUCFilterCallbacks->io_filter_resolver) {
				io_filter_policy_t filterPolicy;
				filterPolicy = client->filterForTask(owningTask, 0);
				if (!filterPolicy) {
					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
					if (kIOReturnUnsupported == res) {
						res = kIOReturnSuccess;
					} else if (kIOReturnSuccess == res) {
						client->filterForTask(owningTask, filterPolicy);
					} else {
						IOLog("IOUC %s failed sandbox in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}
			OSSafeReleaseNULL(creatorName);

			// Any failure after creation: close and drop the client so
			// the caller gets a NULL connection with *result set.
			if (kIOReturnSuccess != res) {
				IOStatisticsClientCall();
				client->clientClose();
				client->setTerminateDefer(service, false);
				client->release();
				client = NULL;
				break;
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	*connection = client;
	*result = res;

	return err;
}
4470
/* Routine io_service_close */
/*
 * Close a user-client connection. Only the first close of a non-shared
 * client actually runs clientClose(); later closes are logged and ignored.
 */
kern_return_t
is_io_service_close(
	io_object_t connection )
{
	OSSet * mappings;
	// NOTE(review): an OSSet here is presumably the per-client mappings
	// set handed out as a port object elsewhere; closing it is a no-op.
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	// Shared instances may always close; otherwise only the caller that
	// atomically flips closed from 0 to 1 performs the real close.
	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		IORWLockWrite(client->lock);
		client->clientClose();
		IORWLockUnlock(client->lock);
	} else {
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
4496
4497 /* Routine io_connect_get_service */
4498 kern_return_t
is_io_connect_get_service(io_object_t connection,io_object_t * service)4499 is_io_connect_get_service(
4500 io_object_t connection,
4501 io_object_t *service )
4502 {
4503 IOService * theService;
4504
4505 CHECK( IOUserClient, connection, client );
4506
4507 theService = client->getService();
4508 if (theService) {
4509 theService->retain();
4510 }
4511
4512 *service = theService;
4513
4514 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4515 }
4516
4517 /* Routine io_connect_set_notification_port */
4518 kern_return_t
is_io_connect_set_notification_port(io_object_t connection,uint32_t notification_type,mach_port_t port,uint32_t reference)4519 is_io_connect_set_notification_port(
4520 io_object_t connection,
4521 uint32_t notification_type,
4522 mach_port_t port,
4523 uint32_t reference)
4524 {
4525 kern_return_t ret;
4526 CHECK( IOUserClient, connection, client );
4527
4528 IOStatisticsClientCall();
4529 IORWLockWrite(client->lock);
4530 ret = client->registerNotificationPort( port, notification_type,
4531 (io_user_reference_t) reference );
4532 IORWLockUnlock(client->lock);
4533 return ret;
4534 }
4535
4536 /* Routine io_connect_set_notification_port */
4537 kern_return_t
is_io_connect_set_notification_port_64(io_object_t connection,uint32_t notification_type,mach_port_t port,io_user_reference_t reference)4538 is_io_connect_set_notification_port_64(
4539 io_object_t connection,
4540 uint32_t notification_type,
4541 mach_port_t port,
4542 io_user_reference_t reference)
4543 {
4544 kern_return_t ret;
4545 CHECK( IOUserClient, connection, client );
4546
4547 IOStatisticsClientCall();
4548 IORWLockWrite(client->lock);
4549 ret = client->registerNotificationPort( port, notification_type,
4550 reference );
4551 IORWLockUnlock(client->lock);
4552 return ret;
4553 }
4554
/* Routine io_connect_map_memory_into_task */
/*
 * Map a client memory region (selected by memory_type) into into_task.
 * On success *address/*size describe the mapping. The IOMemoryMap's
 * lifetime is tied either to a send right pushed to the owning task, or
 * to the client's mappings set (cleaned up on close/unmap).
 */
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
)
{
	IOReturn err;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	// Honor the client's default locking mode around the mapping call.
	if (client->defaultLocking) {
		IORWLockWrite(client->lock);
	}
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );
	if (client->defaultLocking) {
		IORWLockUnlock(client->lock);
	}

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			map->release();
		} else {
			// keep it with the user client
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				// Note: if the set allocation above failed, the
				// map simply goes untracked — no error is raised.
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	return err;
}
4618
4619 /* Routine is_io_connect_map_memory */
4620 kern_return_t
is_io_connect_map_memory(io_object_t connect,uint32_t type,task_t task,uint32_t * mapAddr,uint32_t * mapSize,uint32_t flags)4621 is_io_connect_map_memory(
4622 io_object_t connect,
4623 uint32_t type,
4624 task_t task,
4625 uint32_t * mapAddr,
4626 uint32_t * mapSize,
4627 uint32_t flags )
4628 {
4629 IOReturn err;
4630 mach_vm_address_t address;
4631 mach_vm_size_t size;
4632
4633 address = SCALAR64(*mapAddr);
4634 size = SCALAR64(*mapSize);
4635
4636 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4637
4638 *mapAddr = SCALAR32(address);
4639 *mapSize = SCALAR32(size);
4640
4641 return err;
4642 }
4643 } /* extern "C" */
4644
4645 IOMemoryMap *
removeMappingForDescriptor(IOMemoryDescriptor * mem)4646 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4647 {
4648 OSIterator * iter;
4649 IOMemoryMap * map = NULL;
4650
4651 IOLockLock(gIOObjectPortLock);
4652
4653 iter = OSCollectionIterator::withCollection(mappings);
4654 if (iter) {
4655 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4656 if (mem == map->getMemoryDescriptor()) {
4657 map->retain();
4658 mappings->removeObject(map);
4659 break;
4660 }
4661 }
4662 iter->release();
4663 }
4664
4665 IOLockUnlock(gIOObjectPortLock);
4666
4667 return map;
4668 }
4669
4670 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
/*
 * Unmap a previously mapped client memory region from from_task. The
 * existing mapping is re-found via a kIOMapReference mapping at the same
 * address, removed from the client's tracking set, and torn down.
 */
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	if (client->defaultLocking) {
		IORWLockWrite(client->lock);
	}
	// Re-resolve the descriptor for this memory type from the client.
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );
	if (client->defaultLocking) {
		IORWLockUnlock(client->lock);
	}

	if (memory && (kIOReturnSuccess == err)) {
		// kIOMapReference makes createMappingInTask look up the existing
		// mapping at `address` rather than create a new one.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			// NOTE(review): name says "or_from_current_task" but the
			// condition is true when from_task is NOT the current task
			// (or the client is shared) — the name is misleading.
			bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance;
			if (is_shared_instance_or_from_current_task) {
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				map->userClientUnmap();
				// The mod_send_right result is deliberately discarded;
				// the unmap itself is reported as success.
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (!is_shared_instance_or_from_current_task) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	return err;
}
4737
4738 kern_return_t
is_io_connect_unmap_memory(io_object_t connect,uint32_t type,task_t task,uint32_t mapAddr)4739 is_io_connect_unmap_memory(
4740 io_object_t connect,
4741 uint32_t type,
4742 task_t task,
4743 uint32_t mapAddr )
4744 {
4745 IOReturn err;
4746 mach_vm_address_t address;
4747
4748 address = SCALAR64(mapAddr);
4749
4750 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4751
4752 return err;
4753 }
4754
4755
4756 /* Routine io_connect_add_client */
4757 kern_return_t
is_io_connect_add_client(io_object_t connection,io_object_t connect_to)4758 is_io_connect_add_client(
4759 io_object_t connection,
4760 io_object_t connect_to)
4761 {
4762 CHECK( IOUserClient, connection, client );
4763 CHECK( IOUserClient, connect_to, to );
4764
4765 IOReturn ret;
4766
4767 IOStatisticsClientCall();
4768 if (client->defaultLocking) {
4769 IORWLockWrite(client->lock);
4770 }
4771 ret = client->connectClient( to );
4772 if (client->defaultLocking) {
4773 IORWLockUnlock(client->lock);
4774 }
4775 return ret;
4776 }
4777
4778
/* Routine io_connect_set_properties */
/*
 * A connection is a registry entry, so setting properties on it is
 * delegated wholesale to the registry-entry implementation.
 */
kern_return_t
is_io_connect_set_properties(
	io_object_t connection,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
}
4789
/* Routine io_user_client_method */
/*
 * External-method dispatch with a variable-length output buffer. The
 * method deposits its variable output into structureVariableOutputData
 * (as OSSerialize or OSData), which is then copied out to the caller.
 */
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	OSObject * structureVariableOutputData = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Not an async call: no wake port or async reference.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line input no larger than the inband limit should have been
	// sent inband; treat it as an IPC protocol violation.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero the scalar output so the method cannot leak stale stack data.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	// Variable output replaces the fixed output descriptor on this path.
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;

	// Apply the sandbox filter policy (if one is installed for this task)
	// before dispatching the external method.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}

	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod(selector, &args);
	}

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	// Copy out the variable-length result; only OSSerialize and OSData
	// payloads are accepted, anything else is an underrun.
	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData * data;
		unsigned int len;

		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			data->clipForCopyout();
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
4896
/* Routine io_user_client_method */
/*
 * Main synchronous external-method dispatch: packs the MIG arguments into
 * IOExternalMethodArguments, applies the per-task sandbox filter, then
 * calls the client's external method.
 */
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Not an async call: no wake port, reference, or variable output.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line buffers no larger than the inband limit should have
	// been sent inband; sizes above UINT_MAX cannot be represented in
	// structureOutputDescriptorSize. Both are IPC protocol violations.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero the scalar output so the method cannot leak stale data.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size
	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
	    : 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	// Sandbox filter check before dispatch, if a policy is installed.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	// Report back the counts/sizes the method actually produced.
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
4999
/* Routine io_async_user_client_method */
/*
 * Asynchronous external-method dispatch. Identical argument packing to
 * is_io_connect_method, plus a wake port and async reference array for
 * completion delivery.
 */
kern_return_t
is_io_connect_async_method
(
	io_connect_t connection,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t * ool_output_size
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	// reference[0] is overwritten below to carry the wake port, so the
	// caller must provide at least one slot.
	if (referenceCnt < 1) {
		return kIOReturnBadArgument;
	}

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	// Stash the wake port in reference[0]; tag callers with a 64-bit
	// address space so completion knows how to interpret the reference.
	reference[0] = (io_user_reference_t) wake_port;
	if (vm_map_is_64bit(get_task_map(current_task()))) {
		reference[0] |= kIOUCAsync64Flag;
	}

	args.selector = selector;

	args.asyncWakePort = wake_port;
	args.asyncReference = reference;
	args.asyncReferenceCount = referenceCnt;

	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Same IPC sanity checks as the synchronous path: out-of-line buffers
	// must exceed the inband limit, and sizes must fit in 32 bits.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero the scalar output so the method cannot leak stale data.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	// NOTE(review): unlike is_io_connect_method, ool_output_size is
	// dereferenced here without a NULL guard — presumably MIG always
	// supplies it (it is also dereferenced unconditionally below); verify.
	args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	// Sandbox filter check (async variant) before dispatch.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_async_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	// Report back the counts/sizes the method actually produced.
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5113
5114 /* Routine io_connect_method_scalarI_scalarO */
5115 kern_return_t
is_io_connect_method_scalarI_scalarO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5116 is_io_connect_method_scalarI_scalarO(
5117 io_object_t connect,
5118 uint32_t index,
5119 io_scalar_inband_t input,
5120 mach_msg_type_number_t inputCount,
5121 io_scalar_inband_t output,
5122 mach_msg_type_number_t * outputCount )
5123 {
5124 IOReturn err;
5125 uint32_t i;
5126 io_scalar_inband64_t _input;
5127 io_scalar_inband64_t _output;
5128
5129 mach_msg_type_number_t struct_outputCnt = 0;
5130 mach_vm_size_t ool_output_size = 0;
5131
5132 bzero(&_output[0], sizeof(_output));
5133 for (i = 0; i < inputCount; i++) {
5134 _input[i] = SCALAR64(input[i]);
5135 }
5136
5137 err = is_io_connect_method(connect, index,
5138 _input, inputCount,
5139 NULL, 0,
5140 0, 0,
5141 NULL, &struct_outputCnt,
5142 _output, outputCount,
5143 0, &ool_output_size);
5144
5145 for (i = 0; i < *outputCount; i++) {
5146 output[i] = SCALAR32(_output[i]);
5147 }
5148
5149 return err;
5150 }
5151
/*
 * Shim that invokes a pre-IOUserClient2022 external method taking only
 * scalar inputs and scalar outputs.  The IOExternalMethod table entry
 * dictates the expected input scalar count (count0) and output scalar
 * count (count1); any mismatch is logged, traced, and rejected with
 * kIOReturnBadArgument.  The scalars are marshalled into the legacy
 * void * argument slots of the IOMethod member-function pointer.
 */
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOMethod func;
	io_scalar_inband_t _output;
	IOReturn err;
	err = kIOReturnBadArgument;

	/* Pre-zero the local output scalars so unused slots copy out as 0. */
	bzero(&_output[0], sizeof(_output));
	do {
		/* Input scalar count must match the method table exactly. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Output scalar count must match as well. */
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * The legacy IOMethod takes six void * arguments: the first
		 * inputCount slots carry ARG32-truncated input scalars, the
		 * remaining slots receive pointers to the local output array.
		 */
		switch (inputCount) {
		case 6:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Copy the (32-bit) results into the caller's user-scalar array. */
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5227
5228 /* Routine io_async_method_scalarI_scalarO */
5229 kern_return_t
is_io_async_method_scalarI_scalarO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5230 is_io_async_method_scalarI_scalarO(
5231 io_object_t connect,
5232 mach_port_t wake_port,
5233 io_async_ref_t reference,
5234 mach_msg_type_number_t referenceCnt,
5235 uint32_t index,
5236 io_scalar_inband_t input,
5237 mach_msg_type_number_t inputCount,
5238 io_scalar_inband_t output,
5239 mach_msg_type_number_t * outputCount )
5240 {
5241 IOReturn err;
5242 uint32_t i;
5243 io_scalar_inband64_t _input;
5244 io_scalar_inband64_t _output;
5245 io_async_ref64_t _reference;
5246
5247 if (referenceCnt > ASYNC_REF64_COUNT) {
5248 return kIOReturnBadArgument;
5249 }
5250 bzero(&_output[0], sizeof(_output));
5251 for (i = 0; i < referenceCnt; i++) {
5252 _reference[i] = REF64(reference[i]);
5253 }
5254 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5255
5256 mach_msg_type_number_t struct_outputCnt = 0;
5257 mach_vm_size_t ool_output_size = 0;
5258
5259 for (i = 0; i < inputCount; i++) {
5260 _input[i] = SCALAR64(input[i]);
5261 }
5262
5263 err = is_io_connect_async_method(connect,
5264 wake_port, _reference, referenceCnt,
5265 index,
5266 _input, inputCount,
5267 NULL, 0,
5268 0, 0,
5269 NULL, &struct_outputCnt,
5270 _output, outputCount,
5271 0, &ool_output_size);
5272
5273 for (i = 0; i < *outputCount; i++) {
5274 output[i] = SCALAR32(_output[i]);
5275 }
5276
5277 return err;
5278 }
5279 /* Routine io_async_method_scalarI_structureO */
5280 kern_return_t
is_io_async_method_scalarI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5281 is_io_async_method_scalarI_structureO(
5282 io_object_t connect,
5283 mach_port_t wake_port,
5284 io_async_ref_t reference,
5285 mach_msg_type_number_t referenceCnt,
5286 uint32_t index,
5287 io_scalar_inband_t input,
5288 mach_msg_type_number_t inputCount,
5289 io_struct_inband_t output,
5290 mach_msg_type_number_t * outputCount )
5291 {
5292 uint32_t i;
5293 io_scalar_inband64_t _input;
5294 io_async_ref64_t _reference;
5295
5296 if (referenceCnt > ASYNC_REF64_COUNT) {
5297 return kIOReturnBadArgument;
5298 }
5299 for (i = 0; i < referenceCnt; i++) {
5300 _reference[i] = REF64(reference[i]);
5301 }
5302 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5303
5304 mach_msg_type_number_t scalar_outputCnt = 0;
5305 mach_vm_size_t ool_output_size = 0;
5306
5307 for (i = 0; i < inputCount; i++) {
5308 _input[i] = SCALAR64(input[i]);
5309 }
5310
5311 return is_io_connect_async_method(connect,
5312 wake_port, _reference, referenceCnt,
5313 index,
5314 _input, inputCount,
5315 NULL, 0,
5316 0, 0,
5317 output, outputCount,
5318 NULL, &scalar_outputCnt,
5319 0, &ool_output_size);
5320 }
5321
5322 /* Routine io_async_method_scalarI_structureI */
5323 kern_return_t
is_io_async_method_scalarI_structureI(io_connect_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5324 is_io_async_method_scalarI_structureI(
5325 io_connect_t connect,
5326 mach_port_t wake_port,
5327 io_async_ref_t reference,
5328 mach_msg_type_number_t referenceCnt,
5329 uint32_t index,
5330 io_scalar_inband_t input,
5331 mach_msg_type_number_t inputCount,
5332 io_struct_inband_t inputStruct,
5333 mach_msg_type_number_t inputStructCount )
5334 {
5335 uint32_t i;
5336 io_scalar_inband64_t _input;
5337 io_async_ref64_t _reference;
5338
5339 if (referenceCnt > ASYNC_REF64_COUNT) {
5340 return kIOReturnBadArgument;
5341 }
5342 for (i = 0; i < referenceCnt; i++) {
5343 _reference[i] = REF64(reference[i]);
5344 }
5345 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5346
5347 mach_msg_type_number_t scalar_outputCnt = 0;
5348 mach_msg_type_number_t inband_outputCnt = 0;
5349 mach_vm_size_t ool_output_size = 0;
5350
5351 for (i = 0; i < inputCount; i++) {
5352 _input[i] = SCALAR64(input[i]);
5353 }
5354
5355 return is_io_connect_async_method(connect,
5356 wake_port, _reference, referenceCnt,
5357 index,
5358 _input, inputCount,
5359 inputStruct, inputStructCount,
5360 0, 0,
5361 NULL, &inband_outputCnt,
5362 NULL, &scalar_outputCnt,
5363 0, &ool_output_size);
5364 }
5365
5366 /* Routine io_async_method_structureI_structureO */
5367 kern_return_t
is_io_async_method_structureI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5368 is_io_async_method_structureI_structureO(
5369 io_object_t connect,
5370 mach_port_t wake_port,
5371 io_async_ref_t reference,
5372 mach_msg_type_number_t referenceCnt,
5373 uint32_t index,
5374 io_struct_inband_t input,
5375 mach_msg_type_number_t inputCount,
5376 io_struct_inband_t output,
5377 mach_msg_type_number_t * outputCount )
5378 {
5379 uint32_t i;
5380 mach_msg_type_number_t scalar_outputCnt = 0;
5381 mach_vm_size_t ool_output_size = 0;
5382 io_async_ref64_t _reference;
5383
5384 if (referenceCnt > ASYNC_REF64_COUNT) {
5385 return kIOReturnBadArgument;
5386 }
5387 for (i = 0; i < referenceCnt; i++) {
5388 _reference[i] = REF64(reference[i]);
5389 }
5390 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5391
5392 return is_io_connect_async_method(connect,
5393 wake_port, _reference, referenceCnt,
5394 index,
5395 NULL, 0,
5396 input, inputCount,
5397 0, 0,
5398 output, outputCount,
5399 NULL, &scalar_outputCnt,
5400 0, &ool_output_size);
5401 }
5402
5403
/*
 * Shim that invokes a pre-IOUserClient2022 async external method taking
 * scalar inputs and scalar outputs.  The 64-bit async reference is
 * narrowed to the legacy 32-bit io_async_ref_t before dispatch.  Counts
 * are validated against the IOExternalAsyncMethod table entry (count0 =
 * input scalars, count1 = output scalars); mismatches are logged and
 * rejected with kIOReturnBadArgument.
 */
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	io_scalar_inband_t _output;
	IOReturn err;
	io_async_ref_t reference;

	/* Pre-zero output scalars; narrow the async reference to 32 bits. */
	bzero(&_output[0], sizeof(_output));
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	do {
		/* Input scalar count must match the method table exactly. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Output scalar count must match as well. */
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Legacy IOAsyncMethod: the async reference is the first
		 * argument, followed by six void * slots — ARG32-truncated
		 * input scalars first, output-scalar pointers after.
		 */
		switch (inputCount) {
		case 6:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Copy the (32-bit) results into the caller's user-scalar array. */
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5495
5496
5497 /* Routine io_connect_method_scalarI_structureO */
5498 kern_return_t
is_io_connect_method_scalarI_structureO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5499 is_io_connect_method_scalarI_structureO(
5500 io_object_t connect,
5501 uint32_t index,
5502 io_scalar_inband_t input,
5503 mach_msg_type_number_t inputCount,
5504 io_struct_inband_t output,
5505 mach_msg_type_number_t * outputCount )
5506 {
5507 uint32_t i;
5508 io_scalar_inband64_t _input;
5509
5510 mach_msg_type_number_t scalar_outputCnt = 0;
5511 mach_vm_size_t ool_output_size = 0;
5512
5513 for (i = 0; i < inputCount; i++) {
5514 _input[i] = SCALAR64(input[i]);
5515 }
5516
5517 return is_io_connect_method(connect, index,
5518 _input, inputCount,
5519 NULL, 0,
5520 0, 0,
5521 output, outputCount,
5522 NULL, &scalar_outputCnt,
5523 0, &ool_output_size);
5524 }
5525
/*
 * Shim that invokes a pre-IOUserClient2022 external method taking scalar
 * inputs and an inband output structure.  count0 is the expected scalar
 * count; count1 is the expected structure size, or
 * kIOUCVariableStructureSize when the method accepts any size.  The
 * outputCount pointer itself is passed into the method (as a void *) so
 * the method can report the actual size produced.
 */
kern_return_t
shim_io_connect_method_scalarI_structureO(

	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	IOMethod func;
	IOReturn err;

	err = kIOReturnBadArgument;

	do {
		/* Input scalar count must match the method table exactly. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Output size must match unless the method declares it variable. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * ARG32-truncated scalars fill the leading void * slots; the
		 * output buffer and (except for the 5-scalar case) the size
		 * pointer follow, with NULL padding the unused slots.
		 */
		switch (inputCount) {
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5590
5591
/*
 * Shim that invokes a pre-IOUserClient2022 async external method taking
 * scalar inputs and an inband output structure.  The 64-bit async
 * reference is narrowed to the legacy 32-bit io_async_ref_t before
 * dispatch.  count0 = expected scalar count; count1 = expected structure
 * size or kIOUCVariableStructureSize for any size.
 */
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	/* Narrow the async reference to the legacy 32-bit representation. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do {
		/* Input scalar count must match the method table exactly. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Output size must match unless the method declares it variable. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Async reference first, then ARG32-truncated scalars, then
		 * the output buffer and size pointer, NULL-padded.
		 */
		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5669
5670 /* Routine io_connect_method_scalarI_structureI */
5671 kern_return_t
is_io_connect_method_scalarI_structureI(io_connect_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5672 is_io_connect_method_scalarI_structureI(
5673 io_connect_t connect,
5674 uint32_t index,
5675 io_scalar_inband_t input,
5676 mach_msg_type_number_t inputCount,
5677 io_struct_inband_t inputStruct,
5678 mach_msg_type_number_t inputStructCount )
5679 {
5680 uint32_t i;
5681 io_scalar_inband64_t _input;
5682
5683 mach_msg_type_number_t scalar_outputCnt = 0;
5684 mach_msg_type_number_t inband_outputCnt = 0;
5685 mach_vm_size_t ool_output_size = 0;
5686
5687 for (i = 0; i < inputCount; i++) {
5688 _input[i] = SCALAR64(input[i]);
5689 }
5690
5691 return is_io_connect_method(connect, index,
5692 _input, inputCount,
5693 inputStruct, inputStructCount,
5694 0, 0,
5695 NULL, &inband_outputCnt,
5696 NULL, &scalar_outputCnt,
5697 0, &ool_output_size);
5698 }
5699
5700 kern_return_t
shim_io_connect_method_scalarI_structureI(IOExternalMethod * method,IOService * object,const io_user_scalar_t * input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5701 shim_io_connect_method_scalarI_structureI(
5702 IOExternalMethod * method,
5703 IOService * object,
5704 const io_user_scalar_t * input,
5705 mach_msg_type_number_t inputCount,
5706 io_struct_inband_t inputStruct,
5707 mach_msg_type_number_t inputStructCount )
5708 {
5709 IOMethod func;
5710 IOReturn err = kIOReturnBadArgument;
5711
5712 do{
5713 if (inputCount != method->count0) {
5714 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5715 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5716 continue;
5717 }
5718 if ((kIOUCVariableStructureSize != method->count1)
5719 && (inputStructCount != method->count1)) {
5720 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5721 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5722 continue;
5723 }
5724
5725 func = method->func;
5726
5727 switch (inputCount) {
5728 case 5:
5729 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5730 ARG32(input[3]), ARG32(input[4]),
5731 inputStruct );
5732 break;
5733 case 4:
5734 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
5735 ARG32(input[3]),
5736 inputStruct, (void *)(uintptr_t)inputStructCount );
5737 break;
5738 case 3:
5739 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5740 inputStruct, (void *)(uintptr_t)inputStructCount,
5741 NULL );
5742 break;
5743 case 2:
5744 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5745 inputStruct, (void *)(uintptr_t)inputStructCount,
5746 NULL, NULL );
5747 break;
5748 case 1:
5749 err = (object->*func)( ARG32(input[0]),
5750 inputStruct, (void *)(uintptr_t)inputStructCount,
5751 NULL, NULL, NULL );
5752 break;
5753 case 0:
5754 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
5755 NULL, NULL, NULL, NULL );
5756 break;
5757
5758 default:
5759 IOLog("%s: Bad method table\n", object->getName());
5760 }
5761 }while (false);
5762
5763 return err;
5764 }
5765
/*
 * Shim that invokes a pre-IOUserClient2022 async external method taking
 * scalar inputs and an inband input structure.  The 64-bit async
 * reference is narrowed to the legacy 32-bit io_async_ref_t before
 * dispatch.  count0 = expected scalar count; count1 = expected input
 * structure size or kIOUCVariableStructureSize for any size.
 */
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err = kIOReturnBadArgument;
	io_async_ref_t reference;

	/* Narrow the async reference to the legacy 32-bit representation. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	do{
		/* Input scalar count must match the method table exactly. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Struct size must match unless the method declares it variable. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Async reference first, then ARG32-truncated scalars, then
		 * the struct buffer and its size, NULL-padded to six args.
		 */
		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5846
5847 /* Routine io_connect_method_structureI_structureO */
5848 kern_return_t
is_io_connect_method_structureI_structureO(io_object_t connect,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5849 is_io_connect_method_structureI_structureO(
5850 io_object_t connect,
5851 uint32_t index,
5852 io_struct_inband_t input,
5853 mach_msg_type_number_t inputCount,
5854 io_struct_inband_t output,
5855 mach_msg_type_number_t * outputCount )
5856 {
5857 mach_msg_type_number_t scalar_outputCnt = 0;
5858 mach_vm_size_t ool_output_size = 0;
5859
5860 return is_io_connect_method(connect, index,
5861 NULL, 0,
5862 input, inputCount,
5863 0, 0,
5864 output, outputCount,
5865 NULL, &scalar_outputCnt,
5866 0, &ool_output_size);
5867 }
5868
/*
 * Shim that invokes a pre-IOUserClient2022 external method taking an
 * inband input structure and/or an inband output structure.  count0 /
 * count1 give the expected input/output structure sizes (or
 * kIOUCVariableStructureSize for variable size); a zero count also
 * selects which argument arrangement the legacy method expects.
 */
kern_return_t
shim_io_connect_method_structureI_structureO(
	IOExternalMethod * method,
	IOService * object,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	IOMethod func;
	IOReturn err = kIOReturnBadArgument;

	do{
		/* Input size must match unless the method declares it variable. */
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Output size must match unless the method declares it variable. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Argument arrangement depends on which structures the method
		 * declares: both (input, output, sizes), output-only, or
		 * input-only.
		 */
		if (method->count1) {
			if (method->count0) {
				err = (object->*func)( input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);


	return err;
}
5912
/*
 * Shim that invokes a pre-IOUserClient2022 async external method taking
 * an inband input structure and/or an inband output structure.  The
 * 64-bit async reference is narrowed to the legacy 32-bit io_async_ref_t
 * before dispatch; count0 / count1 validation mirrors the synchronous
 * shim above.
 */
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	/* Narrow the async reference to the legacy 32-bit representation. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do{
		/* Input size must match unless the method declares it variable. */
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* Output size must match unless the method declares it variable. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Argument arrangement depends on which structures the method
		 * declares; the async reference is always the first argument.
		 */
		if (method->count1) {
			if (method->count0) {
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
5968
/* Routine io_catalog_send_data */
/*
 * MIG handler that accepts serialized driver-personality data from user
 * space and applies the requested operation (add / remove / reset) to the
 * global IOCatalogue.  inData arrives as a Mach OOL vm_map_copy_t; it is
 * copied into the kernel map, unserialized with OSUnserializeXML, and the
 * resulting collection handed to gIOCatalogue.  The operation's status is
 * reported through *result; the MIG return value itself is KERN_SUCCESS
 * whenever the OOL data was consumed (MIG ownership rules).
 */
kern_return_t
is_io_catalog_send_data(
	mach_port_t main_port,
	uint32_t flag,
	io_buf_ptr_t inData,
	mach_msg_type_number_t inDataCount,
	kern_return_t * result)
{
	// Allow sending catalog data if there is no kextd and the kernel is DEVELOPMENT || DEBUG
#if NO_KEXTD && !(DEVELOPMENT || DEBUG)
	return kIOReturnNotPrivileged;
#else /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
	OSObject * obj = NULL;
	vm_offset_t data;
	kern_return_t kr = kIOReturnError;

	//printf("io_catalog_send_data called. flag: %d\n", flag);

	/* Only the main device port may manipulate the catalogue. */
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	/* Every flag except the kextd-status notifications requires payload. */
	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
	    flag != kIOCatalogKextdActive &&
	    flag != kIOCatalogKextdFinishedLaunching) &&
	    (!inData || !inDataCount)) {
		return kIOReturnBadArgument;
	}

	if (!IOCurrentTaskHasEntitlement(kIOCatalogManagementEntitlement)) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);
		// For now, fake success to not break applications relying on this function succeeding.
		// See <rdar://problem/32554970> for more details.
		return kIOReturnSuccess;
	}

	if (inData) {
		vm_map_offset_t map_data;

		/* Reject oversized payloads before touching the VM copy. */
		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
			return kIOReturnMessageTooLarge;
		}

		/* Materialize the OOL data in the kernel map; this consumes inData. */
		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		// must return success after vm_map_copyout() succeeds

		if (inDataCount) {
			/* Parse the serialized personalities; errors go via *result. */
			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
			vm_deallocate( kernel_map, data, inDataCount );
			if (!obj) {
				*result = kIOReturnNoMemory;
				return KERN_SUCCESS;
			}
		}
	}

	switch (flag) {
	case kIOCatalogResetDrivers:
	case kIOCatalogResetDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			/* NoMatch variant defers matching; plain variant rematches. */
			if (!gIOCatalogue->resetAndAddDrivers(array,
			    flag == kIOCatalogResetDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogAddDrivers:
	case kIOCatalogAddDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->addDrivers( array,
			    flag == kIOCatalogAddDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveDrivers:
	case kIOCatalogRemoveDriversNoMatch: {
		OSDictionary * dict;

		/* Removal takes a matching dictionary, not an array. */
		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->removeDrivers( dict,
			    flag == kIOCatalogRemoveDrivers )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	/* Obsolete / informational flags: acknowledged but unsupported. */
	case kIOCatalogStartMatching__Removed:
	case kIOCatalogRemoveKernelLinker__Removed:
	case kIOCatalogKextdActive:
	case kIOCatalogKextdFinishedLaunching:
		kr = KERN_NOT_SUPPORTED;
		break;

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	if (obj) {
		obj->release();
	}

	/* Operation status travels via *result; MIG itself succeeded. */
	*result = kr;
	return KERN_SUCCESS;
#endif /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
}
6103
6104 /* Routine io_catalog_terminate */
6105 kern_return_t
is_io_catalog_terminate(mach_port_t main_port,uint32_t flag,io_name_t name)6106 is_io_catalog_terminate(
6107 mach_port_t main_port,
6108 uint32_t flag,
6109 io_name_t name )
6110 {
6111 kern_return_t kr;
6112
6113 if (main_port != main_device_port) {
6114 return kIOReturnNotPrivileged;
6115 }
6116
6117 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6118 kIOClientPrivilegeAdministrator );
6119 if (kIOReturnSuccess != kr) {
6120 return kr;
6121 }
6122
6123 switch (flag) {
6124 #if !defined(SECURE_KERNEL)
6125 case kIOCatalogServiceTerminate:
6126 kr = gIOCatalogue->terminateDrivers(NULL, name, false);
6127 break;
6128
6129 case kIOCatalogModuleUnload:
6130 case kIOCatalogModuleTerminate:
6131 kr = gIOCatalogue->terminateDriversForModule(name,
6132 flag == kIOCatalogModuleUnload);
6133 break;
6134 #endif
6135
6136 default:
6137 kr = kIOReturnBadArgument;
6138 break;
6139 }
6140
6141 return kr;
6142 }
6143
/* Routine io_catalog_get_data */
/*
 * MIG handler: serialize the catalogue state selected by 'flag' and return it
 * to user space as an out-of-line buffer (*outData / *outDataCount).
 */
kern_return_t
is_io_catalog_get_data(
	mach_port_t main_port,
	uint32_t flag,
	io_buf_ptr_t *outData,
	mach_msg_type_number_t *outDataCount)
{
	kern_return_t kr = kIOReturnSuccess;
	OSSerialize * s;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	//printf("io_catalog_get_data called. flag: %d\n", flag);

	s = OSSerialize::withCapacity(4096);
	if (!s) {
		return kIOReturnNoMemory;
	}

	kr = gIOCatalogue->serializeData(flag, s);

	if (kr == kIOReturnSuccess) {
		mach_vm_address_t data;
		vm_map_copy_t copy;
		unsigned int size;

		size = s->getLength();
		// Stage the serialized bytes in freshly allocated kernel memory, then
		// turn that region into a copy object for the MIG reply. src_destroy
		// (the 'true' argument) hands ownership of the staging pages to the
		// copy object on success.
		kr = mach_vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
		if (kr == kIOReturnSuccess) {
			bcopy(s->text(), (void *)data, size);
			kr = vm_map_copyin(kernel_map, data, size, true, &copy);
			// Out-parameters are only meaningful when kr is success; MIG
			// ignores them otherwise.
			// NOTE(review): if vm_map_copyin() fails, the staging allocation
			// is not explicitly deallocated here — confirm the failure path
			// cannot leak the region.
			*outData = (char *)copy;
			*outDataCount = size;
		}
	}

	s->release();

	return kr;
}
6187
6188 /* Routine io_catalog_get_gen_count */
6189 kern_return_t
is_io_catalog_get_gen_count(mach_port_t main_port,uint32_t * genCount)6190 is_io_catalog_get_gen_count(
6191 mach_port_t main_port,
6192 uint32_t *genCount)
6193 {
6194 if (main_port != main_device_port) {
6195 return kIOReturnNotPrivileged;
6196 }
6197
6198 //printf("io_catalog_get_gen_count called.\n");
6199
6200 if (!genCount) {
6201 return kIOReturnBadArgument;
6202 }
6203
6204 *genCount = gIOCatalogue->getGenerationCount();
6205
6206 return kIOReturnSuccess;
6207 }
6208
6209 /* Routine io_catalog_module_loaded.
6210 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
6211 */
6212 kern_return_t
is_io_catalog_module_loaded(mach_port_t main_port,io_name_t name)6213 is_io_catalog_module_loaded(
6214 mach_port_t main_port,
6215 io_name_t name)
6216 {
6217 if (main_port != main_device_port) {
6218 return kIOReturnNotPrivileged;
6219 }
6220
6221 //printf("io_catalog_module_loaded called. name %s\n", name);
6222
6223 if (!name) {
6224 return kIOReturnBadArgument;
6225 }
6226
6227 gIOCatalogue->moduleHasLoaded(name);
6228
6229 return kIOReturnSuccess;
6230 }
6231
6232 kern_return_t
is_io_catalog_reset(mach_port_t main_port,uint32_t flag)6233 is_io_catalog_reset(
6234 mach_port_t main_port,
6235 uint32_t flag)
6236 {
6237 if (main_port != main_device_port) {
6238 return kIOReturnNotPrivileged;
6239 }
6240
6241 switch (flag) {
6242 case kIOCatalogResetDefault:
6243 gIOCatalogue->reset();
6244 break;
6245
6246 default:
6247 return kIOReturnBadArgument;
6248 }
6249
6250 return kIOReturnSuccess;
6251 }
6252
/*
 * Trap entry point for user-client method invocation.
 * args->userClientRef is either a legacy connect port name or a DriverKit
 * (uext) reference; the low bits and bit 32 select which path is taken.
 */
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t result = kIOReturnBadArgument;
	IOUserClient * userClient;
	OSObject * object;
	uintptr_t ref;
	mach_port_name_t portName;

	ref = (uintptr_t) args->userClientRef;

	if ((ref == MACH_PORT_DEAD) || (ref == (uintptr_t) MACH_PORT_NULL)) {
		return kIOReturnBadArgument;
	}
	// kobject port names always have b0-1 set, so we use these bits as flags to
	// iokit_user_client_trap()
	// keep this up to date with ipc_entry_name_mask();
	portName = (mach_port_name_t) (ref | 3);
	// Bit 32 set, or bit 0 clear, selects the DriverKit (uext) trap path.
	if (((1ULL << 32) & ref) || !(1 & ref)) {
		object = iokit_lookup_uext_ref_current_task(portName);
		if (object) {
			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
		}
		OSSafeReleaseNULL(object);
	} else {
		// Legacy path: resolve the connect port to an IOUserClient, apply any
		// installed filter policy, then invoke the indexed trap function.
		io_object_t ref_current_task = iokit_lookup_connect_ref_current_task((mach_port_name_t) ref);
		if ((userClient = OSDynamicCast(IOUserClient, ref_current_task))) {
			IOExternalTrap *trap = NULL;
			IOService *target = NULL;

			result = kIOReturnSuccess;
			io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
			if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
				result = gIOUCFilterCallbacks->io_filter_applier(userClient, filterPolicy, io_filter_type_trap, args->index);
			}
			if (kIOReturnSuccess == result) {
				trap = userClient->getTargetAndTrapForIndex(&target, args->index);
			}
			if (trap && target) {
				IOTrap func;

				func = trap->func;

				if (func) {
					// Member-function-pointer call on the trap's target service.
					result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
				}
			}

			// Drop the reference taken by the connect-ref lookup.
			iokit_remove_connect_reference(userClient);
		} else {
			// Lookup produced something that is not an IOUserClient; release it.
			OSSafeReleaseNULL(ref_current_task);
		}
	}

	return result;
}
6309
6310 /* Routine io_device_tree_entry_exists_with_name */
6311 kern_return_t
is_io_device_tree_entry_exists_with_name(mach_port_t main_port,io_name_t name,boolean_t * exists)6312 is_io_device_tree_entry_exists_with_name(
6313 mach_port_t main_port,
6314 io_name_t name,
6315 boolean_t *exists )
6316 {
6317 OSCollectionIterator *iter;
6318
6319 if (main_port != main_device_port) {
6320 return kIOReturnNotPrivileged;
6321 }
6322
6323 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6324 *exists = iter && iter->getNextObject();
6325 OSSafeReleaseNULL(iter);
6326
6327 return kIOReturnSuccess;
6328 }
6329 } /* extern "C" */
6330
6331 IOReturn
callExternalMethod(uint32_t selector,IOExternalMethodArguments * args)6332 IOUserClient::callExternalMethod(uint32_t selector, IOExternalMethodArguments * args)
6333 {
6334 IOReturn ret;
6335
6336 if (defaultLocking) {
6337 if (defaultLockingSingleThreadExternalMethod) {
6338 IORWLockWrite(lock);
6339 } else {
6340 IORWLockRead(lock);
6341 }
6342 }
6343 if (uc2022) {
6344 ret = ((IOUserClient2022 *) this)->externalMethod(selector, (IOExternalMethodArgumentsOpaque *) args);
6345 } else {
6346 ret = externalMethod(selector, args);
6347 }
6348 if (defaultLocking) {
6349 IORWLockUnlock(lock);
6350 }
6351 return ret;
6352 }
6353
/*
 * Legacy dispatch-table overload, intentionally unusable for IOUserClient2022:
 * subclasses must implement the opaque-arguments externalMethod() instead.
 * panic() does not return, so the missing return value is harmless.
 */
MIG_SERVER_ROUTINE IOReturn
IOUserClient2022::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
    IOExternalMethodDispatch *dispatch,
    OSObject *target, void *reference)
{
	panic("wrong externalMethod for IOUserClient2022");
}
6361
6362 IOReturn
dispatchExternalMethod(uint32_t selector,IOExternalMethodArgumentsOpaque * arguments,const IOExternalMethodDispatch2022 dispatchArray[],size_t dispatchArrayCount,OSObject * target,void * reference)6363 IOUserClient2022::dispatchExternalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque *arguments,
6364 const IOExternalMethodDispatch2022 dispatchArray[], size_t dispatchArrayCount,
6365 OSObject * target, void * reference)
6366 {
6367 IOReturn err;
6368 IOExternalMethodArguments * args = (typeof(args))arguments;
6369 const IOExternalMethodDispatch2022 * dispatch;
6370
6371 if (!dispatchArray) {
6372 return kIOReturnError;
6373 }
6374 if (selector >= dispatchArrayCount) {
6375 return kIOReturnBadArgument;
6376 }
6377 dispatch = &dispatchArray[selector];
6378
6379 uint32_t count;
6380 count = dispatch->checkScalarInputCount;
6381 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6382 return kIOReturnBadArgument;
6383 }
6384
6385 count = dispatch->checkStructureInputSize;
6386 if ((kIOUCVariableStructureSize != count)
6387 && (count != ((args->structureInputDescriptor)
6388 ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6389 return kIOReturnBadArgument;
6390 }
6391
6392 count = dispatch->checkScalarOutputCount;
6393 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6394 return kIOReturnBadArgument;
6395 }
6396
6397 count = dispatch->checkStructureOutputSize;
6398 if ((kIOUCVariableStructureSize != count)
6399 && (count != ((args->structureOutputDescriptor)
6400 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6401 return kIOReturnBadArgument;
6402 }
6403
6404 if (args->asyncWakePort && !dispatch->allowAsync) {
6405 return kIOReturnBadArgument;
6406 }
6407
6408 if (dispatch->checkEntitlement) {
6409 if (!IOCurrentTaskHasEntitlement(dispatch->checkEntitlement)) {
6410 return kIOReturnNotPrivileged;
6411 }
6412 }
6413
6414 if (dispatch->function) {
6415 err = (*dispatch->function)(target, reference, args);
6416 } else {
6417 err = kIOReturnNoCompletion; /* implementer can dispatch */
6418 }
6419 return err;
6420 }
6421
/*
 * Default external-method dispatcher for pre-IOUserClient2022 clients.
 *
 * When a dispatch entry is supplied, validates the caller's argument
 * counts/sizes against it (kIOUCVariableStructureSize disables a check) and
 * calls the entry's function. Otherwise falls back to the legacy
 * getTargetAndMethodForIndex() / getAsyncTargetAndMethodForIndex() shims,
 * which do not support out-of-line (descriptor-based) structures.
 */
IOReturn
IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
	IOReturn err;
	IOService * object;
	IOByteCount structureOutputSize;

	if (dispatch) {
		uint32_t count;
		count = dispatch->checkScalarInputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
			return kIOReturnBadArgument;
		}

		// Structure input may arrive inline or via a memory descriptor.
		count = dispatch->checkStructureInputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureInputDescriptor)
		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkScalarOutputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureOutputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureOutputDescriptor)
		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
			return kIOReturnBadArgument;
		}

		if (dispatch->function) {
			err = (*dispatch->function)(target, reference, args);
		} else {
			err = kIOReturnNoCompletion; /* implementer can dispatch */
		}
		return err;
	}


	// pre-Leopard API's don't do ool structs
	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
		err = kIOReturnIPCError;
		return err;
	}

	// Captured before dispatch; the sync shims write through this local.
	structureOutputSize = args->structureOutputSize;

	if (args->asyncWakePort) {
		// Async variant: the wake port and async reference are forwarded to
		// the shim alongside the regular arguments.
		IOExternalAsyncMethod * method;
		object = NULL;
		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		// Route by the method's declared argument shape.
		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_async_method_scalarI_structureI( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *)args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_async_method_scalarI_scalarO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_async_method_scalarI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_async_method_structureI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    (char *)args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	} else {
		// Synchronous variant.
		IOExternalMethod * method;
		object = NULL;
		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_connect_method_scalarI_structureI( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_connect_method_scalarI_scalarO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_connect_method_scalarI_structureO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_connect_method_structureI_structureO( method, object,
			    (char *) args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	}

	// structureOutputSize is IOByteCount (wider than the 32-bit args field);
	// reject anything that would not fit.
	if (structureOutputSize > UINT_MAX) {
		structureOutputSize = 0;
		err = kIOReturnBadArgument;
	}

	// NOTE(review): the async shims write through &args->structureOutputSize
	// directly, so this assignment rewrites that field with the value captured
	// before dispatch — confirm this is intentional for the async path.
	args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);

	return err;
}
6574
6575 IOReturn
registerFilterCallbacks(const struct io_filter_callbacks * callbacks,size_t size)6576 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6577 {
6578 if (size < sizeof(*callbacks)) {
6579 return kIOReturnBadArgument;
6580 }
6581 if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6582 return kIOReturnBusy;
6583 }
6584 return kIOReturnSuccess;
6585 }
6586
6587
/* Reserved vtable slots kept for future binary-compatible expansion. */
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);

OSMetaClassDefineReservedUnused(IOUserClient2022, 0);
OSMetaClassDefineReservedUnused(IOUserClient2022, 1);
OSMetaClassDefineReservedUnused(IOUserClient2022, 2);
OSMetaClassDefineReservedUnused(IOUserClient2022, 3);
6609