1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <libkern/c++/OSKext.h>
30 #include <libkern/c++/OSSharedPtr.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51 #include <sys/code_signing.h>
52
53 #include <mach/sdt.h>
54 #include <os/hash.h>
55
56 #include <libkern/amfi/amfi.h>
57
58 #if CONFIG_MACF
59
60 extern "C" {
61 #include <security/mac_framework.h>
62 };
63 #include <sys/kauth.h>
64
65 #define IOMACF_LOG 0
66
67 #endif /* CONFIG_MACF */
68
69 #include <IOKit/assert.h>
70
71 #include "IOServicePrivate.h"
72 #include "IOKitKernelInternal.h"
73
74 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
75 #define SCALAR32(x) ((uint32_t )x)
76 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
77 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
78 #define REF32(x) ((int)(x))
79
// Flag bits encoded in the low bits of an async reference word.
enum{
	kIOUCAsync0Flags = 3ULL,          // mask covering both flag bits below
	kIOUCAsync64Flag = 1ULL,          // async reference uses 64-bit user values
	kIOUCAsyncErrorLoggedFlag = 2ULL  // an error for this reference was already logged
};
85
#if IOKITSTATS

/* Register this user client with IOStatistics; counter lives in `reserved`. */
#define IOStatisticsRegisterCounter() \
do { \
	reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

/* Unregister the counter; tolerates a NULL `reserved` (init failure path). */
#define IOStatisticsUnregisterCounter() \
do { \
	if (reserved) \
	        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

/* Count one external call on `client` (a local variable at each use site). */
#define IOStatisticsClientCall() \
do { \
	IOStatistics::countUserClientCall(client); \
} while (0)

#else

/* Statistics disabled: all three compile to nothing. */
#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */
111
#if DEVELOPMENT || DEBUG

/*
 * Temporarily overwrite this frame's saved return address with `a`, so
 * backtraces taken inside the callee attribute the call to `a`.
 * Must be paired with FAKE_STACK_FRAME_END() before returning.
 */
#define FAKE_STACK_FRAME(a)     \
	const void ** __frameptr; \
	const void * __retaddr; \
	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
	__retaddr = __frameptr[1]; \
	__frameptr[1] = (a);

/* Restore the saved return address stashed by FAKE_STACK_FRAME(). */
#define FAKE_STACK_FRAME_END() \
	__frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */

/* Element counts of the 32-bit and 64-bit async reference arrays. */
#define ASYNC_REF_COUNT         (sizeof(io_async_ref_t) / sizeof(natural_t))
#define ASYNC_REF64_COUNT       (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
133
134 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
135
136 extern "C" {
137 #include <mach/mach_traps.h>
138 #include <vm/vm_map.h>
139 } /* extern "C" */
140
141 struct IOMachPortHashList;
142
143 static_assert(IKOT_MAX_TYPE <= 255);
144
145 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
146
147 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
class IOMachPort : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	mach_port_mscount_t mscount;   // make-send count handed out for `port`
	IOLock lock;                   // guards `object` against concurrent teardown
	SLIST_ENTRY(IOMachPort) link;  // membership in a gIOMachPortHash bucket
	ipc_port_t port;               // kobject port representing `object`
	OSObject* XNU_PTRAUTH_SIGNED_PTR("IOMachPort.object") object; // wrapped object, NULL once the port is torn down

	// Allocate a wrapper + port for obj; takes a tagged retain on obj.
	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);

	// Hash bucket for obj (selected by object pointer only).
	static IOMachPortHashList* bucketForObject(OSObject *obj,
	    ipc_kobject_type_t type);

	// Find an existing wrapper for (obj, type) in a bucket, or NULL.
	static LIBKERN_RETURNS_NOT_RETAINED IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	// No-senders processing; returns true if the port was destroyed.
	static bool noMoreSendersForObject( OSObject * obj,
	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
	// Drop a non-connect port for obj, if any.
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );

	// Create a send right for obj's port in `task`.
	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;
};
175
#define super OSObject
OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)

// Guards gIOMachPortHash membership and all IOMachPort list manipulation.
static IOLock * gIOObjectPortLock;
IOLock * gIOUserServerLock;

// Optional user-client filter callbacks; write-protected after early boot.
SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;
183
184 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
185
// Hash table of IOMachPort wrappers, bucketed by hashed object pointer.
SLIST_HEAD(IOMachPortHashList, IOMachPort);

#if defined(XNU_TARGET_OS_OSX)
#define PORT_HASH_SIZE 4096
#else /* !defined(XNU_TARGET_OS_OSX) */
#define PORT_HASH_SIZE 256
#endif /* defined(XNU_TARGET_OS_OSX) */

IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
195
196 void
IOMachPortInitialize(void)197 IOMachPortInitialize(void)
198 {
199 for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
200 SLIST_INIT(&gIOMachPortHash[i]);
201 }
202 }
203
204 IOMachPortHashList*
bucketForObject(OSObject * obj,ipc_kobject_type_t type)205 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
206 {
207 return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
208 }
209
210 IOMachPort*
portForObjectInBucket(IOMachPortHashList * bucket,OSObject * obj,ipc_kobject_type_t type)211 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
212 {
213 IOMachPort *machPort;
214
215 SLIST_FOREACH(machPort, bucket, link) {
216 if (machPort->object == obj && iokit_port_type(machPort->port) == type) {
217 return machPort;
218 }
219 }
220 return NULL;
221 }
222
223 IOMachPort*
withObjectAndType(OSObject * obj,ipc_kobject_type_t type)224 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
225 {
226 IOMachPort *machPort = NULL;
227
228 machPort = new IOMachPort;
229 if (__improbable(machPort && !machPort->init())) {
230 OSSafeReleaseNULL(machPort);
231 return NULL;
232 }
233
234 machPort->object = obj;
235 machPort->port = iokit_alloc_object_port(machPort, type);
236 IOLockInlineInit(&machPort->lock);
237
238 obj->taggedRetain(OSTypeID(OSCollection));
239 machPort->mscount++;
240
241 return machPort;
242 }
243
/*
 * No-senders processing for an object's port.
 * Returns true when the port was actually destroyed; false when more
 * send rights were created after the notification was generated, in
 * which case *mscount is updated so the caller can re-arm.
 */
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOMachPort *machPort = NULL;
	IOUserClient *uc;
	OSAction *action;
	bool destroyed = true;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Keep obj alive while its tagged reference may be dropped below.
	obj->retain();

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort) {
		// A larger make-send count than the notification carried means
		// new send rights raced the no-senders notification.
		destroyed = (machPort->mscount <= *mscount);
		if (!destroyed) {
			*mscount = machPort->mscount;
			lck_mtx_unlock(gIOObjectPortLock);
		} else {
			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
				uc->noMoreSenders();
			}
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);

			// Sever the port->object link under the per-port lock so
			// concurrent lookups see either a valid object or NULL.
			IOLockLock(&machPort->lock);
			iokit_remove_object_port(machPort->port, type);
			machPort->object = NULL;
			IOLockUnlock(&machPort->lock);

			lck_mtx_unlock(gIOObjectPortLock);

			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

			// Drop the tagged reference taken in withObjectAndType().
			obj->taggedRelease(OSTypeID(OSCollection));
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}

	// DriverKit OSAction objects get an abort callback when their last
	// send right disappears.
	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
		action->Aborted();
	}

	if (IKOT_UEXT_OBJECT == type && IOUserServer::shouldLeakObjects()) {
		// Leak object
		obj->retain();
	}

	obj->release();

	return destroyed;
}
300
/*
 * Drop the IKOT_IOKIT_OBJECT port for `obj`, if one exists.
 * Connect-type ports must not come through here (asserted); services
 * that have set machPortHoldDestroy keep their port.
 */
void
IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
	IOMachPort *machPort;
	IOService *service;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort
	    && (type == IKOT_IOKIT_OBJECT)
	    && (service = OSDynamicCast(IOService, obj))
	    && !service->machPortHoldDestroy()) {
		obj->retain();
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);

		// Sever the port->object link under the per-port lock.
		IOLockLock(&machPort->lock);
		iokit_remove_object_port(machPort->port, type);
		machPort->object = NULL;
		IOLockUnlock(&machPort->lock);

		lck_mtx_unlock(gIOObjectPortLock);

		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

		// Tagged reference from withObjectAndType(), then our temporary.
		obj->taggedRelease(OSTypeID(OSCollection));
		obj->release();
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}
}
337
/*
 * Tear down the ports user space holds for `obj`.
 * The plain object port is released outright; for user clients the
 * connect port is either destroyed or re-targeted at the client's
 * `mappings` object so outstanding memory mappings stay reachable.
 */
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	IOMachPort *machPort;
	bool destroyPort;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	// panther, 3160200
	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

	obj->retain();
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
	IOMachPortHashList *mappingBucket = NULL;

	lck_mtx_lock(gIOObjectPortLock);

	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	if (uc && uc->mappings) {
		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
	}

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		lck_mtx_unlock(gIOObjectPortLock);
		goto end;
	}

	// Unhash the wrapper and drop the tagged reference it held on obj.
	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	obj->taggedRelease(OSTypeID(OSCollection));

	destroyPort = true;
	if (uc) {
		uc->noMoreSenders();
		if (uc->mappings) {
			// Transfer the port to the mappings object: rehash the
			// wrapper into the mappings bucket and swap its object.
			uc->mappings->taggedRetain(OSTypeID(OSCollection));
			SLIST_INSERT_HEAD(mappingBucket, machPort, link);

			IOLockLock(&machPort->lock);
			machPort->object = uc->mappings;
			IOLockUnlock(&machPort->lock);

			lck_mtx_unlock(gIOObjectPortLock);

			OSSafeReleaseNULL(uc->mappings);
			destroyPort = false;
		}
	}

	if (destroyPort) {
		IOLockLock(&machPort->lock);
		iokit_remove_object_port(machPort->port, IKOT_IOKIT_CONNECT);
		machPort->object = NULL;
		IOLockUnlock(&machPort->lock);

		lck_mtx_unlock(gIOObjectPortLock);
		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
	}

end:
	OSSafeReleaseNULL(obj);
}
401
402 mach_port_name_t
makeSendRightForTask(task_t task,io_object_t obj,ipc_kobject_type_t type)403 IOMachPort::makeSendRightForTask( task_t task,
404 io_object_t obj, ipc_kobject_type_t type )
405 {
406 return iokit_make_send_right( task, obj, type );
407 }
408
409 void
free(void)410 IOMachPort::free( void )
411 {
412 if (port) {
413 iokit_destroy_object_port(port, iokit_port_type(port));
414 }
415 IOLockInlineDestroy(&lock);
416 super::free();
417 }
418
419 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
420
421 static bool
IOTaskRegistryCompatibility(task_t task)422 IOTaskRegistryCompatibility(task_t task)
423 {
424 return false;
425 }
426
427 static void
IOTaskRegistryCompatibilityMatching(task_t task,OSDictionary * matching)428 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
429 {
430 matching->setObject(gIOServiceNotificationUserKey, kOSBooleanTrue);
431 if (!IOTaskRegistryCompatibility(task)) {
432 return;
433 }
434 matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
435 }
436
437 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
438
OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )

/*
 * Wrap an OSIterator for export to user space.
 * Ownership of `iter` transfers to the wrapper: on success the returned
 * object holds the caller's reference (released in free()); on failure
 * the reference is consumed here and NULL is returned.
 */
IOUserIterator *
IOUserIterator::withIterator(OSIterator * iter)
{
	IOUserIterator * me;

	if (!iter) {
		return NULL;
	}

	me = new IOUserIterator;
	if (me && !me->init()) {
		me->release();
		me = NULL;
	}
	if (!me) {
		// Still consume the caller's reference on failure.
		iter->release();
		return me;
	}
	me->userIteratorObject = iter;

	return me;
}
463
464 bool
init(void)465 IOUserIterator::init( void )
466 {
467 if (!OSObject::init()) {
468 return false;
469 }
470
471 IOLockInlineInit(&lock);
472 return true;
473 }
474
475 void
free()476 IOUserIterator::free()
477 {
478 if (userIteratorObject) {
479 userIteratorObject->release();
480 }
481 IOLockInlineDestroy(&lock);
482 OSObject::free();
483 }
484
485 void
reset()486 IOUserIterator::reset()
487 {
488 IOLockLock(&lock);
489 assert(OSDynamicCast(OSIterator, userIteratorObject));
490 ((OSIterator *)userIteratorObject)->reset();
491 IOLockUnlock(&lock);
492 }
493
494 bool
isValid()495 IOUserIterator::isValid()
496 {
497 bool ret;
498
499 IOLockLock(&lock);
500 assert(OSDynamicCast(OSIterator, userIteratorObject));
501 ret = ((OSIterator *)userIteratorObject)->isValid();
502 IOLockUnlock(&lock);
503
504 return ret;
505 }
506
OSObject *
IOUserIterator::getNextObject()
{
	// Unsupported on purpose: callers must use copyNextObject(), which
	// returns a retained object under the lock.
	assert(false);
	return NULL;
}
513
514 OSObject *
copyNextObject()515 IOUserIterator::copyNextObject()
516 {
517 OSObject * ret = NULL;
518
519 IOLockLock(&lock);
520 if (userIteratorObject) {
521 ret = ((OSIterator *)userIteratorObject)->getNextObject();
522 if (ret) {
523 ret->retain();
524 }
525 }
526 IOLockUnlock(&lock);
527
528 return ret;
529 }
530
531 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
532 extern "C" {
533 // functions called from osfmk/device/iokit_rpc.c
534
/*
 * Build a human-readable description of the object behind a port for
 * debugging tools. Registry entries are described as class(registryID);
 * on DEVELOPMENT/DEBUG kernels service notifications also include their
 * serialized matching dictionary.
 */
void
iokit_port_object_description(io_object_t obj, kobject_description_t desc)
{
	IORegistryEntry * regEntry;
	IOUserNotification * __unused noti;
	_IOServiceNotifier * __unused serviceNoti;
	OSSerialize * __unused s;
	OSDictionary * __unused matching = NULL;

	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
#if DEVELOPMENT || DEBUG
	} else if ((noti = OSDynamicCast(IOUserNotification, obj))) {
		// serviceNoti->matching may become NULL if the port gets a no-senders notification, so we have to lock gIOObjectPortLock
		IOLockLock(gIOObjectPortLock);
		serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->userIteratorObject);
		if (serviceNoti && (matching = serviceNoti->matching)) {
			matching->retain();
		}
		IOLockUnlock(gIOObjectPortLock);

		if (matching) {
			// Serialize outside the lock; failure just omits the dict.
			s = OSSerialize::withCapacity((unsigned int) page_size);
			if (s && matching->serialize(s)) {
				snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
			}
			OSSafeReleaseNULL(s);
			OSSafeReleaseNULL(matching);
		}
#endif /* DEVELOPMENT || DEBUG */
	} else {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
	}
}
569
570 // FIXME: Implementation of these functions are hidden from the static analyzer.
571 // As for now, the analyzer doesn't consistently support wrapper functions
572 // for retain and release.
573 #ifndef __clang_analyzer__
574 void
iokit_add_reference(io_object_t obj,natural_t type)575 iokit_add_reference( io_object_t obj, natural_t type )
576 {
577 if (!obj) {
578 return;
579 }
580 obj->retain();
581 }
582
583 void
iokit_remove_reference(io_object_t obj)584 iokit_remove_reference( io_object_t obj )
585 {
586 if (obj) {
587 obj->release();
588 }
589 }
590 #endif // __clang_analyzer__
591
592 void
iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj)593 iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj )
594 {
595 if (!obj) {
596 return;
597 }
598 obj->release();
599 }
600
// Locking modes for IOUserClient::ipcEnter / ipcExit.
enum {
	kIPCLockNone = 0,  // take no client lock
	kIPCLockRead = 1,  // take the client's rw-lock shared
	kIPCLockWrite = 2  // take the client's rw-lock exclusive
};
606
/*
 * Enter an IPC call on this user client: take the client rw-lock in the
 * requested mode, then count the call in __ipc. Paired with ipcExit()
 * using the same `locking` value.
 */
void
IOUserClient::ipcEnter(int locking)
{
	switch (locking) {
	case kIPCLockWrite:
		IORWLockWrite(&lock);
		break;
	case kIPCLockRead:
		IORWLockRead(&lock);
		break;
	case kIPCLockNone:
		break;
	default:
		panic("ipcEnter");
	}

	OSIncrementAtomic(&__ipc);
}
625
/*
 * Leave an IPC call: decrement __ipc and, if this was the last in-flight
 * call on an inactive client whose finalize was deferred (__ipcFinal set
 * by finalizeUserReferences), schedule the finalize now. Finally release
 * any lock taken by the matching ipcEnter().
 */
void
IOUserClient::ipcExit(int locking)
{
	bool finalize = false;

	assert(__ipc);
	if (1 == OSDecrementAtomic(&__ipc) && isInactive()) {
		IOLockLock(gIOObjectPortLock);
		// Claim the deferred-finalize flag under the lock so only one
		// exiting thread schedules the finalize.
		if ((finalize = __ipcFinal)) {
			__ipcFinal = false;
		}
		IOLockUnlock(gIOObjectPortLock);
		if (finalize) {
			scheduleFinalize(true);
		}
	}
	switch (locking) {
	case kIPCLockWrite:
	case kIPCLockRead:
		IORWLockUnlock(&lock);
		break;
	case kIPCLockNone:
		break;
	default:
		panic("ipcExit");
	}
}
653
// Retain the IOMachPort kobject itself (not the object it wraps).
void
iokit_kobject_retain(io_kobject_t machPort)
{
	assert(OSDynamicCast(IOMachPort, machPort));
	machPort->retain();
}
660
/*
 * Resolve an IOMachPort kobject to the object it wraps, adding a
 * reference to the result (NULL if the port was already torn down).
 * Always consumes the caller's machPort reference.
 */
io_object_t
iokit_copy_object_for_consumed_kobject(LIBKERN_CONSUMED io_kobject_t machPort, natural_t type)
{
	io_object_t result;

	assert(OSDynamicCast(IOMachPort, machPort));

	// machPort->object is cleared under this lock during teardown, so a
	// concurrent reader sees either a valid object or NULL.
	IOLockLock(&machPort->lock);
	result = machPort->object;
	if (result) {
		iokit_add_reference(result, type);
	}
	IOLockUnlock(&machPort->lock);
	machPort->release();
	return result;
}
677
/*
 * Attempt to finalize a user client's references. Returns false (and
 * records the deferral in __ipcFinal, picked up by ipcExit) while IPC
 * calls are still in flight; non-user-client objects trivially succeed.
 */
bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
	IOUserClient * uc;
	bool ok = true;

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		IOLockLock(gIOObjectPortLock);
		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
			ok = false;
		}
		IOLockUnlock(gIOObjectPortLock);
	}
	return ok;
}
693
/*
 * Look up (or lazily create) the Mach port representing `obj` for the
 * given kobject type. Returns the port with an added port reference and
 * bumps the make-send count; optionally returns the IOMachPort kobject
 * through `kobj`.
 */
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type, ipc_kobject_t * kobj )
{
	IOMachPort *machPort = NULL;
	ipc_port_t port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		// First request for this (object, type): create the wrapper
		// (which pre-counts one send right) and hash it.
		machPort = IOMachPort::withObjectAndType(obj, type);
		if (__improbable(machPort == NULL)) {
			goto end;
		}
		SLIST_INSERT_HEAD(bucket, machPort, link);
	} else {
		// Account for the additional send right the caller will make.
		machPort->mscount++;
	}

	iokit_retain_port(machPort->port);
	port = machPort->port;

end:
	if (kobj) {
		*kobj = machPort;
	}
	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
727
/*
 * No-senders handling when the last send right for an object's port
 * disappears. Returns kIOReturnNotReady when newer send rights raced
 * the notification (port is kept and *mscount updated); otherwise runs
 * the per-type death action and returns success.
 */
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOUserClient * client;
	IOMemoryMap * map;
	IOUserNotification * notify;
	IOUserServerCheckInToken * token;

	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
		return kIOReturnNotReady;
	}

	switch (type) {
	case IKOT_IOKIT_CONNECT:
		// User client connection: tell the client its owner died.
		if ((client = OSDynamicCast( IOUserClient, obj ))) {
			IOStatisticsClientCall();
			IORWLockWrite(&client->lock);
			client->clientDied();
			IORWLockUnlock(&client->lock);
		}
		break;
	case IKOT_IOKIT_OBJECT:
		// Plain object port: tear down memory maps or notifications.
		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
			map->taskDied();
		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
			notify->setNotification( NULL );
		}
		break;
	case IKOT_IOKIT_IDENT:
		// DriverKit check-in token: cancel the pending check-in.
		if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
			token->cancel();
		}
		break;
	}

	return kIOReturnSuccess;
}
766 }; /* extern "C" */
767
768 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
769
// Delivers service publish/terminate notifications to a user task by
// queueing the matched services and pinging the user's wake port.
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	// Kernel-processed part of the ping message (header only).
	struct PingMsgKdata {
		mach_msg_header_t msgHdr;
	};
	// User payload: the notification header plus async reference.
	struct PingMsgUdata {
		OSNotificationHeader64 notifyHeader;
	};

	enum { kMaxOutstanding = 1024 }; // cap on queued, undelivered services

	ipc_port_t remotePort;           // user's wake port (send right owned here)
	void *msgReference;              // copy of the caller's async reference
	mach_msg_size_t msgReferenceSize; // rounded size of msgReference
	natural_t msgType;               // notification type sent in the ping
	OSArray * newSet;                // queued services awaiting pickup
	bool armed;                      // true when the next arrival should ping
	bool ipcLogged;                  // first send failure already logged

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	void invalidatePort(void);

	// Static trampoline registered with the notifier; forwards to handler().
	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
806
// Delivers IOService interest messages (e.g. power/general interest) to
// a user task over its registered wake port.
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	// Kernel-processed part: header plus one port descriptor (provider).
	struct PingMsgKdata {
		mach_msg_header_t msgHdr;
		mach_msg_body_t msgBody;
		mach_msg_port_descriptor_t ports[1];
	};
	// User payload: notification header (packed, variable-size content follows).
	struct PingMsgUdata {
		OSNotificationHeader64 notifyHeader __attribute__ ((packed));
	};

	ipc_port_t remotePort;           // user's wake port (send right owned here)
	void *msgReference;              // copy of the caller's async reference
	mach_msg_size_t msgReferenceSize; // rounded size of msgReference
	mach_msg_size_t msgExtraSize;    // extra payload space for message arguments
	natural_t msgType;               // notification type sent to the client
	uint8_t clientIs64;              // client task uses 64-bit references
	int owningPID;                   // pid recorded at init for kIOMessageCopyClientID
	bool ipcLogged;                  // first send failure already logged

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	void invalidatePort(void);

	// Static trampoline registered with the interest notifier.
	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
848
849 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
850
851 #undef super
852 #define super IOUserIterator
853 OSDefineMetaClass( IOUserNotification, IOUserIterator );
854 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
855
856 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
857
void
IOUserNotification::free( void )
{
#if DEVELOPMENT || DEBUG
	// The notifier must already have been cleared via
	// setNotification(NULL) by the time the last reference drops.
	IOLockLock( gIOObjectPortLock);

	assert(userIteratorObject == NULL);

	IOLockUnlock( gIOObjectPortLock);
#endif /* DEVELOPMENT || DEBUG */

	super::free();
}
871
872
/*
 * Install (or clear, with NULL) the IONotifier backing this user
 * notification, removing and balancing references on the previous one.
 */
void
IOUserNotification::setNotification( IONotifier * notify )
{
	OSObject * previousNotify;

	/*
	 * We must retain this object here before proceeding.
	 * Two threads may race in setNotification(). If one thread sets a new notifier while the
	 * other thread sets the notifier to NULL, it is possible for the second thread to call release()
	 * before the first thread calls retain(). Without the retain here, this thread interleaving
	 * would cause the object to get released and freed before it is retained by the first thread,
	 * which is a UaF.
	 */
	retain();

	IOLockLock( gIOObjectPortLock);

	previousNotify = userIteratorObject;
	userIteratorObject = notify;

	IOLockUnlock( gIOObjectPortLock);

	if (previousNotify) {
		// Detach the old notifier; if we are clearing (notify == NULL),
		// also drop the reference taken when it was installed.
		assert(OSDynamicCast(IONotifier, previousNotify));
		((IONotifier *)previousNotify)->remove();

		if (notify == NULL) {
			release();
		}
	} else if (notify) {
		// new IONotifier, retain the object. release() will happen in setNotification(NULL)
		retain();
	}

	release(); // paired with retain() at beginning of this method
}
909
void
IOUserNotification::reset()
{
	// Nothing to reset: notifications have no rewindable cursor.
}
915
bool
IOUserNotification::isValid()
{
	// Notification-style iterators are always considered valid.
	return true;
}
921
922 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
923
924 #undef super
925 #define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification,IOUserNotification)926 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
927
928 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
929
/*
 * Set up the notification: allocate the pending-service queue, validate
 * the caller's async reference size, copy the reference, and take over
 * the caller's send right in `port` (released in free()).
 */
bool
IOServiceUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize,
    bool clientIs64 )
{
	if (!super::init()) {
		return false;
	}

	newSet = OSArray::withCapacity( 1 );
	if (!newSet) {
		return false;
	}

	// Reject oversized references before allocating a copy.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
960
void
IOServiceUserNotification::invalidatePort(void)
{
	// Drop our record of the remote port so free() will not release it.
	remotePort = MACH_PORT_NULL;
}
966
void
IOServiceUserNotification::free( void )
{
	// Release the send right taken over in init(), unless invalidated.
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);
	OSSafeReleaseNULL(newSet);

	super::free();
}
978
979 bool
_handler(void * target,void * ref,IOService * newService,IONotifier * notifier)980 IOServiceUserNotification::_handler( void * target,
981 void * ref, IOService * newService, IONotifier * notifier )
982 {
983 IOServiceUserNotification * targetObj = (IOServiceUserNotification *)target;
984 bool ret;
985
986 targetObj->retain();
987 ret = targetObj->handler( ref, newService );
988 targetObj->release();
989 return ret;
990 }
991
/*
 * Notification delivery: queue the matched service and, on the
 * transition from empty-while-armed, ping the user's wake port with a
 * kernel-built Mach message carrying the async reference.
 */
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	unsigned int count;
	kern_return_t kr;
	ipc_port_t port = NULL;
	bool sendPing = false;
	mach_msg_size_t msgSize, payloadSize;

	IOTakeLock( &lock );

	count = newSet->getCount();
	if (count < kMaxOutstanding) {
		newSet->setObject( newService );
		// Only the first arrival into an empty, armed queue pings;
		// later arrivals are picked up when the client drains the set.
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( &lock );

	if (kIOServiceTerminatedNotificationType == msgType) {
		// Keep the service's object port alive until user space has
		// observed the termination.
		lck_mtx_lock(gIOObjectPortLock);
		newService->setMachPortHoldDestroy(true);
		lck_mtx_unlock(gIOObjectPortLock);
	}

	if (sendPing) {
		port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );

		// Payload is the notify header with its trailing reference
		// array trimmed to the registered reference size.
		payloadSize = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		msgSize = (mach_msg_size_t)(sizeof(PingMsgKdata) + payloadSize);

		kr = kernel_mach_msg_send_with_builder_internal(0, payloadSize,
		    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
		    MACH_MSG_TIMEOUT_NONE, NULL,
		    ^(mach_msg_header_t *hdr, __assert_only mach_msg_descriptor_t *descs, void *payload){
			PingMsgUdata *udata = (PingMsgUdata *)payload;

			hdr->msgh_remote_port = remotePort;
			hdr->msgh_local_port = port;
			hdr->msgh_bits = MACH_MSGH_BITS(
				MACH_MSG_TYPE_COPY_SEND /*remote*/,
				MACH_MSG_TYPE_MAKE_SEND /*local*/);
			hdr->msgh_size = msgSize;
			hdr->msgh_id = kOSNotificationMessageID;

			assert(descs == NULL);
			/* End of kernel processed data */

			udata->notifyHeader.size = 0;
			udata->notifyHeader.type = msgType;

			assert((char *)udata->notifyHeader.reference + msgReferenceSize <= (char *)payload + payloadSize);
			bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
		});

		if (port) {
			iokit_release_port( port );
		}

		// Log only the first send failure to avoid log spam.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
OSObject *
IOServiceUserNotification::getNextObject()
{
	// Unsupported on purpose: callers must use copyNextObject(), which
	// returns a retained object under the lock.
	assert(false);
	return NULL;
}
1068
1069 OSObject *
copyNextObject()1070 IOServiceUserNotification::copyNextObject()
1071 {
1072 unsigned int count;
1073 OSObject * result;
1074
1075 IOLockLock(&lock);
1076
1077 count = newSet->getCount();
1078 if (count) {
1079 result = newSet->getObject( count - 1 );
1080 result->retain();
1081 newSet->removeObject( count - 1);
1082 } else {
1083 result = NULL;
1084 armed = true;
1085 }
1086
1087 IOLockUnlock(&lock);
1088
1089 return result;
1090 }
1091
1092 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1093
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification,IOUserNotification)1094 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1095
1096 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1097
/*
 * Validate and copy the caller's async reference, record the owning
 * process and its bitness, and take over the caller's send right in
 * `port` (released in free()).
 */
bool
IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, bool client64 )
{
	if (!super::init()) {
		return false;
	}

	// Reject oversized references before allocating a copy.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	clientIs64 = client64;

	// Recorded to answer kIOMessageCopyClientID in handler().
	owningPID = proc_selfpid();

	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
1126
void
IOServiceMessageUserNotification::invalidatePort(void)
{
	// Drop our record of the remote port so free() will not release it.
	remotePort = MACH_PORT_NULL;
}
1132
void
IOServiceMessageUserNotification::free( void )
{
	// Release the send right taken over in init(), unless invalidated.
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);

	super::free();
}
1143
1144 IOReturn
_handler(void * target,void * ref,UInt32 messageType,IOService * provider,void * argument,vm_size_t argSize)1145 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1146 UInt32 messageType, IOService * provider,
1147 void * argument, vm_size_t argSize )
1148 {
1149 IOServiceMessageUserNotification * targetObj = (IOServiceMessageUserNotification *)target;
1150 IOReturn ret;
1151
1152 targetObj->retain();
1153 ret = targetObj->handler(
1154 ref, messageType, provider, argument, argSize);
1155 targetObj->release();
1156 return ret;
1157 }
1158
// Deliver a service-interest message to the client: build a mach message
// carrying the saved async reference, the (size-adjusted) message type and
// the caller's argument payload, then send it to the registered port.
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	kern_return_t kr;
	vm_size_t argSize;
	mach_msg_size_t thisMsgSize;
	ipc_port_t thisPort, providerPort;

	// Special query: return the owning pid instead of sending a message.
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	if (callerArgSize == 0) {
		// No payload: the argument pointer itself is sent, sized per the
		// client's pointer width.
		if (clientIs64) {
			argSize = sizeof(io_user_reference_t);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		// Clamp over-large payloads rather than failing the send.
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions
	natural_t type = msgType;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	mach_msg_size_t extraSize = kIOUserNotifyMaxMessageSize + sizeof(IOServiceInterestContent64);
	mach_msg_size_t msgSize = (mach_msg_size_t) (sizeof(PingMsgKdata) +
	    sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize);

	// Guard against integer overflow when appending the argument payload.
	if (os_add3_overflow(msgSize, offsetof(IOServiceInterestContent64, messageArgument), argSize, &thisMsgSize)) {
		return kIOReturnBadArgument;
	}
	mach_msg_size_t payloadSize = thisMsgSize - sizeof(PingMsgKdata);

	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT, NULL );
	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );

	// The block below fills in the message in place inside the kernel's
	// send buffer (one port descriptor, then the user payload).
	kr = kernel_mach_msg_send_with_builder_internal(1, payloadSize,
	    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
	    MACH_MSG_TIMEOUT_NONE, NULL,
	    ^(mach_msg_header_t *hdr, mach_msg_descriptor_t *descs, void *payload){
		mach_msg_port_descriptor_t *port_desc = (mach_msg_port_descriptor_t *)descs;
		PingMsgUdata *udata = (PingMsgUdata *)payload;
		IOServiceInterestContent64 * data;
		mach_msg_size_t dataOffset;

		hdr->msgh_remote_port = remotePort;
		hdr->msgh_local_port = thisPort;
		hdr->msgh_bits = MACH_MSGH_BITS_COMPLEX
		| MACH_MSGH_BITS(
			MACH_MSG_TYPE_COPY_SEND /*remote*/,
			MACH_MSG_TYPE_MAKE_SEND /*local*/);
		hdr->msgh_size = thisMsgSize;
		hdr->msgh_id = kOSNotificationMessageID;

		/* body.msgh_descriptor_count is set automatically after the closure */

		port_desc[0].name = providerPort;
		port_desc[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
		port_desc[0].type = MACH_MSG_PORT_DESCRIPTOR;
		/* End of kernel processed data */

		udata->notifyHeader.size = extraSize;
		udata->notifyHeader.type = type;
		bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );

		/* data is after msgReference */
		dataOffset = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		data = (IOServiceInterestContent64 *) (((uint8_t *) udata) + dataOffset);
		data->messageType = messageType;

		if (callerArgSize == 0) {
			// Pointer-sized argument; for 32-bit clients replicate the
			// low 32 bits into the high half.
			assert((char *)data->messageArgument + argSize <= (char *)payload + payloadSize);
			data->messageArgument[0] = (io_user_reference_t) messageArgument;
			if (!clientIs64) {
				data->messageArgument[0] |= (data->messageArgument[0] << 32);
			}
		} else {
			assert((char *)data->messageArgument + callerArgSize <= (char *)payload + payloadSize);
			bcopy(messageArgument, data->messageArgument, callerArgSize);
		}
	});

	if (thisPort) {
		iokit_release_port( thisPort );
	}
	if (providerPort) {
		iokit_release_port( providerPort );
	}

	if (kr == MACH_SEND_NO_BUFFER) {
		return kIOReturnNoMemory;
	}

	// Log only the first send failure to avoid flooding the log.
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1269
// Message notifications carry their payload in the mach message itself;
// there are never queued objects to iterate.
OSObject *
IOServiceMessageUserNotification::getNextObject()
{
	return NULL;
}
1275
// See getNextObject(): message notifications never queue objects.
OSObject *
IOServiceMessageUserNotification::copyNextObject()
{
	return NULL;
}
1281
1282 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1283
1284 #undef super
1285 #define super IOService
1286 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1287
1288 IOLock * gIOUserClientOwnersLock;
1289
1290 static_assert(offsetof(IOUserClient, __opaque_end) -
1291 offsetof(IOUserClient, __opaque_start) == sizeof(void *) * 9,
1292 "ABI check: Opaque ivars for IOUserClient must be 9 void * big");
1293
1294 void
initialize(void)1295 IOUserClient::initialize( void )
1296 {
1297 gIOObjectPortLock = IOLockAlloc();
1298 gIOUserClientOwnersLock = IOLockAlloc();
1299 gIOUserServerLock = IOLockAlloc();
1300 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1301
1302 #if IOTRACKING
1303 IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1304 IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1305 IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1306 IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1307 IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
1308 #endif /* IOTRACKING */
1309 }
1310
// Legacy 32-bit async-reference setup; invalid (always panics) on LP64
// kernels, where setAsyncReference64 must be used instead.
void
#if __LP64__
__attribute__((__noreturn__))
#endif
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
#if __LP64__
	panic("setAsyncReference not valid for 64b");
#else
	// Preserve the flag bits stored in the low bits of slot 0.
	asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
#endif
}
1328
1329 void
setAsyncReference64(OSAsyncReference64 asyncRef,mach_port_t wakePort,mach_vm_address_t callback,io_user_reference_t refcon)1330 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1331 mach_port_t wakePort,
1332 mach_vm_address_t callback, io_user_reference_t refcon)
1333 {
1334 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1335 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1336 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1337 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1338 }
1339
// Task-aware variant: also tags the reference with kIOUCAsync64Flag when
// the task runs with a 64-bit address map.
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
{
	setAsyncReference64(asyncRef, wakePort, callback, refcon);
	if (vm_map_is_64bit(get_task_map(task))) {
		asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
	}
}
1350
1351 static OSDictionary *
CopyConsoleUser(UInt32 uid)1352 CopyConsoleUser(UInt32 uid)
1353 {
1354 OSArray * array;
1355 OSDictionary * user = NULL;
1356
1357 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1358 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1359 for (unsigned int idx = 0;
1360 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1361 idx++) {
1362 OSNumber * num;
1363
1364 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1365 && (uid == num->unsigned32BitValue())) {
1366 user->retain();
1367 break;
1368 }
1369 }
1370 }
1371 OSSafeReleaseNULL(ioProperty);
1372 return user;
1373 }
1374
1375 static OSDictionary *
CopyUserOnConsole(void)1376 CopyUserOnConsole(void)
1377 {
1378 OSArray * array;
1379 OSDictionary * user = NULL;
1380
1381 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1382 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1383 for (unsigned int idx = 0;
1384 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1385 idx++) {
1386 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1387 user->retain();
1388 break;
1389 }
1390 }
1391 }
1392 OSSafeReleaseNULL(ioProperty);
1393 return user;
1394 }
1395
1396 IOReturn
clientHasAuthorization(task_t task,IOService * service)1397 IOUserClient::clientHasAuthorization( task_t task,
1398 IOService * service )
1399 {
1400 proc_t p;
1401
1402 p = (proc_t) get_bsdtask_info(task);
1403 if (p) {
1404 uint64_t authorizationID;
1405
1406 authorizationID = proc_uniqueid(p);
1407 if (authorizationID) {
1408 if (service->getAuthorizationID() == authorizationID) {
1409 return kIOReturnSuccess;
1410 }
1411 }
1412 }
1413
1414 return kIOReturnNotPermitted;
1415 }
1416
// Check whether the task identified by 'securityToken' holds the named
// privilege. Returns kIOReturnSuccess, kIOReturnNotPrivileged, or
// kIOReturnUnsupported for an unrecognized privilege name.
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	kern_return_t kr;
	security_token_t token;
	mach_msg_type_number_t count;
	task_t task;
	OSDictionary * user;
	bool secureConsole;


	// Foreground: granted unless the GPU is denied to the current task.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console session: the caller's audit session id must match that of
	// the user currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// For the secure-console check, the token wraps a task plus a pid.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator: token.val[0] (the uid) must be 0.
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: the uid must appear in the console-users list.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: must be the user on the console; the secure
		// variant additionally requires the process to own secure input.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1511
1512 OSDictionary *
copyClientEntitlements(task_t task)1513 IOUserClient::copyClientEntitlements(task_t task)
1514 {
1515 proc_t p = NULL;
1516 pid_t pid = 0;
1517 OSDictionary *entitlements = NULL;
1518
1519 p = (proc_t)get_bsdtask_info(task);
1520 if (p == NULL) {
1521 return NULL;
1522 }
1523 pid = proc_pid(p);
1524
1525 if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1526 if (entitlements) {
1527 return entitlements;
1528 }
1529 }
1530
1531 // If the above fails, thats it
1532 return NULL;
1533 }
1534
1535 OSDictionary *
copyClientEntitlementsVnode(vnode_t vnode,off_t offset)1536 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1537 {
1538 OSDictionary *entitlements = NULL;
1539
1540 if (cs_entitlements_dictionary_copy_vnode(vnode, offset, (void**)&entitlements) != 0) {
1541 return NULL;
1542 }
1543 return entitlements;
1544 }
1545
// Copy a single entitlement value for 'task' (defaulting to the current
// task) as a retained OSObject via the AMFI entitlement interface.
// Returns NULL for the kernel task, a NULL entitlement name, or a failed
// lookup. Caller releases.
OSObject *
IOUserClient::copyClientEntitlement( task_t task,
    const char * entitlement )
{
	void *entitlement_object = NULL;

	if (task == NULL) {
		task = current_task();
	}

	/* Validate input arguments */
	if (task == kernel_task || entitlement == NULL) {
		return NULL;
	}
	proc_t proc = (proc_t)get_bsdtask_info(task);

	kern_return_t ret = amfi->OSEntitlements.copyEntitlementAsOSObjectWithProc(
		proc,
		entitlement,
		&entitlement_object);

	if (ret != KERN_SUCCESS) {
		return NULL;
	}
	assert(entitlement_object != NULL);

	return (OSObject*)entitlement_object;
}
1574
1575 OSObject *
copyClientEntitlementVnode(struct vnode * vnode,off_t offset,const char * entitlement)1576 IOUserClient::copyClientEntitlementVnode(
1577 struct vnode *vnode,
1578 off_t offset,
1579 const char *entitlement)
1580 {
1581 OSDictionary *entitlements;
1582 OSObject *value;
1583
1584 entitlements = copyClientEntitlementsVnode(vnode, offset);
1585 if (entitlements == NULL) {
1586 return NULL;
1587 }
1588
1589 /* Fetch the entitlement value from the dictionary. */
1590 value = entitlements->getObject(entitlement);
1591 if (value != NULL) {
1592 value->retain();
1593 }
1594
1595 entitlements->release();
1596 return value;
1597 }
1598
bool
IOUserClient::init()
{
	// NOTE(review): an existing property table short-circuits
	// super::init() — re-initialization is skipped in that case.
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1608
bool
IOUserClient::init(OSDictionary * dictionary)
{
	// Same short-circuit as init(): an existing property table skips
	// super::init(dictionary).
	if (getPropertyTable() || super::init(dictionary)) {
		return reserve();
	}

	return false;
}
1618
// Task-based initializer; the base class ignores the owning task,
// security id and type — subclasses use them.
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type )
{
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1630
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type,
    OSDictionary * properties )
{
	bool ok;

	// Non-short-circuit '&=': initWithTask() runs even if super::init()
	// failed; both must succeed for overall success.
	ok = super::init( properties );
	ok &= initWithTask( owningTask, securityID, type );

	return ok;
}
1644
// Allocate the lazily-created expansion storage and initialize the
// per-instance locks and statistics; called from every init variant.
bool
IOUserClient::reserve()
{
	if (!reserved) {
		reserved = IOMallocType(ExpansionData);
	}
	setTerminateDefer(NULL, true);
	IOStatisticsRegisterCounter();
	IORWLockInlineInit(&lock);
	IOLockInlineInit(&filterLock);

	return true;
}
1658
// One (task, user client) ownership pair, linked onto two lists: the
// client's 'owners' queue via ucLink, and the task's io_user_clients
// queue via taskLink, so teardown can be driven from either side.
struct IOUserClientOwner {
	task_t task;
	queue_chain_t taskLink;
	IOUserClient * uc;
	queue_chain_t ucLink;
};
1665
// Record 'task' as an owner of this user client (idempotent per task).
// Guarded by gIOUserClientOwnersLock.
IOReturn
IOUserClient::registerOwner(task_t task)
{
	IOUserClientOwner * owner;
	IOReturn ret;
	bool newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	// The owners queue is lazily initialized on first registration.
	if (!owners.next) {
		queue_init(&owners);
	} else {
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		owner = IOMallocType(IOUserClientOwner);

		owner->task = task;
		owner->uc = this;
		queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
		queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
		// Propagate this client's interest in app-suspend transitions.
		if (messageAppSuspended) {
			task_set_message_app_suspended(task, true);
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1706
// Called when all send rights to this client's port are gone: unlink
// every owner from its task and recompute each task's interest in
// app-suspended messages from the owners that remain on that task.
void
IOUserClient::noMoreSenders(void)
{
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t * taskque;
	bool hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			// The task keeps receiving app-suspend messages only if some
			// other client of the task still asks for them.
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IOFreeType(owner, IOUserClientOwner);
		}
		// Mark the queue uninitialized again, matching registerOwner().
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1738
1739
// Notify every user client owned by 'task' that opted in via
// messageAppSuspended about an app-suspension state change. Clients are
// collected under the lock, then messaged outside it.
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	queue_head_t * taskque;
	IOUserClientOwner * owner;
	OSSet * set;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		// Lazily allocate the collection set; bail out on allocation
		// failure (best effort).
		if (!set) {
			set = OSSet::withCapacity(4);
			if (!set) {
				break;
			}
		}
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient * uc;

			uc = (typeof(uc))obj;
#if 0
			{
				OSString * str;
				str = IOCopyLogNameForPID(task_pid(task));
				IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
				    uc->getName(), task_is_app_suspended(task));
				OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1787
// Task-death hook: unlink all of the task's user-client ownerships; for
// clients left with no owners, invoke clientDied() outside the lock.
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
	IOUserClientOwner * owner;
	IOUserClient * dead;
	IOUserClient * uc;
	queue_head_t * taskque;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IOFreeType(owner, IOUserClientOwner);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	// Walk the singly-linked "dead" list built above, dropping the
	// retain taken while the lock was held.
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1831
// Per-task cached mach message filter policy, kept in a singly linked
// list hanging off the client's reserved->filterPolicies.
struct IOUCFilterPolicy {
	task_t task;
	io_filter_policy_t filterPolicy;
	IOUCFilterPolicy * next;
};
1837
// Return the message filter policy cached for 'task', optionally
// installing 'addFilterPolicy' as the cached entry when none exists.
// Returns 0 when no policy is cached and none was supplied.
io_filter_policy_t
IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
{
	IOUCFilterPolicy * elem;
	io_filter_policy_t filterPolicy;

	filterPolicy = 0;
	IOLockLock(&filterLock);

	// Linear search of the per-client list for this task's entry.
	for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
	}

	if (elem) {
		if (addFilterPolicy) {
			// A task must not be registered with two different policies.
			assert(addFilterPolicy == elem->filterPolicy);
		}
		filterPolicy = elem->filterPolicy;
	} else if (addFilterPolicy) {
		// First sighting of this task: cache the supplied policy.
		elem = IOMallocType(IOUCFilterPolicy);
		elem->task = task;
		elem->filterPolicy = addFilterPolicy;
		elem->next = reserved->filterPolicies;
		reserved->filterPolicies = elem;
		filterPolicy = addFilterPolicy;
	}

	IOLockUnlock(&filterLock);
	return filterPolicy;
}
1867
// Tear down the user client: drop mappings, release all cached filter
// policies, and destroy the inline locks.
void
IOUserClient::free()
{
	if (mappings) {
		mappings->release();
	}

	IOStatisticsUnregisterCounter();

	// All owners must already be unlinked (noMoreSenders / task death).
	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		IOUCFilterPolicy * elem;
		IOUCFilterPolicy * nextElem;
		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
			nextElem = elem->next;
			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
			}
			IOFreeType(elem, IOUCFilterPolicy);
		}
		IOFreeType(reserved, ExpansionData);
		IORWLockInlineDestroy(&lock);
		IOLockInlineDestroy(&filterLock);
	}

	super::free();
}
1897
1898 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1899
OSDefineMetaClassAndAbstractStructors(IOUserClient2022,IOUserClient)1900 OSDefineMetaClassAndAbstractStructors( IOUserClient2022, IOUserClient )
1901
1902
1903 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1904
IOReturn
IOUserClient::clientDied( void )
{
	IOReturn ret = kIOReturnNotReady;

	// Close at most once per client; shared instances bypass the
	// one-shot 'closed' latch.
	if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
		ret = clientClose();
	}

	return ret;
}
1916
// Subclass override point; the base class does not support closing.
IOReturn
IOUserClient::clientClose( void )
{
	return kIOReturnUnsupported;
}
1922
// Subclass override point; the base class exposes no provider service.
IOService *
IOUserClient::getService( void )
{
	return NULL;
}
1928
// Subclass override point; the base class does not accept notification
// ports.
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t /* port */,
	UInt32 /* type */,
	UInt32 /* refCon */)
{
	return kIOReturnUnsupported;
}
1937
1938 IOReturn
registerNotificationPort(mach_port_t port,UInt32 type,io_user_reference_t refCon)1939 IOUserClient::registerNotificationPort(
1940 mach_port_t port,
1941 UInt32 type,
1942 io_user_reference_t refCon)
1943 {
1944 return registerNotificationPort(port, type, (UInt32) refCon);
1945 }
1946
// Subclass override point; no notification semaphores by default.
IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type,
    semaphore_t * semaphore )
{
	return kIOReturnUnsupported;
}
1953
// Subclass override point; client-to-client connection is unsupported by
// default.
IOReturn
IOUserClient::connectClient( IOUserClient * /* client */ )
{
	return kIOReturnUnsupported;
}
1959
// Subclass override point: publish a memory descriptor for the given
// type. The base class publishes nothing.
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    IOMemoryDescriptor ** memory )
{
	return kIOReturnUnsupported;
}
1967
1968 IOReturn
clientMemoryForType(UInt32 type,IOOptionBits * options,OSSharedPtr<IOMemoryDescriptor> & memory)1969 IOUserClient::clientMemoryForType( UInt32 type,
1970 IOOptionBits * options,
1971 OSSharedPtr<IOMemoryDescriptor>& memory )
1972 {
1973 IOMemoryDescriptor* memoryRaw = nullptr;
1974 IOReturn result = clientMemoryForType(type, options, &memoryRaw);
1975 memory.reset(memoryRaw, OSNoRetain);
1976 return result;
1977 }
1978
#if !__LP64__
// Legacy 32-bit mapping entry point; not implemented in the base class.
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	IOVirtualAddress atAddress )
{
	return NULL;
}
#endif
1990
// Map the memory a subclass publishes for 'type' (via clientMemoryForType)
// into 'task'. Returns the new mapping, or NULL on failure.
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	mach_vm_address_t atAddress )
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map = NULL;

	err = clientMemoryForType((UInt32) type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		FAKE_STACK_FRAME(getMetaClass());

		// Caller-supplied flags override only the user-option bits of
		// what the subclass returned.
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		memory->release();

		FAKE_STACK_FRAME_END();
	}

	return map;
}
2018
2019 IOReturn
exportObjectToClient(task_t task,OSObject * obj,io_object_t * clientObj)2020 IOUserClient::exportObjectToClient(task_t task,
2021 OSObject *obj, io_object_t *clientObj)
2022 {
2023 mach_port_name_t name;
2024
2025 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
2026
2027 *clientObj = (io_object_t)(uintptr_t) name;
2028
2029 if (obj) {
2030 obj->release();
2031 }
2032
2033 return kIOReturnSuccess;
2034 }
2035
2036 IOReturn
copyPortNameForObjectInTask(task_t task,OSObject * obj,mach_port_name_t * port_name)2037 IOUserClient::copyPortNameForObjectInTask(task_t task,
2038 OSObject *obj, mach_port_name_t * port_name)
2039 {
2040 mach_port_name_t name;
2041
2042 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
2043
2044 *(mach_port_name_t *) port_name = name;
2045
2046 return kIOReturnSuccess;
2047 }
2048
2049 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSObject ** obj)2050 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2051 OSObject **obj)
2052 {
2053 OSObject * object;
2054
2055 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
2056
2057 *obj = object;
2058
2059 return object ? kIOReturnSuccess : kIOReturnIPCError;
2060 }
2061
2062 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSSharedPtr<OSObject> & obj)2063 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2064 OSSharedPtr<OSObject>& obj)
2065 {
2066 OSObject* objRaw = NULL;
2067 IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2068 obj.reset(objRaw, OSNoRetain);
2069 return result;
2070 }
2071
// Adjust (by 'delta') the user references on a send right held by 'task'.
IOReturn
IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
	return iokit_mod_send_right(task, port_name, delta);
}
2077
// Subclass override point for the legacy method table; none by default.
IOExternalMethod *
IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2083
// Subclass override point for the legacy async method table; none by
// default.
IOExternalAsyncMethod *
IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2089
// Subclass override point for the legacy trap table; none by default.
IOExternalTrap *
IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
	return NULL;
}
2096
2097 #pragma clang diagnostic push
2098 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2099
2100 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
2101 // functions can break clients of kexts implementing getExternalMethodForIndex()
IOExternalMethod *
IOUserClient::
getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
{
	IOExternalMethod *method = getExternalMethodForIndex(index);

	// The dispatch target is recorded in the method table entry itself.
	if (method) {
		*targetP = (IOService *) method->object;
	}

	return method;
}
2114
2115 IOExternalMethod *
2116 IOUserClient::
getTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2117 getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2118 {
2119 IOService* targetPRaw = NULL;
2120 IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
2121 targetP.reset(targetPRaw, OSRetain);
2122 return result;
2123 }
2124
IOExternalAsyncMethod *
IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
{
	IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);

	// As with the sync variant, the target comes from the table entry.
	if (method) {
		*targetP = (IOService *) method->object;
	}

	return method;
}
2137
2138 IOExternalAsyncMethod *
2139 IOUserClient::
getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2140 getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2141 {
2142 IOService* targetPRaw = NULL;
2143 IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
2144 targetP.reset(targetPRaw, OSRetain);
2145 return result;
2146 }
2147
IOExternalTrap *
IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
{
	IOExternalTrap *trap = getExternalTrapForIndex(index);

	// The trap table entry carries its own dispatch target.
	if (trap) {
		*targetP = trap->object;
	}

	return trap;
}
2160 #pragma clang diagnostic pop
2161
2162 IOReturn
releaseAsyncReference64(OSAsyncReference64 reference)2163 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2164 {
2165 mach_port_t port;
2166 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2167
2168 if (MACH_PORT_NULL != port) {
2169 iokit_release_port_send(port);
2170 }
2171
2172 return kIOReturnSuccess;
2173 }
2174
2175 IOReturn
releaseNotificationPort(mach_port_t port)2176 IOUserClient::releaseNotificationPort(mach_port_t port)
2177 {
2178 if (MACH_PORT_NULL != port) {
2179 iokit_release_port_send(port);
2180 }
2181
2182 return kIOReturnSuccess;
2183 }
2184
// Legacy 32-bit entry point: widen the async reference and each argument
// to their 64-bit forms and forward to sendAsyncResult64().
IOReturn
IOUserClient::sendAsyncResult(OSAsyncReference reference,
    IOReturn result, void *args[], UInt32 numArgs)
{
	OSAsyncReference64 reference64;
	OSBoundedArray<io_user_reference_t, kMaxAsyncArgs> args64;
	unsigned int idx;

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
		reference64[idx] = REF64(reference[idx]);
	}

	for (idx = 0; idx < numArgs; idx++) {
		args64[idx] = REF64(args[idx]);
	}

	return sendAsyncResult64(reference64, result, args64.data(), numArgs);
}
2207
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Public variant that lets the caller pass delivery options (e.g.
	// kIOUserNotifyOptionCanDrop); forwards to the shared implementation.
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2214
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	// Default delivery: no options, so the send fails (rather than drops)
	// when the reply port's queue is full.
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2221
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Deliver an async completion to the client's reply port. The wire
	// format is a Mach message whose payload is either the 32-bit or the
	// 64-bit notification layout, selected by the kIOUCAsync64Flag bit
	// stored in reference[0].
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64 notifyHdr;
				IOAsyncCompletionContent asyncContent;
				io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg replyMsg;
	mach_port_t replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port = NULL;
	replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit client: message is truncated to carry only numArgs
		// of the kMaxAsyncArgs slots.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		// 32-bit client: narrow the reference and each argument with
		// REF32 before packing.
		unsigned int idx;

		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		/* Skip reference[0] which is left as 0 from the earlier bzero */
		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		// Caller tolerates loss: use a zero timeout so a full queue
		// drops the message instead of blocking.
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	// Log a send failure once per reference; the flag bit in reference[0]
	// suppresses repeat logging for the same async reference.
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2312
2313
2314 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2315
extern "C" {
/* CHECK: declare a new local `out` as `obj` downcast to `cls`; returns
 * kIOReturnBadArgument from the enclosing MIG routine when the cast
 * fails. */
#define CHECK(cls, obj, out) \
	cls * out; \
	if( !(out = OSDynamicCast( cls, obj))) \
	return( kIOReturnBadArgument )

/* CHECKLOCKED: like CHECK, but `obj` must be an IOUserIterator whose
 * wrapped userIteratorObject downcasts to `cls`; declares both `oIter`
 * and `out` in the caller's scope. */
#define CHECKLOCKED(cls, obj, out) \
	IOUserIterator * oIter; \
	cls * out; \
	if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
	return (kIOReturnBadArgument); \
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
	return (kIOReturnBadArgument)
2329
2330 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2331
2332 // Create a vm_map_copy_t or kalloc'ed data for memory
2333 // to be copied out. ipc will free after the copyout.
2334
2335 static kern_return_t
copyoutkdata(const void * data,vm_size_t len,io_buf_ptr_t * buf)2336 copyoutkdata( const void * data, vm_size_t len,
2337 io_buf_ptr_t * buf )
2338 {
2339 kern_return_t err;
2340 vm_map_copy_t copy;
2341
2342 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2343 false /* src_destroy */, ©);
2344
2345 assert( err == KERN_SUCCESS );
2346 if (err == KERN_SUCCESS) {
2347 *buf = (char *) copy;
2348 }
2349
2350 return err;
2351 }
2352
2353 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2354
/* Routine io_server_version */
kern_return_t
is_io_server_version(
	mach_port_t main_port,
	uint64_t *version)
{
	// Report the compiled-in IOKit server interface version to the
	// client; the port argument is not validated here.
	*version = IOKIT_SERVER_VERSION;
	return kIOReturnSuccess;
}
2364
2365 /* Routine io_object_get_class */
2366 kern_return_t
is_io_object_get_class(io_object_t object,io_name_t className)2367 is_io_object_get_class(
2368 io_object_t object,
2369 io_name_t className )
2370 {
2371 const OSMetaClass* my_obj = NULL;
2372
2373 if (!object) {
2374 return kIOReturnBadArgument;
2375 }
2376
2377 my_obj = object->getMetaClass();
2378 if (!my_obj) {
2379 return kIOReturnNotFound;
2380 }
2381
2382 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2383
2384 return kIOReturnSuccess;
2385 }
2386
/* Routine io_object_get_superclass */
kern_return_t
is_io_object_get_superclass(
	mach_port_t main_port,
	io_name_t obj_name,
	io_name_t class_name)
{
	// Look up the metaclass registered under obj_name and copy its
	// superclass's name into class_name. Restricted to the main device
	// port. Returns kIOReturnNotFound if any lookup step fails.
	IOReturn ret;
	const OSMetaClass * meta;
	const OSMetaClass * super;
	const OSSymbol * name;
	const char * cstr;

	if (!obj_name || !class_name) {
		return kIOReturnBadArgument;
	}
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	ret = kIOReturnNotFound;
	meta = NULL;
	do{
		name = OSSymbol::withCString(obj_name);
		if (!name) {
			break;
		}
		// copyMetaClassWithName() takes a reference; paired with
		// releaseMetaClass() below.
		meta = OSMetaClass::copyMetaClassWithName(name);
		if (!meta) {
			break;
		}
		super = meta->getSuperClass();
		if (!super) {
			break;
		}
		cstr = super->getClassName();
		if (!cstr) {
			break;
		}
		strlcpy(class_name, cstr, sizeof(io_name_t));
		ret = kIOReturnSuccess;
	}while (false);

	OSSafeReleaseNULL(name);
	if (meta) {
		meta->releaseMetaClass();
	}

	return ret;
}
2437
2438 /* Routine io_object_get_bundle_identifier */
2439 kern_return_t
is_io_object_get_bundle_identifier(mach_port_t main_port,io_name_t obj_name,io_name_t bundle_name)2440 is_io_object_get_bundle_identifier(
2441 mach_port_t main_port,
2442 io_name_t obj_name,
2443 io_name_t bundle_name)
2444 {
2445 IOReturn ret;
2446 const OSMetaClass * meta;
2447 const OSSymbol * name;
2448 const OSSymbol * identifier;
2449 const char * cstr;
2450
2451 if (!obj_name || !bundle_name) {
2452 return kIOReturnBadArgument;
2453 }
2454 if (main_port != main_device_port) {
2455 return kIOReturnNotPrivileged;
2456 }
2457
2458 ret = kIOReturnNotFound;
2459 meta = NULL;
2460 do{
2461 name = OSSymbol::withCString(obj_name);
2462 if (!name) {
2463 break;
2464 }
2465 meta = OSMetaClass::copyMetaClassWithName(name);
2466 if (!meta) {
2467 break;
2468 }
2469 identifier = meta->getKmodName();
2470 if (!identifier) {
2471 break;
2472 }
2473 cstr = identifier->getCStringNoCopy();
2474 if (!cstr) {
2475 break;
2476 }
2477 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2478 ret = kIOReturnSuccess;
2479 }while (false);
2480
2481 OSSafeReleaseNULL(name);
2482 if (meta) {
2483 meta->releaseMetaClass();
2484 }
2485
2486 return ret;
2487 }
2488
2489 /* Routine io_object_conforms_to */
2490 kern_return_t
is_io_object_conforms_to(io_object_t object,io_name_t className,boolean_t * conforms)2491 is_io_object_conforms_to(
2492 io_object_t object,
2493 io_name_t className,
2494 boolean_t *conforms )
2495 {
2496 if (!object) {
2497 return kIOReturnBadArgument;
2498 }
2499
2500 *conforms = (NULL != object->metaCast( className ));
2501
2502 return kIOReturnSuccess;
2503 }
2504
2505 /* Routine io_object_get_retain_count */
2506 kern_return_t
is_io_object_get_retain_count(io_object_t object,uint32_t * retainCount)2507 is_io_object_get_retain_count(
2508 io_object_t object,
2509 uint32_t *retainCount )
2510 {
2511 if (!object) {
2512 return kIOReturnBadArgument;
2513 }
2514
2515 *retainCount = object->getRetainCount();
2516 return kIOReturnSuccess;
2517 }
2518
/* Routine io_iterator_next */
kern_return_t
is_io_iterator_next(
	io_object_t iterator,
	io_object_t *object )
{
	// Advance the iterator and return the next object, retained.
	// kIOReturnNoDevice signals exhaustion.
	IOReturn ret;
	OSObject * obj;
	OSIterator * iter;
	IOUserIterator * uiter;

	// copyNextObject() already returns a retained object; the plain
	// OSIterator path returns a borrowed reference, so retain it here so
	// both paths hand back a retained object.
	if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
		obj = uiter->copyNextObject();
	} else if ((iter = OSDynamicCast(OSIterator, iterator))) {
		obj = iter->getNextObject();
		if (obj) {
			obj->retain();
		}
	} else {
		return kIOReturnBadArgument;
	}

	if (obj) {
		*object = obj;
		ret = kIOReturnSuccess;
	} else {
		ret = kIOReturnNoDevice;
	}

	return ret;
}
2550
/* Routine io_iterator_reset */
kern_return_t
is_io_iterator_reset(
	io_object_t iterator )
{
	// CHECK declares `iter` and returns kIOReturnBadArgument if the
	// object is not an OSIterator.
	CHECK( OSIterator, iterator, iter );

	iter->reset();

	return kIOReturnSuccess;
}
2562
/* Routine io_iterator_is_valid */
kern_return_t
is_io_iterator_is_valid(
	io_object_t iterator,
	boolean_t *is_valid )
{
	// CHECK declares `iter` and returns kIOReturnBadArgument if the
	// object is not an OSIterator.
	CHECK( OSIterator, iterator, iter );

	*is_valid = iter->isValid();

	return kIOReturnSuccess;
}
2575
/*
 * Shared implementation for the match_property_table MIG routines:
 * unserialize the XML matching dictionary and run a passive match
 * against the service. `matching` need not be NUL-terminated beyond
 * matching_size.
 */
static kern_return_t
internal_io_service_match_property_table(
	io_service_t _service,
	const char * matching,
	mach_msg_type_number_t matching_size,
	boolean_t *matches)
{
	CHECK( IOService, _service, service );

	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	assert(matching_size);


	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		// Rewrites the dictionary as needed for older clients before
		// matching.
		IOTaskRegistryCompatibilityMatching(current_task(), dict);
		*matches = service->passiveMatch( dict );
		kr = kIOReturnSuccess;
	} else {
		kr = kIOReturnBadArgument;
	}

	// Releases the unserialized object whether or not it was a dictionary.
	if (obj) {
		obj->release();
	}

	return kr;
}
2608
/* Routine io_service_match_property_table */
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	// Deliberately unsupported; the MIG slot is kept for binary
	// compatibility. Presumably superseded by the _ool/_bin variants —
	// callers should use those.
	return kIOReturnUnsupported;
}
2618
2619
/* Routine io_service_match_property_table_ool */
kern_return_t
is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
	// Out-of-line variant: `matching` arrives as a vm_map_copy_t that we
	// map into the kernel map, match against, then deallocate.
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// (the copy object is consumed); the inner routine's status is
		// reported through *result instead.
		*result = internal_io_service_match_property_table(service,
		    (const char *)data, matchingCnt, matches );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2645
/* Routine io_service_match_property_table_bin */
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	// Inband variant: the matching data already lives in the message, so
	// no vm copyout is needed.
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2656
/*
 * Shared implementation for the get_matching_services MIG routines:
 * unserialize the XML matching dictionary and return an iterator over all
 * currently matching services. Restricted to the main device port.
 */
static kern_return_t
internal_io_service_get_matching_services(
	mach_port_t main_port,
	const char * matching,
	mach_msg_type_number_t matching_size,
	io_iterator_t *existing )
{
	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	assert(matching_size);
	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		IOTaskRegistryCompatibilityMatching(current_task(), dict);
		// Wrap the kernel iterator in an IOUserIterator for export to
		// user space.
		*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
		kr = kIOReturnSuccess;
	} else {
		kr = kIOReturnBadArgument;
	}

	if (obj) {
		obj->release();
	}

	return kr;
}
2689
/* Routine io_service_get_matching_services */
kern_return_t
is_io_service_get_matching_services(
	mach_port_t main_port,
	io_string_t matching,
	io_iterator_t *existing )
{
	// Deliberately unsupported; the MIG slot is kept for binary
	// compatibility. Presumably superseded by the _ool/_bin variants.
	return kIOReturnUnsupported;
}
2699
/* Routine io_service_get_matching_services_ool */
kern_return_t
is_io_service_get_matching_services_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
	// Out-of-line variant: map the caller's vm copy of the matching XML
	// into the kernel map, run the lookup, then deallocate.
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*existing = NULL;
		*result = internal_io_service_get_matching_services(main_port,
		    (const char *) data, matchingCnt, existing);
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2727
/* Routine io_service_get_matching_services_bin */
kern_return_t
is_io_service_get_matching_services_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
	// Inband variant: matching data already lives in the message.
	return internal_io_service_get_matching_services(main_port, matching, matchingCnt, existing);
}
2738
2739
/*
 * Shared implementation for the get_matching_service MIG routines:
 * unserialize the XML matching dictionary and return the first matching
 * service (retained, via copyMatchingService). Restricted to the main
 * device port.
 */
static kern_return_t
internal_io_service_get_matching_service(
	mach_port_t main_port,
	const char * matching,
	mach_msg_type_number_t matching_size,
	io_service_t *service )
{
	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	assert(matching_size);
	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		IOTaskRegistryCompatibilityMatching(current_task(), dict);
		*service = IOService::copyMatchingService( dict );
		// Distinguish "valid query, nothing matched" from bad input.
		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
	} else {
		kr = kIOReturnBadArgument;
	}

	if (obj) {
		obj->release();
	}

	return kr;
}
2772
/* Routine io_service_get_matching_service */
kern_return_t
is_io_service_get_matching_service(
	mach_port_t main_port,
	io_string_t matching,
	io_service_t *service )
{
	// Deliberately unsupported; the MIG slot is kept for binary
	// compatibility. Presumably superseded by the _ool/_bin variants.
	return kIOReturnUnsupported;
}
2782
/* Routine io_service_get_matching_service_ool */
kern_return_t
is_io_service_get_matching_service_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
	// Out-of-line variant: map the caller's vm copy of the matching XML
	// into the kernel map, run the lookup, then deallocate.
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*service = NULL;
		*result = internal_io_service_get_matching_service(main_port,
		    (const char *) data, matchingCnt, service );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2810
/* Routine io_service_get_matching_service_bin */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	// Inband variant: matching data already lives in the message.
	return internal_io_service_get_matching_service(main_port, matching, matchingCnt, service);
}
2821
/*
 * Shared implementation for the add_notification MIG routines: register a
 * matching notification (publish/matched/terminated...) that delivers
 * messages to `port`. `reference`/`referenceSize` is the caller's opaque
 * async reference, already zero-padded by the wrappers; `client64` selects
 * the 32- vs 64-bit wire format. On success *notification receives the
 * retained IOServiceUserNotification. Restricted to the main device port.
 */
static kern_return_t
internal_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	const char * matching,
	size_t matching_size,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceUserNotification * userNotify = NULL;
	IONotifier * notify = NULL;
	const OSSymbol * sym;
	OSObject * obj;
	OSDictionary * dict;
	IOReturn err;
	natural_t userMsgType;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	do {
		err = kIOReturnNoResources;

		// Cap the serialized dictionary size before unserializing.
		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		// NOTE(review): a failed OSSymbol::withCString() only records
		// the error and falls through with sym == NULL rather than
		// bailing out — confirm this is intentional (the NULL sym then
		// maps to kLastIOKitNotificationType below).
		if (!(sym = OSSymbol::withCString( notification_type ))) {
			err = kIOReturnNoResources;
		}

		assert(matching_size);
		obj = OSUnserializeXML(matching, matching_size);
		dict = OSDynamicCast(OSDictionary, obj);
		if (!dict) {
			err = kIOReturnBadArgument;
			continue;
		}
		IOTaskRegistryCompatibilityMatching(current_task(), dict);

		// Map the notification-type symbol to the message type the
		// client will see.
		if ((sym == gIOPublishNotification)
		    || (sym == gIOFirstPublishNotification)) {
			userMsgType = kIOServicePublishNotificationType;
		} else if ((sym == gIOMatchedNotification)
		    || (sym == gIOFirstMatchNotification)) {
			userMsgType = kIOServiceMatchedNotificationType;
		} else if ((sym == gIOTerminatedNotification)
		    || (sym == gIOWillTerminateNotification)) {
			userMsgType = kIOServiceTerminatedNotificationType;
		} else {
			userMsgType = kLastIOKitNotificationType;
		}

		userNotify = new IOServiceUserNotification;

		if (userNotify && !userNotify->init( port, userMsgType,
		    reference, referenceSize, client64)) {
			userNotify->release();
			userNotify = NULL;
		}
		if (!userNotify) {
			continue;
		}

		notify = IOService::addMatchingNotification( sym, dict,
		    &userNotify->_handler, userNotify );
		if (notify) {
			*notification = userNotify;
			userNotify->setNotification( notify );
			err = kIOReturnSuccess;
		} else {
			err = kIOReturnUnsupported;
		}
	} while (false);

	// On any failure, tear down the partially constructed notification so
	// the port send right and object reference are not leaked.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	if (sym) {
		sym->release();
	}
	if (obj) {
		obj->release();
	}

	return err;
}
2917
2918
/* Routine io_service_add_notification */
kern_return_t
is_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	// Deliberately unsupported; the MIG slot is kept for binary
	// compatibility. Presumably superseded by the _bin/_ool variants.
	return kIOReturnUnsupported;
}
2932
/* Routine io_service_add_notification_64 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	// Deliberately unsupported; the MIG slot is kept for binary
	// compatibility. Presumably superseded by the _bin_64/_ool_64 variants.
	return kIOReturnUnsupported;
}
2946
/* Routine io_service_add_notification_bin */
kern_return_t
is_io_service_add_notification_bin
(
	mach_port_t main_port,
	io_name_t notification_type,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification)
{
	// Copy the caller's (possibly short) async reference into a local
	// zero-padded to the full ASYNC_REF_COUNT so the buffer passed down
	// has deterministic contents.
	io_async_ref_t zreference;

	if (referenceCnt > ASYNC_REF_COUNT) {
		return kIOReturnBadArgument;
	}
	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));

	return internal_io_service_add_notification(main_port, notification_type,
	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
	    false, notification);
}
2972
/* Routine io_service_add_notification_bin_64 */
kern_return_t
is_io_service_add_notification_bin_64
(
	mach_port_t main_port,
	io_name_t notification_type,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification)
{
	// 64-bit client variant of _bin: zero-pad the async reference to the
	// full ASYNC_REF64_COUNT before handing it down.
	io_async_ref64_t zreference;

	if (referenceCnt > ASYNC_REF64_COUNT) {
		return kIOReturnBadArgument;
	}
	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));

	return internal_io_service_add_notification(main_port, notification_type,
	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
	    true, notification);
}
2998
/*
 * Out-of-line front end for internal_io_service_add_notification(): map
 * the caller's vm copy of the matching data into the kernel map, register
 * the notification, then deallocate the mapping.
 */
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		*result = internal_io_service_add_notification( main_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
3030
/* Routine io_service_add_notification_ool */
kern_return_t
is_io_service_add_notification_ool(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	kern_return_t *result,
	io_object_t *notification )
{
	// Zero-pad the async reference to the full ASYNC_REF_COUNT before
	// handing it to the shared out-of-line implementation.
	io_async_ref_t zreference;

	if (referenceCnt > ASYNC_REF_COUNT) {
		return kIOReturnBadArgument;
	}
	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));

	return internal_io_service_add_notification_ool(main_port, notification_type,
	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
	    false, result, notification);
}
3056
/* Routine io_service_add_notification_ool_64 */
kern_return_t
is_io_service_add_notification_ool_64(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	kern_return_t *result,
	io_object_t *notification )
{
	// 64-bit client variant: zero-pad the async reference to the full
	// ASYNC_REF64_COUNT before handing it to the shared implementation.
	io_async_ref64_t zreference;

	if (referenceCnt > ASYNC_REF64_COUNT) {
		return kIOReturnBadArgument;
	}
	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));

	return internal_io_service_add_notification_ool(main_port, notification_type,
	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
	    true, result, notification);
}
3082
/* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	// Oldest single-reference form; forwards to the (now-unsupported)
	// string-based routine, so this returns kIOReturnUnsupported as well.
	return is_io_service_add_notification( main_port, notification_type,
	    matching, port, &ref, 1, notification );
}
3097
3098
/*
 * Shared implementation for the add_interest_notification MIG routines:
 * register interest (e.g. general/busy state) on one specific service,
 * delivering kIOServiceMessageNotificationType messages to `port`.
 * `reference` is the caller's opaque async reference, already zero-padded
 * by the wrappers; `client64` selects the 32- vs 64-bit wire format.
 */
static kern_return_t
internal_io_service_add_interest_notification(
	io_object_t _service,
	io_name_t type_of_interest,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceMessageUserNotification * userNotify = NULL;
	IONotifier * notify = NULL;
	const OSSymbol * sym;
	IOReturn err;

	CHECK( IOService, _service, service );

	err = kIOReturnNoResources;
	if ((sym = OSSymbol::withCString( type_of_interest ))) {
		do {
			userNotify = new IOServiceMessageUserNotification;

			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
			    reference, referenceSize, client64 )) {
				userNotify->release();
				userNotify = NULL;
			}
			if (!userNotify) {
				continue;
			}

			notify = service->registerInterest( sym,
			    &userNotify->_handler, userNotify );
			if (notify) {
				*notification = userNotify;
				userNotify->setNotification( notify );
				err = kIOReturnSuccess;
			} else {
				err = kIOReturnUnsupported;
			}
		} while (false);

		sym->release();
	}

	// On any failure, tear down the partially constructed notification so
	// the port send right and object reference are not leaked.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	return err;
}
3153
/* Routine io_service_add_interest_notification */
kern_return_t
is_io_service_add_interest_notification(
	io_object_t service,
	io_name_t type_of_interest,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	// Zero-pad the async reference to the full ASYNC_REF_COUNT before
	// handing it to the shared implementation (32-bit client format).
	io_async_ref_t zreference;

	if (referenceCnt > ASYNC_REF_COUNT) {
		return kIOReturnBadArgument;
	}
	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));

	return internal_io_service_add_interest_notification(service, type_of_interest,
	    port, &zreference[0], sizeof(io_async_ref_t), false, notification);
}
3175
3176 /* Routine io_service_add_interest_notification_64 */
3177 kern_return_t
is_io_service_add_interest_notification_64(io_object_t service,io_name_t type_of_interest,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3178 is_io_service_add_interest_notification_64(
3179 io_object_t service,
3180 io_name_t type_of_interest,
3181 mach_port_t wake_port,
3182 io_async_ref64_t reference,
3183 mach_msg_type_number_t referenceCnt,
3184 io_object_t *notification )
3185 {
3186 io_async_ref64_t zreference;
3187
3188 if (referenceCnt > ASYNC_REF64_COUNT) {
3189 return kIOReturnBadArgument;
3190 }
3191 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3192 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3193
3194 return internal_io_service_add_interest_notification(service, type_of_interest,
3195 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3196 }
3197
3198
3199 /* Routine io_service_acknowledge_notification */
3200 kern_return_t
is_io_service_acknowledge_notification(io_object_t _service,natural_t notify_ref,natural_t response)3201 is_io_service_acknowledge_notification(
3202 io_object_t _service,
3203 natural_t notify_ref,
3204 natural_t response )
3205 {
3206 CHECK( IOService, _service, service );
3207
3208 return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
3209 (IOOptionBits) response );
3210 }
3211
/* Routine io_connect_get_semaphore */
kern_return_t
is_io_connect_get_notification_semaphore(
	io_connect_t connection,
	natural_t notification_type,
	semaphore_t *semaphore )
{
	IOReturn ret;
	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();
	// Serialize against other client IPC while fetching the semaphore.
	client->ipcEnter(kIPCLockWrite);
	ret = client->getNotificationSemaphore((UInt32) notification_type,
	    semaphore );
	client->ipcExit(kIPCLockWrite);

	return ret;
}
3230
3231 /* Routine io_registry_get_root_entry */
3232 kern_return_t
is_io_registry_get_root_entry(mach_port_t main_port,io_object_t * root)3233 is_io_registry_get_root_entry(
3234 mach_port_t main_port,
3235 io_object_t *root )
3236 {
3237 IORegistryEntry * entry;
3238
3239 if (main_port != main_device_port) {
3240 return kIOReturnNotPrivileged;
3241 }
3242
3243 entry = IORegistryEntry::getRegistryRoot();
3244 if (entry) {
3245 entry->retain();
3246 }
3247 *root = entry;
3248
3249 return kIOReturnSuccess;
3250 }
3251
3252 /* Routine io_registry_create_iterator */
3253 kern_return_t
is_io_registry_create_iterator(mach_port_t main_port,io_name_t plane,uint32_t options,io_object_t * iterator)3254 is_io_registry_create_iterator(
3255 mach_port_t main_port,
3256 io_name_t plane,
3257 uint32_t options,
3258 io_object_t *iterator )
3259 {
3260 if (main_port != main_device_port) {
3261 return kIOReturnNotPrivileged;
3262 }
3263
3264 *iterator = IOUserIterator::withIterator(
3265 IORegistryIterator::iterateOver(
3266 IORegistryEntry::getPlane( plane ), options ));
3267
3268 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3269 }
3270
3271 /* Routine io_registry_entry_create_iterator */
3272 kern_return_t
is_io_registry_entry_create_iterator(io_object_t registry_entry,io_name_t plane,uint32_t options,io_object_t * iterator)3273 is_io_registry_entry_create_iterator(
3274 io_object_t registry_entry,
3275 io_name_t plane,
3276 uint32_t options,
3277 io_object_t *iterator )
3278 {
3279 CHECK( IORegistryEntry, registry_entry, entry );
3280
3281 *iterator = IOUserIterator::withIterator(
3282 IORegistryIterator::iterateOver( entry,
3283 IORegistryEntry::getPlane( plane ), options ));
3284
3285 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3286 }
3287
/* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// CHECKLOCKED binds both `iter` (the IORegistryIterator) and `oIter`
	// (the wrapping object carrying the lock used below).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Recurse into the current entry's children, under the iterator lock.
	IOLockLock(&oIter->lock);
	iter->enterEntry();
	IOLockUnlock(&oIter->lock);

	return kIOReturnSuccess;
}
3301
/* Routine io_registry_iterator_exit */
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool didIt;

	// CHECKLOCKED binds both `iter` (the IORegistryIterator) and `oIter`
	// (the wrapping object carrying the lock used below).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Pop back up one recursion level, under the iterator lock.
	IOLockLock(&oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(&oIter->lock);

	// exitEntry() returns false when already at the top level.
	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
3317
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path(
	mach_port_t main_port,
	io_string_t path,
	io_object_t *registry_entry )
{
	IORegistryEntry * entry;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	entry = IORegistryEntry::fromPath( path );

	// Compatibility fallback: for tasks opted into registry compatibility,
	// retry the lookup as a service match on kIOPathMatchKey with
	// kIOCompatibilityMatchKey set.
	if (!entry && IOTaskRegistryCompatibility(current_task())) {
		OSDictionary * matching;
		const OSObject * objects[2] = { kOSBooleanTrue, NULL };
		const OSSymbol * keys[2] = { gIOCompatibilityMatchKey, gIOPathMatchKey };

		objects[1] = OSString::withCStringNoCopy(path);
		matching = OSDictionary::withObjects(objects, keys, 2, 2);
		if (matching) {
			entry = IOService::copyMatchingService(matching);
		}
		OSSafeReleaseNULL(matching);
		OSSafeReleaseNULL(objects[1]);
	}

	// May store NULL: the routine still returns success and the client
	// receives a null object for an unresolved path.
	*registry_entry = entry;

	return kIOReturnSuccess;
}
3351
3352
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t main_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry * entry;
	vm_map_offset_t map_data;
	const char * cpath;
	IOReturn res;
	kern_return_t err;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry = NULL;
	res = err = KERN_SUCCESS;
	// The path arrives either inband (preferred) or as out-of-line vm copy data.
	if (path[0]) {
		cpath = path;
	} else {
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// Require the out-of-line string to be NUL terminated.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	// Release the kernel mapping of the out-of-line path, if one was made.
	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	// The MIG return is the vm status; the lookup status is reported to the
	// client through *result.
	if (KERN_SUCCESS != err) {
		res = err;
	}
	*registry_entry = entry;
	*result = res;

	return err;
}
3413
3414
3415 /* Routine io_registry_entry_in_plane */
3416 kern_return_t
is_io_registry_entry_in_plane(io_object_t registry_entry,io_name_t plane,boolean_t * inPlane)3417 is_io_registry_entry_in_plane(
3418 io_object_t registry_entry,
3419 io_name_t plane,
3420 boolean_t *inPlane )
3421 {
3422 CHECK( IORegistryEntry, registry_entry, entry );
3423
3424 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3425
3426 return kIOReturnSuccess;
3427 }
3428
3429
3430 /* Routine io_registry_entry_get_path */
3431 kern_return_t
is_io_registry_entry_get_path(io_object_t registry_entry,io_name_t plane,io_string_t path)3432 is_io_registry_entry_get_path(
3433 io_object_t registry_entry,
3434 io_name_t plane,
3435 io_string_t path )
3436 {
3437 int length;
3438 CHECK( IORegistryEntry, registry_entry, entry );
3439
3440 length = sizeof(io_string_t);
3441 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3442 return kIOReturnSuccess;
3443 } else {
3444 return kIOReturnBadArgument;
3445 }
3446 }
3447
/* Routine io_registry_entry_get_path */
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum { kMaxPath = 16384 };      // upper bound for the out-of-line fallback buffer
	IOReturn err;
	int length;
	char * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	*path_ool = NULL;
	*path_oolCnt = 0;
	// Fast path: try to fit the path into the inband reply buffer.
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		err = kIOReturnSuccess;
	} else {
		// Too long for inband: build the path in a kernel buffer and hand it
		// back as out-of-line data via copyoutkdata().
		length = kMaxPath;
		buf = IONewData(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			// getPath() updated length to the actual path size.
			*path_oolCnt = length;
			err = copyoutkdata(buf, length, path_ool);
		}
		if (buf) {
			// Free with the allocation size, not the (possibly smaller) path length.
			IODeleteData(buf, char, kMaxPath);
		}
	}

	return err;
}
3487
3488
3489 /* Routine io_registry_entry_get_name */
3490 kern_return_t
is_io_registry_entry_get_name(io_object_t registry_entry,io_name_t name)3491 is_io_registry_entry_get_name(
3492 io_object_t registry_entry,
3493 io_name_t name )
3494 {
3495 CHECK( IORegistryEntry, registry_entry, entry );
3496
3497 strncpy( name, entry->getName(), sizeof(io_name_t));
3498
3499 return kIOReturnSuccess;
3500 }
3501
3502 /* Routine io_registry_entry_get_name_in_plane */
3503 kern_return_t
is_io_registry_entry_get_name_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t name)3504 is_io_registry_entry_get_name_in_plane(
3505 io_object_t registry_entry,
3506 io_name_t planeName,
3507 io_name_t name )
3508 {
3509 const IORegistryPlane * plane;
3510 CHECK( IORegistryEntry, registry_entry, entry );
3511
3512 if (planeName[0]) {
3513 plane = IORegistryEntry::getPlane( planeName );
3514 } else {
3515 plane = NULL;
3516 }
3517
3518 strncpy( name, entry->getName( plane), sizeof(io_name_t));
3519
3520 return kIOReturnSuccess;
3521 }
3522
3523 /* Routine io_registry_entry_get_location_in_plane */
3524 kern_return_t
is_io_registry_entry_get_location_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t location)3525 is_io_registry_entry_get_location_in_plane(
3526 io_object_t registry_entry,
3527 io_name_t planeName,
3528 io_name_t location )
3529 {
3530 const IORegistryPlane * plane;
3531 CHECK( IORegistryEntry, registry_entry, entry );
3532
3533 if (planeName[0]) {
3534 plane = IORegistryEntry::getPlane( planeName );
3535 } else {
3536 plane = NULL;
3537 }
3538
3539 const char * cstr = entry->getLocation( plane );
3540
3541 if (cstr) {
3542 strncpy( location, cstr, sizeof(io_name_t));
3543 return kIOReturnSuccess;
3544 } else {
3545 return kIOReturnNotFound;
3546 }
3547 }
3548
3549 /* Routine io_registry_entry_get_registry_entry_id */
3550 kern_return_t
is_io_registry_entry_get_registry_entry_id(io_object_t registry_entry,uint64_t * entry_id)3551 is_io_registry_entry_get_registry_entry_id(
3552 io_object_t registry_entry,
3553 uint64_t *entry_id )
3554 {
3555 CHECK( IORegistryEntry, registry_entry, entry );
3556
3557 *entry_id = entry->getRegistryEntryID();
3558
3559 return kIOReturnSuccess;
3560 }
3561
3562
3563 static OSObject *
IOCopyPropertyCompatible(IORegistryEntry * regEntry,const char * name)3564 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3565 {
3566 OSObject * obj;
3567 OSObject * compatProperties;
3568 OSDictionary * props;
3569
3570 obj = regEntry->copyProperty(name);
3571 if (obj) {
3572 return obj;
3573 }
3574
3575 compatProperties = regEntry->copyProperty(gIOUserServicePropertiesKey);
3576 if (!compatProperties
3577 && IOTaskRegistryCompatibility(current_task())) {
3578 compatProperties = regEntry->copyProperty(gIOCompatibilityPropertiesKey);
3579 }
3580 if (compatProperties) {
3581 props = OSDynamicCast(OSDictionary, compatProperties);
3582 if (props) {
3583 obj = props->getObject(name);
3584 if (obj) {
3585 obj->retain();
3586 }
3587 }
3588 compatProperties->release();
3589 }
3590
3591 return obj;
3592 }
3593
/* Routine io_registry_entry_get_property */
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject * obj;
	OSData * data;
	OSString * str;
	OSBoolean * boo;
	OSNumber * off;
	UInt64 offsetBytes;
	unsigned int len = 0;
	const void * bytes = NULL;
	IOReturn ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then...
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		if (!data->isSerializable()) {
			// Non-serializable data is withheld: report zero length.
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		// Include the trailing NUL in the returned length.
		len = str->getLength() + 1;
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		// Booleans are returned as the literal strings "Yes" / "No".
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		// Numbers are returned as their raw little-end bytes, clamped to 8.
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		// On big-endian, the significant bytes sit at the end of the UInt64.
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		// Other property types have no raw-bytes representation.
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		// *dataCnt is in/out: caller's capacity in, actual length out.
		if (*dataCnt < len) {
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3666
3667
/* Routine io_registry_entry_get_property */
kern_return_t
is_io_registry_entry_get_property(
	io_object_t registry_entry,
	io_name_t property_name,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	unsigned int len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNotFound;
	}

	// Serialize the property and return the buffer as out-of-line data.
	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		// copyoutkdata() hands the serialized bytes to user space.
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3712
/* Routine io_registry_entry_get_property_recursively */
kern_return_t
is_io_registry_entry_get_property_recursively(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	unsigned int len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	// Search this entry and, per `options`, other entries in `plane`
	// (recursion handled inside copyProperty).
	obj = entry->copyProperty( property_name,
	    IORegistryEntry::getPlane( plane ), options );
	if (!obj) {
		return kIOReturnNotFound;
	}

	// Serialize the property and return the buffer as out-of-line data.
	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3760
/* Routine io_registry_entry_get_properties */
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// Legacy bulk property fetch is no longer supported; clients use
	// io_registry_entry_get_properties_bin instead.
	return kIOReturnUnsupported;
}
3770
3771 #if CONFIG_MACF
3772
// State shared with GetPropertiesEditor() while serializing a registry
// entry's property table under MAC filtering.
struct GetPropertiesEditorRef {
	kauth_cred_t cred;       // credential the per-property MAC check is made against
	IORegistryEntry * entry; // entry whose properties are being serialized
	OSCollection * root;     // top-level container; only its direct keys are filtered
};
3778
// OSSerialize editor callback: filters top-level properties through the MAC
// policy while a registry entry's property table is serialized.
static const LIBKERN_RETURNS_RETAINED OSMetaClassBase *
GetPropertiesEditor(void * reference,
    OSSerialize * s,
    OSCollection * container,
    const OSSymbol * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	// The first container seen is the top-level property table; remember it
	// so only its immediate keys are subject to the MAC check.
	if (!ref->root) {
		ref->root = container;
	}
	if (ref->root == container) {
		// Omit any property the MAC policy denies.
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			value = NULL;
		}
	}
	// Editor contract: return a retained value, or NULL to drop the entry.
	if (value) {
		value->retain();
	}
	return value;
}
3801
3802 #endif /* CONFIG_MACF */
3803
/* Routine io_registry_entry_get_properties_bin_buf */
kern_return_t
is_io_registry_entry_get_properties_bin_buf(
	io_object_t registry_entry,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t err = kIOReturnSuccess;
	unsigned int len;
	OSObject * compatProperties;
	OSSerialize * s;
	OSSerialize::Editor editor = NULL;
	void * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// If MAC policy requests per-property filtering, install an editor
	// callback consulted for each top-level key during serialization.
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor = &GetPropertiesEditor;
		editRef = &ref;
		ref.cred = kauth_cred_get();
		ref.entry = entry;
		ref.root = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}


	// Merge compatibility properties over the entry's own table before
	// serializing, hiding the raw compatibility dictionaries themselves.
	compatProperties = entry->copyProperty(gIOUserServicePropertiesKey);
	if (!compatProperties
	    && IOTaskRegistryCompatibility(current_task())) {
		compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey);
	}

	if (compatProperties) {
		OSDictionary * dict;

		dict = entry->dictionaryWithProperties();
		if (!dict) {
			err = kIOReturnNoMemory;
		} else {
			dict->removeObject(gIOUserServicePropertiesKey);
			dict->removeObject(gIOCompatibilityPropertiesKey);
			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
			if (!dict->serialize(s)) {
				err = kIOReturnUnsupported;
			}
			dict->release();
		}
		compatProperties->release();
	} else if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		// Copy straight into the caller-supplied buffer when it is large
		// enough; otherwise fall back to out-of-line data.
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	}
	s->release();

	return err;
}
3888
/* Routine io_registry_entry_get_properties_bin */
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	// No caller-supplied buffer: always return the serialization out-of-line.
	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
	           0, NULL, properties, propertiesCnt);
}
3899
/* Routine io_registry_entry_get_property_bin_buf */
kern_return_t
is_io_registry_entry_get_property_bin_buf(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	unsigned int len;
	OSObject * obj;
	const OSSymbol * sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	err = kIOReturnNotFound;
	if (gIORegistryEntryPropertyKeysKey == sym) {
		// Special key: return the list of property keys rather than a value.
		obj = entry->copyPropertyKeys();
	} else {
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			// Check this entry first, then walk the plane per `options`
			// until some entry yields the property.
			obj = IOCopyPropertyCompatible(entry, property_name);
			if (obj == NULL) {
				IORegistryIterator * iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
				if (iter) {
					while ((NULL == obj) && (entry = iter->getNextObject())) {
						OSObject * currentObj = IOCopyPropertyCompatible(entry, property_name);
#if CONFIG_MACF
						if (currentObj != NULL && 0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
							// Record that MAC hook blocked this entry and property, and continue to next entry
							err = kIOReturnNotPermitted;
							OSSafeReleaseNULL(currentObj);
							continue;
						}
#endif
						obj = currentObj;
					}
					iter->release();
				}
			}
		} else {
			obj = IOCopyPropertyCompatible(entry, property_name);
		}
		// Some properties are defined to be consumed on read.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		// err is kIOReturnNotFound, or kIOReturnNotPermitted if every match
		// was blocked by the MAC hook above.
		return err;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		// Copy into the caller's buffer when it fits; else return out-of-line.
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
4000
/* Routine io_registry_entry_get_property_bin */
kern_return_t
is_io_registry_entry_get_property_bin(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// No caller-supplied buffer: always return the serialization out-of-line.
	return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
	           property_name, options, 0, NULL, properties, propertiesCnt);
}
4014
4015
/* Routine io_registry_entry_set_properties */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject * obj;
	kern_return_t err;
	IOReturn res;
	vm_offset_t data;
	vm_map_offset_t map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	// Map the caller's out-of-line serialized property data into the kernel.
	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		// (the actual set-properties status is reported via *result)
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			IOService * service = OSDynamicCast(IOService, entry);
			OSDictionary * props = OSDynamicCast(OSDictionary, obj);
			// Optional allow-list: when the entry publishes
			// gIORegistryEntryAllowableSetPropertiesKey, every key being set
			// must appear in that array.
			OSObject * allowable = entry->copyProperty(gIORegistryEntryAllowableSetPropertiesKey);
			OSArray * allowableArray;

			if (!allowable) {
				res = kIOReturnSuccess;
			} else {
				if (!props) {
					res = kIOReturnNotPermitted;
				} else if (!(allowableArray = OSDynamicCast(OSArray, allowable))) {
					res = kIOReturnNotPermitted;
				} else {
					bool allFound __block, found __block;

					allFound = true;
					props->iterateObjects(^(const OSSymbol * key, OSObject * value) {
						// Linear scan of the allow-list for this key.
						found = false;
						for (unsigned int idx = 0; !found; idx++) {
							OSObject * next = allowableArray->getObject(idx);
							if (!next) {
								break;
							}
							found = next->isEqualTo(key);
						}
						allFound &= found;
						if (!found) {
							IOLog("IORegistryEntrySetProperties(%s, %s) disallowed due to " kIORegistryEntryAllowableSetPropertiesKey "\n",
							    entry->getName(), key->getCStringNoCopy());
						}
						// Stop iterating as soon as a disallowed key is seen.
						return !allFound;
					});
					res = allFound ? kIOReturnSuccess : kIOReturnBadArgument;
				}
			}
			if (kIOReturnSuccess == res) {
				IOUserClient *
				    client = OSDynamicCast(IOUserClient, entry);

				// User clients may opt into write-locked setProperties.
				if (client && client->defaultLockingSetProperties) {
					IORWLockWrite(&client->lock);
				}

				// Non-client entries may opt into running setProperties
				// inside the registry property action.
				if (!client && (kOSBooleanTrue == entry->getProperty(gIORegistryEntryDefaultLockingSetPropertiesKey))) {
					res = entry->runPropertyActionBlock(^IOReturn (void) {
						return entry->setProperties( obj );
					});
				} else {
					res = entry->setProperties( obj );
				}

				if (client && client->defaultLockingSetProperties) {
					IORWLockUnlock(&client->lock);
				}
				// Forward to a user-space (DriverKit) server when present.
				if (service && props && service->hasUserServer()) {
					res = service->UserSetProperties(props);
				}
			}
			OSSafeReleaseNULL(allowable);
		}
		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	*result = res;
	return err;
}
4129
4130 /* Routine io_registry_entry_get_child_iterator */
4131 kern_return_t
is_io_registry_entry_get_child_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4132 is_io_registry_entry_get_child_iterator(
4133 io_object_t registry_entry,
4134 io_name_t plane,
4135 io_object_t *iterator )
4136 {
4137 CHECK( IORegistryEntry, registry_entry, entry );
4138
4139 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
4140 IORegistryEntry::getPlane( plane )));
4141
4142 return kIOReturnSuccess;
4143 }
4144
4145 /* Routine io_registry_entry_get_parent_iterator */
4146 kern_return_t
is_io_registry_entry_get_parent_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4147 is_io_registry_entry_get_parent_iterator(
4148 io_object_t registry_entry,
4149 io_name_t plane,
4150 io_object_t *iterator)
4151 {
4152 CHECK( IORegistryEntry, registry_entry, entry );
4153
4154 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
4155 IORegistryEntry::getPlane( plane )));
4156
4157 return kIOReturnSuccess;
4158 }
4159
4160 /* Routine io_service_get_busy_state */
4161 kern_return_t
is_io_service_get_busy_state(io_object_t _service,uint32_t * busyState)4162 is_io_service_get_busy_state(
4163 io_object_t _service,
4164 uint32_t *busyState )
4165 {
4166 CHECK( IOService, _service, service );
4167
4168 *busyState = service->getBusyState();
4169
4170 return kIOReturnSuccess;
4171 }
4172
4173 /* Routine io_service_get_state */
4174 kern_return_t
is_io_service_get_state(io_object_t _service,uint64_t * state,uint32_t * busy_state,uint64_t * accumulated_busy_time)4175 is_io_service_get_state(
4176 io_object_t _service,
4177 uint64_t *state,
4178 uint32_t *busy_state,
4179 uint64_t *accumulated_busy_time )
4180 {
4181 CHECK( IOService, _service, service );
4182
4183 *state = service->getState();
4184 *busy_state = service->getBusyState();
4185 *accumulated_busy_time = service->getAccumulatedBusyTime();
4186
4187 return kIOReturnSuccess;
4188 }
4189
4190 /* Routine io_service_wait_quiet */
4191 kern_return_t
is_io_service_wait_quiet(io_object_t _service,mach_timespec_t wait_time)4192 is_io_service_wait_quiet(
4193 io_object_t _service,
4194 mach_timespec_t wait_time )
4195 {
4196 uint64_t timeoutNS;
4197
4198 CHECK( IOService, _service, service );
4199
4200 timeoutNS = wait_time.tv_sec;
4201 timeoutNS *= kSecondScale;
4202 timeoutNS += wait_time.tv_nsec;
4203
4204 return service->waitQuiet(timeoutNS);
4205 }
4206
/* Routine io_service_wait_quiet_with_options */
/*
 * Wait for the service to become quiet (busy count drops to zero), with a
 * timeout and option flags. The kIOWaitQuietPanicOnFailure option is only
 * honored for tasks holding the corresponding entitlement; for all other
 * callers it is logged and silently stripped before waiting.
 */
kern_return_t
is_io_service_wait_quiet_with_options(
	io_object_t _service,
	mach_timespec_t wait_time,
	uint32_t options )
{
	uint64_t timeoutNS;

	CHECK( IOService, _service, service );

	// Convert the timespec to nanoseconds: sec * 1e9 + nsec.
	timeoutNS = wait_time.tv_sec;
	timeoutNS *= kSecondScale;
	timeoutNS += wait_time.tv_nsec;

	if ((options & kIOWaitQuietPanicOnFailure) && !IOCurrentTaskHasEntitlement(kIOWaitQuietPanicsEntitlement)) {
		// Log the calling process by name so the missing entitlement is
		// attributable, then continue with a non-panicking wait.
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOServiceWaitQuietWithOptions(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);

		/* strip this option from the options before calling waitQuietWithOptions */
		options &= ~kIOWaitQuietPanicOnFailure;
	}

	return service->waitQuietWithOptions(timeoutNS, options);
}
4233
4234
4235 /* Routine io_service_request_probe */
4236 kern_return_t
is_io_service_request_probe(io_object_t _service,uint32_t options)4237 is_io_service_request_probe(
4238 io_object_t _service,
4239 uint32_t options )
4240 {
4241 CHECK( IOService, _service, service );
4242
4243 return service->requestProbe( options );
4244 }
4245
/* Routine io_service_get_authorization_id */
/*
 * Return the service's authorization ID. Requires administrator privilege;
 * only supported on macOS (kIOReturnUnsupported elsewhere).
 */
kern_return_t
is_io_service_get_authorization_id(
	io_object_t _service,
	uint64_t *authorization_id )
{
	kern_return_t kr;

	CHECK( IOService, _service, service );

	// Gate on administrator privilege for the calling task.
	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
	    kIOClientPrivilegeAdministrator );
	if (kIOReturnSuccess != kr) {
		return kr;
	}

#if defined(XNU_TARGET_OS_OSX)
	*authorization_id = service->getAuthorizationID();
#else /* defined(XNU_TARGET_OS_OSX) */
	// Non-macOS platforms: zero the out-parameter and report unsupported.
	*authorization_id = 0;
	kr = kIOReturnUnsupported;
#endif /* defined(XNU_TARGET_OS_OSX) */

	return kr;
}
4271
/* Routine io_service_set_authorization_id */
/*
 * Set the service's authorization ID. macOS only; other platforms return
 * kIOReturnUnsupported. NOTE(review): unlike the getter above, no explicit
 * privilege check appears here — presumably setAuthorizationID enforces its
 * own policy; confirm against IOService.
 */
kern_return_t
is_io_service_set_authorization_id(
	io_object_t _service,
	uint64_t authorization_id )
{
	CHECK( IOService, _service, service );

#if defined(XNU_TARGET_OS_OSX)
	return service->setAuthorizationID( authorization_id );
#else /* defined(XNU_TARGET_OS_OSX) */
	return kIOReturnUnsupported;
#endif /* defined(XNU_TARGET_OS_OSX) */
}
4286
/* Routine io_service_open_ndr */
/*
 * Open a connection (IOUserClient) to a service on behalf of owningTask.
 *
 * Validation pipeline, in order: MACF open-service check, creation via
 * newUserClient, per-client locking-property validation, entitlement check
 * (mandatory for IOUserClient2022), MACF open check, sandbox filter
 * resolution, and owner registration. Any failure closes and releases the
 * client.
 *
 * Returns the MIG-level error in the function result (always KERN_SUCCESS
 * here once past argument checks) and the IOKit-level result via *result;
 * *connection receives the new client or NULL.
 */
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = NULL;
	kern_return_t err = KERN_SUCCESS;
	IOReturn res = kIOReturnSuccess;
	OSDictionary * propertiesDict = NULL;
	bool disallowAccess = false;

	CHECK( IOService, _service, service );

	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	// The open must be performed by the task that will own the connection.
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

#if CONFIG_MACF
	if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
		return kIOReturnNotPermitted;
	}
#endif
	do{
		// Passing open-time properties is currently disabled (see the
		// #if 0 block below for the previous implementation).
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		{
			OSObject * obj;
			vm_offset_t data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		// Ask the service to create its user client for this connect type.
		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess && OSDynamicCast(IOUserClient, client) == NULL) {
			// client should always be a IOUserClient
			res = kIOReturnError;
		}

		if (res == kIOReturnSuccess) {
			// Ensure the client's expansion data is allocated before use.
			if (!client->reserved) {
				if (!client->reserve()) {
					client->clientClose();
					OSSafeReleaseNULL(client);
					res = kIOReturnNoMemory;
				}
			}
		}

		if (res == kIOReturnSuccess) {
			// Tag the client with the creating process's name for diagnostics.
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
			}
			const char * creatorNameCStr = creatorName ? creatorName->getCStringNoCopy() : "<unknown>";
			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
			// Shared instances may be opened concurrently, so the one-time
			// "opened" setup below is serialized under the owners lock.
			if (client->sharedInstance) {
				IOLockLock(gIOUserClientOwnersLock);
			}
			if (!client->opened) {
				client->opened = true;

				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
				{
					OSObject * obj;
					extern const OSSymbol * gIOSurfaceIdentifier;
					obj = client->getProperty(kIOUserClientDefaultLockingKey);
					bool hasProps = false;

					// IOUserClient2022 subclasses must declare all three
					// default-locking properties explicitly.
					client->uc2022 = (NULL != OSDynamicCast(IOUserClient2022, client));
					if (obj) {
						hasProps = true;
						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSetProperties = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSingleThreadExternalMethod = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					if (kIOReturnSuccess != res) {
						IOLog("IOUC %s requires kIOUserClientDefaultLockingKey, kIOUserClientDefaultLockingSetPropertiesKey, kIOUserClientDefaultLockingSingleThreadExternalMethodKey\n",
						    client->getMetaClass()->getClassName());
					}
					if (!hasProps) {
						// Legacy client with no locking properties: apply safe
						// defaults, except for IOSurface-dependent kexts.
						const OSMetaClass * meta;
						OSKext * kext;
						meta = client->getMetaClass();
						kext = meta->getKext();
						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
							client->defaultLocking = true;
							client->defaultLockingSetProperties = false;
							client->defaultLockingSingleThreadExternalMethod = false;
							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
						}
					}
				}
			}
			if (client->sharedInstance) {
				IOLockUnlock(gIOUserClientOwnersLock);
			}

			OSObject * requiredEntitlement = client->copyProperty(gIOUserClientEntitlementsKey);
			OSString * requiredEntitlementString = OSDynamicCast(OSString, requiredEntitlement);
			//If this is an IOUserClient2022, having kIOUserClientEntitlementsKey is mandatory.
			//If it has kIOUserClientEntitlementsKey, the value must be either kOSBooleanFalse or an OSString
			//If the value is kOSBooleanFalse, we allow access.
			//If the value is an OSString, we allow access if the task has the named entitlement
			if (client->uc2022) {
				if (!requiredEntitlement) {
					IOLog("IOUC %s missing " kIOUserClientEntitlementsKey " property\n",
					    client->getMetaClass()->getClassName());
					disallowAccess = true;
				} else if (!requiredEntitlementString && requiredEntitlement != kOSBooleanFalse) {
					IOLog("IOUC %s had " kIOUserClientEntitlementsKey "with value not boolean false or string\n", client->getMetaClass()->getClassName());
					disallowAccess = true;
				}
			}

			if (requiredEntitlement && disallowAccess == false) {
				if (kOSBooleanFalse == requiredEntitlement) {
					// allow
					disallowAccess = false;
				} else {
					disallowAccess = !IOTaskHasEntitlement(owningTask, requiredEntitlementString->getCStringNoCopy());
					if (disallowAccess) {
						IOLog("IOUC %s missing entitlement in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			OSSafeReleaseNULL(requiredEntitlement);

			if (disallowAccess) {
				res = kIOReturnNotPrivileged;
			}
#if CONFIG_MACF
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				IOLog("IOUC %s failed MACF in process %s\n",
				    client->getMetaClass()->getClassName(), creatorNameCStr);
				res = kIOReturnNotPermitted;
			}
#endif

			// Resolve a sandbox filter policy for this task/connection if a
			// resolver is registered and none is cached yet.
			if ((kIOReturnSuccess == res)
			    && gIOUCFilterCallbacks
			    && gIOUCFilterCallbacks->io_filter_resolver) {
				io_filter_policy_t filterPolicy;
				filterPolicy = client->filterForTask(owningTask, 0);
				if (!filterPolicy) {
					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
					if (kIOReturnUnsupported == res) {
						// No policy for this client type: not an error.
						res = kIOReturnSuccess;
					} else if (kIOReturnSuccess == res) {
						client->filterForTask(owningTask, filterPolicy);
					} else {
						IOLog("IOUC %s failed sandbox in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}
			OSSafeReleaseNULL(creatorName);

			if (kIOReturnSuccess != res) {
				// Any validation failure: close and drop the client, and
				// release the termination deferral taken during creation.
				IOStatisticsClientCall();
				client->clientClose();
				client->setTerminateDefer(service, false);
				client->release();
				client = NULL;
				break;
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	// MIG result: *result carries the IOKit status, *connection the client
	// (or NULL); err is the transport-level status.
	*connection = client;
	*result = res;

	return err;
}
4522
/* Routine io_service_close */
/*
 * Close a connection. Shared-instance clients may be closed repeatedly;
 * exclusive clients use an atomic compare-and-swap on the `closed` flag so
 * clientClose() runs at most once, and duplicate closes are logged and
 * ignored.
 */
kern_return_t
is_io_service_close(
	io_connect_t connection )
{
	// A port that actually names a mappings OSSet (see
	// is_io_connect_map_memory_into_task) is not a real client; nothing to do.
	OSSet * mappings;
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	// OSCompareAndSwap8 ensures only the first close of an exclusive client
	// proceeds; shared instances always proceed.
	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		client->ipcEnter(kIPCLockWrite);
		client->clientClose();
		client->ipcExit(kIPCLockWrite);
	} else {
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
4548
4549 /* Routine io_connect_get_service */
4550 kern_return_t
is_io_connect_get_service(io_connect_t connection,io_object_t * service)4551 is_io_connect_get_service(
4552 io_connect_t connection,
4553 io_object_t *service )
4554 {
4555 IOService * theService;
4556
4557 CHECK( IOUserClient, connection, client );
4558
4559 client->ipcEnter(kIPCLockNone);
4560
4561 theService = client->getService();
4562 if (theService) {
4563 theService->retain();
4564 }
4565
4566 client->ipcExit(kIPCLockNone);
4567
4568 *service = theService;
4569
4570 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4571 }
4572
4573 /* Routine io_connect_set_notification_port */
4574 kern_return_t
is_io_connect_set_notification_port(io_connect_t connection,uint32_t notification_type,mach_port_t port,uint32_t reference)4575 is_io_connect_set_notification_port(
4576 io_connect_t connection,
4577 uint32_t notification_type,
4578 mach_port_t port,
4579 uint32_t reference)
4580 {
4581 kern_return_t ret;
4582 CHECK( IOUserClient, connection, client );
4583
4584 IOStatisticsClientCall();
4585
4586 client->ipcEnter(kIPCLockWrite);
4587 ret = client->registerNotificationPort( port, notification_type,
4588 (io_user_reference_t) reference );
4589 client->ipcExit(kIPCLockWrite);
4590
4591 return ret;
4592 }
4593
4594 /* Routine io_connect_set_notification_port */
4595 kern_return_t
is_io_connect_set_notification_port_64(io_connect_t connection,uint32_t notification_type,mach_port_t port,io_user_reference_t reference)4596 is_io_connect_set_notification_port_64(
4597 io_connect_t connection,
4598 uint32_t notification_type,
4599 mach_port_t port,
4600 io_user_reference_t reference)
4601 {
4602 kern_return_t ret;
4603 CHECK( IOUserClient, connection, client );
4604
4605 IOStatisticsClientCall();
4606
4607 client->ipcEnter(kIPCLockWrite);
4608 ret = client->registerNotificationPort( port, notification_type,
4609 reference );
4610 client->ipcExit(kIPCLockWrite);
4611
4612 return ret;
4613 }
4614
/* Routine io_connect_map_memory_into_task */
/*
 * Map client memory of the given type into into_task. On success *address
 * and *size describe the mapping. Ownership of the IOMemoryMap reference:
 * for shared instances or cross-task maps a send right is pushed to the
 * owning task (so the map can be cleaned up with the task); otherwise the
 * map is parked in the client's `mappings` set.
 */
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
	)
{
	IOReturn err;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();

	// Honor the client's default-locking policy around the mapping call.
	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			map->release();
		} else {
			// keep it with the user client
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);

	return err;
}
4676
4677 /* Routine is_io_connect_map_memory */
4678 kern_return_t
is_io_connect_map_memory(io_object_t connect,uint32_t type,task_t task,uint32_t * mapAddr,uint32_t * mapSize,uint32_t flags)4679 is_io_connect_map_memory(
4680 io_object_t connect,
4681 uint32_t type,
4682 task_t task,
4683 uint32_t * mapAddr,
4684 uint32_t * mapSize,
4685 uint32_t flags )
4686 {
4687 IOReturn err;
4688 mach_vm_address_t address;
4689 mach_vm_size_t size;
4690
4691 address = SCALAR64(*mapAddr);
4692 size = SCALAR64(*mapSize);
4693
4694 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4695
4696 *mapAddr = SCALAR32(address);
4697 *mapSize = SCALAR32(size);
4698
4699 return err;
4700 }
4701 } /* extern "C" */
4702
/*
 * Remove and return the first mapping in this client's `mappings` set whose
 * backing descriptor is `mem`. The returned map carries a reference the
 * caller must release; returns NULL if no matching mapping exists. The
 * `mappings` set is protected by gIOObjectPortLock.
 */
IOMemoryMap *
IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
{
	OSIterator * iter;
	IOMemoryMap * map = NULL;

	IOLockLock(gIOObjectPortLock);

	// NOTE(review): withCollection(NULL) presumably returns NULL when the
	// client has no mappings set, making this a no-op — confirm.
	iter = OSCollectionIterator::withCollection(mappings);
	if (iter) {
		while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
			if (mem == map->getMemoryDescriptor()) {
				// Retain before removal so the set dropping its
				// reference cannot free the map.
				map->retain();
				mappings->removeObject(map);
				break;
			}
		}
		iter->release();
	}

	IOLockUnlock(gIOObjectPortLock);

	return map;
}
4727
4728 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
/*
 * Unmap a previously mapped client memory type at `address` in from_task.
 * Re-creates a reference mapping (kIOMapReference) to find the existing map,
 * then tears down the map's send right (cross-task / shared instance) or
 * the port object association (same-task), and finally unmaps.
 */
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();

	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		// kIOMapReference makes createMappingInTask look up the existing
		// mapping at `address` rather than creating a new one.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			// Forget the map in the client's mappings set, if present.
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance;
			if (is_shared_instance_or_from_current_task) {
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				// Drop both the reference-mapping right and the original
				// map right (-2) held by the task, then report success.
				map->userClientUnmap();
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (!is_shared_instance_or_from_current_task) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);

	return err;
}
4793
4794 kern_return_t
is_io_connect_unmap_memory(io_object_t connect,uint32_t type,task_t task,uint32_t mapAddr)4795 is_io_connect_unmap_memory(
4796 io_object_t connect,
4797 uint32_t type,
4798 task_t task,
4799 uint32_t mapAddr )
4800 {
4801 IOReturn err;
4802 mach_vm_address_t address;
4803
4804 address = SCALAR64(mapAddr);
4805
4806 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4807
4808 return err;
4809 }
4810
4811
4812 /* Routine io_connect_add_client */
4813 kern_return_t
is_io_connect_add_client(io_connect_t connection,io_object_t connect_to)4814 is_io_connect_add_client(
4815 io_connect_t connection,
4816 io_object_t connect_to)
4817 {
4818 CHECK( IOUserClient, connection, client );
4819 CHECK( IOUserClient, connect_to, to );
4820
4821 IOReturn ret;
4822
4823 IOStatisticsClientCall();
4824
4825 client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4826 ret = client->connectClient( to );
4827 client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4828
4829 return ret;
4830 }
4831
4832
/* Routine io_connect_set_properties */
/*
 * Set properties on a connection. A user client is a registry entry, so
 * this simply forwards to the registry-entry property setter.
 */
kern_return_t
is_io_connect_set_properties(
	io_connect_t connection,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
}
4843
/* Routine io_user_client_method */
/*
 * Invoke an external method with variable-length structure output. Marshals
 * the MIG arguments into IOExternalMethodArguments, applies the sandbox
 * filter (if any), calls the method, then copies any OSSerialize/OSData
 * variable output out to the caller's VM as an out-of-line buffer.
 */
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	OSObject * structureVariableOutputData = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// This entry point is synchronous: no async wake port or reference.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line input must be strictly larger than the inband limit;
	// anything smaller should have been sent inband.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		// Copy-on-write mapping of the caller's buffer; the method reads it.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Pre-zero the scalar output so stale stack data can't leak to userspace.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;

	// Apply the per-task sandbox filter before dispatching the method.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}

	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod(selector, &args);
	}

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	// Copy the variable-length output (OSSerialize text or OSData bytes)
	// out to the caller; any other object type is treated as underrun.
	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData * data;
		unsigned int len;

		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			data->clipForCopyout();
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
4950
/* Routine io_user_client_method */
/*
 * Invoke an external method with fixed-size (possibly out-of-line)
 * structure input/output. Marshals MIG arguments into
 * IOExternalMethodArguments, applies the sandbox filter, and dispatches to
 * callExternalMethod.
 */
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Synchronous call: no async wake port, no variable-length output.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line buffers must be strictly larger than the inband limit,
	// and the output size must fit the 32-bit descriptor-size field.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		// Copy-on-write mapping of the caller's input buffer.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Pre-zero scalar output so stale kernel data can't leak to userspace.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size
	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
	    : 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	// Apply the per-task sandbox filter before dispatching the method.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	// Report back the counts/sizes the method actually produced.
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5053
/* Routine io_async_user_client_method */
/*
 * Invoke an external method asynchronously. reference[0] is overwritten
 * with the wake port (tagged with kIOUCAsync64Flag for 64-bit address
 * spaces); the remaining reference entries are the caller's completion
 * context. Otherwise marshals and dispatches like is_io_connect_method.
 */
kern_return_t
is_io_connect_async_method
(
	io_connect_t connection,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t * ool_output_size
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	// Slot 0 of the async reference is reserved for the wake port.
	if (referenceCnt < 1) {
		return kIOReturnBadArgument;
	}

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	// Stash the wake port in reference[0]; flag 64-bit callers so the
	// completion path knows how to format the notification.
	reference[0] = (io_user_reference_t) wake_port;
	if (vm_map_is_64bit(get_task_map(current_task()))) {
		reference[0] |= kIOUCAsync64Flag;
	}

	args.selector = selector;

	args.asyncWakePort = wake_port;
	args.asyncReference = reference;
	args.asyncReferenceCount = referenceCnt;

	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line buffers must be strictly larger than the inband limit,
	// and the output size must fit the 32-bit descriptor-size field.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		// Copy-on-write mapping of the caller's input buffer.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Pre-zero scalar output so stale kernel data can't leak to userspace.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	// Apply the per-task sandbox filter before dispatching the method.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_async_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	// Report back the counts/sizes the method actually produced.
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5167
5168 /* Routine io_connect_method_scalarI_scalarO */
5169 kern_return_t
is_io_connect_method_scalarI_scalarO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5170 is_io_connect_method_scalarI_scalarO(
5171 io_object_t connect,
5172 uint32_t index,
5173 io_scalar_inband_t input,
5174 mach_msg_type_number_t inputCount,
5175 io_scalar_inband_t output,
5176 mach_msg_type_number_t * outputCount )
5177 {
5178 IOReturn err;
5179 uint32_t i;
5180 io_scalar_inband64_t _input;
5181 io_scalar_inband64_t _output;
5182
5183 mach_msg_type_number_t struct_outputCnt = 0;
5184 mach_vm_size_t ool_output_size = 0;
5185
5186 bzero(&_output[0], sizeof(_output));
5187 for (i = 0; i < inputCount; i++) {
5188 _input[i] = SCALAR64(input[i]);
5189 }
5190
5191 err = is_io_connect_method(connect, index,
5192 _input, inputCount,
5193 NULL, 0,
5194 0, 0,
5195 NULL, &struct_outputCnt,
5196 _output, outputCount,
5197 0, &ool_output_size);
5198
5199 for (i = 0; i < *outputCount; i++) {
5200 output[i] = SCALAR32(_output[i]);
5201 }
5202
5203 return err;
5204 }
5205
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	// Legacy dispatch shim: invokes an old-style IOExternalMethod table
	// entry (an IOMethod taking exactly six untyped arguments) for the
	// scalar-input / scalar-output case.  The first `inputCount` of the
	// six slots carry the input scalars (via ARG32); the remaining slots
	// carry pointers into _output for the method to fill in.
	IOMethod func;
	io_scalar_inband_t _output;
	IOReturn err;
	err = kIOReturnBadArgument;

	// Pre-zero the output staging buffer so the copy-out loop below sees
	// deterministic values even if the method fails or writes fewer
	// outputs than expected.
	bzero(&_output[0], sizeof(_output));
	do {
		// The table entry specifies exact input (count0) and output
		// (count1) scalar counts; reject anything else from the caller.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;       // do { } while (false): jump to the common exit
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Dispatch on the number of input scalars; the slot layout
		// (inputs first, then output pointers) must match what legacy
		// drivers registered in their method tables.
		switch (inputCount) {
		case 6:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	// Copy staged outputs back to the caller via SCALAR32.  On the count-
	// mismatch failure paths _output is all zeroes from the bzero above.
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5281
5282 /* Routine io_async_method_scalarI_scalarO */
5283 kern_return_t
is_io_async_method_scalarI_scalarO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5284 is_io_async_method_scalarI_scalarO(
5285 io_object_t connect,
5286 mach_port_t wake_port,
5287 io_async_ref_t reference,
5288 mach_msg_type_number_t referenceCnt,
5289 uint32_t index,
5290 io_scalar_inband_t input,
5291 mach_msg_type_number_t inputCount,
5292 io_scalar_inband_t output,
5293 mach_msg_type_number_t * outputCount )
5294 {
5295 IOReturn err;
5296 uint32_t i;
5297 io_scalar_inband64_t _input;
5298 io_scalar_inband64_t _output;
5299 io_async_ref64_t _reference;
5300
5301 if (referenceCnt > ASYNC_REF64_COUNT) {
5302 return kIOReturnBadArgument;
5303 }
5304 bzero(&_output[0], sizeof(_output));
5305 for (i = 0; i < referenceCnt; i++) {
5306 _reference[i] = REF64(reference[i]);
5307 }
5308 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5309
5310 mach_msg_type_number_t struct_outputCnt = 0;
5311 mach_vm_size_t ool_output_size = 0;
5312
5313 for (i = 0; i < inputCount; i++) {
5314 _input[i] = SCALAR64(input[i]);
5315 }
5316
5317 err = is_io_connect_async_method(connect,
5318 wake_port, _reference, referenceCnt,
5319 index,
5320 _input, inputCount,
5321 NULL, 0,
5322 0, 0,
5323 NULL, &struct_outputCnt,
5324 _output, outputCount,
5325 0, &ool_output_size);
5326
5327 for (i = 0; i < *outputCount; i++) {
5328 output[i] = SCALAR32(_output[i]);
5329 }
5330
5331 return err;
5332 }
5333 /* Routine io_async_method_scalarI_structureO */
5334 kern_return_t
is_io_async_method_scalarI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5335 is_io_async_method_scalarI_structureO(
5336 io_object_t connect,
5337 mach_port_t wake_port,
5338 io_async_ref_t reference,
5339 mach_msg_type_number_t referenceCnt,
5340 uint32_t index,
5341 io_scalar_inband_t input,
5342 mach_msg_type_number_t inputCount,
5343 io_struct_inband_t output,
5344 mach_msg_type_number_t * outputCount )
5345 {
5346 uint32_t i;
5347 io_scalar_inband64_t _input;
5348 io_async_ref64_t _reference;
5349
5350 if (referenceCnt > ASYNC_REF64_COUNT) {
5351 return kIOReturnBadArgument;
5352 }
5353 for (i = 0; i < referenceCnt; i++) {
5354 _reference[i] = REF64(reference[i]);
5355 }
5356 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5357
5358 mach_msg_type_number_t scalar_outputCnt = 0;
5359 mach_vm_size_t ool_output_size = 0;
5360
5361 for (i = 0; i < inputCount; i++) {
5362 _input[i] = SCALAR64(input[i]);
5363 }
5364
5365 return is_io_connect_async_method(connect,
5366 wake_port, _reference, referenceCnt,
5367 index,
5368 _input, inputCount,
5369 NULL, 0,
5370 0, 0,
5371 output, outputCount,
5372 NULL, &scalar_outputCnt,
5373 0, &ool_output_size);
5374 }
5375
5376 /* Routine io_async_method_scalarI_structureI */
5377 kern_return_t
is_io_async_method_scalarI_structureI(io_connect_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5378 is_io_async_method_scalarI_structureI(
5379 io_connect_t connect,
5380 mach_port_t wake_port,
5381 io_async_ref_t reference,
5382 mach_msg_type_number_t referenceCnt,
5383 uint32_t index,
5384 io_scalar_inband_t input,
5385 mach_msg_type_number_t inputCount,
5386 io_struct_inband_t inputStruct,
5387 mach_msg_type_number_t inputStructCount )
5388 {
5389 uint32_t i;
5390 io_scalar_inband64_t _input;
5391 io_async_ref64_t _reference;
5392
5393 if (referenceCnt > ASYNC_REF64_COUNT) {
5394 return kIOReturnBadArgument;
5395 }
5396 for (i = 0; i < referenceCnt; i++) {
5397 _reference[i] = REF64(reference[i]);
5398 }
5399 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5400
5401 mach_msg_type_number_t scalar_outputCnt = 0;
5402 mach_msg_type_number_t inband_outputCnt = 0;
5403 mach_vm_size_t ool_output_size = 0;
5404
5405 for (i = 0; i < inputCount; i++) {
5406 _input[i] = SCALAR64(input[i]);
5407 }
5408
5409 return is_io_connect_async_method(connect,
5410 wake_port, _reference, referenceCnt,
5411 index,
5412 _input, inputCount,
5413 inputStruct, inputStructCount,
5414 0, 0,
5415 NULL, &inband_outputCnt,
5416 NULL, &scalar_outputCnt,
5417 0, &ool_output_size);
5418 }
5419
5420 /* Routine io_async_method_structureI_structureO */
5421 kern_return_t
is_io_async_method_structureI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5422 is_io_async_method_structureI_structureO(
5423 io_object_t connect,
5424 mach_port_t wake_port,
5425 io_async_ref_t reference,
5426 mach_msg_type_number_t referenceCnt,
5427 uint32_t index,
5428 io_struct_inband_t input,
5429 mach_msg_type_number_t inputCount,
5430 io_struct_inband_t output,
5431 mach_msg_type_number_t * outputCount )
5432 {
5433 uint32_t i;
5434 mach_msg_type_number_t scalar_outputCnt = 0;
5435 mach_vm_size_t ool_output_size = 0;
5436 io_async_ref64_t _reference;
5437
5438 if (referenceCnt > ASYNC_REF64_COUNT) {
5439 return kIOReturnBadArgument;
5440 }
5441 for (i = 0; i < referenceCnt; i++) {
5442 _reference[i] = REF64(reference[i]);
5443 }
5444 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5445
5446 return is_io_connect_async_method(connect,
5447 wake_port, _reference, referenceCnt,
5448 index,
5449 NULL, 0,
5450 input, inputCount,
5451 0, 0,
5452 output, outputCount,
5453 NULL, &scalar_outputCnt,
5454 0, &ool_output_size);
5455 }
5456
5457
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	// Legacy async dispatch shim: invokes an old-style
	// IOExternalAsyncMethod table entry (an IOAsyncMethod taking the
	// async reference plus six untyped arguments) for the scalar-in /
	// scalar-out case.  asyncWakePort is not referenced here; only the
	// narrowed reference array is forwarded to the method.
	IOAsyncMethod func;
	uint32_t i;
	io_scalar_inband_t _output;
	IOReturn err;
	io_async_ref_t reference;

	// Pre-zero output staging; narrow the 64-bit async reference entries
	// to the legacy 32-bit io_async_ref_t via REF32.
	bzero(&_output[0], sizeof(_output));
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	do {
		// Exact scalar counts are mandated by the method table entry.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;       // do { } while (false): jump to the common exit
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Slot layout: reference first, then inputCount scalars (ARG32),
		// then pointers into _output for the remaining slots.
		switch (inputCount) {
		case 6:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	// Copy staged outputs back via SCALAR32; zeroes on the failure paths.
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5549
5550
5551 /* Routine io_connect_method_scalarI_structureO */
5552 kern_return_t
is_io_connect_method_scalarI_structureO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5553 is_io_connect_method_scalarI_structureO(
5554 io_object_t connect,
5555 uint32_t index,
5556 io_scalar_inband_t input,
5557 mach_msg_type_number_t inputCount,
5558 io_struct_inband_t output,
5559 mach_msg_type_number_t * outputCount )
5560 {
5561 uint32_t i;
5562 io_scalar_inband64_t _input;
5563
5564 mach_msg_type_number_t scalar_outputCnt = 0;
5565 mach_vm_size_t ool_output_size = 0;
5566
5567 for (i = 0; i < inputCount; i++) {
5568 _input[i] = SCALAR64(input[i]);
5569 }
5570
5571 return is_io_connect_method(connect, index,
5572 _input, inputCount,
5573 NULL, 0,
5574 0, 0,
5575 output, outputCount,
5576 NULL, &scalar_outputCnt,
5577 0, &ool_output_size);
5578 }
5579
kern_return_t
shim_io_connect_method_scalarI_structureO(

	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	// Legacy dispatch shim for scalar-input / struct-output methods.
	// After the scalar slots (ARG32), the output buffer and (except for
	// the 5-scalar case, which uses all six slots) the outputCount
	// pointer are passed so the method can report the bytes produced;
	// unused slots are padded with NULL.
	IOMethod func;
	IOReturn err;

	err = kIOReturnBadArgument;

	do {
		// count0 must match exactly; count1 may be
		// kIOUCVariableStructureSize, meaning any output size is allowed.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;       // do { } while (false): jump to the exit
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			// All six slots used: five scalars + output buffer only.
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5644
5645
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	// Legacy async dispatch shim for scalar-input / struct-output
	// methods.  Mirrors shim_io_connect_method_scalarI_structureO but
	// passes the narrowed async reference as the first argument.
	// asyncWakePort is not referenced in this function.
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	// Narrow the 64-bit async reference entries via REF32.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do {
		// count0 exact; count1 exact or kIOUCVariableStructureSize.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;       // do { } while (false): jump to the exit
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			// All six slots used: five scalars + output buffer only.
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5723
5724 /* Routine io_connect_method_scalarI_structureI */
5725 kern_return_t
is_io_connect_method_scalarI_structureI(io_connect_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5726 is_io_connect_method_scalarI_structureI(
5727 io_connect_t connect,
5728 uint32_t index,
5729 io_scalar_inband_t input,
5730 mach_msg_type_number_t inputCount,
5731 io_struct_inband_t inputStruct,
5732 mach_msg_type_number_t inputStructCount )
5733 {
5734 uint32_t i;
5735 io_scalar_inband64_t _input;
5736
5737 mach_msg_type_number_t scalar_outputCnt = 0;
5738 mach_msg_type_number_t inband_outputCnt = 0;
5739 mach_vm_size_t ool_output_size = 0;
5740
5741 for (i = 0; i < inputCount; i++) {
5742 _input[i] = SCALAR64(input[i]);
5743 }
5744
5745 return is_io_connect_method(connect, index,
5746 _input, inputCount,
5747 inputStruct, inputStructCount,
5748 0, 0,
5749 NULL, &inband_outputCnt,
5750 NULL, &scalar_outputCnt,
5751 0, &ool_output_size);
5752 }
5753
kern_return_t
shim_io_connect_method_scalarI_structureI(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	// Legacy dispatch shim for scalar-input / struct-input methods (no
	// outputs).  After the scalar slots (ARG32), the input structure and
	// its byte count are passed; unused slots are padded with NULL.
	IOMethod func;
	IOReturn err = kIOReturnBadArgument;

	do{
		// count0 exact; count1 (input structure size) exact or
		// kIOUCVariableStructureSize (any size accepted).
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;       // do { } while (false): jump to the exit
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			// All six slots used: five scalars + input structure only.
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			// NOTE(review): input[2] is passed as a raw pointer-sized
			// value here, unlike the ARG32 truncation used for the other
			// scalar slots.  This asymmetry is long-standing upstream
			// behavior — confirm before "fixing", since legacy drivers
			// may rely on receiving the full value.
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5819
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	// Legacy async dispatch shim for scalar-input / struct-input methods
	// (no outputs).  Mirrors shim_io_connect_method_scalarI_structureI
	// but passes the narrowed async reference as the first argument.
	// asyncWakePort is not referenced in this function.
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err = kIOReturnBadArgument;
	io_async_ref_t reference;

	// Narrow the 64-bit async reference entries via REF32.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	do{
		// count0 exact; count1 exact or kIOUCVariableStructureSize.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;       // do { } while (false): jump to the exit
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			// All six slots used: five scalars + input structure only.
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5900
5901 /* Routine io_connect_method_structureI_structureO */
5902 kern_return_t
is_io_connect_method_structureI_structureO(io_object_t connect,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5903 is_io_connect_method_structureI_structureO(
5904 io_object_t connect,
5905 uint32_t index,
5906 io_struct_inband_t input,
5907 mach_msg_type_number_t inputCount,
5908 io_struct_inband_t output,
5909 mach_msg_type_number_t * outputCount )
5910 {
5911 mach_msg_type_number_t scalar_outputCnt = 0;
5912 mach_vm_size_t ool_output_size = 0;
5913
5914 return is_io_connect_method(connect, index,
5915 NULL, 0,
5916 input, inputCount,
5917 0, 0,
5918 output, outputCount,
5919 NULL, &scalar_outputCnt,
5920 0, &ool_output_size);
5921 }
5922
5923 kern_return_t
shim_io_connect_method_structureI_structureO(IOExternalMethod * method,IOService * object,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,IOByteCount * outputCount)5924 shim_io_connect_method_structureI_structureO(
5925 IOExternalMethod * method,
5926 IOService * object,
5927 io_struct_inband_t input,
5928 mach_msg_type_number_t inputCount,
5929 io_struct_inband_t output,
5930 IOByteCount * outputCount )
5931 {
5932 IOMethod func;
5933 IOReturn err = kIOReturnBadArgument;
5934
5935 do{
5936 if ((kIOUCVariableStructureSize != method->count0)
5937 && (inputCount != method->count0)) {
5938 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5939 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5940 continue;
5941 }
5942 if ((kIOUCVariableStructureSize != method->count1)
5943 && (*outputCount != method->count1)) {
5944 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5945 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5946 continue;
5947 }
5948
5949 func = method->func;
5950
5951 if (method->count1) {
5952 if (method->count0) {
5953 err = (object->*func)( input, output,
5954 (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5955 } else {
5956 err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
5957 }
5958 } else {
5959 err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5960 }
5961 }while (false);
5962
5963
5964 return err;
5965 }
5966
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	// Legacy async dispatch shim for struct-input / struct-output
	// methods.  Mirrors shim_io_connect_method_structureI_structureO but
	// passes the narrowed async reference as the first argument.
	// asyncWakePort is not referenced in this function.
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	// Narrow the 64-bit async reference entries via REF32.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do{
		// Sizes must match the table entry exactly unless the entry is
		// kIOUCVariableStructureSize (any size accepted).
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;       // do { } while (false): jump to the exit
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Argument layout depends on which structures the entry declares.
		if (method->count1) {
			if (method->count0) {
				// Both input and output structures.
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				// Output-only.
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			// Input-only.
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
6022
6023 /* Routine io_catalog_send_data */
kern_return_t
is_io_catalog_send_data(
	mach_port_t main_port,
	uint32_t flag,
	io_buf_ptr_t inData,
	mach_msg_type_number_t inDataCount,
	kern_return_t * result)
{
	// Allow sending catalog data if there is no kextd and the kernel is DEVELOPMENT || DEBUG
#if NO_KEXTD && !(DEVELOPMENT || DEBUG)
	return kIOReturnNotPrivileged;
#else /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
	OSObject * obj = NULL;
	vm_offset_t data;
	kern_return_t kr = kIOReturnError;

	//printf("io_catalog_send_data called. flag: %d\n", flag);

	// Only the main device port may modify the catalogue.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	// Every flag except the kextd-notification/removed ones requires a payload.
	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
	    flag != kIOCatalogKextdActive &&
	    flag != kIOCatalogKextdFinishedLaunching) &&
	    (!inData || !inDataCount)) {
		return kIOReturnBadArgument;
	}

	if (!IOCurrentTaskHasEntitlement(kIOCatalogManagementEntitlement)) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);
		// For now, fake success to not break applications relying on this function succeeding.
		// See <rdar://problem/32554970> for more details.
		return kIOReturnSuccess;
	}

	if (inData) {
		vm_map_offset_t map_data;

		// Bound the serialized payload size before mapping it in.
		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
			return kIOReturnMessageTooLarge;
		}

		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		// must return success after vm_map_copyout() succeeds
		// (the copy object has been consumed; MIG must not free it again)

		if (inDataCount) {
			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
			vm_deallocate( kernel_map, data, inDataCount );
			if (!obj) {
				// Report the failure through *result; the MIG return must
				// still be KERN_SUCCESS per the copyout contract above.
				*result = kIOReturnNoMemory;
				return KERN_SUCCESS;
			}
		}
	}

	// NOTE(review): on the success paths below kr keeps the KERN_SUCCESS value
	// set by vm_map_copyout() above; it is only overwritten on failure.
	switch (flag) {
	case kIOCatalogResetDrivers:
	case kIOCatalogResetDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->resetAndAddDrivers(array,
			    flag == kIOCatalogResetDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogAddDrivers:
	case kIOCatalogAddDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->addDrivers( array,
			    flag == kIOCatalogAddDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveDrivers:
	case kIOCatalogRemoveDriversNoMatch: {
		OSDictionary * dict;

		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->removeDrivers( dict,
			    flag == kIOCatalogRemoveDrivers )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	// Legacy / notification-only flags: accepted but no longer acted upon.
	case kIOCatalogStartMatching__Removed:
	case kIOCatalogRemoveKernelLinker__Removed:
	case kIOCatalogKextdActive:
	case kIOCatalogKextdFinishedLaunching:
		kr = KERN_NOT_SUPPORTED;
		break;

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	if (obj) {
		obj->release();
	}

	// Operation status travels in *result; the MIG-level return is success.
	*result = kr;
	return KERN_SUCCESS;
#endif /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
}
6157
6158 /* Routine io_catalog_terminate */
6159 kern_return_t
is_io_catalog_terminate(mach_port_t main_port,uint32_t flag,io_name_t name)6160 is_io_catalog_terminate(
6161 mach_port_t main_port,
6162 uint32_t flag,
6163 io_name_t name )
6164 {
6165 kern_return_t kr;
6166
6167 if (main_port != main_device_port) {
6168 return kIOReturnNotPrivileged;
6169 }
6170
6171 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6172 kIOClientPrivilegeAdministrator );
6173 if (kIOReturnSuccess != kr) {
6174 return kr;
6175 }
6176
6177 switch (flag) {
6178 #if !defined(SECURE_KERNEL)
6179 case kIOCatalogServiceTerminate:
6180 kr = gIOCatalogue->terminateDrivers(NULL, name, false);
6181 break;
6182
6183 case kIOCatalogModuleUnload:
6184 case kIOCatalogModuleTerminate:
6185 kr = gIOCatalogue->terminateDriversForModule(name,
6186 flag == kIOCatalogModuleUnload);
6187 break;
6188 #endif
6189
6190 default:
6191 kr = kIOReturnBadArgument;
6192 break;
6193 }
6194
6195 return kr;
6196 }
6197
6198 /* Routine io_catalog_get_data */
6199 kern_return_t
is_io_catalog_get_data(mach_port_t main_port,uint32_t flag,io_buf_ptr_t * outData,mach_msg_type_number_t * outDataCount)6200 is_io_catalog_get_data(
6201 mach_port_t main_port,
6202 uint32_t flag,
6203 io_buf_ptr_t *outData,
6204 mach_msg_type_number_t *outDataCount)
6205 {
6206 kern_return_t kr = kIOReturnSuccess;
6207 OSSerialize * s;
6208
6209 if (main_port != main_device_port) {
6210 return kIOReturnNotPrivileged;
6211 }
6212
6213 //printf("io_catalog_get_data called. flag: %d\n", flag);
6214
6215 s = OSSerialize::withCapacity(4096);
6216 if (!s) {
6217 return kIOReturnNoMemory;
6218 }
6219
6220 kr = gIOCatalogue->serializeData(flag, s);
6221
6222 if (kr == kIOReturnSuccess) {
6223 mach_vm_address_t data;
6224 vm_map_copy_t copy;
6225 unsigned int size;
6226
6227 size = s->getLength();
6228 kr = mach_vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
6229 if (kr == kIOReturnSuccess) {
6230 bcopy(s->text(), (void *)data, size);
6231 kr = vm_map_copyin(kernel_map, data, size, true, ©);
6232 *outData = (char *)copy;
6233 *outDataCount = size;
6234 }
6235 }
6236
6237 s->release();
6238
6239 return kr;
6240 }
6241
6242 /* Routine io_catalog_get_gen_count */
6243 kern_return_t
is_io_catalog_get_gen_count(mach_port_t main_port,uint32_t * genCount)6244 is_io_catalog_get_gen_count(
6245 mach_port_t main_port,
6246 uint32_t *genCount)
6247 {
6248 if (main_port != main_device_port) {
6249 return kIOReturnNotPrivileged;
6250 }
6251
6252 //printf("io_catalog_get_gen_count called.\n");
6253
6254 if (!genCount) {
6255 return kIOReturnBadArgument;
6256 }
6257
6258 *genCount = gIOCatalogue->getGenerationCount();
6259
6260 return kIOReturnSuccess;
6261 }
6262
6263 /* Routine io_catalog_module_loaded.
6264 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
6265 */
6266 kern_return_t
is_io_catalog_module_loaded(mach_port_t main_port,io_name_t name)6267 is_io_catalog_module_loaded(
6268 mach_port_t main_port,
6269 io_name_t name)
6270 {
6271 if (main_port != main_device_port) {
6272 return kIOReturnNotPrivileged;
6273 }
6274
6275 //printf("io_catalog_module_loaded called. name %s\n", name);
6276
6277 if (!name) {
6278 return kIOReturnBadArgument;
6279 }
6280
6281 gIOCatalogue->moduleHasLoaded(name);
6282
6283 return kIOReturnSuccess;
6284 }
6285
6286 kern_return_t
is_io_catalog_reset(mach_port_t main_port,uint32_t flag)6287 is_io_catalog_reset(
6288 mach_port_t main_port,
6289 uint32_t flag)
6290 {
6291 if (main_port != main_device_port) {
6292 return kIOReturnNotPrivileged;
6293 }
6294
6295 switch (flag) {
6296 case kIOCatalogResetDefault:
6297 gIOCatalogue->reset();
6298 break;
6299
6300 default:
6301 return kIOReturnBadArgument;
6302 }
6303
6304 return kIOReturnSuccess;
6305 }
6306
/*
 * Fast-path trap entry for user-client method invocation. The low bits of
 * the caller-supplied userClientRef select between the Dext (IOUserServer)
 * path and the classic IOUserClient trap path.
 */
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t result = kIOReturnBadArgument;
	IOUserClient * userClient;
	OSObject * object;
	uintptr_t ref;
	mach_port_name_t portName;

	ref = (uintptr_t) args->userClientRef;

	// Reject obviously invalid references up front.
	if ((ref == MACH_PORT_DEAD) || (ref == (uintptr_t) MACH_PORT_NULL)) {
		return kIOReturnBadArgument;
	}
	// kobject port names always have b0-1 set, so we use these bits as flags to
	// iokit_user_client_trap()
	// keep this up to date with ipc_entry_name_mask();
	portName = (mach_port_name_t) (ref | 3);
	// Bit 32 set, or bit 0 clear, selects the user-extension (Dext) path;
	// otherwise 'ref' is treated as a connect port name for the classic path.
	if (((1ULL << 32) & ref) || !(1 & ref)) {
		object = iokit_lookup_uext_ref_current_task(portName);
		if (object) {
			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
		}
		OSSafeReleaseNULL(object);
	} else {
		// Classic path: look up the IOUserClient for this connection.
		io_object_t ref_current_task = iokit_lookup_connect_ref_current_task((mach_port_name_t) ref);
		if ((userClient = OSDynamicCast(IOUserClient, ref_current_task))) {
			IOExternalTrap *trap = NULL;
			IOService *target = NULL;

			result = kIOReturnSuccess;
			// Apply any sandbox-style filter policy before resolving the trap.
			io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
			if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
				result = gIOUCFilterCallbacks->io_filter_applier(userClient, filterPolicy, io_filter_type_trap, args->index);
			}
			if (kIOReturnSuccess == result) {
				trap = userClient->getTargetAndTrapForIndex(&target, args->index);
			}
			if (trap && target) {
				IOTrap func;

				func = trap->func;

				if (func) {
					// Dispatch the member-function trap with the raw args.
					result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
				}
			}

			// Drop the reference taken by the connect lookup.
			iokit_remove_connect_reference(userClient);
		} else {
			// Lookup returned a non-IOUserClient object; just release it.
			OSSafeReleaseNULL(ref_current_task);
		}
	}

	return result;
}
6363
6364 /* Routine io_device_tree_entry_exists_with_name */
6365 kern_return_t
is_io_device_tree_entry_exists_with_name(mach_port_t main_port,io_name_t name,boolean_t * exists)6366 is_io_device_tree_entry_exists_with_name(
6367 mach_port_t main_port,
6368 io_name_t name,
6369 boolean_t *exists )
6370 {
6371 OSCollectionIterator *iter;
6372
6373 if (main_port != main_device_port) {
6374 return kIOReturnNotPrivileged;
6375 }
6376
6377 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6378 *exists = iter && iter->getNextObject();
6379 OSSafeReleaseNULL(iter);
6380
6381 return kIOReturnSuccess;
6382 }
6383 } /* extern "C" */
6384
6385 IOReturn
callExternalMethod(uint32_t selector,IOExternalMethodArguments * args)6386 IOUserClient::callExternalMethod(uint32_t selector, IOExternalMethodArguments * args)
6387 {
6388 IOReturn ret;
6389
6390 ipcEnter(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6391 if (uc2022) {
6392 ret = ((IOUserClient2022 *) this)->externalMethod(selector, (IOExternalMethodArgumentsOpaque *) args);
6393 } else {
6394 ret = externalMethod(selector, args);
6395 }
6396 ipcExit(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6397
6398 return ret;
6399 }
6400
MIG_SERVER_ROUTINE IOReturn
IOUserClient2022::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
    IOExternalMethodDispatch *dispatch,
    OSObject *target, void *reference)
{
	// IOUserClient2022 subclasses must be dispatched through the
	// IOExternalMethodArgumentsOpaque overload; reaching this legacy entry
	// point indicates a dispatch bug, so fail hard. panic() does not return,
	// hence no return statement despite the IOReturn signature.
	panic("wrong externalMethod for IOUserClient2022");
}
6408
6409 IOReturn
dispatchExternalMethod(uint32_t selector,IOExternalMethodArgumentsOpaque * arguments,const IOExternalMethodDispatch2022 dispatchArray[],size_t dispatchArrayCount,OSObject * target,void * reference)6410 IOUserClient2022::dispatchExternalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque *arguments,
6411 const IOExternalMethodDispatch2022 dispatchArray[], size_t dispatchArrayCount,
6412 OSObject * target, void * reference)
6413 {
6414 IOReturn err;
6415 IOExternalMethodArguments * args = (typeof(args))arguments;
6416 const IOExternalMethodDispatch2022 * dispatch;
6417
6418 if (!dispatchArray) {
6419 return kIOReturnError;
6420 }
6421 if (selector >= dispatchArrayCount) {
6422 return kIOReturnBadArgument;
6423 }
6424 dispatch = &dispatchArray[selector];
6425
6426 uint32_t count;
6427 count = dispatch->checkScalarInputCount;
6428 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6429 return kIOReturnBadArgument;
6430 }
6431
6432 count = dispatch->checkStructureInputSize;
6433 if ((kIOUCVariableStructureSize != count)
6434 && (count != ((args->structureInputDescriptor)
6435 ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6436 return kIOReturnBadArgument;
6437 }
6438
6439 count = dispatch->checkScalarOutputCount;
6440 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6441 return kIOReturnBadArgument;
6442 }
6443
6444 count = dispatch->checkStructureOutputSize;
6445 if ((kIOUCVariableStructureSize != count)
6446 && (count != ((args->structureOutputDescriptor)
6447 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6448 return kIOReturnBadArgument;
6449 }
6450
6451 if (args->asyncWakePort && !dispatch->allowAsync) {
6452 return kIOReturnBadArgument;
6453 }
6454
6455 if (dispatch->checkEntitlement) {
6456 if (!IOCurrentTaskHasEntitlement(dispatch->checkEntitlement)) {
6457 return kIOReturnNotPrivileged;
6458 }
6459 }
6460
6461 if (dispatch->function) {
6462 err = (*dispatch->function)(target, reference, args);
6463 } else {
6464 err = kIOReturnNoCompletion; /* implementer can dispatch */
6465 }
6466 return err;
6467 }
6468
/*
 * Default externalMethod implementation. With a dispatch table entry the
 * argument counts/sizes are validated and the handler called directly;
 * without one, dispatch falls back to the pre-Leopard shim paths keyed by
 * the method's kIOUCTypeMask flags (async when an asyncWakePort is present).
 */
IOReturn
IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
	IOReturn err;
	IOService * object;
	IOByteCount structureOutputSize;

	if (dispatch) {
		// Table-driven path: validate declared counts/sizes;
		// kIOUCVariableStructureSize means "any size accepted".
		uint32_t count;
		count = dispatch->checkScalarInputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureInputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureInputDescriptor)
		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkScalarOutputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureOutputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureOutputDescriptor)
		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
			return kIOReturnBadArgument;
		}

		if (dispatch->function) {
			err = (*dispatch->function)(target, reference, args);
		} else {
			err = kIOReturnNoCompletion; /* implementer can dispatch */
		}
		return err;
	}


	// pre-Leopard API's don't do ool structs
	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
		err = kIOReturnIPCError;
		return err;
	}

	structureOutputSize = args->structureOutputSize;

	if (args->asyncWakePort) {
		// Async legacy path: shim to the IOAsyncMethod table entry.
		IOExternalAsyncMethod * method;
		object = NULL;
		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		// Foreground-only methods are refused for GPU-denied tasks.
		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_async_method_scalarI_structureI( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *)args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_async_method_scalarI_scalarO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			// NOTE(review): async struct-output shims write through
			// &args->structureOutputSize directly, unlike the sync path
			// below which uses the local structureOutputSize.
			err = shim_io_async_method_scalarI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_async_method_structureI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    (char *)args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	} else {
		// Synchronous legacy path: shim to the IOMethod table entry.
		IOExternalMethod * method;
		object = NULL;
		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		// Foreground-only methods are refused for GPU-denied tasks.
		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_connect_method_scalarI_structureI( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_connect_method_scalarI_scalarO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			// The shim reports the produced size via the wider local,
			// which is range-checked before being copied back below.
			err = shim_io_connect_method_scalarI_structureO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_connect_method_structureI_structureO( method, object,
			    (char *) args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	}

	// Guard the narrowing copy back into args->structureOutputSize.
	if (structureOutputSize > UINT_MAX) {
		structureOutputSize = 0;
		err = kIOReturnBadArgument;
	}

	args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);

	return err;
}
6621
6622 IOReturn
registerFilterCallbacks(const struct io_filter_callbacks * callbacks,size_t size)6623 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6624 {
6625 if (size < sizeof(*callbacks)) {
6626 return kIOReturnBadArgument;
6627 }
6628 if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6629 return kIOReturnBusy;
6630 }
6631 return kIOReturnSuccess;
6632 }
6633
6634
// Reserved vtable padding slots: each macro emits an unused stub so the
// class vtable layout (and thus kext binary compatibility) stays fixed
// when methods are added in future releases.
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);

OSMetaClassDefineReservedUnused(IOUserClient2022, 0);
OSMetaClassDefineReservedUnused(IOUserClient2022, 1);
OSMetaClassDefineReservedUnused(IOUserClient2022, 2);
OSMetaClassDefineReservedUnused(IOUserClient2022, 3);
6656