1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <libkern/c++/OSKext.h>
30 #include <libkern/c++/OSSharedPtr.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51 #include <sys/code_signing.h>
52
53 #include <mach/sdt.h>
54 #include <os/hash.h>
55
56 #include <libkern/amfi/amfi.h>
57
58 #if CONFIG_MACF
59
60 extern "C" {
61 #include <security/mac_framework.h>
62 };
63 #include <sys/kauth.h>
64
65 #define IOMACF_LOG 0
66
67 #endif /* CONFIG_MACF */
68
69 #include <IOKit/assert.h>
70
71 #include "IOServicePrivate.h"
72 #include "IOKitKernelInternal.h"
73
74 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
75 #define SCALAR32(x) ((uint32_t )x)
76 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
77 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
78 #define REF32(x) ((int)(x))
79
// Flag bits carried in the low bits of an async reference's first entry.
enum{
	kIOUCAsync0Flags = 3ULL,        // mask of both flag bits below
	kIOUCAsync64Flag = 1ULL,        // reference uses the 64-bit layout
	kIOUCAsyncErrorLoggedFlag = 2ULL // an error for this ref was already logged
};
85
#if IOKITSTATS

/* Register this user client with the IOStatistics accounting tables. */
#define IOStatisticsRegisterCounter() \
do { \
	reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

/* Unregister the counter registered above; safe if 'reserved' is NULL. */
#define IOStatisticsUnregisterCounter() \
do { \
	if (reserved) \
	        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

/* Count one inbound call on behalf of 'client' (must be in scope). */
#define IOStatisticsClientCall() \
do { \
	IOStatistics::countUserClientCall(client); \
} while (0)

#else

#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */

#if DEVELOPMENT || DEBUG

/*
 * Temporarily overwrite this frame's saved return-address slot with (a),
 * so tools walking the stack attribute the call to (a); restored by
 * FAKE_STACK_FRAME_END(). Debug builds only.
 */
#define FAKE_STACK_FRAME(a) \
	const void ** __frameptr; \
	const void * __retaddr; \
	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
	__retaddr = __frameptr[1]; \
	__frameptr[1] = (a);

#define FAKE_STACK_FRAME_END() \
	__frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */

/* Element counts of the 32-bit and 64-bit async reference arrays. */
#define ASYNC_REF_COUNT  (sizeof(io_async_ref_t) / sizeof(natural_t))
#define ASYNC_REF64_COUNT  (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
133
134 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
135
136 extern "C" {
137 #include <mach/mach_traps.h>
138 #include <vm/vm_map.h>
139 } /* extern "C" */
140
141 struct IOMachPortHashList;
142
143 static_assert(IKOT_MAX_TYPE <= 255);
144
145 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
146
147 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
class IOMachPort : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	mach_port_mscount_t mscount;    // make-send count, compared on no-senders
	IOLock lock;                    // guards 'object' against concurrent lookup/clear
	SLIST_ENTRY(IOMachPort) link;   // linkage in a gIOMachPortHash bucket
	ipc_port_t port;                // the kobject port representing 'object'
	OSObject* XNU_PTRAUTH_SIGNED_PTR("IOMachPort.object") object;

	// Create a port wrapper for obj; takes a tagged retain on obj.
	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);

	// Hash bucket for obj (type does not affect the bucket).
	static IOMachPortHashList* bucketForObject(OSObject *obj,
	    ipc_kobject_type_t type);

	// Linear search of a bucket; caller must hold gIOObjectPortLock.
	static LIBKERN_RETURNS_NOT_RETAINED IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	// Handle a no-senders notification; returns true if the port was destroyed.
	static bool noMoreSendersForObject( OSObject * obj,
	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );

	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;
};

#define super OSObject
OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)
178
// Guards the gIOMachPortHash buckets and IOMachPort insert/remove transitions.
static IOLock * gIOObjectPortLock;
IOLock * gIOUserServerLock;

// External-method filter callbacks; locked read-only late in boot.
SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SLIST_HEAD(IOMachPortHashList, IOMachPort);

#if defined(XNU_TARGET_OS_OSX)
#define PORT_HASH_SIZE 4096
#else /* !defined(XNU_TARGET_OS_OSX) */
#define PORT_HASH_SIZE 256
#endif /* defined(XNU_TARGET_OS_OSX) */

// Object -> IOMachPort table, bucketed by hashed object pointer.
IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
195
196 void
IOMachPortInitialize(void)197 IOMachPortInitialize(void)
198 {
199 for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
200 SLIST_INIT(&gIOMachPortHash[i]);
201 }
202 }
203
204 IOMachPortHashList*
bucketForObject(OSObject * obj,ipc_kobject_type_t type)205 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
206 {
207 return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
208 }
209
210 IOMachPort*
portForObjectInBucket(IOMachPortHashList * bucket,OSObject * obj,ipc_kobject_type_t type)211 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
212 {
213 IOMachPort *machPort;
214
215 SLIST_FOREACH(machPort, bucket, link) {
216 if (machPort->object == obj && iokit_port_type(machPort->port) == type) {
217 return machPort;
218 }
219 }
220 return NULL;
221 }
222
223 IOMachPort*
withObjectAndType(OSObject * obj,ipc_kobject_type_t type)224 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
225 {
226 IOMachPort *machPort = NULL;
227
228 machPort = new IOMachPort;
229 if (__improbable(machPort && !machPort->init())) {
230 OSSafeReleaseNULL(machPort);
231 return NULL;
232 }
233
234 machPort->object = obj;
235 machPort->port = iokit_alloc_object_port(machPort, type);
236 IOLockInlineInit(&machPort->lock);
237
238 obj->taggedRetain(OSTypeID(OSCollection));
239 machPort->mscount++;
240
241 return machPort;
242 }
243
/*
 * Handle a no-senders notification for obj's port of the given type.
 * Returns true if the port was destroyed; returns false (and writes the
 * current make-send count into *mscount) when new send rights were made
 * after the notification was posted, so the caller can re-arm it.
 */
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOMachPort *machPort = NULL;
	IOUserClient *uc;
	OSAction *action;
	bool destroyed = true;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Keep obj alive across the unlock/callout sequence below.
	obj->retain();

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort) {
		// Stale notification if more sends were created since it was armed.
		destroyed = (machPort->mscount <= *mscount);
		if (!destroyed) {
			*mscount = machPort->mscount;
			lck_mtx_unlock(gIOObjectPortLock);
		} else {
			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
				uc->noMoreSenders();
			}
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);

			// Clear 'object' under the per-port lock so concurrent
			// port->object lookups observe NULL, then drop the port.
			IOLockLock(&machPort->lock);
			iokit_remove_object_port(machPort->port, type);
			machPort->object = NULL;
			IOLockUnlock(&machPort->lock);

			lck_mtx_unlock(gIOObjectPortLock);

			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

			// Balances the taggedRetain taken in withObjectAndType().
			obj->taggedRelease(OSTypeID(OSCollection));
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}

	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
		action->Aborted();
	}

	if (IKOT_UEXT_OBJECT == type && IOUserServer::shouldLeakObjects()) {
		// Leak object
		obj->retain();
	}

	obj->release();

	return destroyed;
}
300
/*
 * Tear down obj's port of the given (non-connect) type, unless the
 * service has asked to hold port destruction (machPortHoldDestroy).
 * Connect ports are torn down via destroyUserReferences() instead.
 */
void
IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
	IOMachPort *machPort;
	IOService *service;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	// Only IOService-backed IKOT_IOKIT_OBJECT ports are destroyed here,
	// and only when the service hasn't deferred destruction.
	if (machPort
	    && (type == IKOT_IOKIT_OBJECT)
	    && (service = OSDynamicCast(IOService, obj))
	    && !service->machPortHoldDestroy()) {
		// Keep obj alive until after the tagged reference is dropped.
		obj->retain();
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);

		// Detach the object under the per-port lock, then remove the port.
		IOLockLock(&machPort->lock);
		iokit_remove_object_port(machPort->port, type);
		machPort->object = NULL;
		IOLockUnlock(&machPort->lock);

		lck_mtx_unlock(gIOObjectPortLock);

		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

		// Balances the taggedRetain taken in withObjectAndType().
		obj->taggedRelease(OSTypeID(OSCollection));
		obj->release();
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}
}
337
/*
 * Destroy the user-visible ports for obj. The object port is released
 * outright; the connect port is either destroyed, or — when the user
 * client still has live memory mappings — re-targeted at the mappings
 * object so those mappings stay reachable until they are torn down.
 */
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	IOMachPort *machPort;
	bool destroyPort;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	// panther, 3160200
	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

	// Keep obj alive while manipulating the port hash.
	obj->retain();
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
	IOMachPortHashList *mappingBucket = NULL;

	lck_mtx_lock(gIOObjectPortLock);

	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	if (uc && uc->mappings) {
		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
	}

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		lck_mtx_unlock(gIOObjectPortLock);
		goto end;
	}

	// Unhash the connect port from obj; drop obj's tagged reference.
	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	obj->taggedRelease(OSTypeID(OSCollection));

	destroyPort = true;
	if (uc) {
		uc->noMoreSenders();
		if (uc->mappings) {
			// Re-target the existing port at the mappings object
			// instead of destroying it: move it to that object's
			// bucket and swap the back-pointer under the port lock.
			uc->mappings->taggedRetain(OSTypeID(OSCollection));
			SLIST_INSERT_HEAD(mappingBucket, machPort, link);

			IOLockLock(&machPort->lock);
			machPort->object = uc->mappings;
			IOLockUnlock(&machPort->lock);

			lck_mtx_unlock(gIOObjectPortLock);

			OSSafeReleaseNULL(uc->mappings);
			destroyPort = false;
		}
	}

	if (destroyPort) {
		IOLockLock(&machPort->lock);
		iokit_remove_object_port(machPort->port, IKOT_IOKIT_CONNECT);
		machPort->object = NULL;
		IOLockUnlock(&machPort->lock);

		lck_mtx_unlock(gIOObjectPortLock);
		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
	}

end:
	OSSafeReleaseNULL(obj);
}
401
402 mach_port_name_t
makeSendRightForTask(task_t task,io_object_t obj,ipc_kobject_type_t type)403 IOMachPort::makeSendRightForTask( task_t task,
404 io_object_t obj, ipc_kobject_type_t type )
405 {
406 return iokit_make_send_right( task, obj, type );
407 }
408
/*
 * Destructor: destroy the kobject port (if one was allocated), then the
 * inline lock, then chain to OSObject teardown. Order matters — the port
 * must go before the lock it is guarded by.
 */
void
IOMachPort::free( void )
{
	if (port) {
		iokit_destroy_object_port(port, iokit_port_type(port));
	}
	IOLockInlineDestroy(&lock);
	super::free();
}
418
419 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
420
421 static bool
IOTaskRegistryCompatibility(task_t task)422 IOTaskRegistryCompatibility(task_t task)
423 {
424 return false;
425 }
426
427 static void
IOTaskRegistryCompatibilityMatching(task_t task,OSDictionary * matching)428 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
429 {
430 matching->setObject(gIOServiceNotificationUserKey, kOSBooleanTrue);
431 if (!IOTaskRegistryCompatibility(task)) {
432 return;
433 }
434 matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
435 }
436
437 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
438
OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )

/*
 * Wrap an OSIterator for export to user space. Consumes the caller's
 * reference on 'iter' in every case: on success it is owned by the
 * returned wrapper (released in free()); on failure it is released here.
 */
IOUserIterator *
IOUserIterator::withIterator(OSIterator * iter)
{
	IOUserIterator * result = NULL;

	if (iter == NULL) {
		return NULL;
	}

	result = new IOUserIterator;
	if ((result != NULL) && !result->init()) {
		result->release();
		result = NULL;
	}
	if (result == NULL) {
		// Construction failed: consume the caller's reference anyway.
		iter->release();
		return NULL;
	}
	result->userIteratorObject = iter;

	return result;
}
463
464 bool
init(void)465 IOUserIterator::init( void )
466 {
467 if (!OSObject::init()) {
468 return false;
469 }
470
471 IOLockInlineInit(&lock);
472 return true;
473 }
474
/*
 * Destructor: drop the wrapped iterator (if still set), tear down the
 * inline lock, then chain to OSObject teardown.
 */
void
IOUserIterator::free()
{
	if (userIteratorObject) {
		userIteratorObject->release();
	}
	IOLockInlineDestroy(&lock);
	OSObject::free();
}
484
485 void
reset()486 IOUserIterator::reset()
487 {
488 IOLockLock(&lock);
489 assert(OSDynamicCast(OSIterator, userIteratorObject));
490 ((OSIterator *)userIteratorObject)->reset();
491 IOLockUnlock(&lock);
492 }
493
494 bool
isValid()495 IOUserIterator::isValid()
496 {
497 bool ret;
498
499 IOLockLock(&lock);
500 assert(OSDynamicCast(OSIterator, userIteratorObject));
501 ret = ((OSIterator *)userIteratorObject)->isValid();
502 IOLockUnlock(&lock);
503
504 return ret;
505 }
506
/*
 * Unsupported on the user-facing wrapper — callers must use
 * copyNextObject(), which returns a retained reference.
 */
OSObject *
IOUserIterator::getNextObject()
{
	assert(false);
	return NULL;
}
513
514 OSObject *
copyNextObject()515 IOUserIterator::copyNextObject()
516 {
517 OSObject * ret = NULL;
518
519 IOLockLock(&lock);
520 if (userIteratorObject) {
521 ret = ((OSIterator *)userIteratorObject)->getNextObject();
522 if (ret) {
523 ret->retain();
524 }
525 }
526 IOLockUnlock(&lock);
527
528 return ret;
529 }
530
531 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
532 extern "C" {
533 // functions called from osfmk/device/iokit_rpc.c
534
/*
 * Fill 'desc' with a human-readable description of the object behind an
 * IOKit port, for IPC debugging tools. Registry entries get their class
 * name and registry ID; (debug builds) notifications get their matching
 * dictionary serialized; anything else just gets its class name.
 */
void
iokit_port_object_description(io_object_t obj, kobject_description_t desc)
{
	IORegistryEntry * regEntry;
	IOUserNotification * __unused noti;
	_IOServiceNotifier * __unused serviceNoti;
	OSSerialize * __unused s;
	OSDictionary * __unused matching = NULL;

	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
#if DEVELOPMENT || DEBUG
	} else if ((noti = OSDynamicCast(IOUserNotification, obj))) {
		// serviceNoti->matching may become NULL if the port gets a no-senders notification, so we have to lock gIOObjectPortLock
		IOLockLock(gIOObjectPortLock);
		serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->userIteratorObject);
		if (serviceNoti && (matching = serviceNoti->matching)) {
			matching->retain();
		}
		IOLockUnlock(gIOObjectPortLock);

		if (matching) {
			s = OSSerialize::withCapacity((unsigned int) page_size);
			if (s && matching->serialize(s)) {
				snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
			}
			OSSafeReleaseNULL(s);
			OSSafeReleaseNULL(matching);
		}
#endif /* DEVELOPMENT || DEBUG */
	} else {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
	}
}
569
570 // FIXME: Implementation of these functions are hidden from the static analyzer.
571 // As for now, the analyzer doesn't consistently support wrapper functions
572 // for retain and release.
573 #ifndef __clang_analyzer__
574 void
iokit_add_reference(io_object_t obj,natural_t type)575 iokit_add_reference( io_object_t obj, natural_t type )
576 {
577 if (!obj) {
578 return;
579 }
580 obj->retain();
581 }
582
583 void
iokit_remove_reference(io_object_t obj)584 iokit_remove_reference( io_object_t obj )
585 {
586 if (obj) {
587 obj->release();
588 }
589 }
590 #endif // __clang_analyzer__
591
592 void
iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj)593 iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj )
594 {
595 if (!obj) {
596 return;
597 }
598 obj->release();
599 }
600
// Locking modes for ipcEnter()/ipcExit() on the user client's RW lock.
enum {
	kIPCLockNone = 0,  // no lock taken
	kIPCLockRead = 1,  // shared (read) lock
	kIPCLockWrite = 2  // exclusive (write) lock
};
606
/*
 * Enter an IPC into this user client: take the RW lock in the requested
 * mode and bump the in-flight IPC count. Must be balanced by ipcExit()
 * with the same 'locking' value.
 */
void
IOUserClient::ipcEnter(int locking)
{
	switch (locking) {
	case kIPCLockWrite:
		IORWLockWrite(&lock);
		break;
	case kIPCLockRead:
		IORWLockRead(&lock);
		break;
	case kIPCLockNone:
		break;
	default:
		panic("ipcEnter");
	}

	OSIncrementAtomic(&__ipc);
}
625
/*
 * Leave an IPC: decrement the in-flight count and, if this was the last
 * IPC on an inactive client whose finalize was deferred (__ipcFinal set
 * by finalizeUserReferences()), schedule that finalize now. Finally
 * release the RW lock taken in ipcEnter().
 */
void
IOUserClient::ipcExit(int locking)
{
	bool finalize = false;

	assert(__ipc);
	// OSDecrementAtomic returns the previous value, so 1 means "now zero".
	if (1 == OSDecrementAtomic(&__ipc) && isInactive()) {
		IOLockLock(gIOObjectPortLock);
		if ((finalize = __ipcFinal)) {
			__ipcFinal = false;
		}
		IOLockUnlock(gIOObjectPortLock);
		if (finalize) {
			scheduleFinalize(true);
		}
	}
	switch (locking) {
	case kIPCLockWrite:
	case kIPCLockRead:
		IORWLockUnlock(&lock);
		break;
	case kIPCLockNone:
		break;
	default:
		panic("ipcExit");
	}
}
653
/*
 * Retain the IOMachPort kobject behind a port; debug-asserts the type.
 */
void
iokit_kobject_retain(io_kobject_t machPort)
{
	assert(OSDynamicCast(IOMachPort, machPort));
	machPort->retain();
}
660
/*
 * Resolve a kobject (IOMachPort) to the IOKit object it wraps, taking a
 * reference on that object, and consume the caller's reference on the
 * kobject itself. Returns NULL if the port was already detached from its
 * object (machPort->object cleared under machPort->lock elsewhere).
 */
io_object_t
iokit_copy_object_for_consumed_kobject(LIBKERN_CONSUMED io_kobject_t machPort, natural_t type)
{
	io_object_t result;

	assert(OSDynamicCast(IOMachPort, machPort));

	// The per-port lock makes the read of 'object' atomic with respect
	// to the detach paths that NULL it out.
	IOLockLock(&machPort->lock);
	result = machPort->object;
	if (result) {
		iokit_add_reference(result, type);
	}
	IOLockUnlock(&machPort->lock);
	machPort->release();
	return result;
}
677
/*
 * Decide whether a user client can be finalized now. If IPCs are still
 * in flight (__ipc != 0), mark the finalize as deferred (__ipcFinal) —
 * ipcExit() will schedule it when the last IPC drains — and return
 * false. Non-user-client objects always return true.
 */
bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
	IOUserClient * uc;
	bool ok = true;

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		IOLockLock(gIOObjectPortLock);
		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
			ok = false;
		}
		IOLockUnlock(gIOObjectPortLock);
	}
	return ok;
}
693
/*
 * Find or create the port representing (obj, type). On an existing
 * entry the make-send count is bumped; on a new one, a wrapper is
 * created and hashed. Returns the port with an extra port reference
 * (caller releases), and optionally the backing kobject via *kobj.
 * Returns NULL only if wrapper allocation fails.
 */
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type, ipc_kobject_t * kobj )
{
	IOMachPort *machPort = NULL;
	ipc_port_t port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		machPort = IOMachPort::withObjectAndType(obj, type);
		if (__improbable(machPort == NULL)) {
			goto end;
		}
		SLIST_INSERT_HEAD(bucket, machPort, link);
	} else {
		// Track outstanding send rights for the no-senders race check.
		machPort->mscount++;
	}

	iokit_retain_port(machPort->port);
	port = machPort->port;

end:
	// NOTE: on the failure path *kobj is set to NULL (machPort is NULL).
	if (kobj) {
		*kobj = machPort;
	}
	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
727
/*
 * No-senders handler for IOKit object ports. If the port was really
 * destroyed (no racing new send rights), notify the wrapped object of
 * the client's death according to its kind. Returns kIOReturnNotReady
 * when the notification was stale and the port survived.
 */
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOUserClient * client;
	IOMemoryMap * map;
	IOUserNotification * notify;
	IOUserServerCheckInToken * token;

	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
		return kIOReturnNotReady;
	}

	switch (type) {
	case IKOT_IOKIT_CONNECT:
		// User client connection: run clientDied() under the write lock.
		if ((client = OSDynamicCast( IOUserClient, obj ))) {
			IOStatisticsClientCall();
			IORWLockWrite(&client->lock);
			client->clientDied();
			IORWLockUnlock(&client->lock);
		}
		break;
	case IKOT_IOKIT_OBJECT:
		// Memory maps are torn down; notifications are disarmed.
		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
			map->taskDied();
		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
			notify->setNotification( NULL );
		}
		break;
	case IKOT_IOKIT_IDENT:
		if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
			token->cancel();
		}
		break;
	}

	return kIOReturnSuccess;
}
766 }; /* extern "C" */
767
768 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
769
// Delivers service publish/terminate notifications to a user-space port:
// queues matched services and pings the client when the queue goes non-empty.
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	// Kernel-processed part of the ping message (header only).
	struct PingMsgKdata {
		mach_msg_header_t msgHdr;
	};
	// User payload: the notification header (plus the async reference).
	struct PingMsgUdata {
		OSNotificationHeader64 notifyHeader;
	};

	enum { kMaxOutstanding = 1024 };  // cap on queued services

	ipc_port_t remotePort;            // client's port; cleared by invalidatePort()
	void *msgReference;               // caller-supplied async reference (copied)
	mach_msg_size_t msgReferenceSize; // mach-rounded size of msgReference
	natural_t msgType;                // notification type sent in the ping
	OSArray * newSet;                 // services queued for copyNextObject()
	bool armed;                       // true when the next arrival should ping
	bool ipcLogged;                   // rate-limits send-failure logging

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	void invalidatePort(void);

	// Static trampoline registered with the notification machinery.
	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
806
// Delivers IOService interest messages (e.g. power/state changes) to a
// user-space port, forwarding the provider port and message argument.
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	// Kernel-processed part: header plus one port descriptor (the provider).
	struct PingMsgKdata {
		mach_msg_header_t msgHdr;
		mach_msg_body_t msgBody;
		mach_msg_port_descriptor_t ports[1];
	};
	struct PingMsgUdata {
		OSNotificationHeader64 notifyHeader __attribute__ ((packed));
	};

	ipc_port_t remotePort;            // client's port; cleared by invalidatePort()
	void *msgReference;               // caller-supplied async reference (copied)
	mach_msg_size_t msgReferenceSize; // mach-rounded size of msgReference
	mach_msg_size_t msgExtraSize;     // extra payload space for the argument
	natural_t msgType;                // notification type sent in the message
	uint8_t clientIs64;               // client word size; picks argument layout
	int owningPID;                    // pid captured at init, for kIOMessageCopyClientID
	bool ipcLogged;                   // rate-limits send-failure logging

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	void invalidatePort(void);

	// Static trampoline registered with the interest-notification machinery.
	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
848
849 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
850
851 #undef super
852 #define super IOUserIterator
853 OSDefineMetaClass( IOUserNotification, IOUserIterator );
854 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
855
856 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
857
/*
 * Destructor. Debug builds assert that the notifier was already cleared
 * via setNotification(NULL) before the final reference was dropped.
 */
void
IOUserNotification::free( void )
{
#if DEVELOPMENT || DEBUG
	IOLockLock( gIOObjectPortLock);

	assert(userIteratorObject == NULL);

	IOLockUnlock( gIOObjectPortLock);
#endif /* DEVELOPMENT || DEBUG */

	super::free();
}
871
872
/*
 * Swap the wrapped IONotifier. Passing NULL disarms the notification:
 * the previous notifier is removed and the reference taken when it was
 * installed is dropped. Installing a notifier takes a self-reference
 * that keeps this object alive until setNotification(NULL).
 */
void
IOUserNotification::setNotification( IONotifier * notify )
{
	OSObject * previousNotify;

	/*
	 * We must retain this object here before proceeding.
	 * Two threads may race in setNotification(). If one thread sets a new notifier while the
	 * other thread sets the notifier to NULL, it is possible for the second thread to call release()
	 * before the first thread calls retain(). Without the retain here, this thread interleaving
	 * would cause the object to get released and freed before it is retained by the first thread,
	 * which is a UaF.
	 */
	retain();

	IOLockLock( gIOObjectPortLock);

	previousNotify = userIteratorObject;
	userIteratorObject = notify;

	IOLockUnlock( gIOObjectPortLock);

	if (previousNotify) {
		assert(OSDynamicCast(IONotifier, previousNotify));
		((IONotifier *)previousNotify)->remove();

		if (notify == NULL) {
			// Disarming: drop the reference taken when the notifier was set.
			release();
		}
	} else if (notify) {
		// new IONotifier, retain the object. release() will happen in setNotification(NULL)
		retain();
	}

	release(); // paired with retain() at beginning of this method
}
909
/*
 * Iterator reset has no meaning for a notification; intentionally empty.
 */
void
IOUserNotification::reset()
{
	// nothing to reset
}
915
/*
 * Notifications are always reported valid to the iterator interface.
 */
bool
IOUserNotification::isValid()
{
	return true;
}
921
922 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
923
924 #undef super
925 #define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification,IOUserNotification)926 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
927
928 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
929
/*
 * Initialize with the client's port, the notification type, and the
 * client's async reference (at most sizeof(OSAsyncReference64) bytes,
 * copied into a mach-rounded buffer for the ping message). Stores the
 * caller's send right in remotePort; it is released in free() unless
 * invalidatePort() runs first.
 */
bool
IOServiceUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize,
    bool clientIs64 )
{
	if (!super::init()) {
		return false;
	}

	// Queue of matched services handed out via copyNextObject().
	newSet = OSArray::withCapacity( 1 );
	if (!newSet) {
		return false;
	}

	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	// Round up so the reference can be embedded in a mach message payload.
	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
960
/*
 * Detach from the client's port so free() will not release the send
 * right; the teardown path that calls this owns that right instead.
 */
void
IOServiceUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
966
/*
 * Destructor: release the client send right (unless invalidated), the
 * copied async reference, and the queued-services array.
 */
void
IOServiceUserNotification::free( void )
{
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);
	OSSafeReleaseNULL(newSet);

	super::free();
}
978
979 bool
_handler(void * target,void * ref,IOService * newService,IONotifier * notifier)980 IOServiceUserNotification::_handler( void * target,
981 void * ref, IOService * newService, IONotifier * notifier )
982 {
983 IOServiceUserNotification * targetObj = (IOServiceUserNotification *)target;
984 bool ret;
985
986 targetObj->retain();
987 ret = targetObj->handler( ref, newService );
988 targetObj->release();
989 return ret;
990 }
991
/*
 * Notification callback: queue the matched service (bounded by
 * kMaxOutstanding) and, if the queue transitioned from empty while
 * armed, send a one-shot ping message to the client so it drains the
 * queue via copyNextObject(). Always returns true to keep the notifier.
 */
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	unsigned int count;
	kern_return_t kr;
	ipc_port_t port = NULL;
	bool sendPing = false;
	mach_msg_size_t msgSize, payloadSize;

	IOTakeLock( &lock );

	count = newSet->getCount();
	if (count < kMaxOutstanding) {
		newSet->setObject( newService );
		// Only the first arrival after re-arm (copyNextObject emptied
		// the queue) triggers a ping.
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( &lock );

	if (kIOServiceTerminatedNotificationType == msgType) {
		// Keep the terminated service's object port alive until user
		// space has had a chance to see the notification.
		lck_mtx_lock(gIOObjectPortLock);
		newService->setMachPortHoldDestroy(true);
		lck_mtx_unlock(gIOObjectPortLock);
	}

	if (sendPing) {
		port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );

		// Payload is the notification header with the actual (rounded)
		// reference size substituted for the worst-case field.
		payloadSize = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		msgSize = (mach_msg_size_t)(sizeof(PingMsgKdata) + payloadSize);

		kr = kernel_mach_msg_send_with_builder_internal(0, payloadSize,
		    MACH_SEND_KERNEL_IMPORTANCE, MACH_MSG_TIMEOUT_NONE, NULL,
		    ^(mach_msg_header_t *hdr, __assert_only mach_msg_descriptor_t *descs, void *payload){
			PingMsgUdata *udata = (PingMsgUdata *)payload;

			hdr->msgh_remote_port = remotePort;
			hdr->msgh_local_port = port;
			hdr->msgh_bits = MACH_MSGH_BITS(
				MACH_MSG_TYPE_COPY_SEND /*remote*/,
				MACH_MSG_TYPE_MAKE_SEND /*local*/);
			hdr->msgh_size = msgSize;
			hdr->msgh_id = kOSNotificationMessageID;

			assert(descs == NULL);
			/* End of kernel processed data */

			udata->notifyHeader.size = 0;
			udata->notifyHeader.type = msgType;

			assert((char *)udata->notifyHeader.reference + msgReferenceSize <= (char *)payload + payloadSize);
			bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
		});

		if (port) {
			iokit_release_port( port );
		}

		// Log the first send failure only, to avoid log spam.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
/*
 * Unsupported — callers must use copyNextObject(), which returns a
 * retained reference and re-arms the ping when the queue drains.
 */
OSObject *
IOServiceUserNotification::getNextObject()
{
	assert(false);
	return NULL;
}
1067
1068 OSObject *
copyNextObject()1069 IOServiceUserNotification::copyNextObject()
1070 {
1071 unsigned int count;
1072 OSObject * result;
1073
1074 IOLockLock(&lock);
1075
1076 count = newSet->getCount();
1077 if (count) {
1078 result = newSet->getObject( count - 1 );
1079 result->retain();
1080 newSet->removeObject( count - 1);
1081 } else {
1082 result = NULL;
1083 armed = true;
1084 }
1085
1086 IOLockUnlock(&lock);
1087
1088 return result;
1089 }
1090
1091 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1092
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification,IOUserNotification)1093 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1094
1095 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1096
/*
 * Initialize with the client's port, message type, word size, and async
 * reference (at most sizeof(OSAsyncReference64) bytes, copied into a
 * mach-rounded buffer). Captures the calling pid for later
 * kIOMessageCopyClientID queries. The caller's send right is stored in
 * remotePort and released in free() unless invalidatePort() runs first.
 */
bool
IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, bool client64 )
{
	if (!super::init()) {
		return false;
	}

	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	clientIs64 = client64;

	owningPID = proc_selfpid();

	// Round up so the reference can be embedded in a mach message payload.
	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
1125
/*
 * Detach from the client's port so free() will not release the send
 * right; the teardown path that calls this owns that right instead.
 */
void
IOServiceMessageUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
1131
/*
 * Destructor: release the client send right (unless invalidated) and
 * the copied async reference buffer.
 */
void
IOServiceMessageUserNotification::free( void )
{
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);

	super::free();
}
1142
1143 IOReturn
_handler(void * target,void * ref,UInt32 messageType,IOService * provider,void * argument,vm_size_t argSize)1144 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1145 UInt32 messageType, IOService * provider,
1146 void * argument, vm_size_t argSize )
1147 {
1148 IOServiceMessageUserNotification * targetObj = (IOServiceMessageUserNotification *)target;
1149 IOReturn ret;
1150
1151 targetObj->retain();
1152 ret = targetObj->handler(
1153 ref, messageType, provider, argument, argSize);
1154 targetObj->release();
1155 return ret;
1156 }
1157
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	// Deliver one service-interest message to the user client: build a
	// mach message carrying the provider port, the saved reference, and the
	// message argument (by value or by copy), then send it to remotePort.
	kern_return_t kr;
	vm_size_t argSize;
	mach_msg_size_t thisMsgSize;
	ipc_port_t thisPort, providerPort;

	// Special query: return the owning pid instead of sending a message.
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	if (callerArgSize == 0) {
		// No payload: the argument is passed by value, sized for the
		// client's pointer width.
		if (clientIs64) {
			argSize = sizeof(io_user_reference_t);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		// Clamp over-large payloads rather than failing the delivery.
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions
	// The (pre-rounding) argument size is encoded into the notify type's
	// size-adjust field so user space can recover the true length.
	natural_t type = msgType;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	mach_msg_size_t extraSize = kIOUserNotifyMaxMessageSize + sizeof(IOServiceInterestContent64);
	// Fixed part: kernel header + user data, with the reference area sized
	// to the actual (rounded) reference rather than the full 64-bit ref.
	mach_msg_size_t msgSize = (mach_msg_size_t) (sizeof(PingMsgKdata) +
	    sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize);

	// Guard the final size computation against integer overflow.
	if (os_add3_overflow(msgSize, offsetof(IOServiceInterestContent64, messageArgument), argSize, &thisMsgSize)) {
		return kIOReturnBadArgument;
	}
	mach_msg_size_t payloadSize = thisMsgSize - sizeof(PingMsgKdata);

	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT, NULL );
	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT, NULL );

	// The builder closure fills in the header, one port descriptor, and
	// the user payload in place; the kernel sends it atomically.
	kr = kernel_mach_msg_send_with_builder_internal(1, payloadSize,
	    MACH_SEND_KERNEL_IMPORTANCE, MACH_MSG_TIMEOUT_NONE, NULL,
	    ^(mach_msg_header_t *hdr, mach_msg_descriptor_t *descs, void *payload){
		mach_msg_port_descriptor_t *port_desc = (mach_msg_port_descriptor_t *)descs;
		PingMsgUdata *udata = (PingMsgUdata *)payload;
		IOServiceInterestContent64 * data;
		mach_msg_size_t dataOffset;

		hdr->msgh_remote_port = remotePort;
		hdr->msgh_local_port = thisPort;
		hdr->msgh_bits = MACH_MSGH_BITS_COMPLEX
		| MACH_MSGH_BITS(
			MACH_MSG_TYPE_COPY_SEND /*remote*/,
			MACH_MSG_TYPE_MAKE_SEND /*local*/);
		hdr->msgh_size = thisMsgSize;
		hdr->msgh_id = kOSNotificationMessageID;

		/* body.msgh_descriptor_count is set automatically after the closure */

		port_desc[0].name = providerPort;
		port_desc[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
		port_desc[0].type = MACH_MSG_PORT_DESCRIPTOR;
		/* End of kernel processed data */

		udata->notifyHeader.size = extraSize;
		udata->notifyHeader.type = type;
		bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );

		/* data is after msgReference */
		dataOffset = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		data = (IOServiceInterestContent64 *) (((uint8_t *) udata) + dataOffset);
		data->messageType = messageType;

		if (callerArgSize == 0) {
			// By-value argument; for 32-bit clients the value is
			// replicated into the high word as well.
			assert((char *)data->messageArgument + argSize <= (char *)payload + payloadSize);
			data->messageArgument[0] = (io_user_reference_t) messageArgument;
			if (!clientIs64) {
				data->messageArgument[0] |= (data->messageArgument[0] << 32);
			}
		} else {
			assert((char *)data->messageArgument + callerArgSize <= (char *)payload + payloadSize);
			bcopy(messageArgument, data->messageArgument, callerArgSize);
		}
	});

	if (thisPort) {
		iokit_release_port( thisPort );
	}
	if (providerPort) {
		iokit_release_port( providerPort );
	}

	// Out-of-buffer is the only send failure surfaced to the caller.
	if (kr == MACH_SEND_NO_BUFFER) {
		return kIOReturnNoMemory;
	}

	// Other IPC failures are logged once per notification object.
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1267
1268 OSObject *
getNextObject()1269 IOServiceMessageUserNotification::getNextObject()
1270 {
1271 return NULL;
1272 }
1273
1274 OSObject *
copyNextObject()1275 IOServiceMessageUserNotification::copyNextObject()
1276 {
1277 return NULL;
1278 }
1279
1280 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1281
#undef super
#define super IOService
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

// Protects the per-task and per-client owner lists (IOUserClientOwner links).
IOLock * gIOUserClientOwnersLock;

// ABI guard: the opaque ivar reservation in IOUserClient must stay exactly
// nine pointers wide so subclass layouts remain binary compatible.
static_assert(offsetof(IOUserClient, __opaque_end) -
    offsetof(IOUserClient, __opaque_start) == sizeof(void *) * 9,
    "ABI check: Opaque ivars for IOUserClient must be 9 void * big");
1291
1292 void
initialize(void)1293 IOUserClient::initialize( void )
1294 {
1295 gIOObjectPortLock = IOLockAlloc();
1296 gIOUserClientOwnersLock = IOLockAlloc();
1297 gIOUserServerLock = IOLockAlloc();
1298 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1299
1300 #if IOTRACKING
1301 IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1302 IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1303 IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1304 IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1305 IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
1306 #endif /* IOTRACKING */
1307 }
1308
void
#if __LP64__
__attribute__((__noreturn__))
#endif
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
#if __LP64__
	// Legacy 32-bit-only interface; must never be reached on LP64 kernels.
	panic("setAsyncReference not valid for 64b");
#else
	// Pack the wake port into slot 0 while preserving the existing flag
	// bits, then record the callout function and its refcon.
	asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
#endif
}
1326
1327 void
setAsyncReference64(OSAsyncReference64 asyncRef,mach_port_t wakePort,mach_vm_address_t callback,io_user_reference_t refcon)1328 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1329 mach_port_t wakePort,
1330 mach_vm_address_t callback, io_user_reference_t refcon)
1331 {
1332 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1333 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1334 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1335 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1336 }
1337
1338 void
setAsyncReference64(OSAsyncReference64 asyncRef,mach_port_t wakePort,mach_vm_address_t callback,io_user_reference_t refcon,task_t task)1339 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1340 mach_port_t wakePort,
1341 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1342 {
1343 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1344 if (vm_map_is_64bit(get_task_map(task))) {
1345 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1346 }
1347 }
1348
1349 static OSDictionary *
CopyConsoleUser(UInt32 uid)1350 CopyConsoleUser(UInt32 uid)
1351 {
1352 OSArray * array;
1353 OSDictionary * user = NULL;
1354
1355 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1356 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1357 for (unsigned int idx = 0;
1358 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1359 idx++) {
1360 OSNumber * num;
1361
1362 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1363 && (uid == num->unsigned32BitValue())) {
1364 user->retain();
1365 break;
1366 }
1367 }
1368 }
1369 OSSafeReleaseNULL(ioProperty);
1370 return user;
1371 }
1372
1373 static OSDictionary *
CopyUserOnConsole(void)1374 CopyUserOnConsole(void)
1375 {
1376 OSArray * array;
1377 OSDictionary * user = NULL;
1378
1379 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1380 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1381 for (unsigned int idx = 0;
1382 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1383 idx++) {
1384 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1385 user->retain();
1386 break;
1387 }
1388 }
1389 }
1390 OSSafeReleaseNULL(ioProperty);
1391 return user;
1392 }
1393
1394 IOReturn
clientHasAuthorization(task_t task,IOService * service)1395 IOUserClient::clientHasAuthorization( task_t task,
1396 IOService * service )
1397 {
1398 proc_t p;
1399
1400 p = (proc_t) get_bsdtask_info(task);
1401 if (p) {
1402 uint64_t authorizationID;
1403
1404 authorizationID = proc_uniqueid(p);
1405 if (authorizationID) {
1406 if (service->getAuthorizationID() == authorizationID) {
1407 return kIOReturnSuccess;
1408 }
1409 }
1410 }
1411
1412 return kIOReturnNotPermitted;
1413 }
1414
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	// Check a named privilege for the task identified by securityToken.
	// securityToken is either a task_t, or an IOUCProcessToken * for the
	// secure-console-process privilege. Returns kIOReturnSuccess when the
	// privilege holds, kIOReturnNotPrivileged/Unsupported otherwise.
	kern_return_t kr;
	security_token_t token;
	mach_msg_type_number_t count;
	task_t task;
	OSDictionary * user;
	bool secureConsole;


	// Foreground privilege: denied only for GPU-denied tasks.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console-session privilege: the calling process's audit session must
	// match the session of the user currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// For the secure-console-process privilege the token wraps both a task
	// and a pid; otherwise the token is the task itself.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
		// Propagate the task_info failure as-is.
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator: effective uid 0 in the security token.
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: the uid owns some console session.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: the uid's session must be on-console; for the
		// secure variant the secure-input pid must also match.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1509
1510 OSDictionary *
copyClientEntitlements(task_t task)1511 IOUserClient::copyClientEntitlements(task_t task)
1512 {
1513 proc_t p = NULL;
1514 pid_t pid = 0;
1515 OSDictionary *entitlements = NULL;
1516
1517 p = (proc_t)get_bsdtask_info(task);
1518 if (p == NULL) {
1519 return NULL;
1520 }
1521 pid = proc_pid(p);
1522
1523 if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1524 if (entitlements) {
1525 return entitlements;
1526 }
1527 }
1528
1529 // If the above fails, thats it
1530 return NULL;
1531 }
1532
1533 OSDictionary *
copyClientEntitlementsVnode(vnode_t vnode,off_t offset)1534 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1535 {
1536 OSDictionary *entitlements = NULL;
1537
1538 if (cs_entitlements_dictionary_copy_vnode(vnode, offset, (void**)&entitlements) != 0) {
1539 return NULL;
1540 }
1541 return entitlements;
1542 }
1543
1544 OSObject *
copyClientEntitlement(task_t task,const char * entitlement)1545 IOUserClient::copyClientEntitlement( task_t task,
1546 const char * entitlement )
1547 {
1548 void *entitlement_object = NULL;
1549
1550 if (task == NULL) {
1551 task = current_task();
1552 }
1553
1554 /* Validate input arguments */
1555 if (task == kernel_task || entitlement == NULL) {
1556 return NULL;
1557 }
1558 proc_t proc = (proc_t)get_bsdtask_info(task);
1559
1560 kern_return_t ret = amfi->OSEntitlements.copyEntitlementAsOSObjectWithProc(
1561 proc,
1562 entitlement,
1563 &entitlement_object);
1564
1565 if (ret != KERN_SUCCESS) {
1566 return NULL;
1567 }
1568 assert(entitlement_object != NULL);
1569
1570 return (OSObject*)entitlement_object;
1571 }
1572
1573 OSObject *
copyClientEntitlementVnode(struct vnode * vnode,off_t offset,const char * entitlement)1574 IOUserClient::copyClientEntitlementVnode(
1575 struct vnode *vnode,
1576 off_t offset,
1577 const char *entitlement)
1578 {
1579 OSDictionary *entitlements;
1580 OSObject *value;
1581
1582 entitlements = copyClientEntitlementsVnode(vnode, offset);
1583 if (entitlements == NULL) {
1584 return NULL;
1585 }
1586
1587 /* Fetch the entitlement value from the dictionary. */
1588 value = entitlements->getObject(entitlement);
1589 if (value != NULL) {
1590 value->retain();
1591 }
1592
1593 entitlements->release();
1594 return value;
1595 }
1596
bool
IOUserClient::init()
{
	// NOTE(review): a non-NULL property table appears to be treated as
	// "already initialized", skipping super::init() — confirm this
	// intent before restructuring.
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1606
bool
IOUserClient::init(OSDictionary * dictionary)
{
	// NOTE(review): as with init(), an existing property table short-
	// circuits super::init(dictionary) — the passed dictionary is then
	// ignored; confirm this intent before restructuring.
	if (getPropertyTable() || super::init(dictionary)) {
		return reserve();
	}

	return false;
}
1616
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type )
{
	// Base implementation ignores the task/security/type arguments;
	// subclasses override to record them. Same init pattern as init().
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1628
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type,
    OSDictionary * properties )
{
	bool ok;

	// Deliberately non-short-circuiting: initWithTask() runs even when
	// super::init() failed, so both halves are always initialized.
	ok = super::init( properties );
	ok &= initWithTask( owningTask, securityID, type );

	return ok;
}
1642
bool
IOUserClient::reserve()
{
	// Lazily allocate the expansion data and set up per-instance locks
	// and statistics counters. Always reports success.
	if (!reserved) {
		reserved = IOMallocType(ExpansionData);
	}
	setTerminateDefer(NULL, true);
	IOStatisticsRegisterCounter();
	IORWLockInlineInit(&lock);
	IOLockInlineInit(&filterLock);

	return true;
}
1656
// Links one (task, user client) ownership pair onto two intrusive lists:
// the task's list of owned user clients and the client's list of owners.
struct IOUserClientOwner {
	task_t task;                // owning task
	queue_chain_t taskLink;     // link on task_io_user_clients(task)
	IOUserClient * uc;          // owned user client
	queue_chain_t ucLink;       // link on uc->owners
};
1663
IOReturn
IOUserClient::registerOwner(task_t task)
{
	// Record `task` as an owner of this user client (idempotent): links a
	// new IOUserClientOwner onto both the client's and the task's lists.
	IOUserClientOwner * owner;
	IOReturn ret;
	bool newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	// The owners queue head is lazily initialized; .next == NULL means
	// it has never been used.
	if (!owners.next) {
		queue_init(&owners);
	} else {
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			newOwner = false;       // task already registered
			break;
		}
	}
	if (newOwner) {
		owner = IOMallocType(IOUserClientOwner);

		owner->task = task;
		owner->uc = this;
		queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
		queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
		// Propagate our app-suspend interest to the owning task.
		if (messageAppSuspended) {
			task_set_message_app_suspended(task, true);
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1704
void
IOUserClient::noMoreSenders(void)
{
	// All send rights are gone: unlink this client from every owning task
	// and recompute each task's app-suspended-message interest from its
	// remaining clients.
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t * taskque;
	bool hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			// The task keeps suspend messaging only if another of its
			// clients still wants it.
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IOFreeType(owner, IOUserClientOwner);
		}
		// Reset to the "never used" state expected by registerOwner/free.
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1736
1737
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	// The task's app-suspended state flipped: collect (under the lock) the
	// task's clients that asked for this message, then deliver
	// kIOMessageTaskAppSuspendedChange to each outside the lock.
	queue_head_t * taskque;
	IOUserClientOwner * owner;
	OSSet * set;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		// Lazily allocate; on allocation failure we silently deliver to
		// no one (best effort).
		if (!set) {
			set = OSSet::withCapacity(4);
			if (!set) {
				break;
			}
		}
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient * uc;

			uc = (typeof(uc))obj;
#if 0
			{
				OSString * str;
				str = IOCopyLogNameForPID(task_pid(task));
				IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
				    uc->getName(), task_is_app_suspended(task));
				OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1785
1786 static kern_return_t
iokit_task_terminate_phase1(task_t task)1787 iokit_task_terminate_phase1(task_t task)
1788 {
1789 queue_head_t * taskque;
1790 IOUserClientOwner * iter;
1791 OSSet * userServers = NULL;
1792
1793 if (!task_is_driver(task)) {
1794 return KERN_SUCCESS;
1795 }
1796 userServers = OSSet::withCapacity(1);
1797
1798 IOLockLock(gIOUserClientOwnersLock);
1799
1800 taskque = task_io_user_clients(task);
1801 queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
1802 userServers->setObject(iter->uc);
1803 }
1804 IOLockUnlock(gIOUserClientOwnersLock);
1805
1806 if (userServers) {
1807 IOUserServer * userServer;
1808 while ((userServer = OSRequiredCast(IOUserServer, userServers->getAnyObject()))) {
1809 userServer->clientDied();
1810 userServers->removeObject(userServer);
1811 }
1812 userServers->release();
1813 }
1814 return KERN_SUCCESS;
1815 }
1816
static kern_return_t
iokit_task_terminate_phase2(task_t task)
{
	// Phase 2 of task termination: unlink every owner record for the task;
	// clients left with no owners are chained onto a "dead" list (reusing
	// their queue head as a singly linked list) and closed outside the lock.
	queue_head_t * taskque;
	IOUserClientOwner * owner;
	IOUserClient * dead;
	IOUserClient * uc;

	IOLockLock(gIOUserClientOwnersLock);
	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			// Hold a reference for the post-lock clientDied() call.
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IOFreeType(owner, IOUserClientOwner);
	}
	IOLockUnlock(gIOUserClientOwnersLock);

	// Close each orphaned client; shared-instance clients are always
	// notified, others only if not already closed.
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1858
1859 extern "C" kern_return_t
iokit_task_terminate(task_t task,int phase)1860 iokit_task_terminate(task_t task, int phase)
1861 {
1862 switch (phase) {
1863 case 1:
1864 return iokit_task_terminate_phase1(task);
1865 case 2:
1866 return iokit_task_terminate_phase2(task);
1867 default:
1868 panic("iokit_task_terminate phase %d", phase);
1869 }
1870 }
1871
// One node of the per-client singly linked list mapping a task to its
// sandbox filter policy (see IOUserClient::filterForTask).
struct IOUCFilterPolicy {
	task_t task;                        // task this policy applies to
	io_filter_policy_t filterPolicy;    // opaque filter handle
	IOUCFilterPolicy * next;            // next node, NULL-terminated
};
1877
io_filter_policy_t
IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
{
	// Look up (and optionally record) the filter policy associated with a
	// task on this client. With addFilterPolicy == 0 this is a pure
	// lookup; otherwise the policy is inserted if the task has none yet.
	IOUCFilterPolicy * elem;
	io_filter_policy_t filterPolicy;

	filterPolicy = 0;
	IOLockLock(&filterLock);

	// Linear scan of the per-client policy list.
	for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
	}

	if (elem) {
		// An existing entry must agree with any policy being added.
		if (addFilterPolicy) {
			assert(addFilterPolicy == elem->filterPolicy);
		}
		filterPolicy = elem->filterPolicy;
	} else if (addFilterPolicy) {
		// First policy for this task: push onto the list head.
		elem = IOMallocType(IOUCFilterPolicy);
		elem->task = task;
		elem->filterPolicy = addFilterPolicy;
		elem->next = reserved->filterPolicies;
		reserved->filterPolicies = elem;
		filterPolicy = addFilterPolicy;
	}

	IOLockUnlock(&filterLock);
	return filterPolicy;
}
1907
void
IOUserClient::free()
{
	// Tear down per-instance state: mappings, statistics, the filter
	// policy list, expansion data, and the inline locks.
	if (mappings) {
		mappings->release();
	}

	IOStatisticsUnregisterCounter();

	// noMoreSenders() must already have emptied and reset the owner list.
	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		IOUCFilterPolicy * elem;
		IOUCFilterPolicy * nextElem;
		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
			nextElem = elem->next;
			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
			}
			IOFreeType(elem, IOUCFilterPolicy);
		}
		IOFreeType(reserved, ExpansionData);
		IORWLockInlineDestroy(&lock);
		IOLockInlineDestroy(&filterLock);
	}

	super::free();
}
1937
1938 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1939
OSDefineMetaClassAndAbstractStructors(IOUserClient2022,IOUserClient)1940 OSDefineMetaClassAndAbstractStructors( IOUserClient2022, IOUserClient )
1941
1942
1943 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1944
IOReturn
IOUserClient::clientDied( void )
{
	IOReturn ret = kIOReturnNotReady;

	// Close exactly once per client (0 -> 1 CAS on `closed`); shared
	// instances are closed on every death notification.
	if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
		ret = clientClose();
	}

	return ret;
}
1956
1957 IOReturn
clientClose(void)1958 IOUserClient::clientClose( void )
1959 {
1960 return kIOReturnUnsupported;
1961 }
1962
1963 IOService *
getService(void)1964 IOUserClient::getService( void )
1965 {
1966 return NULL;
1967 }
1968
1969 IOReturn
registerNotificationPort(mach_port_t,UInt32,UInt32)1970 IOUserClient::registerNotificationPort(
1971 mach_port_t /* port */,
1972 UInt32 /* type */,
1973 UInt32 /* refCon */)
1974 {
1975 return kIOReturnUnsupported;
1976 }
1977
1978 IOReturn
registerNotificationPort(mach_port_t port,UInt32 type,io_user_reference_t refCon)1979 IOUserClient::registerNotificationPort(
1980 mach_port_t port,
1981 UInt32 type,
1982 io_user_reference_t refCon)
1983 {
1984 return registerNotificationPort(port, type, (UInt32) refCon);
1985 }
1986
1987 IOReturn
getNotificationSemaphore(UInt32 notification_type,semaphore_t * semaphore)1988 IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1989 semaphore_t * semaphore )
1990 {
1991 return kIOReturnUnsupported;
1992 }
1993
1994 IOReturn
connectClient(IOUserClient *)1995 IOUserClient::connectClient( IOUserClient * /* client */ )
1996 {
1997 return kIOReturnUnsupported;
1998 }
1999
2000 IOReturn
clientMemoryForType(UInt32 type,IOOptionBits * options,IOMemoryDescriptor ** memory)2001 IOUserClient::clientMemoryForType( UInt32 type,
2002 IOOptionBits * options,
2003 IOMemoryDescriptor ** memory )
2004 {
2005 return kIOReturnUnsupported;
2006 }
2007
2008 IOReturn
clientMemoryForType(UInt32 type,IOOptionBits * options,OSSharedPtr<IOMemoryDescriptor> & memory)2009 IOUserClient::clientMemoryForType( UInt32 type,
2010 IOOptionBits * options,
2011 OSSharedPtr<IOMemoryDescriptor>& memory )
2012 {
2013 IOMemoryDescriptor* memoryRaw = nullptr;
2014 IOReturn result = clientMemoryForType(type, options, &memoryRaw);
2015 memory.reset(memoryRaw, OSNoRetain);
2016 return result;
2017 }
2018
#if !__LP64__
// Legacy 32-bit mapping entry point; not implemented here — callers use
// mapClientMemory64.
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	IOVirtualAddress atAddress )
{
	return NULL;
}
#endif
2030
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	mach_vm_address_t atAddress )
{
	// Ask the subclass for the memory backing `type`, then map it into the
	// target task; returns the mapping or NULL on failure.
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map = NULL;

	err = clientMemoryForType((UInt32) type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		FAKE_STACK_FRAME(getMetaClass());

		// Only the user-controllable bits come from mapFlags; the rest
		// stay as clientMemoryForType set them.
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		memory->release();      // descriptor reference consumed here

		FAKE_STACK_FRAME_END();
	}

	return map;
}
2058
2059 IOReturn
exportObjectToClient(task_t task,OSObject * obj,io_object_t * clientObj)2060 IOUserClient::exportObjectToClient(task_t task,
2061 OSObject *obj, io_object_t *clientObj)
2062 {
2063 mach_port_name_t name;
2064
2065 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
2066
2067 *clientObj = (io_object_t)(uintptr_t) name;
2068
2069 if (obj) {
2070 obj->release();
2071 }
2072
2073 return kIOReturnSuccess;
2074 }
2075
2076 IOReturn
copyPortNameForObjectInTask(task_t task,OSObject * obj,mach_port_name_t * port_name)2077 IOUserClient::copyPortNameForObjectInTask(task_t task,
2078 OSObject *obj, mach_port_name_t * port_name)
2079 {
2080 mach_port_name_t name;
2081
2082 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
2083
2084 *(mach_port_name_t *) port_name = name;
2085
2086 return kIOReturnSuccess;
2087 }
2088
2089 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSObject ** obj)2090 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2091 OSObject **obj)
2092 {
2093 OSObject * object;
2094
2095 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
2096
2097 *obj = object;
2098
2099 return object ? kIOReturnSuccess : kIOReturnIPCError;
2100 }
2101
2102 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSSharedPtr<OSObject> & obj)2103 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2104 OSSharedPtr<OSObject>& obj)
2105 {
2106 OSObject* objRaw = NULL;
2107 IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2108 obj.reset(objRaw, OSNoRetain);
2109 return result;
2110 }
2111
2112 IOReturn
adjustPortNameReferencesInTask(task_t task,mach_port_name_t port_name,mach_port_delta_t delta)2113 IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
2114 {
2115 return iokit_mod_send_right(task, port_name, delta);
2116 }
2117
2118 IOExternalMethod *
getExternalMethodForIndex(UInt32)2119 IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
2120 {
2121 return NULL;
2122 }
2123
2124 IOExternalAsyncMethod *
getExternalAsyncMethodForIndex(UInt32)2125 IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
2126 {
2127 return NULL;
2128 }
2129
2130 IOExternalTrap *
2131 IOUserClient::
getExternalTrapForIndex(UInt32 index)2132 getExternalTrapForIndex(UInt32 index)
2133 {
2134 return NULL;
2135 }
2136
2137 #pragma clang diagnostic push
2138 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2139
2140 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
2141 // functions can break clients of kexts implementing getExternalMethodForIndex()
2142 IOExternalMethod *
2143 IOUserClient::
getTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2144 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
2145 {
2146 IOExternalMethod *method = getExternalMethodForIndex(index);
2147
2148 if (method) {
2149 *targetP = (IOService *) method->object;
2150 }
2151
2152 return method;
2153 }
2154
2155 IOExternalMethod *
2156 IOUserClient::
getTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2157 getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2158 {
2159 IOService* targetPRaw = NULL;
2160 IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
2161 targetP.reset(targetPRaw, OSRetain);
2162 return result;
2163 }
2164
2165 IOExternalAsyncMethod *
2166 IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2167 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
2168 {
2169 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
2170
2171 if (method) {
2172 *targetP = (IOService *) method->object;
2173 }
2174
2175 return method;
2176 }
2177
2178 IOExternalAsyncMethod *
2179 IOUserClient::
getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2180 getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2181 {
2182 IOService* targetPRaw = NULL;
2183 IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
2184 targetP.reset(targetPRaw, OSRetain);
2185 return result;
2186 }
2187
2188 IOExternalTrap *
2189 IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP,UInt32 index)2190 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
2191 {
2192 IOExternalTrap *trap = getExternalTrapForIndex(index);
2193
2194 if (trap) {
2195 *targetP = trap->object;
2196 }
2197
2198 return trap;
2199 }
2200 #pragma clang diagnostic pop
2201
2202 IOReturn
releaseAsyncReference64(OSAsyncReference64 reference)2203 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2204 {
2205 mach_port_t port;
2206 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2207
2208 if (MACH_PORT_NULL != port) {
2209 iokit_release_port_send(port);
2210 }
2211
2212 return kIOReturnSuccess;
2213 }
2214
2215 IOReturn
releaseNotificationPort(mach_port_t port)2216 IOUserClient::releaseNotificationPort(mach_port_t port)
2217 {
2218 if (MACH_PORT_NULL != port) {
2219 iokit_release_port_send(port);
2220 }
2221
2222 return kIOReturnSuccess;
2223 }
2224
2225 IOReturn
sendAsyncResult(OSAsyncReference reference,IOReturn result,void * args[],UInt32 numArgs)2226 IOUserClient::sendAsyncResult(OSAsyncReference reference,
2227 IOReturn result, void *args[], UInt32 numArgs)
2228 {
2229 OSAsyncReference64 reference64;
2230 OSBoundedArray<io_user_reference_t, kMaxAsyncArgs> args64;
2231 unsigned int idx;
2232
2233 if (numArgs > kMaxAsyncArgs) {
2234 return kIOReturnMessageTooLarge;
2235 }
2236
2237 for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2238 reference64[idx] = REF64(reference[idx]);
2239 }
2240
2241 for (idx = 0; idx < numArgs; idx++) {
2242 args64[idx] = REF64(args[idx]);
2243 }
2244
2245 return sendAsyncResult64(reference64, result, args64.data(), numArgs);
2246 }
2247
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Thin wrapper: forward the caller-supplied options (e.g.
	// kIOUserNotifyOptionCanDrop) to the common implementation.
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2254
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	// Default entry point: no options, so a full message queue fails the
	// send rather than dropping the notification.
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2261
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Deliver an async completion to the client's wake port. The message
	// uses either the 32-bit or 64-bit layout depending on the
	// kIOUCAsync64Flag recorded in reference[0].
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64 notifyHdr;
				IOAsyncCompletionContent asyncContent;
				io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg replyMsg;
	mach_port_t replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port = NULL;
	replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit client: args go out at full width. msgh_size is
		// trimmed to carry only the numArgs arguments supplied.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		unsigned int idx;

		// 32-bit client: references and args are narrowed via REF32.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		/* Skip reference[0] which is left as 0 from the earlier bzero */
		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		// Caller permits dropping: zero timeout makes a full queue
		// return MACH_SEND_TIMED_OUT instead of blocking.
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH64_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel(&replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	// Log only the first failure per reference; the flag in reference[0]
	// suppresses repeats. Timeouts are expected under CanDrop and not logged.
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2352
2353
2354 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2355
2356 extern "C" {
/* CHECK: dynamic-cast 'obj' to 'cls', declaring 'out' as the result;
 * returns kIOReturnBadArgument from the enclosing function on failure. */
#define CHECK(cls, obj, out)                      \
	cls * out;                                \
	if( !(out = OSDynamicCast( cls, obj)))    \
	    return( kIOReturnBadArgument )

/* CHECKLOCKED: like CHECK, but expects 'obj' to be an IOUserIterator
 * wrapper ('oIter') and casts its wrapped userIteratorObject to 'cls'. */
#define CHECKLOCKED(cls, obj, out)                              \
	IOUserIterator * oIter;                                 \
	cls * out;                                              \
	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))      \
	    return (kIOReturnBadArgument);                      \
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject)))     \
	    return (kIOReturnBadArgument)
2369
2370 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2371
2372 // Create a vm_map_copy_t or kalloc'ed data for memory
2373 // to be copied out. ipc will free after the copyout.
2374
2375 static kern_return_t
copyoutkdata(const void * data,vm_size_t len,io_buf_ptr_t * buf)2376 copyoutkdata( const void * data, vm_size_t len,
2377 io_buf_ptr_t * buf )
2378 {
2379 kern_return_t err;
2380 vm_map_copy_t copy;
2381
2382 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2383 false /* src_destroy */, ©);
2384
2385 assert( err == KERN_SUCCESS );
2386 if (err == KERN_SUCCESS) {
2387 *buf = (char *) copy;
2388 }
2389
2390 return err;
2391 }
2392
2393 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2394
2395 /* Routine io_server_version */
kern_return_t
is_io_server_version(
	mach_port_t main_port,
	uint64_t *version)
{
	// Report the IOKit server interface version; main_port is unused
	// (no privilege check is performed for this query).
	*version = IOKIT_SERVER_VERSION;
	return kIOReturnSuccess;
}
2404
2405 /* Routine io_object_get_class */
2406 kern_return_t
is_io_object_get_class(io_object_t object,io_name_t className)2407 is_io_object_get_class(
2408 io_object_t object,
2409 io_name_t className )
2410 {
2411 const OSMetaClass* my_obj = NULL;
2412
2413 if (!object) {
2414 return kIOReturnBadArgument;
2415 }
2416
2417 my_obj = object->getMetaClass();
2418 if (!my_obj) {
2419 return kIOReturnNotFound;
2420 }
2421
2422 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2423
2424 return kIOReturnSuccess;
2425 }
2426
2427 /* Routine io_object_get_superclass */
kern_return_t
is_io_object_get_superclass(
	mach_port_t main_port,
	io_name_t obj_name,
	io_name_t class_name)
{
	// Look up the named class's superclass name. Restricted to the main
	// device port.
	IOReturn ret;
	const OSMetaClass * meta;
	const OSMetaClass * super;
	const OSSymbol * name;
	const char * cstr;

	if (!obj_name || !class_name) {
		return kIOReturnBadArgument;
	}
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	ret = kIOReturnNotFound;
	meta = NULL;
	do{
		name = OSSymbol::withCString(obj_name);
		if (!name) {
			break;
		}
		// copyMetaClassWithName takes a reference; released below.
		meta = OSMetaClass::copyMetaClassWithName(name);
		if (!meta) {
			break;
		}
		super = meta->getSuperClass();
		if (!super) {
			break;
		}
		cstr = super->getClassName();
		if (!cstr) {
			break;
		}
		strlcpy(class_name, cstr, sizeof(io_name_t));
		ret = kIOReturnSuccess;
	}while (false);

	OSSafeReleaseNULL(name);
	if (meta) {
		meta->releaseMetaClass();
	}

	return ret;
}
2477
2478 /* Routine io_object_get_bundle_identifier */
2479 kern_return_t
is_io_object_get_bundle_identifier(mach_port_t main_port,io_name_t obj_name,io_name_t bundle_name)2480 is_io_object_get_bundle_identifier(
2481 mach_port_t main_port,
2482 io_name_t obj_name,
2483 io_name_t bundle_name)
2484 {
2485 IOReturn ret;
2486 const OSMetaClass * meta;
2487 const OSSymbol * name;
2488 const OSSymbol * identifier;
2489 const char * cstr;
2490
2491 if (!obj_name || !bundle_name) {
2492 return kIOReturnBadArgument;
2493 }
2494 if (main_port != main_device_port) {
2495 return kIOReturnNotPrivileged;
2496 }
2497
2498 ret = kIOReturnNotFound;
2499 meta = NULL;
2500 do{
2501 name = OSSymbol::withCString(obj_name);
2502 if (!name) {
2503 break;
2504 }
2505 meta = OSMetaClass::copyMetaClassWithName(name);
2506 if (!meta) {
2507 break;
2508 }
2509 identifier = meta->getKmodName();
2510 if (!identifier) {
2511 break;
2512 }
2513 cstr = identifier->getCStringNoCopy();
2514 if (!cstr) {
2515 break;
2516 }
2517 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2518 ret = kIOReturnSuccess;
2519 }while (false);
2520
2521 OSSafeReleaseNULL(name);
2522 if (meta) {
2523 meta->releaseMetaClass();
2524 }
2525
2526 return ret;
2527 }
2528
2529 /* Routine io_object_conforms_to */
2530 kern_return_t
is_io_object_conforms_to(io_object_t object,io_name_t className,boolean_t * conforms)2531 is_io_object_conforms_to(
2532 io_object_t object,
2533 io_name_t className,
2534 boolean_t *conforms )
2535 {
2536 if (!object) {
2537 return kIOReturnBadArgument;
2538 }
2539
2540 *conforms = (NULL != object->metaCast( className ));
2541
2542 return kIOReturnSuccess;
2543 }
2544
2545 /* Routine io_object_get_retain_count */
2546 kern_return_t
is_io_object_get_retain_count(io_object_t object,uint32_t * retainCount)2547 is_io_object_get_retain_count(
2548 io_object_t object,
2549 uint32_t *retainCount )
2550 {
2551 if (!object) {
2552 return kIOReturnBadArgument;
2553 }
2554
2555 *retainCount = object->getRetainCount();
2556 return kIOReturnSuccess;
2557 }
2558
2559 /* Routine io_iterator_next */
2560 kern_return_t
is_io_iterator_next(io_object_t iterator,io_object_t * object)2561 is_io_iterator_next(
2562 io_object_t iterator,
2563 io_object_t *object )
2564 {
2565 IOReturn ret;
2566 OSObject * obj;
2567 OSIterator * iter;
2568 IOUserIterator * uiter;
2569
2570 if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2571 obj = uiter->copyNextObject();
2572 } else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2573 obj = iter->getNextObject();
2574 if (obj) {
2575 obj->retain();
2576 }
2577 } else {
2578 return kIOReturnBadArgument;
2579 }
2580
2581 if (obj) {
2582 *object = obj;
2583 ret = kIOReturnSuccess;
2584 } else {
2585 ret = kIOReturnNoDevice;
2586 }
2587
2588 return ret;
2589 }
2590
2591 /* Routine io_iterator_reset */
kern_return_t
is_io_iterator_reset(
	io_object_t iterator )
{
	// Rewind the iterator to its first element; CHECK rejects anything
	// that is not an OSIterator.
	CHECK( OSIterator, iterator, iter );

	iter->reset();

	return kIOReturnSuccess;
}
2602
2603 /* Routine io_iterator_is_valid */
kern_return_t
is_io_iterator_is_valid(
	io_object_t iterator,
	boolean_t *is_valid )
{
	// Report whether the underlying collection changed since iteration
	// began; CHECK rejects anything that is not an OSIterator.
	CHECK( OSIterator, iterator, iter );

	*is_valid = iter->isValid();

	return kIOReturnSuccess;
}
2615
// Deserialize the caller's matching dictionary and run passive matching
// against the given service.
static kern_return_t
internal_io_service_match_property_table(
	io_service_t _service,
	const char * matching,
	mach_msg_type_number_t matching_size,
	boolean_t *matches)
{
	CHECK( IOService, _service, service );

	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	assert(matching_size);


	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		// Apply per-task compatibility rewrites before matching.
		IOTaskRegistryCompatibilityMatching(current_task(), dict);
		*matches = service->passiveMatch( dict );
		kr = kIOReturnSuccess;
	} else {
		kr = kIOReturnBadArgument;
	}

	if (obj) {
		obj->release();
	}

	return kr;
}
2648
2649 /* Routine io_service_match_property_table */
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	// In-band XML-string variant is no longer supported; callers use the
	// _bin / _ool forms instead.
	return kIOReturnUnsupported;
}
2658
2659
2660 /* Routine io_service_match_property_table_ool */
kern_return_t
is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Materialize the out-of-line matching data into the kernel map.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// (the copy object is consumed); the real status goes in *result.
		*result = internal_io_service_match_property_table(service,
		    (const char *)data, matchingCnt, matches );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2685
2686 /* Routine io_service_match_property_table_bin */
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	// Inband (binary-serialized) variant; data arrives directly in the
	// mig message, no vm copy needed.
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2696
// Deserialize a matching dictionary and return an iterator over all
// currently-matching services. Restricted to the main device port.
static kern_return_t
internal_io_service_get_matching_services(
	mach_port_t main_port,
	const char * matching,
	mach_msg_type_number_t matching_size,
	io_iterator_t *existing )
{
	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	assert(matching_size);
	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		IOTaskRegistryCompatibilityMatching(current_task(), dict);
		// Wrap in IOUserIterator so iteration is safe across user calls.
		*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
		kr = kIOReturnSuccess;
	} else {
		kr = kIOReturnBadArgument;
	}

	if (obj) {
		obj->release();
	}

	return kr;
}
2729
2730 /* Routine io_service_get_matching_services */
kern_return_t
is_io_service_get_matching_services(
	mach_port_t main_port,
	io_string_t matching,
	io_iterator_t *existing )
{
	// In-band XML-string variant is no longer supported; callers use the
	// _bin / _ool forms instead.
	return kIOReturnUnsupported;
}
2739
2740 /* Routine io_service_get_matching_services_ool */
kern_return_t
is_io_service_get_matching_services_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Materialize the out-of-line matching data into the kernel map.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*existing = NULL;
		*result = internal_io_service_get_matching_services(main_port,
		    (const char *) data, matchingCnt, existing);
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2767
2768 /* Routine io_service_get_matching_services_bin */
kern_return_t
is_io_service_get_matching_services_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
	// Inband (binary-serialized) variant; data arrives directly in the
	// mig message, no vm copy needed.
	return internal_io_service_get_matching_services(main_port, matching, matchingCnt, existing);
}
2778
2779
// Deserialize a matching dictionary and return the first matching service
// (retained). Restricted to the main device port.
static kern_return_t
internal_io_service_get_matching_service(
	mach_port_t main_port,
	const char * matching,
	mach_msg_type_number_t matching_size,
	io_service_t *service )
{
	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	assert(matching_size);
	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		IOTaskRegistryCompatibilityMatching(current_task(), dict);
		// copyMatchingService returns a retained service or NULL.
		*service = IOService::copyMatchingService( dict );
		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
	} else {
		kr = kIOReturnBadArgument;
	}

	if (obj) {
		obj->release();
	}

	return kr;
}
2812
2813 /* Routine io_service_get_matching_service */
kern_return_t
is_io_service_get_matching_service(
	mach_port_t main_port,
	io_string_t matching,
	io_service_t *service )
{
	// In-band XML-string variant is no longer supported; callers use the
	// _bin / _ool forms instead.
	return kIOReturnUnsupported;
}
2822
2823 /* Routine io_service_get_matching_services_ool */
kern_return_t
is_io_service_get_matching_service_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Materialize the out-of-line matching data into the kernel map.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*service = NULL;
		*result = internal_io_service_get_matching_service(main_port,
		    (const char *) data, matchingCnt, service );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2850
2851 /* Routine io_service_get_matching_service_bin */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	// Inband (binary-serialized) variant; data arrives directly in the
	// mig message, no vm copy needed.
	return internal_io_service_get_matching_service(main_port, matching, matchingCnt, service);
}
2861
// Register a matching notification: deserialize the matching dictionary,
// build an IOServiceUserNotification around the caller's wake port, and
// arm it via IOService::addMatchingNotification. Restricted to the main
// device port. On failure all partially-built state is torn down.
static kern_return_t
internal_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	const char * matching,
	size_t matching_size,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceUserNotification * userNotify = NULL;
	IONotifier * notify = NULL;
	const OSSymbol * sym;
	OSObject * obj;
	OSDictionary * dict;
	IOReturn err;
	natural_t userMsgType;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	do {
		err = kIOReturnNoResources;

		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		// NOTE(review): on OSSymbol failure execution falls through with
		// sym == NULL rather than leaving the loop; the later compares
		// and the guarded release tolerate NULL — confirm intentional.
		if (!(sym = OSSymbol::withCString( notification_type ))) {
			err = kIOReturnNoResources;
		}

		assert(matching_size);
		obj = OSUnserializeXML(matching, matching_size);
		dict = OSDynamicCast(OSDictionary, obj);
		if (!dict) {
			err = kIOReturnBadArgument;
			continue;
		}
		IOTaskRegistryCompatibilityMatching(current_task(), dict);

		// Map the notification type symbol to the message type the
		// client will receive on its port.
		if ((sym == gIOPublishNotification)
		    || (sym == gIOFirstPublishNotification)) {
			userMsgType = kIOServicePublishNotificationType;
		} else if ((sym == gIOMatchedNotification)
		    || (sym == gIOFirstMatchNotification)) {
			userMsgType = kIOServiceMatchedNotificationType;
		} else if ((sym == gIOTerminatedNotification)
		    || (sym == gIOWillTerminateNotification)) {
			userMsgType = kIOServiceTerminatedNotificationType;
		} else {
			userMsgType = kLastIOKitNotificationType;
		}

		userNotify = new IOServiceUserNotification;

		if (userNotify && !userNotify->init( port, userMsgType,
		    reference, referenceSize, client64)) {
			userNotify->release();
			userNotify = NULL;
		}
		if (!userNotify) {
			continue;
		}

		notify = IOService::addMatchingNotification( sym, dict,
		    &userNotify->_handler, userNotify );
		if (notify) {
			*notification = userNotify;
			userNotify->setNotification( notify );
			err = kIOReturnSuccess;
		} else {
			err = kIOReturnUnsupported;
		}
	} while (false);

	// Failure: unhook and drop the partially-constructed notification.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	if (sym) {
		sym->release();
	}
	if (obj) {
		obj->release();
	}

	return err;
}
2957
2958
2959 /* Routine io_service_add_notification */
kern_return_t
is_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	// In-band XML-string variant is no longer supported; callers use the
	// _bin / _ool forms instead.
	return kIOReturnUnsupported;
}
2972
2973 /* Routine io_service_add_notification_64 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	// In-band XML-string variant is no longer supported; callers use the
	// _bin / _ool forms instead.
	return kIOReturnUnsupported;
}
2986
2987 /* Routine io_service_add_notification_bin */
2988 kern_return_t
is_io_service_add_notification_bin(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)2989 is_io_service_add_notification_bin
2990 (
2991 mach_port_t main_port,
2992 io_name_t notification_type,
2993 io_struct_inband_t matching,
2994 mach_msg_type_number_t matchingCnt,
2995 mach_port_t wake_port,
2996 io_async_ref_t reference,
2997 mach_msg_type_number_t referenceCnt,
2998 io_object_t *notification)
2999 {
3000 io_async_ref_t zreference;
3001
3002 if (referenceCnt > ASYNC_REF_COUNT) {
3003 return kIOReturnBadArgument;
3004 }
3005 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3006 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3007
3008 return internal_io_service_add_notification(main_port, notification_type,
3009 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3010 false, notification);
3011 }
3012
3013 /* Routine io_service_add_notification_bin_64 */
3014 kern_return_t
is_io_service_add_notification_bin_64(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3015 is_io_service_add_notification_bin_64
3016 (
3017 mach_port_t main_port,
3018 io_name_t notification_type,
3019 io_struct_inband_t matching,
3020 mach_msg_type_number_t matchingCnt,
3021 mach_port_t wake_port,
3022 io_async_ref64_t reference,
3023 mach_msg_type_number_t referenceCnt,
3024 io_object_t *notification)
3025 {
3026 io_async_ref64_t zreference;
3027
3028 if (referenceCnt > ASYNC_REF64_COUNT) {
3029 return kIOReturnBadArgument;
3030 }
3031 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3032 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3033
3034 return internal_io_service_add_notification(main_port, notification_type,
3035 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3036 true, notification);
3037 }
3038
// Out-of-line variant: copy the matching data out of the vm_map_copy_t,
// then delegate to internal_io_service_add_notification.
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Materialize the out-of-line matching data into the kernel map.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		*result = internal_io_service_add_notification( main_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
3070
3071 /* Routine io_service_add_notification_ool */
3072 kern_return_t
is_io_service_add_notification_ool(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3073 is_io_service_add_notification_ool(
3074 mach_port_t main_port,
3075 io_name_t notification_type,
3076 io_buf_ptr_t matching,
3077 mach_msg_type_number_t matchingCnt,
3078 mach_port_t wake_port,
3079 io_async_ref_t reference,
3080 mach_msg_type_number_t referenceCnt,
3081 kern_return_t *result,
3082 io_object_t *notification )
3083 {
3084 io_async_ref_t zreference;
3085
3086 if (referenceCnt > ASYNC_REF_COUNT) {
3087 return kIOReturnBadArgument;
3088 }
3089 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3090 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3091
3092 return internal_io_service_add_notification_ool(main_port, notification_type,
3093 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3094 false, result, notification);
3095 }
3096
3097 /* Routine io_service_add_notification_ool_64 */
3098 kern_return_t
is_io_service_add_notification_ool_64(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3099 is_io_service_add_notification_ool_64(
3100 mach_port_t main_port,
3101 io_name_t notification_type,
3102 io_buf_ptr_t matching,
3103 mach_msg_type_number_t matchingCnt,
3104 mach_port_t wake_port,
3105 io_async_ref64_t reference,
3106 mach_msg_type_number_t referenceCnt,
3107 kern_return_t *result,
3108 io_object_t *notification )
3109 {
3110 io_async_ref64_t zreference;
3111
3112 if (referenceCnt > ASYNC_REF64_COUNT) {
3113 return kIOReturnBadArgument;
3114 }
3115 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3116 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3117
3118 return internal_io_service_add_notification_ool(main_port, notification_type,
3119 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3120 true, result, notification);
3121 }
3122
3123 /* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	// Legacy single-reference entry point; forwards to the (now
	// unsupported) XML-string variant.
	return is_io_service_add_notification( main_port, notification_type,
	           matching, port, &ref, 1, notification );
}
3137
3138
// Register an interest (message) notification on a specific service: wrap
// the caller's wake port in an IOServiceMessageUserNotification and hook
// it up via registerInterest. On failure all partial state is torn down.
static kern_return_t
internal_io_service_add_interest_notification(
	io_object_t _service,
	io_name_t type_of_interest,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceMessageUserNotification * userNotify = NULL;
	IONotifier * notify = NULL;
	const OSSymbol * sym;
	IOReturn err;

	CHECK( IOService, _service, service );

	err = kIOReturnNoResources;
	if ((sym = OSSymbol::withCString( type_of_interest ))) {
		do {
			userNotify = new IOServiceMessageUserNotification;

			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
			    reference, referenceSize, client64 )) {
				userNotify->release();
				userNotify = NULL;
			}
			if (!userNotify) {
				continue;
			}

			// The notification object is handed out to the caller
			// on success (mig takes the reference).
			notify = service->registerInterest( sym,
			    &userNotify->_handler, userNotify );
			if (notify) {
				*notification = userNotify;
				userNotify->setNotification( notify );
				err = kIOReturnSuccess;
			} else {
				err = kIOReturnUnsupported;
			}
		} while (false);

		sym->release();
	}

	// Failure: unhook and drop the partially-constructed notification.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	return err;
}
3193
3194 /* Routine io_service_add_message_notification */
3195 kern_return_t
is_io_service_add_interest_notification(io_object_t service,io_name_t type_of_interest,mach_port_t port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3196 is_io_service_add_interest_notification(
3197 io_object_t service,
3198 io_name_t type_of_interest,
3199 mach_port_t port,
3200 io_async_ref_t reference,
3201 mach_msg_type_number_t referenceCnt,
3202 io_object_t * notification )
3203 {
3204 io_async_ref_t zreference;
3205
3206 if (referenceCnt > ASYNC_REF_COUNT) {
3207 return kIOReturnBadArgument;
3208 }
3209 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3210 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3211
3212 return internal_io_service_add_interest_notification(service, type_of_interest,
3213 port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3214 }
3215
3216 /* Routine io_service_add_interest_notification_64 */
3217 kern_return_t
is_io_service_add_interest_notification_64(io_object_t service,io_name_t type_of_interest,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3218 is_io_service_add_interest_notification_64(
3219 io_object_t service,
3220 io_name_t type_of_interest,
3221 mach_port_t wake_port,
3222 io_async_ref64_t reference,
3223 mach_msg_type_number_t referenceCnt,
3224 io_object_t *notification )
3225 {
3226 io_async_ref64_t zreference;
3227
3228 if (referenceCnt > ASYNC_REF64_COUNT) {
3229 return kIOReturnBadArgument;
3230 }
3231 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3232 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3233
3234 return internal_io_service_add_interest_notification(service, type_of_interest,
3235 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3236 }
3237
3238
3239 /* Routine io_service_acknowledge_notification */
3240 kern_return_t
is_io_service_acknowledge_notification(io_object_t _service,natural_t notify_ref,natural_t response)3241 is_io_service_acknowledge_notification(
3242 io_object_t _service,
3243 natural_t notify_ref,
3244 natural_t response )
3245 {
3246 CHECK( IOService, _service, service );
3247
3248 return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
3249 (IOOptionBits) response );
3250 }
3251
3252 /* Routine io_connect_get_semaphore */
3253 kern_return_t
is_io_connect_get_notification_semaphore(io_connect_t connection,natural_t notification_type,semaphore_t * semaphore)3254 is_io_connect_get_notification_semaphore(
3255 io_connect_t connection,
3256 natural_t notification_type,
3257 semaphore_t *semaphore )
3258 {
3259 IOReturn ret;
3260 CHECK( IOUserClient, connection, client );
3261
3262 IOStatisticsClientCall();
3263 client->ipcEnter(kIPCLockWrite);
3264 ret = client->getNotificationSemaphore((UInt32) notification_type,
3265 semaphore );
3266 client->ipcExit(kIPCLockWrite);
3267
3268 return ret;
3269 }
3270
3271 /* Routine io_registry_get_root_entry */
3272 kern_return_t
is_io_registry_get_root_entry(mach_port_t main_port,io_object_t * root)3273 is_io_registry_get_root_entry(
3274 mach_port_t main_port,
3275 io_object_t *root )
3276 {
3277 IORegistryEntry * entry;
3278
3279 if (main_port != main_device_port) {
3280 return kIOReturnNotPrivileged;
3281 }
3282
3283 entry = IORegistryEntry::getRegistryRoot();
3284 if (entry) {
3285 entry->retain();
3286 }
3287 *root = entry;
3288
3289 return kIOReturnSuccess;
3290 }
3291
3292 /* Routine io_registry_create_iterator */
3293 kern_return_t
is_io_registry_create_iterator(mach_port_t main_port,io_name_t plane,uint32_t options,io_object_t * iterator)3294 is_io_registry_create_iterator(
3295 mach_port_t main_port,
3296 io_name_t plane,
3297 uint32_t options,
3298 io_object_t *iterator )
3299 {
3300 if (main_port != main_device_port) {
3301 return kIOReturnNotPrivileged;
3302 }
3303
3304 *iterator = IOUserIterator::withIterator(
3305 IORegistryIterator::iterateOver(
3306 IORegistryEntry::getPlane( plane ), options ));
3307
3308 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3309 }
3310
3311 /* Routine io_registry_entry_create_iterator */
3312 kern_return_t
is_io_registry_entry_create_iterator(io_object_t registry_entry,io_name_t plane,uint32_t options,io_object_t * iterator)3313 is_io_registry_entry_create_iterator(
3314 io_object_t registry_entry,
3315 io_name_t plane,
3316 uint32_t options,
3317 io_object_t *iterator )
3318 {
3319 CHECK( IORegistryEntry, registry_entry, entry );
3320
3321 *iterator = IOUserIterator::withIterator(
3322 IORegistryIterator::iterateOver( entry,
3323 IORegistryEntry::getPlane( plane ), options ));
3324
3325 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3326 }
3327
/* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// CHECKLOCKED declares both 'iter' (the IORegistryIterator) and
	// 'oIter' (the wrapping IOUserIterator that carries the lock).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Serialize against concurrent use of the same user iterator.
	IOLockLock(&oIter->lock);
	iter->enterEntry();
	IOLockUnlock(&oIter->lock);

	return kIOReturnSuccess;
}
3341
/* Routine io_registry_iterator_exit */
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool didIt;

	// CHECKLOCKED declares both 'iter' (the IORegistryIterator) and
	// 'oIter' (the wrapping IOUserIterator that carries the lock).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Serialize against concurrent use of the same user iterator.
	IOLockLock(&oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(&oIter->lock);

	// A false return from exitEntry() maps to kIOReturnNoDevice.
	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
3357
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path(
	mach_port_t main_port,
	io_string_t path,
	io_object_t *registry_entry )
{
	IORegistryEntry * entry;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	entry = IORegistryEntry::fromPath( path );

	// For tasks in registry-compatibility mode, retry as a matching
	// query (compatibility-match + path-match) so entries not directly
	// reachable by path can still be resolved.
	if (!entry && IOTaskRegistryCompatibility(current_task())) {
		OSDictionary * matching;
		const OSObject * objects[2] = { kOSBooleanTrue, NULL };
		const OSSymbol * keys[2] = { gIOCompatibilityMatchKey, gIOPathMatchKey };

		objects[1] = OSString::withCStringNoCopy(path);
		matching = OSDictionary::withObjects(objects, keys, 2, 2);
		if (matching) {
			entry = IOService::copyMatchingService(matching);
		}
		OSSafeReleaseNULL(matching);
		OSSafeReleaseNULL(objects[1]);
	}

	// *registry_entry may be NULL: this routine reports success even
	// when nothing was found; callers check the out parameter.
	*registry_entry = entry;

	return kIOReturnSuccess;
}
3391
3392
/* Routine io_registry_entry_from_path_ool */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t main_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry * entry;
	vm_map_offset_t map_data;
	const char * cpath;
	IOReturn res;
	kern_return_t err;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry = NULL;
	res = err = KERN_SUCCESS;
	// Short paths arrive inband; an empty inband string means the path
	// was sent out-of-line in path_ool.
	if (path[0]) {
		cpath = path;
	} else {
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// The out-of-line path must be NUL-terminated.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	// Release the copied-out path pages, if any.
	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	if (KERN_SUCCESS != err) {
		res = err;
	}
	// 'err' is the MIG-level status; the operation status goes back to
	// the caller via *result.  *registry_entry may be NULL.
	*registry_entry = entry;
	*result = res;

	return err;
}
3453
3454
3455 /* Routine io_registry_entry_in_plane */
3456 kern_return_t
is_io_registry_entry_in_plane(io_object_t registry_entry,io_name_t plane,boolean_t * inPlane)3457 is_io_registry_entry_in_plane(
3458 io_object_t registry_entry,
3459 io_name_t plane,
3460 boolean_t *inPlane )
3461 {
3462 CHECK( IORegistryEntry, registry_entry, entry );
3463
3464 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3465
3466 return kIOReturnSuccess;
3467 }
3468
3469
3470 /* Routine io_registry_entry_get_path */
3471 kern_return_t
is_io_registry_entry_get_path(io_object_t registry_entry,io_name_t plane,io_string_t path)3472 is_io_registry_entry_get_path(
3473 io_object_t registry_entry,
3474 io_name_t plane,
3475 io_string_t path )
3476 {
3477 int length;
3478 CHECK( IORegistryEntry, registry_entry, entry );
3479
3480 length = sizeof(io_string_t);
3481 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3482 return kIOReturnSuccess;
3483 } else {
3484 return kIOReturnBadArgument;
3485 }
3486 }
3487
/* Routine io_registry_entry_get_path_ool */
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum { kMaxPath = 16384 };
	IOReturn err;
	int length;
	char * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Try the inband buffer first; fall back to a kMaxPath heap buffer
	// returned out-of-line when the path does not fit inband.
	*path_ool = NULL;
	*path_oolCnt = 0;
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		err = kIOReturnSuccess;
	} else {
		length = kMaxPath;
		buf = IONewData(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			// getPath() updated 'length' to the actual path size.
			*path_oolCnt = length;
			err = copyoutkdata(buf, length, path_ool);
		}
		if (buf) {
			// Free with the allocation size, not the (smaller) path length.
			IODeleteData(buf, char, kMaxPath);
		}
	}

	return err;
}
3527
3528
3529 /* Routine io_registry_entry_get_name */
3530 kern_return_t
is_io_registry_entry_get_name(io_object_t registry_entry,io_name_t name)3531 is_io_registry_entry_get_name(
3532 io_object_t registry_entry,
3533 io_name_t name )
3534 {
3535 CHECK( IORegistryEntry, registry_entry, entry );
3536
3537 strncpy( name, entry->getName(), sizeof(io_name_t));
3538
3539 return kIOReturnSuccess;
3540 }
3541
3542 /* Routine io_registry_entry_get_name_in_plane */
3543 kern_return_t
is_io_registry_entry_get_name_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t name)3544 is_io_registry_entry_get_name_in_plane(
3545 io_object_t registry_entry,
3546 io_name_t planeName,
3547 io_name_t name )
3548 {
3549 const IORegistryPlane * plane;
3550 CHECK( IORegistryEntry, registry_entry, entry );
3551
3552 if (planeName[0]) {
3553 plane = IORegistryEntry::getPlane( planeName );
3554 } else {
3555 plane = NULL;
3556 }
3557
3558 strncpy( name, entry->getName( plane), sizeof(io_name_t));
3559
3560 return kIOReturnSuccess;
3561 }
3562
3563 /* Routine io_registry_entry_get_location_in_plane */
3564 kern_return_t
is_io_registry_entry_get_location_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t location)3565 is_io_registry_entry_get_location_in_plane(
3566 io_object_t registry_entry,
3567 io_name_t planeName,
3568 io_name_t location )
3569 {
3570 const IORegistryPlane * plane;
3571 CHECK( IORegistryEntry, registry_entry, entry );
3572
3573 if (planeName[0]) {
3574 plane = IORegistryEntry::getPlane( planeName );
3575 } else {
3576 plane = NULL;
3577 }
3578
3579 const char * cstr = entry->getLocation( plane );
3580
3581 if (cstr) {
3582 strncpy( location, cstr, sizeof(io_name_t));
3583 return kIOReturnSuccess;
3584 } else {
3585 return kIOReturnNotFound;
3586 }
3587 }
3588
3589 /* Routine io_registry_entry_get_registry_entry_id */
3590 kern_return_t
is_io_registry_entry_get_registry_entry_id(io_object_t registry_entry,uint64_t * entry_id)3591 is_io_registry_entry_get_registry_entry_id(
3592 io_object_t registry_entry,
3593 uint64_t *entry_id )
3594 {
3595 CHECK( IORegistryEntry, registry_entry, entry );
3596
3597 *entry_id = entry->getRegistryEntryID();
3598
3599 return kIOReturnSuccess;
3600 }
3601
3602
3603 static OSObject *
IOCopyPropertyCompatible(IORegistryEntry * regEntry,const char * name)3604 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3605 {
3606 OSObject * obj;
3607 OSObject * compatProperties;
3608 OSDictionary * props;
3609
3610 obj = regEntry->copyProperty(name);
3611 if (obj) {
3612 return obj;
3613 }
3614
3615 compatProperties = regEntry->copyProperty(gIOUserServicePropertiesKey);
3616 if (!compatProperties
3617 && IOTaskRegistryCompatibility(current_task())) {
3618 compatProperties = regEntry->copyProperty(gIOCompatibilityPropertiesKey);
3619 }
3620 if (compatProperties) {
3621 props = OSDynamicCast(OSDictionary, compatProperties);
3622 if (props) {
3623 obj = props->getObject(name);
3624 if (obj) {
3625 obj->retain();
3626 }
3627 }
3628 compatProperties->release();
3629 }
3630
3631 return obj;
3632 }
3633
/* Routine io_registry_entry_get_property_bytes */
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject * obj;
	OSData * data;
	OSString * str;
	OSBoolean * boo;
	OSNumber * off;
	UInt64 offsetBytes;
	unsigned int len = 0;
	const void * bytes = NULL;
	IOReturn ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// Per-property MAC gate before any lookup.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then...
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		// Non-serializable data is not exposed: report zero length.
		if (!data->isSerializable()) {
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		// Strings include the terminating NUL in the returned length.
		len = str->getLength() + 1;
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		// Booleans are flattened to the literal strings "Yes" / "No".
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		// Numbers are returned as raw native-endian bytes, clamped to
		// eight; on big-endian targets, point at the low-order end.
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		// Other property types cannot be flattened by this routine.
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		// Fail rather than truncate when the caller's buffer is small.
		if (*dataCnt < len) {
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3706
3707
/* Routine io_registry_entry_get_property */
kern_return_t
is_io_registry_entry_get_property(
	io_object_t registry_entry,
	io_name_t property_name,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	unsigned int len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// Per-property MAC gate before any lookup.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	// Serialize the property and return the buffer out-of-line.
	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3752
/* Routine io_registry_entry_get_property_recursively */
kern_return_t
is_io_registry_entry_get_property_recursively(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	unsigned int len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// Per-property MAC gate before any lookup.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	// The plane-aware copyProperty() overload performs the search per
	// 'options' (e.g. recursive iteration).
	obj = entry->copyProperty( property_name,
	    IORegistryEntry::getPlane( plane ), options );
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	// Serialize the property and return the buffer out-of-line.
	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3800
/* Routine io_registry_entry_get_properties */
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// Legacy variant, no longer implemented; callers are expected to use
	// the *_bin routines below.
	return kIOReturnUnsupported;
}
3810
3811 #if CONFIG_MACF
3812
// Context passed to the GetPropertiesEditor serialization callback.
struct GetPropertiesEditorRef {
	kauth_cred_t cred;          // credential used for the MAC property check
	IORegistryEntry * entry;    // entry whose properties are being serialized
	OSCollection * root;        // top-level container, captured on first callback
};
3818
/*
 * OSSerialize editor callback used when serializing registry properties
 * for a caller subject to MAC property filtering.  Keys of the top-level
 * dictionary are checked with the MAC hook; disallowed values are
 * replaced with NULL (i.e. omitted from the output).  Returns a retained
 * value per the LIBKERN_RETURNS_RETAINED contract.
 */
static const LIBKERN_RETURNS_RETAINED OSMetaClassBase *
GetPropertiesEditor(void * reference,
    OSSerialize * s,
    OSCollection * container,
    const OSSymbol * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	// The first container seen is the root dictionary; only its
	// immediate keys are subject to the per-property MAC check.
	if (!ref->root) {
		ref->root = container;
	}
	if (ref->root == container) {
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			value = NULL;
		}
	}
	if (value) {
		value->retain();
	}
	return value;
}
3841
3842 #endif /* CONFIG_MACF */
3843
/* Routine io_registry_entry_get_properties_bin_buf */
kern_return_t
is_io_registry_entry_get_properties_bin_buf(
	io_object_t registry_entry,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t err = kIOReturnSuccess;
	unsigned int len;
	OSObject * compatProperties;
	OSSerialize * s;
	OSSerialize::Editor editor = NULL;
	void * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// When MAC filtering applies to this caller/entry pair, install the
	// editor callback that omits filtered top-level keys.
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor = &GetPropertiesEditor;
		editRef = &ref;
		ref.cred = kauth_cred_get();
		ref.entry = entry;
		ref.root = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}


	// Pick up user-server / compatibility property overlays, if present.
	compatProperties = entry->copyProperty(gIOUserServicePropertiesKey);
	if (!compatProperties
	    && IOTaskRegistryCompatibility(current_task())) {
		compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey);
	}

	if (compatProperties) {
		OSDictionary * dict;

		// Merge the overlay dictionary over the entry's own properties
		// (minus the bookkeeping keys) before serializing.
		dict = entry->dictionaryWithProperties();
		if (!dict) {
			err = kIOReturnNoMemory;
		} else {
			dict->removeObject(gIOUserServicePropertiesKey);
			dict->removeObject(gIOCompatibilityPropertiesKey);
			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
			if (!dict->serialize(s)) {
				err = kIOReturnUnsupported;
			}
			dict->release();
		}
		compatProperties->release();
	} else if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		// Prefer the caller-supplied buffer when it is large enough;
		// otherwise fall back to an out-of-line copyout.
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	}
	s->release();

	return err;
}
3928
/* Routine io_registry_entry_get_properties_bin */
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	// No user-provided buffer: the _buf variant always returns the
	// serialized properties out-of-line.
	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
	           0, NULL, properties, propertiesCnt);
}
3939
/* Routine io_registry_entry_get_property_bin_buf */
kern_return_t
is_io_registry_entry_get_property_bin_buf(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	unsigned int len;
	OSObject * obj;
	const OSSymbol * sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// Per-property MAC gate on the starting entry.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	err = kIOReturnNotFound;
	if (gIORegistryEntryPropertyKeysKey == sym) {
		// Special key: return the list of property keys, not a value.
		obj = entry->copyPropertyKeys();
	} else {
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			obj = IOCopyPropertyCompatible(entry, property_name);
			if (obj == NULL) {
				// Walk the subtree in the given plane until some entry
				// provides the property.  Note: 'entry' is rebound to
				// the entry where the property was found.
				IORegistryIterator * iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
				if (iter) {
					while ((NULL == obj) && (entry = iter->getNextObject())) {
						OSObject * currentObj = IOCopyPropertyCompatible(entry, property_name);
#if CONFIG_MACF
						if (currentObj != NULL && 0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
							// Record that MAC hook blocked this entry and property, and continue to next entry
							err = kIOReturnNotPermitted;
							OSSafeReleaseNULL(currentObj);
							continue;
						}
#endif
						obj = currentObj;
					}
					iter->release();
				}
			}
		} else {
			obj = IOCopyPropertyCompatible(entry, property_name);
		}
		// Some properties are defined to be consumed on read.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		return err;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		// Prefer the caller-supplied buffer when it is large enough;
		// otherwise fall back to an out-of-line copyout.
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
4040
/* Routine io_registry_entry_get_property_bin */
kern_return_t
is_io_registry_entry_get_property_bin(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// No user-provided buffer: the _buf variant always returns the
	// serialized property out-of-line.
	return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
	           property_name, options, 0, NULL, properties, propertiesCnt);
}
4054
4055
/* Routine io_registry_entry_set_properties */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject * obj;
	kern_return_t err;
	IOReturn res;
	vm_offset_t data;
	vm_map_offset_t map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Bound the serialized payload size.
	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			IOService * service = OSDynamicCast(IOService, entry);
			OSDictionary * props = OSDynamicCast(OSDictionary, obj);
			// Optional allow-list restricting which keys may be set.
			OSObject * allowable = entry->copyProperty(gIORegistryEntryAllowableSetPropertiesKey);
			OSArray * allowableArray;

			if (!allowable) {
				res = kIOReturnSuccess;
			} else {
				if (!props) {
					res = kIOReturnNotPermitted;
				} else if (!(allowableArray = OSDynamicCast(OSArray, allowable))) {
					res = kIOReturnNotPermitted;
				} else {
					bool allFound __block, found __block;

					// Every key in the incoming dictionary must appear
					// in the allow-list array.
					allFound = true;
					props->iterateObjects(^(const OSSymbol * key, OSObject * value) {
						found = false;
						for (unsigned int idx = 0; !found; idx++) {
							OSObject * next = allowableArray->getObject(idx);
							if (!next) {
								break;
							}
							found = next->isEqualTo(key);
						}
						allFound &= found;
						if (!found) {
							IOLog("IORegistryEntrySetProperties(%s, %s) disallowed due to " kIORegistryEntryAllowableSetPropertiesKey "\n",
							    entry->getName(), key->getCStringNoCopy());
						}
						// Stop iterating on the first disallowed key.
						return !allFound;
					});
					res = allFound ? kIOReturnSuccess : kIOReturnBadArgument;
				}
			}
			if (kIOReturnSuccess == res) {
				IOUserClient *
				    client = OSDynamicCast(IOUserClient, entry);

				// User clients may opt into holding their rw-lock
				// across setProperties.
				if (client && client->defaultLockingSetProperties) {
					IORWLockWrite(&client->lock);
				}

				// Non-clients may opt into running under the registry
				// property action instead.
				if (!client && (kOSBooleanTrue == entry->getProperty(gIORegistryEntryDefaultLockingSetPropertiesKey))) {
					res = entry->runPropertyActionBlock(^IOReturn (void) {
						return entry->setProperties( obj );
					});
				} else {
					res = entry->setProperties( obj );
				}

				if (client && client->defaultLockingSetProperties) {
					IORWLockUnlock(&client->lock);
				}
				// Forward to the user-space server when one backs this service.
				if (service && props && service->hasUserServer()) {
					res = service->UserSetProperties(props);
				}
			}
			OSSafeReleaseNULL(allowable);
		}
		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	// Operation status returns via *result; 'err' is the MIG-level status.
	*result = res;
	return err;
}
4169
4170 /* Routine io_registry_entry_get_child_iterator */
4171 kern_return_t
is_io_registry_entry_get_child_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4172 is_io_registry_entry_get_child_iterator(
4173 io_object_t registry_entry,
4174 io_name_t plane,
4175 io_object_t *iterator )
4176 {
4177 CHECK( IORegistryEntry, registry_entry, entry );
4178
4179 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
4180 IORegistryEntry::getPlane( plane )));
4181
4182 return kIOReturnSuccess;
4183 }
4184
4185 /* Routine io_registry_entry_get_parent_iterator */
4186 kern_return_t
is_io_registry_entry_get_parent_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4187 is_io_registry_entry_get_parent_iterator(
4188 io_object_t registry_entry,
4189 io_name_t plane,
4190 io_object_t *iterator)
4191 {
4192 CHECK( IORegistryEntry, registry_entry, entry );
4193
4194 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
4195 IORegistryEntry::getPlane( plane )));
4196
4197 return kIOReturnSuccess;
4198 }
4199
4200 /* Routine io_service_get_busy_state */
4201 kern_return_t
is_io_service_get_busy_state(io_object_t _service,uint32_t * busyState)4202 is_io_service_get_busy_state(
4203 io_object_t _service,
4204 uint32_t *busyState )
4205 {
4206 CHECK( IOService, _service, service );
4207
4208 *busyState = service->getBusyState();
4209
4210 return kIOReturnSuccess;
4211 }
4212
4213 /* Routine io_service_get_state */
4214 kern_return_t
is_io_service_get_state(io_object_t _service,uint64_t * state,uint32_t * busy_state,uint64_t * accumulated_busy_time)4215 is_io_service_get_state(
4216 io_object_t _service,
4217 uint64_t *state,
4218 uint32_t *busy_state,
4219 uint64_t *accumulated_busy_time )
4220 {
4221 CHECK( IOService, _service, service );
4222
4223 *state = service->getState();
4224 *busy_state = service->getBusyState();
4225 *accumulated_busy_time = service->getAccumulatedBusyTime();
4226
4227 return kIOReturnSuccess;
4228 }
4229
4230 /* Routine io_service_wait_quiet */
4231 kern_return_t
is_io_service_wait_quiet(io_object_t _service,mach_timespec_t wait_time)4232 is_io_service_wait_quiet(
4233 io_object_t _service,
4234 mach_timespec_t wait_time )
4235 {
4236 uint64_t timeoutNS;
4237
4238 CHECK( IOService, _service, service );
4239
4240 timeoutNS = wait_time.tv_sec;
4241 timeoutNS *= kSecondScale;
4242 timeoutNS += wait_time.tv_nsec;
4243
4244 return service->waitQuiet(timeoutNS);
4245 }
4246
4247 /* Routine io_service_wait_quiet_with_options */
4248 kern_return_t
is_io_service_wait_quiet_with_options(io_object_t _service,mach_timespec_t wait_time,uint32_t options)4249 is_io_service_wait_quiet_with_options(
4250 io_object_t _service,
4251 mach_timespec_t wait_time,
4252 uint32_t options )
4253 {
4254 uint64_t timeoutNS;
4255
4256 CHECK( IOService, _service, service );
4257
4258 timeoutNS = wait_time.tv_sec;
4259 timeoutNS *= kSecondScale;
4260 timeoutNS += wait_time.tv_nsec;
4261
4262 if ((options & kIOWaitQuietPanicOnFailure) && !IOCurrentTaskHasEntitlement(kIOWaitQuietPanicsEntitlement)) {
4263 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4264 IOLog("IOServiceWaitQuietWithOptions(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4265 OSSafeReleaseNULL(taskName);
4266
4267 /* strip this option from the options before calling waitQuietWithOptions */
4268 options &= ~kIOWaitQuietPanicOnFailure;
4269 }
4270
4271 return service->waitQuietWithOptions(timeoutNS, options);
4272 }
4273
4274
4275 /* Routine io_service_request_probe */
4276 kern_return_t
is_io_service_request_probe(io_object_t _service,uint32_t options)4277 is_io_service_request_probe(
4278 io_object_t _service,
4279 uint32_t options )
4280 {
4281 CHECK( IOService, _service, service );
4282
4283 return service->requestProbe( options );
4284 }
4285
/* Routine io_service_get_authorization_id */
/*
 * Return the service's authorization ID. Restricted to callers holding
 * the administrator privilege; only implemented on macOS targets.
 */
kern_return_t
is_io_service_get_authorization_id(
	io_object_t _service,
	uint64_t *authorization_id )
{
	kern_return_t kr;

	CHECK( IOService, _service, service );

	// Privilege check: only administrators may read authorization IDs.
	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
	    kIOClientPrivilegeAdministrator );
	if (kIOReturnSuccess != kr) {
		return kr;
	}

#if defined(XNU_TARGET_OS_OSX)
	*authorization_id = service->getAuthorizationID();
#else /* defined(XNU_TARGET_OS_OSX) */
	// Not supported off macOS; still store a defined value for the caller.
	*authorization_id = 0;
	kr = kIOReturnUnsupported;
#endif /* defined(XNU_TARGET_OS_OSX) */

	return kr;
}
4311
/* Routine io_service_set_authorization_id */
/*
 * Set the service's authorization ID. Only implemented on macOS targets;
 * elsewhere the request is rejected as unsupported.
 */
kern_return_t
is_io_service_set_authorization_id(
	io_object_t _service,
	uint64_t authorization_id )
{
	CHECK( IOService, _service, service );

#if defined(XNU_TARGET_OS_OSX)
	return service->setAuthorizationID( authorization_id );
#else /* defined(XNU_TARGET_OS_OSX) */
	return kIOReturnUnsupported;
#endif /* defined(XNU_TARGET_OS_OSX) */
}
4326
/* Routine io_service_open_ndr */
/*
 * Open a new user-client connection on a service for the calling task.
 *
 * Validates the caller, runs MAC policy checks, asks the service to
 * create an IOUserClient for the requested connection type, then applies
 * the locking/entitlement/sandbox policy checks before handing the
 * connection back. On any policy failure the freshly created client is
 * closed and released.
 *
 * err (the MIG return) reports transport-level problems; the IOKit
 * status of the open itself is returned out-of-band in *result.
 */
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = NULL;
	kern_return_t err = KERN_SUCCESS;
	IOReturn res = kIOReturnSuccess;
	OSDictionary * propertiesDict = NULL;
	bool disallowAccess = false;

	CHECK( IOService, _service, service );

	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	// A task may only open a connection on its own behalf.
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

#if CONFIG_MACF
	// Let MAC policies veto the open before any client is created.
	if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
		return kIOReturnNotPermitted;
	}
#endif
	do{
		// Caller-supplied open properties are not currently accepted.
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		// Disabled path: unserialize caller properties into a dictionary.
		{
			OSObject * obj;
			vm_offset_t data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		// Ask the service to create a user client for this connection type.
		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess && OSDynamicCast(IOUserClient, client) == NULL) {
			// client should always be a IOUserClient
			res = kIOReturnError;
		}

		if (res == kIOReturnSuccess) {
			// Make sure the client's expansion (reserved) storage exists;
			// if it cannot be allocated, undo the open.
			if (!client->reserved) {
				if (!client->reserve()) {
					client->clientClose();
					OSSafeReleaseNULL(client);
					res = kIOReturnNoMemory;
				}
			}
		}

		if (res == kIOReturnSuccess) {
			// Record the creating process's name/PID for diagnostics.
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
			}
			const char * creatorNameCStr = creatorName ? creatorName->getCStringNoCopy() : "<unknown>";
			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
			// Shared-instance clients can be opened concurrently, so the
			// one-time setup below is serialized under the owners lock.
			if (client->sharedInstance) {
				IOLockLock(gIOUserClientOwnersLock);
			}
			if (!client->opened) {
				client->opened = true;

				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
				{
					OSObject * obj;
					extern const OSSymbol * gIOSurfaceIdentifier;
					obj = client->getProperty(kIOUserClientDefaultLockingKey);
					bool hasProps = false;

					client->uc2022 = (NULL != OSDynamicCast(IOUserClient2022, client));
					// IOUserClient2022 subclasses must declare all three
					// default-locking properties explicitly; any missing one
					// is an error for them. Older clients may omit them.
					if (obj) {
						hasProps = true;
						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSetProperties = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSingleThreadExternalMethod = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					if (kIOReturnSuccess != res) {
						IOLog("IOUC %s requires kIOUserClientDefaultLockingKey, kIOUserClientDefaultLockingSetPropertiesKey, kIOUserClientDefaultLockingSingleThreadExternalMethodKey\n",
						    client->getMetaClass()->getClassName());
					}
					// No locking properties declared at all: apply the safe
					// defaults, except for kexts depending on IOSurface
					// (NOTE(review): presumably a legacy carve-out — confirm).
					if (!hasProps) {
						const OSMetaClass * meta;
						OSKext * kext;
						meta = client->getMetaClass();
						kext = meta->getKext();
						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
							client->defaultLocking = true;
							client->defaultLockingSetProperties = false;
							client->defaultLockingSingleThreadExternalMethod = false;
							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
						}
					}
				}
			}
			if (client->sharedInstance) {
				IOLockUnlock(gIOUserClientOwnersLock);
			}

			OSObject * requiredEntitlement = client->copyProperty(gIOUserClientEntitlementsKey);
			OSString * requiredEntitlementString = OSDynamicCast(OSString, requiredEntitlement);
			//If this is an IOUserClient2022, having kIOUserClientEntitlementsKey is mandatory.
			//If it has kIOUserClientEntitlementsKey, the value must be either kOSBooleanFalse or an OSString
			//If the value is kOSBooleanFalse, we allow access.
			//If the value is an OSString, we allow access if the task has the named entitlement
			if (client->uc2022) {
				if (!requiredEntitlement) {
					IOLog("IOUC %s missing " kIOUserClientEntitlementsKey " property\n",
					    client->getMetaClass()->getClassName());
					disallowAccess = true;
				} else if (!requiredEntitlementString && requiredEntitlement != kOSBooleanFalse) {
					IOLog("IOUC %s had " kIOUserClientEntitlementsKey "with value not boolean false or string\n", client->getMetaClass()->getClassName());
					disallowAccess = true;
				}
			}

			if (requiredEntitlement && disallowAccess == false) {
				if (kOSBooleanFalse == requiredEntitlement) {
					// allow
					disallowAccess = false;
				} else {
					// Deny unless the owning task holds the named entitlement.
					disallowAccess = !IOTaskHasEntitlement(owningTask, requiredEntitlementString->getCStringNoCopy());
					if (disallowAccess) {
						IOLog("IOUC %s missing entitlement in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			OSSafeReleaseNULL(requiredEntitlement);

			if (disallowAccess) {
				res = kIOReturnNotPrivileged;
			}
#if CONFIG_MACF
			// Second MAC check, now against the concrete client object.
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				IOLog("IOUC %s failed MACF in process %s\n",
				    client->getMetaClass()->getClassName(), creatorNameCStr);
				res = kIOReturnNotPermitted;
			}
#endif

			// Resolve (and cache) a sandbox filter policy for this task,
			// if a filter resolver is installed.
			if ((kIOReturnSuccess == res)
			    && gIOUCFilterCallbacks
			    && gIOUCFilterCallbacks->io_filter_resolver) {
				io_filter_policy_t filterPolicy;
				filterPolicy = client->filterForTask(owningTask, 0);
				if (!filterPolicy) {
					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
					if (kIOReturnUnsupported == res) {
						res = kIOReturnSuccess;
					} else if (kIOReturnSuccess == res) {
						client->filterForTask(owningTask, filterPolicy);
					} else {
						IOLog("IOUC %s failed sandbox in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}
			OSSafeReleaseNULL(creatorName);

			// Any failure after creation: close and drop the client so the
			// caller gets no connection object.
			if (kIOReturnSuccess != res) {
				IOStatisticsClientCall();
				client->clientClose();
				client->setTerminateDefer(service, false);
				client->release();
				client = NULL;
				break;
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	*connection = client;
	*result = res;

	return err;
}
4562
/* Routine io_service_close */
/*
 * Close a user-client connection. Runs clientClose() under the IPC write
 * lock, at most once per (non-shared) connection.
 */
kern_return_t
is_io_service_close(
	io_connect_t connection )
{
	OSSet * mappings;
	// NOTE(review): an OSSet arriving here appears to be the client's
	// mappings set rather than a real connection (see the mapping send
	// rights made in is_io_connect_map_memory_into_task) — treat it as
	// already closed; confirm against the port translation code.
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	// Shared-instance clients may be closed repeatedly; for others the
	// atomic 0->1 swap of 'closed' guarantees clientClose() runs once.
	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		client->ipcEnter(kIPCLockWrite);
		client->clientClose();
		client->ipcExit(kIPCLockWrite);
	} else {
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
4588
4589 /* Routine io_connect_get_service */
4590 kern_return_t
is_io_connect_get_service(io_connect_t connection,io_object_t * service)4591 is_io_connect_get_service(
4592 io_connect_t connection,
4593 io_object_t *service )
4594 {
4595 IOService * theService;
4596
4597 CHECK( IOUserClient, connection, client );
4598
4599 client->ipcEnter(kIPCLockNone);
4600
4601 theService = client->getService();
4602 if (theService) {
4603 theService->retain();
4604 }
4605
4606 client->ipcExit(kIPCLockNone);
4607
4608 *service = theService;
4609
4610 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4611 }
4612
4613 /* Routine io_connect_set_notification_port */
4614 kern_return_t
is_io_connect_set_notification_port(io_connect_t connection,uint32_t notification_type,mach_port_t port,uint32_t reference)4615 is_io_connect_set_notification_port(
4616 io_connect_t connection,
4617 uint32_t notification_type,
4618 mach_port_t port,
4619 uint32_t reference)
4620 {
4621 kern_return_t ret;
4622 CHECK( IOUserClient, connection, client );
4623
4624 IOStatisticsClientCall();
4625
4626 client->ipcEnter(kIPCLockWrite);
4627 ret = client->registerNotificationPort( port, notification_type,
4628 (io_user_reference_t) reference );
4629 client->ipcExit(kIPCLockWrite);
4630
4631 return ret;
4632 }
4633
4634 /* Routine io_connect_set_notification_port */
4635 kern_return_t
is_io_connect_set_notification_port_64(io_connect_t connection,uint32_t notification_type,mach_port_t port,io_user_reference_t reference)4636 is_io_connect_set_notification_port_64(
4637 io_connect_t connection,
4638 uint32_t notification_type,
4639 mach_port_t port,
4640 io_user_reference_t reference)
4641 {
4642 kern_return_t ret;
4643 CHECK( IOUserClient, connection, client );
4644
4645 IOStatisticsClientCall();
4646
4647 client->ipcEnter(kIPCLockWrite);
4648 ret = client->registerNotificationPort( port, notification_type,
4649 reference );
4650 client->ipcExit(kIPCLockWrite);
4651
4652 return ret;
4653 }
4654
/* Routine io_connect_map_memory_into_task */
/*
 * Map a client memory type into a task's address space.
 *
 * On success *address/*size describe the new mapping. Ownership of the
 * IOMemoryMap either moves to a send right pushed into the target task
 * (shared clients, or cross-task maps) or is tracked in the client's
 * own 'mappings' set (same-task maps), so it can be torn down later.
 */
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
	)
{
	IOReturn err;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();

	// Honor the client's default locking policy around the mapping call.
	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			// The send right now holds the reference; drop ours.
			map->release();
		} else {
			// keep it with the user client
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			// The mappings set (if present) retained the map; drop ours.
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);

	return err;
}
4716
4717 /* Routine is_io_connect_map_memory */
4718 kern_return_t
is_io_connect_map_memory(io_object_t connect,uint32_t type,task_t task,uint32_t * mapAddr,uint32_t * mapSize,uint32_t flags)4719 is_io_connect_map_memory(
4720 io_object_t connect,
4721 uint32_t type,
4722 task_t task,
4723 uint32_t * mapAddr,
4724 uint32_t * mapSize,
4725 uint32_t flags )
4726 {
4727 IOReturn err;
4728 mach_vm_address_t address;
4729 mach_vm_size_t size;
4730
4731 address = SCALAR64(*mapAddr);
4732 size = SCALAR64(*mapSize);
4733
4734 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4735
4736 *mapAddr = SCALAR32(address);
4737 *mapSize = SCALAR32(size);
4738
4739 return err;
4740 }
4741 } /* extern "C" */
4742
4743 IOMemoryMap *
removeMappingForDescriptor(IOMemoryDescriptor * mem)4744 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4745 {
4746 OSIterator * iter;
4747 IOMemoryMap * map = NULL;
4748
4749 IOLockLock(gIOObjectPortLock);
4750
4751 iter = OSCollectionIterator::withCollection(mappings);
4752 if (iter) {
4753 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4754 if (mem == map->getMemoryDescriptor()) {
4755 map->retain();
4756 mappings->removeObject(map);
4757 break;
4758 }
4759 }
4760 iter->release();
4761 }
4762
4763 IOLockUnlock(gIOObjectPortLock);
4764
4765 return map;
4766 }
4767
4768 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
/*
 * Unmap a previously mapped client memory type from a task.
 *
 * Recovers the backing descriptor via clientMemoryForType(), re-creates
 * the mapping object in reference mode (kIOMapReference) at the given
 * address, then tears down the send right / tracking reference created
 * by is_io_connect_map_memory_into_task.
 */
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();

	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		// Force reference mode so this looks up the existing mapping
		// rather than creating a new one.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			// Drop any tracking reference the client held on this map.
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance;
			if (is_shared_instance_or_from_current_task) {
				// Mirror the map path: a send right was pushed into the
				// owning task; make a name here so it can be deallocated.
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				map->userClientUnmap();
				// Drop both the right just made and the original one.
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (!is_shared_instance_or_from_current_task) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);

	return err;
}
4833
4834 kern_return_t
is_io_connect_unmap_memory(io_object_t connect,uint32_t type,task_t task,uint32_t mapAddr)4835 is_io_connect_unmap_memory(
4836 io_object_t connect,
4837 uint32_t type,
4838 task_t task,
4839 uint32_t mapAddr )
4840 {
4841 IOReturn err;
4842 mach_vm_address_t address;
4843
4844 address = SCALAR64(mapAddr);
4845
4846 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4847
4848 return err;
4849 }
4850
4851
4852 /* Routine io_connect_add_client */
4853 kern_return_t
is_io_connect_add_client(io_connect_t connection,io_object_t connect_to)4854 is_io_connect_add_client(
4855 io_connect_t connection,
4856 io_object_t connect_to)
4857 {
4858 CHECK( IOUserClient, connection, client );
4859 CHECK( IOUserClient, connect_to, to );
4860
4861 IOReturn ret;
4862
4863 IOStatisticsClientCall();
4864
4865 client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4866 ret = client->connectClient( to );
4867 client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4868
4869 return ret;
4870 }
4871
4872
/* Routine io_connect_set_properties */
/*
 * Set properties on a connection. A connection is a registry entry, so
 * this simply delegates to the registry-entry implementation.
 */
kern_return_t
is_io_connect_set_properties(
	io_connect_t connection,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
}
4883
/* Routine io_user_client_method */
/*
 * Dispatch an external method whose structure output is variable-length.
 * The method returns its variable output via an OSSerialize or OSData
 * object in structureVariableOutputData, which is then copied out to
 * user space as an out-of-line buffer.
 */
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	OSObject * structureVariableOutputData = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Synchronous call: no async wake port or reference.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line input must be strictly larger than the inband limit;
	// anything smaller should have arrived inband.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		// Copy-on-write mapping of the caller's input buffer.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Pre-zero outputs so nothing from the kernel stack can leak out.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;

	// Sandbox filter: may deny this selector before the method runs.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}

	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod(selector, &args);
	}

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	// Copy the variable-length output (serialized text or raw data)
	// out to the caller; absence of either form is an underrun.
	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData * data;
		unsigned int len;

		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			data->clipForCopyout();
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
4990
/* Routine io_user_client_method */
/*
 * Dispatch a synchronous external method with optional out-of-line
 * (memory-descriptor backed) input and output buffers, after applying
 * the per-task sandbox filter policy for this selector.
 */
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Synchronous call: no async wake port or reference.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// OOL buffers must be strictly larger than the inband limit, and
	// the output size must fit the 32-bit descriptor-size field.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		// Copy-on-write mapping of the caller's input buffer.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Pre-zero outputs so nothing from the kernel stack can leak out.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size
	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
	    : 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	// Sandbox filter: may deny this selector before the method runs.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5093
/* Routine io_async_user_client_method */
/*
 * Dispatch an asynchronous external method. reference[0] is overwritten
 * with the wake port (tagged with kIOUCAsync64Flag for 64-bit tasks) so
 * completion can be delivered back to the caller.
 */
kern_return_t
is_io_connect_async_method
(
	io_connect_t connection,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t * ool_output_size
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	// reference[0] is repurposed below, so at least one slot is required.
	if (referenceCnt < 1) {
		return kIOReturnBadArgument;
	}

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	// Stash the wake port in the first reference slot; flag 64-bit
	// address spaces so completion decoding matches the caller's ABI.
	reference[0] = (io_user_reference_t) wake_port;
	if (vm_map_is_64bit(get_task_map(current_task()))) {
		reference[0] |= kIOUCAsync64Flag;
	}

	args.selector = selector;

	args.asyncWakePort = wake_port;
	args.asyncReference = reference;
	args.asyncReferenceCount = referenceCnt;

	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// OOL buffers must be strictly larger than the inband limit, and
	// the output size must fit the 32-bit descriptor-size field.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		// Copy-on-write mapping of the caller's input buffer.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Pre-zero outputs so nothing from the kernel stack can leak out.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	// NOTE(review): unlike is_io_connect_method, *ool_output_size is
	// dereferenced unconditionally here — presumably MIG always supplies
	// it for this routine; confirm against the interface definition.
	args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	// Sandbox filter: may deny this selector before the method runs.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_async_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5207
5208 /* Routine io_connect_method_scalarI_scalarO */
5209 kern_return_t
is_io_connect_method_scalarI_scalarO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5210 is_io_connect_method_scalarI_scalarO(
5211 io_object_t connect,
5212 uint32_t index,
5213 io_scalar_inband_t input,
5214 mach_msg_type_number_t inputCount,
5215 io_scalar_inband_t output,
5216 mach_msg_type_number_t * outputCount )
5217 {
5218 IOReturn err;
5219 uint32_t i;
5220 io_scalar_inband64_t _input;
5221 io_scalar_inband64_t _output;
5222
5223 mach_msg_type_number_t struct_outputCnt = 0;
5224 mach_vm_size_t ool_output_size = 0;
5225
5226 bzero(&_output[0], sizeof(_output));
5227 for (i = 0; i < inputCount; i++) {
5228 _input[i] = SCALAR64(input[i]);
5229 }
5230
5231 err = is_io_connect_method(connect, index,
5232 _input, inputCount,
5233 NULL, 0,
5234 0, 0,
5235 NULL, &struct_outputCnt,
5236 _output, outputCount,
5237 0, &ool_output_size);
5238
5239 for (i = 0; i < *outputCount; i++) {
5240 output[i] = SCALAR32(_output[i]);
5241 }
5242
5243 return err;
5244 }
5245
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	/*
	 * Legacy shim: dispatch a scalar-in/scalar-out call through an old
	 * IOExternalMethod table entry.  The target is a member-function
	 * pointer taking up to six untyped argument slots: scalar inputs
	 * fill the leading slots, and pointers into the local _output array
	 * fill the rest.  Counts must match the table entry exactly.
	 */
	IOMethod func;
	io_scalar_inband_t _output;
	IOReturn err;
	err = kIOReturnBadArgument;

	/* Clear outputs so unused slots never leak kernel stack contents. */
	bzero(&_output[0], sizeof(_output));
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): bail out with kIOReturnBadArgument */
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Arity dispatch: inputCount inputs, then (6 - inputCount)
		 * output pointers, always six arguments in total. */
		switch (inputCount) {
		case 6:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Narrow the scalar results for the caller. */
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5321
5322 /* Routine io_async_method_scalarI_scalarO */
5323 kern_return_t
is_io_async_method_scalarI_scalarO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5324 is_io_async_method_scalarI_scalarO(
5325 io_object_t connect,
5326 mach_port_t wake_port,
5327 io_async_ref_t reference,
5328 mach_msg_type_number_t referenceCnt,
5329 uint32_t index,
5330 io_scalar_inband_t input,
5331 mach_msg_type_number_t inputCount,
5332 io_scalar_inband_t output,
5333 mach_msg_type_number_t * outputCount )
5334 {
5335 IOReturn err;
5336 uint32_t i;
5337 io_scalar_inband64_t _input;
5338 io_scalar_inband64_t _output;
5339 io_async_ref64_t _reference;
5340
5341 if (referenceCnt > ASYNC_REF64_COUNT) {
5342 return kIOReturnBadArgument;
5343 }
5344 bzero(&_output[0], sizeof(_output));
5345 for (i = 0; i < referenceCnt; i++) {
5346 _reference[i] = REF64(reference[i]);
5347 }
5348 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5349
5350 mach_msg_type_number_t struct_outputCnt = 0;
5351 mach_vm_size_t ool_output_size = 0;
5352
5353 for (i = 0; i < inputCount; i++) {
5354 _input[i] = SCALAR64(input[i]);
5355 }
5356
5357 err = is_io_connect_async_method(connect,
5358 wake_port, _reference, referenceCnt,
5359 index,
5360 _input, inputCount,
5361 NULL, 0,
5362 0, 0,
5363 NULL, &struct_outputCnt,
5364 _output, outputCount,
5365 0, &ool_output_size);
5366
5367 for (i = 0; i < *outputCount; i++) {
5368 output[i] = SCALAR32(_output[i]);
5369 }
5370
5371 return err;
5372 }
5373 /* Routine io_async_method_scalarI_structureO */
5374 kern_return_t
is_io_async_method_scalarI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5375 is_io_async_method_scalarI_structureO(
5376 io_object_t connect,
5377 mach_port_t wake_port,
5378 io_async_ref_t reference,
5379 mach_msg_type_number_t referenceCnt,
5380 uint32_t index,
5381 io_scalar_inband_t input,
5382 mach_msg_type_number_t inputCount,
5383 io_struct_inband_t output,
5384 mach_msg_type_number_t * outputCount )
5385 {
5386 uint32_t i;
5387 io_scalar_inband64_t _input;
5388 io_async_ref64_t _reference;
5389
5390 if (referenceCnt > ASYNC_REF64_COUNT) {
5391 return kIOReturnBadArgument;
5392 }
5393 for (i = 0; i < referenceCnt; i++) {
5394 _reference[i] = REF64(reference[i]);
5395 }
5396 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5397
5398 mach_msg_type_number_t scalar_outputCnt = 0;
5399 mach_vm_size_t ool_output_size = 0;
5400
5401 for (i = 0; i < inputCount; i++) {
5402 _input[i] = SCALAR64(input[i]);
5403 }
5404
5405 return is_io_connect_async_method(connect,
5406 wake_port, _reference, referenceCnt,
5407 index,
5408 _input, inputCount,
5409 NULL, 0,
5410 0, 0,
5411 output, outputCount,
5412 NULL, &scalar_outputCnt,
5413 0, &ool_output_size);
5414 }
5415
5416 /* Routine io_async_method_scalarI_structureI */
5417 kern_return_t
is_io_async_method_scalarI_structureI(io_connect_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5418 is_io_async_method_scalarI_structureI(
5419 io_connect_t connect,
5420 mach_port_t wake_port,
5421 io_async_ref_t reference,
5422 mach_msg_type_number_t referenceCnt,
5423 uint32_t index,
5424 io_scalar_inband_t input,
5425 mach_msg_type_number_t inputCount,
5426 io_struct_inband_t inputStruct,
5427 mach_msg_type_number_t inputStructCount )
5428 {
5429 uint32_t i;
5430 io_scalar_inband64_t _input;
5431 io_async_ref64_t _reference;
5432
5433 if (referenceCnt > ASYNC_REF64_COUNT) {
5434 return kIOReturnBadArgument;
5435 }
5436 for (i = 0; i < referenceCnt; i++) {
5437 _reference[i] = REF64(reference[i]);
5438 }
5439 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5440
5441 mach_msg_type_number_t scalar_outputCnt = 0;
5442 mach_msg_type_number_t inband_outputCnt = 0;
5443 mach_vm_size_t ool_output_size = 0;
5444
5445 for (i = 0; i < inputCount; i++) {
5446 _input[i] = SCALAR64(input[i]);
5447 }
5448
5449 return is_io_connect_async_method(connect,
5450 wake_port, _reference, referenceCnt,
5451 index,
5452 _input, inputCount,
5453 inputStruct, inputStructCount,
5454 0, 0,
5455 NULL, &inband_outputCnt,
5456 NULL, &scalar_outputCnt,
5457 0, &ool_output_size);
5458 }
5459
5460 /* Routine io_async_method_structureI_structureO */
5461 kern_return_t
is_io_async_method_structureI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5462 is_io_async_method_structureI_structureO(
5463 io_object_t connect,
5464 mach_port_t wake_port,
5465 io_async_ref_t reference,
5466 mach_msg_type_number_t referenceCnt,
5467 uint32_t index,
5468 io_struct_inband_t input,
5469 mach_msg_type_number_t inputCount,
5470 io_struct_inband_t output,
5471 mach_msg_type_number_t * outputCount )
5472 {
5473 uint32_t i;
5474 mach_msg_type_number_t scalar_outputCnt = 0;
5475 mach_vm_size_t ool_output_size = 0;
5476 io_async_ref64_t _reference;
5477
5478 if (referenceCnt > ASYNC_REF64_COUNT) {
5479 return kIOReturnBadArgument;
5480 }
5481 for (i = 0; i < referenceCnt; i++) {
5482 _reference[i] = REF64(reference[i]);
5483 }
5484 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5485
5486 return is_io_connect_async_method(connect,
5487 wake_port, _reference, referenceCnt,
5488 index,
5489 NULL, 0,
5490 input, inputCount,
5491 0, 0,
5492 output, outputCount,
5493 NULL, &scalar_outputCnt,
5494 0, &ool_output_size);
5495 }
5496
5497
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	/*
	 * Legacy async shim: like shim_io_connect_method_scalarI_scalarO but
	 * for IOExternalAsyncMethod handlers, which take the narrowed async
	 * reference as an extra first argument ahead of the six slots.
	 */
	IOAsyncMethod func;
	uint32_t i;
	io_scalar_inband_t _output;
	IOReturn err;
	io_async_ref_t reference;

	/* Clear outputs so unused slots never leak kernel stack contents. */
	bzero(&_output[0], sizeof(_output));
	/* Narrow the 64-bit async reference to the legacy 32-bit form.
	 * NOTE(review): asyncReferenceCount is presumably bounded by the
	 * caller (callExternalMethod path) — verify against dispatch code. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): bail out with kIOReturnBadArgument */
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Arity dispatch: reference first, then inputs, then output
		 * pointers — always reference + six slots in total. */
		switch (inputCount) {
		case 6:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Narrow the scalar results for the caller. */
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5589
5590
5591 /* Routine io_connect_method_scalarI_structureO */
5592 kern_return_t
is_io_connect_method_scalarI_structureO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5593 is_io_connect_method_scalarI_structureO(
5594 io_object_t connect,
5595 uint32_t index,
5596 io_scalar_inband_t input,
5597 mach_msg_type_number_t inputCount,
5598 io_struct_inband_t output,
5599 mach_msg_type_number_t * outputCount )
5600 {
5601 uint32_t i;
5602 io_scalar_inband64_t _input;
5603
5604 mach_msg_type_number_t scalar_outputCnt = 0;
5605 mach_vm_size_t ool_output_size = 0;
5606
5607 for (i = 0; i < inputCount; i++) {
5608 _input[i] = SCALAR64(input[i]);
5609 }
5610
5611 return is_io_connect_method(connect, index,
5612 _input, inputCount,
5613 NULL, 0,
5614 0, 0,
5615 output, outputCount,
5616 NULL, &scalar_outputCnt,
5617 0, &ool_output_size);
5618 }
5619
kern_return_t
shim_io_connect_method_scalarI_structureO(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	/*
	 * Legacy shim: scalar inputs plus one struct output buffer.  The
	 * output pointer and (when fewer than five scalars are consumed)
	 * a pointer to its byte count are appended after the scalar slots.
	 * count1 == kIOUCVariableStructureSize accepts any output size.
	 */
	IOMethod func;
	IOReturn err;

	err = kIOReturnBadArgument;

	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): bail out with kIOReturnBadArgument */
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			/* All six slots used by scalars + output; no count slot. */
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5684
5685
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	/*
	 * Legacy async shim: scalar inputs plus one struct output buffer,
	 * dispatched through an IOExternalAsyncMethod handler that takes
	 * the narrowed async reference as its first argument.
	 */
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	/* Narrow the 64-bit async reference to the legacy 32-bit form. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): bail out with kIOReturnBadArgument */
		}
		/* count1 == kIOUCVariableStructureSize accepts any output size. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5763
5764 /* Routine io_connect_method_scalarI_structureI */
5765 kern_return_t
is_io_connect_method_scalarI_structureI(io_connect_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5766 is_io_connect_method_scalarI_structureI(
5767 io_connect_t connect,
5768 uint32_t index,
5769 io_scalar_inband_t input,
5770 mach_msg_type_number_t inputCount,
5771 io_struct_inband_t inputStruct,
5772 mach_msg_type_number_t inputStructCount )
5773 {
5774 uint32_t i;
5775 io_scalar_inband64_t _input;
5776
5777 mach_msg_type_number_t scalar_outputCnt = 0;
5778 mach_msg_type_number_t inband_outputCnt = 0;
5779 mach_vm_size_t ool_output_size = 0;
5780
5781 for (i = 0; i < inputCount; i++) {
5782 _input[i] = SCALAR64(input[i]);
5783 }
5784
5785 return is_io_connect_method(connect, index,
5786 _input, inputCount,
5787 inputStruct, inputStructCount,
5788 0, 0,
5789 NULL, &inband_outputCnt,
5790 NULL, &scalar_outputCnt,
5791 0, &ool_output_size);
5792 }
5793
kern_return_t
shim_io_connect_method_scalarI_structureI(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	/*
	 * Legacy shim: scalar inputs plus one struct input buffer.  The
	 * struct pointer and its byte count (as a pointer-sized value) are
	 * appended after the scalar argument slots.
	 * count1 == kIOUCVariableStructureSize accepts any struct size.
	 */
	IOMethod func;
	IOReturn err = kIOReturnBadArgument;

	do{
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): bail out with kIOReturnBadArgument */
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			/* NOTE(review): historical quirk — input[2] is passed as a
			 * raw pointer-sized value here, not ARG32-narrowed like its
			 * neighbours; preserved for bug-for-bug compatibility. */
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5859
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	/*
	 * Legacy async shim: scalar inputs plus one struct input buffer,
	 * dispatched through an IOExternalAsyncMethod handler that takes
	 * the narrowed async reference as its first argument.
	 */
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err = kIOReturnBadArgument;
	io_async_ref_t reference;

	/* Narrow the 64-bit async reference to the legacy 32-bit form. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	do{
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): bail out with kIOReturnBadArgument */
		}
		/* count1 == kIOUCVariableStructureSize accepts any struct size. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5940
5941 /* Routine io_connect_method_structureI_structureO */
5942 kern_return_t
is_io_connect_method_structureI_structureO(io_object_t connect,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5943 is_io_connect_method_structureI_structureO(
5944 io_object_t connect,
5945 uint32_t index,
5946 io_struct_inband_t input,
5947 mach_msg_type_number_t inputCount,
5948 io_struct_inband_t output,
5949 mach_msg_type_number_t * outputCount )
5950 {
5951 mach_msg_type_number_t scalar_outputCnt = 0;
5952 mach_vm_size_t ool_output_size = 0;
5953
5954 return is_io_connect_method(connect, index,
5955 NULL, 0,
5956 input, inputCount,
5957 0, 0,
5958 output, outputCount,
5959 NULL, &scalar_outputCnt,
5960 0, &ool_output_size);
5961 }
5962
5963 kern_return_t
shim_io_connect_method_structureI_structureO(IOExternalMethod * method,IOService * object,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,IOByteCount * outputCount)5964 shim_io_connect_method_structureI_structureO(
5965 IOExternalMethod * method,
5966 IOService * object,
5967 io_struct_inband_t input,
5968 mach_msg_type_number_t inputCount,
5969 io_struct_inband_t output,
5970 IOByteCount * outputCount )
5971 {
5972 IOMethod func;
5973 IOReturn err = kIOReturnBadArgument;
5974
5975 do{
5976 if ((kIOUCVariableStructureSize != method->count0)
5977 && (inputCount != method->count0)) {
5978 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5979 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5980 continue;
5981 }
5982 if ((kIOUCVariableStructureSize != method->count1)
5983 && (*outputCount != method->count1)) {
5984 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5985 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5986 continue;
5987 }
5988
5989 func = method->func;
5990
5991 if (method->count1) {
5992 if (method->count0) {
5993 err = (object->*func)( input, output,
5994 (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5995 } else {
5996 err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
5997 }
5998 } else {
5999 err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
6000 }
6001 }while (false);
6002
6003
6004 return err;
6005 }
6006
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	/*
	 * Legacy async shim: struct input and/or struct output, dispatched
	 * through an IOExternalAsyncMethod handler that takes the narrowed
	 * async reference as its first argument.  A table count of
	 * kIOUCVariableStructureSize accepts any size; zero means unused.
	 */
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	/* Narrow the 64-bit async reference to the legacy 32-bit form. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do{
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): bail out with kIOReturnBadArgument */
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		if (method->count1) {
			if (method->count0) {
				/* Both directions. */
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				/* Output only. */
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			/* Input only. */
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
6062
/* Routine io_catalog_send_data */
/*
 * MIG server routine: accept driver-personality data from user space and
 * apply it to the global IOCatalogue according to 'flag'. The payload
 * 'inData' arrives as a vm_map_copy_t of serialized XML. The routine-level
 * status goes in *result; the kern_return_t return value is the MIG
 * transport status.
 */
kern_return_t
is_io_catalog_send_data(
	mach_port_t             main_port,
	uint32_t                flag,
	io_buf_ptr_t            inData,
	mach_msg_type_number_t  inDataCount,
	kern_return_t *         result)
{
	// Allow sending catalog data if there is no kextd and the kernel is DEVELOPMENT || DEBUG
#if NO_KEXTD && !(DEVELOPMENT || DEBUG)
	return kIOReturnNotPrivileged;
#else /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
	OSObject * obj = NULL;
	vm_offset_t data;
	kern_return_t kr = kIOReturnError;

	//printf("io_catalog_send_data called. flag: %d\n", flag);

	// Only the holder of the main device port may modify the catalogue.
	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	// Every flag except the kextd status notifications requires a payload.
	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
	    flag != kIOCatalogKextdActive &&
	    flag != kIOCatalogKextdFinishedLaunching) &&
	    (!inData || !inDataCount)) {
		return kIOReturnBadArgument;
	}

	if (!IOCurrentTaskHasEntitlement(kIOCatalogManagementEntitlement)) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);
		// For now, fake success to not break applications relying on this function succeeding.
		// See <rdar://problem/32554970> for more details.
		return kIOReturnSuccess;
	}

	if (inData) {
		vm_map_offset_t map_data;

		// Cap the payload at 1024 inband-structure units to bound
		// kernel memory consumption.
		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
			return kIOReturnMessageTooLarge;
		}

		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		// must return success after vm_map_copyout() succeeds
		// (the copy object is consumed; a non-success MIG return would
		// make the caller dispose of it again)

		if (inDataCount) {
			// Unserialize the XML payload, then release the mapped
			// buffer regardless of the outcome.
			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
			vm_deallocate( kernel_map, data, inDataCount );
			if (!obj) {
				*result = kIOReturnNoMemory;
				return KERN_SUCCESS;
			}
		}
	}

	// Dispatch on the requested catalogue operation; each case validates
	// that the unserialized payload has the collection type it needs.
	switch (flag) {
	case kIOCatalogResetDrivers:
	case kIOCatalogResetDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->resetAndAddDrivers(array,
			    flag == kIOCatalogResetDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogAddDrivers:
	case kIOCatalogAddDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->addDrivers( array,
			    flag == kIOCatalogAddDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveDrivers:
	case kIOCatalogRemoveDriversNoMatch: {
		OSDictionary * dict;

		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->removeDrivers( dict,
			    flag == kIOCatalogRemoveDrivers )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	// Retired and notification-only flags: acknowledged but unsupported.
	case kIOCatalogStartMatching__Removed:
	case kIOCatalogRemoveKernelLinker__Removed:
	case kIOCatalogKextdActive:
	case kIOCatalogKextdFinishedLaunching:
		kr = KERN_NOT_SUPPORTED;
		break;

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	if (obj) {
		obj->release();
	}

	// Operation status travels in *result; the MIG call itself succeeded.
	*result = kr;
	return KERN_SUCCESS;
#endif /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
}
6197
6198 /* Routine io_catalog_terminate */
6199 kern_return_t
is_io_catalog_terminate(mach_port_t main_port,uint32_t flag,io_name_t name)6200 is_io_catalog_terminate(
6201 mach_port_t main_port,
6202 uint32_t flag,
6203 io_name_t name )
6204 {
6205 kern_return_t kr;
6206
6207 if (main_port != main_device_port) {
6208 return kIOReturnNotPrivileged;
6209 }
6210
6211 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6212 kIOClientPrivilegeAdministrator );
6213 if (kIOReturnSuccess != kr) {
6214 return kr;
6215 }
6216
6217 switch (flag) {
6218 #if !defined(SECURE_KERNEL)
6219 case kIOCatalogServiceTerminate:
6220 kr = gIOCatalogue->terminateDrivers(NULL, name, false);
6221 break;
6222
6223 case kIOCatalogModuleUnload:
6224 case kIOCatalogModuleTerminate:
6225 kr = gIOCatalogue->terminateDriversForModule(name,
6226 flag == kIOCatalogModuleUnload);
6227 break;
6228 #endif
6229
6230 default:
6231 kr = kIOReturnBadArgument;
6232 break;
6233 }
6234
6235 return kr;
6236 }
6237
6238 /* Routine io_catalog_get_data */
6239 kern_return_t
is_io_catalog_get_data(mach_port_t main_port,uint32_t flag,io_buf_ptr_t * outData,mach_msg_type_number_t * outDataCount)6240 is_io_catalog_get_data(
6241 mach_port_t main_port,
6242 uint32_t flag,
6243 io_buf_ptr_t *outData,
6244 mach_msg_type_number_t *outDataCount)
6245 {
6246 kern_return_t kr = kIOReturnSuccess;
6247 OSSerialize * s;
6248
6249 if (main_port != main_device_port) {
6250 return kIOReturnNotPrivileged;
6251 }
6252
6253 //printf("io_catalog_get_data called. flag: %d\n", flag);
6254
6255 s = OSSerialize::withCapacity(4096);
6256 if (!s) {
6257 return kIOReturnNoMemory;
6258 }
6259
6260 kr = gIOCatalogue->serializeData(flag, s);
6261
6262 if (kr == kIOReturnSuccess) {
6263 mach_vm_address_t data;
6264 vm_map_copy_t copy;
6265 unsigned int size;
6266
6267 size = s->getLength();
6268 kr = mach_vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
6269 if (kr == kIOReturnSuccess) {
6270 bcopy(s->text(), (void *)data, size);
6271 kr = vm_map_copyin(kernel_map, data, size, true, ©);
6272 *outData = (char *)copy;
6273 *outDataCount = size;
6274 }
6275 }
6276
6277 s->release();
6278
6279 return kr;
6280 }
6281
6282 /* Routine io_catalog_get_gen_count */
6283 kern_return_t
is_io_catalog_get_gen_count(mach_port_t main_port,uint32_t * genCount)6284 is_io_catalog_get_gen_count(
6285 mach_port_t main_port,
6286 uint32_t *genCount)
6287 {
6288 if (main_port != main_device_port) {
6289 return kIOReturnNotPrivileged;
6290 }
6291
6292 //printf("io_catalog_get_gen_count called.\n");
6293
6294 if (!genCount) {
6295 return kIOReturnBadArgument;
6296 }
6297
6298 *genCount = gIOCatalogue->getGenerationCount();
6299
6300 return kIOReturnSuccess;
6301 }
6302
6303 /* Routine io_catalog_module_loaded.
6304 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
6305 */
6306 kern_return_t
is_io_catalog_module_loaded(mach_port_t main_port,io_name_t name)6307 is_io_catalog_module_loaded(
6308 mach_port_t main_port,
6309 io_name_t name)
6310 {
6311 if (main_port != main_device_port) {
6312 return kIOReturnNotPrivileged;
6313 }
6314
6315 //printf("io_catalog_module_loaded called. name %s\n", name);
6316
6317 if (!name) {
6318 return kIOReturnBadArgument;
6319 }
6320
6321 gIOCatalogue->moduleHasLoaded(name);
6322
6323 return kIOReturnSuccess;
6324 }
6325
6326 kern_return_t
is_io_catalog_reset(mach_port_t main_port,uint32_t flag)6327 is_io_catalog_reset(
6328 mach_port_t main_port,
6329 uint32_t flag)
6330 {
6331 if (main_port != main_device_port) {
6332 return kIOReturnNotPrivileged;
6333 }
6334
6335 switch (flag) {
6336 case kIOCatalogResetDefault:
6337 gIOCatalogue->reset();
6338 break;
6339
6340 default:
6341 return kIOReturnBadArgument;
6342 }
6343
6344 return kIOReturnSuccess;
6345 }
6346
/*
 * Fast-trap entry point for user clients: routes the six trap arguments
 * either to a DriverKit user server (UEXT trap) or to a classic
 * IOUserClient's IOExternalTrap, selected by flag bits encoded in the
 * user-client reference.
 */
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t result = kIOReturnBadArgument;
	IOUserClient * userClient;
	OSObject * object;
	uintptr_t ref;
	mach_port_name_t portName;

	ref = (uintptr_t) args->userClientRef;

	if ((ref == MACH_PORT_DEAD) || (ref == (uintptr_t) MACH_PORT_NULL)) {
		return kIOReturnBadArgument;
	}
	// kobject port names always have b0-1 set, so we use these bits as flags to
	// iokit_user_client_trap()
	// keep this up to date with ipc_entry_name_mask();
	portName = (mach_port_name_t) (ref | 3);
	if (((1ULL << 32) & ref) || !(1 & ref)) {
		// Bit 32 set or bit 0 clear selects the DriverKit path.
		object = iokit_lookup_uext_ref_current_task(portName);
		if (object) {
			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
		}
		OSSafeReleaseNULL(object);
	} else {
		// Classic path: resolve the connect reference to an IOUserClient.
		io_object_t ref_current_task = iokit_lookup_connect_ref_current_task((mach_port_name_t) ref);
		if ((userClient = OSDynamicCast(IOUserClient, ref_current_task))) {
			IOExternalTrap *trap = NULL;
			IOService *target = NULL;

			result = kIOReturnSuccess;
			// Let any registered filter policy veto this trap index
			// before the trap is even looked up.
			io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
			if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
				result = gIOUCFilterCallbacks->io_filter_applier(userClient, filterPolicy, io_filter_type_trap, args->index);
			}
			if (kIOReturnSuccess == result) {
				trap = userClient->getTargetAndTrapForIndex(&target, args->index);
			}
			if (trap && target) {
				IOTrap func;

				func = trap->func;

				if (func) {
					// Invoke the trap on its target with all six
					// user-supplied arguments.
					result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
				}
			}

			iokit_remove_connect_reference(userClient);
		} else {
			// Lookup yielded NULL or a non-IOUserClient object; drop
			// any reference that was taken.
			OSSafeReleaseNULL(ref_current_task);
		}
	}

	return result;
}
6403
6404 /* Routine io_device_tree_entry_exists_with_name */
6405 kern_return_t
is_io_device_tree_entry_exists_with_name(mach_port_t main_port,io_name_t name,boolean_t * exists)6406 is_io_device_tree_entry_exists_with_name(
6407 mach_port_t main_port,
6408 io_name_t name,
6409 boolean_t *exists )
6410 {
6411 OSCollectionIterator *iter;
6412
6413 if (main_port != main_device_port) {
6414 return kIOReturnNotPrivileged;
6415 }
6416
6417 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6418 *exists = iter && iter->getNextObject();
6419 OSSafeReleaseNULL(iter);
6420
6421 return kIOReturnSuccess;
6422 }
6423 } /* extern "C" */
6424
6425 IOReturn
callExternalMethod(uint32_t selector,IOExternalMethodArguments * args)6426 IOUserClient::callExternalMethod(uint32_t selector, IOExternalMethodArguments * args)
6427 {
6428 IOReturn ret;
6429
6430 ipcEnter(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6431 if (uc2022) {
6432 ret = ((IOUserClient2022 *) this)->externalMethod(selector, (IOExternalMethodArgumentsOpaque *) args);
6433 } else {
6434 ret = externalMethod(selector, args);
6435 }
6436 ipcExit(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6437
6438 return ret;
6439 }
6440
// IOUserClient2022 subclasses must go through the opaque-arguments
// overload (see dispatchExternalMethod); reaching this legacy entry point
// indicates a programming error, so fail hard.
MIG_SERVER_ROUTINE IOReturn
IOUserClient2022::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
    IOExternalMethodDispatch *dispatch,
    OSObject *target, void *reference)
{
	panic("wrong externalMethod for IOUserClient2022");
}
6448
6449 IOReturn
dispatchExternalMethod(uint32_t selector,IOExternalMethodArgumentsOpaque * arguments,const IOExternalMethodDispatch2022 dispatchArray[],size_t dispatchArrayCount,OSObject * target,void * reference)6450 IOUserClient2022::dispatchExternalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque *arguments,
6451 const IOExternalMethodDispatch2022 dispatchArray[], size_t dispatchArrayCount,
6452 OSObject * target, void * reference)
6453 {
6454 IOReturn err;
6455 IOExternalMethodArguments * args = (typeof(args))arguments;
6456 const IOExternalMethodDispatch2022 * dispatch;
6457
6458 if (!dispatchArray) {
6459 return kIOReturnError;
6460 }
6461 if (selector >= dispatchArrayCount) {
6462 return kIOReturnBadArgument;
6463 }
6464 dispatch = &dispatchArray[selector];
6465
6466 uint32_t count;
6467 count = dispatch->checkScalarInputCount;
6468 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6469 return kIOReturnBadArgument;
6470 }
6471
6472 count = dispatch->checkStructureInputSize;
6473 if ((kIOUCVariableStructureSize != count)
6474 && (count != ((args->structureInputDescriptor)
6475 ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6476 return kIOReturnBadArgument;
6477 }
6478
6479 count = dispatch->checkScalarOutputCount;
6480 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6481 return kIOReturnBadArgument;
6482 }
6483
6484 count = dispatch->checkStructureOutputSize;
6485 if ((kIOUCVariableStructureSize != count)
6486 && (count != ((args->structureOutputDescriptor)
6487 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6488 return kIOReturnBadArgument;
6489 }
6490
6491 if (args->asyncWakePort && !dispatch->allowAsync) {
6492 return kIOReturnBadArgument;
6493 }
6494
6495 if (dispatch->checkEntitlement) {
6496 if (!IOCurrentTaskHasEntitlement(dispatch->checkEntitlement)) {
6497 return kIOReturnNotPrivileged;
6498 }
6499 }
6500
6501 if (dispatch->function) {
6502 err = (*dispatch->function)(target, reference, args);
6503 } else {
6504 err = kIOReturnNoCompletion; /* implementer can dispatch */
6505 }
6506 return err;
6507 }
6508
/*
 * Default external-method dispatcher. When a dispatch entry is supplied,
 * validate the argument counts/sizes against it and call its function.
 * Otherwise fall back to the pre-Leopard shim interface: look up the
 * (async) method for 'selector' and route the arguments through the
 * matching shim_io_* adapter based on the method's type flags.
 */
IOReturn
IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
	IOReturn err;
	IOService * object;
	IOByteCount structureOutputSize;

	if (dispatch) {
		// Modern path: check each declared count/size; a value of
		// kIOUCVariableStructureSize disables the corresponding check.
		uint32_t count;
		count = dispatch->checkScalarInputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureInputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureInputDescriptor)
		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkScalarOutputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureOutputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureOutputDescriptor)
		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
			return kIOReturnBadArgument;
		}

		if (dispatch->function) {
			err = (*dispatch->function)(target, reference, args);
		} else {
			err = kIOReturnNoCompletion; /* implementer can dispatch */
		}
		return err;
	}


	// pre-Leopard API's don't do ool structs
	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
		err = kIOReturnIPCError;
		return err;
	}

	structureOutputSize = args->structureOutputSize;

	if (args->asyncWakePort) {
		// Async legacy method: resolve the target and method for this
		// selector, then dispatch by the method's declared type.
		IOExternalAsyncMethod * method;
		object = NULL;
		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_async_method_scalarI_structureI( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *)args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_async_method_scalarI_scalarO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_async_method_scalarI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_async_method_structureI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    (char *)args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	} else {
		// Synchronous legacy method: same dispatch, but the shims take
		// an IOByteCount* so the local structureOutputSize is used.
		IOExternalMethod * method;
		object = NULL;
		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_connect_method_scalarI_structureI( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_connect_method_scalarI_scalarO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_connect_method_scalarI_structureO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_connect_method_structureI_structureO( method, object,
			    (char *) args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	}

	// Guard the narrowing store below against a >32-bit size.
	if (structureOutputSize > UINT_MAX) {
		structureOutputSize = 0;
		err = kIOReturnBadArgument;
	}

	// NOTE(review): on the async path the shims updated
	// args->structureOutputSize directly, while the local still holds the
	// pre-call value, so this store writes the pre-call size back —
	// verify this is the intended behavior.
	args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);

	return err;
}
6661
6662 IOReturn
registerFilterCallbacks(const struct io_filter_callbacks * callbacks,size_t size)6663 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6664 {
6665 if (size < sizeof(*callbacks)) {
6666 return kIOReturnBadArgument;
6667 }
6668 if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6669 return kIOReturnBusy;
6670 }
6671 return kIOReturnSuccess;
6672 }
6673
6674
// Reserved vtable slots kept for future binary-compatible expansion of
// IOUserClient and IOUserClient2022; all currently unused.
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);

OSMetaClassDefineReservedUnused(IOUserClient2022, 0);
OSMetaClassDefineReservedUnused(IOUserClient2022, 1);
OSMetaClassDefineReservedUnused(IOUserClient2022, 2);
OSMetaClassDefineReservedUnused(IOUserClient2022, 3);
6696