1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <libkern/c++/OSKext.h>
30 #include <libkern/c++/OSSharedPtr.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51 #include <sys/code_signing.h>
52
53 #include <mach/sdt.h>
54 #include <os/hash.h>
55
56 #include <libkern/amfi/amfi.h>
57
58 #if CONFIG_MACF
59
60 extern "C" {
61 #include <security/mac_framework.h>
62 };
63 #include <sys/kauth.h>
64
65 #define IOMACF_LOG 0
66
67 #endif /* CONFIG_MACF */
68
69 #include <IOKit/assert.h>
70
71 #include "IOServicePrivate.h"
72 #include "IOKitKernelInternal.h"
73
74 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
75 #define SCALAR32(x) ((uint32_t )x)
76 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
77 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
78 #define REF32(x) ((int)(x))
79
80 enum{
81 kIOUCAsync0Flags = 3ULL,
82 kIOUCAsync64Flag = 1ULL,
83 kIOUCAsyncErrorLoggedFlag = 2ULL
84 };
85
86 #if IOKITSTATS
87
88 #define IOStatisticsRegisterCounter() \
89 do { \
90 reserved->counter = IOStatistics::registerUserClient(this); \
91 } while (0)
92
93 #define IOStatisticsUnregisterCounter() \
94 do { \
95 if (reserved) \
96 IOStatistics::unregisterUserClient(reserved->counter); \
97 } while (0)
98
99 #define IOStatisticsClientCall() \
100 do { \
101 IOStatistics::countUserClientCall(client); \
102 } while (0)
103
104 #else
105
106 #define IOStatisticsRegisterCounter()
107 #define IOStatisticsUnregisterCounter()
108 #define IOStatisticsClientCall()
109
110 #endif /* IOKITSTATS */
111
112 #if DEVELOPMENT || DEBUG
113
114 #define FAKE_STACK_FRAME(a) \
115 const void ** __frameptr; \
116 const void * __retaddr; \
117 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
118 __retaddr = __frameptr[1]; \
119 __frameptr[1] = (a);
120
121 #define FAKE_STACK_FRAME_END() \
122 __frameptr[1] = __retaddr;
123
124 #else /* DEVELOPMENT || DEBUG */
125
126 #define FAKE_STACK_FRAME(a)
127 #define FAKE_STACK_FRAME_END()
128
129 #endif /* DEVELOPMENT || DEBUG */
130
131 #define ASYNC_REF_COUNT (sizeof(io_async_ref_t) / sizeof(natural_t))
132 #define ASYNC_REF64_COUNT (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
133
134 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
135
136 extern "C" {
137 #include <mach/mach_traps.h>
138 #include <vm/vm_map.h>
139 } /* extern "C" */
140
141 struct IOMachPortHashList;
142
143 static_assert(IKOT_MAX_TYPE <= 255);
144
145 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
146
147 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
class IOMachPort : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	SLIST_ENTRY(IOMachPort) link;   // linkage within a gIOMachPortHash bucket
	ipc_port_t port;                // the Mach port standing for 'object'
	OSObject* object;               // object this port represents (tagged-retained)
	UInt32 mscount;                 // make-send count issued for 'port'
	UInt8 holdDestroy;              // if set, releasePortForObject() leaves the entry alive
	UInt8 type;                     // narrowed ipc_kobject_type_t (static_assert: IKOT_MAX_TYPE <= 255)

	// Allocate a new entry and its backing Mach port for obj/type.
	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);

	// Return the hash bucket that obj belongs to (hash of the pointer).
	static IOMachPortHashList* bucketForObject(OSObject *obj,
	    ipc_kobject_type_t type);

	// Linear search of a bucket for an (obj, type) match; returns without a retain.
	static LIBKERN_RETURNS_NOT_RETAINED IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	// Handle a no-senders notification; returns true if the entry was destroyed.
	static bool noMoreSendersForObject( OSObject * obj,
	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
	// Drop the port entry for obj (unless holdDestroy is set).
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );
	// Mark obj's entry so releasePortForObject() becomes a no-op.
	static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

	// Create a send right in 'task' for obj's port.
	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;
};
177
178 #define super OSObject
179 OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)
180
181 static IOLock * gIOObjectPortLock;
182 IOLock * gIOUserServerLock;
183
184 SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;
185
186 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
187
188 SLIST_HEAD(IOMachPortHashList, IOMachPort);
189
190 #if defined(XNU_TARGET_OS_OSX)
191 #define PORT_HASH_SIZE 4096
192 #else /* defined(!XNU_TARGET_OS_OSX) */
193 #define PORT_HASH_SIZE 256
194 #endif /* !defined(!XNU_TARGET_OS_OSX) */
195
196 IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
197
198 void
IOMachPortInitialize(void)199 IOMachPortInitialize(void)
200 {
201 for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
202 SLIST_INIT(&gIOMachPortHash[i]);
203 }
204 }
205
206 IOMachPortHashList*
bucketForObject(OSObject * obj,ipc_kobject_type_t type)207 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
208 {
209 return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
210 }
211
212 IOMachPort*
portForObjectInBucket(IOMachPortHashList * bucket,OSObject * obj,ipc_kobject_type_t type)213 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
214 {
215 IOMachPort *machPort;
216
217 SLIST_FOREACH(machPort, bucket, link) {
218 if (machPort->object == obj && machPort->type == type) {
219 return machPort;
220 }
221 }
222 return NULL;
223 }
224
225 IOMachPort*
withObjectAndType(OSObject * obj,ipc_kobject_type_t type)226 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
227 {
228 IOMachPort *machPort = NULL;
229
230 machPort = new IOMachPort;
231 if (__improbable(machPort && !machPort->init())) {
232 OSSafeReleaseNULL(machPort);
233 return NULL;
234 }
235
236 machPort->object = obj;
237 machPort->type = (typeof(machPort->type))type;
238 machPort->port = iokit_alloc_object_port(obj, type);
239
240 obj->taggedRetain(OSTypeID(OSCollection));
241 machPort->mscount++;
242
243 return machPort;
244 }
245
// Handle a no-senders notification for obj's port. Compares the entry's
// make-send count against the mscount reported by IPC to detect a race with
// concurrent send-right creation. Returns true if the entry was destroyed;
// returns false (and updates *mscount) if new send rights were minted since,
// so the caller can re-arm the notification.
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOMachPort *machPort = NULL;
	IOUserClient *uc;
	OSAction *action;
	bool destroyed = true;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Keep obj alive across the hash-table manipulation and callbacks below.
	obj->retain();

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort) {
		// Destroy only if no send rights were created after the
		// notification was generated.
		destroyed = (machPort->mscount <= *mscount);
		if (!destroyed) {
			*mscount = machPort->mscount;
			lck_mtx_unlock(gIOObjectPortLock);
		} else {
			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
				uc->noMoreSenders();
			}
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);

			lck_mtx_unlock(gIOObjectPortLock);

			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

			// Drop the tagged retain taken in withObjectAndType().
			obj->taggedRelease(OSTypeID(OSCollection));
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}

	// DriverKit OSAction objects get an abort callback when their port dies.
	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
		action->Aborted();
	}

	if (IKOT_UEXT_OBJECT == type && IOUserServer::shouldLeakObjects()) {
		// Leak object
		obj->retain();
	}

	obj->release();

	return destroyed;
}
297
// Remove and destroy obj's port entry of the given type, unless the entry is
// marked holdDestroy. Not valid for IKOT_IOKIT_CONNECT ports (asserted), which
// are torn down through destroyUserReferences()/noMoreSendersForObject().
void
IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
	IOMachPort *machPort;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort && !machPort->holdDestroy) {
		// Keep obj alive until after the tagged release below.
		obj->retain();
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);

		lck_mtx_unlock(gIOObjectPortLock);

		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);

		// Drop the tagged retain taken in withObjectAndType(), then the local one.
		obj->taggedRelease(OSTypeID(OSCollection));
		obj->release();
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}
}
325
326 void
setHoldDestroy(OSObject * obj,ipc_kobject_type_t type)327 IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
328 {
329 IOMachPort * machPort;
330
331 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
332 lck_mtx_lock(gIOObjectPortLock);
333
334 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
335
336 if (machPort) {
337 machPort->holdDestroy = true;
338 }
339
340 lck_mtx_unlock(gIOObjectPortLock);
341 }
342
// C-linkage convenience wrapper: tear down obj's port of the given type.
// Must not be used for IKOT_IOKIT_CONNECT (releasePortForObject asserts).
void
IOMachPortDestroyUserReferences(OSObject * obj, natural_t type)
{
	IOMachPort::releasePortForObject(obj, type);
}
348
// Destroy all user references to obj. The object port is released outright;
// the connect port of an IOUserClient is either destroyed or — if the client
// still has live memory mappings — re-pointed at the mappings object so the
// port stays valid until the mappings go away.
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	IOMachPort *machPort;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	// panther, 3160200
	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

	obj->retain();
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
	IOMachPortHashList *mappingBucket = NULL;

	lck_mtx_lock(gIOObjectPortLock);

	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	if (uc && uc->mappings) {
		// The port will be re-keyed to uc->mappings, so find its bucket now.
		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
	}

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		lck_mtx_unlock(gIOObjectPortLock);
		goto end;
	}

	// Unhook the entry from obj and drop obj's tagged retain.
	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	obj->taggedRelease(OSTypeID(OSCollection));

	if (uc) {
		uc->noMoreSenders();
		if (uc->mappings) {
			// Transfer the port to the mappings object instead of
			// destroying it, keeping existing user maps reachable.
			uc->mappings->taggedRetain(OSTypeID(OSCollection));
			machPort->object = uc->mappings;
			SLIST_INSERT_HEAD(mappingBucket, machPort, link);
			iokit_switch_object_port(machPort->port, uc->mappings, IKOT_IOKIT_CONNECT);

			lck_mtx_unlock(gIOObjectPortLock);

			OSSafeReleaseNULL(uc->mappings);
		} else {
			lck_mtx_unlock(gIOObjectPortLock);
			OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
		OS_ANALYZER_SUPPRESS("77508635") OSSafeReleaseNULL(machPort);
	}


end:
	OSSafeReleaseNULL(obj);
}
404
// Create (or reuse) a send right for obj's port in the given task's IPC space,
// returning the task-local port name. Thin wrapper over iokit_make_send_right.
mach_port_name_t
IOMachPort::makeSendRightForTask( task_t task,
    io_object_t obj, ipc_kobject_type_t type )
{
	return iokit_make_send_right( task, obj, type );
}
411
// Destructor: tear down the backing Mach port, if one was allocated.
void
IOMachPort::free( void )
{
	if (port) {
		iokit_destroy_object_port( port, type );
	}
	super::free();
}
420
421 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
422
// Whether 'task' runs in registry-compatibility mode. Stubbed to false in
// this configuration, so compatibility matching below is never enabled.
static bool
IOTaskRegistryCompatibility(task_t task)
{
	return false;
}
428
429 static void
IOTaskRegistryCompatibilityMatching(task_t task,OSDictionary * matching)430 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
431 {
432 matching->setObject(gIOServiceNotificationUserKey, kOSBooleanTrue);
433 if (!IOTaskRegistryCompatibility(task)) {
434 return;
435 }
436 matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
437 }
438
439 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
440
OSDefineMetaClassAndStructors(IOUserIterator,OSIterator)441 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
442
443 IOUserIterator *
444 IOUserIterator::withIterator(OSIterator * iter)
445 {
446 IOUserIterator * me;
447
448 if (!iter) {
449 return NULL;
450 }
451
452 me = new IOUserIterator;
453 if (me && !me->init()) {
454 me->release();
455 me = NULL;
456 }
457 if (!me) {
458 iter->release();
459 return me;
460 }
461 me->userIteratorObject = iter;
462
463 return me;
464 }
465
466 bool
init(void)467 IOUserIterator::init( void )
468 {
469 if (!OSObject::init()) {
470 return false;
471 }
472
473 IOLockInlineInit(&lock);
474 return true;
475 }
476
// Release the wrapped iterator (if any) and destroy the embedded lock.
void
IOUserIterator::free()
{
	if (userIteratorObject) {
		userIteratorObject->release();
	}
	IOLockInlineDestroy(&lock);
	OSObject::free();
}
486
// Reset the wrapped iterator to its start, under the lock.
void
IOUserIterator::reset()
{
	IOLockLock(&lock);
	// userIteratorObject is expected to be an OSIterator in this subclass.
	assert(OSDynamicCast(OSIterator, userIteratorObject));
	((OSIterator *)userIteratorObject)->reset();
	IOLockUnlock(&lock);
}
495
496 bool
isValid()497 IOUserIterator::isValid()
498 {
499 bool ret;
500
501 IOLockLock(&lock);
502 assert(OSDynamicCast(OSIterator, userIteratorObject));
503 ret = ((OSIterator *)userIteratorObject)->isValid();
504 IOLockUnlock(&lock);
505
506 return ret;
507 }
508
// Deliberately unsupported: callers must use copyNextObject(), which returns
// a retained reference and is safe against concurrent teardown.
OSObject *
IOUserIterator::getNextObject()
{
	assert(false);
	return NULL;
}
515
516 OSObject *
copyNextObject()517 IOUserIterator::copyNextObject()
518 {
519 OSObject * ret = NULL;
520
521 IOLockLock(&lock);
522 if (userIteratorObject) {
523 ret = ((OSIterator *)userIteratorObject)->getNextObject();
524 if (ret) {
525 ret->retain();
526 }
527 }
528 IOLockUnlock(&lock);
529
530 return ret;
531 }
532
533 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
534 extern "C" {
535 // functions called from osfmk/device/iokit_rpc.c
536
// Fill 'desc' with a short human-readable description of an IOKit kobject,
// used by the Mach port describe path. Registry entries get class(entryID);
// on DEVELOPMENT/DEBUG builds user notifications get their serialized
// matching dictionary; everything else gets just the class name.
void
iokit_port_object_description(io_object_t obj, kobject_description_t desc)
{
	IORegistryEntry * regEntry;
	IOUserNotification * __unused noti;
	_IOServiceNotifier * __unused serviceNoti;
	OSSerialize * __unused s;
	OSDictionary * __unused matching = NULL;

	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
#if DEVELOPMENT || DEBUG
	} else if ((noti = OSDynamicCast(IOUserNotification, obj))) {
		// serviceNoti->matching may become NULL if the port gets a no-senders notification, so we have to lock gIOObjectPortLock
		IOLockLock(gIOObjectPortLock);
		serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->userIteratorObject);
		if (serviceNoti && (matching = serviceNoti->matching)) {
			matching->retain();
		}
		IOLockUnlock(gIOObjectPortLock);

		if (matching) {
			s = OSSerialize::withCapacity((unsigned int) page_size);
			if (s && matching->serialize(s)) {
				snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
			}
			OSSafeReleaseNULL(s);
			OSSafeReleaseNULL(matching);
		}
#endif /* DEVELOPMENT || DEBUG */
	} else {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
	}
}
571
572 // FIXME: Implementation of these functions are hidden from the static analyzer.
573 // As for now, the analyzer doesn't consistently support wrapper functions
574 // for retain and release.
575 #ifndef __clang_analyzer__
576 void
iokit_add_reference(io_object_t obj,natural_t type)577 iokit_add_reference( io_object_t obj, natural_t type )
578 {
579 IOUserClient * uc;
580
581 if (!obj) {
582 return;
583 }
584
585 if ((IKOT_IOKIT_CONNECT == type)
586 && (uc = OSDynamicCast(IOUserClient, obj))) {
587 OSIncrementAtomic(&uc->__ipc);
588 }
589
590 obj->retain();
591 }
592
593 void
iokit_remove_reference(io_object_t obj)594 iokit_remove_reference( io_object_t obj )
595 {
596 if (obj) {
597 obj->release();
598 }
599 }
600 #endif // __clang_analyzer__
601
// Drop an IPC reference on a connect-port object. When the last in-flight
// IPC reference on an inactive user client goes away and finalization was
// deferred (__ipcFinal), schedule the finalize now. Consumes obj.
void
iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj )
{
	IOUserClient * uc;
	bool finalize = false;

	if (!obj) {
		return;
	}

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		// Decrement returns the previous value: 1 means we dropped the last one.
		if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
			IOLockLock(gIOObjectPortLock);
			if ((finalize = uc->__ipcFinal)) {
				uc->__ipcFinal = false;
			}
			IOLockUnlock(gIOObjectPortLock);
		}
		if (finalize) {
			uc->scheduleFinalize(true);
		}
	}

	obj->release();
}
627
628 bool
finalizeUserReferences(OSObject * obj)629 IOUserClient::finalizeUserReferences(OSObject * obj)
630 {
631 IOUserClient * uc;
632 bool ok = true;
633
634 if ((uc = OSDynamicCast(IOUserClient, obj))) {
635 IOLockLock(gIOObjectPortLock);
636 if ((uc->__ipcFinal = (0 != uc->__ipc))) {
637 ok = false;
638 }
639 IOLockUnlock(gIOObjectPortLock);
640 }
641 return ok;
642 }
643
// Look up (or lazily create) the Mach port for obj/type and return it with a
// port reference taken. Returns NULL only if a new IOMachPort could not be
// allocated. Bumps mscount on reuse to track outstanding send rights.
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
{
	IOMachPort *machPort = NULL;
	ipc_port_t port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		machPort = IOMachPort::withObjectAndType(obj, type);
		if (__improbable(machPort == NULL)) {
			goto end;
		}
		SLIST_INSERT_HEAD(bucket, machPort, link);
	} else {
		// Another send right will be minted for this port.
		machPort->mscount++;
	}

	// Return the port with a reference held for the caller.
	iokit_retain_port(machPort->port);
	port = machPort->port;

end:
	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
674
// No-senders handler called from osfmk/device/iokit_rpc.c when all send
// rights to an object's port are gone. If the port entry was really
// destroyed, run type-specific death processing; otherwise report
// kIOReturnNotReady so IPC re-arms the notification with the updated mscount.
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOUserClient * client;
	IOMemoryMap * map;
	IOUserNotification * notify;
	IOUserServerCheckInToken * token;

	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
		return kIOReturnNotReady;
	}

	switch (type) {
	case IKOT_IOKIT_CONNECT:
		// The owning task died with the connection open.
		if ((client = OSDynamicCast( IOUserClient, obj ))) {
			IOStatisticsClientCall();
			IORWLockWrite(&client->lock);
			client->clientDied();
			IORWLockUnlock(&client->lock);
		}
		break;
	case IKOT_IOKIT_OBJECT:
		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
			map->taskDied();
		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
			// Detach the notifier; drops the extra retain held while armed.
			notify->setNotification( NULL );
		}
		break;
	case IKOT_IOKIT_IDENT:
		if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
			token->cancel();
		}
		break;
	}

	return kIOReturnSuccess;
}
713 }; /* extern "C" */
714
715 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
716
// Delivers IOService publish/termination notifications to a user-space port.
// Matched services queue in 'newSet'; a one-shot "ping" message wakes the
// client, which then drains the queue via copyNextObject().
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	// Kernel-processed portion of the ping message.
	struct PingMsgKdata {
		mach_msg_header_t msgHdr;
	};
	// User payload: notification type + client's async reference.
	struct PingMsgUdata {
		OSNotificationHeader64 notifyHeader;
	};

	// Cap on queued services before new arrivals are dropped.
	enum { kMaxOutstanding = 1024 };

	ipc_port_t remotePort;          // client port the ping is sent to
	void *msgReference;             // copy of the client's async reference
	mach_msg_size_t msgReferenceSize; // rounded size of msgReference
	natural_t msgType;              // notification type delivered in the header
	OSArray * newSet;               // queued matched services (LIFO drain)
	bool armed;                     // true when the next arrival should ping
	bool ipcLogged;                 // log a send failure only once

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	void invalidatePort(void);

	// IOService matching callback trampoline (static -> virtual).
	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
753
// Delivers IOService interest messages (general interest / priority
// notifications) to a user-space port, including a port descriptor for the
// provider and a variable-size message argument.
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	// Kernel-processed portion: header + one port descriptor (the provider).
	struct PingMsgKdata {
		mach_msg_header_t msgHdr;
		mach_msg_body_t msgBody;
		mach_msg_port_descriptor_t ports[1];
	};
	// User payload: notification header followed by interest content.
	struct PingMsgUdata {
		OSNotificationHeader64 notifyHeader __attribute__ ((packed));
	};

	ipc_port_t remotePort;          // client port messages are sent to
	void *msgReference;             // copy of the client's async reference
	mach_msg_size_t msgReferenceSize; // rounded size of msgReference
	mach_msg_size_t msgExtraSize;   // extra payload space beyond the header
	natural_t msgType;              // base notification type
	uint8_t clientIs64;             // client word size; selects argument layout
	int owningPID;                  // pid captured at init, for kIOMessageCopyClientID
	bool ipcLogged;                 // log a send failure only once

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	void invalidatePort(void);

	// IOService interest callback trampoline (static -> virtual).
	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
795
796 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
797
798 #undef super
799 #define super IOUserIterator
800 OSDefineMetaClass( IOUserNotification, IOUserIterator );
801 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
802
803 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
804
void
IOUserNotification::free( void )
{
#if DEVELOPMENT || DEBUG
	// By the time we are freed, setNotification(NULL) must have detached the
	// notifier; check that under the same lock that guards it.
	IOLockLock( gIOObjectPortLock);

	assert(userIteratorObject == NULL);

	IOLockUnlock( gIOObjectPortLock);
#endif /* DEVELOPMENT || DEBUG */

	super::free();
}
818
819
// Install (or clear, with notify == NULL) the IONotifier backing this user
// notification. Manages a self-retain that keeps this object alive while a
// notifier is installed; the retain is dropped when the notifier is cleared.
void
IOUserNotification::setNotification( IONotifier * notify )
{
	OSObject * previousNotify;

	/*
	 * We must retain this object here before proceeding.
	 * Two threads may race in setNotification(). If one thread sets a new notifier while the
	 * other thread sets the notifier to NULL, it is possible for the second thread to call release()
	 * before the first thread calls retain(). Without the retain here, this thread interleaving
	 * would cause the object to get released and freed before it is retained by the first thread,
	 * which is a UaF.
	 */
	retain();

	IOLockLock( gIOObjectPortLock);

	previousNotify = userIteratorObject;
	userIteratorObject = notify;

	IOLockUnlock( gIOObjectPortLock);

	if (previousNotify) {
		assert(OSDynamicCast(IONotifier, previousNotify));
		// Stop the old notifier; remove() also drops its reference.
		((IONotifier *)previousNotify)->remove();

		if (notify == NULL) {
			release();
		}
	} else if (notify) {
		// new IONotifier, retain the object. release() will happen in setNotification(NULL)
		retain();
	}

	release(); // paired with retain() at beginning of this method
}
856
// Intentionally a no-op: a notification stream cannot be rewound.
void
IOUserNotification::reset()
{
	// ?
}
862
// A user notification is always considered valid.
bool
IOUserNotification::isValid()
{
	return true;
}
868
869 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
870
871 #undef super
872 #define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification,IOUserNotification)873 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
874
875 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
876
877 bool
878 IOServiceUserNotification::init( mach_port_t port, natural_t type,
879 void * reference, vm_size_t referenceSize,
880 bool clientIs64 )
881 {
882 if (!super::init()) {
883 return false;
884 }
885
886 newSet = OSArray::withCapacity( 1 );
887 if (!newSet) {
888 return false;
889 }
890
891 if (referenceSize > sizeof(OSAsyncReference64)) {
892 return false;
893 }
894
895 msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
896 msgReference = IOMallocZeroData(msgReferenceSize);
897 if (!msgReference) {
898 return false;
899 }
900
901 remotePort = port;
902 msgType = type;
903 bcopy( reference, msgReference, referenceSize );
904
905 return true;
906 }
907
// Forget the client port so no further pings are sent (the send right is
// not released here; free() handles that only if the port is still set).
void
IOServiceUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
913
// Release the client port send right, the copied async reference, and the
// queue of matched services.
void
IOServiceUserNotification::free( void )
{
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);
	OSSafeReleaseNULL(newSet);

	super::free();
}
925
926 bool
_handler(void * target,void * ref,IOService * newService,IONotifier * notifier)927 IOServiceUserNotification::_handler( void * target,
928 void * ref, IOService * newService, IONotifier * notifier )
929 {
930 IOServiceUserNotification * targetObj = (IOServiceUserNotification *)target;
931 bool ret;
932
933 targetObj->retain();
934 ret = targetObj->handler( ref, newService );
935 targetObj->release();
936 return ret;
937 }
938
// Queue a newly matched service and, when the queue transitions from empty
// while armed, send a one-shot ping message to the client so it drains the
// queue. Services beyond kMaxOutstanding are dropped.
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	unsigned int count;
	kern_return_t kr;
	ipc_port_t port = NULL;
	bool sendPing = false;
	mach_msg_size_t msgSize, payloadSize;

	IOTakeLock( &lock );

	count = newSet->getCount();
	if (count < kMaxOutstanding) {
		newSet->setObject( newService );
		// Ping only on the empty->non-empty transition while armed;
		// disarm until the client drains the queue (copyNextObject).
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( &lock );

	if (kIOServiceTerminatedNotificationType == msgType) {
		// Keep the terminated service's object port alive until the
		// client has had a chance to see the notification.
		IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
	}

	if (sendPing) {
		port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );

		// Payload carries the notify header plus the (rounded) async reference.
		payloadSize = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		msgSize = (mach_msg_size_t)(sizeof(PingMsgKdata) + payloadSize);

		kr = kernel_mach_msg_send_with_builder_internal(0, payloadSize,
		    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
		    MACH_MSG_TIMEOUT_NONE, NULL,
		    ^(mach_msg_header_t *hdr, __assert_only mach_msg_descriptor_t *descs, void *payload){
			PingMsgUdata *udata = (PingMsgUdata *)payload;

			hdr->msgh_remote_port = remotePort;
			hdr->msgh_local_port = port;
			hdr->msgh_bits = MACH_MSGH_BITS(
				MACH_MSG_TYPE_COPY_SEND /*remote*/,
				MACH_MSG_TYPE_MAKE_SEND /*local*/);
			hdr->msgh_size = msgSize;
			hdr->msgh_id = kOSNotificationMessageID;

			assert(descs == NULL);
			/* End of kernel processed data */

			udata->notifyHeader.size = 0;
			udata->notifyHeader.type = msgType;

			assert((char *)udata->notifyHeader.reference + msgReferenceSize <= (char *)payload + payloadSize);
			bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
		});

		if (port) {
			iokit_release_port( port );
		}

		// Log a failed send once per notification object, not per failure.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
// Deliberately unsupported: callers must use copyNextObject(), which returns
// a retained reference.
OSObject *
IOServiceUserNotification::getNextObject()
{
	assert(false);
	return NULL;
}
1013
1014 OSObject *
copyNextObject()1015 IOServiceUserNotification::copyNextObject()
1016 {
1017 unsigned int count;
1018 OSObject * result;
1019
1020 IOLockLock(&lock);
1021
1022 count = newSet->getCount();
1023 if (count) {
1024 result = newSet->getObject( count - 1 );
1025 result->retain();
1026 newSet->removeObject( count - 1);
1027 } else {
1028 result = NULL;
1029 armed = true;
1030 }
1031
1032 IOLockUnlock(&lock);
1033
1034 return result;
1035 }
1036
1037 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1038
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification,IOUserNotification)1039 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1040
1041 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1042
// Initialize: record the client word size and owning pid, then capture the
// client port, message type, and a rounded copy of the async reference.
// On failure the partially-built state is cleaned up by free().
bool
IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, bool client64 )
{
	if (!super::init()) {
		return false;
	}

	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	clientIs64 = client64;

	// Captured now so kIOMessageCopyClientID can answer without a task lookup.
	owningPID = proc_selfpid();

	// Round the reference to mach message granularity and keep a copy.
	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
1071
// Forget the client port so no further messages are sent (the send right is
// not released here; free() handles that only if the port is still set).
void
IOServiceMessageUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
1077
// Release the client port send right and the copied async reference.
void
IOServiceMessageUserNotification::free( void )
{
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);

	super::free();
}
1088
1089 IOReturn
_handler(void * target,void * ref,UInt32 messageType,IOService * provider,void * argument,vm_size_t argSize)1090 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1091 UInt32 messageType, IOService * provider,
1092 void * argument, vm_size_t argSize )
1093 {
1094 IOServiceMessageUserNotification * targetObj = (IOServiceMessageUserNotification *)target;
1095 IOReturn ret;
1096
1097 targetObj->retain();
1098 ret = targetObj->handler(
1099 ref, messageType, provider, argument, argSize);
1100 targetObj->release();
1101 return ret;
1102 }
1103
// Deliver a service-interest message to the registered user-space port.
// Builds a complex mach message (one port descriptor for the provider,
// followed by the saved async reference and an IOServiceInterestContent64
// payload) and sends it without blocking.
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	kern_return_t kr;
	vm_size_t argSize;
	mach_msg_size_t thisMsgSize;
	ipc_port_t thisPort, providerPort;

	// Special in-kernel query: hand back the owner's PID instead of
	// sending a message.
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	if (callerArgSize == 0) {
		// No payload supplied: the argument pointer itself is sent, sized
		// to the client's pointer width.
		if (clientIs64) {
			argSize = sizeof(io_user_reference_t);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		// Clamp caller-supplied payloads to the notification maximum.
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions: the true payload size is
	// encoded into the notification type field, then argSize is rounded up.
	natural_t type = msgType;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	mach_msg_size_t extraSize = kIOUserNotifyMaxMessageSize + sizeof(IOServiceInterestContent64);
	mach_msg_size_t msgSize = (mach_msg_size_t) (sizeof(PingMsgKdata) +
	    sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize);

	// Overflow-checked total message size.
	if (os_add3_overflow(msgSize, offsetof(IOServiceInterestContent64, messageArgument), argSize, &thisMsgSize)) {
		return kIOReturnBadArgument;
	}
	mach_msg_size_t payloadSize = thisMsgSize - sizeof(PingMsgKdata);

	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );

	// The builder closure fills in header, descriptors and payload in place;
	// kernel_mach_msg_send_with_builder_internal allocates the buffer.
	kr = kernel_mach_msg_send_with_builder_internal(1, payloadSize,
	    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
	    MACH_MSG_TIMEOUT_NONE, NULL,
	    ^(mach_msg_header_t *hdr, mach_msg_descriptor_t *descs, void *payload){
		mach_msg_port_descriptor_t *port_desc = (mach_msg_port_descriptor_t *)descs;
		PingMsgUdata *udata = (PingMsgUdata *)payload;
		IOServiceInterestContent64 * data;
		mach_msg_size_t dataOffset;

		hdr->msgh_remote_port = remotePort;
		hdr->msgh_local_port = thisPort;
		hdr->msgh_bits = MACH_MSGH_BITS_COMPLEX
		| MACH_MSGH_BITS(
			MACH_MSG_TYPE_COPY_SEND /*remote*/,
			MACH_MSG_TYPE_MAKE_SEND /*local*/);
		hdr->msgh_size = thisMsgSize;
		hdr->msgh_id = kOSNotificationMessageID;

		/* body.msgh_descriptor_count is set automatically after the closure */

		port_desc[0].name = providerPort;
		port_desc[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
		port_desc[0].type = MACH_MSG_PORT_DESCRIPTOR;
		/* End of kernel processed data */

		udata->notifyHeader.size = extraSize;
		udata->notifyHeader.type = type;
		bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );

		/* data is after msgReference */
		dataOffset = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		data = (IOServiceInterestContent64 *) (((uint8_t *) udata) + dataOffset);
		data->messageType = messageType;

		if (callerArgSize == 0) {
			assert((char *)data->messageArgument + argSize <= (char *)payload + payloadSize);
			data->messageArgument[0] = (io_user_reference_t) messageArgument;
			if (!clientIs64) {
				// 32-bit clients read the low word; replicate it into
				// the high word as well.
				data->messageArgument[0] |= (data->messageArgument[0] << 32);
			}
		} else {
			assert((char *)data->messageArgument + callerArgSize <= (char *)payload + payloadSize);
			bcopy(messageArgument, data->messageArgument, callerArgSize);
		}
	});

	if (thisPort) {
		iokit_release_port( thisPort );
	}
	if (providerPort) {
		iokit_release_port( providerPort );
	}

	if (kr == MACH_SEND_NO_BUFFER) {
		return kIOReturnNoMemory;
	}

	// Other send failures are logged once per notification, not returned.
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1214
// Message notifications are not iterable; always empty.
OSObject *
IOServiceMessageUserNotification::getNextObject()
{
	return NULL;
}
1220
// Message notifications are not iterable; always empty.
OSObject *
IOServiceMessageUserNotification::copyNextObject()
{
	return NULL;
}
1226
1227 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1228
#undef super
#define super IOService
// IOUserClient is abstract; concrete user clients subclass it.
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

// Guards the per-task / per-client owner queues (registerOwner,
// noMoreSenders, iokit_task_terminate).
IOLock * gIOUserClientOwnersLock;

// ABI guard: the opaque instance-variable region must stay exactly
// 9 pointers wide so out-of-tree subclasses keep a stable layout.
static_assert(offsetof(IOUserClient, __opaque_end) -
    offsetof(IOUserClient, __opaque_start) == sizeof(void *) * 9,
    "ABI check: Opaque ivars for IOUserClient must be 9 void * big");
1238
// One-time class initialization: allocate the global locks used by the
// user-client machinery and register metaclasses for leak tracking.
void
IOUserClient::initialize( void )
{
	gIOObjectPortLock = IOLockAlloc();
	gIOUserClientOwnersLock = IOLockAlloc();
	gIOUserServerLock = IOLockAlloc();
	// NOTE(review): gIOUserServerLock is not covered by this assert —
	// confirm whether that is intentional.
	assert(gIOObjectPortLock && gIOUserClientOwnersLock);

#if IOTRACKING
	IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
	IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
	IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
	IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
	IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
#endif /* IOTRACKING */
}
1255
// Legacy 32-bit async-reference packer. On LP64 kernels this entry point
// is invalid and panics (hence the conditional noreturn attribute).
void
#if __LP64__
__attribute__((__noreturn__))
#endif
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
#if __LP64__
	panic("setAsyncReference not valid for 64b");
#else
	// Slot 0 packs the wake port while preserving the existing flag bits.
	asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
#endif
}
1273
// Pack a 64-bit async reference: wake port (plus preserved flag bits) in
// slot 0, user callback address and refcon in the callout slots.
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon)
{
	asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}
1284
// Task-aware variant: additionally tags the reference with kIOUCAsync64Flag
// when the target task has a 64-bit address space, so replies are sized
// correctly for the client.
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
{
	setAsyncReference64(asyncRef, wakePort, callback, refcon);
	if (vm_map_is_64bit(get_task_map(task))) {
		asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
	}
}
1295
1296 static OSDictionary *
CopyConsoleUser(UInt32 uid)1297 CopyConsoleUser(UInt32 uid)
1298 {
1299 OSArray * array;
1300 OSDictionary * user = NULL;
1301
1302 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1303 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1304 for (unsigned int idx = 0;
1305 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1306 idx++) {
1307 OSNumber * num;
1308
1309 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1310 && (uid == num->unsigned32BitValue())) {
1311 user->retain();
1312 break;
1313 }
1314 }
1315 }
1316 OSSafeReleaseNULL(ioProperty);
1317 return user;
1318 }
1319
1320 static OSDictionary *
CopyUserOnConsole(void)1321 CopyUserOnConsole(void)
1322 {
1323 OSArray * array;
1324 OSDictionary * user = NULL;
1325
1326 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1327 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1328 for (unsigned int idx = 0;
1329 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1330 idx++) {
1331 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1332 user->retain();
1333 break;
1334 }
1335 }
1336 }
1337 OSSafeReleaseNULL(ioProperty);
1338 return user;
1339 }
1340
1341 IOReturn
clientHasAuthorization(task_t task,IOService * service)1342 IOUserClient::clientHasAuthorization( task_t task,
1343 IOService * service )
1344 {
1345 proc_t p;
1346
1347 p = (proc_t) get_bsdtask_info(task);
1348 if (p) {
1349 uint64_t authorizationID;
1350
1351 authorizationID = proc_uniqueid(p);
1352 if (authorizationID) {
1353 if (service->getAuthorizationID() == authorizationID) {
1354 return kIOReturnSuccess;
1355 }
1356 }
1357 }
1358
1359 return kIOReturnNotPermitted;
1360 }
1361
// Check a named privilege for the task identified by securityToken.
// The token's meaning depends on the privilege: a task_t for most checks,
// an IOUCProcessToken for the secure-console-process check.
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	kern_return_t kr;
	security_token_t token;
	mach_msg_type_number_t count;
	task_t task;
	OSDictionary * user;
	bool secureConsole;


	// Foreground privilege: granted unless the current task is GPU-denied.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console-session privilege: the caller's audit session must match the
	// session of the user currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// For the secure-console check the token wraps a task plus a PID;
	// otherwise the token is the task itself.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	// Remaining checks are based on the task's security token (val[0] is
	// the effective UID).
	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator: euid must be root (0).
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: euid must belong to some console session.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: euid's session must be on-console; for the secure
		// variant the process must also hold secure input.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1456
1457 OSDictionary *
copyClientEntitlements(task_t task)1458 IOUserClient::copyClientEntitlements(task_t task)
1459 {
1460 proc_t p = NULL;
1461 pid_t pid = 0;
1462 OSDictionary *entitlements = NULL;
1463
1464 p = (proc_t)get_bsdtask_info(task);
1465 if (p == NULL) {
1466 return NULL;
1467 }
1468 pid = proc_pid(p);
1469
1470 if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1471 if (entitlements) {
1472 return entitlements;
1473 }
1474 }
1475
1476 // If the above fails, thats it
1477 return NULL;
1478 }
1479
1480 OSDictionary *
copyClientEntitlementsVnode(vnode_t vnode,off_t offset)1481 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1482 {
1483 OSDictionary *entitlements = NULL;
1484
1485 if (cs_entitlements_dictionary_copy_vnode(vnode, offset, (void**)&entitlements) != 0) {
1486 return NULL;
1487 }
1488 return entitlements;
1489 }
1490
// Copy a single entitlement value for a task via AMFI. Returns a retained
// OSObject (caller releases), or NULL when the task is the kernel task,
// the entitlement name is NULL, or the lookup fails.
OSObject *
IOUserClient::copyClientEntitlement( task_t task,
    const char * entitlement )
{
	void *entitlement_object = NULL;

	// NULL task means "the caller".
	if (task == NULL) {
		task = current_task();
	}

	/* Validate input arguments */
	if (task == kernel_task || entitlement == NULL) {
		return NULL;
	}
	proc_t proc = (proc_t)get_bsdtask_info(task);

	kern_return_t ret = amfi->OSEntitlements.copyEntitlementAsOSObjectWithProc(
		proc,
		entitlement,
		&entitlement_object);

	if (ret != KERN_SUCCESS) {
		return NULL;
	}
	// AMFI guarantees a non-NULL object on success.
	assert(entitlement_object != NULL);

	return (OSObject*)entitlement_object;
}
1519
1520 OSObject *
copyClientEntitlementVnode(struct vnode * vnode,off_t offset,const char * entitlement)1521 IOUserClient::copyClientEntitlementVnode(
1522 struct vnode *vnode,
1523 off_t offset,
1524 const char *entitlement)
1525 {
1526 OSDictionary *entitlements;
1527 OSObject *value;
1528
1529 entitlements = copyClientEntitlementsVnode(vnode, offset);
1530 if (entitlements == NULL) {
1531 return NULL;
1532 }
1533
1534 /* Fetch the entitlement value from the dictionary. */
1535 value = entitlements->getObject(entitlement);
1536 if (value != NULL) {
1537 value->retain();
1538 }
1539
1540 entitlements->release();
1541 return value;
1542 }
1543
bool
IOUserClient::init()
{
	// NOTE(review): super::init() is skipped when a property table already
	// exists — longstanding pattern shared by all init variants here;
	// verify intentional before changing.
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1553
bool
IOUserClient::init(OSDictionary * dictionary)
{
	// Same short-circuit pattern as init(): super::init(dictionary) only
	// runs when no property table exists yet.
	if (getPropertyTable() || super::init(dictionary)) {
		return reserve();
	}

	return false;
}
1563
// Default initWithTask: the task/securityID/type arguments are unused at
// this level — subclasses override to apply them.
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type )
{
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1575
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type,
    OSDictionary * properties )
{
	bool ok;

	// Non-short-circuiting '&=' is deliberate: both initializers always run.
	ok = super::init( properties );
	ok &= initWithTask( owningTask, securityID, type );

	return ok;
}
1589
// Allocate the expansion data and initialize per-instance locks and
// statistics. Idempotent for the reserved allocation; always returns true.
bool
IOUserClient::reserve()
{
	if (!reserved) {
		reserved = IOMallocType(ExpansionData);
	}
	setTerminateDefer(NULL, true);
	IOStatisticsRegisterCounter();
	IORWLockInlineInit(&lock);
	IOLockInlineInit(&filterLock);

	return true;
}
1603
// Link record tying one owning task to one user client. Each record is
// chained on both the task's list (taskLink) and the client's list (ucLink);
// all lists are guarded by gIOUserClientOwnersLock.
struct IOUserClientOwner {
	task_t task;
	queue_chain_t taskLink;
	IOUserClient * uc;
	queue_chain_t ucLink;
};
1610
// Record 'task' as an owner of this user client (idempotent per task).
// A new owner is linked onto both the client's and the task's owner
// queues; app-suspension interest is propagated to the task.
IOReturn
IOUserClient::registerOwner(task_t task)
{
	IOUserClientOwner * owner;
	IOReturn ret;
	bool newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	// owners.next == NULL marks a never-initialized queue head.
	if (!owners.next) {
		queue_init(&owners);
	} else {
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		owner = IOMallocType(IOUserClientOwner);

		owner->task = task;
		owner->uc = this;
		queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
		queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
		if (messageAppSuspended) {
			task_set_message_app_suspended(task, true);
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1651
// All send rights to this client are gone: unlink every owner record from
// both queues, recomputing each task's app-suspended-message interest from
// its remaining clients, then mark our owner queue uninitialized again.
void
IOUserClient::noMoreSenders(void)
{
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t * taskque;
	bool hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			// Does any remaining client of this task still want
			// app-suspended messages?
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IOFreeType(owner, IOUserClientOwner);
		}
		// NULL next marks the queue head as uninitialized (see registerOwner).
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1683
1684
// Called when a task's app-suspension state changes: collect (under the
// owners lock) every client of the task that opted into such messages,
// then deliver kIOMessageTaskAppSuspendedChange outside the lock.
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	queue_head_t * taskque;
	IOUserClientOwner * owner;
	OSSet * set;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		// Lazily allocate; OSSet retains each client so it stays valid
		// after the lock is dropped.
		if (!set) {
			set = OSSet::withCapacity(4);
			if (!set) {
				break;
			}
		}
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient * uc;

			uc = (typeof(uc))obj;
#if 0
			{
				OSString * str;
				str = IOCopyLogNameForPID(task_pid(task));
				IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
				uc->getName(), task_is_app_suspended(task));
				OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1732
// Task teardown hook: detach every user client owned by 'task'. Clients
// left with no owners are collected (under the lock, chained through their
// own queue heads) and then given clientDied() outside the lock.
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
	IOUserClientOwner * owner;
	IOUserClient * dead;
	IOUserClient * uc;
	queue_head_t * taskque;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IOFreeType(owner, IOUserClientOwner);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	// Drain the dead list without the lock held; clientDied() may block.
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1776
// Singly linked per-client list node mapping an owning task to its mach
// message filter policy; guarded by the client's filterLock.
struct IOUCFilterPolicy {
	task_t task;
	io_filter_policy_t filterPolicy;
	IOUCFilterPolicy * next;
};
1782
1783 io_filter_policy_t
filterForTask(task_t task,io_filter_policy_t addFilterPolicy)1784 IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
1785 {
1786 IOUCFilterPolicy * elem;
1787 io_filter_policy_t filterPolicy;
1788
1789 filterPolicy = 0;
1790 IOLockLock(&filterLock);
1791
1792 for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
1793 }
1794
1795 if (elem) {
1796 if (addFilterPolicy) {
1797 assert(addFilterPolicy == elem->filterPolicy);
1798 }
1799 filterPolicy = elem->filterPolicy;
1800 } else if (addFilterPolicy) {
1801 elem = IOMallocType(IOUCFilterPolicy);
1802 elem->task = task;
1803 elem->filterPolicy = addFilterPolicy;
1804 elem->next = reserved->filterPolicies;
1805 reserved->filterPolicies = elem;
1806 filterPolicy = addFilterPolicy;
1807 }
1808
1809 IOLockUnlock(&filterLock);
1810 return filterPolicy;
1811 }
1812
// Tear down the user client: release mappings, statistics, the filter
// policy list, expansion data, and inline locks. Owner queues must already
// be empty (see noMoreSenders / iokit_task_terminate).
void
IOUserClient::free()
{
	if (mappings) {
		mappings->release();
	}

	IOStatisticsUnregisterCounter();

	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		IOUCFilterPolicy * elem;
		IOUCFilterPolicy * nextElem;
		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
			nextElem = elem->next;
			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
			}
			IOFreeType(elem, IOUCFilterPolicy);
		}
		IOFreeType(reserved, ExpansionData);
		IORWLockInlineDestroy(&lock);
		IOLockInlineDestroy(&filterLock);
	}

	super::free();
}
1842
1843 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1844
// Abstract metaclass for the updated IOUserClient2022 subclass family.
OSDefineMetaClassAndAbstractStructors( IOUserClient2022, IOUserClient )
1846
1847
1848 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1849
// Owning process died. The compare-and-swap on 'closed' guarantees
// clientClose() runs at most once for non-shared clients; shared instances
// always get the call.
IOReturn
IOUserClient::clientDied( void )
{
	IOReturn ret = kIOReturnNotReady;

	if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
		ret = clientClose();
	}

	return ret;
}
1861
// Default stub; subclasses override to release resources on close.
IOReturn
IOUserClient::clientClose( void )
{
	return kIOReturnUnsupported;
}
1867
// Default stub; subclasses override to return their provider service.
IOService *
IOUserClient::getService( void )
{
	return NULL;
}
1873
// Default stub; subclasses override to accept a notification port.
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t /* port */,
	UInt32 /* type */,
	UInt32 /* refCon */)
{
	return kIOReturnUnsupported;
}
1882
// 64-bit refCon variant: forwards to the legacy entry point, truncating
// the refCon to 32 bits.
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t port,
	UInt32 type,
	io_user_reference_t refCon)
{
	return registerNotificationPort(port, type, (UInt32) refCon);
}
1891
// Default stub; subclasses override to export a notification semaphore.
IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type,
    semaphore_t * semaphore )
{
	return kIOReturnUnsupported;
}
1898
// Default stub; subclasses override to link a second client connection.
IOReturn
IOUserClient::connectClient( IOUserClient * /* client */ )
{
	return kIOReturnUnsupported;
}
1904
// Default stub; subclasses override to vend shared memory by type.
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    IOMemoryDescriptor ** memory )
{
	return kIOReturnUnsupported;
}
1912
// OSSharedPtr adapter: wraps the raw-pointer overload, adopting (not
// retaining) the returned descriptor reference.
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    OSSharedPtr<IOMemoryDescriptor>& memory )
{
	IOMemoryDescriptor* memoryRaw = nullptr;
	IOReturn result = clientMemoryForType(type, options, &memoryRaw);
	memory.reset(memoryRaw, OSNoRetain);
	return result;
}
1923
#if !__LP64__
// Legacy 32-bit-only stub; mapClientMemory64 is the supported path.
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	IOVirtualAddress atAddress )
{
	return NULL;
}
#endif
1935
// Map the client-memory region for 'type' into 'task'. The descriptor from
// clientMemoryForType() (returned retained) is released after the mapping
// is created; the map holds its own reference.
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	mach_vm_address_t atAddress )
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map = NULL;

	err = clientMemoryForType((UInt32) type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		FAKE_STACK_FRAME(getMetaClass());

		// Caller-supplied user options override those set by the subclass.
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		memory->release();

		FAKE_STACK_FRAME_END();
	}

	return map;
}
1963
// Hand an object to user space as a send right in 'task'. Consumes the
// caller's reference on 'obj' (the port holds its own).
IOReturn
IOUserClient::exportObjectToClient(task_t task,
    OSObject *obj, io_object_t *clientObj)
{
	mach_port_name_t name;

	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );

	*clientObj = (io_object_t)(uintptr_t) name;

	if (obj) {
		obj->release();
	}

	return kIOReturnSuccess;
}
1980
1981 IOReturn
copyPortNameForObjectInTask(task_t task,OSObject * obj,mach_port_name_t * port_name)1982 IOUserClient::copyPortNameForObjectInTask(task_t task,
1983 OSObject *obj, mach_port_name_t * port_name)
1984 {
1985 mach_port_name_t name;
1986
1987 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
1988
1989 *(mach_port_name_t *) port_name = name;
1990
1991 return kIOReturnSuccess;
1992 }
1993
1994 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSObject ** obj)1995 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
1996 OSObject **obj)
1997 {
1998 OSObject * object;
1999
2000 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
2001
2002 *obj = object;
2003
2004 return object ? kIOReturnSuccess : kIOReturnIPCError;
2005 }
2006
// OSSharedPtr adapter: adopts (does not re-retain) the reference produced
// by the raw-pointer overload.
IOReturn
IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
    OSSharedPtr<OSObject>& obj)
{
	OSObject* objRaw = NULL;
	IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
	obj.reset(objRaw, OSNoRetain);
	return result;
}
2016
// Adjust the user-reference count of a send right in 'task' by 'delta'.
IOReturn
IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
	return iokit_mod_send_right(task, port_name, delta);
}
2022
// Default stub; legacy subclasses override to expose a method table.
IOExternalMethod *
IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2028
// Default stub; legacy subclasses override to expose an async method table.
IOExternalAsyncMethod *
IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2034
// Default stub; subclasses override to expose a trap table.
IOExternalTrap *
IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
	return NULL;
}
2041
2042 #pragma clang diagnostic push
2043 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2044
2045 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
2046 // functions can break clients of kexts implementing getExternalMethodForIndex()
2047 IOExternalMethod *
2048 IOUserClient::
getTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2049 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
2050 {
2051 IOExternalMethod *method = getExternalMethodForIndex(index);
2052
2053 if (method) {
2054 *targetP = (IOService *) method->object;
2055 }
2056
2057 return method;
2058 }
2059
// OSSharedPtr adapter: takes an additional retain on the target, since the
// raw overload returns it unretained.
IOExternalMethod *
IOUserClient::
getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
{
	IOService* targetPRaw = NULL;
	IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
	targetP.reset(targetPRaw, OSRetain);
	return result;
}
2069
2070 IOExternalAsyncMethod *
2071 IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2072 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
2073 {
2074 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
2075
2076 if (method) {
2077 *targetP = (IOService *) method->object;
2078 }
2079
2080 return method;
2081 }
2082
// OSSharedPtr adapter: takes an additional retain on the target, since the
// raw overload returns it unretained.
IOExternalAsyncMethod *
IOUserClient::
getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
{
	IOService* targetPRaw = NULL;
	IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
	targetP.reset(targetPRaw, OSRetain);
	return result;
}
2092
2093 IOExternalTrap *
2094 IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP,UInt32 index)2095 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
2096 {
2097 IOExternalTrap *trap = getExternalTrapForIndex(index);
2098
2099 if (trap) {
2100 *targetP = trap->object;
2101 }
2102
2103 return trap;
2104 }
2105 #pragma clang diagnostic pop
2106
2107 IOReturn
releaseAsyncReference64(OSAsyncReference64 reference)2108 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2109 {
2110 mach_port_t port;
2111 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2112
2113 if (MACH_PORT_NULL != port) {
2114 iokit_release_port_send(port);
2115 }
2116
2117 return kIOReturnSuccess;
2118 }
2119
// Release one send right to a notification port; NULL is a no-op.
IOReturn
IOUserClient::releaseNotificationPort(mach_port_t port)
{
	if (MACH_PORT_NULL != port) {
		iokit_release_port_send(port);
	}

	return kIOReturnSuccess;
}
2129
// Legacy 32-bit entry point: widen the reference and argument array to
// 64-bit form and forward to sendAsyncResult64.
IOReturn
IOUserClient::sendAsyncResult(OSAsyncReference reference,
    IOReturn result, void *args[], UInt32 numArgs)
{
	OSAsyncReference64 reference64;
	OSBoundedArray<io_user_reference_t, kMaxAsyncArgs> args64;
	unsigned int idx;

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
		reference64[idx] = REF64(reference[idx]);
	}

	for (idx = 0; idx < numArgs; idx++) {
		args64[idx] = REF64(args[idx]);
	}

	return sendAsyncResult64(reference64, result, args64.data(), numArgs);
}
2152
// Forward to the shared implementation, passing caller-supplied options.
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2159
// Forward to the shared implementation with default (zero) options.
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2166
/*
 * Build and send the async completion message for an async external method.
 * reference[0] carries the reply port plus flag bits (kIOUCAsync0Flags);
 * kIOUCAsync64Flag selects the 64-bit message layout.
 */
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Reply message: mach header followed by either the 32-bit or 64-bit
	// notification payload, chosen by kIOUCAsync64Flag in reference[0].
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64 notifyHdr;
				IOAsyncCompletionContent asyncContent;
				io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg replyMsg;
	mach_port_t replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port = NULL;
	replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit client: only the trailing unused args are trimmed
		// from the message size.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		// 32-bit client: reference and args are narrowed via REF32.
		unsigned int idx;

		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		/* Skip reference[0] which is left as 0 from the earlier bzero */
		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		// Caller allows dropping: send with a zero timeout so a full
		// queue yields MACH_SEND_TIMED_OUT instead of blocking.
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	// Log a real send failure once per reference; the logged flag is
	// recorded in reference[0] so repeats stay quiet.
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2257
2258
2259 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2260
extern "C" {
/*
 * CHECK: declare a new local `out` as a `cls *` downcast of the MIG-supplied
 * object `obj`; returns kIOReturnBadArgument from the enclosing routine when
 * the cast fails.
 */
#define CHECK(cls, obj, out) \
	cls * out; \
	if( !(out = OSDynamicCast( cls, obj))) \
	return( kIOReturnBadArgument )

/*
 * CHECKLOCKED: like CHECK, but `obj` must be an IOUserIterator wrapper;
 * binds the wrapper to a new local `oIter` and its payload (cast to `cls`)
 * to `out`.
 */
#define CHECKLOCKED(cls, obj, out) \
	IOUserIterator * oIter; \
	cls * out; \
	if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
	return (kIOReturnBadArgument); \
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
	return (kIOReturnBadArgument)
2274
2275 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2276
2277 // Create a vm_map_copy_t or kalloc'ed data for memory
2278 // to be copied out. ipc will free after the copyout.
2279
2280 static kern_return_t
copyoutkdata(const void * data,vm_size_t len,io_buf_ptr_t * buf)2281 copyoutkdata( const void * data, vm_size_t len,
2282 io_buf_ptr_t * buf )
2283 {
2284 kern_return_t err;
2285 vm_map_copy_t copy;
2286
2287 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2288 false /* src_destroy */, ©);
2289
2290 assert( err == KERN_SUCCESS );
2291 if (err == KERN_SUCCESS) {
2292 *buf = (char *) copy;
2293 }
2294
2295 return err;
2296 }
2297
2298 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2299
/* Routine io_server_version */
kern_return_t
is_io_server_version(
	mach_port_t main_port,
	uint64_t *version)
{
	/* Report the kernel's IOKit MIG interface version to user space. */
	*version = IOKIT_SERVER_VERSION;
	return kIOReturnSuccess;
}
2309
2310 /* Routine io_object_get_class */
2311 kern_return_t
is_io_object_get_class(io_object_t object,io_name_t className)2312 is_io_object_get_class(
2313 io_object_t object,
2314 io_name_t className )
2315 {
2316 const OSMetaClass* my_obj = NULL;
2317
2318 if (!object) {
2319 return kIOReturnBadArgument;
2320 }
2321
2322 my_obj = object->getMetaClass();
2323 if (!my_obj) {
2324 return kIOReturnNotFound;
2325 }
2326
2327 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2328
2329 return kIOReturnSuccess;
2330 }
2331
2332 /* Routine io_object_get_superclass */
2333 kern_return_t
is_io_object_get_superclass(mach_port_t main_port,io_name_t obj_name,io_name_t class_name)2334 is_io_object_get_superclass(
2335 mach_port_t main_port,
2336 io_name_t obj_name,
2337 io_name_t class_name)
2338 {
2339 IOReturn ret;
2340 const OSMetaClass * meta;
2341 const OSMetaClass * super;
2342 const OSSymbol * name;
2343 const char * cstr;
2344
2345 if (!obj_name || !class_name) {
2346 return kIOReturnBadArgument;
2347 }
2348 if (main_port != main_device_port) {
2349 return kIOReturnNotPrivileged;
2350 }
2351
2352 ret = kIOReturnNotFound;
2353 meta = NULL;
2354 do{
2355 name = OSSymbol::withCString(obj_name);
2356 if (!name) {
2357 break;
2358 }
2359 meta = OSMetaClass::copyMetaClassWithName(name);
2360 if (!meta) {
2361 break;
2362 }
2363 super = meta->getSuperClass();
2364 if (!super) {
2365 break;
2366 }
2367 cstr = super->getClassName();
2368 if (!cstr) {
2369 break;
2370 }
2371 strlcpy(class_name, cstr, sizeof(io_name_t));
2372 ret = kIOReturnSuccess;
2373 }while (false);
2374
2375 OSSafeReleaseNULL(name);
2376 if (meta) {
2377 meta->releaseMetaClass();
2378 }
2379
2380 return ret;
2381 }
2382
2383 /* Routine io_object_get_bundle_identifier */
2384 kern_return_t
is_io_object_get_bundle_identifier(mach_port_t main_port,io_name_t obj_name,io_name_t bundle_name)2385 is_io_object_get_bundle_identifier(
2386 mach_port_t main_port,
2387 io_name_t obj_name,
2388 io_name_t bundle_name)
2389 {
2390 IOReturn ret;
2391 const OSMetaClass * meta;
2392 const OSSymbol * name;
2393 const OSSymbol * identifier;
2394 const char * cstr;
2395
2396 if (!obj_name || !bundle_name) {
2397 return kIOReturnBadArgument;
2398 }
2399 if (main_port != main_device_port) {
2400 return kIOReturnNotPrivileged;
2401 }
2402
2403 ret = kIOReturnNotFound;
2404 meta = NULL;
2405 do{
2406 name = OSSymbol::withCString(obj_name);
2407 if (!name) {
2408 break;
2409 }
2410 meta = OSMetaClass::copyMetaClassWithName(name);
2411 if (!meta) {
2412 break;
2413 }
2414 identifier = meta->getKmodName();
2415 if (!identifier) {
2416 break;
2417 }
2418 cstr = identifier->getCStringNoCopy();
2419 if (!cstr) {
2420 break;
2421 }
2422 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2423 ret = kIOReturnSuccess;
2424 }while (false);
2425
2426 OSSafeReleaseNULL(name);
2427 if (meta) {
2428 meta->releaseMetaClass();
2429 }
2430
2431 return ret;
2432 }
2433
2434 /* Routine io_object_conforms_to */
2435 kern_return_t
is_io_object_conforms_to(io_object_t object,io_name_t className,boolean_t * conforms)2436 is_io_object_conforms_to(
2437 io_object_t object,
2438 io_name_t className,
2439 boolean_t *conforms )
2440 {
2441 if (!object) {
2442 return kIOReturnBadArgument;
2443 }
2444
2445 *conforms = (NULL != object->metaCast( className ));
2446
2447 return kIOReturnSuccess;
2448 }
2449
2450 /* Routine io_object_get_retain_count */
2451 kern_return_t
is_io_object_get_retain_count(io_object_t object,uint32_t * retainCount)2452 is_io_object_get_retain_count(
2453 io_object_t object,
2454 uint32_t *retainCount )
2455 {
2456 if (!object) {
2457 return kIOReturnBadArgument;
2458 }
2459
2460 *retainCount = object->getRetainCount();
2461 return kIOReturnSuccess;
2462 }
2463
2464 /* Routine io_iterator_next */
2465 kern_return_t
is_io_iterator_next(io_object_t iterator,io_object_t * object)2466 is_io_iterator_next(
2467 io_object_t iterator,
2468 io_object_t *object )
2469 {
2470 IOReturn ret;
2471 OSObject * obj;
2472 OSIterator * iter;
2473 IOUserIterator * uiter;
2474
2475 if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2476 obj = uiter->copyNextObject();
2477 } else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2478 obj = iter->getNextObject();
2479 if (obj) {
2480 obj->retain();
2481 }
2482 } else {
2483 return kIOReturnBadArgument;
2484 }
2485
2486 if (obj) {
2487 *object = obj;
2488 ret = kIOReturnSuccess;
2489 } else {
2490 ret = kIOReturnNoDevice;
2491 }
2492
2493 return ret;
2494 }
2495
/* Routine io_iterator_reset */
kern_return_t
is_io_iterator_reset(
	io_object_t iterator )
{
	/* Rewind the iterator to its first element. */
	CHECK( OSIterator, iterator, iter );

	iter->reset();

	return kIOReturnSuccess;
}
2507
/* Routine io_iterator_is_valid */
kern_return_t
is_io_iterator_is_valid(
	io_object_t iterator,
	boolean_t *is_valid )
{
	/* Report whether the iterated collection changed underneath the
	 * iterator (callers typically reset and retry when invalid). */
	CHECK( OSIterator, iterator, iter );

	*is_valid = iter->isValid();

	return kIOReturnSuccess;
}
2520
2521 static kern_return_t
internal_io_service_match_property_table(io_service_t _service,const char * matching,mach_msg_type_number_t matching_size,boolean_t * matches)2522 internal_io_service_match_property_table(
2523 io_service_t _service,
2524 const char * matching,
2525 mach_msg_type_number_t matching_size,
2526 boolean_t *matches)
2527 {
2528 CHECK( IOService, _service, service );
2529
2530 kern_return_t kr;
2531 OSObject * obj;
2532 OSDictionary * dict;
2533
2534 assert(matching_size);
2535
2536
2537 obj = OSUnserializeXML(matching, matching_size);
2538
2539 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2540 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2541 *matches = service->passiveMatch( dict );
2542 kr = kIOReturnSuccess;
2543 } else {
2544 kr = kIOReturnBadArgument;
2545 }
2546
2547 if (obj) {
2548 obj->release();
2549 }
2550
2551 return kr;
2552 }
2553
/* Routine io_service_match_property_table */
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	/* Unimplemented legacy string entry point; the _ool/_bin variants
	 * below carry the matching dictionary instead. */
	return kIOReturnUnsupported;
}
2563
2564
/* Routine io_service_match_property_table_ool */
kern_return_t
is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	/* Take ownership of the out-of-line matching buffer by mapping it
	 * into the kernel map; we must deallocate it ourselves below. */
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		/* The real outcome travels in *result. */
		*result = internal_io_service_match_property_table(service,
		    (const char *)data, matchingCnt, matches );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2590
/* Routine io_service_match_property_table_bin */
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	/* In-band variant: the serialized matching dictionary arrives inline
	 * in the message, so no vm copyout/deallocate dance is needed. */
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2601
2602 static kern_return_t
internal_io_service_get_matching_services(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_iterator_t * existing)2603 internal_io_service_get_matching_services(
2604 mach_port_t main_port,
2605 const char * matching,
2606 mach_msg_type_number_t matching_size,
2607 io_iterator_t *existing )
2608 {
2609 kern_return_t kr;
2610 OSObject * obj;
2611 OSDictionary * dict;
2612
2613 if (main_port != main_device_port) {
2614 return kIOReturnNotPrivileged;
2615 }
2616
2617 assert(matching_size);
2618 obj = OSUnserializeXML(matching, matching_size);
2619
2620 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2621 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2622 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2623 kr = kIOReturnSuccess;
2624 } else {
2625 kr = kIOReturnBadArgument;
2626 }
2627
2628 if (obj) {
2629 obj->release();
2630 }
2631
2632 return kr;
2633 }
2634
/* Routine io_service_get_matching_services */
kern_return_t
is_io_service_get_matching_services(
	mach_port_t main_port,
	io_string_t matching,
	io_iterator_t *existing )
{
	/* Unimplemented legacy string entry point; the _ool/_bin variants
	 * below carry the matching dictionary instead. */
	return kIOReturnUnsupported;
}
2644
/* Routine io_service_get_matching_services_ool */
kern_return_t
is_io_service_get_matching_services_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	/* Take ownership of the out-of-line matching buffer; we must
	 * deallocate it ourselves below. */
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*existing = NULL;
		/* The real outcome travels in *result. */
		*result = internal_io_service_get_matching_services(main_port,
		    (const char *) data, matchingCnt, existing);
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2672
/* Routine io_service_get_matching_services_bin */
kern_return_t
is_io_service_get_matching_services_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
	/* In-band variant: matching dictionary arrives inline in the message. */
	return internal_io_service_get_matching_services(main_port, matching, matchingCnt, existing);
}
2683
2684
2685 static kern_return_t
internal_io_service_get_matching_service(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_service_t * service)2686 internal_io_service_get_matching_service(
2687 mach_port_t main_port,
2688 const char * matching,
2689 mach_msg_type_number_t matching_size,
2690 io_service_t *service )
2691 {
2692 kern_return_t kr;
2693 OSObject * obj;
2694 OSDictionary * dict;
2695
2696 if (main_port != main_device_port) {
2697 return kIOReturnNotPrivileged;
2698 }
2699
2700 assert(matching_size);
2701 obj = OSUnserializeXML(matching, matching_size);
2702
2703 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2704 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2705 *service = IOService::copyMatchingService( dict );
2706 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2707 } else {
2708 kr = kIOReturnBadArgument;
2709 }
2710
2711 if (obj) {
2712 obj->release();
2713 }
2714
2715 return kr;
2716 }
2717
/* Routine io_service_get_matching_service */
kern_return_t
is_io_service_get_matching_service(
	mach_port_t main_port,
	io_string_t matching,
	io_service_t *service )
{
	/* Unimplemented legacy string entry point; the _ool/_bin variants
	 * below carry the matching dictionary instead. */
	return kIOReturnUnsupported;
}
2727
/* Routine io_service_get_matching_service_ool */
kern_return_t
is_io_service_get_matching_service_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	/* Take ownership of the out-of-line matching buffer; we must
	 * deallocate it ourselves below. */
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*service = NULL;
		/* The real outcome travels in *result. */
		*result = internal_io_service_get_matching_service(main_port,
		    (const char *) data, matchingCnt, service );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2755
/* Routine io_service_get_matching_service_bin */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	/* In-band variant: matching dictionary arrives inline in the message. */
	return internal_io_service_get_matching_service(main_port, matching, matchingCnt, service);
}
2766
2767 static kern_return_t
internal_io_service_add_notification(mach_port_t main_port,io_name_t notification_type,const char * matching,size_t matching_size,mach_port_t port,void * reference,vm_size_t referenceSize,bool client64,io_object_t * notification)2768 internal_io_service_add_notification(
2769 mach_port_t main_port,
2770 io_name_t notification_type,
2771 const char * matching,
2772 size_t matching_size,
2773 mach_port_t port,
2774 void * reference,
2775 vm_size_t referenceSize,
2776 bool client64,
2777 io_object_t * notification )
2778 {
2779 IOServiceUserNotification * userNotify = NULL;
2780 IONotifier * notify = NULL;
2781 const OSSymbol * sym;
2782 OSObject * obj;
2783 OSDictionary * dict;
2784 IOReturn err;
2785 natural_t userMsgType;
2786
2787 if (main_port != main_device_port) {
2788 return kIOReturnNotPrivileged;
2789 }
2790
2791 do {
2792 err = kIOReturnNoResources;
2793
2794 if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
2795 return kIOReturnMessageTooLarge;
2796 }
2797
2798 if (!(sym = OSSymbol::withCString( notification_type ))) {
2799 err = kIOReturnNoResources;
2800 }
2801
2802 assert(matching_size);
2803 obj = OSUnserializeXML(matching, matching_size);
2804 dict = OSDynamicCast(OSDictionary, obj);
2805 if (!dict) {
2806 err = kIOReturnBadArgument;
2807 continue;
2808 }
2809 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2810
2811 if ((sym == gIOPublishNotification)
2812 || (sym == gIOFirstPublishNotification)) {
2813 userMsgType = kIOServicePublishNotificationType;
2814 } else if ((sym == gIOMatchedNotification)
2815 || (sym == gIOFirstMatchNotification)) {
2816 userMsgType = kIOServiceMatchedNotificationType;
2817 } else if ((sym == gIOTerminatedNotification)
2818 || (sym == gIOWillTerminateNotification)) {
2819 userMsgType = kIOServiceTerminatedNotificationType;
2820 } else {
2821 userMsgType = kLastIOKitNotificationType;
2822 }
2823
2824 userNotify = new IOServiceUserNotification;
2825
2826 if (userNotify && !userNotify->init( port, userMsgType,
2827 reference, referenceSize, client64)) {
2828 userNotify->release();
2829 userNotify = NULL;
2830 }
2831 if (!userNotify) {
2832 continue;
2833 }
2834
2835 notify = IOService::addMatchingNotification( sym, dict,
2836 &userNotify->_handler, userNotify );
2837 if (notify) {
2838 *notification = userNotify;
2839 userNotify->setNotification( notify );
2840 err = kIOReturnSuccess;
2841 } else {
2842 err = kIOReturnUnsupported;
2843 }
2844 } while (false);
2845
2846 if ((kIOReturnSuccess != err) && userNotify) {
2847 userNotify->setNotification(NULL);
2848 userNotify->invalidatePort();
2849 userNotify->release();
2850 userNotify = NULL;
2851 }
2852
2853 if (sym) {
2854 sym->release();
2855 }
2856 if (obj) {
2857 obj->release();
2858 }
2859
2860 return err;
2861 }
2862
2863
/* Routine io_service_add_notification */
kern_return_t
is_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	/* Unimplemented legacy string entry point; the _bin/_ool variants
	 * below carry the matching dictionary instead. */
	return kIOReturnUnsupported;
}
2877
/* Routine io_service_add_notification_64 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	/* Unimplemented legacy string entry point; the _bin_64/_ool_64
	 * variants below carry the matching dictionary instead. */
	return kIOReturnUnsupported;
}
2891
2892 /* Routine io_service_add_notification_bin */
2893 kern_return_t
is_io_service_add_notification_bin(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)2894 is_io_service_add_notification_bin
2895 (
2896 mach_port_t main_port,
2897 io_name_t notification_type,
2898 io_struct_inband_t matching,
2899 mach_msg_type_number_t matchingCnt,
2900 mach_port_t wake_port,
2901 io_async_ref_t reference,
2902 mach_msg_type_number_t referenceCnt,
2903 io_object_t *notification)
2904 {
2905 io_async_ref_t zreference;
2906
2907 if (referenceCnt > ASYNC_REF_COUNT) {
2908 return kIOReturnBadArgument;
2909 }
2910 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2911 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2912
2913 return internal_io_service_add_notification(main_port, notification_type,
2914 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2915 false, notification);
2916 }
2917
2918 /* Routine io_service_add_notification_bin_64 */
2919 kern_return_t
is_io_service_add_notification_bin_64(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)2920 is_io_service_add_notification_bin_64
2921 (
2922 mach_port_t main_port,
2923 io_name_t notification_type,
2924 io_struct_inband_t matching,
2925 mach_msg_type_number_t matchingCnt,
2926 mach_port_t wake_port,
2927 io_async_ref64_t reference,
2928 mach_msg_type_number_t referenceCnt,
2929 io_object_t *notification)
2930 {
2931 io_async_ref64_t zreference;
2932
2933 if (referenceCnt > ASYNC_REF64_COUNT) {
2934 return kIOReturnBadArgument;
2935 }
2936 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2937 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2938
2939 return internal_io_service_add_notification(main_port, notification_type,
2940 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2941 true, notification);
2942 }
2943
/*
 * Out-of-line variant helper: map the vm-copied matching buffer into the
 * kernel map, forward to internal_io_service_add_notification(), then
 * release the mapping.
 */
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	/* Take ownership of the out-of-line matching buffer; we must
	 * deallocate it ourselves below. */
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		/* The real outcome travels in *result. */
		*result = internal_io_service_add_notification( main_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2975
2976 /* Routine io_service_add_notification_ool */
2977 kern_return_t
is_io_service_add_notification_ool(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)2978 is_io_service_add_notification_ool(
2979 mach_port_t main_port,
2980 io_name_t notification_type,
2981 io_buf_ptr_t matching,
2982 mach_msg_type_number_t matchingCnt,
2983 mach_port_t wake_port,
2984 io_async_ref_t reference,
2985 mach_msg_type_number_t referenceCnt,
2986 kern_return_t *result,
2987 io_object_t *notification )
2988 {
2989 io_async_ref_t zreference;
2990
2991 if (referenceCnt > ASYNC_REF_COUNT) {
2992 return kIOReturnBadArgument;
2993 }
2994 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2995 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2996
2997 return internal_io_service_add_notification_ool(main_port, notification_type,
2998 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2999 false, result, notification);
3000 }
3001
3002 /* Routine io_service_add_notification_ool_64 */
3003 kern_return_t
is_io_service_add_notification_ool_64(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3004 is_io_service_add_notification_ool_64(
3005 mach_port_t main_port,
3006 io_name_t notification_type,
3007 io_buf_ptr_t matching,
3008 mach_msg_type_number_t matchingCnt,
3009 mach_port_t wake_port,
3010 io_async_ref64_t reference,
3011 mach_msg_type_number_t referenceCnt,
3012 kern_return_t *result,
3013 io_object_t *notification )
3014 {
3015 io_async_ref64_t zreference;
3016
3017 if (referenceCnt > ASYNC_REF64_COUNT) {
3018 return kIOReturnBadArgument;
3019 }
3020 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3021 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3022
3023 return internal_io_service_add_notification_ool(main_port, notification_type,
3024 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3025 true, result, notification);
3026 }
3027
/* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	/* Oldest ABI: a single natural_t reference; forwards to the string
	 * variant (which itself now returns kIOReturnUnsupported). */
	return is_io_service_add_notification( main_port, notification_type,
	           matching, port, &ref, 1, notification );
}
3042
3043
/*
 * Common implementation for the io_service_add_interest_notification* MIG
 * routines: register a message-based interest handler on the service for
 * the given interest type (e.g. general/priority interest symbol).
 * `reference`/`referenceSize` are opaque client data echoed back in each
 * message; `client64` selects the 64-bit message layout.
 */
static kern_return_t
internal_io_service_add_interest_notification(
	io_object_t _service,
	io_name_t type_of_interest,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceMessageUserNotification * userNotify = NULL;
	IONotifier * notify = NULL;
	const OSSymbol * sym;
	IOReturn err;

	CHECK( IOService, _service, service );

	err = kIOReturnNoResources;
	if ((sym = OSSymbol::withCString( type_of_interest ))) {
		/* do { } while (false): `continue` exits the block early. */
		do {
			userNotify = new IOServiceMessageUserNotification;

			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
			    reference, referenceSize, client64 )) {
				userNotify->release();
				userNotify = NULL;
			}
			if (!userNotify) {
				/* err stays kIOReturnNoResources. */
				continue;
			}

			notify = service->registerInterest( sym,
			    &userNotify->_handler, userNotify );
			if (notify) {
				*notification = userNotify;
				userNotify->setNotification( notify );
				err = kIOReturnSuccess;
			} else {
				err = kIOReturnUnsupported;
			}
		} while (false);

		sym->release();
	}

	/* On failure, tear down the partially constructed notification. */
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	return err;
}
3098
3099 /* Routine io_service_add_message_notification */
3100 kern_return_t
is_io_service_add_interest_notification(io_object_t service,io_name_t type_of_interest,mach_port_t port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3101 is_io_service_add_interest_notification(
3102 io_object_t service,
3103 io_name_t type_of_interest,
3104 mach_port_t port,
3105 io_async_ref_t reference,
3106 mach_msg_type_number_t referenceCnt,
3107 io_object_t * notification )
3108 {
3109 io_async_ref_t zreference;
3110
3111 if (referenceCnt > ASYNC_REF_COUNT) {
3112 return kIOReturnBadArgument;
3113 }
3114 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3115 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3116
3117 return internal_io_service_add_interest_notification(service, type_of_interest,
3118 port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3119 }
3120
3121 /* Routine io_service_add_interest_notification_64 */
3122 kern_return_t
is_io_service_add_interest_notification_64(io_object_t service,io_name_t type_of_interest,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3123 is_io_service_add_interest_notification_64(
3124 io_object_t service,
3125 io_name_t type_of_interest,
3126 mach_port_t wake_port,
3127 io_async_ref64_t reference,
3128 mach_msg_type_number_t referenceCnt,
3129 io_object_t *notification )
3130 {
3131 io_async_ref64_t zreference;
3132
3133 if (referenceCnt > ASYNC_REF64_COUNT) {
3134 return kIOReturnBadArgument;
3135 }
3136 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3137 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3138
3139 return internal_io_service_add_interest_notification(service, type_of_interest,
3140 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3141 }
3142
3143
3144 /* Routine io_service_acknowledge_notification */
3145 kern_return_t
is_io_service_acknowledge_notification(io_object_t _service,natural_t notify_ref,natural_t response)3146 is_io_service_acknowledge_notification(
3147 io_object_t _service,
3148 natural_t notify_ref,
3149 natural_t response )
3150 {
3151 CHECK( IOService, _service, service );
3152
3153 return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
3154 (IOOptionBits) response );
3155 }
3156
/* Routine io_connect_get_semaphore */
// Return the semaphore a user client associates with the given
// notification type.
kern_return_t
is_io_connect_get_notification_semaphore(
	io_connect_t connection,
	natural_t notification_type,
	semaphore_t *semaphore )
{
	IOReturn ret;
	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();
	// Taken for writing — NOTE(review): presumably because
	// getNotificationSemaphore may mutate client state; confirm against
	// IOUserClient subclass implementations.
	IORWLockWrite(&client->lock);
	ret = client->getNotificationSemaphore((UInt32) notification_type,
	    semaphore );
	IORWLockUnlock(&client->lock);

	return ret;
}
3175
3176 /* Routine io_registry_get_root_entry */
3177 kern_return_t
is_io_registry_get_root_entry(mach_port_t main_port,io_object_t * root)3178 is_io_registry_get_root_entry(
3179 mach_port_t main_port,
3180 io_object_t *root )
3181 {
3182 IORegistryEntry * entry;
3183
3184 if (main_port != main_device_port) {
3185 return kIOReturnNotPrivileged;
3186 }
3187
3188 entry = IORegistryEntry::getRegistryRoot();
3189 if (entry) {
3190 entry->retain();
3191 }
3192 *root = entry;
3193
3194 return kIOReturnSuccess;
3195 }
3196
3197 /* Routine io_registry_create_iterator */
3198 kern_return_t
is_io_registry_create_iterator(mach_port_t main_port,io_name_t plane,uint32_t options,io_object_t * iterator)3199 is_io_registry_create_iterator(
3200 mach_port_t main_port,
3201 io_name_t plane,
3202 uint32_t options,
3203 io_object_t *iterator )
3204 {
3205 if (main_port != main_device_port) {
3206 return kIOReturnNotPrivileged;
3207 }
3208
3209 *iterator = IOUserIterator::withIterator(
3210 IORegistryIterator::iterateOver(
3211 IORegistryEntry::getPlane( plane ), options ));
3212
3213 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3214 }
3215
3216 /* Routine io_registry_entry_create_iterator */
3217 kern_return_t
is_io_registry_entry_create_iterator(io_object_t registry_entry,io_name_t plane,uint32_t options,io_object_t * iterator)3218 is_io_registry_entry_create_iterator(
3219 io_object_t registry_entry,
3220 io_name_t plane,
3221 uint32_t options,
3222 io_object_t *iterator )
3223 {
3224 CHECK( IORegistryEntry, registry_entry, entry );
3225
3226 *iterator = IOUserIterator::withIterator(
3227 IORegistryIterator::iterateOver( entry,
3228 IORegistryEntry::getPlane( plane ), options ));
3229
3230 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3231 }
3232
/* Routine io_registry_iterator_enter */
// Recurse the iterator into the current entry's children.
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// NOTE: CHECKLOCKED declares 'iter' (the IORegistryIterator) and
	// also brings 'oIter' into scope — the IOUserIterator wrapper
	// whose lock serializes iterator use.
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	IOLockLock(&oIter->lock);
	iter->enterEntry();
	IOLockUnlock(&oIter->lock);

	return kIOReturnSuccess;
}
3246
/* Routine io_registry_iterator_exit */
// Pop the iterator back out of the entry it last recursed into.
// Returns kIOReturnNoDevice when there is no level to exit.
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool didIt;

	// CHECKLOCKED declares 'iter' and the wrapping 'oIter' whose lock
	// serializes access (see is_io_registry_iterator_enter_entry).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	IOLockLock(&oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(&oIter->lock);

	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
3262
/* Routine io_registry_entry_from_path */
// Resolve an inband registry path to an entry. Always returns
// kIOReturnSuccess from the MIG routine; a failed lookup is reported
// as a NULL *registry_entry.
kern_return_t
is_io_registry_entry_from_path(
	mach_port_t main_port,
	io_string_t path,
	io_object_t *registry_entry )
{
	IORegistryEntry * entry;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	entry = IORegistryEntry::fromPath( path );

	// Fallback for tasks in registry-compatibility mode: retry the
	// lookup as a matching search with the path as gIOPathMatchKey and
	// gIOCompatibilityMatchKey set true.
	if (!entry && IOTaskRegistryCompatibility(current_task())) {
		OSDictionary * matching;
		const OSObject * objects[2] = { kOSBooleanTrue, NULL };
		const OSSymbol * keys[2] = { gIOCompatibilityMatchKey, gIOPathMatchKey };

		objects[1] = OSString::withCStringNoCopy(path);
		matching = OSDictionary::withObjects(objects, keys, 2, 2);
		if (matching) {
			entry = IOService::copyMatchingService(matching);
		}
		OSSafeReleaseNULL(matching);
		OSSafeReleaseNULL(objects[1]);
	}

	// The caller receives the retained reference (fromPath /
	// copyMatchingService both return retained objects).
	*registry_entry = entry;

	return kIOReturnSuccess;
}
3296
3297
/* Routine io_registry_entry_from_path */
/*
 * Resolve a registry path supplied either inband (path[0] != 0) or as
 * out-of-line vm-copied data (path_ool / path_oolCnt). MIG contract:
 * once vm_map_copyout() has succeeded, the copy object is consumed and
 * this routine must return KERN_SUCCESS to MIG; the actual lookup
 * status is delivered through *result instead.
 */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t main_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry * entry;
	vm_map_offset_t map_data;
	const char * cpath;
	IOReturn res;
	kern_return_t err;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry = NULL;
	res = err = KERN_SUCCESS;
	if (path[0]) {
		cpath = path;
	} else {
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		// Cap the OOL path at the same limit used for other
		// user-supplied payloads in this file.
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// Reject a path that is not NUL-terminated at its
			// declared length.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	// Release the mapping made by vm_map_copyout(), if any.
	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	if (KERN_SUCCESS != err) {
		res = err;
	}
	*registry_entry = entry;
	*result = res;

	return err;
}
3358
3359
3360 /* Routine io_registry_entry_in_plane */
3361 kern_return_t
is_io_registry_entry_in_plane(io_object_t registry_entry,io_name_t plane,boolean_t * inPlane)3362 is_io_registry_entry_in_plane(
3363 io_object_t registry_entry,
3364 io_name_t plane,
3365 boolean_t *inPlane )
3366 {
3367 CHECK( IORegistryEntry, registry_entry, entry );
3368
3369 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3370
3371 return kIOReturnSuccess;
3372 }
3373
3374
3375 /* Routine io_registry_entry_get_path */
3376 kern_return_t
is_io_registry_entry_get_path(io_object_t registry_entry,io_name_t plane,io_string_t path)3377 is_io_registry_entry_get_path(
3378 io_object_t registry_entry,
3379 io_name_t plane,
3380 io_string_t path )
3381 {
3382 int length;
3383 CHECK( IORegistryEntry, registry_entry, entry );
3384
3385 length = sizeof(io_string_t);
3386 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3387 return kIOReturnSuccess;
3388 } else {
3389 return kIOReturnBadArgument;
3390 }
3391 }
3392
/* Routine io_registry_entry_get_path */
/*
 * Return the entry's path in 'plane'. If it fits in the inband
 * io_string_inband_t it is returned there; otherwise the path is built
 * in a temporary kMaxPath buffer and returned out-of-line via
 * path_ool / path_oolCnt.
 */
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum { kMaxPath = 16384 };
	IOReturn err;
	int length;
	char * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	*path_ool = NULL;
	*path_oolCnt = 0;
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		err = kIOReturnSuccess;
	} else {
		// Inband buffer too small: retry into a larger heap buffer.
		length = kMaxPath;
		buf = IONewData(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			*path_oolCnt = length;
			// copyoutkdata hands the bytes to MIG as OOL data.
			err = copyoutkdata(buf, length, path_ool);
		}
		if (buf) {
			// Free with the allocation size, not the (possibly
			// smaller) returned path length.
			IODeleteData(buf, char, kMaxPath);
		}
	}

	return err;
}
3432
3433
3434 /* Routine io_registry_entry_get_name */
3435 kern_return_t
is_io_registry_entry_get_name(io_object_t registry_entry,io_name_t name)3436 is_io_registry_entry_get_name(
3437 io_object_t registry_entry,
3438 io_name_t name )
3439 {
3440 CHECK( IORegistryEntry, registry_entry, entry );
3441
3442 strncpy( name, entry->getName(), sizeof(io_name_t));
3443
3444 return kIOReturnSuccess;
3445 }
3446
3447 /* Routine io_registry_entry_get_name_in_plane */
3448 kern_return_t
is_io_registry_entry_get_name_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t name)3449 is_io_registry_entry_get_name_in_plane(
3450 io_object_t registry_entry,
3451 io_name_t planeName,
3452 io_name_t name )
3453 {
3454 const IORegistryPlane * plane;
3455 CHECK( IORegistryEntry, registry_entry, entry );
3456
3457 if (planeName[0]) {
3458 plane = IORegistryEntry::getPlane( planeName );
3459 } else {
3460 plane = NULL;
3461 }
3462
3463 strncpy( name, entry->getName( plane), sizeof(io_name_t));
3464
3465 return kIOReturnSuccess;
3466 }
3467
3468 /* Routine io_registry_entry_get_location_in_plane */
3469 kern_return_t
is_io_registry_entry_get_location_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t location)3470 is_io_registry_entry_get_location_in_plane(
3471 io_object_t registry_entry,
3472 io_name_t planeName,
3473 io_name_t location )
3474 {
3475 const IORegistryPlane * plane;
3476 CHECK( IORegistryEntry, registry_entry, entry );
3477
3478 if (planeName[0]) {
3479 plane = IORegistryEntry::getPlane( planeName );
3480 } else {
3481 plane = NULL;
3482 }
3483
3484 const char * cstr = entry->getLocation( plane );
3485
3486 if (cstr) {
3487 strncpy( location, cstr, sizeof(io_name_t));
3488 return kIOReturnSuccess;
3489 } else {
3490 return kIOReturnNotFound;
3491 }
3492 }
3493
3494 /* Routine io_registry_entry_get_registry_entry_id */
3495 kern_return_t
is_io_registry_entry_get_registry_entry_id(io_object_t registry_entry,uint64_t * entry_id)3496 is_io_registry_entry_get_registry_entry_id(
3497 io_object_t registry_entry,
3498 uint64_t *entry_id )
3499 {
3500 CHECK( IORegistryEntry, registry_entry, entry );
3501
3502 *entry_id = entry->getRegistryEntryID();
3503
3504 return kIOReturnSuccess;
3505 }
3506
3507
3508 static OSObject *
IOCopyPropertyCompatible(IORegistryEntry * regEntry,const char * name)3509 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3510 {
3511 OSObject * obj;
3512 OSObject * compatProperties;
3513 OSDictionary * props;
3514
3515 obj = regEntry->copyProperty(name);
3516 if (obj) {
3517 return obj;
3518 }
3519
3520 compatProperties = regEntry->copyProperty(gIOUserServicePropertiesKey);
3521 if (!compatProperties
3522 && IOTaskRegistryCompatibility(current_task())) {
3523 compatProperties = regEntry->copyProperty(gIOCompatibilityPropertiesKey);
3524 }
3525 if (compatProperties) {
3526 props = OSDynamicCast(OSDictionary, compatProperties);
3527 if (props) {
3528 obj = props->getObject(name);
3529 if (obj) {
3530 obj->retain();
3531 }
3532 }
3533 compatProperties->release();
3534 }
3535
3536 return obj;
3537 }
3538
3539 /* Routine io_registry_entry_get_property */
3540 kern_return_t
is_io_registry_entry_get_property_bytes(io_object_t registry_entry,io_name_t property_name,io_struct_inband_t buf,mach_msg_type_number_t * dataCnt)3541 is_io_registry_entry_get_property_bytes(
3542 io_object_t registry_entry,
3543 io_name_t property_name,
3544 io_struct_inband_t buf,
3545 mach_msg_type_number_t *dataCnt )
3546 {
3547 OSObject * obj;
3548 OSData * data;
3549 OSString * str;
3550 OSBoolean * boo;
3551 OSNumber * off;
3552 UInt64 offsetBytes;
3553 unsigned int len = 0;
3554 const void * bytes = NULL;
3555 IOReturn ret = kIOReturnSuccess;
3556
3557 CHECK( IORegistryEntry, registry_entry, entry );
3558
3559 #if CONFIG_MACF
3560 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3561 return kIOReturnNotPermitted;
3562 }
3563 #endif
3564
3565 obj = IOCopyPropertyCompatible(entry, property_name);
3566 if (!obj) {
3567 return kIOReturnNoResources;
3568 }
3569
3570 // One day OSData will be a common container base class
3571 // until then...
3572 if ((data = OSDynamicCast( OSData, obj ))) {
3573 len = data->getLength();
3574 bytes = data->getBytesNoCopy();
3575 if (!data->isSerializable()) {
3576 len = 0;
3577 }
3578 } else if ((str = OSDynamicCast( OSString, obj ))) {
3579 len = str->getLength() + 1;
3580 bytes = str->getCStringNoCopy();
3581 } else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
3582 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
3583 bytes = boo->isTrue() ? "Yes" : "No";
3584 } else if ((off = OSDynamicCast( OSNumber, obj ))) {
3585 offsetBytes = off->unsigned64BitValue();
3586 len = off->numberOfBytes();
3587 if (len > sizeof(offsetBytes)) {
3588 len = sizeof(offsetBytes);
3589 }
3590 bytes = &offsetBytes;
3591 #ifdef __BIG_ENDIAN__
3592 bytes = (const void *)
3593 (((UInt32) bytes) + (sizeof(UInt64) - len));
3594 #endif
3595 } else {
3596 ret = kIOReturnBadArgument;
3597 }
3598
3599 if (bytes) {
3600 if (*dataCnt < len) {
3601 ret = kIOReturnIPCError;
3602 } else {
3603 *dataCnt = len;
3604 bcopy( bytes, buf, len );
3605 }
3606 }
3607 obj->release();
3608
3609 return ret;
3610 }
3611
3612
3613 /* Routine io_registry_entry_get_property */
3614 kern_return_t
is_io_registry_entry_get_property(io_object_t registry_entry,io_name_t property_name,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)3615 is_io_registry_entry_get_property(
3616 io_object_t registry_entry,
3617 io_name_t property_name,
3618 io_buf_ptr_t *properties,
3619 mach_msg_type_number_t *propertiesCnt )
3620 {
3621 kern_return_t err;
3622 unsigned int len;
3623 OSObject * obj;
3624
3625 CHECK( IORegistryEntry, registry_entry, entry );
3626
3627 #if CONFIG_MACF
3628 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3629 return kIOReturnNotPermitted;
3630 }
3631 #endif
3632
3633 obj = IOCopyPropertyCompatible(entry, property_name);
3634 if (!obj) {
3635 return kIOReturnNotFound;
3636 }
3637
3638 OSSerialize * s = OSSerialize::withCapacity(4096);
3639 if (!s) {
3640 obj->release();
3641 return kIOReturnNoMemory;
3642 }
3643
3644 if (obj->serialize( s )) {
3645 len = s->getLength();
3646 *propertiesCnt = len;
3647 err = copyoutkdata( s->text(), len, properties );
3648 } else {
3649 err = kIOReturnUnsupported;
3650 }
3651
3652 s->release();
3653 obj->release();
3654
3655 return err;
3656 }
3657
3658 /* Routine io_registry_entry_get_property_recursively */
3659 kern_return_t
is_io_registry_entry_get_property_recursively(io_object_t registry_entry,io_name_t plane,io_name_t property_name,uint32_t options,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)3660 is_io_registry_entry_get_property_recursively(
3661 io_object_t registry_entry,
3662 io_name_t plane,
3663 io_name_t property_name,
3664 uint32_t options,
3665 io_buf_ptr_t *properties,
3666 mach_msg_type_number_t *propertiesCnt )
3667 {
3668 kern_return_t err;
3669 unsigned int len;
3670 OSObject * obj;
3671
3672 CHECK( IORegistryEntry, registry_entry, entry );
3673
3674 #if CONFIG_MACF
3675 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3676 return kIOReturnNotPermitted;
3677 }
3678 #endif
3679
3680 obj = entry->copyProperty( property_name,
3681 IORegistryEntry::getPlane( plane ), options );
3682 if (!obj) {
3683 return kIOReturnNotFound;
3684 }
3685
3686 OSSerialize * s = OSSerialize::withCapacity(4096);
3687 if (!s) {
3688 obj->release();
3689 return kIOReturnNoMemory;
3690 }
3691
3692 if (obj->serialize( s )) {
3693 len = s->getLength();
3694 *propertiesCnt = len;
3695 err = copyoutkdata( s->text(), len, properties );
3696 } else {
3697 err = kIOReturnUnsupported;
3698 }
3699
3700 s->release();
3701 obj->release();
3702
3703 return err;
3704 }
3705
/* Routine io_registry_entry_get_properties */
// Legacy XML get-all-properties call: intentionally disabled. Callers
// use the binary variants (is_io_registry_entry_get_properties_bin*)
// below instead.
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	return kIOReturnUnsupported;
}
3715
3716 #if CONFIG_MACF
3717
// Context for GetPropertiesEditor(): the credential and entry being
// serialized, plus the root container so that only top-level
// properties are access-checked.
struct GetPropertiesEditorRef {
	kauth_cred_t cred;       // caller's credential for the MAC check
	IORegistryEntry * entry; // entry whose properties are serialized
	OSCollection * root;     // outermost collection seen by the editor
};
3723
// OSSerialize editor callback: while serializing an entry's property
// table, consult the MAC get-property hook for each member of the
// top-level container and suppress (return NULL for) any property the
// caller may not read. Returns the value retained, per the
// LIBKERN_RETURNS_RETAINED contract.
static const LIBKERN_RETURNS_RETAINED OSMetaClassBase *
GetPropertiesEditor(void * reference,
    OSSerialize * s,
    OSCollection * container,
    const OSSymbol * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	// The first container presented is the root dictionary; only its
	// immediate members are access-checked.
	if (!ref->root) {
		ref->root = container;
	}
	if (ref->root == container) {
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			value = NULL;
		}
	}
	if (value) {
		value->retain();
	}
	return value;
}
3746
3747 #endif /* CONFIG_MACF */
3748
/* Routine io_registry_entry_get_properties_bin_buf */
/*
 * Serialize all of the entry's properties in binary format. When the
 * result fits in the caller-supplied user buffer (buf / *bufsize) it
 * is copied there directly; otherwise it is returned as out-of-line
 * data via properties / propertiesCnt.
 */
kern_return_t
is_io_registry_entry_get_properties_bin_buf(
	io_object_t registry_entry,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t err = kIOReturnSuccess;
	unsigned int len;
	OSObject * compatProperties;
	OSSerialize * s;
	OSSerialize::Editor editor = NULL;
	void * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// If the MAC policy requests property filtering, install the
	// editor callback. 'ref' lives on this stack frame for the whole
	// serialization below.
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor = &GetPropertiesEditor;
		editRef = &ref;
		ref.cred = kauth_cred_get();
		ref.entry = entry;
		ref.root = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}


	// Compatibility dictionaries, when present, are merged over the
	// entry's own properties.
	compatProperties = entry->copyProperty(gIOUserServicePropertiesKey);
	if (!compatProperties
	    && IOTaskRegistryCompatibility(current_task())) {
		compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey);
	}

	if (compatProperties) {
		OSDictionary * dict;

		dict = entry->dictionaryWithProperties();
		if (!dict) {
			err = kIOReturnNoMemory;
		} else {
			// Hide the raw container keys; expose their merged
			// contents instead.
			dict->removeObject(gIOUserServicePropertiesKey);
			dict->removeObject(gIOCompatibilityPropertiesKey);
			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
			if (!dict->serialize(s)) {
				err = kIOReturnUnsupported;
			}
			dict->release();
		}
		compatProperties->release();
	} else if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		if (buf && bufsize && len <= *bufsize) {
			// Fits in the caller's buffer: no out-of-line data.
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	}
	s->release();

	return err;
}
3833
/* Routine io_registry_entry_get_properties_bin */
// Convenience wrapper: binary get-properties with no caller buffer,
// so the result is always returned out-of-line.
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
	           0, NULL, properties, propertiesCnt);
}
3844
/* Routine io_registry_entry_get_property_bin_buf */
/*
 * Look up a single property and return it binary-serialized: copied
 * into the caller's user buffer (buf / *bufsize) when it fits,
 * otherwise as out-of-line data via properties / propertiesCnt.
 * Supports recursive search through a plane via
 * kIORegistryIterateRecursively.
 */
kern_return_t
is_io_registry_entry_get_property_bin_buf(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	unsigned int len;
	OSObject * obj;
	const OSSymbol * sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	err = kIOReturnNotFound;
	if (gIORegistryEntryPropertyKeysKey == sym) {
		// Magic key: return the entry's list of property keys.
		obj = entry->copyPropertyKeys();
	} else {
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			obj = IOCopyPropertyCompatible(entry, property_name);
			if (obj == NULL) {
				// Walk the plane looking for the first entry with the
				// property; note 'entry' is reused as the iteration
				// cursor from here on.
				IORegistryIterator * iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
				if (iter) {
					while ((NULL == obj) && (entry = iter->getNextObject())) {
						OSObject * currentObj = IOCopyPropertyCompatible(entry, property_name);
#if CONFIG_MACF
						if (currentObj != NULL && 0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
							// Record that MAC hook blocked this entry and property, and continue to next entry
							err = kIOReturnNotPermitted;
							OSSafeReleaseNULL(currentObj);
							continue;
						}
#endif
						obj = currentObj;
					}
					iter->release();
				}
			}
		} else {
			obj = IOCopyPropertyCompatible(entry, property_name);
		}
		// Destructive-read properties are removed once fetched.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		// kIOReturnNotFound, or kIOReturnNotPermitted when the MAC
		// hook blocked every candidate entry.
		return err;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		if (buf && bufsize && len <= *bufsize) {
			// Fits in the caller's buffer: no out-of-line data.
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3945
/* Routine io_registry_entry_get_property_bin */
// Convenience wrapper: binary single-property lookup with no caller
// buffer, so the result is always returned out-of-line.
kern_return_t
is_io_registry_entry_get_property_bin(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
	           property_name, options, 0, NULL, properties, propertiesCnt);
}
3959
3960
/* Routine io_registry_entry_set_properties */
/*
 * Unserialize an XML property payload (delivered as a vm copy) and
 * apply it with setProperties(). MIG contract: once vm_map_copyout()
 * succeeds, this routine returns KERN_SUCCESS to MIG and delivers the
 * real status through *result.
 */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject * obj;
	kern_return_t err;
	IOReturn res;
	vm_offset_t data;
	vm_map_offset_t map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			IOService * service = OSDynamicCast(IOService, entry);
			OSDictionary * props = OSDynamicCast(OSDictionary, obj);
			// Optional allow-list: when the entry publishes
			// gIORegistryEntryAllowableSetPropertiesKey, every key in
			// the incoming dictionary must appear in that array.
			OSObject * allowable = entry->copyProperty(gIORegistryEntryAllowableSetPropertiesKey);
			OSArray * allowableArray;

			if (!allowable) {
				res = kIOReturnSuccess;
			} else {
				if (!props) {
					res = kIOReturnNotPermitted;
				} else if (!(allowableArray = OSDynamicCast(OSArray, allowable))) {
					res = kIOReturnNotPermitted;
				} else {
					bool allFound __block, found __block;

					allFound = true;
					props->iterateObjects(^(const OSSymbol * key, OSObject * value) {
						found = false;
						for (unsigned int idx = 0; !found; idx++) {
							OSObject * next = allowableArray->getObject(idx);
							if (!next) {
								break;
							}
							found = next->isEqualTo(key);
						}
						allFound &= found;
						if (!found) {
							IOLog("IORegistryEntrySetProperties(%s, %s) disallowed due to " kIORegistryEntryAllowableSetPropertiesKey "\n",
							    entry->getName(), key->getCStringNoCopy());
						}
						// Stop iterating once a disallowed key is seen.
						return !allFound;
					});
					res = allFound ? kIOReturnSuccess : kIOReturnBadArgument;
				}
			}
			if (kIOReturnSuccess == res) {
				IOUserClient *
				    client = OSDynamicCast(IOUserClient, entry);

				// User clients opted into default locking hold the
				// client lock across setProperties().
				if (client && client->defaultLockingSetProperties) {
					IORWLockWrite(&client->lock);
				}

				// Non-user-client entries may instead request
				// serialization via the property-action mechanism.
				if (!client && (kOSBooleanTrue == entry->getProperty(gIORegistryEntryDefaultLockingSetPropertiesKey))) {
					res = entry->runPropertyActionBlock(^IOReturn (void) {
						return entry->setProperties( obj );
					});
				} else {
					res = entry->setProperties( obj );
				}

				if (client && client->defaultLockingSetProperties) {
					IORWLockUnlock(&client->lock);
				}
				// Forward the dictionary to the driverkit user server
				// when the service is hosted there.
				if (service && props && service->hasUserServer()) {
					res = service->UserSetProperties(props);
				}
			}
			OSSafeReleaseNULL(allowable);
		}
		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	*result = res;
	return err;
}
4074
4075 /* Routine io_registry_entry_get_child_iterator */
4076 kern_return_t
is_io_registry_entry_get_child_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4077 is_io_registry_entry_get_child_iterator(
4078 io_object_t registry_entry,
4079 io_name_t plane,
4080 io_object_t *iterator )
4081 {
4082 CHECK( IORegistryEntry, registry_entry, entry );
4083
4084 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
4085 IORegistryEntry::getPlane( plane )));
4086
4087 return kIOReturnSuccess;
4088 }
4089
4090 /* Routine io_registry_entry_get_parent_iterator */
4091 kern_return_t
is_io_registry_entry_get_parent_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4092 is_io_registry_entry_get_parent_iterator(
4093 io_object_t registry_entry,
4094 io_name_t plane,
4095 io_object_t *iterator)
4096 {
4097 CHECK( IORegistryEntry, registry_entry, entry );
4098
4099 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
4100 IORegistryEntry::getPlane( plane )));
4101
4102 return kIOReturnSuccess;
4103 }
4104
4105 /* Routine io_service_get_busy_state */
4106 kern_return_t
is_io_service_get_busy_state(io_object_t _service,uint32_t * busyState)4107 is_io_service_get_busy_state(
4108 io_object_t _service,
4109 uint32_t *busyState )
4110 {
4111 CHECK( IOService, _service, service );
4112
4113 *busyState = service->getBusyState();
4114
4115 return kIOReturnSuccess;
4116 }
4117
4118 /* Routine io_service_get_state */
4119 kern_return_t
is_io_service_get_state(io_object_t _service,uint64_t * state,uint32_t * busy_state,uint64_t * accumulated_busy_time)4120 is_io_service_get_state(
4121 io_object_t _service,
4122 uint64_t *state,
4123 uint32_t *busy_state,
4124 uint64_t *accumulated_busy_time )
4125 {
4126 CHECK( IOService, _service, service );
4127
4128 *state = service->getState();
4129 *busy_state = service->getBusyState();
4130 *accumulated_busy_time = service->getAccumulatedBusyTime();
4131
4132 return kIOReturnSuccess;
4133 }
4134
4135 /* Routine io_service_wait_quiet */
4136 kern_return_t
is_io_service_wait_quiet(io_object_t _service,mach_timespec_t wait_time)4137 is_io_service_wait_quiet(
4138 io_object_t _service,
4139 mach_timespec_t wait_time )
4140 {
4141 uint64_t timeoutNS;
4142
4143 CHECK( IOService, _service, service );
4144
4145 timeoutNS = wait_time.tv_sec;
4146 timeoutNS *= kSecondScale;
4147 timeoutNS += wait_time.tv_nsec;
4148
4149 return service->waitQuiet(timeoutNS);
4150 }
4151
/* Routine io_service_wait_quiet_with_options */
// Like io_service_wait_quiet, but with option flags. The
// kIOWaitQuietPanicOnFailure option is entitlement-gated: unentitled
// callers have the option silently stripped (with a log) rather than
// receiving an error.
kern_return_t
is_io_service_wait_quiet_with_options(
	io_object_t _service,
	mach_timespec_t wait_time,
	uint32_t options )
{
	uint64_t timeoutNS;

	CHECK( IOService, _service, service );

	// Widen seconds to 64 bits before scaling to nanoseconds.
	timeoutNS = wait_time.tv_sec;
	timeoutNS *= kSecondScale;
	timeoutNS += wait_time.tv_nsec;

	if ((options & kIOWaitQuietPanicOnFailure) && !IOCurrentTaskHasEntitlement(kIOWaitQuietPanicsEntitlement)) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOServiceWaitQuietWithOptions(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);

		/* strip this option from the options before calling waitQuietWithOptions */
		options &= ~kIOWaitQuietPanicOnFailure;
	}

	return service->waitQuietWithOptions(timeoutNS, options);
}
4178
4179
4180 /* Routine io_service_request_probe */
4181 kern_return_t
is_io_service_request_probe(io_object_t _service,uint32_t options)4182 is_io_service_request_probe(
4183 io_object_t _service,
4184 uint32_t options )
4185 {
4186 CHECK( IOService, _service, service );
4187
4188 return service->requestProbe( options );
4189 }
4190
4191 /* Routine io_service_get_authorization_id */
4192 kern_return_t
is_io_service_get_authorization_id(io_object_t _service,uint64_t * authorization_id)4193 is_io_service_get_authorization_id(
4194 io_object_t _service,
4195 uint64_t *authorization_id )
4196 {
4197 kern_return_t kr;
4198
4199 CHECK( IOService, _service, service );
4200
4201 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
4202 kIOClientPrivilegeAdministrator );
4203 if (kIOReturnSuccess != kr) {
4204 return kr;
4205 }
4206
4207 #if defined(XNU_TARGET_OS_OSX)
4208 *authorization_id = service->getAuthorizationID();
4209 #else /* defined(XNU_TARGET_OS_OSX) */
4210 *authorization_id = 0;
4211 kr = kIOReturnUnsupported;
4212 #endif /* defined(XNU_TARGET_OS_OSX) */
4213
4214 return kr;
4215 }
4216
4217 /* Routine io_service_set_authorization_id */
4218 kern_return_t
is_io_service_set_authorization_id(io_object_t _service,uint64_t authorization_id)4219 is_io_service_set_authorization_id(
4220 io_object_t _service,
4221 uint64_t authorization_id )
4222 {
4223 CHECK( IOService, _service, service );
4224
4225 #if defined(XNU_TARGET_OS_OSX)
4226 return service->setAuthorizationID( authorization_id );
4227 #else /* defined(XNU_TARGET_OS_OSX) */
4228 return kIOReturnUnsupported;
4229 #endif /* defined(XNU_TARGET_OS_OSX) */
4230 }
4231
/* Routine io_service_open_ndr */
/*
 * Open a connection (an IOUserClient) to a service on behalf of a task.
 *
 * On success *connection holds the new client object and *result the
 * IOReturn of the open sequence.  The function's own return (err) is the
 * mach-level status and stays KERN_SUCCESS even when the open itself
 * failed — callers must inspect *result.
 *
 * Sequence: MACF service check, newUserClient(), one-time per-client
 * setup (locking properties, creator name), entitlement check, MACF
 * client check, sandbox filter resolution, owner registration.  Any
 * failure after newUserClient() closes and releases the client.
 */
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = NULL;
	kern_return_t  err = KERN_SUCCESS;
	IOReturn       res = kIOReturnSuccess;
	OSDictionary * propertiesDict = NULL;
	bool           disallowAccess = false;

	CHECK( IOService, _service, service );

	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	/* Only opening on behalf of the calling task is supported. */
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

#if CONFIG_MACF
	/* MAC policy may veto opening this service with this connect type. */
	if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
		return kIOReturnNotPermitted;
	}
#endif
	do{
		/* Open-time in-band properties are not currently supported. */
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		{
			OSObject *      obj;
			vm_offset_t     data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess && OSDynamicCast(IOUserClient, client) == NULL) {
			// client should always be a IOUserClient
			res = kIOReturnError;
		}

		if (res == kIOReturnSuccess) {
			/* Ensure the client's expansion data (reserved) exists. */
			if (!client->reserved) {
				if (!client->reserve()) {
					client->clientClose();
					OSSafeReleaseNULL(client);
					res = kIOReturnNoMemory;
				}
			}
		}

		if (res == kIOReturnSuccess) {
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
			}
			const char * creatorNameCStr = creatorName ? creatorName->getCStringNoCopy() : "<unknown>";
			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
			/*
			 * Shared-instance clients can be opened concurrently; serialize
			 * the one-time (client->opened) setup below under the owners lock.
			 */
			if (client->sharedInstance) {
				IOLockLock(gIOUserClientOwnersLock);
			}
			if (!client->opened) {
				client->opened = true;

				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
				{
					OSObject * obj;
					extern const OSSymbol * gIOSurfaceIdentifier;
					obj = client->getProperty(kIOUserClientDefaultLockingKey);
					bool hasProps = false;

					/*
					 * IOUserClient2022 subclasses must declare all three
					 * default-locking properties explicitly; a missing one
					 * is an error (logged below).
					 */
					client->uc2022 = (NULL != OSDynamicCast(IOUserClient2022, client));
					if (obj) {
						hasProps = true;
						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSetProperties = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSingleThreadExternalMethod = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					if (kIOReturnSuccess != res) {
						IOLog("IOUC %s requires kIOUserClientDefaultLockingKey, kIOUserClientDefaultLockingSetPropertiesKey, kIOUserClientDefaultLockingSingleThreadExternalMethodKey\n",
						    client->getMetaClass()->getClassName());
					}
					if (!hasProps) {
						/*
						 * Legacy client with no locking properties: enable
						 * default locking, except for kexts that depend on
						 * IOSurface.
						 */
						const OSMetaClass * meta;
						OSKext *            kext;
						meta = client->getMetaClass();
						kext = meta->getKext();
						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
							client->defaultLocking = true;
							client->defaultLockingSetProperties = false;
							client->defaultLockingSingleThreadExternalMethod = false;
							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
						}
					}
				}
			}
			if (client->sharedInstance) {
				IOLockUnlock(gIOUserClientOwnersLock);
			}

			OSObject * requiredEntitlement = client->copyProperty(gIOUserClientEntitlementsKey);
			OSString * requiredEntitlementString = OSDynamicCast(OSString, requiredEntitlement);
			//If this is an IOUserClient2022, having kIOUserClientEntitlementsKey is mandatory.
			//If it has kIOUserClientEntitlementsKey, the value must be either kOSBooleanFalse or an OSString
			//If the value is kOSBooleanFalse, we allow access.
			//If the value is an OSString, we allow access if the task has the named entitlement
			if (client->uc2022) {
				if (!requiredEntitlement) {
					IOLog("IOUC %s missing " kIOUserClientEntitlementsKey " property\n",
					    client->getMetaClass()->getClassName());
					disallowAccess = true;
				} else if (!requiredEntitlementString && requiredEntitlement != kOSBooleanFalse) {
					IOLog("IOUC %s had " kIOUserClientEntitlementsKey "with value not boolean false or string\n", client->getMetaClass()->getClassName());
					disallowAccess = true;
				}
			}

			if (requiredEntitlement && disallowAccess == false) {
				if (kOSBooleanFalse == requiredEntitlement) {
					// allow
					disallowAccess = false;
				} else {
					disallowAccess = !IOTaskHasEntitlement(owningTask, requiredEntitlementString->getCStringNoCopy());
					if (disallowAccess) {
						IOLog("IOUC %s missing entitlement in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			OSSafeReleaseNULL(requiredEntitlement);

			if (disallowAccess) {
				res = kIOReturnNotPrivileged;
			}
#if CONFIG_MACF
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				IOLog("IOUC %s failed MACF in process %s\n",
				    client->getMetaClass()->getClassName(), creatorNameCStr);
				res = kIOReturnNotPermitted;
			}
#endif

			/* Resolve and attach a sandbox filter policy for this task. */
			if ((kIOReturnSuccess == res)
			    && gIOUCFilterCallbacks
			    && gIOUCFilterCallbacks->io_filter_resolver) {
				io_filter_policy_t filterPolicy;
				filterPolicy = client->filterForTask(owningTask, 0);
				if (!filterPolicy) {
					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
					if (kIOReturnUnsupported == res) {
						/* No policy for this client: not an error. */
						res = kIOReturnSuccess;
					} else if (kIOReturnSuccess == res) {
						client->filterForTask(owningTask, filterPolicy);
					} else {
						IOLog("IOUC %s failed sandbox in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}
			OSSafeReleaseNULL(creatorName);

			/* Any failure after a successful open: close and drop the client. */
			if (kIOReturnSuccess != res) {
				IOStatisticsClientCall();
				client->clientClose();
				client->setTerminateDefer(service, false);
				client->release();
				client = NULL;
				break;
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	*connection = client;
	*result = res;

	return err;
}
4467
/* Routine io_service_close */
/*
 * Close a user-client connection.  clientClose() is invoked at most
 * once for non-shared clients (guarded by the atomic 'closed' flag);
 * shared-instance clients may be closed repeatedly.
 */
kern_return_t
is_io_service_close(
	io_object_t connection )
{
	OSSet * mappings;
	/*
	 * NOTE(review): the port can resolve to the client's OSSet of memory
	 * mappings rather than the client itself (see
	 * is_io_connect_map_memory_into_task); nothing to close then.
	 */
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	/* Atomically claim the close (0 -> 1) unless the client is shared. */
	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		IORWLockWrite(&client->lock);
		client->clientClose();
		IORWLockUnlock(&client->lock);
	} else {
		/* Second close of a non-shared client: log and ignore. */
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
4493
4494 /* Routine io_connect_get_service */
4495 kern_return_t
is_io_connect_get_service(io_object_t connection,io_object_t * service)4496 is_io_connect_get_service(
4497 io_object_t connection,
4498 io_object_t *service )
4499 {
4500 IOService * theService;
4501
4502 CHECK( IOUserClient, connection, client );
4503
4504 theService = client->getService();
4505 if (theService) {
4506 theService->retain();
4507 }
4508
4509 *service = theService;
4510
4511 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4512 }
4513
4514 /* Routine io_connect_set_notification_port */
4515 kern_return_t
is_io_connect_set_notification_port(io_object_t connection,uint32_t notification_type,mach_port_t port,uint32_t reference)4516 is_io_connect_set_notification_port(
4517 io_object_t connection,
4518 uint32_t notification_type,
4519 mach_port_t port,
4520 uint32_t reference)
4521 {
4522 kern_return_t ret;
4523 CHECK( IOUserClient, connection, client );
4524
4525 IOStatisticsClientCall();
4526 IORWLockWrite(&client->lock);
4527 ret = client->registerNotificationPort( port, notification_type,
4528 (io_user_reference_t) reference );
4529 IORWLockUnlock(&client->lock);
4530 return ret;
4531 }
4532
4533 /* Routine io_connect_set_notification_port */
4534 kern_return_t
is_io_connect_set_notification_port_64(io_object_t connection,uint32_t notification_type,mach_port_t port,io_user_reference_t reference)4535 is_io_connect_set_notification_port_64(
4536 io_object_t connection,
4537 uint32_t notification_type,
4538 mach_port_t port,
4539 io_user_reference_t reference)
4540 {
4541 kern_return_t ret;
4542 CHECK( IOUserClient, connection, client );
4543
4544 IOStatisticsClientCall();
4545 IORWLockWrite(&client->lock);
4546 ret = client->registerNotificationPort( port, notification_type,
4547 reference );
4548 IORWLockUnlock(&client->lock);
4549 return ret;
4550 }
4551
/* Routine io_connect_map_memory_into_task */
/*
 * Map the client memory identified by 'memory_type' into 'into_task'.
 * On success *address / *size describe the new mapping.
 *
 * Ownership of the resulting IOMemoryMap: for shared-instance clients
 * or mappings into a foreign task, a send right naming the map is
 * pushed to the target task; otherwise the map is parked on the
 * client's 'mappings' set so it can be cleaned up with the client.
 */
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
	)
{
	IOReturn      err;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	/* Serialize with other client calls when default locking is enabled. */
	if (client->defaultLocking) {
		IORWLockWrite(&client->lock);
	}
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );
	if (client->defaultLocking) {
		IORWLockUnlock(&client->lock);
	}

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			/* The port now holds the reference; drop ours. */
			map->release();
		} else {
			// keep it with the user client
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			/* The set (if created) holds the reference; drop ours. */
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	return err;
}
4615
4616 /* Routine is_io_connect_map_memory */
4617 kern_return_t
is_io_connect_map_memory(io_object_t connect,uint32_t type,task_t task,uint32_t * mapAddr,uint32_t * mapSize,uint32_t flags)4618 is_io_connect_map_memory(
4619 io_object_t connect,
4620 uint32_t type,
4621 task_t task,
4622 uint32_t * mapAddr,
4623 uint32_t * mapSize,
4624 uint32_t flags )
4625 {
4626 IOReturn err;
4627 mach_vm_address_t address;
4628 mach_vm_size_t size;
4629
4630 address = SCALAR64(*mapAddr);
4631 size = SCALAR64(*mapSize);
4632
4633 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4634
4635 *mapAddr = SCALAR32(address);
4636 *mapSize = SCALAR32(size);
4637
4638 return err;
4639 }
4640 } /* extern "C" */
4641
4642 IOMemoryMap *
removeMappingForDescriptor(IOMemoryDescriptor * mem)4643 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4644 {
4645 OSIterator * iter;
4646 IOMemoryMap * map = NULL;
4647
4648 IOLockLock(gIOObjectPortLock);
4649
4650 iter = OSCollectionIterator::withCollection(mappings);
4651 if (iter) {
4652 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4653 if (mem == map->getMemoryDescriptor()) {
4654 map->retain();
4655 mappings->removeObject(map);
4656 break;
4657 }
4658 }
4659 iter->release();
4660 }
4661
4662 IOLockUnlock(gIOObjectPortLock);
4663
4664 return map;
4665 }
4666
4667 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
/*
 * Undo a mapping made by is_io_connect_map_memory_into_task.  The
 * client's memory descriptor for 'memory_type' is looked up again and a
 * kIOMapReference mapping at 'address' is created to locate the
 * existing IOMemoryMap, which is then unmapped and its port name
 * reference dropped.
 */
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn             err;
	IOOptionBits         options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap *        map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	/* Serialize with other client calls when default locking is enabled. */
	if (client->defaultLocking) {
		IORWLockWrite(&client->lock);
	}
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );
	if (client->defaultLocking) {
		IORWLockUnlock(&client->lock);
	}

	if (memory && (kIOReturnSuccess == err)) {
		/* kIOMapReference: resolve the existing mapping at 'address'. */
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			/* Forget the map in the client's local mappings set, if present. */
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance;
			if (is_shared_instance_or_from_current_task) {
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				map->userClientUnmap();
				/*
				 * NOTE(review): -2 drops two user references on the map's
				 * port name in from_task — confirm against IOMachPort
				 * send-right accounting.
				 */
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (!is_shared_instance_or_from_current_task) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	return err;
}
4734
4735 kern_return_t
is_io_connect_unmap_memory(io_object_t connect,uint32_t type,task_t task,uint32_t mapAddr)4736 is_io_connect_unmap_memory(
4737 io_object_t connect,
4738 uint32_t type,
4739 task_t task,
4740 uint32_t mapAddr )
4741 {
4742 IOReturn err;
4743 mach_vm_address_t address;
4744
4745 address = SCALAR64(mapAddr);
4746
4747 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4748
4749 return err;
4750 }
4751
4752
4753 /* Routine io_connect_add_client */
4754 kern_return_t
is_io_connect_add_client(io_object_t connection,io_object_t connect_to)4755 is_io_connect_add_client(
4756 io_object_t connection,
4757 io_object_t connect_to)
4758 {
4759 CHECK( IOUserClient, connection, client );
4760 CHECK( IOUserClient, connect_to, to );
4761
4762 IOReturn ret;
4763
4764 IOStatisticsClientCall();
4765 if (client->defaultLocking) {
4766 IORWLockWrite(&client->lock);
4767 }
4768 ret = client->connectClient( to );
4769 if (client->defaultLocking) {
4770 IORWLockUnlock(&client->lock);
4771 }
4772 return ret;
4773 }
4774
4775
4776 /* Routine io_connect_set_properties */
4777 kern_return_t
is_io_connect_set_properties(io_object_t connection,io_buf_ptr_t properties,mach_msg_type_number_t propertiesCnt,kern_return_t * result)4778 is_io_connect_set_properties(
4779 io_object_t connection,
4780 io_buf_ptr_t properties,
4781 mach_msg_type_number_t propertiesCnt,
4782 kern_return_t * result)
4783 {
4784 return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4785 }
4786
/* Routine io_user_client_method */
/*
 * External-method entry whose structure output is returned in a
 * variable-length kernel-allocated buffer (*var_output / *var_outputCnt,
 * copied out via copyoutkdata()).  Scalar and inband/out-of-line inputs
 * are marshaled as in is_io_connect_method.
 */
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	OSObject * structureVariableOutputData = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	/* Synchronous call: no async wake port or references. */
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	/* The method deposits its variable-length result object here. */
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/* Payloads small enough for inband must not arrive out-of-line. */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Zero the caller-visible scalar output before the method runs. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;

	/* Sandbox/filter policy may veto this selector for the calling task. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}

	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod(selector, &args);
	}

	/* Report back the sizes the method actually produced. */
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	/* Copy the variable output (OSSerialize text or raw OSData) out. */
	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData * data;
		unsigned int len;

		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			data->clipForCopyout();
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			/* Method succeeded but produced no recognized output object. */
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
4893
/* Routine io_user_client_method */
/*
 * Main synchronous external-method entry.  Marshals scalar and
 * structure input/output — inband buffers or out-of-line regions
 * wrapped in memory descriptors — applies the sandbox filter, then
 * invokes the client's external method for 'selector'.
 */
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	/* Synchronous call: no async wake port, no variable-length output. */
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/* Payloads small enough for inband must not arrive out-of-line. */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		/* structureOutputDescriptorSize is 32-bit; reject anything larger. */
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Zero the caller-visible scalar output before the method runs. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size
	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
	    : 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	/* Sandbox/filter policy may veto this selector for the calling task. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	/* Report back the sizes the method actually produced. */
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
4996
/* Routine io_async_user_client_method */
/*
 * Asynchronous variant of is_io_connect_method: 'wake_port' and the
 * caller's 'reference' array are packed into IOExternalMethodArguments
 * so the method can post a completion later.  Buffer marshaling matches
 * the synchronous path.
 */
kern_return_t
is_io_connect_async_method
(
	io_connect_t connection,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t * ool_output_size
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	/* reference[0] is overwritten below; need at least one slot. */
	if (referenceCnt < 1) {
		return kIOReturnBadArgument;
	}

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	/* Stash the wake port (plus a 64-bit-task flag) in the first reference. */
	reference[0] = (io_user_reference_t) wake_port;
	if (vm_map_is_64bit(get_task_map(current_task()))) {
		reference[0] |= kIOUCAsync64Flag;
	}

	args.selector = selector;

	args.asyncWakePort = wake_port;
	args.asyncReference = reference;
	args.asyncReferenceCount = referenceCnt;

	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/* Payloads small enough for inband must not arrive out-of-line. */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		/* structureOutputDescriptorSize is 32-bit; reject anything larger. */
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Zero the caller-visible scalar output before the method runs. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	/* Sandbox/filter policy may veto this selector for the calling task. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_async_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	/* Report back the sizes the method actually produced. */
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5110
5111 /* Routine io_connect_method_scalarI_scalarO */
5112 kern_return_t
is_io_connect_method_scalarI_scalarO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5113 is_io_connect_method_scalarI_scalarO(
5114 io_object_t connect,
5115 uint32_t index,
5116 io_scalar_inband_t input,
5117 mach_msg_type_number_t inputCount,
5118 io_scalar_inband_t output,
5119 mach_msg_type_number_t * outputCount )
5120 {
5121 IOReturn err;
5122 uint32_t i;
5123 io_scalar_inband64_t _input;
5124 io_scalar_inband64_t _output;
5125
5126 mach_msg_type_number_t struct_outputCnt = 0;
5127 mach_vm_size_t ool_output_size = 0;
5128
5129 bzero(&_output[0], sizeof(_output));
5130 for (i = 0; i < inputCount; i++) {
5131 _input[i] = SCALAR64(input[i]);
5132 }
5133
5134 err = is_io_connect_method(connect, index,
5135 _input, inputCount,
5136 NULL, 0,
5137 0, 0,
5138 NULL, &struct_outputCnt,
5139 _output, outputCount,
5140 0, &ool_output_size);
5141
5142 for (i = 0; i < *outputCount; i++) {
5143 output[i] = SCALAR32(_output[i]);
5144 }
5145
5146 return err;
5147 }
5148
/*
 * Shim a legacy IOExternalMethod taking only scalar inputs and scalar
 * outputs.  The method-table entry fixes the expected counts: count0
 * scalar inputs and count1 scalar outputs.  The inputs and output-slot
 * pointers are packed into the IOMethod's six argument slots: inputCount
 * converted inputs first, then pointers into the scratch _output array.
 * Count mismatches are logged and fail with kIOReturnBadArgument.
 */
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOMethod func;
	io_scalar_inband_t _output;
	IOReturn err;
	err = kIOReturnBadArgument;

	/* Clear the scratch slots so untouched outputs read back as zero. */
	bzero(&_output[0], sizeof(_output));
	do {
		/* Validate caller counts against the method table entry. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): continue == bail out */
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Pack inputs then output-slot pointers into the 6 argument
		 * slots.  ARG32() adapts each 64-bit user scalar for the legacy
		 * 32-bit method signature — presumably truncation; confirm
		 * against the macro definition. */
		switch (inputCount) {
		case 6:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Copy the scratch outputs back to the caller (count verified above). */
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5224
5225 /* Routine io_async_method_scalarI_scalarO */
5226 kern_return_t
is_io_async_method_scalarI_scalarO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5227 is_io_async_method_scalarI_scalarO(
5228 io_object_t connect,
5229 mach_port_t wake_port,
5230 io_async_ref_t reference,
5231 mach_msg_type_number_t referenceCnt,
5232 uint32_t index,
5233 io_scalar_inband_t input,
5234 mach_msg_type_number_t inputCount,
5235 io_scalar_inband_t output,
5236 mach_msg_type_number_t * outputCount )
5237 {
5238 IOReturn err;
5239 uint32_t i;
5240 io_scalar_inband64_t _input;
5241 io_scalar_inband64_t _output;
5242 io_async_ref64_t _reference;
5243
5244 if (referenceCnt > ASYNC_REF64_COUNT) {
5245 return kIOReturnBadArgument;
5246 }
5247 bzero(&_output[0], sizeof(_output));
5248 for (i = 0; i < referenceCnt; i++) {
5249 _reference[i] = REF64(reference[i]);
5250 }
5251 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5252
5253 mach_msg_type_number_t struct_outputCnt = 0;
5254 mach_vm_size_t ool_output_size = 0;
5255
5256 for (i = 0; i < inputCount; i++) {
5257 _input[i] = SCALAR64(input[i]);
5258 }
5259
5260 err = is_io_connect_async_method(connect,
5261 wake_port, _reference, referenceCnt,
5262 index,
5263 _input, inputCount,
5264 NULL, 0,
5265 0, 0,
5266 NULL, &struct_outputCnt,
5267 _output, outputCount,
5268 0, &ool_output_size);
5269
5270 for (i = 0; i < *outputCount; i++) {
5271 output[i] = SCALAR32(_output[i]);
5272 }
5273
5274 return err;
5275 }
5276 /* Routine io_async_method_scalarI_structureO */
5277 kern_return_t
is_io_async_method_scalarI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5278 is_io_async_method_scalarI_structureO(
5279 io_object_t connect,
5280 mach_port_t wake_port,
5281 io_async_ref_t reference,
5282 mach_msg_type_number_t referenceCnt,
5283 uint32_t index,
5284 io_scalar_inband_t input,
5285 mach_msg_type_number_t inputCount,
5286 io_struct_inband_t output,
5287 mach_msg_type_number_t * outputCount )
5288 {
5289 uint32_t i;
5290 io_scalar_inband64_t _input;
5291 io_async_ref64_t _reference;
5292
5293 if (referenceCnt > ASYNC_REF64_COUNT) {
5294 return kIOReturnBadArgument;
5295 }
5296 for (i = 0; i < referenceCnt; i++) {
5297 _reference[i] = REF64(reference[i]);
5298 }
5299 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5300
5301 mach_msg_type_number_t scalar_outputCnt = 0;
5302 mach_vm_size_t ool_output_size = 0;
5303
5304 for (i = 0; i < inputCount; i++) {
5305 _input[i] = SCALAR64(input[i]);
5306 }
5307
5308 return is_io_connect_async_method(connect,
5309 wake_port, _reference, referenceCnt,
5310 index,
5311 _input, inputCount,
5312 NULL, 0,
5313 0, 0,
5314 output, outputCount,
5315 NULL, &scalar_outputCnt,
5316 0, &ool_output_size);
5317 }
5318
5319 /* Routine io_async_method_scalarI_structureI */
5320 kern_return_t
is_io_async_method_scalarI_structureI(io_connect_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5321 is_io_async_method_scalarI_structureI(
5322 io_connect_t connect,
5323 mach_port_t wake_port,
5324 io_async_ref_t reference,
5325 mach_msg_type_number_t referenceCnt,
5326 uint32_t index,
5327 io_scalar_inband_t input,
5328 mach_msg_type_number_t inputCount,
5329 io_struct_inband_t inputStruct,
5330 mach_msg_type_number_t inputStructCount )
5331 {
5332 uint32_t i;
5333 io_scalar_inband64_t _input;
5334 io_async_ref64_t _reference;
5335
5336 if (referenceCnt > ASYNC_REF64_COUNT) {
5337 return kIOReturnBadArgument;
5338 }
5339 for (i = 0; i < referenceCnt; i++) {
5340 _reference[i] = REF64(reference[i]);
5341 }
5342 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5343
5344 mach_msg_type_number_t scalar_outputCnt = 0;
5345 mach_msg_type_number_t inband_outputCnt = 0;
5346 mach_vm_size_t ool_output_size = 0;
5347
5348 for (i = 0; i < inputCount; i++) {
5349 _input[i] = SCALAR64(input[i]);
5350 }
5351
5352 return is_io_connect_async_method(connect,
5353 wake_port, _reference, referenceCnt,
5354 index,
5355 _input, inputCount,
5356 inputStruct, inputStructCount,
5357 0, 0,
5358 NULL, &inband_outputCnt,
5359 NULL, &scalar_outputCnt,
5360 0, &ool_output_size);
5361 }
5362
5363 /* Routine io_async_method_structureI_structureO */
5364 kern_return_t
is_io_async_method_structureI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5365 is_io_async_method_structureI_structureO(
5366 io_object_t connect,
5367 mach_port_t wake_port,
5368 io_async_ref_t reference,
5369 mach_msg_type_number_t referenceCnt,
5370 uint32_t index,
5371 io_struct_inband_t input,
5372 mach_msg_type_number_t inputCount,
5373 io_struct_inband_t output,
5374 mach_msg_type_number_t * outputCount )
5375 {
5376 uint32_t i;
5377 mach_msg_type_number_t scalar_outputCnt = 0;
5378 mach_vm_size_t ool_output_size = 0;
5379 io_async_ref64_t _reference;
5380
5381 if (referenceCnt > ASYNC_REF64_COUNT) {
5382 return kIOReturnBadArgument;
5383 }
5384 for (i = 0; i < referenceCnt; i++) {
5385 _reference[i] = REF64(reference[i]);
5386 }
5387 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5388
5389 return is_io_connect_async_method(connect,
5390 wake_port, _reference, referenceCnt,
5391 index,
5392 NULL, 0,
5393 input, inputCount,
5394 0, 0,
5395 output, outputCount,
5396 NULL, &scalar_outputCnt,
5397 0, &ool_output_size);
5398 }
5399
5400
/*
 * Shim a legacy IOExternalAsyncMethod taking scalar inputs and scalar
 * outputs.  Mirrors shim_io_connect_method_scalarI_scalarO, but the
 * IOAsyncMethod receives the narrowed 32-bit async reference as its
 * first argument, followed by up to six packed input/output slots.
 * NOTE(review): asyncReferenceCount is not range-checked here against
 * the io_async_ref_t capacity — presumably the caller validates it;
 * confirm at the call sites.
 */
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	io_scalar_inband_t _output;
	IOReturn err;
	io_async_ref_t reference;

	/* Clear scratch outputs; narrow the 64-bit async reference to the
	 * legacy 32-bit form expected by IOAsyncMethod. */
	bzero(&_output[0], sizeof(_output));
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	do {
		/* Validate caller counts against the method table entry. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): continue == bail out */
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Pack: reference first, then inputs, then output-slot pointers. */
		switch (inputCount) {
		case 6:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Copy the scratch outputs back to the caller (count verified above). */
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5492
5493
5494 /* Routine io_connect_method_scalarI_structureO */
5495 kern_return_t
is_io_connect_method_scalarI_structureO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5496 is_io_connect_method_scalarI_structureO(
5497 io_object_t connect,
5498 uint32_t index,
5499 io_scalar_inband_t input,
5500 mach_msg_type_number_t inputCount,
5501 io_struct_inband_t output,
5502 mach_msg_type_number_t * outputCount )
5503 {
5504 uint32_t i;
5505 io_scalar_inband64_t _input;
5506
5507 mach_msg_type_number_t scalar_outputCnt = 0;
5508 mach_vm_size_t ool_output_size = 0;
5509
5510 for (i = 0; i < inputCount; i++) {
5511 _input[i] = SCALAR64(input[i]);
5512 }
5513
5514 return is_io_connect_method(connect, index,
5515 _input, inputCount,
5516 NULL, 0,
5517 0, 0,
5518 output, outputCount,
5519 NULL, &scalar_outputCnt,
5520 0, &ool_output_size);
5521 }
5522
/*
 * Shim a legacy IOExternalMethod taking scalar inputs and a structure
 * output.  The scalar inputs occupy the leading argument slots; the
 * structure buffer (and, when a slot remains, a pointer to its size)
 * fills the rest, with trailing slots passed as NULL.  count1 equal to
 * kIOUCVariableStructureSize accepts any output size.
 */
kern_return_t
shim_io_connect_method_scalarI_structureO(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	IOMethod func;
	IOReturn err;

	err = kIOReturnBadArgument;

	do {
		/* Validate caller counts against the method table entry. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): continue == bail out */
		}
		/* Variable-size methods skip the exact output-size check. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Pack: inputs, then the output buffer; the size pointer is
		 * only passed when a slot remains (inputCount <= 4). */
		switch (inputCount) {
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5587
5588
/*
 * Shim a legacy IOExternalAsyncMethod taking scalar inputs and a
 * structure output.  Same packing as the synchronous
 * shim_io_connect_method_scalarI_structureO, with the narrowed 32-bit
 * async reference prepended as the method's first argument.  count1
 * equal to kIOUCVariableStructureSize accepts any output size.
 * NOTE(review): outputCount here is a mach_msg_type_number_t*, cast to
 * void* where the sync shim passes an IOByteCount* — the pointee widths
 * may differ; confirm the legacy methods treat it consistently.
 */
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	/* Narrow the 64-bit async reference for the legacy method. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do {
		/* Validate caller counts against the method table entry. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): continue == bail out */
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Pack: reference, inputs, output buffer, then its size pointer
		 * when a slot remains (inputCount <= 4). */
		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5666
5667 /* Routine io_connect_method_scalarI_structureI */
5668 kern_return_t
is_io_connect_method_scalarI_structureI(io_connect_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5669 is_io_connect_method_scalarI_structureI(
5670 io_connect_t connect,
5671 uint32_t index,
5672 io_scalar_inband_t input,
5673 mach_msg_type_number_t inputCount,
5674 io_struct_inband_t inputStruct,
5675 mach_msg_type_number_t inputStructCount )
5676 {
5677 uint32_t i;
5678 io_scalar_inband64_t _input;
5679
5680 mach_msg_type_number_t scalar_outputCnt = 0;
5681 mach_msg_type_number_t inband_outputCnt = 0;
5682 mach_vm_size_t ool_output_size = 0;
5683
5684 for (i = 0; i < inputCount; i++) {
5685 _input[i] = SCALAR64(input[i]);
5686 }
5687
5688 return is_io_connect_method(connect, index,
5689 _input, inputCount,
5690 inputStruct, inputStructCount,
5691 0, 0,
5692 NULL, &inband_outputCnt,
5693 NULL, &scalar_outputCnt,
5694 0, &ool_output_size);
5695 }
5696
/*
 * Shim a legacy IOExternalMethod taking scalar inputs and a structure
 * input.  Scalars occupy the leading slots; the structure buffer and
 * its size follow, with remaining slots NULL.  count1 equal to
 * kIOUCVariableStructureSize accepts any input-structure size.
 */
kern_return_t
shim_io_connect_method_scalarI_structureI(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	IOMethod func;
	IOReturn err = kIOReturnBadArgument;

	do{
		/* Validate caller counts against the method table entry. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): continue == bail out */
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			/* NOTE(review): input[2] is passed as (void *) — the full
			 * 64-bit scalar — while every other scalar here goes
			 * through ARG32().  Long-standing upstream behavior;
			 * confirm it is intentional before changing. */
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5762
/*
 * Shim a legacy IOExternalAsyncMethod taking scalar inputs and a
 * structure input.  Same packing as the synchronous
 * shim_io_connect_method_scalarI_structureI, with the narrowed 32-bit
 * async reference prepended as the method's first argument.  count1
 * equal to kIOUCVariableStructureSize accepts any input-structure size.
 */
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err = kIOReturnBadArgument;
	io_async_ref_t reference;

	/* Narrow the 64-bit async reference for the legacy method. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	do{
		/* Validate caller counts against the method table entry. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): continue == bail out */
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Pack: reference, scalars, then structure buffer and size. */
		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5843
5844 /* Routine io_connect_method_structureI_structureO */
5845 kern_return_t
is_io_connect_method_structureI_structureO(io_object_t connect,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5846 is_io_connect_method_structureI_structureO(
5847 io_object_t connect,
5848 uint32_t index,
5849 io_struct_inband_t input,
5850 mach_msg_type_number_t inputCount,
5851 io_struct_inband_t output,
5852 mach_msg_type_number_t * outputCount )
5853 {
5854 mach_msg_type_number_t scalar_outputCnt = 0;
5855 mach_vm_size_t ool_output_size = 0;
5856
5857 return is_io_connect_method(connect, index,
5858 NULL, 0,
5859 input, inputCount,
5860 0, 0,
5861 output, outputCount,
5862 NULL, &scalar_outputCnt,
5863 0, &ool_output_size);
5864 }
5865
/*
 * Shim a legacy IOExternalMethod taking a structure input and a
 * structure output.  Argument packing is chosen by which of count0
 * (input size) and count1 (output size) are non-zero; a count equal to
 * kIOUCVariableStructureSize accepts any size for that direction.
 */
kern_return_t
shim_io_connect_method_structureI_structureO(
	IOExternalMethod * method,
	IOService * object,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	IOMethod func;
	IOReturn err = kIOReturnBadArgument;

	do{
		/* Validate caller sizes against the method table entry;
		 * variable-size directions skip the exact check. */
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): continue == bail out */
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		if (method->count1) {
			if (method->count0) {
				/* Both directions: input, output, then both sizes. */
				err = (object->*func)( input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				/* Output only. */
				err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			/* Input only. */
			err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);


	return err;
}
5909
/*
 * Shim a legacy IOExternalAsyncMethod taking a structure input and a
 * structure output.  Same packing as the synchronous
 * shim_io_connect_method_structureI_structureO, with the narrowed
 * 32-bit async reference prepended as the method's first argument.
 */
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	/* Narrow the 64-bit async reference for the legacy method. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do{
		/* Validate caller sizes against the method table entry;
		 * variable-size directions skip the exact check. */
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue; /* do{}while(false): continue == bail out */
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		if (method->count1) {
			if (method->count0) {
				/* Both directions: input, output, then both sizes. */
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				/* Output only. */
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			/* Input only. */
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
5965
5966 /* Routine io_catalog_send_data */
5967 kern_return_t
is_io_catalog_send_data(mach_port_t main_port,uint32_t flag,io_buf_ptr_t inData,mach_msg_type_number_t inDataCount,kern_return_t * result)5968 is_io_catalog_send_data(
5969 mach_port_t main_port,
5970 uint32_t flag,
5971 io_buf_ptr_t inData,
5972 mach_msg_type_number_t inDataCount,
5973 kern_return_t * result)
5974 {
5975 // Allow sending catalog data if there is no kextd and the kernel is DEVELOPMENT || DEBUG
5976 #if NO_KEXTD && !(DEVELOPMENT || DEBUG)
5977 return kIOReturnNotPrivileged;
5978 #else /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
5979 OSObject * obj = NULL;
5980 vm_offset_t data;
5981 kern_return_t kr = kIOReturnError;
5982
5983 //printf("io_catalog_send_data called. flag: %d\n", flag);
5984
5985 if (main_port != main_device_port) {
5986 return kIOReturnNotPrivileged;
5987 }
5988
5989 if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
5990 flag != kIOCatalogKextdActive &&
5991 flag != kIOCatalogKextdFinishedLaunching) &&
5992 (!inData || !inDataCount)) {
5993 return kIOReturnBadArgument;
5994 }
5995
5996 if (!IOCurrentTaskHasEntitlement(kIOCatalogManagementEntitlement)) {
5997 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
5998 IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
5999 OSSafeReleaseNULL(taskName);
6000 // For now, fake success to not break applications relying on this function succeeding.
6001 // See <rdar://problem/32554970> for more details.
6002 return kIOReturnSuccess;
6003 }
6004
6005 if (inData) {
6006 vm_map_offset_t map_data;
6007
6008 if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
6009 return kIOReturnMessageTooLarge;
6010 }
6011
6012 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
6013 data = CAST_DOWN(vm_offset_t, map_data);
6014
6015 if (kr != KERN_SUCCESS) {
6016 return kr;
6017 }
6018
6019 // must return success after vm_map_copyout() succeeds
6020
6021 if (inDataCount) {
6022 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
6023 vm_deallocate( kernel_map, data, inDataCount );
6024 if (!obj) {
6025 *result = kIOReturnNoMemory;
6026 return KERN_SUCCESS;
6027 }
6028 }
6029 }
6030
6031 switch (flag) {
6032 case kIOCatalogResetDrivers:
6033 case kIOCatalogResetDriversNoMatch: {
6034 OSArray * array;
6035
6036 array = OSDynamicCast(OSArray, obj);
6037 if (array) {
6038 if (!gIOCatalogue->resetAndAddDrivers(array,
6039 flag == kIOCatalogResetDrivers)) {
6040 kr = kIOReturnError;
6041 }
6042 } else {
6043 kr = kIOReturnBadArgument;
6044 }
6045 }
6046 break;
6047
6048 case kIOCatalogAddDrivers:
6049 case kIOCatalogAddDriversNoMatch: {
6050 OSArray * array;
6051
6052 array = OSDynamicCast(OSArray, obj);
6053 if (array) {
6054 if (!gIOCatalogue->addDrivers( array,
6055 flag == kIOCatalogAddDrivers)) {
6056 kr = kIOReturnError;
6057 }
6058 } else {
6059 kr = kIOReturnBadArgument;
6060 }
6061 }
6062 break;
6063
6064 case kIOCatalogRemoveDrivers:
6065 case kIOCatalogRemoveDriversNoMatch: {
6066 OSDictionary * dict;
6067
6068 dict = OSDynamicCast(OSDictionary, obj);
6069 if (dict) {
6070 if (!gIOCatalogue->removeDrivers( dict,
6071 flag == kIOCatalogRemoveDrivers )) {
6072 kr = kIOReturnError;
6073 }
6074 } else {
6075 kr = kIOReturnBadArgument;
6076 }
6077 }
6078 break;
6079
6080 case kIOCatalogStartMatching__Removed:
6081 case kIOCatalogRemoveKernelLinker__Removed:
6082 case kIOCatalogKextdActive:
6083 case kIOCatalogKextdFinishedLaunching:
6084 kr = KERN_NOT_SUPPORTED;
6085 break;
6086
6087 default:
6088 kr = kIOReturnBadArgument;
6089 break;
6090 }
6091
6092 if (obj) {
6093 obj->release();
6094 }
6095
6096 *result = kr;
6097 return KERN_SUCCESS;
6098 #endif /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
6099 }
6100
6101 /* Routine io_catalog_terminate */
6102 kern_return_t
is_io_catalog_terminate(mach_port_t main_port,uint32_t flag,io_name_t name)6103 is_io_catalog_terminate(
6104 mach_port_t main_port,
6105 uint32_t flag,
6106 io_name_t name )
6107 {
6108 kern_return_t kr;
6109
6110 if (main_port != main_device_port) {
6111 return kIOReturnNotPrivileged;
6112 }
6113
6114 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6115 kIOClientPrivilegeAdministrator );
6116 if (kIOReturnSuccess != kr) {
6117 return kr;
6118 }
6119
6120 switch (flag) {
6121 #if !defined(SECURE_KERNEL)
6122 case kIOCatalogServiceTerminate:
6123 kr = gIOCatalogue->terminateDrivers(NULL, name, false);
6124 break;
6125
6126 case kIOCatalogModuleUnload:
6127 case kIOCatalogModuleTerminate:
6128 kr = gIOCatalogue->terminateDriversForModule(name,
6129 flag == kIOCatalogModuleUnload);
6130 break;
6131 #endif
6132
6133 default:
6134 kr = kIOReturnBadArgument;
6135 break;
6136 }
6137
6138 return kr;
6139 }
6140
6141 /* Routine io_catalog_get_data */
6142 kern_return_t
is_io_catalog_get_data(mach_port_t main_port,uint32_t flag,io_buf_ptr_t * outData,mach_msg_type_number_t * outDataCount)6143 is_io_catalog_get_data(
6144 mach_port_t main_port,
6145 uint32_t flag,
6146 io_buf_ptr_t *outData,
6147 mach_msg_type_number_t *outDataCount)
6148 {
6149 kern_return_t kr = kIOReturnSuccess;
6150 OSSerialize * s;
6151
6152 if (main_port != main_device_port) {
6153 return kIOReturnNotPrivileged;
6154 }
6155
6156 //printf("io_catalog_get_data called. flag: %d\n", flag);
6157
6158 s = OSSerialize::withCapacity(4096);
6159 if (!s) {
6160 return kIOReturnNoMemory;
6161 }
6162
6163 kr = gIOCatalogue->serializeData(flag, s);
6164
6165 if (kr == kIOReturnSuccess) {
6166 mach_vm_address_t data;
6167 vm_map_copy_t copy;
6168 unsigned int size;
6169
6170 size = s->getLength();
6171 kr = mach_vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
6172 if (kr == kIOReturnSuccess) {
6173 bcopy(s->text(), (void *)data, size);
6174 kr = vm_map_copyin(kernel_map, data, size, true, ©);
6175 *outData = (char *)copy;
6176 *outDataCount = size;
6177 }
6178 }
6179
6180 s->release();
6181
6182 return kr;
6183 }
6184
6185 /* Routine io_catalog_get_gen_count */
6186 kern_return_t
is_io_catalog_get_gen_count(mach_port_t main_port,uint32_t * genCount)6187 is_io_catalog_get_gen_count(
6188 mach_port_t main_port,
6189 uint32_t *genCount)
6190 {
6191 if (main_port != main_device_port) {
6192 return kIOReturnNotPrivileged;
6193 }
6194
6195 //printf("io_catalog_get_gen_count called.\n");
6196
6197 if (!genCount) {
6198 return kIOReturnBadArgument;
6199 }
6200
6201 *genCount = gIOCatalogue->getGenerationCount();
6202
6203 return kIOReturnSuccess;
6204 }
6205
6206 /* Routine io_catalog_module_loaded.
6207 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
6208 */
6209 kern_return_t
is_io_catalog_module_loaded(mach_port_t main_port,io_name_t name)6210 is_io_catalog_module_loaded(
6211 mach_port_t main_port,
6212 io_name_t name)
6213 {
6214 if (main_port != main_device_port) {
6215 return kIOReturnNotPrivileged;
6216 }
6217
6218 //printf("io_catalog_module_loaded called. name %s\n", name);
6219
6220 if (!name) {
6221 return kIOReturnBadArgument;
6222 }
6223
6224 gIOCatalogue->moduleHasLoaded(name);
6225
6226 return kIOReturnSuccess;
6227 }
6228
6229 kern_return_t
is_io_catalog_reset(mach_port_t main_port,uint32_t flag)6230 is_io_catalog_reset(
6231 mach_port_t main_port,
6232 uint32_t flag)
6233 {
6234 if (main_port != main_device_port) {
6235 return kIOReturnNotPrivileged;
6236 }
6237
6238 switch (flag) {
6239 case kIOCatalogResetDefault:
6240 gIOCatalogue->reset();
6241 break;
6242
6243 default:
6244 return kIOReturnBadArgument;
6245 }
6246
6247 return kIOReturnSuccess;
6248 }
6249
6250 kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args * args)6251 iokit_user_client_trap(struct iokit_user_client_trap_args *args)
6252 {
6253 kern_return_t result = kIOReturnBadArgument;
6254 IOUserClient * userClient;
6255 OSObject * object;
6256 uintptr_t ref;
6257 mach_port_name_t portName;
6258
6259 ref = (uintptr_t) args->userClientRef;
6260
6261 if ((ref == MACH_PORT_DEAD) || (ref == (uintptr_t) MACH_PORT_NULL)) {
6262 return kIOReturnBadArgument;
6263 }
6264 // kobject port names always have b0-1 set, so we use these bits as flags to
6265 // iokit_user_client_trap()
6266 // keep this up to date with ipc_entry_name_mask();
6267 portName = (mach_port_name_t) (ref | 3);
6268 if (((1ULL << 32) & ref) || !(1 & ref)) {
6269 object = iokit_lookup_uext_ref_current_task(portName);
6270 if (object) {
6271 result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
6272 }
6273 OSSafeReleaseNULL(object);
6274 } else {
6275 io_object_t ref_current_task = iokit_lookup_connect_ref_current_task((mach_port_name_t) ref);
6276 if ((userClient = OSDynamicCast(IOUserClient, ref_current_task))) {
6277 IOExternalTrap *trap = NULL;
6278 IOService *target = NULL;
6279
6280 result = kIOReturnSuccess;
6281 io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
6282 if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
6283 result = gIOUCFilterCallbacks->io_filter_applier(userClient, filterPolicy, io_filter_type_trap, args->index);
6284 }
6285 if (kIOReturnSuccess == result) {
6286 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
6287 }
6288 if (trap && target) {
6289 IOTrap func;
6290
6291 func = trap->func;
6292
6293 if (func) {
6294 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
6295 }
6296 }
6297
6298 iokit_remove_connect_reference(userClient);
6299 } else {
6300 OSSafeReleaseNULL(ref_current_task);
6301 }
6302 }
6303
6304 return result;
6305 }
6306
6307 /* Routine io_device_tree_entry_exists_with_name */
6308 kern_return_t
is_io_device_tree_entry_exists_with_name(mach_port_t main_port,io_name_t name,boolean_t * exists)6309 is_io_device_tree_entry_exists_with_name(
6310 mach_port_t main_port,
6311 io_name_t name,
6312 boolean_t *exists )
6313 {
6314 OSCollectionIterator *iter;
6315
6316 if (main_port != main_device_port) {
6317 return kIOReturnNotPrivileged;
6318 }
6319
6320 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6321 *exists = iter && iter->getNextObject();
6322 OSSafeReleaseNULL(iter);
6323
6324 return kIOReturnSuccess;
6325 }
6326 } /* extern "C" */
6327
6328 IOReturn
callExternalMethod(uint32_t selector,IOExternalMethodArguments * args)6329 IOUserClient::callExternalMethod(uint32_t selector, IOExternalMethodArguments * args)
6330 {
6331 IOReturn ret;
6332
6333 if (defaultLocking) {
6334 if (defaultLockingSingleThreadExternalMethod) {
6335 IORWLockWrite(&lock);
6336 } else {
6337 IORWLockRead(&lock);
6338 }
6339 }
6340 if (uc2022) {
6341 ret = ((IOUserClient2022 *) this)->externalMethod(selector, (IOExternalMethodArgumentsOpaque *) args);
6342 } else {
6343 ret = externalMethod(selector, args);
6344 }
6345 if (defaultLocking) {
6346 IORWLockUnlock(&lock);
6347 }
6348 return ret;
6349 }
6350
6351 MIG_SERVER_ROUTINE IOReturn
externalMethod(uint32_t selector,IOExternalMethodArguments * arguments,IOExternalMethodDispatch * dispatch,OSObject * target,void * reference)6352 IOUserClient2022::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
6353 IOExternalMethodDispatch *dispatch,
6354 OSObject *target, void *reference)
6355 {
6356 panic("wrong externalMethod for IOUserClient2022");
6357 }
6358
6359 IOReturn
dispatchExternalMethod(uint32_t selector,IOExternalMethodArgumentsOpaque * arguments,const IOExternalMethodDispatch2022 dispatchArray[],size_t dispatchArrayCount,OSObject * target,void * reference)6360 IOUserClient2022::dispatchExternalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque *arguments,
6361 const IOExternalMethodDispatch2022 dispatchArray[], size_t dispatchArrayCount,
6362 OSObject * target, void * reference)
6363 {
6364 IOReturn err;
6365 IOExternalMethodArguments * args = (typeof(args))arguments;
6366 const IOExternalMethodDispatch2022 * dispatch;
6367
6368 if (!dispatchArray) {
6369 return kIOReturnError;
6370 }
6371 if (selector >= dispatchArrayCount) {
6372 return kIOReturnBadArgument;
6373 }
6374 dispatch = &dispatchArray[selector];
6375
6376 uint32_t count;
6377 count = dispatch->checkScalarInputCount;
6378 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6379 return kIOReturnBadArgument;
6380 }
6381
6382 count = dispatch->checkStructureInputSize;
6383 if ((kIOUCVariableStructureSize != count)
6384 && (count != ((args->structureInputDescriptor)
6385 ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6386 return kIOReturnBadArgument;
6387 }
6388
6389 count = dispatch->checkScalarOutputCount;
6390 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6391 return kIOReturnBadArgument;
6392 }
6393
6394 count = dispatch->checkStructureOutputSize;
6395 if ((kIOUCVariableStructureSize != count)
6396 && (count != ((args->structureOutputDescriptor)
6397 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6398 return kIOReturnBadArgument;
6399 }
6400
6401 if (args->asyncWakePort && !dispatch->allowAsync) {
6402 return kIOReturnBadArgument;
6403 }
6404
6405 if (dispatch->checkEntitlement) {
6406 if (!IOCurrentTaskHasEntitlement(dispatch->checkEntitlement)) {
6407 return kIOReturnNotPrivileged;
6408 }
6409 }
6410
6411 if (dispatch->function) {
6412 err = (*dispatch->function)(target, reference, args);
6413 } else {
6414 err = kIOReturnNoCompletion; /* implementer can dispatch */
6415 }
6416 return err;
6417 }
6418
6419 IOReturn
externalMethod(uint32_t selector,IOExternalMethodArguments * args,IOExternalMethodDispatch * dispatch,OSObject * target,void * reference)6420 IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
6421 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
6422 {
6423 IOReturn err;
6424 IOService * object;
6425 IOByteCount structureOutputSize;
6426
6427 if (dispatch) {
6428 uint32_t count;
6429 count = dispatch->checkScalarInputCount;
6430 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6431 return kIOReturnBadArgument;
6432 }
6433
6434 count = dispatch->checkStructureInputSize;
6435 if ((kIOUCVariableStructureSize != count)
6436 && (count != ((args->structureInputDescriptor)
6437 ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6438 return kIOReturnBadArgument;
6439 }
6440
6441 count = dispatch->checkScalarOutputCount;
6442 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6443 return kIOReturnBadArgument;
6444 }
6445
6446 count = dispatch->checkStructureOutputSize;
6447 if ((kIOUCVariableStructureSize != count)
6448 && (count != ((args->structureOutputDescriptor)
6449 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6450 return kIOReturnBadArgument;
6451 }
6452
6453 if (dispatch->function) {
6454 err = (*dispatch->function)(target, reference, args);
6455 } else {
6456 err = kIOReturnNoCompletion; /* implementer can dispatch */
6457 }
6458 return err;
6459 }
6460
6461
6462 // pre-Leopard API's don't do ool structs
6463 if (args->structureInputDescriptor || args->structureOutputDescriptor) {
6464 err = kIOReturnIPCError;
6465 return err;
6466 }
6467
6468 structureOutputSize = args->structureOutputSize;
6469
6470 if (args->asyncWakePort) {
6471 IOExternalAsyncMethod * method;
6472 object = NULL;
6473 if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
6474 return kIOReturnUnsupported;
6475 }
6476
6477 if (kIOUCForegroundOnly & method->flags) {
6478 if (task_is_gpu_denied(current_task())) {
6479 return kIOReturnNotPermitted;
6480 }
6481 }
6482
6483 switch (method->flags & kIOUCTypeMask) {
6484 case kIOUCScalarIStructI:
6485 err = shim_io_async_method_scalarI_structureI( method, object,
6486 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6487 args->scalarInput, args->scalarInputCount,
6488 (char *)args->structureInput, args->structureInputSize );
6489 break;
6490
6491 case kIOUCScalarIScalarO:
6492 err = shim_io_async_method_scalarI_scalarO( method, object,
6493 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6494 args->scalarInput, args->scalarInputCount,
6495 args->scalarOutput, &args->scalarOutputCount );
6496 break;
6497
6498 case kIOUCScalarIStructO:
6499 err = shim_io_async_method_scalarI_structureO( method, object,
6500 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6501 args->scalarInput, args->scalarInputCount,
6502 (char *) args->structureOutput, &args->structureOutputSize );
6503 break;
6504
6505
6506 case kIOUCStructIStructO:
6507 err = shim_io_async_method_structureI_structureO( method, object,
6508 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
6509 (char *)args->structureInput, args->structureInputSize,
6510 (char *) args->structureOutput, &args->structureOutputSize );
6511 break;
6512
6513 default:
6514 err = kIOReturnBadArgument;
6515 break;
6516 }
6517 } else {
6518 IOExternalMethod * method;
6519 object = NULL;
6520 if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
6521 return kIOReturnUnsupported;
6522 }
6523
6524 if (kIOUCForegroundOnly & method->flags) {
6525 if (task_is_gpu_denied(current_task())) {
6526 return kIOReturnNotPermitted;
6527 }
6528 }
6529
6530 switch (method->flags & kIOUCTypeMask) {
6531 case kIOUCScalarIStructI:
6532 err = shim_io_connect_method_scalarI_structureI( method, object,
6533 args->scalarInput, args->scalarInputCount,
6534 (char *) args->structureInput, args->structureInputSize );
6535 break;
6536
6537 case kIOUCScalarIScalarO:
6538 err = shim_io_connect_method_scalarI_scalarO( method, object,
6539 args->scalarInput, args->scalarInputCount,
6540 args->scalarOutput, &args->scalarOutputCount );
6541 break;
6542
6543 case kIOUCScalarIStructO:
6544 err = shim_io_connect_method_scalarI_structureO( method, object,
6545 args->scalarInput, args->scalarInputCount,
6546 (char *) args->structureOutput, &structureOutputSize );
6547 break;
6548
6549
6550 case kIOUCStructIStructO:
6551 err = shim_io_connect_method_structureI_structureO( method, object,
6552 (char *) args->structureInput, args->structureInputSize,
6553 (char *) args->structureOutput, &structureOutputSize );
6554 break;
6555
6556 default:
6557 err = kIOReturnBadArgument;
6558 break;
6559 }
6560 }
6561
6562 if (structureOutputSize > UINT_MAX) {
6563 structureOutputSize = 0;
6564 err = kIOReturnBadArgument;
6565 }
6566
6567 args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);
6568
6569 return err;
6570 }
6571
6572 IOReturn
registerFilterCallbacks(const struct io_filter_callbacks * callbacks,size_t size)6573 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6574 {
6575 if (size < sizeof(*callbacks)) {
6576 return kIOReturnBadArgument;
6577 }
6578 if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6579 return kIOReturnBusy;
6580 }
6581 return kIOReturnSuccess;
6582 }
6583
6584
/*
 * Reserved, currently-unused vtable padding slots kept for binary
 * compatibility with kexts linked against earlier releases.
 */
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);

OSMetaClassDefineReservedUnused(IOUserClient2022, 0);
OSMetaClassDefineReservedUnused(IOUserClient2022, 1);
OSMetaClassDefineReservedUnused(IOUserClient2022, 2);
OSMetaClassDefineReservedUnused(IOUserClient2022, 3);
6606