1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <libkern/c++/OSKext.h>
30 #include <libkern/c++/OSSharedPtr.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51 #include <sys/code_signing.h>
52 #include <vm/vm_kern_xnu.h>
53
54 #include <mach/sdt.h>
55 #include <os/hash.h>
56
57 #include <libkern/amfi/amfi.h>
58
59 #if CONFIG_MACF
60
61 extern "C" {
62 #include <security/mac_framework.h>
63 };
64 #include <sys/kauth.h>
65
66 #define IOMACF_LOG 0
67
68 #endif /* CONFIG_MACF */
69
70 #include <IOKit/assert.h>
71
72 #include "IOServicePrivate.h"
73 #include "IOKitKernelInternal.h"
74
/* Argument marshalling helpers for external-method scalars and references. */
#define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
#define SCALAR32(x) ((uint32_t )x)
#define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
#define REF64(x) ((io_user_reference_t)((UInt64)(x)))
#define REF32(x) ((int)(x))

// Flag bits carried in the low bits of the first 64-bit async reference.
enum{
	kIOUCAsync0Flags = 3ULL,         // mask covering both flag bits below
	kIOUCAsync64Flag = 1ULL,         // reference registered by a 64-bit client
	kIOUCAsyncErrorLoggedFlag = 2ULL // async error has already been logged once
};

#if IOKITSTATS

// IOStatistics bookkeeping; these compile away when IOKITSTATS is off.
#define IOStatisticsRegisterCounter() \
do { \
	reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

#define IOStatisticsUnregisterCounter() \
do { \
	if (reserved) \
	        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

#define IOStatisticsClientCall() \
do { \
	IOStatistics::countUserClientCall(client); \
} while (0)

#else

#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */

#if DEVELOPMENT || DEBUG

// Temporarily overwrites this frame's saved return address with 'a' so
// backtraces attribute the call site; must be balanced with
// FAKE_STACK_FRAME_END() in the same function before returning.
#define FAKE_STACK_FRAME(a) \
	const void ** __frameptr; \
	const void * __retaddr; \
	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
	__retaddr = __frameptr[1]; \
	__frameptr[1] = (a);

#define FAKE_STACK_FRAME_END() \
	__frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */

// Element counts of the 32-bit and 64-bit async reference arrays.
#define ASYNC_REF_COUNT (sizeof(io_async_ref_t) / sizeof(natural_t))
#define ASYNC_REF64_COUNT (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
134
135 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
136
137 extern "C" {
138 #include <mach/mach_traps.h>
139 #include <vm/vm_map_xnu.h>
140 } /* extern "C" */
141
142 struct IOMachPortHashList;
143
144 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
145
146 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
class IOMachPort final : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	bool hashed;                  // true while linked into gIOMachPortHash
	SLIST_ENTRY(IOMachPort) link; // gIOMachPortHash bucket linkage
	ipc_port_t port;              // kobject port representing 'object'
	// The wrapped object; holds a collection-tagged retain taken in
	// withObject() and dropped in free().
	OSObject* XNU_PTRAUTH_SIGNED_PTR("IOMachPort.object") object;

	// Creates an IOMachPort wrapping 'obj' (takes a tagged retain on it).
	static IOMachPort* withObject(OSObject *obj);

	// Selects the hash bucket for 'obj' by its kernel pointer hash.
	static IOMachPortHashList* bucketForObject(OSObject *obj);

	// Looks up an entry by (object, port type); caller holds gIOObjectPortLock.
	static LIBKERN_RETURNS_NOT_RETAINED IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	// No-senders processing. Returns the IOMachPort (ownership transferred
	// to the caller) when the port was actually torn down, NULL otherwise.
	static IOMachPort *noMoreSenders( ipc_port_t port,
	    ipc_kobject_type_t type, mach_port_mscount_t mscount );
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );

	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;

	// Allocates a fresh kobject port of 'type' for this IOMachPort.
	void
	makePort(ipc_kobject_type_t type)
	{
		port = iokit_alloc_object_port(this, type);
	}

	// Takes over 'other''s port, re-pointing the port's kobject at this
	// instance. NOTE(review): 'type' is unused here; the port is re-enabled
	// as IKOT_IOKIT_CONNECT unconditionally — the only caller in this file
	// passes a connect port, but confirm before adding new callers.
	void
	adoptPort(IOMachPort *other, ipc_kobject_type_t type)
	{
		port = other->port;
		ipc_kobject_enable(port, this, IKOT_IOKIT_CONNECT);
		other->port = NULL;
	}

	// Detaches this kobject from its port; asserts we were still attached.
	void
	disablePort(ipc_kobject_type_t type)
	{
		__assert_only ipc_kobject_t kobj;
		kobj = ipc_kobject_disable(port, type);
		assert(kobj == this);
	}

	// Typed accessor for the wrapped object (NULL when not a T).
	template<typename T>
	inline T *
	getAs() const
	{
		return OSDynamicCast(T, object);
	}
};
201
#define super OSObject
OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)

// Protects the object->port hash table and IOMachPort state transitions.
static IOLock * gIOObjectPortLock;
IOLock * gIOUserServerLock;

// Optional message-filter callbacks for user-client calls (set once, early).
SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SLIST_HEAD(IOMachPortHashList, IOMachPort);

// Fixed-size object->port hash; larger on macOS, smaller on embedded.
#if defined(XNU_TARGET_OS_OSX)
#define PORT_HASH_SIZE 4096
#else /* !defined(XNU_TARGET_OS_OSX) */
#define PORT_HASH_SIZE 256
#endif /* defined(XNU_TARGET_OS_OSX) */

IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
221
222 void
IOMachPortInitialize(void)223 IOMachPortInitialize(void)
224 {
225 for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
226 SLIST_INIT(&gIOMachPortHash[i]);
227 }
228 }
229
230 IOMachPortHashList*
bucketForObject(OSObject * obj)231 IOMachPort::bucketForObject(OSObject *obj)
232 {
233 return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
234 }
235
// Linear scan of one hash bucket. A match requires both the object pointer
// and the port's kobject type (an object may have ports of several types).
// Caller must hold gIOObjectPortLock; the returned entry is not retained.
IOMachPort*
IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
{
	IOMachPort *machPort;

	SLIST_FOREACH(machPort, bucket, link) {
		if (machPort->object == obj && iokit_port_type(machPort->port) == type) {
			return machPort;
		}
	}
	return NULL;
}
248
// Creates a new IOMachPort wrapping 'obj'. The object is retained with the
// OSCollection tag; the matching taggedRelease happens in IOMachPort::free().
// The caller is responsible for calling makePort()/adoptPort() and hashing.
IOMachPort*
IOMachPort::withObject(OSObject *obj)
{
	IOMachPort *machPort = NULL;

	machPort = new IOMachPort;
	// init() cannot fail for this class in practice; treat failure as fatal.
	release_assert(machPort->init());
	machPort->object = obj;

	obj->taggedRetain(OSTypeID(OSCollection));

	return machPort;
}
262
// Common no-senders handling for all IOKit kobject port types. Resolves the
// IOMachPort from the port, and — when the make-send count proves no new
// send right was minted after the notification fired — unhashes the entry,
// detaches the kobject, and performs type-specific teardown. Ownership of
// the returned IOMachPort transfers to the caller (only when non-NULL).
IOMachPort *
IOMachPort::noMoreSenders( ipc_port_t port, ipc_kobject_type_t type,
    mach_port_mscount_t mscount )
{
	IOUserClient *uc = NULL;
	IOMachPort *machPort;
	bool destroyed;

	lck_mtx_lock(gIOObjectPortLock);

	// Re-resolve under the port lock; a stale mscount means another send
	// right was produced after this notification was generated, in which
	// case the port must stay alive.
	iokit_lock_port(port);
	machPort = (IOMachPort *)ipc_kobject_get_locked(port, type);
	destroyed = ipc_kobject_is_mscount_current_locked(port, mscount);
	iokit_unlock_port(port);

	if (machPort == NULL) {
		// Already disabled/destroyed by another path.
		lck_mtx_unlock(gIOObjectPortLock);
		return NULL;
	}

	assert(machPort->port == port);

	if (destroyed) {
		// Unhash first so no new send rights can be minted for the
		// object, then detach the kobject from the port.
		if (machPort->hashed) {
			IOMachPortHashList *bucket;

			bucket = IOMachPort::bucketForObject(machPort->object);
			machPort->hashed = false;
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);
		}

		machPort->disablePort(type);

		if (IKOT_IOKIT_CONNECT == type) {
			uc = machPort->getAs<IOUserClient>();
		}
	}

	if (uc) {
		// Still under gIOObjectPortLock, as the client may be racing
		// with finalize bookkeeping.
		uc->noMoreSenders();
	}

	lck_mtx_unlock(gIOObjectPortLock);

	if (IKOT_UEXT_OBJECT == type) {
		// DriverKit object ports: abort any pending OSAction, and
		// optionally leak the object for leak debugging.
		if (OSAction *action = machPort->getAs<OSAction>()) {
			action->Aborted();
		}

		if (IOUserServer::shouldLeakObjects()) {
			// Leak object
			machPort->object->retain();
		}
	}

	return destroyed ? machPort : NULL;
}
320
// Tears down the (non-connect) port of the given type for 'obj', unless the
// object is an IOService that has asked to hold port destruction
// (machPortHoldDestroy), in which case the port is left alone.
void
IOMachPort::releasePortForObject( OSObject * obj, ipc_kobject_type_t type )
{
	bool destroyed = false;
	IOMachPort *machPort;
	IOService *service;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj);

	// Connect ports are torn down via destroyUserReferences(), never here.
	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort
	    && ((type != IKOT_IOKIT_OBJECT)
	    || !(service = OSDynamicCast(IOService, obj))
	    || !service->machPortHoldDestroy())) {
		machPort->hashed = false;
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);
		machPort->disablePort(type);
		destroyed = true;
	}

	lck_mtx_unlock(gIOObjectPortLock);

	// Release outside the lock; may trigger IOMachPort::free().
	if (destroyed) {
		machPort->release();
	}
}
351
// Destroys all user-visible ports for 'obj': the plain object port and, if
// present, the connect port. For an IOUserClient with live memory mappings,
// the connect port is re-homed onto a fresh IOMachPort wrapping the mappings
// collection so the mappings stay alive until the port itself dies.
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	IOMachPort *machPort = NULL;
	OSObject *mappings = NULL;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		// No connect port; nothing more to do.
		lck_mtx_unlock(gIOObjectPortLock);
		return;
	}

	// Unhash and detach the connect port from the user client.
	machPort->hashed = false;
	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	machPort->disablePort(IKOT_IOKIT_CONNECT);

	if (uc) {
		// Steal the mappings collection before notifying the client.
		mappings = uc->mappings;
		uc->mappings = NULL;

		uc->noMoreSenders();

		if (mappings) {
			IOMachPort *newPort;

			// newPort becomes the port's kobject and is intentionally
			// not released here; it is freed by no-senders teardown.
			newPort = IOMachPort::withObject(mappings);
			newPort->adoptPort(machPort, IKOT_IOKIT_CONNECT);
		}
	}

	lck_mtx_unlock(gIOObjectPortLock);

	OSSafeReleaseNULL(mappings);
	machPort->release();
}
395
// Thin wrapper: inserts a send right for 'obj' (as a port of 'type') into
// 'task''s IPC space and returns the resulting port name.
mach_port_name_t
IOMachPort::makeSendRightForTask( task_t task,
    io_object_t obj, ipc_kobject_type_t type )
{
	return iokit_make_send_right( task, obj, type );
}
402
// Destructor: destroys the kobject port (if any) and drops the
// collection-tagged retain taken on the wrapped object in withObject().
void
IOMachPort::free( void )
{
	if (port) {
		iokit_destroy_object_port(port, iokit_port_type(port));
	}
	object->taggedRelease(OSTypeID(OSCollection));
	super::free();
}
412
413
414 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
415
// Registry-compatibility mode is disabled in this configuration: no task
// ever gets compatibility matching ('task' is intentionally ignored).
static bool
IOTaskRegistryCompatibility(task_t task)
{
	return false;
}
421
422 static void
IOTaskRegistryCompatibilityMatching(task_t task,OSDictionary * matching)423 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
424 {
425 matching->setObject(gIOServiceNotificationUserKey, kOSBooleanTrue);
426 if (!IOTaskRegistryCompatibility(task)) {
427 return;
428 }
429 matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
430 }
431
432 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
433
OSDefineMetaClassAndStructors(IOUserIterator,OSIterator)434 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
435
436 IOUserIterator *
437 IOUserIterator::withIterator(OSIterator * iter)
438 {
439 IOUserIterator * me;
440
441 if (!iter) {
442 return NULL;
443 }
444
445 me = new IOUserIterator;
446 if (me && !me->init()) {
447 me->release();
448 me = NULL;
449 }
450 if (!me) {
451 iter->release();
452 return me;
453 }
454 me->userIteratorObject = iter;
455
456 return me;
457 }
458
// Initializes the base object and the inline lock protecting the wrapped
// iterator.
bool
IOUserIterator::init( void )
{
	if (!OSObject::init()) {
		return false;
	}

	IOLockInlineInit(&lock);
	return true;
}
469
// Releases the wrapped iterator reference (consumed in withIterator) and
// tears down the inline lock.
void
IOUserIterator::free()
{
	if (userIteratorObject) {
		userIteratorObject->release();
	}
	IOLockInlineDestroy(&lock);
	OSObject::free();
}
479
// Resets the wrapped iterator under the lock. The wrapped object is
// expected to be an OSIterator here (asserted on debug kernels).
void
IOUserIterator::reset()
{
	IOLockLock(&lock);
	assert(OSDynamicCast(OSIterator, userIteratorObject));
	((OSIterator *)userIteratorObject)->reset();
	IOLockUnlock(&lock);
}
488
489 bool
isValid()490 IOUserIterator::isValid()
491 {
492 bool ret;
493
494 IOLockLock(&lock);
495 assert(OSDynamicCast(OSIterator, userIteratorObject));
496 ret = ((OSIterator *)userIteratorObject)->isValid();
497 IOLockUnlock(&lock);
498
499 return ret;
500 }
501
// Unretained iteration is not supported for user iterators; callers must
// use copyNextObject() (asserts on debug kernels, returns NULL otherwise).
OSObject *
IOUserIterator::getNextObject()
{
	assert(false);
	return NULL;
}
508
// Advances the wrapped iterator under the lock and returns the next object
// with a retain for the caller, or NULL when exhausted or already detached.
OSObject *
IOUserIterator::copyNextObject()
{
	OSObject * ret = NULL;

	IOLockLock(&lock);
	if (userIteratorObject) {
		ret = ((OSIterator *)userIteratorObject)->getNextObject();
		if (ret) {
			// Caller owns this reference.
			ret->retain();
		}
	}
	IOLockUnlock(&lock);

	return ret;
}
525
526 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
527 extern "C" {
528 // functions called from osfmk/device/iokit_rpc.c
529
// Fills 'desc' with a human-readable description of the kobject behind an
// IOKit port: class name plus registry ID for registry entries, class name
// plus serialized matching dictionary for notifications (DEBUG builds),
// otherwise just the class name. Called from osfmk/device/iokit_rpc.c.
void
iokit_port_object_description(io_object_t obj, kobject_description_t desc)
{
	IORegistryEntry * regEntry;
	IOUserNotification * __unused noti;
	_IOServiceNotifier * __unused serviceNoti;
	OSSerialize * __unused s;
	OSDictionary * __unused matching = NULL;

	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
#if DEVELOPMENT || DEBUG
	} else if ((noti = OSDynamicCast(IOUserNotification, obj))) {
		// serviceNoti->matching may become NULL if the port gets a no-senders notification, so we have to lock gIOObjectPortLock
		IOLockLock(gIOObjectPortLock);
		serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->userIteratorObject);
		if (serviceNoti && (matching = serviceNoti->matching)) {
			matching->retain();
		}
		IOLockUnlock(gIOObjectPortLock);

		if (matching) {
			// Serialize outside the lock; failure just falls back to
			// leaving 'desc' untouched by this branch.
			s = OSSerialize::withCapacity((unsigned int) page_size);
			if (s && matching->serialize(s)) {
				snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
			}
			OSSafeReleaseNULL(s);
			OSSafeReleaseNULL(matching);
		}
#endif /* DEVELOPMENT || DEBUG */
	} else {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
	}
}
564
565 // FIXME: Implementation of these functions are hidden from the static analyzer.
566 // As for now, the analyzer doesn't consistently support wrapper functions
567 // for retain and release.
568 #ifndef __clang_analyzer__
569 void
iokit_add_reference(io_object_t obj)570 iokit_add_reference( io_object_t obj )
571 {
572 if (!obj) {
573 return;
574 }
575 obj->retain();
576 }
577
578 void
iokit_remove_reference(io_object_t obj)579 iokit_remove_reference( io_object_t obj )
580 {
581 if (obj) {
582 obj->release();
583 }
584 }
585 #endif // __clang_analyzer__
586
// Releases the reference on a connection object; annotated LIBKERN_CONSUMED
// since the caller's reference is always consumed (NULL is a no-op).
void
iokit_remove_connect_reference(LIBKERN_CONSUMED io_object_t obj )
{
	if (!obj) {
		return;
	}
	obj->release();
}
595
// Locking modes for ipcEnter()/ipcExit(): which side of the user client's
// reader/writer lock an IPC call takes (or none at all).
enum {
	kIPCLockNone = 0,
	kIPCLockRead = 1,
	kIPCLockWrite = 2
};
601
// Marks entry of an IPC call into this user client: acquires the requested
// side of the r/w lock, then bumps the in-flight call count (__ipc), which
// ipcExit()/finalizeUserReferences() use to defer finalization.
void
IOUserClient::ipcEnter(int locking)
{
	switch (locking) {
	case kIPCLockWrite:
		IORWLockWrite(&lock);
		break;
	case kIPCLockRead:
		IORWLockRead(&lock);
		break;
	case kIPCLockNone:
		break;
	default:
		panic("ipcEnter");
	}

	OSIncrementAtomic(&__ipc);
}
620
// Marks exit of an IPC call. When the last in-flight call leaves an
// inactive client, runs any finalize that finalizeUserReferences() deferred
// (flag __ipcFinal, manipulated under gIOObjectPortLock), then releases the
// lock taken in ipcEnter().
void
IOUserClient::ipcExit(int locking)
{
	bool finalize = false;

	assert(__ipc);
	if (1 == OSDecrementAtomic(&__ipc) && isInactive()) {
		IOLockLock(gIOObjectPortLock);
		if ((finalize = __ipcFinal)) {
			__ipcFinal = false;
		}
		IOLockUnlock(gIOObjectPortLock);
		if (finalize) {
			scheduleFinalize(true);
		}
	}
	switch (locking) {
	case kIPCLockWrite:
	case kIPCLockRead:
		IORWLockUnlock(&lock);
		break;
	case kIPCLockNone:
		break;
	default:
		panic("ipcExit");
	}
}
648
// Retains the IOMachPort kobject itself (not the wrapped object); asserts
// the pointer really is an IOMachPort on debug kernels.
void
iokit_kobject_retain(io_kobject_t machPort)
{
	assert(OSDynamicCast(IOMachPort, machPort));
	machPort->retain();
}
655
// Exchanges a consumed IOMachPort reference for a retained reference to the
// wrapped object.
io_object_t
iokit_copy_object_for_consumed_kobject(LIBKERN_CONSUMED io_kobject_t machPort)
{
	io_object_t result;

	assert(OSDynamicCast(IOMachPort, machPort));

	/*
	 * IOMachPort::object is never nil-ed, so this just borrows its port
	 * reference to make new rights.
	 */
	result = machPort->object;
	iokit_add_reference(result);
	machPort->release();
	return result;
}
672
// Checks whether a user client can finalize now. If IPC calls are still in
// flight (__ipc != 0), sets __ipcFinal so the last ipcExit() schedules the
// finalize instead, and returns false. Non-user-client objects return true.
bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
	IOUserClient * uc;
	bool ok = true;

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		IOLockLock(gIOObjectPortLock);
		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
			ok = false;
		}
		IOLockUnlock(gIOObjectPortLock);
	}
	return ok;
}
688
// Returns a send right for 'obj' exposed as a kobject port of 'type',
// lazily creating and hashing a new IOMachPort on first use.
ipc_port_t
iokit_port_make_send_for_object( io_object_t obj, ipc_kobject_type_t type )
{
	IOMachPort *machPort = NULL;
	ipc_port_t port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		// First send right for this (object, type): create the port and
		// publish it in the hash under the lock.
		machPort = IOMachPort::withObject(obj);
		machPort->makePort(type);
		machPort->hashed = true;
		SLIST_INSERT_HEAD(bucket, machPort, link);
	}

	port = ipc_kobject_make_send( machPort->port, machPort, type );

	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
714
715 /*
716 * Handle the No-More_Senders notification generated from a device port destroy.
717 * Since there are no longer any tasks which hold a send right to this device
718 * port a NMS notification has been generated.
719 */
720
// No-senders handler for IKOT_IOKIT_IDENT ports: when the last send right
// to a DriverKit check-in token goes away, cancel the token.
void
iokit_ident_no_senders( ipc_port_t port, mach_port_mscount_t mscount )
{
	IOMachPort *machPort;

	machPort = IOMachPort::noMoreSenders(port, IKOT_IOKIT_IDENT, mscount);

	if (machPort) {
		if (IOUserServerCheckInToken *token =
		    machPort->getAs<IOUserServerCheckInToken>()) {
			token->cancel();
		}
		machPort->release();
	}
}
736
// No-senders handler for IKOT_IOKIT_OBJECT ports: memory maps learn the
// mapping task died; user notifications detach their IONotifier.
void
iokit_object_no_senders( ipc_port_t port, mach_port_mscount_t mscount )
{
	IOMachPort *machPort;

	machPort = IOMachPort::noMoreSenders(port, IKOT_IOKIT_OBJECT, mscount);

	if (machPort) {
		if (IOMemoryMap *map = machPort->getAs<IOMemoryMap>()) {
			map->taskDied();
		} else if (IOUserNotification *notify =
		    machPort->getAs<IOUserNotification>()) {
			notify->setNotification( NULL );
		}
		machPort->release();
	}
}
754
// No-senders handler for IKOT_IOKIT_CONNECT ports: the owning task dropped
// its last connection send right, so deliver clientDied() under the
// client's write lock.
void
iokit_connect_no_senders( ipc_port_t port, mach_port_mscount_t mscount )
{
	IOMachPort *machPort;

	machPort = IOMachPort::noMoreSenders(port, IKOT_IOKIT_CONNECT, mscount);

	if (machPort) {
		if (IOUserClient *client = machPort->getAs<IOUserClient>()) {
			IOStatisticsClientCall();
			IORWLockWrite(&client->lock);
			client->clientDied();
			IORWLockUnlock(&client->lock);
		}
		machPort->release();
	}
}
772
// No-senders handler for IKOT_UEXT_OBJECT ports: for a DriverKit user
// client, release the deferred termination taken against its provider
// (provider captured under arbitration to keep it alive across the call).
void
iokit_uext_no_senders( ipc_port_t port, mach_port_mscount_t mscount )
{
	IOMachPort *machPort;

	machPort = IOMachPort::noMoreSenders(port, IKOT_UEXT_OBJECT, mscount);

	if (machPort) {
		if (IOUserClient *uc = machPort->getAs<IOUserUserClient>()) {
			IOService *provider = NULL;
			uc->lockForArbitration();
			provider = uc->getProvider();
			if (provider) {
				provider->retain();
			}
			uc->unlockForArbitration();
			uc->setTerminateDefer(provider, false);
			OSSafeReleaseNULL(provider);
		}
		machPort->release();
	}
}
795 }; /* extern "C" */
796
797 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
798
// Delivers service publish/match notifications to a user task by sending a
// small "ping" Mach message to a client-supplied port; discovered services
// queue in 'newSet' until the client drains them via copyNextObject().
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	// Kernel-processed part of the ping message (header only).
	struct PingMsgKdata {
		mach_msg_header_t msgHdr;
	};
	// User payload: notification header (carries the async reference).
	struct PingMsgUdata {
		OSNotificationHeader64 notifyHeader;
	};

	enum { kMaxOutstanding = 1024 }; // cap on queued, undelivered services

	ipc_port_t remotePort;            // client's notification port (send right)
	void *msgReference;               // copy of the client's async reference
	mach_msg_size_t msgReferenceSize; // mach_round_msg()'d size of the above
	natural_t msgType;                // notification type placed in the header
	OSArray * newSet;                 // services found but not yet collected
	bool armed;                       // next arrival should send a ping
	bool ipcLogged;                   // rate-limits send-failure logging

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	// Forgets remotePort without releasing it (owner transferred elsewhere).
	void invalidatePort(void);

	// IOService matching callback trampoline and implementation.
	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
835
// Delivers IOService messages (e.g. power/interest notifications) to a user
// task as Mach messages carrying a port descriptor for the provider plus
// the message payload.
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	// Kernel-processed part: header, body, and one port descriptor
	// (the provider's object port).
	struct PingMsgKdata {
		mach_msg_header_t msgHdr;
		mach_msg_body_t msgBody;
		mach_msg_port_descriptor_t ports[1];
	};
	// User payload: packed notification header followed by message data.
	struct PingMsgUdata {
		OSNotificationHeader64 notifyHeader __attribute__ ((packed));
	};

	ipc_port_t remotePort;            // client's notification port (send right)
	void *msgReference;               // copy of the client's async reference
	mach_msg_size_t msgReferenceSize; // mach_round_msg()'d size of the above
	mach_msg_size_t msgExtraSize;     // extra payload bytes beyond the header
	natural_t msgType;                // notification type placed in the header
	uint8_t clientIs64;               // client registered via the 64-bit API
	int owningPID;                    // pid captured at init (kIOMessageCopyClientID)
	bool ipcLogged;                   // rate-limits send-failure logging

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	// Forgets remotePort without releasing it (owner transferred elsewhere).
	void invalidatePort(void);

	// IOService message callback trampoline and implementation.
	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
877
878 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
879
#undef super
#define super IOUserIterator
// IOUserNotification is abstract; concrete subclasses are defined below.
OSDefineMetaClass( IOUserNotification, IOUserIterator );
OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
884
885 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
886
// Destructor. By this point setNotification(NULL) must already have removed
// and cleared the wrapped IONotifier (checked on DEBUG kernels).
void
IOUserNotification::free( void )
{
#if DEVELOPMENT || DEBUG
	IOLockLock( gIOObjectPortLock);

	assert(userIteratorObject == NULL);

	IOLockUnlock( gIOObjectPortLock);
#endif /* DEVELOPMENT || DEBUG */

	super::free();
}
900
901
// Swaps the wrapped IONotifier under gIOObjectPortLock. Installing a
// notifier takes an extra retain on this object; clearing it (notify ==
// NULL) removes the previous notifier and drops that retain.
void
IOUserNotification::setNotification( IONotifier * notify )
{
	OSObject * previousNotify;

	/*
	 * We must retain this object here before proceeding.
	 * Two threads may race in setNotification(). If one thread sets a new notifier while the
	 * other thread sets the notifier to NULL, it is possible for the second thread to call release()
	 * before the first thread calls retain(). Without the retain here, this thread interleaving
	 * would cause the object to get released and freed before it is retained by the first thread,
	 * which is a UaF.
	 */
	retain();

	IOLockLock( gIOObjectPortLock);

	previousNotify = userIteratorObject;
	userIteratorObject = notify;

	IOLockUnlock( gIOObjectPortLock);

	if (previousNotify) {
		// Detach the old notifier from the notification machinery.
		assert(OSDynamicCast(IONotifier, previousNotify));
		((IONotifier *)previousNotify)->remove();

		if (notify == NULL) {
			// Drop the retain taken when the old notifier was installed.
			release();
		}
	} else if (notify) {
		// new IONotifier, retain the object. release() will happen in setNotification(NULL)
		retain();
	}

	release(); // paired with retain() at beginning of this method
}
938
// reset() is meaningless for notifications; intentionally a no-op.
void
IOUserNotification::reset()
{
	// ?
}

// Notifications are always considered valid.
bool
IOUserNotification::isValid()
{
	return true;
}
950
951 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
952
953 #undef super
954 #define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification,IOUserNotification)955 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
956
957 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
958
// Initializes with the client's notification port, notification type, and
// async reference. Rejects references larger than OSAsyncReference64. The
// send right in 'port' is adopted here and released in free().
bool
IOServiceUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize,
    bool clientIs64 )
{
	if (!super::init()) {
		return false;
	}

	newSet = OSArray::withCapacity( 1 );
	if (!newSet) {
		return false;
	}

	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	// Keep a mach_round_msg()-padded, zero-filled copy of the reference
	// for embedding in outgoing messages.
	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
989
// Forgets the client port without releasing the send right — used when
// ownership of the right has been transferred elsewhere.
void
IOServiceUserNotification::invalidatePort(void)
{
	remotePort = MACH_PORT_NULL;
}
995
// Destructor: drops the client port send right (unless invalidated), the
// async reference copy, and the queued-services array.
void
IOServiceUserNotification::free( void )
{
	if (remotePort) {
		iokit_release_port_send(remotePort);
	}
	IOFreeData(msgReference, msgReferenceSize);
	OSSafeReleaseNULL(newSet);

	super::free();
}
1007
1008 bool
_handler(void * target,void * ref,IOService * newService,IONotifier * notifier)1009 IOServiceUserNotification::_handler( void * target,
1010 void * ref, IOService * newService, IONotifier * notifier )
1011 {
1012 IOServiceUserNotification * targetObj = (IOServiceUserNotification *)target;
1013 bool ret;
1014
1015 targetObj->retain();
1016 ret = targetObj->handler( ref, newService );
1017 targetObj->release();
1018 return ret;
1019 }
1020
// Queues a newly matched service and, when this is the first arrival since
// the client last drained the queue (armed && empty), sends one ping
// message so the client knows to collect. Always reports the notification
// as handled.
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	unsigned int count;
	kern_return_t kr;
	ipc_port_t port = NULL;
	bool sendPing = false;
	mach_msg_size_t msgSize, payloadSize;

	IOTakeLock( &lock );

	// Queue the service (bounded by kMaxOutstanding); ping only on the
	// first arrival while armed, then disarm until the client drains.
	count = newSet->getCount();
	if (count < kMaxOutstanding) {
		newSet->setObject( newService );
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( &lock );

	if (kIOServiceTerminatedNotificationType == msgType) {
		// Keep the terminated service's object port alive until user
		// space has had a chance to see the termination.
		lck_mtx_lock(gIOObjectPortLock);
		newService->setMachPortHoldDestroy(true);
		lck_mtx_unlock(gIOObjectPortLock);
	}

	if (sendPing) {
		/*
		 * This right will be consumed when the message we form below
		 * is sent by kernel_mach_msg_send_with_builder_internal(),
		 * because we make the disposition for the right move-send.
		 */
		port = iokit_port_make_send_for_object( this, IKOT_IOKIT_OBJECT );

		payloadSize = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		msgSize = (mach_msg_size_t)(sizeof(PingMsgKdata) + payloadSize);

		kr = kernel_mach_msg_send_with_builder_internal(0, payloadSize,
		    MACH_SEND_KERNEL_IMPORTANCE, MACH_MSG_TIMEOUT_NONE, NULL,
		    ^(mach_msg_header_t *hdr, __assert_only mach_msg_descriptor_t *descs, void *payload){
			PingMsgUdata *udata = (PingMsgUdata *)payload;

			hdr->msgh_remote_port = remotePort;
			hdr->msgh_local_port = port;
			hdr->msgh_bits = MACH_MSGH_BITS(
				MACH_MSG_TYPE_COPY_SEND /*remote*/,
				MACH_MSG_TYPE_MOVE_SEND /*local*/);
			hdr->msgh_size = msgSize;
			hdr->msgh_id = kOSNotificationMessageID;

			assert(descs == NULL);
			/* End of kernel processed data */

			udata->notifyHeader.size = 0;
			udata->notifyHeader.type = msgType;

			// Append the client's async reference after the header.
			assert((char *)udata->notifyHeader.reference + msgReferenceSize <= (char *)payload + payloadSize);
			bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );
		});

		// Log the first send failure only; later failures are silent.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
// Unretained iteration is not supported; callers must use copyNextObject().
OSObject *
IOServiceUserNotification::getNextObject()
{
	assert(false);
	return NULL;
}
1097
// Pops the most recently queued service (LIFO) with a retain for the
// caller. When the queue is empty, re-arms so the next arrival pings the
// client again, and returns NULL.
OSObject *
IOServiceUserNotification::copyNextObject()
{
	unsigned int count;
	OSObject * result;

	IOLockLock(&lock);

	count = newSet->getCount();
	if (count) {
		result = newSet->getObject( count - 1 );
		// Retain before removal: the array holds the only reference.
		result->retain();
		newSet->removeObject( count - 1);
	} else {
		result = NULL;
		armed = true;
	}

	IOLockUnlock(&lock);

	return result;
}
1120
1121 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1122
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification,IOUserNotification)1123 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1124
1125 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1126
// Initializes with the client's notification port, type, async reference,
// and bitness. Records the registering process's pid for
// kIOMessageCopyClientID. The send right in 'port' is adopted here and
// released in free().
bool
IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, bool client64 )
{
	if (!super::init()) {
		return false;
	}

	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	clientIs64 = client64;

	owningPID = proc_selfpid();

	// Keep a mach_round_msg()-padded, zero-filled copy of the reference
	// for embedding in outgoing messages.
	msgReferenceSize = mach_round_msg((mach_msg_size_t)referenceSize);
	msgReference = IOMallocZeroData(msgReferenceSize);
	if (!msgReference) {
		return false;
	}

	remotePort = port;
	msgType = type;
	bcopy( reference, msgReference, referenceSize );

	return true;
}
1155
void
IOServiceMessageUserNotification::invalidatePort(void)
{
	// Forget the client's port without releasing the send right, so
	// free() will skip iokit_release_port_send() on it.
	remotePort = MACH_PORT_NULL;
}
1161
1162 void
free(void)1163 IOServiceMessageUserNotification::free( void )
1164 {
1165 if (remotePort) {
1166 iokit_release_port_send(remotePort);
1167 }
1168 IOFreeData(msgReference, msgReferenceSize);
1169
1170 super::free();
1171 }
1172
1173 IOReturn
_handler(void * target,void * ref,UInt32 messageType,IOService * provider,void * argument,vm_size_t argSize)1174 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1175 UInt32 messageType, IOService * provider,
1176 void * argument, vm_size_t argSize )
1177 {
1178 IOServiceMessageUserNotification * targetObj = (IOServiceMessageUserNotification *)target;
1179 IOReturn ret;
1180
1181 targetObj->retain();
1182 ret = targetObj->handler(
1183 ref, messageType, provider, argument, argSize);
1184 targetObj->release();
1185 return ret;
1186 }
1187
// Deliver an IOService message to the user client as a mach message.
// Builds the message in-place via the kernel msg-builder, embedding the
// caller's async reference followed by an IOServiceInterestContent64.
// The payload layout and size arithmetic are order-sensitive; code is
// unchanged, comments only.
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	kern_return_t kr;
	vm_size_t argSize;
	mach_msg_size_t thisMsgSize;
	ipc_port_t thisPort, providerPort;

	// Special case: caller wants the owning pid back as an OSNumber,
	// not a mach message.
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	// No payload: the argument is passed by value, sized for the
	// client's pointer width. Otherwise clamp the payload length.
	if (callerArgSize == 0) {
		if (clientIs64) {
			argSize = sizeof(io_user_reference_t);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions: stash the unrounded
	// size in the type field, then round argSize up.
	natural_t type = msgType;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	mach_msg_size_t extraSize = kIOUserNotifyMaxMessageSize + sizeof(IOServiceInterestContent64);
	// Fixed part: kernel header + user data header, minus the full
	// OSAsyncReference64 slot, plus the actual (rounded) reference.
	mach_msg_size_t msgSize = (mach_msg_size_t) (sizeof(PingMsgKdata) +
	    sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize);

	if (os_add3_overflow(msgSize, offsetof(IOServiceInterestContent64, messageArgument), argSize, &thisMsgSize)) {
		return kIOReturnBadArgument;
	}
	mach_msg_size_t payloadSize = thisMsgSize - sizeof(PingMsgKdata);

	/*
	 * These rights will be consumed when the message we form below
	 * is sent by kernel_mach_msg_send_with_builder_internal(),
	 * because we make the disposition for the rights move-send.
	 */
	providerPort = iokit_port_make_send_for_object( provider, IKOT_IOKIT_OBJECT );
	thisPort = iokit_port_make_send_for_object( this, IKOT_IOKIT_OBJECT );

	kr = kernel_mach_msg_send_with_builder_internal(1, payloadSize,
	    MACH_SEND_KERNEL_IMPORTANCE, MACH_MSG_TIMEOUT_NONE, NULL,
	    ^(mach_msg_header_t *hdr, mach_msg_descriptor_t *descs, void *payload){
		mach_msg_port_descriptor_t *port_desc = (mach_msg_port_descriptor_t *)descs;
		PingMsgUdata *udata = (PingMsgUdata *)payload;
		IOServiceInterestContent64 * data;
		mach_msg_size_t dataOffset;

		hdr->msgh_remote_port = remotePort;
		hdr->msgh_local_port = thisPort;
		hdr->msgh_bits = MACH_MSGH_BITS_SET(
			MACH_MSG_TYPE_COPY_SEND /*remote*/,
			MACH_MSG_TYPE_MOVE_SEND /*local*/,
			MACH_MSG_TYPE_NONE /*voucher*/,
			MACH_MSGH_BITS_COMPLEX);
		hdr->msgh_size = thisMsgSize;
		hdr->msgh_id = kOSNotificationMessageID;

		/* body.msgh_descriptor_count is set automatically after the closure */

		port_desc[0].name = providerPort;
		port_desc[0].disposition = MACH_MSG_TYPE_MOVE_SEND;
		port_desc[0].type = MACH_MSG_PORT_DESCRIPTOR;
		/* End of kernel processed data */

		udata->notifyHeader.size = extraSize;
		udata->notifyHeader.type = type;
		bcopy( msgReference, udata->notifyHeader.reference, msgReferenceSize );

		/* data is after msgReference */
		dataOffset = sizeof(PingMsgUdata) - sizeof(OSAsyncReference64) + msgReferenceSize;
		data = (IOServiceInterestContent64 *) (((uint8_t *) udata) + dataOffset);
		data->messageType = messageType;

		if (callerArgSize == 0) {
			// By-value argument: for 32-bit clients mirror the low
			// word into the high word.
			assert((char *)data->messageArgument + argSize <= (char *)payload + payloadSize);
			data->messageArgument[0] = (io_user_reference_t) messageArgument;
			if (!clientIs64) {
				data->messageArgument[0] |= (data->messageArgument[0] << 32);
			}
		} else {
			assert((char *)data->messageArgument + callerArgSize <= (char *)payload + payloadSize);
			bcopy(messageArgument, data->messageArgument, callerArgSize);
		}
	});

	if (kr == MACH_SEND_NO_BUFFER) {
		return kIOReturnNoMemory;
	}

	// Log the first IPC failure only; further failures are silent.
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: kernel_mach_msg_send (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1296
OSObject *
IOServiceMessageUserNotification::getNextObject()
{
	// Message notifications carry no object queue; always empty.
	return NULL;
}
1302
OSObject *
IOServiceMessageUserNotification::copyNextObject()
{
	// Message notifications carry no object queue; always empty.
	return NULL;
}
1308
1309 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1310
#undef super
#define super IOService
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

// Guards the IOUserClientOwner lists that link tasks to the user
// clients they own (see registerOwner / noMoreSenders below).
IOLock * gIOUserClientOwnersLock;

// Boot-arg tunable; presumably gates power-related calls on an
// entitlement check — semantics defined at its uses, not here.
static TUNABLE(bool, gEnforcePowerEntitlement, "enforce-power-entitlement", false);

// ABI guard: the opaque ivar area must stay exactly 9 pointers wide.
static_assert(offsetof(IOUserClient, __opaque_end) -
    offsetof(IOUserClient, __opaque_start) == sizeof(void *) * 9,
    "ABI check: Opaque ivars for IOUserClient must be 9 void * big");
1322
1323 void
initialize(void)1324 IOUserClient::initialize( void )
1325 {
1326 gIOObjectPortLock = IOLockAlloc();
1327 gIOUserClientOwnersLock = IOLockAlloc();
1328 gIOUserServerLock = IOLockAlloc();
1329 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1330
1331 #if IOTRACKING
1332 IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1333 IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1334 IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1335 IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1336 #endif /* IOTRACKING */
1337 }
1338
// Legacy 32-bit async reference setter. On LP64 kernels this entry
// point is invalid and panics (hence the conditional noreturn).
void
#if __LP64__
__attribute__((__noreturn__))
#endif
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
#if __LP64__
	panic("setAsyncReference not valid for 64b");
#else
	// Slot 0 packs the wake port with pre-existing flag bits.
	asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
#endif
}
1356
1357 void
setAsyncReference64(OSAsyncReference64 asyncRef,mach_port_t wakePort,mach_vm_address_t callback,io_user_reference_t refcon)1358 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1359 mach_port_t wakePort,
1360 mach_vm_address_t callback, io_user_reference_t refcon)
1361 {
1362 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1363 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1364 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1365 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1366 }
1367
1368 void
setAsyncReference64(OSAsyncReference64 asyncRef,mach_port_t wakePort,mach_vm_address_t callback,io_user_reference_t refcon,task_t task)1369 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1370 mach_port_t wakePort,
1371 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1372 {
1373 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1374 if (vm_map_is_64bit(get_task_map(task))) {
1375 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1376 }
1377 }
1378
1379 static OSDictionary *
CopyConsoleUser(UInt32 uid)1380 CopyConsoleUser(UInt32 uid)
1381 {
1382 OSArray * array;
1383 OSDictionary * user = NULL;
1384
1385 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1386 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1387 for (unsigned int idx = 0;
1388 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1389 idx++) {
1390 OSNumber * num;
1391
1392 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1393 && (uid == num->unsigned32BitValue())) {
1394 user->retain();
1395 break;
1396 }
1397 }
1398 }
1399 OSSafeReleaseNULL(ioProperty);
1400 return user;
1401 }
1402
1403 static OSDictionary *
CopyUserOnConsole(void)1404 CopyUserOnConsole(void)
1405 {
1406 OSArray * array;
1407 OSDictionary * user = NULL;
1408
1409 OSObject * ioProperty = IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey);
1410 if ((array = OSDynamicCast(OSArray, ioProperty))) {
1411 for (unsigned int idx = 0;
1412 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1413 idx++) {
1414 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1415 user->retain();
1416 break;
1417 }
1418 }
1419 }
1420 OSSafeReleaseNULL(ioProperty);
1421 return user;
1422 }
1423
1424 IOReturn
clientHasAuthorization(task_t task,IOService * service)1425 IOUserClient::clientHasAuthorization( task_t task,
1426 IOService * service )
1427 {
1428 proc_t p;
1429
1430 p = (proc_t) get_bsdtask_info(task);
1431 if (p) {
1432 uint64_t authorizationID;
1433
1434 authorizationID = proc_uniqueid(p);
1435 if (authorizationID) {
1436 if (service->getAuthorizationID() == authorizationID) {
1437 return kIOReturnSuccess;
1438 }
1439 }
1440 }
1441
1442 return kIOReturnNotPermitted;
1443 }
1444
// Check whether the caller identified by 'securityToken' holds the
// named privilege. The meaning of securityToken varies by privilege:
// a task_t for most checks, an IOUCProcessToken for the secure-console
// check. Code unchanged; comments only.
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	kern_return_t kr;
	security_token_t token;
	mach_msg_type_number_t count;
	task_t task;
	OSDictionary * user;
	bool secureConsole;


	// Foreground: granted unless the current task is GPU-denied.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console session: the caller's audit session must match the
	// session currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// Remaining checks need the task's security token. For the
	// secure-console check the token is wrapped in an IOUCProcessToken.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
		// fall through with the task_info error
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator: effective uid 0.
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: uid appears in the console-users array.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: uid's session must be on-console; for secure
		// console the caller must also hold secure input (pid match).
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1539
1540 OSDictionary *
copyClientEntitlements(task_t task)1541 IOUserClient::copyClientEntitlements(task_t task)
1542 {
1543 proc_t p = NULL;
1544 pid_t pid = 0;
1545 OSDictionary *entitlements = NULL;
1546
1547 p = (proc_t)get_bsdtask_info(task);
1548 if (p == NULL) {
1549 return NULL;
1550 }
1551 pid = proc_pid(p);
1552
1553 if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1554 if (entitlements) {
1555 return entitlements;
1556 }
1557 }
1558
1559 // If the above fails, thats it
1560 return NULL;
1561 }
1562
1563 OSDictionary *
copyClientEntitlementsVnode(vnode_t vnode,off_t offset)1564 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1565 {
1566 OSDictionary *entitlements = NULL;
1567
1568 if (cs_entitlements_dictionary_copy_vnode(vnode, offset, (void**)&entitlements) != 0) {
1569 return NULL;
1570 }
1571 return entitlements;
1572 }
1573
1574 OSObject *
copyClientEntitlement(task_t task,const char * entitlement)1575 IOUserClient::copyClientEntitlement( task_t task,
1576 const char * entitlement )
1577 {
1578 void *entitlement_object = NULL;
1579
1580 if (task == NULL) {
1581 task = current_task();
1582 }
1583
1584 /* Validate input arguments */
1585 if (task == kernel_task || entitlement == NULL) {
1586 return NULL;
1587 }
1588 proc_t proc = (proc_t)get_bsdtask_info(task);
1589
1590 if (proc == NULL) {
1591 return NULL;
1592 }
1593
1594 kern_return_t ret = amfi->OSEntitlements.copyEntitlementAsOSObjectWithProc(
1595 proc,
1596 entitlement,
1597 &entitlement_object);
1598
1599 if (ret != KERN_SUCCESS) {
1600 return NULL;
1601 }
1602 assert(entitlement_object != NULL);
1603
1604 return (OSObject*)entitlement_object;
1605 }
1606
1607 OSObject *
copyClientEntitlementVnode(struct vnode * vnode,off_t offset,const char * entitlement)1608 IOUserClient::copyClientEntitlementVnode(
1609 struct vnode *vnode,
1610 off_t offset,
1611 const char *entitlement)
1612 {
1613 OSDictionary *entitlements;
1614 OSObject *value;
1615
1616 entitlements = copyClientEntitlementsVnode(vnode, offset);
1617 if (entitlements == NULL) {
1618 return NULL;
1619 }
1620
1621 /* Fetch the entitlement value from the dictionary. */
1622 value = entitlements->getObject(entitlement);
1623 if (value != NULL) {
1624 value->retain();
1625 }
1626
1627 entitlements->release();
1628 return value;
1629 }
1630
bool
IOUserClient::init()
{
	// NOTE(review): the short-circuit skips super::init() entirely when
	// a property table already exists — longstanding behavior, kept.
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1640
bool
IOUserClient::init(OSDictionary * dictionary)
{
	// NOTE(review): as with init(), super::init(dictionary) is skipped
	// when a property table already exists — longstanding behavior.
	if (getPropertyTable() || super::init(dictionary)) {
		return reserve();
	}

	return false;
}
1650
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type )
{
	// Base implementation ignores the task/security arguments;
	// subclasses that care override this.
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1662
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type,
    OSDictionary * properties )
{
	bool ok;

	// NOTE(review): '&=' deliberately does not short-circuit, so
	// initWithTask() runs even if super::init() failed — kept as-is.
	ok = super::init( properties );
	ok &= initWithTask( owningTask, securityID, type );

	return ok;
}
1676
bool
IOUserClient::reserve()
{
	// Lazily allocate the expansion data, then set up per-instance
	// state: statistics counter and the inline rw/filter locks.
	if (!reserved) {
		reserved = IOMallocType(ExpansionData);
	}
	setTerminateDefer(NULL, true);
	IOStatisticsRegisterCounter();
	IORWLockInlineInit(&lock);
	IOLockInlineInit(&filterLock);

	return true;
}
1690
// Link record tying one task to one IOUserClient; threaded onto both
// the task's list (taskLink) and the client's list (ucLink). All
// manipulation happens under gIOUserClientOwnersLock.
struct IOUserClientOwner {
	task_t task;            // owning task
	queue_chain_t taskLink; // chain on task_io_user_clients(task)
	IOUserClient * uc;      // owned user client
	queue_chain_t ucLink;   // chain on uc->owners
};
1697
// Record 'task' as an owner of this user client (idempotent per task).
// Lazily initializes the owners queue on first use; code unchanged.
IOReturn
IOUserClient::registerOwner(task_t task)
{
	IOUserClientOwner * owner;
	IOReturn ret;
	bool newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	if (!owners.next) {
		// First owner: the queue head is initialized on demand.
		queue_init(&owners);
	} else {
		// Already an owner? Then nothing to add.
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		// Thread the new link onto both this client's list and the
		// task's list, and propagate the app-suspended interest flag.
		owner = IOMallocType(IOUserClientOwner);

		owner->task = task;
		owner->uc = this;
		queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
		queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
		if (messageAppSuspended) {
			task_set_message_app_suspended(task, true);
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1738
// Called when the client's last send right is gone: unlink every owner
// record, recomputing each task's app-suspended interest from its
// remaining clients. Code unchanged; comments only.
void
IOUserClient::noMoreSenders(void)
{
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t * taskque;
	bool hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			// The task keeps its app-suspended flag only if another
			// of its clients still wants those messages.
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IOFreeType(owner, IOUserClientOwner);
		}
		// Mark the queue uninitialized again (see registerOwner()).
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1770
1771
// Notify every user client of 'task' that opted in (messageAppSuspended)
// that the task's app-suspension state changed. Clients are collected
// under the lock but messaged outside it. Code unchanged.
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	queue_head_t * taskque;
	IOUserClientOwner * owner;
	OSSet * set;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		if (!set) {
			// Allocated lazily; on failure we simply notify no one.
			set = OSSet::withCapacity(4);
			if (!set) {
				break;
			}
		}
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient * uc;

			uc = (typeof(uc))obj;
#if 0
			{
				OSString * str;
				str = IOCopyLogNameForPID(task_pid(task));
				IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
				uc->getName(), task_is_app_suspended(task));
				OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1819
1820 static kern_return_t
iokit_task_terminate_phase1(task_t task)1821 iokit_task_terminate_phase1(task_t task)
1822 {
1823 queue_head_t * taskque;
1824 IOUserClientOwner * iter;
1825 OSSet * userServers = NULL;
1826
1827 if (!task_is_driver(task)) {
1828 return KERN_SUCCESS;
1829 }
1830 userServers = OSSet::withCapacity(1);
1831
1832 IOLockLock(gIOUserClientOwnersLock);
1833
1834 taskque = task_io_user_clients(task);
1835 queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
1836 userServers->setObject(iter->uc);
1837 }
1838 IOLockUnlock(gIOUserClientOwnersLock);
1839
1840 if (userServers) {
1841 IOUserServer * userServer;
1842 while ((userServer = OSRequiredCast(IOUserServer, userServers->getAnyObject()))) {
1843 userServer->clientDied();
1844 userServers->removeObject(userServer);
1845 }
1846 userServers->release();
1847 }
1848 return KERN_SUCCESS;
1849 }
1850
// Phase 2 of task termination: unlink all of the task's owner records;
// clients left with no owners are chained into an intrusive singly
// linked "dead" list (reusing their queue head) and closed outside the
// lock. Code unchanged; comments only.
static kern_return_t
iokit_task_terminate_phase2(task_t task)
{
	queue_head_t * taskque;
	IOUserClientOwner * owner;
	IOUserClient * dead;
	IOUserClient * uc;

	IOLockLock(gIOUserClientOwnersLock);
	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IOFreeType(owner, IOUserClientOwner);
	}
	IOLockUnlock(gIOUserClientOwnersLock);

	// Close each ownerless client outside the lock, dropping the
	// retain taken above.
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1892
1893 extern "C" kern_return_t
iokit_task_terminate(task_t task,int phase)1894 iokit_task_terminate(task_t task, int phase)
1895 {
1896 switch (phase) {
1897 case 1:
1898 return iokit_task_terminate_phase1(task);
1899 case 2:
1900 return iokit_task_terminate_phase2(task);
1901 default:
1902 panic("iokit_task_terminate phase %d", phase);
1903 }
1904 }
1905
// Per-task IPC filter policy record; singly linked off
// reserved->filterPolicies and protected by filterLock.
struct IOUCFilterPolicy {
	task_t task;                    // task the policy applies to
	io_filter_policy_t filterPolicy; // opaque policy handle
	IOUCFilterPolicy * next;        // next record in the list
};
1911
// Return the filter policy recorded for 'task', optionally installing
// 'addFilterPolicy' when none exists yet. An existing entry must match
// the one being added. Code unchanged; comments only.
io_filter_policy_t
IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
{
	IOUCFilterPolicy * elem;
	io_filter_policy_t filterPolicy;

	filterPolicy = 0;
	IOLockLock(&filterLock);

	// Linear scan for this task's entry.
	for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
	}

	if (elem) {
		if (addFilterPolicy) {
			assert(addFilterPolicy == elem->filterPolicy);
		}
		filterPolicy = elem->filterPolicy;
	} else if (addFilterPolicy) {
		// Not found: prepend a new record for this task.
		elem = IOMallocType(IOUCFilterPolicy);
		elem->task = task;
		elem->filterPolicy = addFilterPolicy;
		elem->next = reserved->filterPolicies;
		reserved->filterPolicies = elem;
		filterPolicy = addFilterPolicy;
	}

	IOLockUnlock(&filterLock);
	return filterPolicy;
}
1941
// Destructor: releases mappings, statistics, filter policies and the
// inline locks. The owners queue must already have been drained (see
// noMoreSenders / iokit_task_terminate_phase2). Code unchanged.
void
IOUserClient::free()
{
	if (mappings) {
		mappings->release();
	}

	IOStatisticsUnregisterCounter();

	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		// Walk and free the per-task filter policy list, releasing
		// each policy through the filter callbacks when available.
		IOUCFilterPolicy * elem;
		IOUCFilterPolicy * nextElem;
		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
			nextElem = elem->next;
			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
			}
			IOFreeType(elem, IOUCFilterPolicy);
		}
		IOFreeType(reserved, ExpansionData);
		IORWLockInlineDestroy(&lock);
		IOLockInlineDestroy(&filterLock);
	}

	super::free();
}
1971
1972 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1973
// OSMetaClass boilerplate for the abstract IOUserClient2022 subclass.
OSDefineMetaClassAndAbstractStructors( IOUserClient2022, IOUserClient )
1975
1976
1977 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1978
IOReturn
IOUserClient::clientDied( void )
{
	IOReturn ret = kIOReturnNotReady;

	// Shared instances may close repeatedly; otherwise the atomic
	// 0->1 swap of 'closed' guarantees clientClose() runs only once.
	if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
		ret = clientClose();
	}

	return ret;
}
1990
IOReturn
IOUserClient::clientClose( void )
{
	// Default implementation; subclasses override to tear down state.
	return kIOReturnUnsupported;
}
1996
IOService *
IOUserClient::getService( void )
{
	// Default implementation; subclasses return their provider.
	return NULL;
}
2002
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t /* port */,
	UInt32 /* type */,
	UInt32 /* refCon */)
{
	// Default implementation; subclasses that deliver notifications
	// override this.
	return kIOReturnUnsupported;
}
2011
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t port,
	UInt32 type,
	io_user_reference_t refCon)
{
	// 64-bit refCon variant bridges to the legacy entry point; note
	// the refCon is truncated to 32 bits for old subclasses.
	return registerNotificationPort(port, type, (UInt32) refCon);
}
2020
IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type,
    semaphore_t * semaphore )
{
	// Default implementation; subclasses may export a semaphore.
	return kIOReturnUnsupported;
}
2027
IOReturn
IOUserClient::connectClient( IOUserClient * /* client */ )
{
	// Default implementation; subclasses supporting cross-connect
	// override this.
	return kIOReturnUnsupported;
}
2033
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    IOMemoryDescriptor ** memory )
{
	// Default implementation; subclasses exporting shared memory
	// override this and return a retained descriptor.
	return kIOReturnUnsupported;
}
2041
2042 IOReturn
clientMemoryForType(UInt32 type,IOOptionBits * options,OSSharedPtr<IOMemoryDescriptor> & memory)2043 IOUserClient::clientMemoryForType( UInt32 type,
2044 IOOptionBits * options,
2045 OSSharedPtr<IOMemoryDescriptor>& memory )
2046 {
2047 IOMemoryDescriptor* memoryRaw = nullptr;
2048 IOReturn result = clientMemoryForType(type, options, &memoryRaw);
2049 memory.reset(memoryRaw, OSNoRetain);
2050 return result;
2051 }
2052
#if !__LP64__
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	IOVirtualAddress atAddress )
{
	// Legacy 32-bit entry point; superseded by mapClientMemory64().
	return NULL;
}
#endif
2064
// Map the memory the subclass exports for 'type' into 'task'. The
// descriptor returned by clientMemoryForType() is consumed; the
// caller's map flags override the user-options bits. Code unchanged.
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	mach_vm_address_t atAddress )
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map = NULL;

	err = clientMemoryForType((UInt32) type, &options, &memory );

	// Refuse descriptors that carry a sharing context.
	if ((kIOReturnSuccess == err) && memory && !memory->hasSharingContext()) {
		FAKE_STACK_FRAME(getMetaClass());

		// Caller-supplied flags replace the user-options portion.
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		memory->release();

		FAKE_STACK_FRAME_END();
	}

	return map;
}
2092
2093 IOReturn
exportObjectToClient(task_t task,OSObject * obj,io_object_t * clientObj)2094 IOUserClient::exportObjectToClient(task_t task,
2095 OSObject *obj, io_object_t *clientObj)
2096 {
2097 mach_port_name_t name;
2098
2099 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
2100
2101 *clientObj = (io_object_t)(uintptr_t) name;
2102
2103 if (obj) {
2104 obj->release();
2105 }
2106
2107 return kIOReturnSuccess;
2108 }
2109
2110 IOReturn
copyPortNameForObjectInTask(task_t task,OSObject * obj,mach_port_name_t * port_name)2111 IOUserClient::copyPortNameForObjectInTask(task_t task,
2112 OSObject *obj, mach_port_name_t * port_name)
2113 {
2114 mach_port_name_t name;
2115
2116 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
2117
2118 *(mach_port_name_t *) port_name = name;
2119
2120 return kIOReturnSuccess;
2121 }
2122
2123 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSObject ** obj)2124 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2125 OSObject **obj)
2126 {
2127 OSObject * object;
2128
2129 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
2130
2131 *obj = object;
2132
2133 return object ? kIOReturnSuccess : kIOReturnIPCError;
2134 }
2135
2136 IOReturn
copyObjectForPortNameInTask(task_t task,mach_port_name_t port_name,OSSharedPtr<OSObject> & obj)2137 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2138 OSSharedPtr<OSObject>& obj)
2139 {
2140 OSObject* objRaw = NULL;
2141 IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2142 obj.reset(objRaw, OSNoRetain);
2143 return result;
2144 }
2145
IOReturn
IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
	// Adjust the send-right user-reference count for a port name in
	// the given task by 'delta'.
	return iokit_mod_send_right(task, port_name, delta);
}
2151
IOExternalMethod *
IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
	// Legacy dispatch table hook; default has no methods.
	return NULL;
}
2157
IOExternalAsyncMethod *
IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
	// Legacy async dispatch table hook; default has no methods.
	return NULL;
}
2163
IOExternalTrap *
IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
	// Legacy trap table hook; default has no traps.
	return NULL;
}
2170
2171 #pragma clang diagnostic push
2172 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2173
2174 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
2175 // functions can break clients of kexts implementing getExternalMethodForIndex()
2176 IOExternalMethod *
2177 IOUserClient::
getTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2178 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
2179 {
2180 IOExternalMethod *method = getExternalMethodForIndex(index);
2181
2182 if (method) {
2183 *targetP = (IOService *) method->object;
2184 }
2185
2186 return method;
2187 }
2188
2189 IOExternalMethod *
2190 IOUserClient::
getTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2191 getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2192 {
2193 IOService* targetPRaw = NULL;
2194 IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
2195 targetP.reset(targetPRaw, OSRetain);
2196 return result;
2197 }
2198
2199 IOExternalAsyncMethod *
2200 IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP,UInt32 index)2201 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
2202 {
2203 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
2204
2205 if (method) {
2206 *targetP = (IOService *) method->object;
2207 }
2208
2209 return method;
2210 }
2211
2212 IOExternalAsyncMethod *
2213 IOUserClient::
getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService> & targetP,UInt32 index)2214 getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
2215 {
2216 IOService* targetPRaw = NULL;
2217 IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
2218 targetP.reset(targetPRaw, OSRetain);
2219 return result;
2220 }
2221
2222 IOExternalTrap *
2223 IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP,UInt32 index)2224 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
2225 {
2226 IOExternalTrap *trap = getExternalTrapForIndex(index);
2227
2228 if (trap) {
2229 *targetP = trap->object;
2230 }
2231
2232 return trap;
2233 }
2234 #pragma clang diagnostic pop
2235
2236 IOReturn
releaseAsyncReference64(OSAsyncReference64 reference)2237 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2238 {
2239 mach_port_t port;
2240 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2241
2242 if (MACH_PORT_NULL != port) {
2243 iokit_release_port_send(port);
2244 }
2245
2246 return kIOReturnSuccess;
2247 }
2248
2249 IOReturn
releaseNotificationPort(mach_port_t port)2250 IOUserClient::releaseNotificationPort(mach_port_t port)
2251 {
2252 if (MACH_PORT_NULL != port) {
2253 iokit_release_port_send(port);
2254 }
2255
2256 return kIOReturnSuccess;
2257 }
2258
2259 IOReturn
sendAsyncResult(OSAsyncReference reference,IOReturn result,void * args[],UInt32 numArgs)2260 IOUserClient::sendAsyncResult(OSAsyncReference reference,
2261 IOReturn result, void *args[], UInt32 numArgs)
2262 {
2263 OSAsyncReference64 reference64;
2264 OSBoundedArray<io_user_reference_t, kMaxAsyncArgs> args64;
2265 unsigned int idx;
2266
2267 if (numArgs > kMaxAsyncArgs) {
2268 return kIOReturnMessageTooLarge;
2269 }
2270
2271 for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2272 reference64[idx] = REF64(reference[idx]);
2273 }
2274
2275 for (idx = 0; idx < numArgs; idx++) {
2276 args64[idx] = REF64(args[idx]);
2277 }
2278
2279 return sendAsyncResult64(reference64, result, args64.data(), numArgs);
2280 }
2281
// Send an async completion with explicit options (e.g. kIOUserNotifyOptionCanDrop
// to allow the reply to be dropped on a full queue rather than failing).
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2288
// Send an async completion with default options (no drop-on-full-queue).
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2295
/*
 * Common implementation for sendAsyncResult64 / sendAsyncResult64WithOptions:
 * build an async-completion mach message and send it from the kernel to the
 * reply port stashed in reference[0].
 *
 * reference[0] packs the reply port with flag bits (kIOUCAsync0Flags):
 * kIOUCAsync64Flag selects the 64-bit message layout, and
 * kIOUCAsyncErrorLoggedFlag marks that a send failure was already logged.
 */
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Reply message: mach header followed by either the 32-bit or the
	// 64-bit notification layout, selected at send time.
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64 notifyHdr;
				IOAsyncCompletionContent asyncContent;
				io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg replyMsg;
	mach_port_t replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port = NULL;
	replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit client: args are io_user_reference_t; the message size
		// is trimmed to exclude the unused trailing argument slots.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		// 32-bit client: reference entries and args are narrowed with
		// REF32 into the 32-bit layout.
		unsigned int idx;

		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		/* Skip reference[0] which is left as 0 from the earlier bzero */
		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		// Caller tolerates drops: a zero send timeout makes a full
		// queue return MACH_SEND_TIMED_OUT instead of failing hard.
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH64_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel(&replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	// Log the first real failure per reference; timed-out sends (allowed
	// drops) are not logged.
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2386
2387
2388 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2389
2390 extern "C" {
/*
 * CHECK: declare a local `cls * out` initialized by down-casting `obj`;
 * returns kIOReturnBadArgument from the *enclosing* function when `obj`
 * is not an instance of `cls`.
 */
#define CHECK(cls, obj, out)                      \
	cls * out;                                \
	if( !(out = OSDynamicCast( cls, obj)))    \
	    return( kIOReturnBadArgument )

/*
 * CHECKLOCKED: like CHECK, but `obj` must be an IOUserIterator wrapping an
 * instance of `cls`. Declares both `oIter` (the wrapper) and `out` (the
 * wrapped object); returns kIOReturnBadArgument on either failed cast.
 */
#define CHECKLOCKED(cls, obj, out)                              \
	IOUserIterator * oIter;                                 \
	cls * out;                                              \
	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))      \
	    return (kIOReturnBadArgument);                      \
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject)))  \
	    return (kIOReturnBadArgument)
2403
2404 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2405
2406 // Create a vm_map_copy_t or kalloc'ed data for memory
2407 // to be copied out. ipc will free after the copyout.
2408
2409 static kern_return_t
copyoutkdata(const void * data,vm_size_t len,io_buf_ptr_t * buf)2410 copyoutkdata( const void * data, vm_size_t len,
2411 io_buf_ptr_t * buf )
2412 {
2413 kern_return_t err;
2414 vm_map_copy_t copy;
2415
2416 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2417 false /* src_destroy */, ©);
2418
2419 assert( err == KERN_SUCCESS );
2420 if (err == KERN_SUCCESS) {
2421 *buf = (char *) copy;
2422 }
2423
2424 return err;
2425 }
2426
2427 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2428
2429 /* Routine io_server_version */
kern_return_t
is_io_server_version(
	mach_port_t main_port,
	uint64_t *version)
{
	// Report the kernel's IOKit server interface version. `main_port` is
	// unused; no privilege check is performed for this query.
	*version = IOKIT_SERVER_VERSION;
	return kIOReturnSuccess;
}
2438
2439 /* Routine io_object_get_class */
2440 kern_return_t
is_io_object_get_class(io_object_t object,io_name_t className)2441 is_io_object_get_class(
2442 io_object_t object,
2443 io_name_t className )
2444 {
2445 const OSMetaClass* my_obj = NULL;
2446
2447 if (!object) {
2448 return kIOReturnBadArgument;
2449 }
2450
2451 my_obj = object->getMetaClass();
2452 if (!my_obj) {
2453 return kIOReturnNotFound;
2454 }
2455
2456 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2457
2458 return kIOReturnSuccess;
2459 }
2460
2461 /* Routine io_object_get_superclass */
2462 kern_return_t
is_io_object_get_superclass(mach_port_t main_port,io_name_t obj_name,io_name_t class_name)2463 is_io_object_get_superclass(
2464 mach_port_t main_port,
2465 io_name_t obj_name,
2466 io_name_t class_name)
2467 {
2468 IOReturn ret;
2469 const OSMetaClass * meta;
2470 const OSMetaClass * super;
2471 const OSSymbol * name;
2472 const char * cstr;
2473
2474 if (!obj_name || !class_name) {
2475 return kIOReturnBadArgument;
2476 }
2477 if (main_port != main_device_port) {
2478 return kIOReturnNotPrivileged;
2479 }
2480
2481 ret = kIOReturnNotFound;
2482 meta = NULL;
2483 do{
2484 name = OSSymbol::withCString(obj_name);
2485 if (!name) {
2486 break;
2487 }
2488 meta = OSMetaClass::copyMetaClassWithName(name);
2489 if (!meta) {
2490 break;
2491 }
2492 super = meta->getSuperClass();
2493 if (!super) {
2494 break;
2495 }
2496 cstr = super->getClassName();
2497 if (!cstr) {
2498 break;
2499 }
2500 strlcpy(class_name, cstr, sizeof(io_name_t));
2501 ret = kIOReturnSuccess;
2502 }while (false);
2503
2504 OSSafeReleaseNULL(name);
2505 if (meta) {
2506 meta->releaseMetaClass();
2507 }
2508
2509 return ret;
2510 }
2511
2512 /* Routine io_object_get_bundle_identifier */
2513 kern_return_t
is_io_object_get_bundle_identifier(mach_port_t main_port,io_name_t obj_name,io_name_t bundle_name)2514 is_io_object_get_bundle_identifier(
2515 mach_port_t main_port,
2516 io_name_t obj_name,
2517 io_name_t bundle_name)
2518 {
2519 IOReturn ret;
2520 const OSMetaClass * meta;
2521 const OSSymbol * name;
2522 const OSSymbol * identifier;
2523 const char * cstr;
2524
2525 if (!obj_name || !bundle_name) {
2526 return kIOReturnBadArgument;
2527 }
2528 if (main_port != main_device_port) {
2529 return kIOReturnNotPrivileged;
2530 }
2531
2532 ret = kIOReturnNotFound;
2533 meta = NULL;
2534 do{
2535 name = OSSymbol::withCString(obj_name);
2536 if (!name) {
2537 break;
2538 }
2539 meta = OSMetaClass::copyMetaClassWithName(name);
2540 if (!meta) {
2541 break;
2542 }
2543 identifier = meta->getKmodName();
2544 if (!identifier) {
2545 break;
2546 }
2547 cstr = identifier->getCStringNoCopy();
2548 if (!cstr) {
2549 break;
2550 }
2551 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2552 ret = kIOReturnSuccess;
2553 }while (false);
2554
2555 OSSafeReleaseNULL(name);
2556 if (meta) {
2557 meta->releaseMetaClass();
2558 }
2559
2560 return ret;
2561 }
2562
2563 /* Routine io_object_conforms_to */
2564 kern_return_t
is_io_object_conforms_to(io_object_t object,io_name_t className,boolean_t * conforms)2565 is_io_object_conforms_to(
2566 io_object_t object,
2567 io_name_t className,
2568 boolean_t *conforms )
2569 {
2570 if (!object) {
2571 return kIOReturnBadArgument;
2572 }
2573
2574 *conforms = (NULL != object->metaCast( className ));
2575
2576 return kIOReturnSuccess;
2577 }
2578
2579 /* Routine io_object_get_retain_count */
2580 kern_return_t
is_io_object_get_retain_count(io_object_t object,uint32_t * retainCount)2581 is_io_object_get_retain_count(
2582 io_object_t object,
2583 uint32_t *retainCount )
2584 {
2585 if (!object) {
2586 return kIOReturnBadArgument;
2587 }
2588
2589 *retainCount = object->getRetainCount();
2590 return kIOReturnSuccess;
2591 }
2592
2593 /* Routine io_iterator_next */
2594 kern_return_t
is_io_iterator_next(io_object_t iterator,io_object_t * object)2595 is_io_iterator_next(
2596 io_object_t iterator,
2597 io_object_t *object )
2598 {
2599 IOReturn ret;
2600 OSObject * obj;
2601 OSIterator * iter;
2602 IOUserIterator * uiter;
2603
2604 if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2605 obj = uiter->copyNextObject();
2606 } else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2607 obj = iter->getNextObject();
2608 if (obj) {
2609 obj->retain();
2610 }
2611 } else {
2612 return kIOReturnBadArgument;
2613 }
2614
2615 if (obj) {
2616 *object = obj;
2617 ret = kIOReturnSuccess;
2618 } else {
2619 ret = kIOReturnNoDevice;
2620 }
2621
2622 return ret;
2623 }
2624
2625 /* Routine io_iterator_reset */
kern_return_t
is_io_iterator_reset(
	io_object_t iterator )
{
	// CHECK declares `iter` and returns kIOReturnBadArgument when
	// `iterator` is not an OSIterator.
	CHECK( OSIterator, iterator, iter );

	// Rewind the iteration to its start.
	iter->reset();

	return kIOReturnSuccess;
}
2636
2637 /* Routine io_iterator_is_valid */
kern_return_t
is_io_iterator_is_valid(
	io_object_t iterator,
	boolean_t *is_valid )
{
	// CHECK declares `iter` and returns kIOReturnBadArgument when
	// `iterator` is not an OSIterator.
	CHECK( OSIterator, iterator, iter );

	// Valid == the underlying collection has not changed since iteration
	// began (per OSIterator::isValid).
	*is_valid = iter->isValid();

	return kIOReturnSuccess;
}
2649
2650 static kern_return_t
internal_io_service_match_property_table(io_service_t _service,const char * matching,mach_msg_type_number_t matching_size,boolean_t * matches)2651 internal_io_service_match_property_table(
2652 io_service_t _service,
2653 const char * matching,
2654 mach_msg_type_number_t matching_size,
2655 boolean_t *matches)
2656 {
2657 CHECK( IOService, _service, service );
2658
2659 kern_return_t kr;
2660 OSObject * obj;
2661 OSDictionary * dict;
2662
2663 assert(matching_size);
2664
2665
2666 obj = OSUnserializeXML(matching, matching_size);
2667
2668 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2669 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2670 *matches = service->passiveMatch( dict );
2671 kr = kIOReturnSuccess;
2672 } else {
2673 kr = kIOReturnBadArgument;
2674 }
2675
2676 if (obj) {
2677 obj->release();
2678 }
2679
2680 return kr;
2681 }
2682
2683 /* Routine io_service_match_property_table */
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	// In-band string matching is no longer supported; callers use the
	// _bin / _ool variants instead.
	return kIOReturnUnsupported;
}
2692
2693
2694 /* Routine io_service_match_property_table_ool */
kern_return_t
is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Map the out-of-line matching data into the kernel map; on success we
	// own it and must vm_deallocate() it below.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// (the match outcome is reported via *result instead)
		*result = internal_io_service_match_property_table(service,
		    (const char *)data, matchingCnt, matches );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2719
2720 /* Routine io_service_match_property_table_bin */
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	// In-band variant: matching data arrives inline in the MIG message.
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2730
2731 static kern_return_t
internal_io_service_get_matching_services(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_iterator_t * existing)2732 internal_io_service_get_matching_services(
2733 mach_port_t main_port,
2734 const char * matching,
2735 mach_msg_type_number_t matching_size,
2736 io_iterator_t *existing )
2737 {
2738 kern_return_t kr;
2739 OSObject * obj;
2740 OSDictionary * dict;
2741
2742 if (main_port != main_device_port) {
2743 return kIOReturnNotPrivileged;
2744 }
2745
2746 assert(matching_size);
2747 obj = OSUnserializeXML(matching, matching_size);
2748
2749 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2750 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2751 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2752 kr = kIOReturnSuccess;
2753 } else {
2754 kr = kIOReturnBadArgument;
2755 }
2756
2757 if (obj) {
2758 obj->release();
2759 }
2760
2761 return kr;
2762 }
2763
2764 /* Routine io_service_get_matching_services */
kern_return_t
is_io_service_get_matching_services(
	mach_port_t main_port,
	io_string_t matching,
	io_iterator_t *existing )
{
	// In-band string matching is no longer supported; callers use the
	// _bin / _ool variants instead.
	return kIOReturnUnsupported;
}
2773
2774 /* Routine io_service_get_matching_services_ool */
kern_return_t
is_io_service_get_matching_services_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Map the out-of-line matching data into the kernel map; on success we
	// own it and must vm_deallocate() it below.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*existing = NULL;
		*result = internal_io_service_get_matching_services(main_port,
		    (const char *) data, matchingCnt, existing);
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2801
2802 /* Routine io_service_get_matching_services_bin */
kern_return_t
is_io_service_get_matching_services_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
	// In-band variant: matching data arrives inline in the MIG message.
	return internal_io_service_get_matching_services(main_port, matching, matchingCnt, existing);
}
2812
2813
2814 static kern_return_t
internal_io_service_get_matching_service(mach_port_t main_port,const char * matching,mach_msg_type_number_t matching_size,io_service_t * service)2815 internal_io_service_get_matching_service(
2816 mach_port_t main_port,
2817 const char * matching,
2818 mach_msg_type_number_t matching_size,
2819 io_service_t *service )
2820 {
2821 kern_return_t kr;
2822 OSObject * obj;
2823 OSDictionary * dict;
2824
2825 if (main_port != main_device_port) {
2826 return kIOReturnNotPrivileged;
2827 }
2828
2829 assert(matching_size);
2830 obj = OSUnserializeXML(matching, matching_size);
2831
2832 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2833 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2834 *service = IOService::copyMatchingService( dict );
2835 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2836 } else {
2837 kr = kIOReturnBadArgument;
2838 }
2839
2840 if (obj) {
2841 obj->release();
2842 }
2843
2844 return kr;
2845 }
2846
2847 /* Routine io_service_get_matching_service */
kern_return_t
is_io_service_get_matching_service(
	mach_port_t main_port,
	io_string_t matching,
	io_service_t *service )
{
	// In-band string matching is no longer supported; callers use the
	// _bin / _ool variants instead.
	return kIOReturnUnsupported;
}
2856
2857 /* Routine io_service_get_matching_services_ool */
kern_return_t
is_io_service_get_matching_service_ool(
	mach_port_t main_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Map the out-of-line matching data into the kernel map; on success we
	// own it and must vm_deallocate() it below.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*service = NULL;
		*result = internal_io_service_get_matching_service(main_port,
		    (const char *) data, matchingCnt, service );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2884
2885 /* Routine io_service_get_matching_service_bin */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t main_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	// In-band variant: matching data arrives inline in the MIG message.
	return internal_io_service_get_matching_service(main_port, matching, matchingCnt, service);
}
2895
/*
 * Common implementation for the io_service_add_notification* MIG routines:
 * unserialize the matching dictionary, wrap the caller's wake port and async
 * reference in an IOServiceUserNotification, and arm it with
 * IOService::addMatchingNotification(). On success *notification receives
 * the (retained) user-notification object; on failure everything partially
 * constructed is torn down.
 */
static kern_return_t
internal_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	const char * matching,
	size_t matching_size,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceUserNotification * userNotify = NULL;
	IONotifier * notify = NULL;
	const OSSymbol * sym;
	OSObject * obj;
	OSDictionary * dict;
	IOReturn err;
	natural_t userMsgType;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	do {
		err = kIOReturnNoResources;

		// Cap the matching-dictionary size before unserializing.
		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		if (!(sym = OSSymbol::withCString( notification_type ))) {
			// NOTE(review): there is no `continue` here, so a NULL
			// sym falls through; none of the symbol comparisons
			// below will match and userMsgType becomes
			// kLastIOKitNotificationType — confirm this
			// fall-through is intentional.
			err = kIOReturnNoResources;
		}

		assert(matching_size);
		obj = OSUnserializeXML(matching, matching_size);
		dict = OSDynamicCast(OSDictionary, obj);
		if (!dict) {
			err = kIOReturnBadArgument;
			continue;
		}
		IOTaskRegistryCompatibilityMatching(current_task(), dict);

		// Map the notification-type symbol to the message type the
		// client will see on its wake port.
		if ((sym == gIOPublishNotification)
		    || (sym == gIOFirstPublishNotification)) {
			userMsgType = kIOServicePublishNotificationType;
		} else if ((sym == gIOMatchedNotification)
		    || (sym == gIOFirstMatchNotification)) {
			userMsgType = kIOServiceMatchedNotificationType;
		} else if ((sym == gIOTerminatedNotification)
		    || (sym == gIOWillTerminateNotification)) {
			userMsgType = kIOServiceTerminatedNotificationType;
		} else {
			userMsgType = kLastIOKitNotificationType;
		}

		userNotify = new IOServiceUserNotification;

		if (userNotify && !userNotify->init( port, userMsgType,
		    reference, referenceSize, client64)) {
			userNotify->release();
			userNotify = NULL;
		}
		if (!userNotify) {
			continue;
		}

		// Arm the matching notification; userNotify->_handler will fire
		// for each matching service.
		notify = IOService::addMatchingNotification( sym, dict,
		    &userNotify->_handler, userNotify );
		if (notify) {
			*notification = userNotify;
			userNotify->setNotification( notify );
			err = kIOReturnSuccess;
		} else {
			err = kIOReturnUnsupported;
		}
	} while (false);

	// On failure, tear down the partially constructed user notification.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	if (sym) {
		sym->release();
	}
	if (obj) {
		obj->release();
	}

	return err;
}
2991
2992
2993 /* Routine io_service_add_notification */
kern_return_t
is_io_service_add_notification(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	// In-band string matching is no longer supported; callers use the
	// _bin / _ool variants instead.
	return kIOReturnUnsupported;
}
3006
3007 /* Routine io_service_add_notification_64 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	// In-band string matching is no longer supported; callers use the
	// _bin / _ool variants instead.
	return kIOReturnUnsupported;
}
3020
3021 /* Routine io_service_add_notification_bin */
3022 kern_return_t
is_io_service_add_notification_bin(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3023 is_io_service_add_notification_bin
3024 (
3025 mach_port_t main_port,
3026 io_name_t notification_type,
3027 io_struct_inband_t matching,
3028 mach_msg_type_number_t matchingCnt,
3029 mach_port_t wake_port,
3030 io_async_ref_t reference,
3031 mach_msg_type_number_t referenceCnt,
3032 io_object_t *notification)
3033 {
3034 io_async_ref_t zreference;
3035
3036 if (referenceCnt > ASYNC_REF_COUNT) {
3037 return kIOReturnBadArgument;
3038 }
3039 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3040 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3041
3042 return internal_io_service_add_notification(main_port, notification_type,
3043 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3044 false, notification);
3045 }
3046
3047 /* Routine io_service_add_notification_bin_64 */
3048 kern_return_t
is_io_service_add_notification_bin_64(mach_port_t main_port,io_name_t notification_type,io_struct_inband_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3049 is_io_service_add_notification_bin_64
3050 (
3051 mach_port_t main_port,
3052 io_name_t notification_type,
3053 io_struct_inband_t matching,
3054 mach_msg_type_number_t matchingCnt,
3055 mach_port_t wake_port,
3056 io_async_ref64_t reference,
3057 mach_msg_type_number_t referenceCnt,
3058 io_object_t *notification)
3059 {
3060 io_async_ref64_t zreference;
3061
3062 if (referenceCnt > ASYNC_REF64_COUNT) {
3063 return kIOReturnBadArgument;
3064 }
3065 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3066 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3067
3068 return internal_io_service_add_notification(main_port, notification_type,
3069 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3070 true, notification);
3071 }
3072
/*
 * Common implementation for the OOL add_notification variants: copy the
 * out-of-line matching data into the kernel map, forward to
 * internal_io_service_add_notification(), then release the copied data.
 */
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t main_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Map the out-of-line matching data into the kernel map; on success we
	// own it and must vm_deallocate() it below.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		*result = internal_io_service_add_notification( main_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
3104
3105 /* Routine io_service_add_notification_ool */
3106 kern_return_t
is_io_service_add_notification_ool(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3107 is_io_service_add_notification_ool(
3108 mach_port_t main_port,
3109 io_name_t notification_type,
3110 io_buf_ptr_t matching,
3111 mach_msg_type_number_t matchingCnt,
3112 mach_port_t wake_port,
3113 io_async_ref_t reference,
3114 mach_msg_type_number_t referenceCnt,
3115 kern_return_t *result,
3116 io_object_t *notification )
3117 {
3118 io_async_ref_t zreference;
3119
3120 if (referenceCnt > ASYNC_REF_COUNT) {
3121 return kIOReturnBadArgument;
3122 }
3123 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3124 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3125
3126 return internal_io_service_add_notification_ool(main_port, notification_type,
3127 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3128 false, result, notification);
3129 }
3130
3131 /* Routine io_service_add_notification_ool_64 */
3132 kern_return_t
is_io_service_add_notification_ool_64(mach_port_t main_port,io_name_t notification_type,io_buf_ptr_t matching,mach_msg_type_number_t matchingCnt,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,kern_return_t * result,io_object_t * notification)3133 is_io_service_add_notification_ool_64(
3134 mach_port_t main_port,
3135 io_name_t notification_type,
3136 io_buf_ptr_t matching,
3137 mach_msg_type_number_t matchingCnt,
3138 mach_port_t wake_port,
3139 io_async_ref64_t reference,
3140 mach_msg_type_number_t referenceCnt,
3141 kern_return_t *result,
3142 io_object_t *notification )
3143 {
3144 io_async_ref64_t zreference;
3145
3146 if (referenceCnt > ASYNC_REF64_COUNT) {
3147 return kIOReturnBadArgument;
3148 }
3149 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3150 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3151
3152 return internal_io_service_add_notification_ool(main_port, notification_type,
3153 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3154 true, result, notification);
3155 }
3156
3157 /* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t main_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	// Legacy single-reference entry point. It forwards to
	// is_io_service_add_notification, which now always returns
	// kIOReturnUnsupported, so this routine does too.
	return is_io_service_add_notification( main_port, notification_type,
	    matching, port, &ref, 1, notification );
}
3171
/*
 * Common implementation for io_service_add_interest_notification and its
 * 64-bit variant: registers a message-based interest notification on
 * `_service` and returns the wrapping user-notification object in
 * *notification.  `reference`/`referenceSize` is the caller's opaque async
 * reference blob (already zero-padded by the callers); `client64` records
 * whether the client uses 64-bit async references.
 */
static kern_return_t
internal_io_service_add_interest_notification(
	io_object_t _service,
	io_name_t type_of_interest,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceMessageUserNotification * userNotify = NULL;
	IONotifier * notify = NULL;
	const OSSymbol * sym;
	IOReturn err;

	// CHECK declares `service`, failing if _service is not an IOService.
	CHECK( IOService, _service, service );

	err = kIOReturnNoResources;
	if ((sym = OSSymbol::withCString( type_of_interest ))) {
		do {
#if XNU_PLATFORM_WatchOS
			// App power-state interest requires a power-notification
			// entitlement; log offenders and, when enforcement is on,
			// reject the registration.
			if (sym == gIOAppPowerStateInterest &&
			    !(IOCurrentTaskHasEntitlement("com.apple.private.power.notifications") || IOCurrentTaskHasEntitlement("com.apple.private.power.notifications-temp"))) {
				OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
				IOLog("IORegisterForSystemPower called by %s without \"com.apple.private.power.notifications\" entitlement\n",
				    taskName ? taskName->getCStringNoCopy() : "???");
				OSSafeReleaseNULL(taskName);

				if (gEnforcePowerEntitlement) {
					err = kIOReturnNotPermitted;
					continue; // exits the do/while (false)
				}
			}
#endif // XNU_PLATFORM_WatchOS

			userNotify = new IOServiceMessageUserNotification;

			// init() failure releases the half-built object immediately.
			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
			    reference, referenceSize, client64 )) {
				userNotify->release();
				userNotify = NULL;
			}
			if (!userNotify) {
				continue; // err stays kIOReturnNoResources
			}

			notify = service->registerInterest( sym,
			    &userNotify->_handler, userNotify );
			if (notify) {
				*notification = userNotify;
				userNotify->setNotification( notify );
				err = kIOReturnSuccess;
			} else {
				err = kIOReturnUnsupported;
			}
		} while (false);

		sym->release();
	}

	// On any failure after the user notification was created, tear it
	// down so no port reference or notifier is leaked.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->setNotification(NULL);
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = NULL;
	}

	return err;
}
3241
3242 /* Routine io_service_add_message_notification */
3243 kern_return_t
is_io_service_add_interest_notification(io_object_t service,io_name_t type_of_interest,mach_port_t port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3244 is_io_service_add_interest_notification(
3245 io_object_t service,
3246 io_name_t type_of_interest,
3247 mach_port_t port,
3248 io_async_ref_t reference,
3249 mach_msg_type_number_t referenceCnt,
3250 io_object_t * notification )
3251 {
3252 io_async_ref_t zreference;
3253
3254 if (referenceCnt > ASYNC_REF_COUNT) {
3255 return kIOReturnBadArgument;
3256 }
3257 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3258 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3259
3260 return internal_io_service_add_interest_notification(service, type_of_interest,
3261 port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3262 }
3263
3264 /* Routine io_service_add_interest_notification_64 */
3265 kern_return_t
is_io_service_add_interest_notification_64(io_object_t service,io_name_t type_of_interest,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,io_object_t * notification)3266 is_io_service_add_interest_notification_64(
3267 io_object_t service,
3268 io_name_t type_of_interest,
3269 mach_port_t wake_port,
3270 io_async_ref64_t reference,
3271 mach_msg_type_number_t referenceCnt,
3272 io_object_t *notification )
3273 {
3274 io_async_ref64_t zreference;
3275
3276 if (referenceCnt > ASYNC_REF64_COUNT) {
3277 return kIOReturnBadArgument;
3278 }
3279 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3280 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3281
3282 return internal_io_service_add_interest_notification(service, type_of_interest,
3283 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3284 }
3285
3286
/* Routine io_service_acknowledge_notification */
kern_return_t
is_io_service_acknowledge_notification(
	io_object_t _service,
	natural_t notify_ref,
	natural_t response )
{
	// CHECK declares `service`, failing if _service is not an IOService.
	CHECK( IOService, _service, service );

	// notify_ref is the opaque notification reference the client echoes
	// back; response carries the client's reply bits.
	return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
	           (IOOptionBits) response );
}
3299
/* Routine io_connect_get_semaphore */
kern_return_t
is_io_connect_get_notification_semaphore(
	io_connect_t connection,
	natural_t notification_type,
	semaphore_t *semaphore )
{
	IOReturn ret;
	// CHECK declares `client`, failing if connection is not an
	// IOUserClient.
	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();
	// Serialize with other IPC against this user client while fetching
	// the semaphore.
	client->ipcEnter(kIPCLockWrite);
	ret = client->getNotificationSemaphore((UInt32) notification_type,
	    semaphore );
	client->ipcExit(kIPCLockWrite);

	return ret;
}
3318
3319 /* Routine io_registry_get_root_entry */
3320 kern_return_t
is_io_registry_get_root_entry(mach_port_t main_port,io_object_t * root)3321 is_io_registry_get_root_entry(
3322 mach_port_t main_port,
3323 io_object_t *root )
3324 {
3325 IORegistryEntry * entry;
3326
3327 if (main_port != main_device_port) {
3328 return kIOReturnNotPrivileged;
3329 }
3330
3331 entry = IORegistryEntry::getRegistryRoot();
3332 if (entry) {
3333 entry->retain();
3334 }
3335 *root = entry;
3336
3337 return kIOReturnSuccess;
3338 }
3339
3340 /* Routine io_registry_create_iterator */
3341 kern_return_t
is_io_registry_create_iterator(mach_port_t main_port,io_name_t plane,uint32_t options,io_object_t * iterator)3342 is_io_registry_create_iterator(
3343 mach_port_t main_port,
3344 io_name_t plane,
3345 uint32_t options,
3346 io_object_t *iterator )
3347 {
3348 if (main_port != main_device_port) {
3349 return kIOReturnNotPrivileged;
3350 }
3351
3352 *iterator = IOUserIterator::withIterator(
3353 IORegistryIterator::iterateOver(
3354 IORegistryEntry::getPlane( plane ), options ));
3355
3356 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3357 }
3358
3359 /* Routine io_registry_entry_create_iterator */
3360 kern_return_t
is_io_registry_entry_create_iterator(io_object_t registry_entry,io_name_t plane,uint32_t options,io_object_t * iterator)3361 is_io_registry_entry_create_iterator(
3362 io_object_t registry_entry,
3363 io_name_t plane,
3364 uint32_t options,
3365 io_object_t *iterator )
3366 {
3367 CHECK( IORegistryEntry, registry_entry, entry );
3368
3369 *iterator = IOUserIterator::withIterator(
3370 IORegistryIterator::iterateOver( entry,
3371 IORegistryEntry::getPlane( plane ), options ));
3372
3373 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3374 }
3375
/* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// CHECKLOCKED declares `iter` (the IORegistryIterator) and `oIter`
	// (the wrapper object whose lock guards it).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	IOLockLock(&oIter->lock);
	// Descend into the current entry's children.
	iter->enterEntry();
	IOLockUnlock(&oIter->lock);

	return kIOReturnSuccess;
}
3389
/* Routine io_registry_iterator_exit */
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool didIt;

	// CHECKLOCKED declares `iter` (the IORegistryIterator) and `oIter`
	// (the wrapper object whose lock guards it).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	IOLockLock(&oIter->lock);
	// exitEntry() reports whether there was a level to pop back to.
	didIt = iter->exitEntry();
	IOLockUnlock(&oIter->lock);

	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
3405
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path(
	mach_port_t main_port,
	io_string_t path,
	io_object_t *registry_entry )
{
	IORegistryEntry * entry;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	entry = IORegistryEntry::fromPath( path );

	// For tasks running in registry-compatibility mode, fall back to
	// matching a service by path with compatibility matching enabled.
	if (!entry && IOTaskRegistryCompatibility(current_task())) {
		OSDictionary * matching;
		const OSObject * objects[2] = { kOSBooleanTrue, NULL };
		const OSSymbol * keys[2] = { gIOCompatibilityMatchKey, gIOPathMatchKey };

		objects[1] = OSString::withCStringNoCopy(path);
		matching = OSDictionary::withObjects(objects, keys, 2, 2);
		if (matching) {
			entry = IOService::copyMatchingService(matching);
		}
		OSSafeReleaseNULL(matching);
		OSSafeReleaseNULL(objects[1]);
	}

	// NULL is a valid result; the caller receives it alongside
	// kIOReturnSuccess.
	*registry_entry = entry;

	return kIOReturnSuccess;
}
3439
3440
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t main_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry * entry;
	vm_map_offset_t map_data;
	const char * cpath;
	IOReturn res;
	kern_return_t err;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry = NULL;
	res = err = KERN_SUCCESS;
	// Short paths arrive inband; an empty inband string means the path
	// was sent out-of-line in path_ool.
	if (path[0]) {
		cpath = path;
	} else {
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// The out-of-line path must be NUL-terminated.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	// Release the kernel mapping of the out-of-line data, if any.
	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	if (KERN_SUCCESS != err) {
		res = err;
	}
	// The lookup status travels in *result; err is the MIG-level status.
	*registry_entry = entry;
	*result = res;

	return err;
}
3501
3502
3503 /* Routine io_registry_entry_in_plane */
3504 kern_return_t
is_io_registry_entry_in_plane(io_object_t registry_entry,io_name_t plane,boolean_t * inPlane)3505 is_io_registry_entry_in_plane(
3506 io_object_t registry_entry,
3507 io_name_t plane,
3508 boolean_t *inPlane )
3509 {
3510 CHECK( IORegistryEntry, registry_entry, entry );
3511
3512 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3513
3514 return kIOReturnSuccess;
3515 }
3516
3517
3518 /* Routine io_registry_entry_get_path */
3519 kern_return_t
is_io_registry_entry_get_path(io_object_t registry_entry,io_name_t plane,io_string_t path)3520 is_io_registry_entry_get_path(
3521 io_object_t registry_entry,
3522 io_name_t plane,
3523 io_string_t path )
3524 {
3525 int length;
3526 CHECK( IORegistryEntry, registry_entry, entry );
3527
3528 length = sizeof(io_string_t);
3529 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3530 return kIOReturnSuccess;
3531 } else {
3532 return kIOReturnBadArgument;
3533 }
3534 }
3535
/* Routine io_registry_entry_get_path */
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum { kMaxPath = 16384 };      // cap for paths that don't fit inband
	IOReturn err;
	int length;
	char * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	*path_ool = NULL;
	*path_oolCnt = 0;
	// First try to produce the path in the fixed-size inband buffer.
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		err = kIOReturnSuccess;
	} else {
		// Fall back to a kMaxPath heap buffer returned out-of-line.
		length = kMaxPath;
		buf = IONewData(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			// `length` was passed by pointer to getPath() and now
			// holds the number of bytes to return.
			*path_oolCnt = length;
			err = copyoutkdata(buf, length, path_ool);
		}
		if (buf) {
			// Free the full allocation size, not the updated length.
			IODeleteData(buf, char, kMaxPath);
		}
	}

	return err;
}
3575
3576
3577 /* Routine io_registry_entry_get_name */
3578 kern_return_t
is_io_registry_entry_get_name(io_object_t registry_entry,io_name_t name)3579 is_io_registry_entry_get_name(
3580 io_object_t registry_entry,
3581 io_name_t name )
3582 {
3583 CHECK( IORegistryEntry, registry_entry, entry );
3584
3585 strncpy( name, entry->getName(), sizeof(io_name_t));
3586
3587 return kIOReturnSuccess;
3588 }
3589
3590 /* Routine io_registry_entry_get_name_in_plane */
3591 kern_return_t
is_io_registry_entry_get_name_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t name)3592 is_io_registry_entry_get_name_in_plane(
3593 io_object_t registry_entry,
3594 io_name_t planeName,
3595 io_name_t name )
3596 {
3597 const IORegistryPlane * plane;
3598 CHECK( IORegistryEntry, registry_entry, entry );
3599
3600 if (planeName[0]) {
3601 plane = IORegistryEntry::getPlane( planeName );
3602 } else {
3603 plane = NULL;
3604 }
3605
3606 strncpy( name, entry->getName( plane), sizeof(io_name_t));
3607
3608 return kIOReturnSuccess;
3609 }
3610
3611 /* Routine io_registry_entry_get_location_in_plane */
3612 kern_return_t
is_io_registry_entry_get_location_in_plane(io_object_t registry_entry,io_name_t planeName,io_name_t location)3613 is_io_registry_entry_get_location_in_plane(
3614 io_object_t registry_entry,
3615 io_name_t planeName,
3616 io_name_t location )
3617 {
3618 const IORegistryPlane * plane;
3619 CHECK( IORegistryEntry, registry_entry, entry );
3620
3621 if (planeName[0]) {
3622 plane = IORegistryEntry::getPlane( planeName );
3623 } else {
3624 plane = NULL;
3625 }
3626
3627 const char * cstr = entry->getLocation( plane );
3628
3629 if (cstr) {
3630 strncpy( location, cstr, sizeof(io_name_t));
3631 return kIOReturnSuccess;
3632 } else {
3633 return kIOReturnNotFound;
3634 }
3635 }
3636
3637 /* Routine io_registry_entry_get_registry_entry_id */
3638 kern_return_t
is_io_registry_entry_get_registry_entry_id(io_object_t registry_entry,uint64_t * entry_id)3639 is_io_registry_entry_get_registry_entry_id(
3640 io_object_t registry_entry,
3641 uint64_t *entry_id )
3642 {
3643 CHECK( IORegistryEntry, registry_entry, entry );
3644
3645 *entry_id = entry->getRegistryEntryID();
3646
3647 return kIOReturnSuccess;
3648 }
3649
3650
3651 static OSObject *
IOCopyPropertyCompatible(IORegistryEntry * regEntry,const char * name)3652 IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
3653 {
3654 OSObject * obj;
3655 OSObject * compatProperties;
3656 OSDictionary * props;
3657
3658 obj = regEntry->copyProperty(name);
3659 if (obj) {
3660 return obj;
3661 }
3662
3663 compatProperties = regEntry->copyProperty(gIOUserServicePropertiesKey);
3664 if (!compatProperties
3665 && IOTaskRegistryCompatibility(current_task())) {
3666 compatProperties = regEntry->copyProperty(gIOCompatibilityPropertiesKey);
3667 }
3668 if (compatProperties) {
3669 props = OSDynamicCast(OSDictionary, compatProperties);
3670 if (props) {
3671 obj = props->getObject(name);
3672 if (obj) {
3673 obj->retain();
3674 }
3675 }
3676 compatProperties->release();
3677 }
3678
3679 return obj;
3680 }
3681
/* Routine io_registry_entry_get_property */
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject * obj;
	OSData * data;
	OSString * str;
	OSBoolean * boo;
	OSNumber * off;
	UInt64 offsetBytes;
	unsigned int len = 0;
	const void * bytes = NULL;
	IOReturn ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may veto reading this property.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then...
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		// Non-serializable data is withheld: report zero length.
		if (!data->isSerializable()) {
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		// Include the terminating NUL.
		len = str->getLength() + 1;
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		// Booleans are rendered as the literal strings "Yes"/"No".
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		// Numbers are returned as raw little-endian bytes, clamped to
		// the 64-bit value's size.
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		// On big-endian, the significant bytes sit at the end of the
		// 64-bit value; skip the unused leading bytes.
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		// Other property types cannot be flattened to raw bytes.
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		// *dataCnt is in/out: the caller's capacity on entry, the
		// actual size on exit.
		if (*dataCnt < len) {
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3754
3755
3756 /* Routine io_registry_entry_get_property */
3757 kern_return_t
is_io_registry_entry_get_property(io_object_t registry_entry,io_name_t property_name,io_buf_ptr_t * properties,mach_msg_type_number_t * propertiesCnt)3758 is_io_registry_entry_get_property(
3759 io_object_t registry_entry,
3760 io_name_t property_name,
3761 io_buf_ptr_t *properties,
3762 mach_msg_type_number_t *propertiesCnt )
3763 {
3764 kern_return_t err;
3765 unsigned int len;
3766 OSObject * obj;
3767
3768 CHECK( IORegistryEntry, registry_entry, entry );
3769
3770 #if CONFIG_MACF
3771 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3772 return kIOReturnNotPermitted;
3773 }
3774 #endif
3775
3776 obj = IOCopyPropertyCompatible(entry, property_name);
3777 if (!obj) {
3778 return kIOReturnNotFound;
3779 }
3780
3781 OSSerialize * s = OSSerialize::withCapacity(4096);
3782 if (!s) {
3783 obj->release();
3784 return kIOReturnNoMemory;
3785 }
3786
3787 if (obj->serialize( s )) {
3788 len = s->getLength();
3789 *propertiesCnt = len;
3790 err = copyoutkdata( s->text(), len, properties );
3791 } else {
3792 err = kIOReturnUnsupported;
3793 }
3794
3795 s->release();
3796 obj->release();
3797
3798 return err;
3799 }
3800
/* Routine io_registry_entry_get_property_recursively */
kern_return_t
is_io_registry_entry_get_property_recursively(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	unsigned int len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may veto reading this property.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	// Search for the property on the entry and, per `options`, its
	// relatives in `plane`.
	obj = entry->copyProperty( property_name,
	    IORegistryEntry::getPlane( plane ), options );
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	// Serialize the property and hand the text to user space as
	// out-of-line data.
	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3848
/* Routine io_registry_entry_get_properties */
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// This legacy bulk-property routine is no longer implemented;
	// clients use the *_bin variants below.
	return kIOReturnUnsupported;
}
3858
3859 #if CONFIG_MACF
3860
// Context handed to GetPropertiesEditor() while serializing an entry's
// properties under MAC filtering.
struct GetPropertiesEditorRef {
	kauth_cred_t cred;       // credential the MAC policy is evaluated against
	IORegistryEntry * entry; // entry whose properties are being serialized
	OSCollection * root;     // first (top-level) container seen; set by the editor
};
3866
/*
 * OSSerialize editor callback: for members of the top-level container,
 * consult the MAC policy and drop (return NULL for) any property the
 * caller's credential may not read.  The returned value is retained, per
 * LIBKERN_RETURNS_RETAINED.
 */
static const LIBKERN_RETURNS_RETAINED OSMetaClassBase *
GetPropertiesEditor(void * reference,
    OSSerialize * s,
    OSCollection * container,
    const OSSymbol * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	// The first container seen is the root; only its direct members
	// are filtered.
	if (!ref->root) {
		ref->root = container;
	}
	if (ref->root == container) {
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			value = NULL;
		}
	}
	if (value) {
		value->retain();
	}
	return value;
}
3889
3890 #endif /* CONFIG_MACF */
3891
/* Routine io_registry_entry_get_properties_bin_buf */
kern_return_t
is_io_registry_entry_get_properties_bin_buf(
	io_object_t registry_entry,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t err = kIOReturnSuccess;
	unsigned int len;
	OSObject * compatProperties;
	OSSerialize * s;
	OSSerialize::Editor editor = NULL;
	void * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// When the MAC policy filters properties for this credential,
	// install the editor callback that drops disallowed keys during
	// serialization.
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor = &GetPropertiesEditor;
		editRef = &ref;
		ref.cred = kauth_cred_get();
		ref.entry = entry;
		ref.root = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}


	// If the entry carries a user-server property dictionary (or, for
	// compatibility-mode tasks, a compatibility dictionary), merge it
	// over the entry's own properties before serializing.
	compatProperties = entry->copyProperty(gIOUserServicePropertiesKey);
	if (!compatProperties
	    && IOTaskRegistryCompatibility(current_task())) {
		compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey);
	}

	if (compatProperties) {
		OSDictionary * dict;

		dict = entry->dictionaryWithProperties();
		if (!dict) {
			err = kIOReturnNoMemory;
		} else {
			// Hide the merge sources themselves from the result.
			dict->removeObject(gIOUserServicePropertiesKey);
			dict->removeObject(gIOCompatibilityPropertiesKey);
			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
			if (!dict->serialize(s)) {
				err = kIOReturnUnsupported;
			}
			dict->release();
		}
		compatProperties->release();
	} else if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		if (buf && bufsize && len <= *bufsize) {
			// Result fits the caller-provided buffer: copy out
			// directly and return no out-of-line data.
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			// Otherwise return the data out-of-line.
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	}
	s->release();

	return err;
}
3976
/* Routine io_registry_entry_get_properties_bin */
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	// Out-of-line-only variant: no caller buffer, so the result is
	// always returned via `properties`.
	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
	           0, NULL, properties, propertiesCnt);
}
3987
/* Routine io_registry_entry_get_property_bin_buf */
kern_return_t
is_io_registry_entry_get_property_bin_buf(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	unsigned int len;
	OSObject * obj;
	const OSSymbol * sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may veto reading this property on the root entry.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	err = kIOReturnNotFound;
	if (gIORegistryEntryPropertyKeysKey == sym) {
		// Pseudo-property: return the entry's list of property keys.
		obj = entry->copyPropertyKeys();
	} else {
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			obj = IOCopyPropertyCompatible(entry, property_name);
			if (obj == NULL) {
				// Walk the plane per `options`, taking the first
				// entry that provides the property.
				IORegistryIterator * iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
				if (iter) {
					while ((NULL == obj) && (entry = iter->getNextObject())) {
						OSObject * currentObj = IOCopyPropertyCompatible(entry, property_name);
#if CONFIG_MACF
						if (currentObj != NULL && 0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
							// Record that MAC hook blocked this entry and property, and continue to next entry
							err = kIOReturnNotPermitted;
							OSSafeReleaseNULL(currentObj);
							continue;
						}
#endif
						obj = currentObj;
					}
					iter->release();
				}
			}
		} else {
			obj = IOCopyPropertyCompatible(entry, property_name);
		}
		// Some properties are consumed on read (see
		// gIORemoveOnReadProperties).
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		// err is kIOReturnNotFound, or kIOReturnNotPermitted if the
		// MAC hook blocked a candidate in the loop above.
		return err;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		if (buf && bufsize && len <= *bufsize) {
			// Fits in the caller-supplied buffer; no out-of-line data.
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			// Otherwise return the data out-of-line.
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
4088
/* Routine io_registry_entry_get_property_bin */
kern_return_t
is_io_registry_entry_get_property_bin(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// Out-of-line-only variant: no caller buffer, so the result is
	// always returned via `properties`.
	return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
	           property_name, options, 0, NULL, properties, propertiesCnt);
}
4102
4103
/* Routine io_registry_entry_set_properties */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject * obj;
	kern_return_t err;
	IOReturn res;
	vm_offset_t data;
	vm_map_offset_t map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Bound the size of the serialized payload.
	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		// (the copy object is consumed); `res` carries the real status
		// back via *result.
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			IOService * service = OSDynamicCast(IOService, entry);
			OSDictionary * props = OSDynamicCast(OSDictionary, obj);
			// An entry may restrict which keys may be set via an
			// allow-list property.
			OSObject * allowable = entry->copyProperty(gIORegistryEntryAllowableSetPropertiesKey);
			OSArray * allowableArray;

			if (!allowable) {
				res = kIOReturnSuccess;
			} else {
				if (!props) {
					res = kIOReturnNotPermitted;
				} else if (!(allowableArray = OSDynamicCast(OSArray, allowable))) {
					res = kIOReturnNotPermitted;
				} else {
					bool allFound __block, found __block;

					// Every key in the request must appear in the
					// allow-list or the whole set is rejected.
					allFound = true;
					props->iterateObjects(^(const OSSymbol * key, OSObject * value) {
						found = false;
						for (unsigned int idx = 0; !found; idx++) {
							OSObject * next = allowableArray->getObject(idx);
							if (!next) {
								break;
							}
							found = next->isEqualTo(key);
						}
						allFound &= found;
						if (!found) {
							IOLog("IORegistryEntrySetProperties(%s, %s) disallowed due to " kIORegistryEntryAllowableSetPropertiesKey "\n",
							    entry->getName(), key->getCStringNoCopy());
						}
						// Returning true stops the iteration early.
						return !allFound;
					});
					res = allFound ? kIOReturnSuccess : kIOReturnBadArgument;
				}
			}
			if (kIOReturnSuccess == res) {
				IOUserClient *
				    client = OSDynamicCast(IOUserClient, entry);

				// User clients may opt into serialized setProperties.
				if (client && client->defaultLockingSetProperties) {
					IORWLockWrite(&client->lock);
				}

				// Non-user-client entries may opt into running under
				// the property action instead.
				if (!client && (kOSBooleanTrue == entry->getProperty(gIORegistryEntryDefaultLockingSetPropertiesKey))) {
					res = entry->runPropertyActionBlock(^IOReturn (void) {
						return entry->setProperties( obj );
					});
				} else {
					res = entry->setProperties( obj );
				}

				if (client && client->defaultLockingSetProperties) {
					IORWLockUnlock(&client->lock);
				}
				// Forward to the user-space server when one backs this
				// service.
				if (service && props && service->hasUserServer()) {
					res = service->UserSetProperties(props);
				}
			}
			OSSafeReleaseNULL(allowable);
		}
		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	*result = res;
	return err;
}
4217
4218 /* Routine io_registry_entry_get_child_iterator */
4219 kern_return_t
is_io_registry_entry_get_child_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4220 is_io_registry_entry_get_child_iterator(
4221 io_object_t registry_entry,
4222 io_name_t plane,
4223 io_object_t *iterator )
4224 {
4225 CHECK( IORegistryEntry, registry_entry, entry );
4226
4227 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
4228 IORegistryEntry::getPlane( plane )));
4229
4230 return kIOReturnSuccess;
4231 }
4232
4233 /* Routine io_registry_entry_get_parent_iterator */
4234 kern_return_t
is_io_registry_entry_get_parent_iterator(io_object_t registry_entry,io_name_t plane,io_object_t * iterator)4235 is_io_registry_entry_get_parent_iterator(
4236 io_object_t registry_entry,
4237 io_name_t plane,
4238 io_object_t *iterator)
4239 {
4240 CHECK( IORegistryEntry, registry_entry, entry );
4241
4242 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
4243 IORegistryEntry::getPlane( plane )));
4244
4245 return kIOReturnSuccess;
4246 }
4247
4248 /* Routine io_service_get_busy_state */
4249 kern_return_t
is_io_service_get_busy_state(io_object_t _service,uint32_t * busyState)4250 is_io_service_get_busy_state(
4251 io_object_t _service,
4252 uint32_t *busyState )
4253 {
4254 CHECK( IOService, _service, service );
4255
4256 *busyState = service->getBusyState();
4257
4258 return kIOReturnSuccess;
4259 }
4260
4261 /* Routine io_service_get_state */
4262 kern_return_t
is_io_service_get_state(io_object_t _service,uint64_t * state,uint32_t * busy_state,uint64_t * accumulated_busy_time)4263 is_io_service_get_state(
4264 io_object_t _service,
4265 uint64_t *state,
4266 uint32_t *busy_state,
4267 uint64_t *accumulated_busy_time )
4268 {
4269 CHECK( IOService, _service, service );
4270
4271 *state = service->getState();
4272 *busy_state = service->getBusyState();
4273 *accumulated_busy_time = service->getAccumulatedBusyTime();
4274
4275 return kIOReturnSuccess;
4276 }
4277
4278 /* Routine io_service_wait_quiet */
4279 kern_return_t
is_io_service_wait_quiet(io_object_t _service,mach_timespec_t wait_time)4280 is_io_service_wait_quiet(
4281 io_object_t _service,
4282 mach_timespec_t wait_time )
4283 {
4284 uint64_t timeoutNS;
4285
4286 CHECK( IOService, _service, service );
4287
4288 timeoutNS = wait_time.tv_sec;
4289 timeoutNS *= kSecondScale;
4290 timeoutNS += wait_time.tv_nsec;
4291
4292 return service->waitQuiet(timeoutNS);
4293 }
4294
4295 /* Routine io_service_wait_quiet_with_options */
4296 kern_return_t
is_io_service_wait_quiet_with_options(io_object_t _service,mach_timespec_t wait_time,uint32_t options)4297 is_io_service_wait_quiet_with_options(
4298 io_object_t _service,
4299 mach_timespec_t wait_time,
4300 uint32_t options )
4301 {
4302 uint64_t timeoutNS;
4303
4304 CHECK( IOService, _service, service );
4305
4306 timeoutNS = wait_time.tv_sec;
4307 timeoutNS *= kSecondScale;
4308 timeoutNS += wait_time.tv_nsec;
4309
4310 if ((options & kIOWaitQuietPanicOnFailure) && !IOCurrentTaskHasEntitlement(kIOWaitQuietPanicsEntitlement)) {
4311 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4312 IOLog("IOServiceWaitQuietWithOptions(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4313 OSSafeReleaseNULL(taskName);
4314
4315 /* strip this option from the options before calling waitQuietWithOptions */
4316 options &= ~kIOWaitQuietPanicOnFailure;
4317 }
4318
4319 return service->waitQuietWithOptions(timeoutNS, options);
4320 }
4321
4322
4323 /* Routine io_service_request_probe */
4324 kern_return_t
is_io_service_request_probe(io_object_t _service,uint32_t options)4325 is_io_service_request_probe(
4326 io_object_t _service,
4327 uint32_t options )
4328 {
4329 CHECK( IOService, _service, service );
4330
4331 return service->requestProbe( options );
4332 }
4333
4334 /* Routine io_service_get_authorization_id */
4335 kern_return_t
is_io_service_get_authorization_id(io_object_t _service,uint64_t * authorization_id)4336 is_io_service_get_authorization_id(
4337 io_object_t _service,
4338 uint64_t *authorization_id )
4339 {
4340 kern_return_t kr;
4341
4342 CHECK( IOService, _service, service );
4343
4344 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
4345 kIOClientPrivilegeAdministrator );
4346 if (kIOReturnSuccess != kr) {
4347 return kr;
4348 }
4349
4350 #if defined(XNU_TARGET_OS_OSX)
4351 *authorization_id = service->getAuthorizationID();
4352 #else /* defined(XNU_TARGET_OS_OSX) */
4353 *authorization_id = 0;
4354 kr = kIOReturnUnsupported;
4355 #endif /* defined(XNU_TARGET_OS_OSX) */
4356
4357 return kr;
4358 }
4359
4360 /* Routine io_service_set_authorization_id */
4361 kern_return_t
is_io_service_set_authorization_id(io_object_t _service,uint64_t authorization_id)4362 is_io_service_set_authorization_id(
4363 io_object_t _service,
4364 uint64_t authorization_id )
4365 {
4366 CHECK( IOService, _service, service );
4367
4368 #if defined(XNU_TARGET_OS_OSX)
4369 return service->setAuthorizationID( authorization_id );
4370 #else /* defined(XNU_TARGET_OS_OSX) */
4371 return kIOReturnUnsupported;
4372 #endif /* defined(XNU_TARGET_OS_OSX) */
4373 }
4374
/* Routine io_service_open_ndr */
/*
 * Open a connection (IOUserClient) to a service on behalf of owningTask.
 * The Mach-level status is the function return; the IOKit status of the
 * open itself is delivered through *result, and the new user client (if
 * any) through *connection.
 *
 * NOTE(review): 'ndr' and 'properties' are accepted for MIG compatibility;
 * supplying properties is currently unsupported (see the early return and
 * the disabled #if 0 unserialization path below).
 */
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = NULL;
	kern_return_t err = KERN_SUCCESS;
	IOReturn res = kIOReturnSuccess;
	OSDictionary * propertiesDict = NULL;
	bool disallowAccess = false;

	CHECK( IOService, _service, service );

	// The open must be performed by the task that will own the connection.
	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

#if CONFIG_MACF
	// MAC policy hook: may veto opening this service entirely.
	if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
		return kIOReturnNotPermitted;
	}
#endif
	do{
		// Out-of-line properties on open are not currently supported.
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		// Disabled: would unserialize caller-supplied properties into
		// propertiesDict for newUserClient().
		{
			OSObject * obj;
			vm_offset_t data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		// Ask the service to create a user client for this connect type.
		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess && OSDynamicCast(IOUserClient, client) == NULL) {
			// client should always be a IOUserClient
			res = kIOReturnError;
		}

		// Ensure the client's expansion (reserved) data is allocated.
		if (res == kIOReturnSuccess) {
			if (!client->reserved) {
				if (!client->reserve()) {
					client->clientClose();
					OSSafeReleaseNULL(client);
					res = kIOReturnNoMemory;
				}
			}
		}

		if (res == kIOReturnSuccess) {
			// Record the creating process name for diagnostics.
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
			}
			const char * creatorNameCStr = creatorName ? creatorName->getCStringNoCopy() : "<unknown>";
			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
			// Shared instances may be opened concurrently; serialize the
			// one-time setup below under the owners lock.
			if (client->sharedInstance) {
				IOLockLock(gIOUserClientOwnersLock);
			}
			if (!client->opened) {
				client->opened = true;

				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
				{
					OSObject * obj;
					extern const OSSymbol * gIOSurfaceIdentifier;
					obj = client->getProperty(kIOUserClientDefaultLockingKey);
					bool hasProps = false;

					// IOUserClient2022 subclasses must declare all three
					// default-locking properties explicitly; missing any
					// of them fails the open.
					client->uc2022 = (NULL != OSDynamicCast(IOUserClient2022, client));
					if (obj) {
						hasProps = true;
						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSetProperties = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSetPropertiesKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					obj = client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey);
					if (obj) {
						hasProps = true;
						client->defaultLockingSingleThreadExternalMethod = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingSingleThreadExternalMethodKey));
					} else if (client->uc2022) {
						res = kIOReturnError;
					}
					if (kIOReturnSuccess != res) {
						IOLog("IOUC %s requires kIOUserClientDefaultLockingKey, kIOUserClientDefaultLockingSetPropertiesKey, kIOUserClientDefaultLockingSingleThreadExternalMethodKey\n",
						    client->getMetaClass()->getClassName());
					}
					// Legacy clients with no explicit locking properties
					// get safe defaults — except clients from kexts that
					// depend on IOSurface (compatibility carve-out).
					if (!hasProps) {
						const OSMetaClass * meta;
						OSKext * kext;
						meta = client->getMetaClass();
						kext = meta->getKext();
						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
							client->defaultLocking = true;
							client->defaultLockingSetProperties = false;
							client->defaultLockingSingleThreadExternalMethod = false;
							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
						}
					}
				}
			}
			if (client->sharedInstance) {
				IOLockUnlock(gIOUserClientOwnersLock);
			}

			OSObject * requiredEntitlement = client->copyProperty(gIOUserClientEntitlementsKey);
			OSString * requiredEntitlementString = OSDynamicCast(OSString, requiredEntitlement);
			//If this is an IOUserClient2022, having kIOUserClientEntitlementsKey is mandatory.
			//If it has kIOUserClientEntitlementsKey, the value must be either kOSBooleanFalse or an OSString
			//If the value is kOSBooleanFalse, we allow access.
			//If the value is an OSString, we allow access if the task has the named entitlement
			if (client->uc2022) {
				if (!requiredEntitlement) {
					IOLog("IOUC %s missing " kIOUserClientEntitlementsKey " property\n",
					    client->getMetaClass()->getClassName());
					disallowAccess = true;
				} else if (!requiredEntitlementString && requiredEntitlement != kOSBooleanFalse) {
					IOLog("IOUC %s had " kIOUserClientEntitlementsKey "with value not boolean false or string\n", client->getMetaClass()->getClassName());
					disallowAccess = true;
				}
			}

			if (requiredEntitlement && disallowAccess == false) {
				if (kOSBooleanFalse == requiredEntitlement) {
					// allow
					disallowAccess = false;
				} else {
					disallowAccess = !IOTaskHasEntitlement(owningTask, requiredEntitlementString->getCStringNoCopy());
					if (disallowAccess) {
						IOLog("IOUC %s missing entitlement in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			OSSafeReleaseNULL(requiredEntitlement);

			if (disallowAccess) {
				res = kIOReturnNotPrivileged;
			}
#if CONFIG_MACF
			// MAC policy hook: may veto this particular client object.
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				IOLog("IOUC %s failed MACF in process %s\n",
				    client->getMetaClass()->getClassName(), creatorNameCStr);
				res = kIOReturnNotPermitted;
			}
#endif

			// Resolve and attach a sandbox filter policy for this task,
			// if a filter resolver is registered.
			if ((kIOReturnSuccess == res)
			    && gIOUCFilterCallbacks
			    && gIOUCFilterCallbacks->io_filter_resolver) {
				io_filter_policy_t filterPolicy;
				filterPolicy = client->filterForTask(owningTask, 0);
				if (!filterPolicy) {
					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
					if (kIOReturnUnsupported == res) {
						// No policy for this client: allow unfiltered.
						res = kIOReturnSuccess;
					} else if (kIOReturnSuccess == res) {
						client->filterForTask(owningTask, filterPolicy);
					} else {
						IOLog("IOUC %s failed sandbox in process %s\n",
						    client->getMetaClass()->getClassName(), creatorNameCStr);
					}
				}
			}

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}
			OSSafeReleaseNULL(creatorName);

			// On any failure, close and drop the client before returning.
			if (kIOReturnSuccess != res) {
				IOStatisticsClientCall();
				client->clientClose();
				client->setTerminateDefer(service, false);
				client->release();
				client = NULL;
				break;
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	*connection = client;
	*result = res;

	return err;
}
4610
4611 /* Routine io_service_close */
4612 kern_return_t
is_io_service_close(io_connect_t connection)4613 is_io_service_close(
4614 io_connect_t connection )
4615 {
4616 OSSet * mappings;
4617 if ((mappings = OSDynamicCast(OSSet, connection))) {
4618 return kIOReturnSuccess;
4619 }
4620
4621 CHECK( IOUserClient, connection, client );
4622
4623 IOStatisticsClientCall();
4624
4625 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
4626 client->ipcEnter(kIPCLockWrite);
4627 client->clientClose();
4628 client->ipcExit(kIPCLockWrite);
4629 } else {
4630 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
4631 client->getRegistryEntryID(), client->getName());
4632 }
4633
4634 return kIOReturnSuccess;
4635 }
4636
4637 /* Routine io_connect_get_service */
4638 kern_return_t
is_io_connect_get_service(io_connect_t connection,io_object_t * service)4639 is_io_connect_get_service(
4640 io_connect_t connection,
4641 io_object_t *service )
4642 {
4643 IOService * theService;
4644
4645 CHECK( IOUserClient, connection, client );
4646
4647 client->ipcEnter(kIPCLockNone);
4648
4649 theService = client->getService();
4650 if (theService) {
4651 theService->retain();
4652 }
4653
4654 client->ipcExit(kIPCLockNone);
4655
4656 *service = theService;
4657
4658 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4659 }
4660
4661 /* Routine io_connect_set_notification_port */
4662 kern_return_t
is_io_connect_set_notification_port(io_connect_t connection,uint32_t notification_type,mach_port_t port,uint32_t reference)4663 is_io_connect_set_notification_port(
4664 io_connect_t connection,
4665 uint32_t notification_type,
4666 mach_port_t port,
4667 uint32_t reference)
4668 {
4669 kern_return_t ret;
4670 CHECK( IOUserClient, connection, client );
4671
4672 IOStatisticsClientCall();
4673
4674 client->ipcEnter(kIPCLockWrite);
4675 ret = client->registerNotificationPort( port, notification_type,
4676 (io_user_reference_t) reference );
4677 client->ipcExit(kIPCLockWrite);
4678
4679 return ret;
4680 }
4681
4682 /* Routine io_connect_set_notification_port */
4683 kern_return_t
is_io_connect_set_notification_port_64(io_connect_t connection,uint32_t notification_type,mach_port_t port,io_user_reference_t reference)4684 is_io_connect_set_notification_port_64(
4685 io_connect_t connection,
4686 uint32_t notification_type,
4687 mach_port_t port,
4688 io_user_reference_t reference)
4689 {
4690 kern_return_t ret;
4691 CHECK( IOUserClient, connection, client );
4692
4693 IOStatisticsClientCall();
4694
4695 client->ipcEnter(kIPCLockWrite);
4696 ret = client->registerNotificationPort( port, notification_type,
4697 reference );
4698 client->ipcExit(kIPCLockWrite);
4699
4700 return ret;
4701 }
4702
4703
4704 /* Routine io_connect_map_shared_memory */
4705 kern_return_t
is_io_connect_map_shared_memory(io_connect_t connection,uint32_t memory_type,task_t into_task,mach_vm_address_t * address,mach_vm_size_t * size,uint32_t map_flags,io_name_t property_name,io_struct_inband_t inband_output,mach_msg_type_number_t * inband_outputCnt)4706 is_io_connect_map_shared_memory
4707 (
4708 io_connect_t connection,
4709 uint32_t memory_type,
4710 task_t into_task,
4711 mach_vm_address_t *address,
4712 mach_vm_size_t *size,
4713 uint32_t map_flags,
4714 io_name_t property_name,
4715 io_struct_inband_t inband_output,
4716 mach_msg_type_number_t *inband_outputCnt
4717 )
4718 {
4719 IOReturn err;
4720 IOMemoryMap * map = NULL;
4721 IOOptionBits options = 0;
4722 IOMemoryDescriptor * memory = NULL;
4723
4724 CHECK( IOUserClient, connection, client );
4725
4726 if (!into_task) {
4727 return kIOReturnBadArgument;
4728 }
4729 if (client->sharedInstance
4730 || (into_task != current_task())) {
4731 return kIOReturnUnsupported;
4732 }
4733
4734 IOStatisticsClientCall();
4735
4736 client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4737
4738 err = client->clientMemoryForType(memory_type, &options, &memory );
4739
4740 if (memory && (kIOReturnSuccess == err)) {
4741 OSObject * context = memory->copySharingContext(property_name);
4742 OSData * desc;
4743 if (!(desc = OSDynamicCast(OSData, context))) {
4744 err = kIOReturnNotReady;
4745 } else {
4746 if (!(kIOMapReadOnly & options)
4747 && !IOCurrentTaskHasEntitlement(kIOMapSharedMemoryWritableEntitlement)) {
4748 err = kIOReturnNotPermitted;
4749 } else if (desc->getLength() > *inband_outputCnt) {
4750 err = kIOReturnOverrun;
4751 } else {
4752 memcpy(inband_output, desc->getBytesNoCopy(), desc->getLength());
4753 *inband_outputCnt = desc->getLength();
4754 }
4755 OSSafeReleaseNULL(context);
4756 }
4757 if (kIOReturnSuccess == err) {
4758 FAKE_STACK_FRAME(client->getMetaClass());
4759
4760 options = (options & ~kIOMapUserOptionsMask)
4761 | (map_flags & kIOMapUserOptionsMask)
4762 | kIOMapAnywhere;
4763 map = memory->createMappingInTask( into_task, 0, options );
4764
4765 FAKE_STACK_FRAME_END();
4766 if (!map) {
4767 err = kIOReturnNotReadable;
4768 }
4769 }
4770 memory->release();
4771 }
4772
4773 if (map) {
4774 *address = map->getAddress();
4775 if (size) {
4776 *size = map->getSize();
4777 }
4778 // keep it with the user client
4779 IOLockLock( gIOObjectPortLock);
4780 if (NULL == client->mappings) {
4781 client->mappings = OSSet::withCapacity(2);
4782 }
4783 if (client->mappings) {
4784 client->mappings->setObject( map);
4785 }
4786 IOLockUnlock( gIOObjectPortLock);
4787 map->release();
4788 err = kIOReturnSuccess;
4789 }
4790
4791 client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
4792
4793 return err;
4794 }
/* Routine io_connect_map_memory_into_task */
/*
 * Map a user client's memory of 'memory_type' into into_task at *address
 * (placement subject to 'flags'), returning the actual address and size.
 */
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
)
{
	IOReturn err;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();

	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		// For shared instances or cross-task maps the mapping's lifetime
		// is handed to the target task via a send right; otherwise it is
		// tracked in the client's own mappings set.
		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			map->release();
		} else {
			// keep it with the user client
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);

	return err;
}
4856
4857 /* Routine is_io_connect_map_memory */
4858 kern_return_t
is_io_connect_map_memory(io_object_t connect,uint32_t type,task_t task,uint32_t * mapAddr,uint32_t * mapSize,uint32_t flags)4859 is_io_connect_map_memory(
4860 io_object_t connect,
4861 uint32_t type,
4862 task_t task,
4863 uint32_t * mapAddr,
4864 uint32_t * mapSize,
4865 uint32_t flags )
4866 {
4867 IOReturn err;
4868 mach_vm_address_t address;
4869 mach_vm_size_t size;
4870
4871 address = SCALAR64(*mapAddr);
4872 size = SCALAR64(*mapSize);
4873
4874 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4875
4876 *mapAddr = SCALAR32(address);
4877 *mapSize = SCALAR32(size);
4878
4879 return err;
4880 }
4881 } /* extern "C" */
4882
4883 IOMemoryMap *
removeMappingForDescriptor(IOMemoryDescriptor * mem)4884 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4885 {
4886 OSIterator * iter;
4887 IOMemoryMap * map = NULL;
4888
4889 IOLockLock(gIOObjectPortLock);
4890
4891 iter = OSCollectionIterator::withCollection(mappings);
4892 if (iter) {
4893 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4894 if (mem == map->getMemoryDescriptor()) {
4895 map->retain();
4896 mappings->removeObject(map);
4897 break;
4898 }
4899 }
4900 iter->release();
4901 }
4902
4903 IOLockUnlock(gIOObjectPortLock);
4904
4905 return map;
4906 }
4907
4908 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
/*
 * Undo a mapping previously made for 'memory_type' in from_task at
 * 'address'. Re-creates the map with kIOMapReference to locate the
 * existing mapping, removes it from the client's tracking set, and drops
 * the task's send right if one was pushed out at map time.
 */
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();

	client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		// kIOMapReference looks up the existing mapping at 'address'
		// rather than creating a new one.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance;
			if (is_shared_instance_or_from_current_task) {
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				map->userClientUnmap();
				// NOTE(review): the iokit_mod_send_right() status is
				// deliberately overwritten with success on the next
				// line — confirm the failure case is best-effort.
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (!is_shared_instance_or_from_current_task) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);

	return err;
}
4973
4974 kern_return_t
is_io_connect_unmap_memory(io_object_t connect,uint32_t type,task_t task,uint32_t mapAddr)4975 is_io_connect_unmap_memory(
4976 io_object_t connect,
4977 uint32_t type,
4978 task_t task,
4979 uint32_t mapAddr )
4980 {
4981 IOReturn err;
4982 mach_vm_address_t address;
4983
4984 address = SCALAR64(mapAddr);
4985
4986 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4987
4988 return err;
4989 }
4990
4991
4992 /* Routine io_connect_add_client */
4993 kern_return_t
is_io_connect_add_client(io_connect_t connection,io_object_t connect_to)4994 is_io_connect_add_client(
4995 io_connect_t connection,
4996 io_object_t connect_to)
4997 {
4998 CHECK( IOUserClient, connection, client );
4999 CHECK( IOUserClient, connect_to, to );
5000
5001 IOReturn ret;
5002
5003 IOStatisticsClientCall();
5004
5005 client->ipcEnter(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
5006 ret = client->connectClient( to );
5007 client->ipcExit(client->defaultLocking ? kIPCLockWrite : kIPCLockNone);
5008
5009 return ret;
5010 }
5011
5012
5013 /* Routine io_connect_set_properties */
5014 kern_return_t
is_io_connect_set_properties(io_connect_t connection,io_buf_ptr_t properties,mach_msg_type_number_t propertiesCnt,kern_return_t * result)5015 is_io_connect_set_properties(
5016 io_connect_t connection,
5017 io_buf_ptr_t properties,
5018 mach_msg_type_number_t propertiesCnt,
5019 kern_return_t * result)
5020 {
5021 return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
5022 }
5023
/* Routine io_user_client_method */
/*
 * External-method dispatch with a variable-sized (kernel-allocated)
 * structure output that is copied out to user space as an OOL buffer.
 */
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	OSObject * structureVariableOutputData = NULL;

	// Build the argument block handed to the client's external method.
	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// OOL input must be strictly larger than the inband limit; smaller
	// payloads are expected to arrive inband.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero the scalar outputs so a failed method cannot leak stale data.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;

	// Apply any sandbox filter policy registered for this task.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}

	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod(selector, &args);
	}

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	// Copy the variable output (OSSerialize text or OSData bytes) out to
	// the caller as a kernel-allocated OOL buffer.
	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData * data;
		unsigned int len;

		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			data->clipForCopyout();
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			// Method produced no variable output object.
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
5130
/* Routine io_user_client_method */
/*
 * External-method dispatch with fixed-size inband and optional OOL
 * structure input/output supplied by the caller.
 */
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	// Build the argument block handed to the client's external method.
	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// OOL buffers must be strictly larger than the inband limit, and the
	// output size must fit the 32-bit descriptor-size field.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero the scalar outputs so a failed method cannot leak stale data.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size
	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
	    : 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	// Apply any sandbox filter policy registered for this task.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		ret = client->callExternalMethod( selector, &args );
	}

	// Propagate actual produced sizes back to the caller.
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5233
5234 /* Routine io_async_user_client_method */
5235 kern_return_t
is_io_connect_async_method(io_connect_t connection,mach_port_t wake_port,io_async_ref64_t reference,mach_msg_type_number_t referenceCnt,uint32_t selector,io_scalar_inband64_t scalar_input,mach_msg_type_number_t scalar_inputCnt,io_struct_inband_t inband_input,mach_msg_type_number_t inband_inputCnt,mach_vm_address_t ool_input,mach_vm_size_t ool_input_size,io_struct_inband_t inband_output,mach_msg_type_number_t * inband_outputCnt,io_scalar_inband64_t scalar_output,mach_msg_type_number_t * scalar_outputCnt,mach_vm_address_t ool_output,mach_vm_size_t * ool_output_size)5236 is_io_connect_async_method
5237 (
5238 io_connect_t connection,
5239 mach_port_t wake_port,
5240 io_async_ref64_t reference,
5241 mach_msg_type_number_t referenceCnt,
5242 uint32_t selector,
5243 io_scalar_inband64_t scalar_input,
5244 mach_msg_type_number_t scalar_inputCnt,
5245 io_struct_inband_t inband_input,
5246 mach_msg_type_number_t inband_inputCnt,
5247 mach_vm_address_t ool_input,
5248 mach_vm_size_t ool_input_size,
5249 io_struct_inband_t inband_output,
5250 mach_msg_type_number_t *inband_outputCnt,
5251 io_scalar_inband64_t scalar_output,
5252 mach_msg_type_number_t *scalar_outputCnt,
5253 mach_vm_address_t ool_output,
5254 mach_vm_size_t * ool_output_size
5255 )
5256 {
5257 CHECK( IOUserClient, connection, client );
5258
5259 IOExternalMethodArguments args;
5260 IOReturn ret;
5261 IOMemoryDescriptor * inputMD = NULL;
5262 IOMemoryDescriptor * outputMD = NULL;
5263
5264 if (referenceCnt < 1) {
5265 return kIOReturnBadArgument;
5266 }
5267
5268 bzero(&args.__reserved[0], sizeof(args.__reserved));
5269 args.__reservedA = 0;
5270 args.version = kIOExternalMethodArgumentsCurrentVersion;
5271
5272 reference[0] = (io_user_reference_t) wake_port;
5273 if (vm_map_is_64bit(get_task_map(current_task()))) {
5274 reference[0] |= kIOUCAsync64Flag;
5275 }
5276
5277 args.selector = selector;
5278
5279 args.asyncWakePort = wake_port;
5280 args.asyncReference = reference;
5281 args.asyncReferenceCount = referenceCnt;
5282
5283 args.structureVariableOutputData = NULL;
5284
5285 args.scalarInput = scalar_input;
5286 args.scalarInputCount = scalar_inputCnt;
5287 args.structureInput = inband_input;
5288 args.structureInputSize = inband_inputCnt;
5289
5290 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
5291 return kIOReturnIPCError;
5292 }
5293 if (ool_output) {
5294 if (*ool_output_size <= sizeof(io_struct_inband_t)) {
5295 return kIOReturnIPCError;
5296 }
5297 if (*ool_output_size > UINT_MAX) {
5298 return kIOReturnIPCError;
5299 }
5300 }
5301
5302 if (ool_input) {
5303 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
5304 kIODirectionOut | kIOMemoryMapCopyOnWrite,
5305 current_task());
5306 }
5307
5308 args.structureInputDescriptor = inputMD;
5309
5310 args.scalarOutput = scalar_output;
5311 args.scalarOutputCount = *scalar_outputCnt;
5312 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
5313 args.structureOutput = inband_output;
5314 args.structureOutputSize = *inband_outputCnt;
5315
5316 if (ool_output) {
5317 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
5318 kIODirectionIn, current_task());
5319 }
5320
5321 args.structureOutputDescriptor = outputMD;
5322 args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);
5323
5324 IOStatisticsClientCall();
5325 ret = kIOReturnSuccess;
5326 io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
5327 if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
5328 ret = gIOUCFilterCallbacks->io_filter_applier(client, filterPolicy, io_filter_type_external_async_method, selector);
5329 }
5330 if (kIOReturnSuccess == ret) {
5331 ret = client->callExternalMethod( selector, &args );
5332 }
5333
5334 *scalar_outputCnt = args.scalarOutputCount;
5335 *inband_outputCnt = args.structureOutputSize;
5336 *ool_output_size = args.structureOutputDescriptorSize;
5337
5338 if (inputMD) {
5339 inputMD->release();
5340 }
5341 if (outputMD) {
5342 outputMD->release();
5343 }
5344
5345 return ret;
5346 }
5347
5348 /* Routine io_connect_method_scalarI_scalarO */
5349 kern_return_t
is_io_connect_method_scalarI_scalarO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5350 is_io_connect_method_scalarI_scalarO(
5351 io_object_t connect,
5352 uint32_t index,
5353 io_scalar_inband_t input,
5354 mach_msg_type_number_t inputCount,
5355 io_scalar_inband_t output,
5356 mach_msg_type_number_t * outputCount )
5357 {
5358 IOReturn err;
5359 uint32_t i;
5360 io_scalar_inband64_t _input;
5361 io_scalar_inband64_t _output;
5362
5363 mach_msg_type_number_t struct_outputCnt = 0;
5364 mach_vm_size_t ool_output_size = 0;
5365
5366 bzero(&_output[0], sizeof(_output));
5367 for (i = 0; i < inputCount; i++) {
5368 _input[i] = SCALAR64(input[i]);
5369 }
5370
5371 err = is_io_connect_method(connect, index,
5372 _input, inputCount,
5373 NULL, 0,
5374 0, 0,
5375 NULL, &struct_outputCnt,
5376 _output, outputCount,
5377 0, &ool_output_size);
5378
5379 for (i = 0; i < *outputCount; i++) {
5380 output[i] = SCALAR32(_output[i]);
5381 }
5382
5383 return err;
5384 }
5385
5386 kern_return_t
shim_io_connect_method_scalarI_scalarO(IOExternalMethod * method,IOService * object,const io_user_scalar_t * input,mach_msg_type_number_t inputCount,io_user_scalar_t * output,mach_msg_type_number_t * outputCount)5387 shim_io_connect_method_scalarI_scalarO(
5388 IOExternalMethod * method,
5389 IOService * object,
5390 const io_user_scalar_t * input,
5391 mach_msg_type_number_t inputCount,
5392 io_user_scalar_t * output,
5393 mach_msg_type_number_t * outputCount )
5394 {
5395 IOMethod func;
5396 io_scalar_inband_t _output;
5397 IOReturn err;
5398 err = kIOReturnBadArgument;
5399
5400 bzero(&_output[0], sizeof(_output));
5401 do {
5402 if (inputCount != method->count0) {
5403 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5404 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5405 continue;
5406 }
5407 if (*outputCount != method->count1) {
5408 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
5409 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5410 continue;
5411 }
5412
5413 func = method->func;
5414
5415 switch (inputCount) {
5416 case 6:
5417 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5418 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
5419 break;
5420 case 5:
5421 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5422 ARG32(input[3]), ARG32(input[4]),
5423 &_output[0] );
5424 break;
5425 case 4:
5426 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5427 ARG32(input[3]),
5428 &_output[0], &_output[1] );
5429 break;
5430 case 3:
5431 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5432 &_output[0], &_output[1], &_output[2] );
5433 break;
5434 case 2:
5435 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5436 &_output[0], &_output[1], &_output[2],
5437 &_output[3] );
5438 break;
5439 case 1:
5440 err = (object->*func)( ARG32(input[0]),
5441 &_output[0], &_output[1], &_output[2],
5442 &_output[3], &_output[4] );
5443 break;
5444 case 0:
5445 err = (object->*func)( &_output[0], &_output[1], &_output[2],
5446 &_output[3], &_output[4], &_output[5] );
5447 break;
5448
5449 default:
5450 IOLog("%s: Bad method table\n", object->getName());
5451 }
5452 }while (false);
5453
5454 uint32_t i;
5455 for (i = 0; i < *outputCount; i++) {
5456 output[i] = SCALAR32(_output[i]);
5457 }
5458
5459 return err;
5460 }
5461
5462 /* Routine io_async_method_scalarI_scalarO */
5463 kern_return_t
is_io_async_method_scalarI_scalarO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_scalar_inband_t output,mach_msg_type_number_t * outputCount)5464 is_io_async_method_scalarI_scalarO(
5465 io_object_t connect,
5466 mach_port_t wake_port,
5467 io_async_ref_t reference,
5468 mach_msg_type_number_t referenceCnt,
5469 uint32_t index,
5470 io_scalar_inband_t input,
5471 mach_msg_type_number_t inputCount,
5472 io_scalar_inband_t output,
5473 mach_msg_type_number_t * outputCount )
5474 {
5475 IOReturn err;
5476 uint32_t i;
5477 io_scalar_inband64_t _input;
5478 io_scalar_inband64_t _output;
5479 io_async_ref64_t _reference;
5480
5481 if (referenceCnt > ASYNC_REF64_COUNT) {
5482 return kIOReturnBadArgument;
5483 }
5484 bzero(&_output[0], sizeof(_output));
5485 for (i = 0; i < referenceCnt; i++) {
5486 _reference[i] = REF64(reference[i]);
5487 }
5488 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5489
5490 mach_msg_type_number_t struct_outputCnt = 0;
5491 mach_vm_size_t ool_output_size = 0;
5492
5493 for (i = 0; i < inputCount; i++) {
5494 _input[i] = SCALAR64(input[i]);
5495 }
5496
5497 err = is_io_connect_async_method(connect,
5498 wake_port, _reference, referenceCnt,
5499 index,
5500 _input, inputCount,
5501 NULL, 0,
5502 0, 0,
5503 NULL, &struct_outputCnt,
5504 _output, outputCount,
5505 0, &ool_output_size);
5506
5507 for (i = 0; i < *outputCount; i++) {
5508 output[i] = SCALAR32(_output[i]);
5509 }
5510
5511 return err;
5512 }
5513 /* Routine io_async_method_scalarI_structureO */
5514 kern_return_t
is_io_async_method_scalarI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5515 is_io_async_method_scalarI_structureO(
5516 io_object_t connect,
5517 mach_port_t wake_port,
5518 io_async_ref_t reference,
5519 mach_msg_type_number_t referenceCnt,
5520 uint32_t index,
5521 io_scalar_inband_t input,
5522 mach_msg_type_number_t inputCount,
5523 io_struct_inband_t output,
5524 mach_msg_type_number_t * outputCount )
5525 {
5526 uint32_t i;
5527 io_scalar_inband64_t _input;
5528 io_async_ref64_t _reference;
5529
5530 if (referenceCnt > ASYNC_REF64_COUNT) {
5531 return kIOReturnBadArgument;
5532 }
5533 for (i = 0; i < referenceCnt; i++) {
5534 _reference[i] = REF64(reference[i]);
5535 }
5536 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5537
5538 mach_msg_type_number_t scalar_outputCnt = 0;
5539 mach_vm_size_t ool_output_size = 0;
5540
5541 for (i = 0; i < inputCount; i++) {
5542 _input[i] = SCALAR64(input[i]);
5543 }
5544
5545 return is_io_connect_async_method(connect,
5546 wake_port, _reference, referenceCnt,
5547 index,
5548 _input, inputCount,
5549 NULL, 0,
5550 0, 0,
5551 output, outputCount,
5552 NULL, &scalar_outputCnt,
5553 0, &ool_output_size);
5554 }
5555
5556 /* Routine io_async_method_scalarI_structureI */
5557 kern_return_t
is_io_async_method_scalarI_structureI(io_connect_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5558 is_io_async_method_scalarI_structureI(
5559 io_connect_t connect,
5560 mach_port_t wake_port,
5561 io_async_ref_t reference,
5562 mach_msg_type_number_t referenceCnt,
5563 uint32_t index,
5564 io_scalar_inband_t input,
5565 mach_msg_type_number_t inputCount,
5566 io_struct_inband_t inputStruct,
5567 mach_msg_type_number_t inputStructCount )
5568 {
5569 uint32_t i;
5570 io_scalar_inband64_t _input;
5571 io_async_ref64_t _reference;
5572
5573 if (referenceCnt > ASYNC_REF64_COUNT) {
5574 return kIOReturnBadArgument;
5575 }
5576 for (i = 0; i < referenceCnt; i++) {
5577 _reference[i] = REF64(reference[i]);
5578 }
5579 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5580
5581 mach_msg_type_number_t scalar_outputCnt = 0;
5582 mach_msg_type_number_t inband_outputCnt = 0;
5583 mach_vm_size_t ool_output_size = 0;
5584
5585 for (i = 0; i < inputCount; i++) {
5586 _input[i] = SCALAR64(input[i]);
5587 }
5588
5589 return is_io_connect_async_method(connect,
5590 wake_port, _reference, referenceCnt,
5591 index,
5592 _input, inputCount,
5593 inputStruct, inputStructCount,
5594 0, 0,
5595 NULL, &inband_outputCnt,
5596 NULL, &scalar_outputCnt,
5597 0, &ool_output_size);
5598 }
5599
5600 /* Routine io_async_method_structureI_structureO */
5601 kern_return_t
is_io_async_method_structureI_structureO(io_object_t connect,mach_port_t wake_port,io_async_ref_t reference,mach_msg_type_number_t referenceCnt,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5602 is_io_async_method_structureI_structureO(
5603 io_object_t connect,
5604 mach_port_t wake_port,
5605 io_async_ref_t reference,
5606 mach_msg_type_number_t referenceCnt,
5607 uint32_t index,
5608 io_struct_inband_t input,
5609 mach_msg_type_number_t inputCount,
5610 io_struct_inband_t output,
5611 mach_msg_type_number_t * outputCount )
5612 {
5613 uint32_t i;
5614 mach_msg_type_number_t scalar_outputCnt = 0;
5615 mach_vm_size_t ool_output_size = 0;
5616 io_async_ref64_t _reference;
5617
5618 if (referenceCnt > ASYNC_REF64_COUNT) {
5619 return kIOReturnBadArgument;
5620 }
5621 for (i = 0; i < referenceCnt; i++) {
5622 _reference[i] = REF64(reference[i]);
5623 }
5624 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5625
5626 return is_io_connect_async_method(connect,
5627 wake_port, _reference, referenceCnt,
5628 index,
5629 NULL, 0,
5630 input, inputCount,
5631 0, 0,
5632 output, outputCount,
5633 NULL, &scalar_outputCnt,
5634 0, &ool_output_size);
5635 }
5636
5637
5638 kern_return_t
shim_io_async_method_scalarI_scalarO(IOExternalAsyncMethod * method,IOService * object,mach_port_t asyncWakePort,io_user_reference_t * asyncReference,uint32_t asyncReferenceCount,const io_user_scalar_t * input,mach_msg_type_number_t inputCount,io_user_scalar_t * output,mach_msg_type_number_t * outputCount)5639 shim_io_async_method_scalarI_scalarO(
5640 IOExternalAsyncMethod * method,
5641 IOService * object,
5642 mach_port_t asyncWakePort,
5643 io_user_reference_t * asyncReference,
5644 uint32_t asyncReferenceCount,
5645 const io_user_scalar_t * input,
5646 mach_msg_type_number_t inputCount,
5647 io_user_scalar_t * output,
5648 mach_msg_type_number_t * outputCount )
5649 {
5650 IOAsyncMethod func;
5651 uint32_t i;
5652 io_scalar_inband_t _output;
5653 IOReturn err;
5654 io_async_ref_t reference;
5655
5656 bzero(&_output[0], sizeof(_output));
5657 for (i = 0; i < asyncReferenceCount; i++) {
5658 reference[i] = REF32(asyncReference[i]);
5659 }
5660
5661 err = kIOReturnBadArgument;
5662
5663 do {
5664 if (inputCount != method->count0) {
5665 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5666 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5667 continue;
5668 }
5669 if (*outputCount != method->count1) {
5670 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
5671 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5672 continue;
5673 }
5674
5675 func = method->func;
5676
5677 switch (inputCount) {
5678 case 6:
5679 err = (object->*func)( reference,
5680 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5681 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
5682 break;
5683 case 5:
5684 err = (object->*func)( reference,
5685 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5686 ARG32(input[3]), ARG32(input[4]),
5687 &_output[0] );
5688 break;
5689 case 4:
5690 err = (object->*func)( reference,
5691 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5692 ARG32(input[3]),
5693 &_output[0], &_output[1] );
5694 break;
5695 case 3:
5696 err = (object->*func)( reference,
5697 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5698 &_output[0], &_output[1], &_output[2] );
5699 break;
5700 case 2:
5701 err = (object->*func)( reference,
5702 ARG32(input[0]), ARG32(input[1]),
5703 &_output[0], &_output[1], &_output[2],
5704 &_output[3] );
5705 break;
5706 case 1:
5707 err = (object->*func)( reference,
5708 ARG32(input[0]),
5709 &_output[0], &_output[1], &_output[2],
5710 &_output[3], &_output[4] );
5711 break;
5712 case 0:
5713 err = (object->*func)( reference,
5714 &_output[0], &_output[1], &_output[2],
5715 &_output[3], &_output[4], &_output[5] );
5716 break;
5717
5718 default:
5719 IOLog("%s: Bad method table\n", object->getName());
5720 }
5721 }while (false);
5722
5723 for (i = 0; i < *outputCount; i++) {
5724 output[i] = SCALAR32(_output[i]);
5725 }
5726
5727 return err;
5728 }
5729
5730
5731 /* Routine io_connect_method_scalarI_structureO */
5732 kern_return_t
is_io_connect_method_scalarI_structureO(io_object_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5733 is_io_connect_method_scalarI_structureO(
5734 io_object_t connect,
5735 uint32_t index,
5736 io_scalar_inband_t input,
5737 mach_msg_type_number_t inputCount,
5738 io_struct_inband_t output,
5739 mach_msg_type_number_t * outputCount )
5740 {
5741 uint32_t i;
5742 io_scalar_inband64_t _input;
5743
5744 mach_msg_type_number_t scalar_outputCnt = 0;
5745 mach_vm_size_t ool_output_size = 0;
5746
5747 for (i = 0; i < inputCount; i++) {
5748 _input[i] = SCALAR64(input[i]);
5749 }
5750
5751 return is_io_connect_method(connect, index,
5752 _input, inputCount,
5753 NULL, 0,
5754 0, 0,
5755 output, outputCount,
5756 NULL, &scalar_outputCnt,
5757 0, &ool_output_size);
5758 }
5759
5760 kern_return_t
shim_io_connect_method_scalarI_structureO(IOExternalMethod * method,IOService * object,const io_user_scalar_t * input,mach_msg_type_number_t inputCount,io_struct_inband_t output,IOByteCount * outputCount)5761 shim_io_connect_method_scalarI_structureO(
5762
5763 IOExternalMethod * method,
5764 IOService * object,
5765 const io_user_scalar_t * input,
5766 mach_msg_type_number_t inputCount,
5767 io_struct_inband_t output,
5768 IOByteCount * outputCount )
5769 {
5770 IOMethod func;
5771 IOReturn err;
5772
5773 err = kIOReturnBadArgument;
5774
5775 do {
5776 if (inputCount != method->count0) {
5777 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5778 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5779 continue;
5780 }
5781 if ((kIOUCVariableStructureSize != method->count1)
5782 && (*outputCount != method->count1)) {
5783 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5784 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5785 continue;
5786 }
5787
5788 func = method->func;
5789
5790 switch (inputCount) {
5791 case 5:
5792 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5793 ARG32(input[3]), ARG32(input[4]),
5794 output );
5795 break;
5796 case 4:
5797 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5798 ARG32(input[3]),
5799 output, (void *)outputCount );
5800 break;
5801 case 3:
5802 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5803 output, (void *)outputCount, NULL );
5804 break;
5805 case 2:
5806 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5807 output, (void *)outputCount, NULL, NULL );
5808 break;
5809 case 1:
5810 err = (object->*func)( ARG32(input[0]),
5811 output, (void *)outputCount, NULL, NULL, NULL );
5812 break;
5813 case 0:
5814 err = (object->*func)( output, (void *)outputCount, NULL, NULL, NULL, NULL );
5815 break;
5816
5817 default:
5818 IOLog("%s: Bad method table\n", object->getName());
5819 }
5820 }while (false);
5821
5822 return err;
5823 }
5824
5825
5826 kern_return_t
shim_io_async_method_scalarI_structureO(IOExternalAsyncMethod * method,IOService * object,mach_port_t asyncWakePort,io_user_reference_t * asyncReference,uint32_t asyncReferenceCount,const io_user_scalar_t * input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)5827 shim_io_async_method_scalarI_structureO(
5828 IOExternalAsyncMethod * method,
5829 IOService * object,
5830 mach_port_t asyncWakePort,
5831 io_user_reference_t * asyncReference,
5832 uint32_t asyncReferenceCount,
5833 const io_user_scalar_t * input,
5834 mach_msg_type_number_t inputCount,
5835 io_struct_inband_t output,
5836 mach_msg_type_number_t * outputCount )
5837 {
5838 IOAsyncMethod func;
5839 uint32_t i;
5840 IOReturn err;
5841 io_async_ref_t reference;
5842
5843 for (i = 0; i < asyncReferenceCount; i++) {
5844 reference[i] = REF32(asyncReference[i]);
5845 }
5846
5847 err = kIOReturnBadArgument;
5848 do {
5849 if (inputCount != method->count0) {
5850 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5851 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5852 continue;
5853 }
5854 if ((kIOUCVariableStructureSize != method->count1)
5855 && (*outputCount != method->count1)) {
5856 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5857 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5858 continue;
5859 }
5860
5861 func = method->func;
5862
5863 switch (inputCount) {
5864 case 5:
5865 err = (object->*func)( reference,
5866 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5867 ARG32(input[3]), ARG32(input[4]),
5868 output );
5869 break;
5870 case 4:
5871 err = (object->*func)( reference,
5872 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5873 ARG32(input[3]),
5874 output, (void *)outputCount );
5875 break;
5876 case 3:
5877 err = (object->*func)( reference,
5878 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5879 output, (void *)outputCount, NULL );
5880 break;
5881 case 2:
5882 err = (object->*func)( reference,
5883 ARG32(input[0]), ARG32(input[1]),
5884 output, (void *)outputCount, NULL, NULL );
5885 break;
5886 case 1:
5887 err = (object->*func)( reference,
5888 ARG32(input[0]),
5889 output, (void *)outputCount, NULL, NULL, NULL );
5890 break;
5891 case 0:
5892 err = (object->*func)( reference,
5893 output, (void *)outputCount, NULL, NULL, NULL, NULL );
5894 break;
5895
5896 default:
5897 IOLog("%s: Bad method table\n", object->getName());
5898 }
5899 }while (false);
5900
5901 return err;
5902 }
5903
5904 /* Routine io_connect_method_scalarI_structureI */
5905 kern_return_t
is_io_connect_method_scalarI_structureI(io_connect_t connect,uint32_t index,io_scalar_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5906 is_io_connect_method_scalarI_structureI(
5907 io_connect_t connect,
5908 uint32_t index,
5909 io_scalar_inband_t input,
5910 mach_msg_type_number_t inputCount,
5911 io_struct_inband_t inputStruct,
5912 mach_msg_type_number_t inputStructCount )
5913 {
5914 uint32_t i;
5915 io_scalar_inband64_t _input;
5916
5917 mach_msg_type_number_t scalar_outputCnt = 0;
5918 mach_msg_type_number_t inband_outputCnt = 0;
5919 mach_vm_size_t ool_output_size = 0;
5920
5921 for (i = 0; i < inputCount; i++) {
5922 _input[i] = SCALAR64(input[i]);
5923 }
5924
5925 return is_io_connect_method(connect, index,
5926 _input, inputCount,
5927 inputStruct, inputStructCount,
5928 0, 0,
5929 NULL, &inband_outputCnt,
5930 NULL, &scalar_outputCnt,
5931 0, &ool_output_size);
5932 }
5933
5934 kern_return_t
shim_io_connect_method_scalarI_structureI(IOExternalMethod * method,IOService * object,const io_user_scalar_t * input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)5935 shim_io_connect_method_scalarI_structureI(
5936 IOExternalMethod * method,
5937 IOService * object,
5938 const io_user_scalar_t * input,
5939 mach_msg_type_number_t inputCount,
5940 io_struct_inband_t inputStruct,
5941 mach_msg_type_number_t inputStructCount )
5942 {
5943 IOMethod func;
5944 IOReturn err = kIOReturnBadArgument;
5945
5946 do{
5947 if (inputCount != method->count0) {
5948 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5949 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5950 continue;
5951 }
5952 if ((kIOUCVariableStructureSize != method->count1)
5953 && (inputStructCount != method->count1)) {
5954 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5955 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5956 continue;
5957 }
5958
5959 func = method->func;
5960
5961 switch (inputCount) {
5962 case 5:
5963 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5964 ARG32(input[3]), ARG32(input[4]),
5965 inputStruct );
5966 break;
5967 case 4:
5968 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
5969 ARG32(input[3]),
5970 inputStruct, (void *)(uintptr_t)inputStructCount );
5971 break;
5972 case 3:
5973 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5974 inputStruct, (void *)(uintptr_t)inputStructCount,
5975 NULL );
5976 break;
5977 case 2:
5978 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5979 inputStruct, (void *)(uintptr_t)inputStructCount,
5980 NULL, NULL );
5981 break;
5982 case 1:
5983 err = (object->*func)( ARG32(input[0]),
5984 inputStruct, (void *)(uintptr_t)inputStructCount,
5985 NULL, NULL, NULL );
5986 break;
5987 case 0:
5988 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
5989 NULL, NULL, NULL, NULL );
5990 break;
5991
5992 default:
5993 IOLog("%s: Bad method table\n", object->getName());
5994 }
5995 }while (false);
5996
5997 return err;
5998 }
5999
6000 kern_return_t
shim_io_async_method_scalarI_structureI(IOExternalAsyncMethod * method,IOService * object,mach_port_t asyncWakePort,io_user_reference_t * asyncReference,uint32_t asyncReferenceCount,const io_user_scalar_t * input,mach_msg_type_number_t inputCount,io_struct_inband_t inputStruct,mach_msg_type_number_t inputStructCount)6001 shim_io_async_method_scalarI_structureI(
6002 IOExternalAsyncMethod * method,
6003 IOService * object,
6004 mach_port_t asyncWakePort,
6005 io_user_reference_t * asyncReference,
6006 uint32_t asyncReferenceCount,
6007 const io_user_scalar_t * input,
6008 mach_msg_type_number_t inputCount,
6009 io_struct_inband_t inputStruct,
6010 mach_msg_type_number_t inputStructCount )
6011 {
6012 IOAsyncMethod func;
6013 uint32_t i;
6014 IOReturn err = kIOReturnBadArgument;
6015 io_async_ref_t reference;
6016
6017 for (i = 0; i < asyncReferenceCount; i++) {
6018 reference[i] = REF32(asyncReference[i]);
6019 }
6020
6021 do{
6022 if (inputCount != method->count0) {
6023 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
6024 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
6025 continue;
6026 }
6027 if ((kIOUCVariableStructureSize != method->count1)
6028 && (inputStructCount != method->count1)) {
6029 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
6030 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
6031 continue;
6032 }
6033
6034 func = method->func;
6035
6036 switch (inputCount) {
6037 case 5:
6038 err = (object->*func)( reference,
6039 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
6040 ARG32(input[3]), ARG32(input[4]),
6041 inputStruct );
6042 break;
6043 case 4:
6044 err = (object->*func)( reference,
6045 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
6046 ARG32(input[3]),
6047 inputStruct, (void *)(uintptr_t)inputStructCount );
6048 break;
6049 case 3:
6050 err = (object->*func)( reference,
6051 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
6052 inputStruct, (void *)(uintptr_t)inputStructCount,
6053 NULL );
6054 break;
6055 case 2:
6056 err = (object->*func)( reference,
6057 ARG32(input[0]), ARG32(input[1]),
6058 inputStruct, (void *)(uintptr_t)inputStructCount,
6059 NULL, NULL );
6060 break;
6061 case 1:
6062 err = (object->*func)( reference,
6063 ARG32(input[0]),
6064 inputStruct, (void *)(uintptr_t)inputStructCount,
6065 NULL, NULL, NULL );
6066 break;
6067 case 0:
6068 err = (object->*func)( reference,
6069 inputStruct, (void *)(uintptr_t)inputStructCount,
6070 NULL, NULL, NULL, NULL );
6071 break;
6072
6073 default:
6074 IOLog("%s: Bad method table\n", object->getName());
6075 }
6076 }while (false);
6077
6078 return err;
6079 }
6080
6081 /* Routine io_connect_method_structureI_structureO */
6082 kern_return_t
is_io_connect_method_structureI_structureO(io_object_t connect,uint32_t index,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,mach_msg_type_number_t * outputCount)6083 is_io_connect_method_structureI_structureO(
6084 io_object_t connect,
6085 uint32_t index,
6086 io_struct_inband_t input,
6087 mach_msg_type_number_t inputCount,
6088 io_struct_inband_t output,
6089 mach_msg_type_number_t * outputCount )
6090 {
6091 mach_msg_type_number_t scalar_outputCnt = 0;
6092 mach_vm_size_t ool_output_size = 0;
6093
6094 return is_io_connect_method(connect, index,
6095 NULL, 0,
6096 input, inputCount,
6097 0, 0,
6098 output, outputCount,
6099 NULL, &scalar_outputCnt,
6100 0, &ool_output_size);
6101 }
6102
6103 kern_return_t
shim_io_connect_method_structureI_structureO(IOExternalMethod * method,IOService * object,io_struct_inband_t input,mach_msg_type_number_t inputCount,io_struct_inband_t output,IOByteCount * outputCount)6104 shim_io_connect_method_structureI_structureO(
6105 IOExternalMethod * method,
6106 IOService * object,
6107 io_struct_inband_t input,
6108 mach_msg_type_number_t inputCount,
6109 io_struct_inband_t output,
6110 IOByteCount * outputCount )
6111 {
6112 IOMethod func;
6113 IOReturn err = kIOReturnBadArgument;
6114
6115 do{
6116 if ((kIOUCVariableStructureSize != method->count0)
6117 && (inputCount != method->count0)) {
6118 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
6119 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
6120 continue;
6121 }
6122 if ((kIOUCVariableStructureSize != method->count1)
6123 && (*outputCount != method->count1)) {
6124 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
6125 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
6126 continue;
6127 }
6128
6129 func = method->func;
6130
6131 if (method->count1) {
6132 if (method->count0) {
6133 err = (object->*func)( input, output,
6134 (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
6135 } else {
6136 err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
6137 }
6138 } else {
6139 err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
6140 }
6141 }while (false);
6142
6143
6144 return err;
6145 }
6146
/*
 * Shim a pre-Leopard async struct-in/struct-out external method call onto
 * a legacy IOAsyncMethod table entry, validating the caller-supplied sizes
 * against the entry's count0 (input) and count1 (output) expectations.
 */
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	// Convert the user async references to the legacy io_async_ref_t form
	// via REF32 (presumably a 64->32-bit narrowing — see REF32's definition).
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do{
		// count0 is the expected input size; kIOUCVariableStructureSize
		// means any size is accepted. On mismatch, log and bail with
		// kIOReturnBadArgument (via the do/while(false) + continue idiom).
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// count1 is the expected output size, checked the same way.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Legacy IOAsyncMethod argument layout varies with which
		// directions carry a struct; unused slots are NULL-padded.
		if (method->count1) {
			if (method->count0) {
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
6202
6203 /* Routine io_catalog_send_data */
6204 kern_return_t
is_io_catalog_send_data(mach_port_t main_port,uint32_t flag,io_buf_ptr_t inData,mach_msg_type_number_t inDataCount,kern_return_t * result)6205 is_io_catalog_send_data(
6206 mach_port_t main_port,
6207 uint32_t flag,
6208 io_buf_ptr_t inData,
6209 mach_msg_type_number_t inDataCount,
6210 kern_return_t * result)
6211 {
6212 // Allow sending catalog data if there is no kextd and the kernel is DEVELOPMENT || DEBUG
6213 #if NO_KEXTD && !(DEVELOPMENT || DEBUG)
6214 return kIOReturnNotPrivileged;
6215 #else /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
6216 OSObject * obj = NULL;
6217 vm_offset_t data;
6218 kern_return_t kr = kIOReturnError;
6219
6220 //printf("io_catalog_send_data called. flag: %d\n", flag);
6221
6222 if (main_port != main_device_port) {
6223 return kIOReturnNotPrivileged;
6224 }
6225
6226 if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
6227 flag != kIOCatalogKextdActive &&
6228 flag != kIOCatalogKextdFinishedLaunching) &&
6229 (!inData || !inDataCount)) {
6230 return kIOReturnBadArgument;
6231 }
6232
6233 if (!IOCurrentTaskHasEntitlement(kIOCatalogManagementEntitlement)) {
6234 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
6235 IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
6236 OSSafeReleaseNULL(taskName);
6237 // For now, fake success to not break applications relying on this function succeeding.
6238 // See <rdar://problem/32554970> for more details.
6239 return kIOReturnSuccess;
6240 }
6241
6242 if (inData) {
6243 vm_map_offset_t map_data;
6244
6245 if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
6246 return kIOReturnMessageTooLarge;
6247 }
6248
6249 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
6250 data = CAST_DOWN(vm_offset_t, map_data);
6251
6252 if (kr != KERN_SUCCESS) {
6253 return kr;
6254 }
6255
6256 // must return success after vm_map_copyout() succeeds
6257
6258 if (inDataCount) {
6259 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
6260 vm_deallocate( kernel_map, data, inDataCount );
6261 if (!obj) {
6262 *result = kIOReturnNoMemory;
6263 return KERN_SUCCESS;
6264 }
6265 }
6266 }
6267
6268 switch (flag) {
6269 case kIOCatalogResetDrivers:
6270 case kIOCatalogResetDriversNoMatch: {
6271 OSArray * array;
6272
6273 array = OSDynamicCast(OSArray, obj);
6274 if (array) {
6275 if (!gIOCatalogue->resetAndAddDrivers(array,
6276 flag == kIOCatalogResetDrivers)) {
6277 kr = kIOReturnError;
6278 }
6279 } else {
6280 kr = kIOReturnBadArgument;
6281 }
6282 }
6283 break;
6284
6285 case kIOCatalogAddDrivers:
6286 case kIOCatalogAddDriversNoMatch: {
6287 OSArray * array;
6288
6289 array = OSDynamicCast(OSArray, obj);
6290 if (array) {
6291 if (!gIOCatalogue->addDrivers( array,
6292 flag == kIOCatalogAddDrivers)) {
6293 kr = kIOReturnError;
6294 }
6295 } else {
6296 kr = kIOReturnBadArgument;
6297 }
6298 }
6299 break;
6300
6301 case kIOCatalogRemoveDrivers:
6302 case kIOCatalogRemoveDriversNoMatch: {
6303 OSDictionary * dict;
6304
6305 dict = OSDynamicCast(OSDictionary, obj);
6306 if (dict) {
6307 if (!gIOCatalogue->removeDrivers( dict,
6308 flag == kIOCatalogRemoveDrivers )) {
6309 kr = kIOReturnError;
6310 }
6311 } else {
6312 kr = kIOReturnBadArgument;
6313 }
6314 }
6315 break;
6316
6317 case kIOCatalogStartMatching__Removed:
6318 case kIOCatalogRemoveKernelLinker__Removed:
6319 case kIOCatalogKextdActive:
6320 case kIOCatalogKextdFinishedLaunching:
6321 kr = KERN_NOT_SUPPORTED;
6322 break;
6323
6324 default:
6325 kr = kIOReturnBadArgument;
6326 break;
6327 }
6328
6329 if (obj) {
6330 obj->release();
6331 }
6332
6333 *result = kr;
6334 return KERN_SUCCESS;
6335 #endif /* NO_KEXTD && !(DEVELOPMENT || DEBUG) */
6336 }
6337
6338 /* Routine io_catalog_terminate */
6339 kern_return_t
is_io_catalog_terminate(mach_port_t main_port,uint32_t flag,io_name_t name)6340 is_io_catalog_terminate(
6341 mach_port_t main_port,
6342 uint32_t flag,
6343 io_name_t name )
6344 {
6345 kern_return_t kr;
6346
6347 if (main_port != main_device_port) {
6348 return kIOReturnNotPrivileged;
6349 }
6350
6351 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6352 kIOClientPrivilegeAdministrator );
6353 if (kIOReturnSuccess != kr) {
6354 return kr;
6355 }
6356
6357 switch (flag) {
6358 #if !defined(SECURE_KERNEL)
6359 case kIOCatalogServiceTerminate:
6360 kr = gIOCatalogue->terminateDrivers(NULL, name, false);
6361 break;
6362
6363 case kIOCatalogModuleUnload:
6364 case kIOCatalogModuleTerminate:
6365 kr = gIOCatalogue->terminateDriversForModule(name,
6366 flag == kIOCatalogModuleUnload);
6367 break;
6368 #endif
6369
6370 default:
6371 kr = kIOReturnBadArgument;
6372 break;
6373 }
6374
6375 return kr;
6376 }
6377
6378 /* Routine io_catalog_get_data */
6379 kern_return_t
is_io_catalog_get_data(mach_port_t main_port,uint32_t flag,io_buf_ptr_t * outData,mach_msg_type_number_t * outDataCount)6380 is_io_catalog_get_data(
6381 mach_port_t main_port,
6382 uint32_t flag,
6383 io_buf_ptr_t *outData,
6384 mach_msg_type_number_t *outDataCount)
6385 {
6386 kern_return_t kr = kIOReturnSuccess;
6387 OSSerialize * s;
6388
6389 if (main_port != main_device_port) {
6390 return kIOReturnNotPrivileged;
6391 }
6392
6393 //printf("io_catalog_get_data called. flag: %d\n", flag);
6394
6395 s = OSSerialize::withCapacity(4096);
6396 if (!s) {
6397 return kIOReturnNoMemory;
6398 }
6399
6400 kr = gIOCatalogue->serializeData(flag, s);
6401
6402 if (kr == kIOReturnSuccess) {
6403 mach_vm_address_t data;
6404 vm_map_copy_t copy;
6405 unsigned int size;
6406
6407 size = s->getLength();
6408 kr = mach_vm_allocate_kernel(kernel_map, &data, size,
6409 VM_MAP_KERNEL_FLAGS_ANYWHERE(.vm_tag = VM_KERN_MEMORY_IOKIT));
6410 if (kr == kIOReturnSuccess) {
6411 bcopy(s->text(), (void *)data, size);
6412 kr = vm_map_copyin(kernel_map, data, size, true, ©);
6413 *outData = (char *)copy;
6414 *outDataCount = size;
6415 }
6416 }
6417
6418 s->release();
6419
6420 return kr;
6421 }
6422
6423 /* Routine io_catalog_get_gen_count */
6424 kern_return_t
is_io_catalog_get_gen_count(mach_port_t main_port,uint32_t * genCount)6425 is_io_catalog_get_gen_count(
6426 mach_port_t main_port,
6427 uint32_t *genCount)
6428 {
6429 if (main_port != main_device_port) {
6430 return kIOReturnNotPrivileged;
6431 }
6432
6433 //printf("io_catalog_get_gen_count called.\n");
6434
6435 if (!genCount) {
6436 return kIOReturnBadArgument;
6437 }
6438
6439 *genCount = gIOCatalogue->getGenerationCount();
6440
6441 return kIOReturnSuccess;
6442 }
6443
6444 /* Routine io_catalog_module_loaded.
6445 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
6446 */
6447 kern_return_t
is_io_catalog_module_loaded(mach_port_t main_port,io_name_t name)6448 is_io_catalog_module_loaded(
6449 mach_port_t main_port,
6450 io_name_t name)
6451 {
6452 if (main_port != main_device_port) {
6453 return kIOReturnNotPrivileged;
6454 }
6455
6456 //printf("io_catalog_module_loaded called. name %s\n", name);
6457
6458 if (!name) {
6459 return kIOReturnBadArgument;
6460 }
6461
6462 gIOCatalogue->moduleHasLoaded(name);
6463
6464 return kIOReturnSuccess;
6465 }
6466
6467 kern_return_t
is_io_catalog_reset(mach_port_t main_port,uint32_t flag)6468 is_io_catalog_reset(
6469 mach_port_t main_port,
6470 uint32_t flag)
6471 {
6472 if (main_port != main_device_port) {
6473 return kIOReturnNotPrivileged;
6474 }
6475
6476 switch (flag) {
6477 case kIOCatalogResetDefault:
6478 gIOCatalogue->reset();
6479 break;
6480
6481 default:
6482 return kIOReturnBadArgument;
6483 }
6484
6485 return kIOReturnSuccess;
6486 }
6487
/*
 * Mach trap entry for fast IOUserClient method invocation. The trap argument
 * block carries an opaque user-client reference plus six parameters; the
 * reference's low bits select between the DriverKit (uext) path and the
 * classic IOExternalTrap path.
 */
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t result = kIOReturnBadArgument;
	IOUserClient * userClient;
	OSObject * object;
	uintptr_t ref;
	mach_port_name_t portName;

	ref = (uintptr_t) args->userClientRef;

	// Reject obviously-invalid references up front.
	if ((ref == MACH_PORT_DEAD) || (ref == (uintptr_t) MACH_PORT_NULL)) {
		return kIOReturnBadArgument;
	}
	// kobject port names always have b0-1 set, so we use these bits as flags to
	// iokit_user_client_trap()
	// keep this up to date with ipc_entry_name_mask();
	portName = (mach_port_name_t) (ref | 3);
	// NOTE(review): bit 32 set, or bit 0 clear, routes to the DriverKit
	// (uext) trap; otherwise this is a classic IOUserClient trap — confirm
	// against ipc_entry_name_mask() if the encoding changes.
	if (((1ULL << 32) & ref) || !(1 & ref)) {
		// DriverKit path: resolve the object and forward all six args.
		object = iokit_lookup_uext_ref_current_task(portName);
		if (object) {
			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
		}
		OSSafeReleaseNULL(object);
	} else {
		io_object_t ref_current_task = iokit_lookup_connect_ref_current_task((mach_port_name_t) ref);
		if ((userClient = OSDynamicCast(IOUserClient, ref_current_task))) {
			IOExternalTrap *trap = NULL;
			IOService *target = NULL;

			result = kIOReturnSuccess;
			// Apply any sandbox-style filter policy before resolving the
			// trap; a veto prevents the lookup entirely.
			io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
			if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
				result = gIOUCFilterCallbacks->io_filter_applier(userClient, filterPolicy, io_filter_type_trap, args->index);
			}
			if (kIOReturnSuccess == result) {
				trap = userClient->getTargetAndTrapForIndex(&target, args->index);
			}
			if (trap && target) {
				IOTrap func;

				func = trap->func;

				if (func) {
					// Invoke the trap on its target with all six args.
					result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
				}
			}

			// Drop the reference taken by the connect-ref lookup.
			iokit_remove_connect_reference(userClient);
		} else {
			// Lookup returned something that is not an IOUserClient.
			OSSafeReleaseNULL(ref_current_task);
		}
	}

	return result;
}
6544
/* Routine io_device_tree_entry_exists_with_name */
/*
 * Check whether a device-tree entry named 'name' exists. The name may use
 * "entry:property" syntax, in which case the entry must also carry the
 * named property for *exists to be TRUE.
 */
kern_return_t
is_io_device_tree_entry_exists_with_name(
	mach_port_t main_port,
	io_name_t name,
	boolean_t *exists )
{
	OSCollectionIterator *iter;
	IORegistryEntry *entry;
	io_name_t namebuf;
	const char *entryname;
	const char *propname;

	if (main_port != main_device_port) {
		return kIOReturnNotPrivileged;
	}

	// Split "entry:property" at the first ':'.
	if ((propname = strchr(name, ':'))) {
		propname++;
		// Size (propname - name) copies at most the characters before the
		// ':' (strlcpy writes size-1 chars plus a NUL terminator).
		strlcpy(namebuf, name, propname - name);
		entryname = namebuf;
	} else {
		entryname = name;
	}

	// Take the first matching entry anywhere in the device tree; when a
	// property was requested, it must exist on that entry too.
	iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, entryname);
	if (iter && (entry = (IORegistryEntry *) iter->getNextObject())) {
		*exists = !propname || entry->propertyExists(propname);
	} else {
		*exists = FALSE;
	}
	OSSafeReleaseNULL(iter);

	return kIOReturnSuccess;
}
6580 } /* extern "C" */
6581
6582 IOReturn
callExternalMethod(uint32_t selector,IOExternalMethodArguments * args)6583 IOUserClient::callExternalMethod(uint32_t selector, IOExternalMethodArguments * args)
6584 {
6585 IOReturn ret;
6586
6587 ipcEnter(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6588 if (uc2022) {
6589 ret = ((IOUserClient2022 *) this)->externalMethod(selector, (IOExternalMethodArgumentsOpaque *) args);
6590 } else {
6591 ret = externalMethod(selector, args);
6592 }
6593 ipcExit(defaultLocking ? (defaultLockingSingleThreadExternalMethod ? kIPCLockWrite : kIPCLockRead) : kIPCLockNone);
6594
6595 return ret;
6596 }
6597
MIG_SERVER_ROUTINE IOReturn
IOUserClient2022::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
    IOExternalMethodDispatch *dispatch,
    OSObject *target, void *reference)
{
	// IOUserClient2022 subclasses must implement the opaque-arguments
	// variant; landing here means the legacy entry point was called
	// on a 2022-style client, which is a programming error.
	panic("wrong externalMethod for IOUserClient2022");
}
6605
6606 IOReturn
dispatchExternalMethod(uint32_t selector,IOExternalMethodArgumentsOpaque * arguments,const IOExternalMethodDispatch2022 dispatchArray[],size_t dispatchArrayCount,OSObject * target,void * reference)6607 IOUserClient2022::dispatchExternalMethod(uint32_t selector, IOExternalMethodArgumentsOpaque *arguments,
6608 const IOExternalMethodDispatch2022 dispatchArray[], size_t dispatchArrayCount,
6609 OSObject * target, void * reference)
6610 {
6611 IOReturn err;
6612 IOExternalMethodArguments * args = (typeof(args))arguments;
6613 const IOExternalMethodDispatch2022 * dispatch;
6614
6615 if (!dispatchArray) {
6616 return kIOReturnError;
6617 }
6618 if (selector >= dispatchArrayCount) {
6619 return kIOReturnBadArgument;
6620 }
6621 dispatch = &dispatchArray[selector];
6622
6623 uint32_t count;
6624 count = dispatch->checkScalarInputCount;
6625 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
6626 return kIOReturnBadArgument;
6627 }
6628
6629 count = dispatch->checkStructureInputSize;
6630 if ((kIOUCVariableStructureSize != count)
6631 && (count != ((args->structureInputDescriptor)
6632 ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
6633 return kIOReturnBadArgument;
6634 }
6635
6636 count = dispatch->checkScalarOutputCount;
6637 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
6638 return kIOReturnBadArgument;
6639 }
6640
6641 count = dispatch->checkStructureOutputSize;
6642 if ((kIOUCVariableStructureSize != count)
6643 && (count != ((args->structureOutputDescriptor)
6644 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
6645 return kIOReturnBadArgument;
6646 }
6647
6648 if (args->asyncWakePort && !dispatch->allowAsync) {
6649 return kIOReturnBadArgument;
6650 }
6651
6652 if (dispatch->checkEntitlement) {
6653 if (!IOCurrentTaskHasEntitlement(dispatch->checkEntitlement)) {
6654 return kIOReturnNotPrivileged;
6655 }
6656 }
6657
6658 if (dispatch->function) {
6659 err = (*dispatch->function)(target, reference, args);
6660 } else {
6661 err = kIOReturnNoCompletion; /* implementer can dispatch */
6662 }
6663 return err;
6664 }
6665
/*
 * Default external-method dispatcher. With an explicit 'dispatch' entry the
 * counts/sizes are validated and the entry's function is invoked directly;
 * otherwise the call is routed through the legacy (pre-Leopard) method
 * tables via the shim_io_* adapters, chosen by the method's type flags.
 */
IOReturn
IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
	IOReturn err;
	IOService * object;
	IOByteCount structureOutputSize;

	if (dispatch) {
		// Validate each count/size unless the table marks it variable.
		uint32_t count;
		count = dispatch->checkScalarInputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureInputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureInputDescriptor)
		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkScalarOutputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureOutputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureOutputDescriptor)
		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
			return kIOReturnBadArgument;
		}

		if (dispatch->function) {
			err = (*dispatch->function)(target, reference, args);
		} else {
			err = kIOReturnNoCompletion; /* implementer can dispatch */
		}
		return err;
	}


	// pre-Leopard API's don't do ool structs
	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
		err = kIOReturnIPCError;
		return err;
	}

	// Snapshot the caller's output capacity; the sync shims update this
	// local (IOByteCount) rather than args->structureOutputSize (32-bit).
	structureOutputSize = args->structureOutputSize;

	if (args->asyncWakePort) {
		// Async path: look up the legacy async method table entry.
		IOExternalAsyncMethod * method;
		object = NULL;
		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		// Route by the declared argument pattern of the legacy method.
		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_async_method_scalarI_structureI( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *)args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_async_method_scalarI_scalarO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_async_method_scalarI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_async_method_structureI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    (char *)args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	} else {
		// Sync path: look up the legacy synchronous method table entry.
		IOExternalMethod * method;
		object = NULL;
		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_connect_method_scalarI_structureI( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_connect_method_scalarI_scalarO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_connect_method_scalarI_structureO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_connect_method_structureI_structureO( method, object,
			    (char *) args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	}

	// NOTE(review): the async shims above write args->structureOutputSize
	// directly while the sync shims update the local; the assignment below
	// then writes the local back unconditionally — confirm this is the
	// intended behavior for the async path.
	if (structureOutputSize > UINT_MAX) {
		structureOutputSize = 0;
		err = kIOReturnBadArgument;
	}

	args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);

	return err;
}
6818
6819 IOReturn
registerFilterCallbacks(const struct io_filter_callbacks * callbacks,size_t size)6820 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6821 {
6822 if (size < sizeof(*callbacks)) {
6823 return kIOReturnBadArgument;
6824 }
6825 if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6826 return kIOReturnBusy;
6827 }
6828 return kIOReturnSuccess;
6829 }
6830
6831
// Reserved vtable slots for IOUserClient: placeholders that keep the class
// ABI stable so methods can be added later without breaking binary
// compatibility with existing kexts.
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);

// Reserved vtable slots for IOUserClient2022.
OSMetaClassDefineReservedUnused(IOUserClient2022, 0);
OSMetaClassDefineReservedUnused(IOUserClient2022, 1);
OSMetaClassDefineReservedUnused(IOUserClient2022, 2);
OSMetaClassDefineReservedUnused(IOUserClient2022, 3);
6853