1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <mach/boolean.h>
29 #include <mach/kern_return.h>
30 #include <mach/mig_errors.h>
31 #include <mach/port.h>
32 #include <mach/vm_param.h>
33 #include <mach/notify.h>
34 //#include <mach/mach_host_server.h>
35 #include <mach/mach_types.h>
36
37 #include <machine/machparam.h> /* spl definitions */
38
39 #include <ipc/ipc_port.h>
40 #include <ipc/ipc_space.h>
41
42 #include <kern/clock.h>
43 #include <kern/spl.h>
44 #include <kern/queue.h>
45 #include <kern/zalloc.h>
46 #include <kern/thread.h>
47 #include <kern/task.h>
48 #include <kern/sched_prim.h>
49 #include <kern/misc_protos.h>
50
51 #include <vm/pmap.h>
52 #include <vm/vm_map.h>
53 #include <vm/vm_kern.h>
54
55 #include <device/device_types.h>
56 #include <device/device_port.h>
57 #include <device/device_server.h>
58
59 #include <machine/machparam.h>
60
61 #if defined(__i386__) || defined(__x86_64__)
62 #include <i386/pmap.h>
63 #endif
64 #if defined(__arm64__)
65 #include <arm/pmap.h>
66 #endif
67 #include <IOKit/IOKitServer.h>
68
69 #define EXTERN
70 #define MIGEXTERN
71
/* Lock group and global mutex serializing every iokit object<->port binding
 * update; taken via iokit_lock_port()/iokit_unlock_port() below. */
LCK_GRP_DECLARE(dev_lck_grp, "device");
LCK_MTX_DECLARE(iokit_obj_to_port_binding_lock, &dev_lck_grp);

/* Shared no-more-senders handler for all iokit kobject port types. */
static void
iokit_no_senders( ipc_port_t port, mach_port_mscount_t mscount );

/*
 * Lifetime:
 * - non lazy port with no-more senders
 * - can be destroyed by iokit_destroy_object_port
 *
 */
IPC_KOBJECT_DEFINE(IKOT_IOKIT_IDENT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_IOKIT_OBJECT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_IOKIT_CONNECT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_UEXT_OBJECT,
    .iko_op_no_senders = iokit_no_senders);
92
93 /*
94 * Lookup a device by its port.
95 * Doesn't consume the naked send right; produces a device reference.
96 */
97 io_object_t
iokit_lookup_io_object(ipc_port_t port,ipc_kobject_type_t type)98 iokit_lookup_io_object(ipc_port_t port, ipc_kobject_type_t type)
99 {
100 io_object_t obj = NULL;
101
102 if (!IP_VALID(port)) {
103 return NULL;
104 }
105
106 iokit_lock_port(port);
107 /*
108 * iokit uses the iokit_lock_port to serialize all its updates
109 * so we do not need to actually hold the port lock.
110 */
111 obj = ipc_kobject_get_locked(port, type);
112 if (obj) {
113 iokit_add_reference( obj, type );
114 }
115 iokit_unlock_port(port);
116
117 return obj;
118 }
119
120 MIGEXTERN io_object_t
iokit_lookup_object_port(ipc_port_t port)121 iokit_lookup_object_port(
122 ipc_port_t port)
123 {
124 return iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT);
125 }
126
127 MIGEXTERN io_object_t
iokit_lookup_connect_port(ipc_port_t port)128 iokit_lookup_connect_port(
129 ipc_port_t port)
130 {
131 return iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT);
132 }
133
134 MIGEXTERN io_object_t
iokit_lookup_ident_port(ipc_port_t port)135 iokit_lookup_ident_port(
136 ipc_port_t port)
137 {
138 return iokit_lookup_io_object(port, IKOT_IOKIT_IDENT);
139 }
140
141 MIGEXTERN io_object_t
iokit_lookup_uext_object_port(ipc_port_t port)142 iokit_lookup_uext_object_port(
143 ipc_port_t port)
144 {
145 return iokit_lookup_io_object(port, IKOT_UEXT_OBJECT);
146 }
147
/*
 * Translate a port name in the given IPC space into an io_object_t of the
 * requested kobject type.  Produces a reference on the object (NULL on any
 * failure); the caller's port name/right is left untouched.
 */
static io_object_t
iokit_lookup_object_in_space_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, ipc_space_t space)
{
	io_object_t obj = NULL;

	if (name && MACH_PORT_VALID(name)) {
		ipc_port_t port;
		kern_return_t kr;

		/* On success, port is returned locked and active (asserted below). */
		kr = ipc_port_translate_send(space, name, &port);

		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));
			assert(ip_active(port));
			/* Keep the port alive across the unlock that follows. */
			ip_reference(port);
			ip_mq_unlock(port);

			iokit_lock_port(port);
			/*
			 * iokit uses the iokit_lock_port to serialize all its updates
			 * so we do not need to actually hold the port lock.
			 */
			obj = ipc_kobject_get_locked(port, type);
			if (obj) {
				iokit_add_reference(obj, type);
			}
			iokit_unlock_port(port);

			/* Drop the reference taken above. */
			ip_release(port);
		}
	}

	return obj;
}
182
183 EXTERN io_object_t
iokit_lookup_object_with_port_name(mach_port_name_t name,ipc_kobject_type_t type,task_t task)184 iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task)
185 {
186 return iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space);
187 }
188
189 EXTERN io_object_t
iokit_lookup_connect_ref_current_task(mach_port_name_t name)190 iokit_lookup_connect_ref_current_task(mach_port_name_t name)
191 {
192 return iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space());
193 }
194
195 EXTERN io_object_t
iokit_lookup_uext_ref_current_task(mach_port_name_t name)196 iokit_lookup_uext_ref_current_task(mach_port_name_t name)
197 {
198 return iokit_lookup_object_in_space_with_port_name(name, IKOT_UEXT_OBJECT, current_space());
199 }
200
201 /*
202 * Look up a port given a port name.
203 * This returns the port unlocked with a +1 send right.
204 * Release with iokit_release_port_send()
205 */
206 EXTERN ipc_port_t
iokit_lookup_raw_current_task(mach_port_name_t name,ipc_kobject_type_t * type)207 iokit_lookup_raw_current_task(mach_port_name_t name, ipc_kobject_type_t * type)
208 {
209 ipc_port_t port = NULL;
210 if (name && MACH_PORT_VALID(name)) {
211 kern_return_t kr = ipc_object_copyin(current_space(), name, MACH_MSG_TYPE_COPY_SEND, (ipc_object_t *)&port, 0, NULL, IPC_OBJECT_COPYIN_FLAGS_NONE);
212 if (kr == KERN_SUCCESS) {
213 assert(IP_VALID(port));
214 assert(ip_active(port));
215 if (type != NULL) {
216 *type = ip_kotype(port);
217 }
218 }
219 }
220
221 return port;
222 }
223
224 EXTERN void
iokit_retain_port(ipc_port_t port)225 iokit_retain_port( ipc_port_t port )
226 {
227 ipc_port_reference( port );
228 }
229
230 EXTERN void
iokit_release_port(ipc_port_t port)231 iokit_release_port( ipc_port_t port )
232 {
233 ipc_port_release( port );
234 }
235
236 EXTERN void
iokit_release_port_send(ipc_port_t port)237 iokit_release_port_send( ipc_port_t port )
238 {
239 ipc_port_release_send( port );
240 }
241
242 EXTERN void
iokit_lock_port(__unused ipc_port_t port)243 iokit_lock_port( __unused ipc_port_t port )
244 {
245 lck_mtx_lock(&iokit_obj_to_port_binding_lock);
246 }
247
248 EXTERN void
iokit_unlock_port(__unused ipc_port_t port)249 iokit_unlock_port( __unused ipc_port_t port )
250 {
251 lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
252 }
253
254 /*
255 * Get the port for a device.
256 * Consumes a device reference; produces a naked send right.
257 */
258
259 static ipc_port_t
iokit_make_port_of_type(io_object_t obj,ipc_kobject_type_t type)260 iokit_make_port_of_type(io_object_t obj, ipc_kobject_type_t type)
261 {
262 ipc_port_t port;
263 ipc_port_t sendPort;
264
265 if (obj == NULL) {
266 return IP_NULL;
267 }
268
269 port = iokit_port_for_object( obj, type );
270 if (port) {
271 sendPort = ipc_kobject_make_send( port, obj, type );
272 iokit_release_port( port );
273 } else {
274 sendPort = IP_NULL;
275 }
276
277 iokit_remove_reference( obj );
278
279 return sendPort;
280 }
281
282 MIGEXTERN ipc_port_t
iokit_make_object_port(io_object_t obj)283 iokit_make_object_port(
284 io_object_t obj )
285 {
286 return iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT);
287 }
288
289 MIGEXTERN ipc_port_t
iokit_make_connect_port(io_object_t obj)290 iokit_make_connect_port(
291 io_object_t obj )
292 {
293 return iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT);
294 }
295
296 MIGEXTERN ipc_port_t
iokit_make_ident_port(io_object_t obj)297 iokit_make_ident_port(
298 io_object_t obj )
299 {
300 return iokit_make_port_of_type(obj, IKOT_IOKIT_IDENT);
301 }
302
303 int gIOKitPortCount;
304
305 EXTERN ipc_port_t
iokit_alloc_object_port(io_object_t obj,ipc_kobject_type_t type)306 iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
307 {
308 /* Allocate port, keeping a reference for it. */
309 gIOKitPortCount++;
310 ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_NSREQUEST;
311 if (type == IKOT_IOKIT_CONNECT) {
312 options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
313 }
314 if (type == IKOT_UEXT_OBJECT) {
315 ipc_label_t label = IPC_LABEL_DEXT;
316 return ipc_kobject_alloc_labeled_port((ipc_kobject_t) obj, type, label, options);
317 } else {
318 return ipc_kobject_alloc_port((ipc_kobject_t) obj, type, options);
319 }
320 }
321
322 EXTERN kern_return_t
iokit_destroy_object_port(ipc_port_t port,ipc_kobject_type_t type)323 iokit_destroy_object_port( ipc_port_t port, ipc_kobject_type_t type )
324 {
325 iokit_lock_port(port);
326 ipc_kobject_disable(port, type);
327 // iokit_remove_reference( obj );
328 iokit_unlock_port(port);
329 gIOKitPortCount--;
330
331 ipc_kobject_dealloc_port(port, 0, type);
332 return KERN_SUCCESS;
333 }
334
335 EXTERN kern_return_t
iokit_switch_object_port(ipc_port_t port,io_object_t obj,ipc_kobject_type_t type)336 iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
337 {
338 iokit_lock_port(port);
339 ipc_kobject_enable( port, (ipc_kobject_t) obj, type);
340 iokit_unlock_port(port);
341
342 return KERN_SUCCESS;
343 }
344
/*
 * Make a send right for obj's port of the given type and copy it out into
 * the task's IPC space, returning the resulting port name.  Unlike
 * iokit_make_port_of_type, this does NOT consume the caller's reference
 * on obj (no iokit_remove_reference here).  Returns MACH_PORT_NULL when
 * the object is NULL, has no port, or the copyout fails; MACH_PORT_DEAD
 * when the port turned out to be dead.
 */
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
	ipc_port_t port;
	ipc_port_t sendPort;
	mach_port_name_t name = 0;

	if (obj == NULL) {
		return MACH_PORT_NULL;
	}

	port = iokit_port_for_object( obj, type );
	if (port) {
		sendPort = ipc_kobject_make_send( port, obj, type );
		/* drop the port reference from iokit_port_for_object */
		iokit_release_port( port );
	} else {
		sendPort = IP_NULL;
	}

	if (IP_VALID( sendPort )) {
		kern_return_t kr;
		// Remove once <rdar://problem/45522961> is fixed.
		// We need to make ith_knote NULL as ipc_object_copyout() uses
		// thread-argument-passing and its value should not be garbage
		current_thread()->ith_knote = ITH_KNOTE_NULL;
		kr = ipc_object_copyout( task->itk_space, ip_to_object(sendPort),
		    MACH_MSG_TYPE_PORT_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &name);
		if (kr != KERN_SUCCESS) {
			name = MACH_PORT_NULL;
		}
	} else if (sendPort == IP_NULL) {
		name = MACH_PORT_NULL;
	} else if (sendPort == IP_DEAD) {
		name = MACH_PORT_DEAD;
	}

	return name;
}
383
384 EXTERN kern_return_t
iokit_mod_send_right(task_t task,mach_port_name_t name,mach_port_delta_t delta)385 iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
386 {
387 return mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta );
388 }
389
/*
 * Handle the No-More_Senders notification generated from a device port destroy.
 * Since there are no longer any tasks which hold a send right to this device
 * port a NMS notification has been generated.
 */

static void
iokit_no_senders( ipc_port_t port, mach_port_mscount_t mscount )
{
	io_object_t obj = NULL;
	ipc_kobject_type_t type = IKOT_NONE;

	// convert a port to io_object_t.
	if (IP_VALID(port)) {
		iokit_lock_port(port);
		if (ip_active(port)) {
			type = ip_kotype( port );
			assert((IKOT_IOKIT_OBJECT == type)
			    || (IKOT_IOKIT_CONNECT == type)
			    || (IKOT_IOKIT_IDENT == type)
			    || (IKOT_UEXT_OBJECT == type));
			/*
			 * iokit uses the iokit_lock_port to serialize all its updates
			 * so we do not need to actually hold the port lock.
			 */
			obj = ipc_kobject_get_locked(port, type);
			/*
			 * NOTE(review): the reference is added as IKOT_IOKIT_OBJECT
			 * rather than the port's actual `type` — presumably deliberate
			 * so connect-specific bookkeeping in iokit_add_reference is
			 * skipped on this path; confirm against iokit_add_reference.
			 */
			iokit_add_reference( obj, IKOT_IOKIT_OBJECT );
		}
		iokit_unlock_port(port);
	}

	if (obj) {
		/* Notify IOKit the client died; loop handles a lost race with
		 * new make-send activity on the port. */
		while (iokit_client_died( obj, port, type, &mscount ) != KERN_SUCCESS) {
			kern_return_t kr;

			/* Re-request no-senders notifications on the port (if still active) */
			kr = ipc_kobject_nsrequest(port, mscount + 1, &mscount);
			if (kr != KERN_FAILURE) {
				break;
			}
			/*
			 * port has no outstanding rights or pending make-sends,
			 * and the notification would fire recursively, try again.
			 */
		}

		/* drop the reference taken above */
		iokit_remove_reference( obj );
	}
}
439
440
441 kern_return_t
iokit_label_dext_task(task_t task)442 iokit_label_dext_task(task_t task)
443 {
444 return ipc_space_add_label(task->itk_space, IPC_LABEL_DEXT);
445 }
446
447 /*
448 * Routine: iokit_clear_registered_ports
449 * Purpose:
450 * Clean up a task's registered IOKit kobject ports.
451 * Conditions:
452 * Nothing locked.
453 */
454 void
iokit_clear_registered_ports(task_t task)455 iokit_clear_registered_ports(
456 task_t task)
457 {
458 mach_port_t port;
459 ipc_kobject_type_t type;
460
461 itk_lock(task);
462 for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
463 port = task->itk_registered[i];
464 if (!IP_VALID(port)) {
465 continue;
466 }
467 type = ip_kotype( port );
468 if ((IKOT_IOKIT_OBJECT == type)
469 || (IKOT_IOKIT_CONNECT == type)
470 || (IKOT_IOKIT_IDENT == type)
471 || (IKOT_UEXT_OBJECT == type)) {
472 ipc_port_release_send(port);
473 task->itk_registered[i] = IP_NULL;
474 }
475 }
476 itk_unlock(task);
477 }
478
479 /* need to create a pmap function to generalize */
480 unsigned int
IODefaultCacheBits(addr64_t pa)481 IODefaultCacheBits(addr64_t pa)
482 {
483 return pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT));
484 }
485
486 kern_return_t
IOMapPages(vm_map_t map,mach_vm_address_t va,mach_vm_address_t pa,mach_vm_size_t length,unsigned int options)487 IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
488 mach_vm_size_t length, unsigned int options)
489 {
490 vm_prot_t prot;
491 unsigned int flags;
492 ppnum_t pagenum;
493 pmap_t pmap = map->pmap;
494
495 prot = (options & kIOMapReadOnly)
496 ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
497
498 pagenum = (ppnum_t)atop_64(pa);
499
500 switch (options & kIOMapCacheMask) { /* What cache mode do we need? */
501 case kIOMapDefaultCache:
502 default:
503 flags = IODefaultCacheBits(pa);
504 break;
505
506 case kIOMapInhibitCache:
507 flags = VM_WIMG_IO;
508 break;
509
510 case kIOMapWriteThruCache:
511 flags = VM_WIMG_WTHRU;
512 break;
513
514 case kIOMapWriteCombineCache:
515 flags = VM_WIMG_WCOMB;
516 break;
517
518 case kIOMapCopybackCache:
519 flags = VM_WIMG_COPYBACK;
520 break;
521
522 case kIOMapCopybackInnerCache:
523 flags = VM_WIMG_INNERWBACK;
524 break;
525
526 case kIOMapPostedWrite:
527 flags = VM_WIMG_POSTED;
528 break;
529
530 case kIOMapRealTimeCache:
531 flags = VM_WIMG_RT;
532 break;
533 }
534
535 pmap_set_cache_attributes(pagenum, flags);
536
537 vm_map_set_cache_attr(map, (vm_map_offset_t)va);
538
539
540 // Set up a block mapped area
541 return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
542 }
543
544 kern_return_t
IOUnmapPages(vm_map_t map,mach_vm_address_t va,mach_vm_size_t length)545 IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
546 {
547 pmap_t pmap = map->pmap;
548
549 pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
550
551 return KERN_SUCCESS;
552 }
553
554 kern_return_t
IOProtectCacheMode(vm_map_t __unused map,mach_vm_address_t __unused va,mach_vm_size_t __unused length,unsigned int __unused options)555 IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
556 mach_vm_size_t __unused length, unsigned int __unused options)
557 {
558 mach_vm_size_t off;
559 vm_prot_t prot;
560 unsigned int flags;
561 pmap_t pmap = map->pmap;
562 pmap_flush_context pmap_flush_context_storage;
563 boolean_t delayed_pmap_flush = FALSE;
564
565 prot = (options & kIOMapReadOnly)
566 ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
567
568 switch (options & kIOMapCacheMask) {
569 // what cache mode do we need?
570 case kIOMapDefaultCache:
571 default:
572 return KERN_INVALID_ARGUMENT;
573
574 case kIOMapInhibitCache:
575 flags = VM_WIMG_IO;
576 break;
577
578 case kIOMapWriteThruCache:
579 flags = VM_WIMG_WTHRU;
580 break;
581
582 case kIOMapWriteCombineCache:
583 flags = VM_WIMG_WCOMB;
584 break;
585
586 case kIOMapCopybackCache:
587 flags = VM_WIMG_COPYBACK;
588 break;
589
590 case kIOMapCopybackInnerCache:
591 flags = VM_WIMG_INNERWBACK;
592 break;
593
594 case kIOMapPostedWrite:
595 flags = VM_WIMG_POSTED;
596 break;
597
598 case kIOMapRealTimeCache:
599 flags = VM_WIMG_RT;
600 break;
601 }
602
603 pmap_flush_context_init(&pmap_flush_context_storage);
604 delayed_pmap_flush = FALSE;
605
606 // enter each page's physical address in the target map
607 for (off = 0; off < length; off += page_size) {
608 ppnum_t ppnum = pmap_find_phys(pmap, va + off);
609 if (ppnum) {
610 pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
611 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
612 delayed_pmap_flush = TRUE;
613 }
614 }
615 if (delayed_pmap_flush == TRUE) {
616 pmap_flush(&pmap_flush_context_storage);
617 }
618
619 return KERN_SUCCESS;
620 }
621
/*
 * Return the highest physical page number in the system.
 * x86 scans the pmap memory-region table; arm64 simply returns 0
 * (no region table is consulted there — presumably callers treat 0 as
 * "no limit/unknown"; confirm against callers).
 */
ppnum_t
IOGetLastPageNumber(void)
{
#if __i386__ || __x86_64__
	ppnum_t lastPage, highest = 0;
	unsigned int idx;

	for (idx = 0; idx < pmap_memory_region_count; idx++) {
		/* -1: region .end appears to be one past the last page — confirm */
		lastPage = pmap_memory_regions[idx].end - 1;
		if (lastPage > highest) {
			highest = lastPage;
		}
	}
	return highest;
#elif __arm64__
	return 0;
#else
#error unknown arch
#endif
}
642
643
644 void IOGetTime( mach_timespec_t * clock_time);
645 void
IOGetTime(mach_timespec_t * clock_time)646 IOGetTime( mach_timespec_t * clock_time)
647 {
648 clock_sec_t sec;
649 clock_nsec_t nsec;
650 clock_get_system_nanotime(&sec, &nsec);
651 clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
652 clock_time->tv_nsec = nsec;
653 }
654