1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <mach/boolean.h>
29 #include <mach/kern_return.h>
30 #include <mach/mig_errors.h>
31 #include <mach/port.h>
32 #include <mach/vm_param.h>
33 #include <mach/notify.h>
34 //#include <mach/mach_host_server.h>
35 #include <mach/mach_types.h>
36
37 #include <machine/machparam.h> /* spl definitions */
38
39 #include <ipc/ipc_port.h>
40 #include <ipc/ipc_space.h>
41
42 #include <kern/clock.h>
43 #include <kern/spl.h>
44 #include <kern/queue.h>
45 #include <kern/zalloc.h>
46 #include <kern/thread.h>
47 #include <kern/task.h>
48 #include <kern/sched_prim.h>
49 #include <kern/misc_protos.h>
50
51 #include <vm/pmap.h>
52 #include <vm/vm_map.h>
53 #include <vm/vm_kern.h>
54
55 #include <device/device_types.h>
56 #include <device/device_port.h>
57 #include <device/device_server.h>
58
59 #include <machine/machparam.h>
60
61 #if defined(__i386__) || defined(__x86_64__)
62 #include <i386/pmap.h>
63 #endif
64 #if defined(__arm__) || defined(__arm64__)
65 #include <arm/pmap.h>
66 #endif
67 #include <IOKit/IOKitServer.h>
68
69 #define EXTERN
70 #define MIGEXTERN
71
/*
 * Lock group and global mutex serializing every iokit object<->port
 * binding update; taken via iokit_lock_port()/iokit_unlock_port().
 */
LCK_GRP_DECLARE(dev_lck_grp, "device");
LCK_MTX_DECLARE(iokit_obj_to_port_binding_lock, &dev_lck_grp);

/* Forward declaration: no-senders handler shared by all iokit port types. */
static void
iokit_no_senders( ipc_port_t port, mach_port_mscount_t mscount );

/*
 * Lifetime:
 * - non lazy port with no-more senders
 * - can be destroyed by iokit_destroy_object_port
 *
 */
IPC_KOBJECT_DEFINE(IKOT_IOKIT_IDENT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_IOKIT_OBJECT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_IOKIT_CONNECT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_UEXT_OBJECT,
    .iko_op_no_senders = iokit_no_senders);
92
93 /*
94 * Lookup a device by its port.
95 * Doesn't consume the naked send right; produces a device reference.
96 */
io_object_t
iokit_lookup_io_object(ipc_port_t port, ipc_kobject_type_t type)
{
	io_object_t obj = NULL;

	/* Reject null/dead ports up front. */
	if (!IP_VALID(port)) {
		return NULL;
	}

	iokit_lock_port(port);
	/*
	 * iokit uses the iokit_lock_port to serialize all its updates
	 * so we do not need to actually hold the port lock.
	 */
	obj = ipc_kobject_get_locked(port, type);
	if (obj) {
		/* Reference produced on behalf of the caller; caller must
		 * drop it with iokit_remove_reference(). */
		iokit_add_reference( obj, type );
	}
	iokit_unlock_port(port);

	return obj;
}
119
120 MIGEXTERN io_object_t
iokit_lookup_object_port(ipc_port_t port)121 iokit_lookup_object_port(
122 ipc_port_t port)
123 {
124 return iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT);
125 }
126
127 MIGEXTERN io_object_t
iokit_lookup_connect_port(ipc_port_t port)128 iokit_lookup_connect_port(
129 ipc_port_t port)
130 {
131 return iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT);
132 }
133
134 MIGEXTERN io_object_t
iokit_lookup_ident_port(ipc_port_t port)135 iokit_lookup_ident_port(
136 ipc_port_t port)
137 {
138 return iokit_lookup_io_object(port, IKOT_IOKIT_IDENT);
139 }
140
141 MIGEXTERN io_object_t
iokit_lookup_uext_object_port(ipc_port_t port)142 iokit_lookup_uext_object_port(
143 ipc_port_t port)
144 {
145 return iokit_lookup_io_object(port, IKOT_UEXT_OBJECT);
146 }
147
/*
 * Look up the kobject of `type` bound to the port named `name` in `space`.
 * Does not consume any right held under `name`; produces an object
 * reference (caller must iokit_remove_reference) or returns NULL.
 */
static io_object_t
iokit_lookup_object_in_space_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, ipc_space_t space)
{
	io_object_t obj = NULL;

	if (name && MACH_PORT_VALID(name)) {
		ipc_port_t port;
		kern_return_t kr;

		/* Translate the name to a port; on success the port is
		 * returned locked and active. */
		kr = ipc_port_translate_send(space, name, &port);

		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));
			assert(ip_active(port));
			/* Keep the port alive across dropping its lock. */
			ip_reference(port);
			ip_mq_unlock(port);

			iokit_lock_port(port);
			/*
			 * iokit uses the iokit_lock_port to serialize all its updates
			 * so we do not need to actually hold the port lock.
			 */
			obj = ipc_kobject_get_locked(port, type);
			if (obj) {
				/* Reference handed to the caller. */
				iokit_add_reference(obj, type);
			}
			iokit_unlock_port(port);

			/* Drop the reference taken above. */
			ip_release(port);
		}
	}

	return obj;
}
182
183 EXTERN io_object_t
iokit_lookup_object_with_port_name(mach_port_name_t name,ipc_kobject_type_t type,task_t task)184 iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task)
185 {
186 return iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space);
187 }
188
189 EXTERN io_object_t
iokit_lookup_connect_ref_current_task(mach_port_name_t name)190 iokit_lookup_connect_ref_current_task(mach_port_name_t name)
191 {
192 return iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space());
193 }
194
195 EXTERN io_object_t
iokit_lookup_uext_ref_current_task(mach_port_name_t name)196 iokit_lookup_uext_ref_current_task(mach_port_name_t name)
197 {
198 return iokit_lookup_object_in_space_with_port_name(name, IKOT_UEXT_OBJECT, current_space());
199 }
200
201 /*
202 * Look up a port given a port name.
203 * This returns the port unlocked with a +1 send right.
204 * Release with iokit_release_port_send()
205 */
EXTERN ipc_port_t
iokit_lookup_raw_current_task(mach_port_name_t name, ipc_kobject_type_t * type)
{
	ipc_port_t port = NULL;
	if (name && MACH_PORT_VALID(name)) {
		/* COPY_SEND: duplicates the caller's send right rather than
		 * moving it, so the name stays valid in the caller's space. */
		kern_return_t kr = ipc_object_copyin(current_space(), name, MACH_MSG_TYPE_COPY_SEND, (ipc_object_t *)&port, 0, NULL, IPC_OBJECT_COPYIN_FLAGS_NONE);
		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));
			assert(ip_active(port));
			/* Report the port's kobject type if requested. */
			if (type != NULL) {
				*type = ip_kotype(port);
			}
		}
	}

	/* NULL on failure; otherwise an unlocked port with a +1 send right
	 * (release with iokit_release_port_send()). */
	return port;
}
223
224 EXTERN void
iokit_retain_port(ipc_port_t port)225 iokit_retain_port( ipc_port_t port )
226 {
227 ipc_port_reference( port );
228 }
229
230 EXTERN void
iokit_release_port(ipc_port_t port)231 iokit_release_port( ipc_port_t port )
232 {
233 ipc_port_release( port );
234 }
235
236 EXTERN void
iokit_release_port_send(ipc_port_t port)237 iokit_release_port_send( ipc_port_t port )
238 {
239 ipc_port_release_send( port );
240 }
241
242 EXTERN void
iokit_lock_port(__unused ipc_port_t port)243 iokit_lock_port( __unused ipc_port_t port )
244 {
245 lck_mtx_lock(&iokit_obj_to_port_binding_lock);
246 }
247
248 EXTERN void
iokit_unlock_port(__unused ipc_port_t port)249 iokit_unlock_port( __unused ipc_port_t port )
250 {
251 lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
252 }
253
254 /*
255 * Get the port for a device.
256 * Consumes a device reference; produces a naked send right.
257 */
258
259 static ipc_port_t
iokit_make_port_of_type(io_object_t obj,ipc_kobject_type_t type)260 iokit_make_port_of_type(io_object_t obj, ipc_kobject_type_t type)
261 {
262 ipc_port_t port;
263 ipc_port_t sendPort;
264
265 if (obj == NULL) {
266 return IP_NULL;
267 }
268
269 port = iokit_port_for_object( obj, type );
270 if (port) {
271 sendPort = ipc_port_make_send( port);
272 iokit_release_port( port );
273 } else {
274 sendPort = IP_NULL;
275 }
276
277 iokit_remove_reference( obj );
278
279 return sendPort;
280 }
281
282 MIGEXTERN ipc_port_t
iokit_make_object_port(io_object_t obj)283 iokit_make_object_port(
284 io_object_t obj )
285 {
286 return iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT);
287 }
288
289 MIGEXTERN ipc_port_t
iokit_make_connect_port(io_object_t obj)290 iokit_make_connect_port(
291 io_object_t obj )
292 {
293 return iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT);
294 }
295
296 MIGEXTERN ipc_port_t
iokit_make_ident_port(io_object_t obj)297 iokit_make_ident_port(
298 io_object_t obj )
299 {
300 return iokit_make_port_of_type(obj, IKOT_IOKIT_IDENT);
301 }
302
303 int gIOKitPortCount;
304
/*
 * Allocate a kobject port bound to `obj` with a no-senders request armed
 * (so iokit_no_senders() fires when the last send right dies).
 */
EXTERN ipc_port_t
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
{
	/* Allocate port, keeping a reference for it. */
	/* NOTE(review): gIOKitPortCount is incremented without atomics or a
	 * lock — presumably a statistics-only counter; confirm. */
	gIOKitPortCount++;
	ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_NSREQUEST;
	if (type == IKOT_IOKIT_CONNECT) {
		/* Connect (user-client) port send rights may not be moved
		 * to another task. */
		options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
	}
	if (type == IKOT_UEXT_OBJECT) {
		/* DriverKit objects get an IPC_LABEL_DEXT-labeled port. */
		ipc_label_t label = IPC_LABEL_DEXT;
		return ipc_kobject_alloc_labeled_port((ipc_kobject_t) obj, type, label, options);
	} else {
		return ipc_kobject_alloc_port((ipc_kobject_t) obj, type, options);
	}
}
321
/*
 * Tear down a port created by iokit_alloc_object_port(): unbind the
 * kobject under the global binding lock, then deallocate the port.
 * The object's own reference is not dropped here.
 */
EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port, ipc_kobject_type_t type )
{
	iokit_lock_port(port);
	ipc_kobject_disable(port, type);
	// iokit_remove_reference( obj );
	iokit_unlock_port(port);
	/* NOTE(review): non-atomic decrement, mirroring the increment in
	 * iokit_alloc_object_port() — confirm statistics-only use. */
	gIOKitPortCount--;

	ipc_kobject_dealloc_port(port, 0, type);
	return KERN_SUCCESS;
}
334
335 EXTERN kern_return_t
iokit_switch_object_port(ipc_port_t port,io_object_t obj,ipc_kobject_type_t type)336 iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
337 {
338 iokit_lock_port(port);
339 ipc_kobject_enable( port, (ipc_kobject_t) obj, type);
340 iokit_unlock_port(port);
341
342 return KERN_SUCCESS;
343 }
344
/*
 * Create a send right for `obj`'s port of `type` and copy it out into
 * `task`'s IPC space.  The object reference is NOT consumed here.
 * Returns the new name, MACH_PORT_NULL on failure, or MACH_PORT_DEAD if
 * the underlying port was dead.
 */
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
	ipc_port_t port;
	ipc_port_t sendPort;
	mach_port_name_t name = 0;

	if (obj == NULL) {
		return MACH_PORT_NULL;
	}

	/* Obtain the object's port (+1 port ref), make a naked send right,
	 * then drop the port ref. */
	port = iokit_port_for_object( obj, type );
	if (port) {
		sendPort = ipc_port_make_send( port);
		iokit_release_port( port );
	} else {
		sendPort = IP_NULL;
	}

	if (IP_VALID( sendPort )) {
		kern_return_t kr;
		// Remove once <rdar://problem/45522961> is fixed.
		// We need to make ith_knote NULL as ipc_object_copyout() uses
		// thread-argument-passing and its value should not be garbage
		current_thread()->ith_knote = ITH_KNOTE_NULL;
		/* Copy the send right out into the task's space as a name. */
		kr = ipc_object_copyout( task->itk_space, ip_to_object(sendPort),
		    MACH_MSG_TYPE_PORT_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &name);
		if (kr != KERN_SUCCESS) {
			name = MACH_PORT_NULL;
		}
	} else if (sendPort == IP_NULL) {
		name = MACH_PORT_NULL;
	} else if (sendPort == IP_DEAD) {
		/* ipc_port_make_send() yields IP_DEAD for a dead port. */
		name = MACH_PORT_DEAD;
	}

	return name;
}
383
384 EXTERN kern_return_t
iokit_mod_send_right(task_t task,mach_port_name_t name,mach_port_delta_t delta)385 iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
386 {
387 return mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta );
388 }
389
390 /*
391 * Handle the No-More_Senders notification generated from a device port destroy.
392 * Since there are no longer any tasks which hold a send right to this device
393 * port a NMS notification has been generated.
394 */
395
396 static void
iokit_no_senders(ipc_port_t port,mach_port_mscount_t mscount)397 iokit_no_senders( ipc_port_t port, mach_port_mscount_t mscount )
398 {
399 io_object_t obj = NULL;
400 ipc_kobject_type_t type = IKOT_NONE;
401
402 // convert a port to io_object_t.
403 if (IP_VALID(port)) {
404 iokit_lock_port(port);
405 if (ip_active(port)) {
406 type = ip_kotype( port );
407 assert((IKOT_IOKIT_OBJECT == type)
408 || (IKOT_IOKIT_CONNECT == type)
409 || (IKOT_IOKIT_IDENT == type)
410 || (IKOT_UEXT_OBJECT == type));
411 /*
412 * iokit uses the iokit_lock_port to serialize all its updates
413 * so we do not need to actually hold the port lock.
414 */
415 obj = ipc_kobject_get_locked(port, type);
416 iokit_add_reference( obj, IKOT_IOKIT_OBJECT );
417 }
418 iokit_unlock_port(port);
419 }
420
421 if (obj) {
422 while (iokit_client_died( obj, port, type, &mscount ) != KERN_SUCCESS) {
423 kern_return_t kr;
424
425 /* Re-request no-senders notifications on the port (if still active) */
426 kr = ipc_kobject_nsrequest(port, mscount + 1, &mscount);
427 if (kr != KERN_FAILURE) {
428 break;
429 }
430 /*
431 * port has no outstanding rights or pending make-sends,
432 * and the notification would fire recursively, try again.
433 */
434 }
435
436 iokit_remove_reference( obj );
437 }
438 }
439
440
441 kern_return_t
iokit_label_dext_task(task_t task)442 iokit_label_dext_task(task_t task)
443 {
444 return ipc_space_add_label(task->itk_space, IPC_LABEL_DEXT);
445 }
446
447 /* need to create a pmap function to generalize */
448 unsigned int
IODefaultCacheBits(addr64_t pa)449 IODefaultCacheBits(addr64_t pa)
450 {
451 return pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT));
452 }
453
/*
 * Map `length` bytes of contiguous physical memory starting at `pa` into
 * `map` at virtual address `va`, with protection and cache mode derived
 * from the kIOMap* bits in `options`.
 */
kern_return_t
IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
    mach_vm_size_t length, unsigned int options)
{
	vm_prot_t prot;
	unsigned int flags;
	ppnum_t pagenum;
	pmap_t pmap = map->pmap;

	/* Read-only unless the caller permits writes. */
	prot = (options & kIOMapReadOnly)
	    ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

	pagenum = (ppnum_t)atop_64(pa);

	switch (options & kIOMapCacheMask) { /* What cache mode do we need? */
	case kIOMapDefaultCache:
	default:
		/* Use whatever the pmap currently reports for this page. */
		flags = IODefaultCacheBits(pa);
		break;

	case kIOMapInhibitCache:
		flags = VM_WIMG_IO;
		break;

	case kIOMapWriteThruCache:
		flags = VM_WIMG_WTHRU;
		break;

	case kIOMapWriteCombineCache:
		flags = VM_WIMG_WCOMB;
		break;

	case kIOMapCopybackCache:
		flags = VM_WIMG_COPYBACK;
		break;

	case kIOMapCopybackInnerCache:
		flags = VM_WIMG_INNERWBACK;
		break;

	case kIOMapPostedWrite:
		flags = VM_WIMG_POSTED;
		break;

	case kIOMapRealTimeCache:
		flags = VM_WIMG_RT;
		break;
	}

	/* NOTE(review): attributes are recorded for the first page only —
	 * presumably pmap_map_block() applies `flags` to the whole range;
	 * confirm against the pmap implementation. */
	pmap_set_cache_attributes(pagenum, flags);

	vm_map_set_cache_attr(map, (vm_map_offset_t)va);


	// Set up a block mapped area
	return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
}
511
512 kern_return_t
IOUnmapPages(vm_map_t map,mach_vm_address_t va,mach_vm_size_t length)513 IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
514 {
515 pmap_t pmap = map->pmap;
516
517 pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
518
519 return KERN_SUCCESS;
520 }
521
/*
 * Re-enter the already-resident pages covering [va, va + length) in `map`
 * with new protection and cache attributes from `options`.  TLB flushes
 * are deferred via a pmap_flush_context and issued once at the end.
 *
 * NOTE(review): all four parameters carry __unused yet every one is used
 * below — the attributes appear stale; confirm and consider removing.
 */
kern_return_t
IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
    mach_vm_size_t __unused length, unsigned int __unused options)
{
	mach_vm_size_t off;
	vm_prot_t prot;
	unsigned int flags;
	pmap_t pmap = map->pmap;
	pmap_flush_context pmap_flush_context_storage;
	boolean_t delayed_pmap_flush = FALSE;

	/* Read-only unless the caller permits writes. */
	prot = (options & kIOMapReadOnly)
	    ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

	switch (options & kIOMapCacheMask) {
	// what cache mode do we need?
	case kIOMapDefaultCache:
	default:
		/* Unlike IOMapPages(), "default" is not meaningful here. */
		return KERN_INVALID_ARGUMENT;

	case kIOMapInhibitCache:
		flags = VM_WIMG_IO;
		break;

	case kIOMapWriteThruCache:
		flags = VM_WIMG_WTHRU;
		break;

	case kIOMapWriteCombineCache:
		flags = VM_WIMG_WCOMB;
		break;

	case kIOMapCopybackCache:
		flags = VM_WIMG_COPYBACK;
		break;

	case kIOMapCopybackInnerCache:
		flags = VM_WIMG_INNERWBACK;
		break;

	case kIOMapPostedWrite:
		flags = VM_WIMG_POSTED;
		break;

	case kIOMapRealTimeCache:
		flags = VM_WIMG_RT;
		break;
	}

	pmap_flush_context_init(&pmap_flush_context_storage);
	delayed_pmap_flush = FALSE;

	// enter each page's physical address in the target map
	for (off = 0; off < length; off += page_size) {
		/* Skip holes: only pages with a current translation are
		 * re-entered. */
		ppnum_t ppnum = pmap_find_phys(pmap, va + off);
		if (ppnum) {
			pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
			    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
			delayed_pmap_flush = TRUE;
		}
	}
	/* Issue the batched TLB flush only if something was re-entered. */
	if (delayed_pmap_flush == TRUE) {
		pmap_flush(&pmap_flush_context_storage);
	}

	return KERN_SUCCESS;
}
589
/*
 * Return the highest physical page number across all pmap memory regions
 * (x86 only).
 */
ppnum_t
IOGetLastPageNumber(void)
{
#if __i386__ || __x86_64__
	ppnum_t lastPage, highest = 0;
	unsigned int idx;

	/* Scan every region and keep the largest end-page seen. */
	for (idx = 0; idx < pmap_memory_region_count; idx++) {
		lastPage = pmap_memory_regions[idx].end - 1;
		if (lastPage > highest) {
			highest = lastPage;
		}
	}
	return highest;
#elif __arm__ || __arm64__
	/* NOTE(review): ARM simply returns 0 — presumably callers do not
	 * rely on this value on that architecture; confirm. */
	return 0;
#else
#error unknown arch
#endif
}
610
611
612 void IOGetTime( mach_timespec_t * clock_time);
613 void
IOGetTime(mach_timespec_t * clock_time)614 IOGetTime( mach_timespec_t * clock_time)
615 {
616 clock_sec_t sec;
617 clock_nsec_t nsec;
618 clock_get_system_nanotime(&sec, &nsec);
619 clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
620 clock_time->tv_nsec = nsec;
621 }
622