/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
#include <mach/mach_types.h>

#include <machine/machparam.h>          /* spl definitions */

#include <ipc/ipc_space.h>

#include <kern/clock.h>
#include <kern/spl.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <vm/pmap.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_kern.h>

#include <device/device_types.h>
#include <device/device_port.h>
#include <device/device_server.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
#if defined(__arm64__)
#include <arm/pmap.h>
#endif
#include <IOKit/IOKitServer.h>

#define EXTERN
#define MIGEXTERN

/*
 * Lifetime:
 * - non-lazy port with a no-more-senders notification
 * - the object is not stable because of IOUserClient::destroyUserReferences(),
 *   which can kill the port even when there are outstanding send rights.
 */
IPC_KOBJECT_DEFINE(IKOT_IOKIT_IDENT,
    .iko_op_movable_send = true,
    .iko_op_no_senders = iokit_ident_no_senders);
IPC_KOBJECT_DEFINE(IKOT_IOKIT_OBJECT,
    .iko_op_movable_send = true,
    .iko_op_no_senders = iokit_object_no_senders);
IPC_KOBJECT_DEFINE(IKOT_IOKIT_CONNECT,
    .iko_op_no_senders = iokit_connect_no_senders);
IPC_KOBJECT_DEFINE(IKOT_UEXT_OBJECT,
    .iko_op_movable_send = true,
    .iko_op_no_senders = iokit_uext_no_senders,
    .iko_op_label_free = ipc_kobject_label_free);
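
/*
 * Illustrative sketch (not part of the original file): a no-senders
 * callback fires once every send right to the port has been
 * deallocated. The real handlers named above are implemented in the
 * IOKit layer; a minimal handler with the expected shape would be
 *
 *	static void
 *	example_no_senders(ipc_port_t port, mach_port_mscount_t mscount)
 *	{
 *		// release the reference the port held on its object
 *	}
 *
 * where example_no_senders is a hypothetical name.
 */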

/*
 * Look up a device by its port.
 * Doesn't consume the naked send right; produces a device reference.
 */
EXTERN io_object_t
iokit_lookup_io_object(ipc_port_t port, ipc_kobject_type_t type)
{
	io_object_t  obj = NULL;
	io_kobject_t kobj = NULL;

	if (!IP_VALID(port)) {
		return NULL;
	}

	ip_mq_lock(port);
	kobj = ipc_kobject_get_locked(port, type);
	if (kobj) {
		iokit_kobject_retain(kobj);
	}
	ip_mq_unlock(port);
	if (kobj) {
		obj = iokit_copy_object_for_consumed_kobject(kobj);
	}

	return obj;
}
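
/*
 * Usage sketch (illustrative, not from the original file): a caller
 * holding a naked send right looks the object up through one of the
 * typed wrappers below and drops the resulting reference with
 * iokit_remove_reference() when done:
 *
 *	io_object_t obj = iokit_lookup_connect_port(port);
 *	if (obj != NULL) {
 *		// ... use the connection object ...
 *		iokit_remove_reference(obj);
 *	}
 */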

MIGEXTERN io_object_t
iokit_lookup_object_port(ipc_port_t port)
{
	return iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT);
}

MIGEXTERN io_object_t
iokit_lookup_connect_port(ipc_port_t port)
{
	return iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT);
}

MIGEXTERN io_object_t
iokit_lookup_ident_port(ipc_port_t port)
{
	return iokit_lookup_io_object(port, IKOT_IOKIT_IDENT);
}

MIGEXTERN io_object_t
iokit_lookup_uext_object_port(ipc_port_t port)
{
	return iokit_lookup_io_object(port, IKOT_UEXT_OBJECT);
}

static io_object_t
iokit_lookup_object_in_space_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, ipc_space_t space)
{
	io_object_t obj = NULL;
	io_kobject_t kobj;

	if (MACH_PORT_VALID(name)) {
		ipc_port_t port;
		kern_return_t kr;

		kr = ipc_port_translate_send(space, name, &port);

		if (kr == KERN_SUCCESS) {
			/* ipc_port_translate_send() returns the port locked on success */
			assert(IP_VALID(port));
			assert(ip_active(port));
			kobj = ipc_kobject_get_locked(port, type);
			if (kobj) {
				iokit_kobject_retain(kobj);
			}
			ip_mq_unlock(port);
			if (kobj) {
				obj = iokit_copy_object_for_consumed_kobject(kobj);
			}
		}
	}

	return obj;
}

EXTERN io_object_t
iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task)
{
	return iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space);
}

EXTERN io_object_t
iokit_lookup_connect_ref_current_task(mach_port_name_t name)
{
	return iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space());
}

EXTERN io_object_t
iokit_lookup_uext_ref_current_task(mach_port_name_t name)
{
	return iokit_lookup_object_in_space_with_port_name(name, IKOT_UEXT_OBJECT, current_space());
}
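
/*
 * Usage sketch (illustrative, not from the original file): the
 * name-based variants above resolve a mach_port_name_t in a task's
 * IPC space, e.g. a name received from user space. As with the
 * port-based lookups, the caller owns the returned reference:
 *
 *	io_object_t obj = iokit_lookup_connect_ref_current_task(name);
 *	if (obj != NULL) {
 *		// ... validate and use the object ...
 *		iokit_remove_reference(obj);
 *	}
 */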

/*
 * Look up a port given a port name.
 * This returns the port unlocked with a +1 send right.
 * Release with iokit_release_port_send().
 */
EXTERN kern_return_t
iokit_lookup_raw_current_task(mach_port_name_t name, ipc_kobject_type_t type, ipc_port_t *portp)
{
	kern_return_t kr;
	ipc_port_t port;

	/*
	 * Backward compatibility
	 *
	 * We can't use ipc_typed_port_copyin_send()'s built-in capability
	 * to check the type here, for legacy reasons.
	 *
	 * A type mismatch used to be returned as kIOReturnBadArgument to
	 * callers, but other error cases would yield kIOReturnNotFound.
	 *
	 * Do the dance by hand to respect this past.
	 */
	if (MACH_PORT_VALID(name)) {
		kr = ipc_typed_port_copyin_send(current_space(), name,
		    IOT_ANY, &port);

		if (kr != KERN_SUCCESS || !IP_VALID(port)) {
			return kIOReturnNotFound;
		}

		if (type != IOT_ANY && ip_type(port) != type) {
			ipc_typed_port_release_send(port, IOT_ANY);
			return kIOReturnBadArgument;
		}

		*portp = port;
		return kIOReturnSuccess;
	}

	return kIOReturnNotFound;
}
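
/*
 * Usage sketch (illustrative, not from the original file): borrowing
 * a raw send right by name and returning it with the release helper
 * below:
 *
 *	ipc_port_t port;
 *
 *	if (iokit_lookup_raw_current_task(name, IOT_ANY, &port) == kIOReturnSuccess) {
 *		// ... inspect or use the port ...
 *		iokit_release_port_send(port);
 *	}
 */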

EXTERN void
iokit_release_port_send( ipc_port_t port )
{
	ipc_port_release_send( port );
}

/*
 * Get the port for a device.
 * Consumes a device reference; produces a naked send right.
 */

static ipc_port_t
iokit_make_port_of_type(io_object_t obj, ipc_kobject_type_t type)
{
	ipc_port_t sendPort = IP_NULL;

	if (obj != NULL) {
		sendPort = iokit_port_make_send_for_object(obj, type);
		iokit_remove_reference( obj );
	}

	return sendPort;
}

MIGEXTERN ipc_port_t
iokit_make_object_port( io_object_t obj )
{
	return iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT);
}

MIGEXTERN ipc_port_t
iokit_make_connect_port( io_object_t obj )
{
	return iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT);
}

MIGEXTERN ipc_port_t
iokit_make_ident_port( io_object_t obj )
{
	return iokit_make_port_of_type(obj, IKOT_IOKIT_IDENT);
}

EXTERN ipc_port_t
iokit_alloc_object_port( io_kobject_t obj, ipc_kobject_type_t type )
{
	/* Allocate port, keeping a reference for it. */
	ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_NONE;
	ipc_object_label_t label = IPC_OBJECT_LABEL(type);

	if (type == IKOT_UEXT_OBJECT) {
		label = ipc_kobject_label_alloc(IKOT_UEXT_OBJECT,
		    IPC_LABEL_DEXT, IP_NULL);
	}
	return ipc_kobject_alloc_port(obj, label, options);
}
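
/*
 * Usage sketch (illustrative, not from the original file): allocating
 * a UEXT object port. The IPC_LABEL_DEXT label attached above pairs
 * with iokit_label_dext_task() below, which is how these ports are
 * tied to driver-extension tasks:
 *
 *	ipc_port_t port = iokit_alloc_object_port(kobj, IKOT_UEXT_OBJECT);
 *
 * where kobj is some io_kobject_t reference the caller already holds.
 */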

EXTERN void
iokit_lock_port( ipc_port_t port )
{
	ip_mq_lock(port);
}

EXTERN void
iokit_unlock_port( ipc_port_t port )
{
	ip_mq_unlock(port);
}

EXTERN void
iokit_destroy_object_port( ipc_port_t port, ipc_kobject_type_t type )
{
	ipc_kobject_dealloc_port(port, IPC_KOBJECT_NO_MSCOUNT, type);
}

EXTERN ipc_kobject_type_t
iokit_port_type(ipc_port_t port)
{
	return ip_type(port);
}

EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
	ipc_port_t          sendPort;
	mach_port_name_t    name = 0;

	if (obj == NULL) {
		return MACH_PORT_NULL;
	}

	sendPort = iokit_port_make_send_for_object( obj, type );

	if (IP_VALID( sendPort )) {
		kern_return_t   kr;
		// Remove once <rdar://problem/45522961> is fixed.
		// We need to make ith_knote NULL as ipc_object_copyout() uses
		// thread-argument-passing and its value should not be garbage.
		current_thread()->ith_knote = ITH_KNOTE_NULL;
		kr = ipc_object_copyout( task->itk_space, sendPort,
		    MACH_MSG_TYPE_PORT_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
		    NULL, &name);
		if (kr != KERN_SUCCESS) {
			name = MACH_PORT_NULL;
		}
	} else if (sendPort == IP_NULL) {
		name = MACH_PORT_NULL;
	} else if (sendPort == IP_DEAD) {
		name = MACH_PORT_DEAD;
	}

	return name;
}
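
/*
 * Usage sketch (illustrative, not from the original file): publishing
 * an object into a task's IPC space, and later dropping that task's
 * send right again with iokit_mod_send_right() below:
 *
 *	mach_port_name_t name = iokit_make_send_right(task, obj, IKOT_IOKIT_OBJECT);
 *
 *	if (MACH_PORT_VALID(name)) {
 *		// ... hand the name to user space; to undo:
 *		iokit_mod_send_right(task, name, -1);
 *	}
 */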

EXTERN kern_return_t
iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
{
	return mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta );
}

kern_return_t
iokit_label_dext_task(task_t task)
{
	return ipc_space_add_label(task->itk_space, IPC_LABEL_DEXT);
}

/*
 *	Routine:	iokit_clear_registered_ports
 *	Purpose:
 *		Clean up a task's registered IOKit kobject ports.
 *	Conditions:
 *		Nothing locked.
 */
void
iokit_clear_registered_ports(
	task_t task)
{
	mach_port_t port;
	ipc_kobject_type_t type;

	itk_lock(task);
	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		port = task->itk_registered[i];
		if (!IP_VALID(port)) {
			continue;
		}
		type = ip_type( port );
		if ((IKOT_IOKIT_OBJECT == type)
		    || (IKOT_IOKIT_CONNECT == type)
		    || (IKOT_IOKIT_IDENT == type)
		    || (IKOT_UEXT_OBJECT == type)) {
			ipc_port_release_send(port);
			task->itk_registered[i] = IP_NULL;
		}
	}
	itk_unlock(task);
}

/* need to create a pmap function to generalize */
unsigned int
IODefaultCacheBits(addr64_t pa)
{
	return pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT));
}

kern_return_t
IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
    mach_vm_size_t length, unsigned int options)
{
	vm_prot_t    prot;
	unsigned int flags;
	ppnum_t      pagenum;
	pmap_t       pmap = map->pmap;

	prot = (options & kIOMapReadOnly)
	    ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

	pagenum = (ppnum_t)atop_64(pa);

	switch (options & kIOMapCacheMask) {                    /* What cache mode do we need? */
	case kIOMapDefaultCache:
	default:
		flags = IODefaultCacheBits(pa);
		break;

	case kIOMapInhibitCache:
		flags = VM_WIMG_IO;
		break;

	case kIOMapWriteThruCache:
		flags = VM_WIMG_WTHRU;
		break;

	case kIOMapWriteCombineCache:
		flags = VM_WIMG_WCOMB;
		break;

	case kIOMapCopybackCache:
		flags = VM_WIMG_COPYBACK;
		break;

	case kIOMapCopybackInnerCache:
		flags = VM_WIMG_INNERWBACK;
		break;

	case kIOMapPostedWrite:
		flags = VM_WIMG_POSTED;
		break;

	case kIOMapRealTimeCache:
		flags = VM_WIMG_RT;
		break;
	}

	pmap_set_cache_attributes(pagenum, flags);

	vm_map_set_cache_attr(map, (vm_map_offset_t)va);

	// Set up a block mapped area
	return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
}
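
/*
 * Usage sketch (illustrative, not from the original file; "phys" and
 * "len" are placeholder values): mapping a physical range
 * write-combined into the kernel map:
 *
 *	kern_return_t kr = IOMapPages(kernel_map, va, phys, len,
 *	    kIOMapWriteCombineCache);
 */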

kern_return_t
IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
	pmap_t      pmap = map->pmap;

	pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));

	return KERN_SUCCESS;
}

kern_return_t
IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
    mach_vm_size_t __unused length, unsigned int __unused options)
{
	mach_vm_size_t off;
	vm_prot_t      prot;
	unsigned int   flags;
	pmap_t         pmap = map->pmap;
	pmap_flush_context  pmap_flush_context_storage;
	boolean_t           delayed_pmap_flush = FALSE;

	prot = (options & kIOMapReadOnly)
	    ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

	switch (options & kIOMapCacheMask) {
	// what cache mode do we need?
	case kIOMapDefaultCache:
	default:
		return KERN_INVALID_ARGUMENT;

	case kIOMapInhibitCache:
		flags = VM_WIMG_IO;
		break;

	case kIOMapWriteThruCache:
		flags = VM_WIMG_WTHRU;
		break;

	case kIOMapWriteCombineCache:
		flags = VM_WIMG_WCOMB;
		break;

	case kIOMapCopybackCache:
		flags = VM_WIMG_COPYBACK;
		break;

	case kIOMapCopybackInnerCache:
		flags = VM_WIMG_INNERWBACK;
		break;

	case kIOMapPostedWrite:
		flags = VM_WIMG_POSTED;
		break;

	case kIOMapRealTimeCache:
		flags = VM_WIMG_RT;
		break;
	}

	pmap_flush_context_init(&pmap_flush_context_storage);
	delayed_pmap_flush = FALSE;

	// re-enter each mapped page with the new protection and cache attributes
	for (off = 0; off < length; off += page_size) {
		ppnum_t ppnum = pmap_find_phys(pmap, va + off);
		if (ppnum) {
			pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
			    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage, PMAP_MAPPING_TYPE_INFER);
			delayed_pmap_flush = TRUE;
		}
	}
	if (delayed_pmap_flush == TRUE) {
		pmap_flush(&pmap_flush_context_storage);
	}

	return KERN_SUCCESS;
}
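
/*
 * Note on the loop above (editorial gloss on the code as written):
 * PMAP_OPTIONS_NOFLUSH defers the TLB invalidation for each
 * pmap_enter_options() call into the flush context, so the single
 * pmap_flush() at the end performs one batched shoot-down for the
 * whole range instead of one per page.
 */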

ppnum_t
IOGetLastPageNumber(void)
{
#if __i386__ || __x86_64__
	ppnum_t  lastPage, highest = 0;
	unsigned int idx;

	for (idx = 0; idx < pmap_memory_region_count; idx++) {
		lastPage = pmap_memory_regions[idx].end - 1;
		if (lastPage > highest) {
			highest = lastPage;
		}
	}
	return highest;
#elif __arm64__
	return 0;
#else
#error unknown arch
#endif
}


void IOGetTime( mach_timespec_t * clock_time);
void
IOGetTime( mach_timespec_t * clock_time)
{
	clock_sec_t sec;
	clock_nsec_t nsec;
	clock_get_system_nanotime(&sec, &nsec);
	clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
	clock_time->tv_nsec = nsec;
}