xref: /xnu-11417.101.15/osfmk/device/iokit_rpc.c (revision e3723e1f17661b24996789d8afc084c0c3303b26)
1 /*
2  * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <mach/boolean.h>
29 #include <mach/kern_return.h>
30 #include <mach/mig_errors.h>
31 #include <mach/port.h>
32 #include <mach/vm_param.h>
33 #include <mach/notify.h>
34 //#include <mach/mach_host_server.h>
35 #include <mach/mach_types.h>
36 
37 #include <machine/machparam.h>          /* spl definitions */
38 
39 #include <ipc/ipc_port.h>
40 #include <ipc/ipc_space.h>
41 
42 #include <kern/clock.h>
43 #include <kern/spl.h>
44 #include <kern/queue.h>
45 #include <kern/zalloc.h>
46 #include <kern/thread.h>
47 #include <kern/task.h>
48 #include <kern/sched_prim.h>
49 #include <kern/misc_protos.h>
50 
51 #include <vm/pmap.h>
52 #include <vm/vm_map_xnu.h>
53 #include <vm/vm_kern.h>
54 
55 #include <device/device_types.h>
56 #include <device/device_port.h>
57 #include <device/device_server.h>
58 
59 #include <machine/machparam.h>
60 
61 #if defined(__i386__) || defined(__x86_64__)
62 #include <i386/pmap.h>
63 #endif
64 #if defined(__arm64__)
65 #include <arm/pmap.h>
66 #endif
67 #include <IOKit/IOKitServer.h>
68 
#define EXTERN
#define MIGEXTERN

/* Shared no-senders handler for every IOKit kobject port type (see below). */
static void
iokit_no_senders( ipc_port_t port, mach_port_mscount_t mscount );

/*
 * Lifetime:
 * - non lazy port with no-more senders
 * - can be destroyed by iokit_destroy_object_port
 *
 * All four IOKit kobject port types route their no-senders
 * notification to iokit_no_senders().
 */
IPC_KOBJECT_DEFINE(IKOT_IOKIT_IDENT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_IOKIT_OBJECT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_IOKIT_CONNECT,
    .iko_op_no_senders = iokit_no_senders);
IPC_KOBJECT_DEFINE(IKOT_UEXT_OBJECT,
    .iko_op_no_senders = iokit_no_senders);
89 
/*
 * Lookup a device by its port.
 * Doesn't consume the naked send right; produces a device reference.
 *
 * Returns NULL if the port is invalid, inactive, or does not carry a
 * kobject of the requested type.
 */
io_object_t
iokit_lookup_io_object(ipc_port_t port, ipc_kobject_type_t type)
{
	io_object_t  obj = NULL;
	io_kobject_t kobj = NULL;

	if (!IP_VALID(port)) {
		return NULL;
	}

	/* Take a kobject reference under the port lock so the kobject
	 * cannot be torn down between lookup and retain. */
	ip_mq_lock(port);
	if (ip_active(port)) {
		kobj = ipc_kobject_get_locked(port, type);
		if (kobj) {
			iokit_kobject_retain(kobj);
		}
	}
	ip_mq_unlock(port);
	if (kobj) {
		/* Converts the kobject reference into an object reference. */
		obj = iokit_copy_object_for_consumed_kobject(kobj, type);
	}

	return obj;
}
118 
119 MIGEXTERN io_object_t
iokit_lookup_object_port(ipc_port_t port)120 iokit_lookup_object_port(
121 	ipc_port_t      port)
122 {
123 	return iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT);
124 }
125 
126 MIGEXTERN io_object_t
iokit_lookup_connect_port(ipc_port_t port)127 iokit_lookup_connect_port(
128 	ipc_port_t      port)
129 {
130 	return iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT);
131 }
132 
133 MIGEXTERN io_object_t
iokit_lookup_ident_port(ipc_port_t port)134 iokit_lookup_ident_port(
135 	ipc_port_t      port)
136 {
137 	return iokit_lookup_io_object(port, IKOT_IOKIT_IDENT);
138 }
139 
140 MIGEXTERN io_object_t
iokit_lookup_uext_object_port(ipc_port_t port)141 iokit_lookup_uext_object_port(
142 	ipc_port_t      port)
143 {
144 	return iokit_lookup_io_object(port, IKOT_UEXT_OBJECT);
145 }
146 
/*
 * Lookup an object by port name within a given IPC space.
 *
 * Produces an object reference (or NULL); the caller's send right named
 * by `name` is not consumed.
 */
static io_object_t
iokit_lookup_object_in_space_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, ipc_space_t space)
{
	io_object_t obj = NULL;
	io_kobject_t kobj;

	if (MACH_PORT_VALID(name)) {
		ipc_port_t port;
		kern_return_t kr;

		/* On success the port is returned LOCKED and active,
		 * hence the bare ip_mq_unlock() below. */
		kr = ipc_port_translate_send(space, name, &port);

		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));
			assert(ip_active(port));
			kobj = ipc_kobject_get_locked(port, type);
			if (kobj) {
				iokit_kobject_retain(kobj);
			}
			ip_mq_unlock(port);
			if (kobj) {
				/* Converts the kobject reference into an object reference. */
				obj = iokit_copy_object_for_consumed_kobject(kobj, type);
			}
		}
	}

	return obj;
}
175 
176 EXTERN io_object_t
iokit_lookup_object_with_port_name(mach_port_name_t name,ipc_kobject_type_t type,task_t task)177 iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task)
178 {
179 	return iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space);
180 }
181 
182 EXTERN io_object_t
iokit_lookup_connect_ref_current_task(mach_port_name_t name)183 iokit_lookup_connect_ref_current_task(mach_port_name_t name)
184 {
185 	return iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space());
186 }
187 
188 EXTERN io_object_t
iokit_lookup_uext_ref_current_task(mach_port_name_t name)189 iokit_lookup_uext_ref_current_task(mach_port_name_t name)
190 {
191 	return iokit_lookup_object_in_space_with_port_name(name, IKOT_UEXT_OBJECT, current_space());
192 }
193 
194 /*
195  * Look up a port given a port name.
196  * This returns the port unlocked with a +1 send right.
197  * Release with iokit_release_port_send()
198  */
199 EXTERN kern_return_t
iokit_lookup_raw_current_task(mach_port_name_t name,ipc_kobject_type_t type,ipc_port_t * portp)200 iokit_lookup_raw_current_task(mach_port_name_t name, ipc_kobject_type_t type, ipc_port_t *portp)
201 {
202 	kern_return_t kr;
203 	ipc_port_t port;
204 
205 	/*
206 	 * Backward compatbility
207 	 *
208 	 * We can't use ipc_typed_port_copyin_send() builtin's capability to
209 	 * check type here, because of legacy reasons.
210 	 *
211 	 * type mismatch used to be returned as kIOReturnBadArgument to callers,
212 	 * but other cases of errors would yield kIOReturnNotFound.
213 	 *
214 	 * Do the dance by hand to respect this past.
215 	 */
216 	if (MACH_PORT_VALID(name)) {
217 		kr = ipc_typed_port_copyin_send(current_space(), name,
218 		    IKOT_UNKNOWN, &port);
219 
220 		if (kr != KERN_SUCCESS || !IP_VALID(port)) {
221 			return kIOReturnNotFound;
222 		}
223 
224 		if (type != IKOT_UNKNOWN && ip_kotype(port) != type) {
225 			ipc_typed_port_release_send(port, IKOT_UNKNOWN);
226 			return kIOReturnBadArgument;
227 		}
228 
229 		*portp = port;
230 		return kIOReturnSuccess;
231 	}
232 
233 	return kIOReturnNotFound;
234 }
235 
236 EXTERN void
iokit_retain_port(ipc_port_t port)237 iokit_retain_port( ipc_port_t port )
238 {
239 	ipc_port_reference( port );
240 }
241 
242 EXTERN void
iokit_release_port(ipc_port_t port)243 iokit_release_port( ipc_port_t port )
244 {
245 	ipc_port_release( port );
246 }
247 
248 EXTERN void
iokit_release_port_send(ipc_port_t port)249 iokit_release_port_send( ipc_port_t port )
250 {
251 	ipc_port_release_send( port );
252 }
253 
254 /*
255  * Get the port for a device.
256  * Consumes a device reference; produces a naked send right.
257  */
258 
259 static ipc_port_t
iokit_make_port_of_type(io_object_t obj,ipc_kobject_type_t type)260 iokit_make_port_of_type(io_object_t obj, ipc_kobject_type_t type)
261 {
262 	ipc_port_t  port;
263 	ipc_port_t  sendPort;
264 	ipc_kobject_t kobj;
265 
266 	if (obj == NULL) {
267 		return IP_NULL;
268 	}
269 
270 	port = iokit_port_for_object(obj, type, &kobj);
271 	if (port) {
272 		sendPort = ipc_kobject_make_send( port, kobj, type );
273 		iokit_release_port( port );
274 	} else {
275 		sendPort = IP_NULL;
276 	}
277 
278 	iokit_remove_reference( obj );
279 
280 	return sendPort;
281 }
282 
283 MIGEXTERN ipc_port_t
iokit_make_object_port(io_object_t obj)284 iokit_make_object_port(
285 	io_object_t     obj )
286 {
287 	return iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT);
288 }
289 
290 MIGEXTERN ipc_port_t
iokit_make_connect_port(io_object_t obj)291 iokit_make_connect_port(
292 	io_object_t     obj )
293 {
294 	return iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT);
295 }
296 
297 MIGEXTERN ipc_port_t
iokit_make_ident_port(io_object_t obj)298 iokit_make_ident_port(
299 	io_object_t     obj )
300 {
301 	return iokit_make_port_of_type(obj, IKOT_IOKIT_IDENT);
302 }
303 
304 EXTERN ipc_port_t
iokit_alloc_object_port(io_kobject_t obj,ipc_kobject_type_t type)305 iokit_alloc_object_port( io_kobject_t obj, ipc_kobject_type_t type )
306 {
307 	/* Allocate port, keeping a reference for it. */
308 	ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_NSREQUEST;
309 	if (type == IKOT_IOKIT_CONNECT) {
310 		options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
311 	}
312 	if (type == IKOT_UEXT_OBJECT) {
313 		ipc_label_t label = IPC_LABEL_DEXT;
314 		return ipc_kobject_alloc_labeled_port((ipc_kobject_t) obj, type, label, options);
315 	} else {
316 		return ipc_kobject_alloc_port((ipc_kobject_t) obj, type, options);
317 	}
318 }
319 
320 EXTERN void
iokit_remove_object_port(ipc_port_t port,ipc_kobject_type_t type)321 iokit_remove_object_port( ipc_port_t port, ipc_kobject_type_t type )
322 {
323 	ipc_kobject_disable(port, type);
324 }
325 
326 EXTERN kern_return_t
iokit_destroy_object_port(ipc_port_t port,ipc_kobject_type_t type)327 iokit_destroy_object_port( ipc_port_t port, ipc_kobject_type_t type )
328 {
329 	ipc_kobject_dealloc_port(port, 0, type);
330 	return KERN_SUCCESS;
331 }
332 
333 EXTERN ipc_kobject_type_t
iokit_port_type(ipc_port_t port)334 iokit_port_type(ipc_port_t port)
335 {
336 	return ip_kotype(port);
337 }
338 
/*
 * Make a send right for the object's port of the given type and copy
 * it out into `task`'s IPC space, returning the resulting port name.
 *
 * Returns MACH_PORT_NULL when obj is NULL, the object has no such
 * port, or the copyout fails; MACH_PORT_DEAD when the port is dead.
 * Note: unlike iokit_make_port_of_type(), the caller's object
 * reference is NOT consumed here.
 */
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
	ipc_port_t          port;
	ipc_port_t          sendPort;
	mach_port_name_t    name = 0;
	ipc_kobject_t       kobj;

	if (obj == NULL) {
		return MACH_PORT_NULL;
	}

	port = iokit_port_for_object( obj, type, &kobj );
	if (port) {
		sendPort = ipc_kobject_make_send( port, kobj, type );
		iokit_release_port( port );
	} else {
		sendPort = IP_NULL;
	}

	if (IP_VALID( sendPort )) {
		kern_return_t   kr;
		// Remove once <rdar://problem/45522961> is fixed.
		// We need to make ith_knote NULL as ipc_object_copyout() uses
		// thread-argument-passing and its value should not be garbage
		current_thread()->ith_knote = ITH_KNOTE_NULL;
		/* copyout consumes the send right on success or failure */
		kr = ipc_object_copyout( task->itk_space, sendPort,
		    MACH_MSG_TYPE_PORT_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
		    NULL, &name);
		if (kr != KERN_SUCCESS) {
			name = MACH_PORT_NULL;
		}
	} else if (sendPort == IP_NULL) {
		name = MACH_PORT_NULL;
	} else if (sendPort == IP_DEAD) {
		/* port existed but is dead: report a dead name */
		name = MACH_PORT_DEAD;
	}

	return name;
}
379 
380 EXTERN kern_return_t
iokit_mod_send_right(task_t task,mach_port_name_t name,mach_port_delta_t delta)381 iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
382 {
383 	return mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta );
384 }
385 
/*
 * Handle the No-More_Senders notification generated from a device port destroy.
 * Since there are no longer any tasks which hold a send right to this device
 * port a NMS notification has been generated.
 */

static void
iokit_no_senders( ipc_port_t port, mach_port_mscount_t mscount )
{
	io_object_t         obj = NULL;
	io_kobject_t        kobj = NULL;
	ipc_kobject_type_t  type = IKOT_NONE;

	// convert a port to io_object_t.
	if (IP_VALID(port)) {
		ip_mq_lock(port);
		if (ip_active(port)) {
			type = ip_kotype( port );
			/* only the four IOKit kobject types install this handler */
			assert((IKOT_IOKIT_OBJECT == type)
			    || (IKOT_IOKIT_CONNECT == type)
			    || (IKOT_IOKIT_IDENT == type)
			    || (IKOT_UEXT_OBJECT == type));
			kobj = ipc_kobject_get_locked(port, type);
			if (kobj) {
				iokit_kobject_retain(kobj);
			}
		}
		ip_mq_unlock(port);
		if (kobj) {
			// IKOT_IOKIT_OBJECT since iokit_remove_reference() follows
			obj = iokit_copy_object_for_consumed_kobject(kobj, IKOT_IOKIT_OBJECT);
		}
	}

	if (obj) {
		/*
		 * Loop until either the client-death handling sticks, or the
		 * re-armed no-senders request is accepted by the port.
		 */
		while (iokit_client_died( obj, port, type, &mscount ) != KERN_SUCCESS) {
			kern_return_t kr;

			/* Re-request no-senders notifications on the port (if still active) */
			kr = ipc_kobject_nsrequest(port, mscount + 1, &mscount);
			if (kr != KERN_FAILURE) {
				break;
			}
			/*
			 * port has no outstanding rights or pending make-sends,
			 * and the notification would fire recursively, try again.
			 */
		}

		iokit_remove_reference( obj );
	}
}
438 
439 
440 kern_return_t
iokit_label_dext_task(task_t task)441 iokit_label_dext_task(task_t task)
442 {
443 	return ipc_space_add_label(task->itk_space, IPC_LABEL_DEXT);
444 }
445 
446 /*
447  *	Routine:	iokit_clear_registered_ports
448  *	Purpose:
449  *		Clean up a task's registered IOKit kobject ports.
450  *	Conditions:
451  *		Nothing locked.
452  */
453 void
iokit_clear_registered_ports(task_t task)454 iokit_clear_registered_ports(
455 	task_t task)
456 {
457 	mach_port_t port;
458 	ipc_kobject_type_t type;
459 
460 	itk_lock(task);
461 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
462 		port = task->itk_registered[i];
463 		if (!IP_VALID(port)) {
464 			continue;
465 		}
466 		type = ip_kotype( port );
467 		if ((IKOT_IOKIT_OBJECT == type)
468 		    || (IKOT_IOKIT_CONNECT == type)
469 		    || (IKOT_IOKIT_IDENT == type)
470 		    || (IKOT_UEXT_OBJECT == type)) {
471 			ipc_port_release_send(port);
472 			task->itk_registered[i] = IP_NULL;
473 		}
474 	}
475 	itk_unlock(task);
476 }
477 
478 /* need to create a pmap function to generalize */
479 unsigned int
IODefaultCacheBits(addr64_t pa)480 IODefaultCacheBits(addr64_t pa)
481 {
482 	return pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT));
483 }
484 
485 kern_return_t
IOMapPages(vm_map_t map,mach_vm_address_t va,mach_vm_address_t pa,mach_vm_size_t length,unsigned int options)486 IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
487     mach_vm_size_t length, unsigned int options)
488 {
489 	vm_prot_t    prot;
490 	unsigned int flags;
491 	ppnum_t      pagenum;
492 	pmap_t       pmap = map->pmap;
493 
494 	prot = (options & kIOMapReadOnly)
495 	    ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
496 
497 	pagenum = (ppnum_t)atop_64(pa);
498 
499 	switch (options & kIOMapCacheMask) {                    /* What cache mode do we need? */
500 	case kIOMapDefaultCache:
501 	default:
502 		flags = IODefaultCacheBits(pa);
503 		break;
504 
505 	case kIOMapInhibitCache:
506 		flags = VM_WIMG_IO;
507 		break;
508 
509 	case kIOMapWriteThruCache:
510 		flags = VM_WIMG_WTHRU;
511 		break;
512 
513 	case kIOMapWriteCombineCache:
514 		flags = VM_WIMG_WCOMB;
515 		break;
516 
517 	case kIOMapCopybackCache:
518 		flags = VM_WIMG_COPYBACK;
519 		break;
520 
521 	case kIOMapCopybackInnerCache:
522 		flags = VM_WIMG_INNERWBACK;
523 		break;
524 
525 	case kIOMapPostedWrite:
526 		flags = VM_WIMG_POSTED;
527 		break;
528 
529 	case kIOMapRealTimeCache:
530 		flags = VM_WIMG_RT;
531 		break;
532 	}
533 
534 	pmap_set_cache_attributes(pagenum, flags);
535 
536 	vm_map_set_cache_attr(map, (vm_map_offset_t)va);
537 
538 
539 	// Set up a block mapped area
540 	return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
541 }
542 
543 kern_return_t
IOUnmapPages(vm_map_t map,mach_vm_address_t va,mach_vm_size_t length)544 IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
545 {
546 	pmap_t      pmap = map->pmap;
547 
548 	pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
549 
550 	return KERN_SUCCESS;
551 }
552 
553 kern_return_t
IOProtectCacheMode(vm_map_t __unused map,mach_vm_address_t __unused va,mach_vm_size_t __unused length,unsigned int __unused options)554 IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
555     mach_vm_size_t __unused length, unsigned int __unused options)
556 {
557 	mach_vm_size_t off;
558 	vm_prot_t      prot;
559 	unsigned int   flags;
560 	pmap_t         pmap = map->pmap;
561 	pmap_flush_context  pmap_flush_context_storage;
562 	boolean_t           delayed_pmap_flush = FALSE;
563 
564 	prot = (options & kIOMapReadOnly)
565 	    ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
566 
567 	switch (options & kIOMapCacheMask) {
568 	// what cache mode do we need?
569 	case kIOMapDefaultCache:
570 	default:
571 		return KERN_INVALID_ARGUMENT;
572 
573 	case kIOMapInhibitCache:
574 		flags = VM_WIMG_IO;
575 		break;
576 
577 	case kIOMapWriteThruCache:
578 		flags = VM_WIMG_WTHRU;
579 		break;
580 
581 	case kIOMapWriteCombineCache:
582 		flags = VM_WIMG_WCOMB;
583 		break;
584 
585 	case kIOMapCopybackCache:
586 		flags = VM_WIMG_COPYBACK;
587 		break;
588 
589 	case kIOMapCopybackInnerCache:
590 		flags = VM_WIMG_INNERWBACK;
591 		break;
592 
593 	case kIOMapPostedWrite:
594 		flags = VM_WIMG_POSTED;
595 		break;
596 
597 	case kIOMapRealTimeCache:
598 		flags = VM_WIMG_RT;
599 		break;
600 	}
601 
602 	pmap_flush_context_init(&pmap_flush_context_storage);
603 	delayed_pmap_flush = FALSE;
604 
605 	//  enter each page's physical address in the target map
606 	for (off = 0; off < length; off += page_size) {
607 		ppnum_t ppnum = pmap_find_phys(pmap, va + off);
608 		if (ppnum) {
609 			pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
610 			    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage, PMAP_MAPPING_TYPE_INFER);
611 			delayed_pmap_flush = TRUE;
612 		}
613 	}
614 	if (delayed_pmap_flush == TRUE) {
615 		pmap_flush(&pmap_flush_context_storage);
616 	}
617 
618 	return KERN_SUCCESS;
619 }
620 
621 ppnum_t
IOGetLastPageNumber(void)622 IOGetLastPageNumber(void)
623 {
624 #if __i386__ || __x86_64__
625 	ppnum_t  lastPage, highest = 0;
626 	unsigned int idx;
627 
628 	for (idx = 0; idx < pmap_memory_region_count; idx++) {
629 		lastPage = pmap_memory_regions[idx].end - 1;
630 		if (lastPage > highest) {
631 			highest = lastPage;
632 		}
633 	}
634 	return highest;
635 #elif __arm64__
636 	return 0;
637 #else
638 #error unknown arch
639 #endif
640 }
641 
642 
643 void IOGetTime( mach_timespec_t * clock_time);
644 void
IOGetTime(mach_timespec_t * clock_time)645 IOGetTime( mach_timespec_t * clock_time)
646 {
647 	clock_sec_t sec;
648 	clock_nsec_t nsec;
649 	clock_get_system_nanotime(&sec, &nsec);
650 	clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
651 	clock_time->tv_nsec = nsec;
652 }
653