xref: /xnu-8796.141.3/osfmk/kern/bsd_kern.c (revision 1b191cb58250d0705d8a51287127505aa4bc0789)
1 /*
2  * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <mach/mach_types.h>
29 #include <mach/machine/vm_param.h>
30 #include <mach/task.h>
31 
32 #include <kern/kern_types.h>
33 #include <kern/ledger.h>
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/spl.h>
38 #include <kern/ast.h>
39 #include <ipc/ipc_port.h>
40 #include <ipc/ipc_object.h>
41 #include <vm/vm_map.h>
42 #include <vm/vm_kern.h>
43 #include <vm/pmap.h>
44 #include <vm/vm_protos.h> /* last */
45 #include <sys/resource.h>
46 #include <sys/signal.h>
47 #include <sys/errno.h>
48 #include <sys/proc_require.h>
49 
50 #if MONOTONIC
51 #include <kern/monotonic.h>
52 #include <machine/monotonic.h>
53 #endif /* MONOTONIC */
54 
55 #include <machine/limits.h>
56 #include <sys/codesign.h> /* CS_CDHASH_LEN */
57 
58 #undef thread_should_halt
59 
60 /* BSD KERN COMPONENT INTERFACE */
61 
62 extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
63 
64 thread_t get_firstthread(task_t);
65 int get_task_userstop(task_t);
66 int get_thread_userstop(thread_t);
67 boolean_t current_thread_aborted(void);
68 void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
69 kern_return_t get_signalact(task_t, thread_t *, int);
70 int fill_task_rusage(task_t task, rusage_info_current *ri);
71 int fill_task_io_rusage(task_t task, rusage_info_current *ri);
72 int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
73 uint64_t get_task_logical_writes(task_t task, bool external);
74 void fill_task_billed_usage(task_t task, rusage_info_current *ri);
75 void task_bsdtask_kill(task_t);
76 
77 extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
78 extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
79 extern uint64_t proc_uniqueid_task(void *p, void *t);
80 extern int proc_pidversion(void *p);
81 extern int proc_getcdhash(void *p, char *cdhash);
82 
83 int mach_to_bsd_errno(kern_return_t mach_err);
84 kern_return_t bsd_to_mach_failure(int bsd_err);
85 
86 #if MACH_BSD
87 extern void psignal(void *, int);
88 #endif
89 
90 /*
91  *
92  */
93 void  *
get_bsdtask_info(task_t t)94 get_bsdtask_info(task_t t)
95 {
96 	void *proc_from_task = task_get_proc_raw(t);
97 	proc_require(proc_from_task, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_ALL);
98 	return task_has_proc(t) ? proc_from_task : NULL;
99 }
100 
101 void
task_bsdtask_kill(task_t t)102 task_bsdtask_kill(task_t t)
103 {
104 	void * bsd_info = get_bsdtask_info(t);
105 	if (bsd_info != NULL) {
106 		psignal(bsd_info, SIGKILL);
107 	}
108 }
109 /*
110  *
111  */
/*
 * Return the proc pointer cached in the thread's read-only struct
 * (tro_proc); may be NULL if it was cleared via clear_thread_ro_proc().
 */
void *
get_bsdthreadtask_info(thread_t th)
{
	return get_thread_ro(th)->tro_proc;
}
117 
118 /*
119  *
120  */
121 void
set_bsdtask_info(task_t t,void * v)122 set_bsdtask_info(task_t t, void * v)
123 {
124 	void *proc_from_task = task_get_proc_raw(t);
125 	if (v == NULL) {
126 		task_clear_has_proc(t);
127 	} else {
128 		if (v != proc_from_task) {
129 			panic("set_bsdtask_info trying to set random bsd_info %p", v);
130 		}
131 		task_set_has_proc(t);
132 	}
133 }
134 
/*
 * Panic helper for get_thread_ro(): fires when a thread's read-only
 * struct does not point back at that thread.
 */
__abortlike
static void
__thread_ro_circularity_panic(thread_t th, thread_ro_t tro)
{
	panic("tro %p points back to %p instead of %p", tro, tro->tro_owner, th);
}
141 
/*
 * Fast path: return the thread's read-only struct without any
 * integrity checks (see get_thread_ro() for the checked variant).
 */
__attribute__((always_inline))
thread_ro_t
get_thread_ro_unchecked(thread_t th)
{
	return th->t_tro;
}
148 
/*
 * Return the thread's read-only struct, validating that it lives in
 * the thread_ro zone and that its back-pointer matches the thread.
 * Panics on any inconsistency.
 */
thread_ro_t
get_thread_ro(thread_t th)
{
	thread_ro_t tro = th->t_tro;

	/* Zone check first, so the tro_owner load below is on a vetted pointer. */
	zone_require_ro(ZONE_ID_THREAD_RO, sizeof(struct thread_ro), tro);
	if (tro->tro_owner != th) {
		__thread_ro_circularity_panic(th, tro);
	}
	return tro;
}
160 
/*
 * Unchecked read-only struct for the current thread.
 */
__attribute__((always_inline))
thread_ro_t
current_thread_ro_unchecked(void)
{
	return get_thread_ro_unchecked(current_thread());
}

/*
 * Checked read-only struct for the current thread.
 */
thread_ro_t
current_thread_ro(void)
{
	return get_thread_ro(current_thread());
}
173 
/*
 * Zero the tro_proc field of the thread's read-only struct through
 * the read-only zone write gate.
 */
void
clear_thread_ro_proc(thread_t th)
{
	thread_ro_t tro = get_thread_ro(th);

	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_proc);
}
181 
/*
 * The uthread is laid out immediately after the thread structure;
 * convert thread -> uthread by pointer arithmetic.
 */
struct uthread *
get_bsdthread_info(thread_t th)
{
	return (struct uthread *)((uintptr_t)th + sizeof(struct thread));
}

/*
 * Inverse of get_bsdthread_info(): recover the machine thread from
 * its trailing uthread.
 */
thread_t
get_machthread(struct uthread *uth)
{
	return (struct thread *)((uintptr_t)uth - sizeof(struct thread));
}
193 
194 /*
195  * This is used to remember any FS error from VNOP_PAGEIN code when
196  * invoked under vm_fault(). The value is an errno style value. It can
197  * be retrieved by exception handlers using thread_get_state().
198  */
199 void
set_thread_pagein_error(thread_t th,int error)200 set_thread_pagein_error(thread_t th, int error)
201 {
202 	assert(th == current_thread());
203 	if (error == 0 || th->t_pagein_error == 0) {
204 		th->t_pagein_error = error;
205 	}
206 }
207 
208 #if defined(__x86_64__)
209 /*
210  * Returns non-zero if the thread has a non-NULL task
211  * and that task has an LDT.
212  */
int
thread_task_has_ldt(thread_t th)
{
	task_t task = get_threadtask(th);
	/* A non-zero i386_ldt indicates the task has an LDT installed. */
	return task && task->i386_ldt != 0;
}
219 #endif /* __x86_64__ */
220 
221 /*
222  * XXX
223  */
int get_thread_lock_count(thread_t th);         /* forced forward */
/*
 * Return the thread's mutex hold count (th->mutex_count).
 */
int
get_thread_lock_count(thread_t th)
{
	return th->mutex_count;
}
230 
231 /*
232  * Returns a thread reference.
233  */
234 thread_t
get_firstthread(task_t task)235 get_firstthread(task_t task)
236 {
237 	thread_t thread = THREAD_NULL;
238 	task_lock(task);
239 
240 	if (!task->active) {
241 		task_unlock(task);
242 		return THREAD_NULL;
243 	}
244 
245 	thread = (thread_t)(void *)queue_first(&task->threads);
246 
247 	if (queue_end(&task->threads, (queue_entry_t)thread)) {
248 		task_unlock(task);
249 		return THREAD_NULL;
250 	}
251 
252 	thread_reference(thread);
253 	task_unlock(task);
254 	return thread;
255 }
256 
/*
 * Find a thread in the task that can take a signal: active and not
 * unsafely aborted.  Optionally posts an AST_BSD to the chosen thread.
 * The chosen thread (or THREAD_NULL) is stored in *result_out if
 * result_out is non-NULL; no reference is taken on it.
 */
kern_return_t
get_signalact(
	task_t          task,
	thread_t        *result_out,
	int                     setast)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_t                inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	/* Walk the thread list; on a match we break with inc's mutex HELD. */
	for (inc  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		if (setast) {
			act_set_astbsd(thread);
		}

		/* Drop the mutex still held from the loop above. */
		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}
305 
306 
/*
 * Check that a specific thread belongs to the task and can take a
 * signal (active, not unsafely aborted).  Optionally posts an AST_BSD
 * on success.  Returns KERN_SUCCESS only when the thread was found
 * and eligible.
 */
kern_return_t
check_actforsig(
	task_t                  task,
	thread_t                thread,
	int                             setast)
{
	kern_return_t   result = KERN_FAILURE;
	thread_t                inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	/* Scan for the target thread; on success we break with its mutex HELD. */
	for (inc  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		/* Drop the mutex still held from the loop above. */
		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}
354 
/*
 * Return the task's ledger (no reference taken).
 */
ledger_t
get_task_ledger(task_t t)
{
	return t->ledger;
}

/*
 * This is only safe to call from a thread executing in
 * in the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t
get_task_map(task_t t)
{
	return t->map;
}

/*
 * Return the task's map with a reference taken, or VM_MAP_NULL if the
 * task is NULL or no longer active.  The task lock is held while the
 * reference is acquired so the map cannot be swapped underneath us.
 */
vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference(m);
	task_unlock(t);
	return m;
}

/*
 * Return the task's IPC space (no reference taken).
 */
ipc_space_t
get_task_ipcspace(task_t t)
{
	return t->itk_space;
}

/*
 * Unlocked snapshot of the task's thread count.
 */
int
get_task_numacts(task_t t)
{
	return t->thread_count;
}
407 
408 /* does this machine need  64bit register set for signal handler */
/*
 * Return 1 when the current task uses 64-bit data and therefore needs
 * the 64-bit register set for signal handlers, 0 otherwise.
 */
int
is_64signalregset(void)
{
	return task_has_64Bit_data(current_task()) ? 1 : 0;
}
418 
419 /*
420  * Swap in a new map for the task/thread pair; the old map reference is
421  * returned. Also does a pmap switch if thread provided is current thread.
422  */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	/* The thread must belong to the task whose map is being swapped. */
	if (task != get_threadtask(thread)) {
		panic("swap_task_map");
	}

	task_lock(task);
	/* Keep the pmap switch on this CPU by disabling preemption. */
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

	/* Caller inherits the old map's reference. */
	return old_map;
}
448 
449 /*
450  *
451  * This is only safe to call from a thread executing in
452  * in the task's context or if the task is locked. Otherwise,
453  * the map could be switched for the task (and freed) before
454  * we go to return it here.
455  */
/*
 * Return the pmap of the task's map (see the safety note above: only
 * valid from the task's context or with the task locked).
 */
pmap_t
get_task_pmap(task_t t)
{
	return t->map->pmap;
}
461 
462 /*
463  *
464  */
465 uint64_t
get_task_resident_size(task_t task)466 get_task_resident_size(task_t task)
467 {
468 	uint64_t val;
469 
470 	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
471 	return val;
472 }
473 
474 uint64_t
get_task_compressed(task_t task)475 get_task_compressed(task_t task)
476 {
477 	uint64_t val;
478 
479 	ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t *) &val);
480 	return val;
481 }
482 
483 uint64_t
get_task_resident_max(task_t task)484 get_task_resident_max(task_t task)
485 {
486 	uint64_t val;
487 
488 	ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
489 	return val;
490 }
491 
492 /*
493  * Get the balance for a given field in the task ledger.
494  * Returns 0 if the entry is invalid.
495  */
/*
 * Get the balance for a given field in the task ledger.
 * Returns 0 if the entry is invalid.
 */
static uint64_t
get_task_ledger_balance(task_t task, int entry)
{
	/* balance stays 0 if ledger_get_balance() rejects the entry. */
	ledger_amount_t balance = 0;

	ledger_get_balance(task->ledger, entry, &balance);
	return balance;
}
504 
505 uint64_t
get_task_purgeable_size(task_t task)506 get_task_purgeable_size(task_t task)
507 {
508 	kern_return_t ret;
509 	ledger_amount_t balance = 0;
510 	uint64_t volatile_size = 0;
511 
512 	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
513 	if (ret != KERN_SUCCESS) {
514 		return 0;
515 	}
516 
517 	volatile_size += balance;
518 
519 	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
520 	if (ret != KERN_SUCCESS) {
521 		return 0;
522 	}
523 
524 	volatile_size += balance;
525 
526 	return volatile_size;
527 }
528 
529 /*
530  *
531  */
/*
 * Current phys_footprint ledger balance (0 if unreadable).
 */
uint64_t
get_task_phys_footprint(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.phys_footprint);
}
537 
538 #if CONFIG_LEDGER_INTERVAL_MAX
539 /*
540  *
541  */
542 uint64_t
get_task_phys_footprint_interval_max(task_t task,int reset)543 get_task_phys_footprint_interval_max(task_t task, int reset)
544 {
545 	kern_return_t ret;
546 	ledger_amount_t max;
547 
548 	ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);
549 
550 	if (KERN_SUCCESS == ret) {
551 		return max;
552 	}
553 
554 	return 0;
555 }
556 #endif /* CONFIG_LEDGER_INTERVAL_MAX */
557 
558 /*
559  *
560  */
561 uint64_t
get_task_phys_footprint_lifetime_max(task_t task)562 get_task_phys_footprint_lifetime_max(task_t task)
563 {
564 	kern_return_t ret;
565 	ledger_amount_t max;
566 
567 	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);
568 
569 	if (KERN_SUCCESS == ret) {
570 		return max;
571 	}
572 
573 	return 0;
574 }
575 
576 /*
577  *
578  */
579 uint64_t
get_task_phys_footprint_limit(task_t task)580 get_task_phys_footprint_limit(task_t task)
581 {
582 	kern_return_t ret;
583 	ledger_amount_t max;
584 
585 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
586 	if (KERN_SUCCESS == ret) {
587 		return max;
588 	}
589 
590 	return 0;
591 }
592 
/*
 * The getters below each return the balance of one task ledger entry
 * via get_task_ledger_balance() (0 if the entry is invalid).
 */

uint64_t
get_task_internal(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal);
}

uint64_t
get_task_internal_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal_compressed);
}

uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
}

uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
}

uint64_t
get_task_alternate_accounting(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
}

uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
}

uint64_t
get_task_page_table(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.page_table);
}

#if CONFIG_FREEZE
uint64_t
get_task_frozen_to_swap(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
}
#endif /* CONFIG_FREEZE */

uint64_t
get_task_iokit_mapped(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
}

uint64_t
get_task_network_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
}

uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
}

uint64_t
get_task_wired_mem(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.wired_mem);
}
666 
667 uint64_t
get_task_tagged_footprint(task_t task)668 get_task_tagged_footprint(task_t task)
669 {
670 	kern_return_t ret;
671 	ledger_amount_t credit, debit;
672 
673 	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
674 	if (KERN_SUCCESS == ret) {
675 		return credit - debit;
676 	}
677 
678 	return 0;
679 }
680 
681 uint64_t
get_task_tagged_footprint_compressed(task_t task)682 get_task_tagged_footprint_compressed(task_t task)
683 {
684 	kern_return_t ret;
685 	ledger_amount_t credit, debit;
686 
687 	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
688 	if (KERN_SUCCESS == ret) {
689 		return credit - debit;
690 	}
691 
692 	return 0;
693 }
694 
695 uint64_t
get_task_media_footprint(task_t task)696 get_task_media_footprint(task_t task)
697 {
698 	kern_return_t ret;
699 	ledger_amount_t credit, debit;
700 
701 	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
702 	if (KERN_SUCCESS == ret) {
703 		return credit - debit;
704 	}
705 
706 	return 0;
707 }
708 
709 uint64_t
get_task_media_footprint_compressed(task_t task)710 get_task_media_footprint_compressed(task_t task)
711 {
712 	kern_return_t ret;
713 	ledger_amount_t credit, debit;
714 
715 	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
716 	if (KERN_SUCCESS == ret) {
717 		return credit - debit;
718 	}
719 
720 	return 0;
721 }
722 
723 uint64_t
get_task_graphics_footprint(task_t task)724 get_task_graphics_footprint(task_t task)
725 {
726 	kern_return_t ret;
727 	ledger_amount_t credit, debit;
728 
729 	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
730 	if (KERN_SUCCESS == ret) {
731 		return credit - debit;
732 	}
733 
734 	return 0;
735 }
736 
737 
738 uint64_t
get_task_graphics_footprint_compressed(task_t task)739 get_task_graphics_footprint_compressed(task_t task)
740 {
741 	kern_return_t ret;
742 	ledger_amount_t credit, debit;
743 
744 	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
745 	if (KERN_SUCCESS == ret) {
746 		return credit - debit;
747 	}
748 
749 	return 0;
750 }
751 
752 uint64_t
get_task_neural_footprint(task_t task)753 get_task_neural_footprint(task_t task)
754 {
755 	kern_return_t ret;
756 	ledger_amount_t credit, debit;
757 
758 	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
759 	if (KERN_SUCCESS == ret) {
760 		return credit - debit;
761 	}
762 
763 	return 0;
764 }
765 
766 uint64_t
get_task_neural_footprint_compressed(task_t task)767 get_task_neural_footprint_compressed(task_t task)
768 {
769 	kern_return_t ret;
770 	ledger_amount_t credit, debit;
771 
772 	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
773 	if (KERN_SUCCESS == ret) {
774 		return credit - debit;
775 	}
776 
777 	return 0;
778 }
779 
/*
 * Balance of the task's cpu_time ledger entry (0 if unreadable).
 */
uint64_t
get_task_cpu_time(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.cpu_time);
}
785 
/*
 * Atomically read the task's loadTag.
 */
uint32_t
get_task_loadTag(task_t task)
{
	return os_atomic_load(&task->loadTag, relaxed);
}

/*
 * Atomically install a new loadTag, returning the previous value.
 */
uint32_t
set_task_loadTag(task_t task, uint32_t loadTag)
{
	return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
}


/*
 * Task that owns the thread, from the thread's read-only struct.
 */
task_t
get_threadtask(thread_t th)
{
	return get_thread_ro(th)->tro_task;
}

/*
 * Like get_threadtask(), but tolerant of very early boot where a
 * thread's read-only struct may not exist yet; returns TASK_NULL
 * in that case.
 */
task_t
get_threadtask_early(thread_t th)
{
	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
		if (th == THREAD_NULL || th->t_tro == NULL) {
			return TASK_NULL;
		}
	}
	return get_threadtask(th);
}
815 
816 /*
817  *
818  */
/*
 * Minimum offset of the map.
 */
vm_map_offset_t
get_map_min(
	vm_map_t        map)
{
	return vm_map_min(map);
}

/*
 * Maximum offset of the map.
 */
vm_map_offset_t
get_map_max(
	vm_map_t        map)
{
	return vm_map_max(map);
}

/*
 * Adjusted virtual size of the map.
 */
vm_map_size_t
get_vmmap_size(
	vm_map_t        map)
{
	return vm_map_adjusted_size(map);
}

/*
 * VM page size used by the task's map.
 */
int
get_task_page_size(
	task_t task)
{
	return vm_map_page_size(task->map);
}
847 
848 #if CONFIG_COREDUMP
849 
/*
 * Count the entries of a submap that fall within [start, end),
 * recursing into nested submaps.  Map locking is skipped when
 * running under the kernel debugger (not_in_kdp == 0).
 */
static int
get_vmsubmap_entries(
	vm_map_t        map,
	vm_object_offset_t      start,
	vm_object_offset_t      end)
{
	int     total_entries = 0;
	vm_map_entry_t  entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);
	/* Advance past entries that begin before the window of interest. */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
885 
/*
 * Count all entries of a map, recursing into submaps via
 * get_vmsubmap_entries().  Map locking is skipped when running
 * under the kernel debugger (not_in_kdp == 0).
 */
int
get_vmmap_entries(
	vm_map_t        map)
{
	int     total_entries = 0;
	vm_map_entry_t  entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
916 #endif /* CONFIG_COREDUMP */
917 
/*
 * Task-level user stop count.
 */
int
get_task_userstop(
	task_t task)
{
	return task->user_stop_count;
}

/*
 * Per-thread user stop count.
 */
int
get_thread_userstop(
	thread_t th)
{
	return th->user_stop_count;
}

/*
 * Whether the task is currently pid-suspended.
 */
boolean_t
get_task_pidsuspended(
	task_t task)
{
	return task->pidsuspended;
}

/*
 * Whether the task is currently frozen.
 */
boolean_t
get_task_frozen(
	task_t task)
{
	return task->frozen;
}

/*
 * TRUE when the thread has an unsafe abort pending (aborted, and not
 * merely a safe abort).
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}
952 
953 /*
954  * This routine is like thread_should_abort() above.  It checks to
955  * see if the current thread is aborted.  But unlike above, it also
956  * checks to see if thread is safely aborted.  If so, it returns
957  * that fact, and clears the condition (safe aborts only should
958  * have a single effect, and a poll of the abort status
959  * qualifies.
960  */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	/* Unsafe abort on an interruptible thread: report it. */
	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	/*
	 * Safe abort: clear it (single-effect semantics, see the comment
	 * above), re-checking the flag under the thread lock.
	 */
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
983 
984 void
task_act_iterate_wth_args(task_t task,void (* func_callback)(thread_t,void *),void * func_arg)985 task_act_iterate_wth_args(
986 	task_t                  task,
987 	void                    (*func_callback)(thread_t, void *),
988 	void                    *func_arg)
989 {
990 	thread_t        inc;
991 
992 	task_lock(task);
993 
994 	for (inc  = (thread_t)(void *)queue_first(&task->threads);
995 	    !queue_end(&task->threads, (queue_entry_t)inc);) {
996 		(void) (*func_callback)(inc, func_arg);
997 		inc = (thread_t)(void *)queue_next(&inc->task_threads);
998 	}
999 
1000 	task_unlock(task);
1001 }
1002 
1003 #include <sys/bsdtask_info.h>
1004 
/*
 * Populate a proc_taskinfo_internal from the task: virtual/resident
 * size, scheduling policy, summed per-thread counters, CPU times, and
 * assorted event counters, all gathered under the task lock.
 */
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t   tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size  = vm_map_adjusted_size(map);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &ptinfo->pti_resident_size);

	ptinfo->pti_policy = ((task != kernel_task)?
	    POLICY_TIMESHARE: POLICY_RR);

	/* Sum per-thread statistics; idle threads are skipped. */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	/* Live-thread time = total task time minus terminated-thread time. */
	struct recount_times_mach term_times = recount_task_terminated_times(task);
	struct recount_times_mach total_times = recount_task_times(task);

	tinfo.threads_user = total_times.rtm_user - term_times.rtm_user;
	tinfo.threads_system = total_times.rtm_system - term_times.rtm_system;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_total_system = total_times.rtm_system;
	ptinfo->pti_total_user = total_times.rtm_user;

	/* Saturate 64-bit counters into the 32-bit proc_taskinfo fields. */
	ptinfo->pti_faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
	ptinfo->pti_pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
	ptinfo->pti_cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
	ptinfo->pti_messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
	ptinfo->pti_messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
	ptinfo->pti_syscalls_mach = (int32_t) MIN(task->syscalls_mach + syscalls_mach, INT32_MAX);
	ptinfo->pti_syscalls_unix = (int32_t) MIN(task->syscalls_unix + syscalls_unix, INT32_MAX);
	ptinfo->pti_csw = (int32_t) MIN(task->c_switch + cswitch, INT32_MAX);
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
1072 
/*
 * Locate the thread identified by thaddr (thread_id when thuniqueid is
 * set, otherwise cthread_self) and fill ptinfo with its basic info,
 * priorities, and name.  When vpp is non-NULL, bsd_threadcdir() is
 * also invoked with vpp/vidp.  Returns 0 on success, 1 if the thread
 * was not found or thread_info_internal() failed.
 */
int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t  thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			/* Convert seconds/microseconds pairs to nanoseconds. */
			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if (vpp != NULL) {
				bsd_threadcdir(get_bsdthread_info(thact), vpp, vidp);
			}
			bsd_getthreadname(get_bsdthread_info(thact), ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return err;
}
1121 
/*
 * Fill buffer with up to thcount 64-bit thread identifiers (thread_id
 * when thuniqueid is set, otherwise cthread_self).  Returns the number
 * of bytes written.  The caller must supply at least
 * thcount * sizeof(uint64_t) bytes of buffer.
 */
int
fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t  thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount) {
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}
1149 
1150 int
fill_taskthreadschedinfo(task_t task,uint64_t thread_id,struct proc_threadschedinfo_internal * thread_sched_info)1151 fill_taskthreadschedinfo(task_t task, uint64_t thread_id, struct proc_threadschedinfo_internal *thread_sched_info)
1152 {
1153 	int err = 0;
1154 
1155 	thread_t thread = current_thread();
1156 
1157 	/*
1158 	 * Looking up threads is pretty expensive and not realtime-safe
1159 	 * right now, requiring locking the task and iterating over all
1160 	 * threads. As long as that is the case, we officially only
1161 	 * support getting this info for the current thread.
1162 	 */
1163 	if (task != current_task() || thread_id != thread->thread_id) {
1164 		return -1;
1165 	}
1166 
1167 #if SCHED_HYGIENE_DEBUG
1168 	absolutetime_to_nanoseconds(thread->machine.int_time_mt, &thread_sched_info->int_time_ns);
1169 #else
1170 	(void)thread;
1171 	thread_sched_info->int_time_ns = 0;
1172 #endif
1173 
1174 	return err;
1175 }
1176 
/*
 * Return the task's current thread count.  Read without taking the
 * task lock, so the value is a racy snapshot.
 */
int
get_numthreads(task_t task)
{
	return task->thread_count;
}
1182 
1183 /*
1184  * Gather the various pieces of info about the designated task,
1185  * and collect it all into a single rusage_info.
1186  */
1187 int
fill_task_rusage(task_t task,rusage_info_current * ri)1188 fill_task_rusage(task_t task, rusage_info_current *ri)
1189 {
1190 	struct task_power_info powerinfo;
1191 
1192 	assert(task != TASK_NULL);
1193 	task_lock(task);
1194 
1195 	struct task_power_info_extra extra = { 0 };
1196 	task_power_info_locked(task, &powerinfo, NULL, NULL, &extra);
1197 	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
1198 	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
1199 	ri->ri_user_time = powerinfo.total_user;
1200 	ri->ri_system_time = powerinfo.total_system;
1201 	ri->ri_runnable_time = extra.runnable_time;
1202 	ri->ri_cycles = extra.cycles;
1203 	ri->ri_instructions = extra.instructions;
1204 	ri->ri_pcycles = extra.pcycles;
1205 	ri->ri_pinstructions = extra.pinstructions;
1206 	ri->ri_user_ptime = extra.user_ptime;
1207 	ri->ri_system_ptime = extra.system_ptime;
1208 	ri->ri_energy_nj = extra.energy;
1209 	ri->ri_penergy_nj = extra.penergy;
1210 
1211 	ri->ri_phys_footprint = get_task_phys_footprint(task);
1212 	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
1213 	    (ledger_amount_t *)&ri->ri_resident_size);
1214 	ri->ri_wired_size = get_task_wired_mem(task);
1215 
1216 	ri->ri_pageins = counter_load(&task->pageins);
1217 
1218 	task_unlock(task);
1219 	return 0;
1220 }
1221 
1222 void
fill_task_billed_usage(task_t task __unused,rusage_info_current * ri)1223 fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
1224 {
1225 	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
1226 	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
1227 }
1228 
1229 int
fill_task_io_rusage(task_t task,rusage_info_current * ri)1230 fill_task_io_rusage(task_t task, rusage_info_current *ri)
1231 {
1232 	assert(task != TASK_NULL);
1233 	task_lock(task);
1234 
1235 	if (task->task_io_stats) {
1236 		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
1237 		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
1238 	} else {
1239 		/* I/O Stats unavailable */
1240 		ri->ri_diskio_bytesread = 0;
1241 		ri->ri_diskio_byteswritten = 0;
1242 	}
1243 	task_unlock(task);
1244 	return 0;
1245 }
1246 
1247 int
fill_task_qos_rusage(task_t task,rusage_info_current * ri)1248 fill_task_qos_rusage(task_t task, rusage_info_current *ri)
1249 {
1250 	thread_t thread;
1251 
1252 	assert(task != TASK_NULL);
1253 	task_lock(task);
1254 
1255 	/* Rollup QoS time of all the threads to task */
1256 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
1257 		if (thread->options & TH_OPT_IDLE_THREAD) {
1258 			continue;
1259 		}
1260 
1261 		thread_update_qos_cpu_time(thread);
1262 	}
1263 	ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
1264 	ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
1265 	ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
1266 	ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
1267 	ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
1268 	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
1269 	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
1270 
1271 	task_unlock(task);
1272 	return 0;
1273 }
1274 
1275 uint64_t
get_task_logical_writes(task_t task,bool external)1276 get_task_logical_writes(task_t task, bool external)
1277 {
1278 	assert(task != TASK_NULL);
1279 	struct ledger_entry_info lei;
1280 	int entry = external ? task_ledgers.logical_writes_to_external :
1281 	    task_ledgers.logical_writes;
1282 
1283 	task_lock(task);
1284 	ledger_get_entry_info(task->ledger, entry, &lei);
1285 	task_unlock(task);
1286 
1287 	return lei.lei_balance;
1288 }
1289 
1290 uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)1291 get_task_dispatchqueue_serialno_offset(task_t task)
1292 {
1293 	uint64_t dq_serialno_offset = 0;
1294 	void *bsd_info = get_bsdtask_info(task);
1295 
1296 	if (bsd_info) {
1297 		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(bsd_info);
1298 	}
1299 
1300 	return dq_serialno_offset;
1301 }
1302 
1303 uint64_t
get_task_dispatchqueue_label_offset(task_t task)1304 get_task_dispatchqueue_label_offset(task_t task)
1305 {
1306 	uint64_t dq_label_offset = 0;
1307 	void *bsd_info = get_bsdtask_info(task);
1308 
1309 	if (bsd_info) {
1310 		dq_label_offset = get_dispatchqueue_label_offset_from_proc(bsd_info);
1311 	}
1312 
1313 	return dq_label_offset;
1314 }
1315 
1316 uint64_t
get_task_uniqueid(task_t task)1317 get_task_uniqueid(task_t task)
1318 {
1319 	void *bsd_info = get_bsdtask_info(task);
1320 
1321 	if (bsd_info) {
1322 		return proc_uniqueid_task(bsd_info, task);
1323 	} else {
1324 		return UINT64_MAX;
1325 	}
1326 }
1327 
1328 int
get_task_version(task_t task)1329 get_task_version(task_t task)
1330 {
1331 	void *bsd_info = get_bsdtask_info(task);
1332 
1333 	if (bsd_info) {
1334 		return proc_pidversion(bsd_info);
1335 	} else {
1336 		return INT_MAX;
1337 	}
1338 }
1339 
1340 #if CONFIG_MACF
/*
 * Return the task's MAC crash label pointer.  No locking here;
 * presumably the caller serializes against set_task_crash_label()
 * — confirm at call sites.
 */
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}
1346 
/*
 * Install 'label' as the task's MAC crash label.  The previous
 * label pointer is overwritten, not released, so ownership of the
 * old label stays with the caller.  No locking here; presumably the
 * caller serializes concurrent updates — confirm at call sites.
 */
void
set_task_crash_label(task_t task, struct label *label)
{
	task->crash_label = label;
}
1352 #endif
1353 
1354 int
fill_taskipctableinfo(task_t task,uint32_t * table_size,uint32_t * table_free)1355 fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
1356 {
1357 	ipc_space_t space = task->itk_space;
1358 	if (space == NULL) {
1359 		return -1;
1360 	}
1361 
1362 	is_read_lock(space);
1363 	if (!is_active(space)) {
1364 		is_read_unlock(space);
1365 		return -1;
1366 	}
1367 
1368 	*table_size = ipc_entry_table_count(is_active_table(space));
1369 	*table_free = space->is_table_free;
1370 
1371 	is_read_unlock(space);
1372 
1373 	return 0;
1374 }
1375 
1376 int
get_task_cdhash(task_t task,char cdhash[static CS_CDHASH_LEN])1377 get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
1378 {
1379 	int result = 0;
1380 	void *bsd_info = NULL;
1381 
1382 	task_lock(task);
1383 	bsd_info = get_bsdtask_info(task);
1384 	result = bsd_info ? proc_getcdhash(bsd_info, cdhash) : ESRCH;
1385 	task_unlock(task);
1386 
1387 	return result;
1388 }
1389 
/* moved from ubc_subr.c */
/*
 * Translate a Mach kern_return_t into the closest BSD errno.
 * Unrecognized Mach codes (including KERN_FAILURE) fall through
 * to EIO.
 */
int
mach_to_bsd_errno(kern_return_t mach_err)
{
	switch (mach_err) {
	case KERN_SUCCESS:
		return 0;

	/* Bad arguments, names, rights or state supplied by the caller. */
	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
	case KERN_NOT_IN_SET:
	case KERN_INVALID_NAME:
	case KERN_INVALID_TASK:
	case KERN_INVALID_RIGHT:
	case KERN_INVALID_VALUE:
	case KERN_INVALID_CAPABILITY:
	case KERN_INVALID_HOST:
	case KERN_MEMORY_PRESENT:
	case KERN_INVALID_PROCESSOR_SET:
	case KERN_INVALID_POLICY:
	case KERN_ALREADY_WAITING:
	case KERN_DEFAULT_SET:
	case KERN_EXCEPTION_PROTECTED:
	case KERN_INVALID_LEDGER:
	case KERN_INVALID_MEMORY_CONTROL:
	case KERN_INVALID_SECURITY:
	case KERN_NOT_DEPRESSED:
	case KERN_LOCK_OWNED:
	case KERN_LOCK_OWNED_SELF:
		return EINVAL;

	/* Access/permission style failures. */
	case KERN_NOT_RECEIVER:
	case KERN_NO_ACCESS:
	case KERN_POLICY_STATIC:
		return EACCES;

	/* Resource exhaustion. */
	case KERN_NO_SPACE:
	case KERN_RESOURCE_SHORTAGE:
	case KERN_UREFS_OVERFLOW:
	case KERN_INVALID_OBJECT:
		return ENOMEM;

	/* Memory access faults. */
	case KERN_MEMORY_FAILURE:
	case KERN_MEMORY_ERROR:
	case KERN_PROTECTION_FAILURE:
		return EFAULT;

	/* Operation forbidden by policy or code signing. */
	case KERN_POLICY_LIMIT:
	case KERN_CODESIGN_ERROR:
	case KERN_DENIED:
		return EPERM;

	/* Target already exists. */
	case KERN_ALREADY_IN_SET:
	case KERN_NAME_EXISTS:
	case KERN_RIGHT_EXISTS:
		return EEXIST;

	case KERN_ABORTED:
		return EINTR;

	/* Target gone or was never there. */
	case KERN_TERMINATED:
	case KERN_LOCK_SET_DESTROYED:
	case KERN_LOCK_UNSTABLE:
	case KERN_SEMAPHORE_DESTROYED:
	case KERN_NOT_FOUND:
	case KERN_NOT_WAITING:
		return ENOENT;

	case KERN_RPC_SERVER_TERMINATED:
		return ECONNRESET;

	case KERN_NOT_SUPPORTED:
		return ENOTSUP;

	case KERN_NODE_DOWN:
		return ENETDOWN;

	case KERN_OPERATION_TIMED_OUT:
		return ETIMEDOUT;

	default:
		return EIO; /* 5 == KERN_FAILURE */
	}
}
1474 
1475 kern_return_t
bsd_to_mach_failure(int bsd_err)1476 bsd_to_mach_failure(int bsd_err)
1477 {
1478 	switch (bsd_err) {
1479 	case EIO:
1480 	case EACCES:
1481 	case ENOMEM:
1482 	case EFAULT:
1483 		return KERN_MEMORY_ERROR;
1484 
1485 	case EINVAL:
1486 		return KERN_INVALID_ARGUMENT;
1487 
1488 	case ETIMEDOUT:
1489 	case EBUSY:
1490 		return KERN_OPERATION_TIMED_OUT;
1491 
1492 	case ECONNRESET:
1493 		return KERN_RPC_SERVER_TERMINATED;
1494 
1495 	case ENOTSUP:
1496 		return KERN_NOT_SUPPORTED;
1497 
1498 	case ENETDOWN:
1499 		return KERN_NODE_DOWN;
1500 
1501 	case ENOENT:
1502 		return KERN_NOT_FOUND;
1503 
1504 	case EINTR:
1505 		return KERN_ABORTED;
1506 
1507 	case EPERM:
1508 		return KERN_DENIED;
1509 
1510 	case EEXIST:
1511 		return KERN_ALREADY_IN_SET;
1512 
1513 	default:
1514 		return KERN_FAILURE;
1515 	}
1516 }
1517