xref: /xnu-8792.61.2/osfmk/kern/bsd_kern.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <mach/mach_types.h>
29 #include <mach/machine/vm_param.h>
30 #include <mach/task.h>
31 
32 #include <kern/kern_types.h>
33 #include <kern/ledger.h>
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/spl.h>
38 #include <kern/ast.h>
39 #include <ipc/ipc_port.h>
40 #include <ipc/ipc_object.h>
41 #include <vm/vm_map.h>
42 #include <vm/vm_kern.h>
43 #include <vm/pmap.h>
44 #include <vm/vm_protos.h> /* last */
45 #include <sys/resource.h>
46 #include <sys/signal.h>
47 #include <sys/errno.h>
48 #include <sys/proc_require.h>
49 
50 #if MONOTONIC
51 #include <kern/monotonic.h>
52 #include <machine/monotonic.h>
53 #endif /* MONOTONIC */
54 
55 #include <machine/limits.h>
56 #include <sys/codesign.h> /* CS_CDHASH_LEN */
57 
58 #undef thread_should_halt
59 
60 /* BSD KERN COMPONENT INTERFACE */
61 
62 extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
63 
64 thread_t get_firstthread(task_t);
65 int get_task_userstop(task_t);
66 int get_thread_userstop(thread_t);
67 boolean_t current_thread_aborted(void);
68 void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
69 kern_return_t get_signalact(task_t, thread_t *, int);
70 int fill_task_rusage(task_t task, rusage_info_current *ri);
71 int fill_task_io_rusage(task_t task, rusage_info_current *ri);
72 int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
73 uint64_t get_task_logical_writes(task_t task, bool external);
74 void fill_task_billed_usage(task_t task, rusage_info_current *ri);
75 void task_bsdtask_kill(task_t);
76 
77 extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
78 extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
79 extern uint64_t proc_uniqueid_task(void *p, void *t);
80 extern int proc_pidversion(void *p);
81 extern int proc_getcdhash(void *p, char *cdhash);
82 
83 int mach_to_bsd_errno(kern_return_t mach_err);
84 kern_return_t bsd_to_mach_failure(int bsd_err);
85 
86 #if MACH_BSD
87 extern void psignal(void *, int);
88 #endif
89 
90 /*
91  *
92  */
/*
 * Return the BSD proc pointer bound to a Mach task, or NULL if the
 * task currently has no proc attached.
 */
void  *
get_bsdtask_info(task_t t)
{
	/* Raw pointer is always stored alongside the task; validate it first. */
	void *proc_from_task = task_get_proc_raw(t);
	proc_require(proc_from_task, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_ALL);
	/* Only hand it out if the task actually has a proc bound. */
	return task_has_proc(t) ? proc_from_task : NULL;
}

/*
 * Deliver SIGKILL to the proc backing this task, if one is attached.
 * No-op for tasks without a BSD proc (e.g. kernel-only tasks).
 */
void
task_bsdtask_kill(task_t t)
{
	void * bsd_info = get_bsdtask_info(t);
	if (bsd_info != NULL) {
		psignal(bsd_info, SIGKILL);
	}
}
109 /*
110  *
111  */
/*
 * Return the proc pointer recorded in the thread's read-only data.
 * Unlike get_bsdtask_info() this does not consult the task at all.
 */
void *
get_bsdthreadtask_info(thread_t th)
{
	return get_thread_ro(th)->tro_proc;
}

/*
 * Attach (v != NULL) or detach (v == NULL) the proc for a task.
 * The proc pointer itself is immutable for the task's lifetime; this
 * only flips the "has proc" flag, and panics if the caller tries to
 * install any pointer other than the one the task was created with.
 */
void
set_bsdtask_info(task_t t, void * v)
{
	void *proc_from_task = task_get_proc_raw(t);
	if (v == NULL) {
		task_clear_has_proc(t);
	} else {
		if (v != proc_from_task) {
			panic("set_bsdtask_info trying to set random bsd_info %p", v);
		}
		task_set_has_proc(t);
	}
}
134 
/* Panic helper: a thread's RO object must point back at its owning thread. */
__abortlike
static void
__thread_ro_circularity_panic(thread_t th, thread_ro_t tro)
{
	panic("tro %p points back to %p instead of %p", tro, tro->tro_owner, th);
}

/*
 * Fast accessor for the thread's read-only data: performs no zone or
 * ownership validation. Use only where the thread pointer is trusted.
 */
__attribute__((always_inline))
thread_ro_t
get_thread_ro_unchecked(thread_t th)
{
	return th->t_tro;
}
148 
/*
 * Validated accessor for the thread's read-only data: checks that the
 * pointer lives in the thread_ro zone and that it points back at its
 * owner, panicking on any mismatch (defense against pointer forgery).
 */
thread_ro_t
get_thread_ro(thread_t th)
{
	thread_ro_t tro = th->t_tro;

	zone_require_ro(ZONE_ID_THREAD_RO, sizeof(struct thread_ro), tro);
	if (tro->tro_owner != th) {
		__thread_ro_circularity_panic(th, tro);
	}
	return tro;
}

/* Unvalidated RO data for the calling thread. */
__attribute__((always_inline))
thread_ro_t
current_thread_ro_unchecked(void)
{
	return get_thread_ro_unchecked(current_thread());
}

/* Validated RO data for the calling thread. */
thread_ro_t
current_thread_ro(void)
{
	return get_thread_ro(current_thread());
}

/*
 * Zero the tro_proc field of a thread's read-only data via the
 * read-only-zone write gate (RO memory cannot be stored to directly).
 */
void
clear_thread_ro_proc(thread_t th)
{
	thread_ro_t tro = get_thread_ro(th);

	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_proc);
}
181 
/*
 * The uthread is co-allocated immediately after the struct thread;
 * convert between the two by fixed offset. These two functions must
 * stay exact inverses of each other.
 */
struct uthread *
get_bsdthread_info(thread_t th)
{
	return (struct uthread *)((uintptr_t)th + sizeof(struct thread));
}

/* Inverse of get_bsdthread_info(): recover the thread from its uthread. */
thread_t
get_machthread(struct uthread *uth)
{
	return (struct thread *)((uintptr_t)uth - sizeof(struct thread));
}
193 
194 /*
195  * This is used to remember any FS error from VNOP_PAGEIN code when
196  * invoked under vm_fault(). The value is an errno style value. It can
197  * be retrieved by exception handlers using thread_get_state().
198  */
void
set_thread_pagein_error(thread_t th, int error)
{
	assert(th == current_thread());
	/*
	 * First error wins: only overwrite when clearing (error == 0) or
	 * when no error has been recorded yet.
	 */
	if (error == 0 || th->t_pagein_error == 0) {
		th->t_pagein_error = error;
	}
}

#if defined(__x86_64__)
/*
 * Returns non-zero if the thread has a non-NULL task
 * and that task has an LDT.
 */
int
thread_task_has_ldt(thread_t th)
{
	task_t task = get_threadtask(th);
	return task && task->i386_ldt != 0;
}
#endif /* __x86_64__ */
220 
221 /*
222  * XXX
223  */
int get_thread_lock_count(thread_t th);         /* forced forward */
/* Number of mutexes currently held by the thread (diagnostic counter). */
int
get_thread_lock_count(thread_t th)
{
	return th->mutex_count;
}

/*
 * XXX: wait for BSD to  fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t
get_firstthread(task_t task)
{
	thread_t        thread = (thread_t)(void *)queue_first(&task->threads);

	/* An empty thread queue yields the queue head sentinel, not a thread. */
	if (queue_end(&task->threads, (queue_entry_t)thread)) {
		thread = THREAD_NULL;
	}

	/* Re-check activity after the read, per the comment above. */
	if (!task->active) {
		return THREAD_NULL;
	}

	return thread;
}
252 
/*
 * Find a thread in the task suitable for signal delivery: active and
 * not unsafely aborted. Optionally (setast) mark it with the BSD AST
 * so it will process the signal. Returns the chosen thread (mutex
 * dropped before return) through result_out, or KERN_FAILURE if the
 * task is inactive or no eligible thread exists.
 */
kern_return_t
get_signalact(
	task_t          task,
	thread_t        *result_out,
	int                     setast)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_t                inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	/* Walk the task's thread list; keep the first eligible thread locked. */
	for (inc  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		/* Set the AST while still holding the thread mutex. */
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}
301 
302 
/*
 * Like get_signalact(), but checks whether one *specific* thread of
 * the task is eligible for signal delivery (active, not unsafely
 * aborted). Optionally sets the BSD AST on it. Returns KERN_SUCCESS
 * only if the thread was found in the task and is eligible.
 */
kern_return_t
check_actforsig(
	task_t                  task,
	thread_t                thread,
	int                             setast)
{
	kern_return_t   result = KERN_FAILURE;
	thread_t                inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	/* Confirm the thread still belongs to this task before touching it. */
	for (inc  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			/* On success, leave the mutex held for the AST below. */
			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}
350 
/* Accessor for the task's resource ledger. */
ledger_t
get_task_ledger(task_t t)
{
	return t->ledger;
}

/*
 * This is only safe to call from a thread executing in
 * in the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t
get_task_map(task_t t)
{
	return t->map;
}
368 
/*
 * Return the task's VM map with a reference taken, or VM_MAP_NULL for
 * a NULL or inactive task. The task lock guarantees the map cannot be
 * swapped out from under us while we take the reference; the caller
 * must drop the reference with vm_map_deallocate().
 */
vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference(m);
	task_unlock(t);
	return m;
}

/* Accessor for the task's IPC space. */
ipc_space_t
get_task_ipcspace(task_t t)
{
	return t->itk_space;
}

/* Number of threads currently in the task. */
int
get_task_numacts(task_t t)
{
	return t->thread_count;
}
403 
/*
 * Does the current task need the 64-bit register set for its signal
 * handlers? Non-zero when the task uses 64-bit data.
 */
int
is_64signalregset(void)
{
	return task_has_64Bit_data(current_task()) ? 1 : 0;
}
414 
415 /*
416  * Swap in a new map for the task/thread pair; the old map reference is
417  * returned. Also does a pmap switch if thread provided is current thread.
418  */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	/* Only switch the pmap if we are swapping the current thread's map. */
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != get_threadtask(thread)) {
		panic("swap_task_map");
	}

	task_lock(task);
	/*
	 * Preemption stays disabled across the map swap and pmap switch so
	 * the cpu_number() used for the switch cannot change underneath us.
	 */
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

	/* Caller owns the returned (old) map reference. */
	return old_map;
}
444 
445 /*
446  *
447  * This is only safe to call from a thread executing in
448  * in the task's context or if the task is locked. Otherwise,
449  * the map could be switched for the task (and freed) before
450  * we go to return it here.
451  */
/*
 * Physical map backing the task's VM map. Same safety caveat as
 * get_task_map(): call only from the task's context or with the
 * task locked.
 */
pmap_t
get_task_pmap(task_t t)
{
	return t->map->pmap;
}

/* Resident (physical) memory balance of the task, in bytes. */
uint64_t
get_task_resident_size(task_t task)
{
	uint64_t val;

	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
	return val;
}

/* Bytes of the task's internal memory held in the compressor. */
uint64_t
get_task_compressed(task_t task)
{
	uint64_t val;

	ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t *) &val);
	return val;
}

/* Lifetime peak of the task's resident memory, in bytes. */
uint64_t
get_task_resident_max(task_t task)
{
	uint64_t val;

	ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
	return val;
}
487 
/*
 * Get the balance for a given field in the task ledger.
 * Returns 0 if the entry is invalid (ledger_get_balance leaves the
 * preset 0 untouched on failure).
 */
static uint64_t
get_task_ledger_balance(task_t task, int entry)
{
	ledger_amount_t balance = 0;

	ledger_get_balance(task->ledger, entry, &balance);
	return balance;
}
500 
501 uint64_t
get_task_purgeable_size(task_t task)502 get_task_purgeable_size(task_t task)
503 {
504 	kern_return_t ret;
505 	ledger_amount_t balance = 0;
506 	uint64_t volatile_size = 0;
507 
508 	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
509 	if (ret != KERN_SUCCESS) {
510 		return 0;
511 	}
512 
513 	volatile_size += balance;
514 
515 	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
516 	if (ret != KERN_SUCCESS) {
517 		return 0;
518 	}
519 
520 	volatile_size += balance;
521 
522 	return volatile_size;
523 }
524 
525 /*
526  *
527  */
/* Current physical footprint of the task, in bytes (0 on ledger error). */
uint64_t
get_task_phys_footprint(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.phys_footprint);
}

#if CONFIG_LEDGER_INTERVAL_MAX
/*
 * Peak physical footprint over the current sampling interval; if
 * `reset` is non-zero the interval maximum is restarted. Returns 0 on
 * ledger error.
 */
uint64_t
get_task_phys_footprint_interval_max(task_t task, int reset)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
553 
554 /*
555  *
556  */
557 uint64_t
get_task_phys_footprint_lifetime_max(task_t task)558 get_task_phys_footprint_lifetime_max(task_t task)
559 {
560 	kern_return_t ret;
561 	ledger_amount_t max;
562 
563 	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);
564 
565 	if (KERN_SUCCESS == ret) {
566 		return max;
567 	}
568 
569 	return 0;
570 }
571 
572 /*
573  *
574  */
575 uint64_t
get_task_phys_footprint_limit(task_t task)576 get_task_phys_footprint_limit(task_t task)
577 {
578 	kern_return_t ret;
579 	ledger_amount_t max;
580 
581 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
582 	if (KERN_SUCCESS == ret) {
583 		return max;
584 	}
585 
586 	return 0;
587 }
588 
/*
 * The getters below each report a single task-ledger balance in bytes;
 * all return 0 if the ledger entry is invalid.
 */

/* Internal (anonymous) memory. */
uint64_t
get_task_internal(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal);
}

/* Internal memory held in the compressor. */
uint64_t
get_task_internal_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal_compressed);
}

/* Nonvolatile purgeable memory. */
uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
}

/* Nonvolatile purgeable memory in the compressor. */
uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
}

/* Alternate-accounting memory. */
uint64_t
get_task_alternate_accounting(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
}

/* Alternate-accounting memory in the compressor. */
uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
}

/* Memory consumed by the task's page tables. */
uint64_t
get_task_page_table(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.page_table);
}

#if CONFIG_FREEZE
/* Frozen memory written out to swap. */
uint64_t
get_task_frozen_to_swap(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
}
#endif /* CONFIG_FREEZE */

/* Memory mapped by IOKit on behalf of the task. */
uint64_t
get_task_iokit_mapped(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
}

/* Nonvolatile networking memory. */
uint64_t
get_task_network_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
}

/* Nonvolatile networking memory in the compressor. */
uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
}

/* Wired (non-pageable) memory. */
uint64_t
get_task_wired_mem(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.wired_mem);
}
662 
/*
 * The getters below each compute a net ledger value (credit - debit)
 * for one of the tagged/media/graphics/neural footprint entries, in
 * bytes. All return 0 if the ledger entry cannot be read.
 */

uint64_t
get_task_tagged_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_tagged_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_media_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_media_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_graphics_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_graphics_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_neural_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_neural_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}
775 
/* Accumulated CPU time ledger balance for the task. */
uint64_t
get_task_cpu_time(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.cpu_time);
}

/* Read the task's kext load tag (lock-free, relaxed atomic). */
uint32_t
get_task_loadTag(task_t task)
{
	return os_atomic_load(&task->loadTag, relaxed);
}

/* Set the task's load tag; returns the previous value (atomic exchange). */
uint32_t
set_task_loadTag(task_t task, uint32_t loadTag)
{
	return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
}
793 
794 
/* Task owning the given thread, via its validated read-only data. */
task_t
get_threadtask(thread_t th)
{
	return get_thread_ro(th)->tro_task;
}

/*
 * Boot-safe variant of get_threadtask(): before early boot the thread
 * or its RO data may not exist yet, in which case TASK_NULL is returned
 * instead of dereferencing through them.
 */
task_t
get_threadtask_early(thread_t th)
{
	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
		if (th == THREAD_NULL || th->t_tro == NULL) {
			return TASK_NULL;
		}
	}
	return get_threadtask(th);
}
811 
812 /*
813  *
814  */
/* Lowest valid offset in a VM map. */
vm_map_offset_t
get_map_min(
	vm_map_t        map)
{
	return vm_map_min(map);
}

/* Highest valid offset in a VM map. */
vm_map_offset_t
get_map_max(
	vm_map_t        map)
{
	return vm_map_max(map);
}
/* Adjusted (user-visible) size of a VM map, in bytes. */
vm_map_size_t
get_vmmap_size(
	vm_map_t        map)
{
	return vm_map_adjusted_size(map);
}
/* VM page size used by the task's address space map. */
int
get_task_page_size(
	task_t task)
{
	return vm_map_page_size(task->map);
}
843 
#if CONFIG_COREDUMP

/*
 * Count map entries of a submap that overlap [start, end), descending
 * recursively into nested submaps. Locking is skipped when running
 * under the kernel debugger (not_in_kdp == 0), where taking locks is
 * unsafe.
 */
static int
get_vmsubmap_entries(
	vm_map_t        map,
	vm_object_offset_t      start,
	vm_object_offset_t      end)
{
	int     total_entries = 0;
	vm_map_entry_t  entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	/* Skip entries entirely below the window of interest. */
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}

/*
 * Count all map entries in a map, expanding submaps to their
 * constituent entries (used to size core dumps). Same kdp locking
 * caveat as above.
 */
int
get_vmmap_entries(
	vm_map_t        map)
{
	int     total_entries = 0;
	vm_map_entry_t  entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
#endif /* CONFIG_COREDUMP */
913 
914 /*
915  *
916  */
917 /*
918  *
919  */
/* Number of outstanding user-level stop requests on the task. */
int
get_task_userstop(
	task_t task)
{
	return task->user_stop_count;
}

/* Number of outstanding user-level stop requests on the thread. */
int
get_thread_userstop(
	thread_t th)
{
	return th->user_stop_count;
}
936 
937 /*
938  *
939  */
/* Whether the task has been suspended via pid_suspend(). */
boolean_t
get_task_pidsuspended(
	task_t task)
{
	return task->pidsuspended;
}

/* Whether the task is currently frozen. */
boolean_t
get_task_frozen(
	task_t task)
{
	return task->frozen;
}

/*
 * True only for an unsafe abort (TH_SFLAG_ABORT set without the
 * ABORTSAFELY qualifier); safe aborts do not count here.
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}
966 
967 /*
968  * This routine is like thread_should_abort() above.  It checks to
969  * see if the current thread is aborted.  But unlike above, it also
970  * checks to see if thread is safely aborted.  If so, it returns
971  * that fact, and clears the condition (safe aborts only should
972  * have a single effect, and a poll of the abort status
973  * qualifies.
974  */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	/* Unsafe abort on an interruptible thread: report it, leave flags set. */
	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	/*
	 * Safe abort: one-shot semantics — clear it under the thread lock
	 * (re-checked after acquiring, as it may have changed) and report
	 * not-aborted.
	 */
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
997 
998 /*
999  *
1000  */
/*
 * Invoke func_callback(thread, func_arg) on every thread of the task,
 * holding the task lock for the duration — the callback must not block
 * or take locks that violate task-lock ordering.
 */
void
task_act_iterate_wth_args(
	task_t                  task,
	void                    (*func_callback)(thread_t, void *),
	void                    *func_arg)
{
	thread_t        inc;

	task_lock(task);

	for (inc  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
1019 
1020 #include <sys/bsdtask_info.h>
1021 
/*
 * Populate a proc_taskinfo_internal snapshot for proc_info(2):
 * VM sizes, scheduling policy, per-thread counters summed under the
 * task lock, CPU times from recount, and assorted task counters
 * (saturated to INT32_MAX where the external field is 32-bit).
 */
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t   tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size  = vm_map_adjusted_size(map);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &ptinfo->pti_resident_size);

	ptinfo->pti_policy = ((task != kernel_task)?
	    POLICY_TIMESHARE: POLICY_RR);

	/* Sum live per-thread counters; idle threads are excluded. */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	/* Live-thread times = totals minus times of already-terminated threads. */
	struct recount_times_mach term_times = recount_task_terminated_times(task);
	struct recount_times_mach total_times = recount_task_times(task);

	tinfo.threads_user = total_times.rtm_user - term_times.rtm_user;
	tinfo.threads_system = total_times.rtm_system - term_times.rtm_system;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_total_system = total_times.rtm_system;
	ptinfo->pti_total_user = total_times.rtm_user;

	/* 64-bit counters clamped into the 32-bit external fields. */
	ptinfo->pti_faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
	ptinfo->pti_pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
	ptinfo->pti_cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
	ptinfo->pti_messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
	ptinfo->pti_messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
	ptinfo->pti_syscalls_mach = (int32_t) MIN(task->syscalls_mach + syscalls_mach, INT32_MAX);
	ptinfo->pti_syscalls_unix = (int32_t) MIN(task->syscalls_unix + syscalls_unix, INT32_MAX);
	ptinfo->pti_csw = (int32_t) MIN(task->c_switch + cswitch, INT32_MAX);
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
1089 
/*
 * Fill a proc_threadinfo_internal for the thread of `task` identified
 * by `thaddr` — either its unique thread_id (thuniqueid) or its
 * cthread_self user TSD handle. Optionally reports the thread's
 * working directory via vpp/vidp. Returns 0 on success, 1 if the
 * thread was not found or thread_info failed.
 */
int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t  thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			/* Convert the sec/usec pairs to nanoseconds. */
			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if (vpp != NULL) {
				bsd_threadcdir(get_bsdthread_info(thact), vpp, vidp);
			}
			bsd_getthreadname(get_bsdthread_info(thact), ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	/* Fell off the list: no thread matched thaddr. */
	err = 1;

out:
	task_unlock(task);
	return err;
}
1138 
1139 int
fill_taskthreadlist(task_t task,void * buffer,int thcount,bool thuniqueid)1140 fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
1141 {
1142 	int numthr = 0;
1143 	thread_t thact;
1144 	uint64_t * uptr;
1145 	uint64_t  thaddr;
1146 
1147 	uptr = (uint64_t *)buffer;
1148 
1149 	task_lock(task);
1150 
1151 	for (thact  = (thread_t)(void *)queue_first(&task->threads);
1152 	    !queue_end(&task->threads, (queue_entry_t)thact);) {
1153 		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
1154 		*uptr++ = thaddr;
1155 		numthr++;
1156 		if (numthr >= thcount) {
1157 			goto out;
1158 		}
1159 		thact = (thread_t)(void *)queue_next(&thact->task_threads);
1160 	}
1161 
1162 out:
1163 	task_unlock(task);
1164 	return (int)(numthr * sizeof(uint64_t));
1165 }
1166 
1167 int
fill_taskthreadschedinfo(task_t task,uint64_t thread_id,struct proc_threadschedinfo_internal * thread_sched_info)1168 fill_taskthreadschedinfo(task_t task, uint64_t thread_id, struct proc_threadschedinfo_internal *thread_sched_info)
1169 {
1170 	int err = 0;
1171 
1172 	thread_t thread = current_thread();
1173 
1174 	/*
1175 	 * Looking up threads is pretty expensive and not realtime-safe
1176 	 * right now, requiring locking the task and iterating over all
1177 	 * threads. As long as that is the case, we officially only
1178 	 * support getting this info for the current thread.
1179 	 */
1180 	if (task != current_task() || thread_id != thread->thread_id) {
1181 		return -1;
1182 	}
1183 
1184 #if SCHED_HYGIENE_DEBUG
1185 	absolutetime_to_nanoseconds(thread->machine.int_time_mt, &thread_sched_info->int_time_ns);
1186 #else
1187 	(void)thread;
1188 	thread_sched_info->int_time_ns = 0;
1189 #endif
1190 
1191 	return err;
1192 }
1193 
1194 int
get_numthreads(task_t task)1195 get_numthreads(task_t task)
1196 {
1197 	return task->thread_count;
1198 }
1199 
1200 /*
1201  * Gather the various pieces of info about the designated task,
1202  * and collect it all into a single rusage_info.
1203  */
1204 int
fill_task_rusage(task_t task,rusage_info_current * ri)1205 fill_task_rusage(task_t task, rusage_info_current *ri)
1206 {
1207 	struct task_power_info powerinfo;
1208 
1209 	assert(task != TASK_NULL);
1210 	task_lock(task);
1211 
1212 	struct task_power_info_extra extra = { 0 };
1213 	task_power_info_locked(task, &powerinfo, NULL, NULL, &extra);
1214 	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
1215 	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
1216 	ri->ri_user_time = powerinfo.total_user;
1217 	ri->ri_system_time = powerinfo.total_system;
1218 	ri->ri_runnable_time = extra.runnable_time;
1219 	ri->ri_cycles = extra.cycles;
1220 	ri->ri_instructions = extra.instructions;
1221 	ri->ri_pcycles = extra.pcycles;
1222 	ri->ri_pinstructions = extra.pinstructions;
1223 	ri->ri_user_ptime = extra.user_ptime;
1224 	ri->ri_system_ptime = extra.system_ptime;
1225 	ri->ri_energy_nj = extra.energy;
1226 	ri->ri_penergy_nj = extra.penergy;
1227 
1228 	ri->ri_phys_footprint = get_task_phys_footprint(task);
1229 	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
1230 	    (ledger_amount_t *)&ri->ri_resident_size);
1231 	ri->ri_wired_size = get_task_wired_mem(task);
1232 
1233 	ri->ri_pageins = counter_load(&task->pageins);
1234 
1235 	task_unlock(task);
1236 	return 0;
1237 }
1238 
/*
 * Fill in the bank-billed and bank-serviced system time / energy
 * fields of `ri` for the given task.
 *
 * NOTE(review): `task` is annotated __unused but is passed to both
 * calls below — presumably the annotation covers configurations where
 * the bank_*_safe() calls compile away to nothing; verify against the
 * bank subsystem's stubs before removing it.
 */
void
fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
{
	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
}
1245 
1246 int
fill_task_io_rusage(task_t task,rusage_info_current * ri)1247 fill_task_io_rusage(task_t task, rusage_info_current *ri)
1248 {
1249 	assert(task != TASK_NULL);
1250 	task_lock(task);
1251 
1252 	if (task->task_io_stats) {
1253 		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
1254 		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
1255 	} else {
1256 		/* I/O Stats unavailable */
1257 		ri->ri_diskio_bytesread = 0;
1258 		ri->ri_diskio_byteswritten = 0;
1259 	}
1260 	task_unlock(task);
1261 	return 0;
1262 }
1263 
1264 int
fill_task_qos_rusage(task_t task,rusage_info_current * ri)1265 fill_task_qos_rusage(task_t task, rusage_info_current *ri)
1266 {
1267 	thread_t thread;
1268 
1269 	assert(task != TASK_NULL);
1270 	task_lock(task);
1271 
1272 	/* Rollup QoS time of all the threads to task */
1273 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
1274 		if (thread->options & TH_OPT_IDLE_THREAD) {
1275 			continue;
1276 		}
1277 
1278 		thread_update_qos_cpu_time(thread);
1279 	}
1280 	ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
1281 	ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
1282 	ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
1283 	ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
1284 	ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
1285 	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
1286 	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
1287 
1288 	task_unlock(task);
1289 	return 0;
1290 }
1291 
1292 uint64_t
get_task_logical_writes(task_t task,bool external)1293 get_task_logical_writes(task_t task, bool external)
1294 {
1295 	assert(task != TASK_NULL);
1296 	struct ledger_entry_info lei;
1297 	int entry = external ? task_ledgers.logical_writes_to_external :
1298 	    task_ledgers.logical_writes;
1299 
1300 	task_lock(task);
1301 	ledger_get_entry_info(task->ledger, entry, &lei);
1302 	task_unlock(task);
1303 
1304 	return lei.lei_balance;
1305 }
1306 
1307 uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)1308 get_task_dispatchqueue_serialno_offset(task_t task)
1309 {
1310 	uint64_t dq_serialno_offset = 0;
1311 	void *bsd_info = get_bsdtask_info(task);
1312 
1313 	if (bsd_info) {
1314 		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(bsd_info);
1315 	}
1316 
1317 	return dq_serialno_offset;
1318 }
1319 
1320 uint64_t
get_task_dispatchqueue_label_offset(task_t task)1321 get_task_dispatchqueue_label_offset(task_t task)
1322 {
1323 	uint64_t dq_label_offset = 0;
1324 	void *bsd_info = get_bsdtask_info(task);
1325 
1326 	if (bsd_info) {
1327 		dq_label_offset = get_dispatchqueue_label_offset_from_proc(bsd_info);
1328 	}
1329 
1330 	return dq_label_offset;
1331 }
1332 
1333 uint64_t
get_task_uniqueid(task_t task)1334 get_task_uniqueid(task_t task)
1335 {
1336 	void *bsd_info = get_bsdtask_info(task);
1337 
1338 	if (bsd_info) {
1339 		return proc_uniqueid_task(bsd_info, task);
1340 	} else {
1341 		return UINT64_MAX;
1342 	}
1343 }
1344 
1345 int
get_task_version(task_t task)1346 get_task_version(task_t task)
1347 {
1348 	void *bsd_info = get_bsdtask_info(task);
1349 
1350 	if (bsd_info) {
1351 		return proc_pidversion(bsd_info);
1352 	} else {
1353 		return INT_MAX;
1354 	}
1355 }
1356 
#if CONFIG_MACF
/* Return the MAC crash label attached to the task (no locking). */
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}

/*
 * Attach a MAC crash label to the task (no locking; ownership of any
 * previous label is not handled here — presumably the caller manages
 * the label lifecycle; verify against callers).
 */
void
set_task_crash_label(task_t task, struct label *label)
{
	task->crash_label = label;
}
#endif
1370 
1371 int
fill_taskipctableinfo(task_t task,uint32_t * table_size,uint32_t * table_free)1372 fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
1373 {
1374 	ipc_space_t space = task->itk_space;
1375 	if (space == NULL) {
1376 		return -1;
1377 	}
1378 
1379 	is_read_lock(space);
1380 	if (!is_active(space)) {
1381 		is_read_unlock(space);
1382 		return -1;
1383 	}
1384 
1385 	*table_size = ipc_entry_table_count(is_active_table(space));
1386 	*table_free = space->is_table_free;
1387 
1388 	is_read_unlock(space);
1389 
1390 	return 0;
1391 }
1392 
1393 int
get_task_cdhash(task_t task,char cdhash[static CS_CDHASH_LEN])1394 get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
1395 {
1396 	int result = 0;
1397 	void *bsd_info = NULL;
1398 
1399 	task_lock(task);
1400 	bsd_info = get_bsdtask_info(task);
1401 	result = bsd_info ? proc_getcdhash(bsd_info, cdhash) : ESRCH;
1402 	task_unlock(task);
1403 
1404 	return result;
1405 }
1406 
/* moved from ubc_subr.c */
/*
 * Translate a Mach kern_return_t into the nearest BSD errno.
 * Unmapped codes (including KERN_FAILURE) degrade to EIO.
 */
int
mach_to_bsd_errno(kern_return_t mach_err)
{
	switch (mach_err) {
	case KERN_SUCCESS:
		return 0;

	/* Bad arguments, names, rights, or policies -> EINVAL. */
	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
	case KERN_NOT_IN_SET:
	case KERN_INVALID_NAME:
	case KERN_INVALID_TASK:
	case KERN_INVALID_RIGHT:
	case KERN_INVALID_VALUE:
	case KERN_INVALID_CAPABILITY:
	case KERN_INVALID_HOST:
	case KERN_MEMORY_PRESENT:
	case KERN_INVALID_PROCESSOR_SET:
	case KERN_INVALID_POLICY:
	case KERN_ALREADY_WAITING:
	case KERN_DEFAULT_SET:
	case KERN_EXCEPTION_PROTECTED:
	case KERN_INVALID_LEDGER:
	case KERN_INVALID_MEMORY_CONTROL:
	case KERN_INVALID_SECURITY:
	case KERN_NOT_DEPRESSED:
	case KERN_LOCK_OWNED:
	case KERN_LOCK_OWNED_SELF:
		return EINVAL;

	/* Access / permission style refusals. */
	case KERN_NOT_RECEIVER:
	case KERN_NO_ACCESS:
	case KERN_POLICY_STATIC:
		return EACCES;

	/* Resource exhaustion. */
	case KERN_NO_SPACE:
	case KERN_RESOURCE_SHORTAGE:
	case KERN_UREFS_OVERFLOW:
	case KERN_INVALID_OBJECT:
		return ENOMEM;

	/* Memory access failures. */
	case KERN_MEMORY_FAILURE:
	case KERN_MEMORY_ERROR:
	case KERN_PROTECTION_FAILURE:
		return EFAULT;

	/* Operations denied by policy or code signing. */
	case KERN_POLICY_LIMIT:
	case KERN_CODESIGN_ERROR:
	case KERN_DENIED:
		return EPERM;

	/* The name/right/member already exists. */
	case KERN_ALREADY_IN_SET:
	case KERN_NAME_EXISTS:
	case KERN_RIGHT_EXISTS:
		return EEXIST;

	case KERN_ABORTED:
		return EINTR;

	/* The target object is gone or was never there. */
	case KERN_TERMINATED:
	case KERN_LOCK_SET_DESTROYED:
	case KERN_LOCK_UNSTABLE:
	case KERN_SEMAPHORE_DESTROYED:
	case KERN_NOT_FOUND:
	case KERN_NOT_WAITING:
		return ENOENT;

	case KERN_RPC_SERVER_TERMINATED:
		return ECONNRESET;

	case KERN_NOT_SUPPORTED:
		return ENOTSUP;

	case KERN_NODE_DOWN:
		return ENETDOWN;

	case KERN_OPERATION_TIMED_OUT:
		return ETIMEDOUT;

	default:
		return EIO; /* 5 == KERN_FAILURE */
	}
}
1491 
1492 kern_return_t
bsd_to_mach_failure(int bsd_err)1493 bsd_to_mach_failure(int bsd_err)
1494 {
1495 	switch (bsd_err) {
1496 	case EIO:
1497 	case EACCES:
1498 	case ENOMEM:
1499 	case EFAULT:
1500 		return KERN_MEMORY_ERROR;
1501 
1502 	case EINVAL:
1503 		return KERN_INVALID_ARGUMENT;
1504 
1505 	case ETIMEDOUT:
1506 	case EBUSY:
1507 		return KERN_OPERATION_TIMED_OUT;
1508 
1509 	case ECONNRESET:
1510 		return KERN_RPC_SERVER_TERMINATED;
1511 
1512 	case ENOTSUP:
1513 		return KERN_NOT_SUPPORTED;
1514 
1515 	case ENETDOWN:
1516 		return KERN_NODE_DOWN;
1517 
1518 	case ENOENT:
1519 		return KERN_NOT_FOUND;
1520 
1521 	case EINTR:
1522 		return KERN_ABORTED;
1523 
1524 	case EPERM:
1525 		return KERN_DENIED;
1526 
1527 	case EEXIST:
1528 		return KERN_ALREADY_IN_SET;
1529 
1530 	default:
1531 		return KERN_FAILURE;
1532 	}
1533 }
1534