xref: /xnu-8020.140.41/osfmk/kern/bsd_kern.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <mach/mach_types.h>
29 #include <mach/machine/vm_param.h>
30 #include <mach/task.h>
31 
32 #include <kern/kern_types.h>
33 #include <kern/ledger.h>
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/spl.h>
38 #include <kern/ast.h>
39 #include <ipc/ipc_port.h>
40 #include <ipc/ipc_object.h>
41 #include <vm/vm_map.h>
42 #include <vm/vm_kern.h>
43 #include <vm/pmap.h>
44 #include <vm/vm_protos.h> /* last */
45 #include <sys/resource.h>
46 #include <sys/signal.h>
47 #include <sys/errno.h>
48 #include <sys/proc_require.h>
49 
50 #if MONOTONIC
51 #include <kern/monotonic.h>
52 #include <machine/monotonic.h>
53 #endif /* MONOTONIC */
54 
55 #include <machine/limits.h>
56 #include <sys/codesign.h> /* CS_CDHASH_LEN */
57 
58 #undef thread_should_halt
59 
60 /* BSD KERN COMPONENT INTERFACE */
61 
62 extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
63 
64 thread_t get_firstthread(task_t);
65 int get_task_userstop(task_t);
66 int get_thread_userstop(thread_t);
67 boolean_t current_thread_aborted(void);
68 void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
69 kern_return_t get_signalact(task_t, thread_t *, int);
70 int fill_task_rusage(task_t task, rusage_info_current *ri);
71 int fill_task_io_rusage(task_t task, rusage_info_current *ri);
72 int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
73 void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri);
74 uint64_t get_task_logical_writes(task_t task, bool external);
75 void fill_task_billed_usage(task_t task, rusage_info_current *ri);
76 void task_bsdtask_kill(task_t);
77 
78 extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
79 extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
80 extern uint64_t proc_uniqueid_task(void *p, void *t);
81 extern int proc_pidversion(void *p);
82 extern int proc_getcdhash(void *p, char *cdhash);
83 
84 int mach_to_bsd_errno(kern_return_t mach_err);
85 kern_return_t bsd_to_mach_failure(int bsd_err);
86 
87 #if MACH_BSD
88 extern void psignal(void *, int);
89 #endif
90 
91 /*
92  *
93  */
94 void  *
get_bsdtask_info(task_t t)95 get_bsdtask_info(task_t t)
96 {
97 	proc_require(t->bsd_info, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_KERNPROC);
98 	return t->bsd_info;
99 }
100 
101 void
task_bsdtask_kill(task_t t)102 task_bsdtask_kill(task_t t)
103 {
104 	void * bsd_info = get_bsdtask_info(t);
105 	if (bsd_info != NULL) {
106 		psignal(bsd_info, SIGKILL);
107 	}
108 }
109 /*
110  *
111  */
112 void *
get_bsdthreadtask_info(thread_t th)113 get_bsdthreadtask_info(thread_t th)
114 {
115 	return get_thread_ro(th)->tro_proc;
116 }
117 
118 /*
119  *
120  */
/*
 * Attach (or clear, with NULL) the BSD proc pointer for a task.
 */
void
set_bsdtask_info(task_t t, void * v)
{
	t->bsd_info = v;
}
126 
/*
 * Panic helper: a thread's read-only data must point back at its
 * owning thread; called when that back-pointer is inconsistent.
 */
__abortlike
static void
__thread_ro_circularity_panic(thread_t th, thread_ro_t tro)
{
	panic("tro %p points back to %p instead of %p", tro, tro->tro_owner, th);
}
133 
/*
 * Return a thread's read-only data without any integrity checks
 * (fast path; see get_thread_ro() for the validated variant).
 */
__attribute__((always_inline))
thread_ro_t
get_thread_ro_unchecked(thread_t th)
{
	return th->t_tro;
}
140 
/*
 * Return a thread's read-only data, verifying that the pointer lives
 * in the thread_ro zone and that it points back at the given thread.
 * Panics on any inconsistency.
 */
thread_ro_t
get_thread_ro(thread_t th)
{
	thread_ro_t tro = th->t_tro;

	zone_require_ro(ZONE_ID_THREAD_RO, sizeof(struct thread_ro), tro);
	if (tro->tro_owner != th) {
		__thread_ro_circularity_panic(th, tro);
	}
	return tro;
}
152 
/*
 * Unchecked read-only data for the current thread.
 */
__attribute__((always_inline))
thread_ro_t
current_thread_ro_unchecked(void)
{
	return get_thread_ro_unchecked(current_thread());
}
159 
/*
 * Validated read-only data for the current thread.
 */
thread_ro_t
current_thread_ro(void)
{
	return get_thread_ro(current_thread());
}
165 
/*
 * Clear the proc back-pointer in a thread's read-only data
 * (read-only zone fields must be cleared through zalloc_ro_*).
 */
void
clear_thread_ro_proc(thread_t th)
{
	thread_ro_t tro = get_thread_ro(th);

	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_proc);
}
173 
/*
 * Return the uthread co-allocated immediately after the Mach thread
 * structure.  Relies on that fixed allocation layout.
 */
struct uthread *
get_bsdthread_info(thread_t th)
{
	return (struct uthread *)((uintptr_t)th + sizeof(struct thread));
}
179 
/*
 * Inverse of get_bsdthread_info(): recover the Mach thread that
 * precedes the given uthread in the same allocation.
 */
thread_t
get_machthread(struct uthread *uth)
{
	return (struct thread *)((uintptr_t)uth - sizeof(struct thread));
}
185 
186 /*
187  * This is used to remember any FS error from VNOP_PAGEIN code when
188  * invoked under vm_fault(). The value is an errno style value. It can
189  * be retrieved by exception handlers using thread_get_state().
190  */
void
set_thread_pagein_error(thread_t th, int error)
{
	assert(th == current_thread());
	/*
	 * Only record the first error; an explicit 0 always clears.
	 * A later failure must not overwrite the original cause.
	 */
	if (error == 0 || th->t_pagein_error == 0) {
		th->t_pagein_error = error;
	}
}
199 
200 #if defined(__x86_64__)
201 /*
202  * Returns non-zero if the thread has a non-NULL task
203  * and that task has an LDT.
204  */
205 int
thread_task_has_ldt(thread_t th)206 thread_task_has_ldt(thread_t th)
207 {
208 	task_t task = get_threadtask(th);
209 	return task && task->i386_ldt != 0;
210 }
211 #endif /* __x86_64__ */
212 
213 /*
214  * XXX
215  */
int get_thread_lock_count(thread_t th);         /* forced forward */
/*
 * Number of mutexes currently held by the thread.
 */
int
get_thread_lock_count(thread_t th)
{
	return th->mutex_count;
}
222 
223 /*
224  * XXX: wait for BSD to  fix signal code
225  * Until then, we cannot block here.  We know the task
226  * can't go away, so we make sure it is still active after
227  * retrieving the first thread for extra safety.
228  */
thread_t
get_firstthread(task_t task)
{
	/* Unlocked walk: see the caveat in the comment above. */
	thread_t        thread = (thread_t)(void *)queue_first(&task->threads);

	/* Empty thread list: queue_first() returned the queue head itself. */
	if (queue_end(&task->threads, (queue_entry_t)thread)) {
		thread = THREAD_NULL;
	}

	/* Re-check activity after the read, per the comment above. */
	if (!task->active) {
		return THREAD_NULL;
	}

	return thread;
}
244 
/*
 * Find a thread in the task that is eligible to take a signal:
 * active and not in the middle of an unsafe abort.  On success the
 * thread is returned via *result_out (its mutex is dropped before
 * return) and, if setast is set, an AST_BSD is posted to it.
 * Returns KERN_FAILURE if the task is inactive or no eligible
 * thread exists.
 */
kern_return_t
get_signalact(
	task_t          task,
	thread_t        *result_out,
	int                     setast)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_t                inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	/*
	 * Walk the thread list; on a match we break out still holding
	 * the candidate's mutex so it cannot change state under us.
	 */
	for (inc  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	/* *result_out is set even on failure (to THREAD_NULL). */
	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}
293 
294 
/*
 * Check whether a specific thread of the task is eligible to take a
 * signal (active and not unsafely aborted).  On success, optionally
 * posts an AST_BSD to it.  Returns KERN_FAILURE if the task is
 * inactive, the thread is not on the task's list, or the thread is
 * not eligible.
 */
kern_return_t
check_actforsig(
	task_t                  task,
	thread_t                thread,
	int                             setast)
{
	kern_return_t   result = KERN_FAILURE;
	thread_t                inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	/*
	 * Find the requested thread on the task's list.  On success we
	 * break out still holding its mutex; it is dropped below after
	 * the optional AST is posted.
	 */
	for (inc  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}
342 
/*
 * Return the task's resource ledger.
 */
ledger_t
get_task_ledger(task_t t)
{
	return t->ledger;
}
348 
349 /*
350  * This is only safe to call from a thread executing in
351  * in the task's context or if the task is locked. Otherwise,
352  * the map could be switched for the task (and freed) before
353  * we go to return it here.
354  */
vm_map_t
get_task_map(task_t t)
{
	/* Unreferenced return; see safety caveat in the comment above. */
	return t->map;
}
360 
/*
 * Return the task's map with a reference taken, or VM_MAP_NULL if
 * the task is NULL or no longer active.  The task lock is held
 * across the reference so the map cannot be swapped out from under
 * us.  The caller owns the returned reference.
 */
vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference(m);
	task_unlock(t);
	return m;
}
380 
381 /*
382  *
383  */
/*
 * Return the task's IPC space.
 */
ipc_space_t
get_task_ipcspace(task_t t)
{
	return t->itk_space;
}
389 
/*
 * Return the task's current thread count.
 */
int
get_task_numacts(task_t t)
{
	return t->thread_count;
}
395 
396 /* does this machine need  64bit register set for signal handler */
/* does this machine need  64bit register set for signal handler */
int
is_64signalregset(void)
{
	return task_has_64Bit_data(current_task()) ? 1 : 0;
}
406 
407 /*
408  * Swap in a new map for the task/thread pair; the old map reference is
409  * returned. Also does a pmap switch if thread provided is current thread.
410  */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	/* The thread must belong to the task whose map is being swapped. */
	if (task != get_threadtask(thread)) {
		panic("swap_task_map");
	}

	task_lock(task);
	/*
	 * Preemption is disabled so the pmap switch below happens on the
	 * same CPU that observed the map update.
	 */
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	/* Only the current thread needs an immediate pmap switch. */
	if (doswitch) {
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

	/* Caller is responsible for the old map's reference. */
	return old_map;
}
436 
437 /*
438  *
439  * This is only safe to call from a thread executing in
440  * in the task's context or if the task is locked. Otherwise,
441  * the map could be switched for the task (and freed) before
442  * we go to return it here.
443  */
pmap_t
get_task_pmap(task_t t)
{
	/* See safety caveat in the comment above: map may be swapped. */
	return t->map->pmap;
}
449 
450 /*
451  *
452  */
453 uint64_t
get_task_resident_size(task_t task)454 get_task_resident_size(task_t task)
455 {
456 	uint64_t val;
457 
458 	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
459 	return val;
460 }
461 
462 uint64_t
get_task_compressed(task_t task)463 get_task_compressed(task_t task)
464 {
465 	uint64_t val;
466 
467 	ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t *) &val);
468 	return val;
469 }
470 
471 uint64_t
get_task_resident_max(task_t task)472 get_task_resident_max(task_t task)
473 {
474 	uint64_t val;
475 
476 	ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
477 	return val;
478 }
479 
480 /*
481  * Get the balance for a given field in the task ledger.
482  * Returns 0 if the entry is invalid.
483  */
/*
 * Get the balance for a given field in the task ledger.
 * Returns 0 if the entry is invalid.
 */
static uint64_t
get_task_ledger_balance(task_t task, int entry)
{
	/* Pre-zeroed: ledger_get_balance leaves this untouched on failure. */
	ledger_amount_t balance = 0;

	ledger_get_balance(task->ledger, entry, &balance);
	return balance;
}
492 
/*
 * Sum of the task's volatile and volatile-compressed purgeable
 * ledger balances.  Returns 0 if either lookup fails.
 */
uint64_t
get_task_purgeable_size(task_t task)
{
	kern_return_t ret;
	ledger_amount_t balance = 0;
	uint64_t volatile_size = 0;

	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += balance;

	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += balance;

	return volatile_size;
}
516 
517 /*
518  *
519  */
/*
 * Return the task's physical-footprint ledger balance.
 */
uint64_t
get_task_phys_footprint(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.phys_footprint);
}
525 
526 #if CONFIG_LEDGER_INTERVAL_MAX
527 /*
528  *
529  */
530 uint64_t
get_task_phys_footprint_interval_max(task_t task,int reset)531 get_task_phys_footprint_interval_max(task_t task, int reset)
532 {
533 	kern_return_t ret;
534 	ledger_amount_t max;
535 
536 	ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);
537 
538 	if (KERN_SUCCESS == ret) {
539 		return max;
540 	}
541 
542 	return 0;
543 }
544 #endif /* CONFIG_LEDGER_INTERVAL_MAX */
545 
546 /*
547  *
548  */
549 uint64_t
get_task_phys_footprint_lifetime_max(task_t task)550 get_task_phys_footprint_lifetime_max(task_t task)
551 {
552 	kern_return_t ret;
553 	ledger_amount_t max;
554 
555 	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);
556 
557 	if (KERN_SUCCESS == ret) {
558 		return max;
559 	}
560 
561 	return 0;
562 }
563 
564 /*
565  *
566  */
567 uint64_t
get_task_phys_footprint_limit(task_t task)568 get_task_phys_footprint_limit(task_t task)
569 {
570 	kern_return_t ret;
571 	ledger_amount_t max;
572 
573 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
574 	if (KERN_SUCCESS == ret) {
575 		return max;
576 	}
577 
578 	return 0;
579 }
580 
/*
 * Internal (anonymous) memory ledger balance.
 */
uint64_t
get_task_internal(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal);
}
586 
/*
 * Compressed internal memory ledger balance.
 */
uint64_t
get_task_internal_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal_compressed);
}
592 
/*
 * Nonvolatile purgeable memory ledger balance.
 */
uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
}
598 
/*
 * Compressed nonvolatile purgeable memory ledger balance.
 */
uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
}
604 
/*
 * Alternate-accounting ledger balance.
 */
uint64_t
get_task_alternate_accounting(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
}
610 
/*
 * Compressed alternate-accounting ledger balance.
 */
uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
}
616 
/*
 * Page-table ledger balance.
 */
uint64_t
get_task_page_table(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.page_table);
}
622 
623 #if CONFIG_FREEZE
/*
 * Frozen-to-swap ledger balance (CONFIG_FREEZE only).
 */
uint64_t
get_task_frozen_to_swap(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
}
629 #endif /* CONFIG_FREEZE */
630 
/*
 * IOKit-mapped memory ledger balance.
 */
uint64_t
get_task_iokit_mapped(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
}
636 
/*
 * Nonvolatile network memory ledger balance.
 */
uint64_t
get_task_network_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
}
642 
/*
 * Compressed nonvolatile network memory ledger balance.
 */
uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
}
648 
/*
 * Wired memory ledger balance.
 */
uint64_t
get_task_wired_mem(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.wired_mem);
}
654 
655 uint64_t
get_task_tagged_footprint(task_t task)656 get_task_tagged_footprint(task_t task)
657 {
658 	kern_return_t ret;
659 	ledger_amount_t credit, debit;
660 
661 	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
662 	if (KERN_SUCCESS == ret) {
663 		return credit - debit;
664 	}
665 
666 	return 0;
667 }
668 
669 uint64_t
get_task_tagged_footprint_compressed(task_t task)670 get_task_tagged_footprint_compressed(task_t task)
671 {
672 	kern_return_t ret;
673 	ledger_amount_t credit, debit;
674 
675 	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
676 	if (KERN_SUCCESS == ret) {
677 		return credit - debit;
678 	}
679 
680 	return 0;
681 }
682 
683 uint64_t
get_task_media_footprint(task_t task)684 get_task_media_footprint(task_t task)
685 {
686 	kern_return_t ret;
687 	ledger_amount_t credit, debit;
688 
689 	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
690 	if (KERN_SUCCESS == ret) {
691 		return credit - debit;
692 	}
693 
694 	return 0;
695 }
696 
697 uint64_t
get_task_media_footprint_compressed(task_t task)698 get_task_media_footprint_compressed(task_t task)
699 {
700 	kern_return_t ret;
701 	ledger_amount_t credit, debit;
702 
703 	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
704 	if (KERN_SUCCESS == ret) {
705 		return credit - debit;
706 	}
707 
708 	return 0;
709 }
710 
711 uint64_t
get_task_graphics_footprint(task_t task)712 get_task_graphics_footprint(task_t task)
713 {
714 	kern_return_t ret;
715 	ledger_amount_t credit, debit;
716 
717 	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
718 	if (KERN_SUCCESS == ret) {
719 		return credit - debit;
720 	}
721 
722 	return 0;
723 }
724 
725 
726 uint64_t
get_task_graphics_footprint_compressed(task_t task)727 get_task_graphics_footprint_compressed(task_t task)
728 {
729 	kern_return_t ret;
730 	ledger_amount_t credit, debit;
731 
732 	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
733 	if (KERN_SUCCESS == ret) {
734 		return credit - debit;
735 	}
736 
737 	return 0;
738 }
739 
740 uint64_t
get_task_neural_footprint(task_t task)741 get_task_neural_footprint(task_t task)
742 {
743 	kern_return_t ret;
744 	ledger_amount_t credit, debit;
745 
746 	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
747 	if (KERN_SUCCESS == ret) {
748 		return credit - debit;
749 	}
750 
751 	return 0;
752 }
753 
754 uint64_t
get_task_neural_footprint_compressed(task_t task)755 get_task_neural_footprint_compressed(task_t task)
756 {
757 	kern_return_t ret;
758 	ledger_amount_t credit, debit;
759 
760 	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
761 	if (KERN_SUCCESS == ret) {
762 		return credit - debit;
763 	}
764 
765 	return 0;
766 }
767 
/*
 * CPU-time ledger balance.
 */
uint64_t
get_task_cpu_time(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.cpu_time);
}
773 
/*
 * Atomically read the task's load tag.
 */
uint32_t
get_task_loadTag(task_t task)
{
	return os_atomic_load(&task->loadTag, relaxed);
}
779 
/*
 * Atomically set the task's load tag, returning the previous value.
 */
uint32_t
set_task_loadTag(task_t task, uint32_t loadTag)
{
	return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
}
785 
786 
/*
 * Return the task owning a thread, from its read-only data.
 */
task_t
get_threadtask(thread_t th)
{
	return get_thread_ro(th)->tro_task;
}
792 
/*
 * Like get_threadtask(), but safe very early in boot when a thread
 * may not yet have read-only data attached; returns TASK_NULL then.
 */
task_t
get_threadtask_early(thread_t th)
{
	/* Only tolerate missing t_tro before early boot completes. */
	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
		if (th == THREAD_NULL || th->t_tro == NULL) {
			return TASK_NULL;
		}
	}
	return get_threadtask(th);
}
803 
804 /*
805  *
806  */
/*
 * Lowest valid address in the map.
 */
vm_map_offset_t
get_map_min(
	vm_map_t        map)
{
	return vm_map_min(map);
}
813 
814 /*
815  *
816  */
/*
 * Highest valid address in the map.
 */
vm_map_offset_t
get_map_max(
	vm_map_t        map)
{
	return vm_map_max(map);
}
/*
 * Adjusted virtual size of the map.
 */
vm_map_size_t
get_vmmap_size(
	vm_map_t        map)
{
	return vm_map_adjusted_size(map);
}
/*
 * VM page size of the task's map.
 */
int
get_task_page_size(
	task_t task)
{
	return vm_map_page_size(task->map);
}
835 
836 #if CONFIG_COREDUMP
837 
/*
 * Count map entries in [start, end) of a submap, recursing into
 * nested submaps.  Locking is skipped when running under kdp
 * (debugger context), where taking locks is unsafe.
 */
static int
get_vmsubmap_entries(
	vm_map_t        map,
	vm_object_offset_t      start,
	vm_object_offset_t      end)
{
	int     total_entries = 0;
	vm_map_entry_t  entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	/* Skip entries wholly below the requested range. */
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
873 
/*
 * Count all map entries in a map, recursing into submaps.  As with
 * get_vmsubmap_entries(), locking is skipped in kdp context.
 */
int
get_vmmap_entries(
	vm_map_t        map)
{
	int     total_entries = 0;
	vm_map_entry_t  entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
904 #endif /* CONFIG_COREDUMP */
905 
906 /*
907  *
908  */
909 /*
910  *
911  */
/*
 * Task-level user stop count.
 */
int
get_task_userstop(
	task_t task)
{
	return task->user_stop_count;
}
918 
919 /*
920  *
921  */
/*
 * Thread-level user stop count.
 */
int
get_thread_userstop(
	thread_t th)
{
	return th->user_stop_count;
}
928 
929 /*
930  *
931  */
/*
 * TRUE if the task is suspended via pid_suspend.
 */
boolean_t
get_task_pidsuspended(
	task_t task)
{
	return task->pidsuspended;
}
938 
939 /*
940  *
941  */
/*
 * TRUE if the task has been frozen.
 */
boolean_t
get_task_frozen(
	task_t task)
{
	return task->frozen;
}
948 
949 /*
950  *
951  */
/*
 * TRUE if the thread has an unsafe (non-safely) abort pending.
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}
958 
959 /*
960  * This routine is like thread_should_abort() above.  It checks to
961  * see if the current thread is aborted.  But unlike above, it also
962  * checks to see if thread is safely aborted.  If so, it returns
963  * that fact, and clears the condition (safe aborts only should
964  * have a single effect, and a poll of the abort status
965  * qualifies.
966  */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	/*
	 * Unsafe abort on an interruptible thread: report it without
	 * clearing anything.
	 */
	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	/*
	 * Safe abort: clear it (single-effect semantics — this poll
	 * consumes the condition).  Re-checked under the thread lock
	 * at splsched since the flag may change concurrently.
	 */
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
989 
990 /*
991  *
992  */
/*
 * Invoke func_callback(thread, func_arg) on every thread of the
 * task, with the task lock held throughout.  The callback must not
 * block or re-take the task lock.
 */
void
task_act_iterate_wth_args(
	task_t                  task,
	void                    (*func_callback)(thread_t, void *),
	void                    *func_arg)
{
	thread_t        inc;

	task_lock(task);

	for (inc  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
1011 
1012 
1013 #include <sys/bsdtask_info.h>
1014 
/*
 * Fill a proc_taskinfo_internal for the task: virtual/resident size,
 * aggregated CPU times, context switches, syscall counts, fault
 * counters, thread count and priority.  Per-thread fields are
 * summed under each thread's lock (at splsched) while holding the
 * task lock.
 */
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t   tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size  = vm_map_adjusted_size(map);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &ptinfo->pti_resident_size);

	ptinfo->pti_policy = ((task != kernel_task)?
	    POLICY_TIMESHARE: POLICY_RR);

	/* Start from task-level totals, then add live per-thread timers. */
	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t    tval;
		spl_t x;

		/* Idle threads are not charged to the task. */
		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);

		if (thread->precise_user_kernel_time) {
			tinfo.threads_system += tval;
			tinfo.total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			tinfo.threads_user += tval;
			tinfo.total_user += tval;
		}

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	/* Counters are 64-bit internally; clamp to the int32 ABI fields. */
	ptinfo->pti_faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
	ptinfo->pti_pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
	ptinfo->pti_cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
	ptinfo->pti_messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
	ptinfo->pti_messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
	ptinfo->pti_syscalls_mach = (int32_t) MIN(task->syscalls_mach + syscalls_mach, INT32_MAX);
	ptinfo->pti_syscalls_unix = (int32_t) MIN(task->syscalls_unix + syscalls_unix, INT32_MAX);
	ptinfo->pti_csw = (int32_t) MIN(task->c_switch + cswitch, INT32_MAX);
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
1095 
/*
 * Locate a thread of the task by thread_id (thuniqueid) or by its
 * cthread_self value (thaddr), and fill a proc_threadinfo_internal
 * from THREAD_BASIC_INFO plus scheduler priorities and the BSD
 * thread name/cwd.  Returns 0 on success, 1 if the thread was not
 * found or thread_info_internal failed.
 */
int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t  thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			/* Convert time_value_t (sec/usec) to nanoseconds. */
			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			/* Thread cwd is optional; only fill if requested. */
			if (vpp != NULL) {
				bsd_threadcdir(get_bsdthread_info(thact), vpp, vidp);
			}
			bsd_getthreadname(get_bsdthread_info(thact), ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return err;
}
1144 
1145 int
fill_taskthreadlist(task_t task,void * buffer,int thcount,bool thuniqueid)1146 fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
1147 {
1148 	int numthr = 0;
1149 	thread_t thact;
1150 	uint64_t * uptr;
1151 	uint64_t  thaddr;
1152 
1153 	uptr = (uint64_t *)buffer;
1154 
1155 	task_lock(task);
1156 
1157 	for (thact  = (thread_t)(void *)queue_first(&task->threads);
1158 	    !queue_end(&task->threads, (queue_entry_t)thact);) {
1159 		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
1160 		*uptr++ = thaddr;
1161 		numthr++;
1162 		if (numthr >= thcount) {
1163 			goto out;
1164 		}
1165 		thact = (thread_t)(void *)queue_next(&thact->task_threads);
1166 	}
1167 
1168 out:
1169 	task_unlock(task);
1170 	return (int)(numthr * sizeof(uint64_t));
1171 }
1172 
1173 int
fill_taskthreadschedinfo(task_t task,uint64_t thread_id,struct proc_threadschedinfo_internal * thread_sched_info)1174 fill_taskthreadschedinfo(task_t task, uint64_t thread_id, struct proc_threadschedinfo_internal *thread_sched_info)
1175 {
1176 	int err = 0;
1177 
1178 	thread_t thread = current_thread();
1179 
1180 	/*
1181 	 * Looking up threads is pretty expensive and not realtime-safe
1182 	 * right now, requiring locking the task and iterating over all
1183 	 * threads. As long as that is the case, we officially only
1184 	 * support getting this info for the current thread.
1185 	 */
1186 	if (task != current_task() || thread_id != thread->thread_id) {
1187 		return -1;
1188 	}
1189 
1190 #if INTERRUPT_MASKED_DEBUG
1191 	absolutetime_to_nanoseconds(thread->machine.int_time_mt, &thread_sched_info->int_time_ns);
1192 #else
1193 	(void)thread;
1194 	thread_sched_info->int_time_ns = 0;
1195 #endif
1196 
1197 	return err;
1198 }
1199 
/* Return the task's current thread count (unlocked read of a single field). */
int
get_numthreads(task_t task)
{
	return task->thread_count;
}
1205 
1206 /*
1207  * Gather the various pieces of info about the designated task,
1208  * and collect it all into a single rusage_info.
1209  */
1210 int
fill_task_rusage(task_t task,rusage_info_current * ri)1211 fill_task_rusage(task_t task, rusage_info_current *ri)
1212 {
1213 	struct task_power_info powerinfo;
1214 
1215 	uint64_t runnable_time = 0;
1216 
1217 	assert(task != TASK_NULL);
1218 	task_lock(task);
1219 
1220 	task_power_info_locked(task, &powerinfo, NULL, NULL, &runnable_time);
1221 	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
1222 	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
1223 	ri->ri_user_time = powerinfo.total_user;
1224 	ri->ri_system_time = powerinfo.total_system;
1225 	ri->ri_runnable_time = runnable_time;
1226 
1227 	ri->ri_phys_footprint = get_task_phys_footprint(task);
1228 	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
1229 	    (ledger_amount_t *)&ri->ri_resident_size);
1230 	ri->ri_wired_size = get_task_wired_mem(task);
1231 
1232 	ri->ri_pageins = counter_load(&task->pageins);
1233 
1234 	task_unlock(task);
1235 	return 0;
1236 }
1237 
1238 void
fill_task_billed_usage(task_t task __unused,rusage_info_current * ri)1239 fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
1240 {
1241 	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
1242 	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
1243 }
1244 
1245 int
fill_task_io_rusage(task_t task,rusage_info_current * ri)1246 fill_task_io_rusage(task_t task, rusage_info_current *ri)
1247 {
1248 	assert(task != TASK_NULL);
1249 	task_lock(task);
1250 
1251 	if (task->task_io_stats) {
1252 		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
1253 		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
1254 	} else {
1255 		/* I/O Stats unavailable */
1256 		ri->ri_diskio_bytesread = 0;
1257 		ri->ri_diskio_byteswritten = 0;
1258 	}
1259 	task_unlock(task);
1260 	return 0;
1261 }
1262 
1263 int
fill_task_qos_rusage(task_t task,rusage_info_current * ri)1264 fill_task_qos_rusage(task_t task, rusage_info_current *ri)
1265 {
1266 	thread_t thread;
1267 
1268 	assert(task != TASK_NULL);
1269 	task_lock(task);
1270 
1271 	/* Rollup QoS time of all the threads to task */
1272 	queue_iterate(&task->threads, thread, thread_t, task_threads) {
1273 		if (thread->options & TH_OPT_IDLE_THREAD) {
1274 			continue;
1275 		}
1276 
1277 		thread_update_qos_cpu_time(thread);
1278 	}
1279 	ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
1280 	ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
1281 	ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
1282 	ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
1283 	ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
1284 	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
1285 	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
1286 
1287 	task_unlock(task);
1288 	return 0;
1289 }
1290 
/*
 * Copy the task's fixed hardware counters (cycles and, where defined,
 * instructions) into the rusage structure.  No-op when MONOTONIC is not
 * configured or fixed counters are unsupported on this core.
 */
void
fill_task_monotonic_rusage(task_t task, rusage_info_current *ri)
{
#if MONOTONIC
	/* Without fixed-counter support there is nothing to report. */
	if (!mt_core_supported) {
		return;
	}

	assert(task != TASK_NULL);

	uint64_t counts[MT_CORE_NFIXED] = { 0 };
	mt_fixed_task_counts(task, counts);
#ifdef MT_CORE_INSTRS
	ri->ri_instructions = counts[MT_CORE_INSTRS];
#endif /* defined(MT_CORE_INSTRS) */
	ri->ri_cycles = counts[MT_CORE_CYCLES];
#else /* MONOTONIC */
#pragma unused(task, ri)
#endif /* !MONOTONIC */
}
1311 
1312 uint64_t
get_task_logical_writes(task_t task,bool external)1313 get_task_logical_writes(task_t task, bool external)
1314 {
1315 	assert(task != TASK_NULL);
1316 	struct ledger_entry_info lei;
1317 	int entry = external ? task_ledgers.logical_writes_to_external :
1318 	    task_ledgers.logical_writes;
1319 
1320 	task_lock(task);
1321 	ledger_get_entry_info(task->ledger, entry, &lei);
1322 	task_unlock(task);
1323 
1324 	return lei.lei_balance;
1325 }
1326 
1327 uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)1328 get_task_dispatchqueue_serialno_offset(task_t task)
1329 {
1330 	uint64_t dq_serialno_offset = 0;
1331 
1332 	if (task->bsd_info) {
1333 		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(task->bsd_info);
1334 	}
1335 
1336 	return dq_serialno_offset;
1337 }
1338 
1339 uint64_t
get_task_dispatchqueue_label_offset(task_t task)1340 get_task_dispatchqueue_label_offset(task_t task)
1341 {
1342 	uint64_t dq_label_offset = 0;
1343 
1344 	if (task->bsd_info) {
1345 		dq_label_offset = get_dispatchqueue_label_offset_from_proc(task->bsd_info);
1346 	}
1347 
1348 	return dq_label_offset;
1349 }
1350 
1351 uint64_t
get_task_uniqueid(task_t task)1352 get_task_uniqueid(task_t task)
1353 {
1354 	if (task->bsd_info) {
1355 		return proc_uniqueid_task(task->bsd_info, task);
1356 	} else {
1357 		return UINT64_MAX;
1358 	}
1359 }
1360 
1361 int
get_task_version(task_t task)1362 get_task_version(task_t task)
1363 {
1364 	if (task->bsd_info) {
1365 		return proc_pidversion(task->bsd_info);
1366 	} else {
1367 		return INT_MAX;
1368 	}
1369 }
1370 
1371 #if CONFIG_MACF
/* Accessor for the task's crash label pointer (unlocked read). */
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}
1377 
/*
 * Replace the task's crash label pointer.  Does not release any previous
 * label; the caller owns both old and new label lifetimes.
 */
void
set_task_crash_label(task_t task, struct label *label)
{
	task->crash_label = label;
}
1383 #endif
1384 
1385 int
fill_taskipctableinfo(task_t task,uint32_t * table_size,uint32_t * table_free)1386 fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
1387 {
1388 	ipc_space_t space = task->itk_space;
1389 	if (space == NULL) {
1390 		return -1;
1391 	}
1392 
1393 	is_read_lock(space);
1394 	if (!is_active(space)) {
1395 		is_read_unlock(space);
1396 		return -1;
1397 	}
1398 
1399 	*table_size = is_active_table(space)->ie_size;
1400 	*table_free = space->is_table_free;
1401 
1402 	is_read_unlock(space);
1403 
1404 	return 0;
1405 }
1406 
1407 int
get_task_cdhash(task_t task,char cdhash[static CS_CDHASH_LEN])1408 get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
1409 {
1410 	int result = 0;
1411 
1412 	task_lock(task);
1413 	result = task->bsd_info ? proc_getcdhash(task->bsd_info, cdhash) : ESRCH;
1414 	task_unlock(task);
1415 
1416 	return result;
1417 }
1418 
/* moved from ubc_subr.c */
/*
 * Map a Mach kern_return_t onto the closest BSD errno value.
 * KERN_SUCCESS maps to 0; anything without a specific mapping becomes EIO.
 * This is an exhaustive table — keep it in sync with bsd_to_mach_failure()
 * below when adding new mappings.
 */
int
mach_to_bsd_errno(kern_return_t mach_err)
{
	switch (mach_err) {
	case KERN_SUCCESS:
		return 0;

	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
	case KERN_NOT_IN_SET:
	case KERN_INVALID_NAME:
	case KERN_INVALID_TASK:
	case KERN_INVALID_RIGHT:
	case KERN_INVALID_VALUE:
	case KERN_INVALID_CAPABILITY:
	case KERN_INVALID_HOST:
	case KERN_MEMORY_PRESENT:
	case KERN_INVALID_PROCESSOR_SET:
	case KERN_INVALID_POLICY:
	case KERN_ALREADY_WAITING:
	case KERN_DEFAULT_SET:
	case KERN_EXCEPTION_PROTECTED:
	case KERN_INVALID_LEDGER:
	case KERN_INVALID_MEMORY_CONTROL:
	case KERN_INVALID_SECURITY:
	case KERN_NOT_DEPRESSED:
	case KERN_LOCK_OWNED:
	case KERN_LOCK_OWNED_SELF:
		return EINVAL;

	case KERN_NOT_RECEIVER:
	case KERN_NO_ACCESS:
	case KERN_POLICY_STATIC:
		return EACCES;

	case KERN_NO_SPACE:
	case KERN_RESOURCE_SHORTAGE:
	case KERN_UREFS_OVERFLOW:
	case KERN_INVALID_OBJECT:
		return ENOMEM;

	case KERN_MEMORY_FAILURE:
	case KERN_MEMORY_ERROR:
	case KERN_PROTECTION_FAILURE:
		return EFAULT;

	case KERN_POLICY_LIMIT:
	case KERN_CODESIGN_ERROR:
	case KERN_DENIED:
		return EPERM;

	case KERN_ALREADY_IN_SET:
	case KERN_NAME_EXISTS:
	case KERN_RIGHT_EXISTS:
		return EEXIST;

	case KERN_ABORTED:
		return EINTR;

	case KERN_TERMINATED:
	case KERN_LOCK_SET_DESTROYED:
	case KERN_LOCK_UNSTABLE:
	case KERN_SEMAPHORE_DESTROYED:
	case KERN_NOT_FOUND:
	case KERN_NOT_WAITING:
		return ENOENT;

	case KERN_RPC_SERVER_TERMINATED:
		return ECONNRESET;

	case KERN_NOT_SUPPORTED:
		return ENOTSUP;

	case KERN_NODE_DOWN:
		return ENETDOWN;

	case KERN_OPERATION_TIMED_OUT:
		return ETIMEDOUT;

	default:
		return EIO; /* 5 == KERN_FAILURE */
	}
}
1503 
/*
 * Map a BSD errno onto a Mach kern_return_t *failure* code.
 * Note this is deliberately NOT a strict inverse of mach_to_bsd_errno():
 * several errno values collapse onto one Mach code (e.g. EIO/EACCES/
 * ENOMEM/EFAULT all become KERN_MEMORY_ERROR), and 0 is not handled here —
 * callers invoke this only on failure.  Unknown errors become KERN_FAILURE.
 */
kern_return_t
bsd_to_mach_failure(int bsd_err)
{
	switch (bsd_err) {
	case EIO:
	case EACCES:
	case ENOMEM:
	case EFAULT:
		return KERN_MEMORY_ERROR;

	case EINVAL:
		return KERN_INVALID_ARGUMENT;

	case ETIMEDOUT:
	case EBUSY:
		return KERN_OPERATION_TIMED_OUT;

	case ECONNRESET:
		return KERN_RPC_SERVER_TERMINATED;

	case ENOTSUP:
		return KERN_NOT_SUPPORTED;

	case ENETDOWN:
		return KERN_NODE_DOWN;

	case ENOENT:
		return KERN_NOT_FOUND;

	case EINTR:
		return KERN_ABORTED;

	case EPERM:
		return KERN_DENIED;

	case EEXIST:
		return KERN_ALREADY_IN_SET;

	default:
		return KERN_FAILURE;
	}
}
1546