/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include "mach/arm/vm_param.h"
#include "mach/kern_return.h"
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>
#include <mach/task.h>

#include <kern/kern_types.h>
#include <kern/ledger.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/ast.h>
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/errno.h>
#include <sys/proc_require.h>

#include <machine/limits.h>
#include <sys/codesign.h> /* CS_CDHASH_LEN */

#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */

extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int fill_task_rusage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task, bool external);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
void task_bsdtask_kill(task_t);

extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
extern uint64_t proc_uniqueid_task(void *p, void *t);
extern int proc_pidversion(void *p);
extern int proc_getcdhash(void *p, char *cdhash);

int mach_to_bsd_errno(kern_return_t mach_err);
kern_return_t kern_return_for_errno(int bsd_errno);

#if MACH_BSD
extern void psignal(void *, int);
#endif

/*
 * Return the BSD proc structure associated with a task, or NULL if the
 * task has no proc attached.
 */
void *
get_bsdtask_info(task_t t)
{
	void *proc_from_task = task_get_proc_raw(t);
	proc_require(proc_from_task, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_ALL);
	return task_has_proc(t) ? proc_from_task : NULL;
}

void
task_bsdtask_kill(task_t t)
{
	void *bsd_info = get_bsdtask_info(t);
	if (bsd_info != NULL) {
		psignal(bsd_info, SIGKILL);
	}
}
/*
 * Return the BSD proc structure for the task that owns the given thread.
 */
void *
get_bsdthreadtask_info(thread_t th)
{
	return get_thread_ro(th)->tro_proc;
}

/*
 * Attach or detach the BSD proc structure for a task.  Passing NULL
 * detaches the proc; any non-NULL value must be the proc co-allocated
 * with the task itself.
 */
void
set_bsdtask_info(task_t t, void *v)
{
	void *proc_from_task = task_get_proc_raw(t);
	if (v == NULL) {
		task_clear_has_proc(t);
	} else {
		if (v != proc_from_task) {
			panic("set_bsdtask_info trying to set random bsd_info %p", v);
		}
		task_set_has_proc(t);
	}
}

__abortlike
static void
__thread_ro_circularity_panic(thread_t th, thread_ro_t tro)
{
	panic("tro %p points back to %p instead of %p", tro, tro->tro_owner, th);
}

__attribute__((always_inline))
thread_ro_t
get_thread_ro_unchecked(thread_t th)
{
	return th->t_tro;
}

thread_ro_t
get_thread_ro(thread_t th)
{
	thread_ro_t tro = th->t_tro;

	zone_require_ro(ZONE_ID_THREAD_RO, sizeof(struct thread_ro), tro);
	if (tro->tro_owner != th) {
		__thread_ro_circularity_panic(th, tro);
	}
	return tro;
}

__attribute__((always_inline))
thread_ro_t
current_thread_ro_unchecked(void)
{
	return get_thread_ro_unchecked(current_thread());
}

thread_ro_t
current_thread_ro(void)
{
	return get_thread_ro(current_thread());
}

void
clear_thread_ro_proc(thread_t th)
{
	thread_ro_t tro = get_thread_ro(th);

	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_proc);
}

struct uthread *
get_bsdthread_info(thread_t th)
{
	return (struct uthread *)((uintptr_t)th + sizeof(struct thread));
}

thread_t
get_machthread(struct uthread *uth)
{
	return (struct thread *)((uintptr_t)uth - sizeof(struct thread));
}

/*
 * This is used to remember any FS error from VNOP_PAGEIN code when
 * invoked under vm_fault(). The value is an errno style value. It can
 * be retrieved by exception handlers using thread_get_state().
 */
void
set_thread_pagein_error(thread_t th, int error)
{
	assert(th == current_thread());
	if (error == 0 || th->t_pagein_error == 0) {
		th->t_pagein_error = error;
	}
}
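
/*
 * Note the guard above: passing 0 always clears the recorded error, while
 * a nonzero error is only recorded when no error is pending, so the first
 * error wins until it is explicitly cleared.
 */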

#if defined(__x86_64__)
/*
 * Returns non-zero if the thread has a non-NULL task
 * and that task has an LDT.
 */
int
thread_task_has_ldt(thread_t th)
{
	task_t task = get_threadtask(th);
	return task && task->i386_ldt != 0;
}
#endif /* __x86_64__ */

/*
 * XXX
 */
int get_thread_lock_count(thread_t th); /* forced forward */
int
get_thread_lock_count(thread_t th __unused)
{
	/*
	 * TODO: one day: resurrect counting locks held to disallow
	 * holding locks across upcalls.
	 *
	 * This never worked on arm.
	 */
	return 0;
}

/*
 * Return the first thread in the task with a reference held, or
 * THREAD_NULL if the task is inactive or has no threads.
 */
thread_t
get_firstthread(task_t task)
{
	thread_t thread = THREAD_NULL;
	task_lock(task);

	if (!task->active) {
		task_unlock(task);
		return THREAD_NULL;
	}

	thread = (thread_t)(void *)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread)) {
		task_unlock(task);
		return THREAD_NULL;
	}

	thread_reference(thread);
	task_unlock(task);
	return thread;
}
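
/*
 * Illustrative caller sketch (example only): the returned thread carries
 * a reference that the caller must drop with thread_deallocate():
 *
 *	thread_t th = get_firstthread(task);
 *	if (th != THREAD_NULL) {
 *		... inspect th ...
 *		thread_deallocate(th);
 *	}
 */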

kern_return_t
get_signalact(
	task_t                  task,
	thread_t                *result_out,
	int                     setast)
{
	kern_return_t result = KERN_SUCCESS;
	thread_t inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}
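
/*
 * get_signalact() above picks any thread in the task that is active and
 * not already being aborted; check_actforsig() below instead verifies
 * that one specific thread is still eligible.  With setast != 0, both
 * post the BSD AST so the chosen thread takes the signal on its way back
 * to user space.
 */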

kern_return_t
check_actforsig(
	task_t                  task,
	thread_t                thread,
	int                     setast)
{
	kern_return_t result = KERN_FAILURE;
	thread_t inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}

ledger_t
get_task_ledger(task_t t)
{
	return t->ledger;
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t
get_task_map(task_t t)
{
	return t->map;
}

vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference(m);
	task_unlock(t);
	return m;
}
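
/*
 * Illustrative caller sketch (example only): the map returned above holds
 * a reference that must be dropped with vm_map_deallocate() when done:
 *
 *	vm_map_t map = get_task_map_reference(task);
 *	if (map != VM_MAP_NULL) {
 *		... use map ...
 *		vm_map_deallocate(map);
 *	}
 */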

/*
 * Return the task's IPC space.
 */
ipc_space_t
get_task_ipcspace(task_t t)
{
	return t->itk_space;
}

int
get_task_numacts(task_t t)
{
	return t->thread_count;
}

/* does this machine need a 64-bit register set for its signal handler? */
int
is_64signalregset(void)
{
	if (task_has_64Bit_data(current_task())) {
		return 1;
	}

	return 0;
}

/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned. Also does a pmap switch if the thread provided is the
 * current thread.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != get_threadtask(thread)) {
		panic("swap_task_map");
	}

	task_lock(task);
	mp_disable_preemption();

	/* verify that the map has been activated if the task is enabled for IPC access */
	assert(!task->ipc_active || (map->owning_task == task));

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

	return old_map;
}
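
/*
 * Hedged caller sketch (example only): swap_task_map() hands back the
 * previous map with its reference intact, so the caller normally
 * releases it once it is no longer needed:
 *
 *	vm_map_t old_map = swap_task_map(task, thread, new_map);
 *	vm_map_deallocate(old_map);
 */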

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
pmap_t
get_task_pmap(task_t t)
{
	return t->map->pmap;
}

/*
 * Return the task's resident memory size, in bytes.
 */
uint64_t
get_task_resident_size(task_t task)
{
	uint64_t val;

	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
	return val;
}

uint64_t
get_task_compressed(task_t task)
{
	uint64_t val;

	ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t *) &val);
	return val;
}

uint64_t
get_task_resident_max(task_t task)
{
	uint64_t val;

	ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
	return val;
}

/*
 * Get the balance for a given entry in the task ledger.
 * Returns 0 if the entry is invalid.
 */
static uint64_t
get_task_ledger_balance(task_t task, int entry)
{
	ledger_amount_t balance = 0;

	ledger_get_balance(task->ledger, entry, &balance);
	return balance;
}

uint64_t
get_task_purgeable_size(task_t task)
{
	kern_return_t ret;
	ledger_amount_t balance = 0;
	uint64_t volatile_size = 0;

	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += balance;

	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
	if (ret != KERN_SUCCESS) {
		return 0;
	}

	volatile_size += balance;

	return volatile_size;
}

/*
 * Return the task's physical footprint, in bytes.
 */
uint64_t
get_task_phys_footprint(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.phys_footprint);
}

#if CONFIG_LEDGER_INTERVAL_MAX
/*
 * Return the interval maximum of the task's physical footprint,
 * optionally resetting the interval.
 */
uint64_t
get_task_phys_footprint_interval_max(task_t task, int reset)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

/*
 * Return the lifetime maximum of the task's physical footprint.
 */
uint64_t
get_task_phys_footprint_lifetime_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

/*
 * Return the current limit on the task's physical footprint.
 */
uint64_t
get_task_phys_footprint_limit(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t
get_task_internal(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal);
}

uint64_t
get_task_internal_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal_compressed);
}

uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
}

uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
}

uint64_t
get_task_alternate_accounting(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
}

uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
}

uint64_t
get_task_page_table(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.page_table);
}

#if CONFIG_FREEZE
uint64_t
get_task_frozen_to_swap(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
}
#endif /* CONFIG_FREEZE */

uint64_t
get_task_iokit_mapped(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
}

uint64_t
get_task_network_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
}

uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
}

uint64_t
get_task_wired_mem(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.wired_mem);
}

uint64_t
get_task_tagged_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}
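
/*
 * The footprint getters in this family read both sides of the ledger
 * entry; ledger_get_entries() returns the accumulated credit and debit,
 * so credit - debit is the entry's net balance.
 */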

uint64_t
get_task_tagged_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_media_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_media_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_graphics_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_graphics_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_neural_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_neural_footprint_compressed(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

uint64_t
get_task_neural_nofootprint_total(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.neural_nofootprint_total, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return credit - debit;
	}

	return 0;
}

#if CONFIG_LEDGER_INTERVAL_MAX
uint64_t
get_task_neural_nofootprint_total_interval_max(task_t task, int reset)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_interval_max(task->ledger, task_ledgers.neural_nofootprint_total, &max, reset);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

uint64_t
get_task_neural_nofootprint_total_lifetime_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.neural_nofootprint_total, &max);

	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t
get_task_cpu_time(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.cpu_time);
}

uint32_t
get_task_loadTag(task_t task)
{
	return os_atomic_load(&task->loadTag, relaxed);
}

uint32_t
set_task_loadTag(task_t task, uint32_t loadTag)
{
	return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
}
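
/*
 * set_task_loadTag() is an atomic exchange, so it returns the previous
 * tag.  Hedged sketch of a caller detecting the first assignment,
 * assuming a zero tag means "unset" (example only):
 *
 *	if (set_task_loadTag(task, tag) == 0) {
 *		... first time a load tag was assigned ...
 *	}
 */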

task_t
get_threadtask(thread_t th)
{
	return get_thread_ro(th)->tro_task;
}

task_t
get_threadtask_early(thread_t th)
{
	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
		if (th == THREAD_NULL || th->t_tro == NULL) {
			return TASK_NULL;
		}
	}
	return get_threadtask(th);
}

/*
 * Return the lowest valid address in the given map.
 */
vm_map_offset_t
get_map_min(
	vm_map_t map)
{
	return vm_map_min(map);
}

/*
 * Return the highest valid address in the given map.
 */
vm_map_offset_t
get_map_max(
	vm_map_t map)
{
	return vm_map_max(map);
}

vm_map_size_t
get_vmmap_size(
	vm_map_t map)
{
	return vm_map_adjusted_size(map);
}

int
get_task_page_size(
	task_t task)
{
	return vm_map_page_size(task->map);
}

#if CONFIG_COREDUMP

static int
get_vmsubmap_entries(
	vm_map_t map,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}

int
get_vmmap_entries(
	vm_map_t map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
#endif /* CONFIG_COREDUMP */

int
get_task_userstop(
	task_t task)
{
	return task->user_stop_count;
}

int
get_thread_userstop(
	thread_t th)
{
	return th->user_stop_count;
}

boolean_t
get_task_pidsuspended(
	task_t task)
{
	return task->pidsuspended;
}

boolean_t
get_task_frozen(
	task_t task)
{
	return task->frozen;
}

boolean_t
thread_should_abort(
	thread_t th)
{
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}

/*
 * This routine is like thread_should_abort() above. It checks to
 * see if the current thread is aborted. But unlike above, it also
 * checks to see if the thread is safely aborted. If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status qualifies).
 */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}

void
task_act_iterate_wth_args(
	task_t                  task,
	void                    (*func_callback)(thread_t, void *),
	void                    *func_arg)
{
	thread_t inc;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
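
/*
 * Illustrative sketch of an iteration callback (hypothetical helper, not
 * part of this file): the callback runs with the task lock held, so it
 * must not block:
 *
 *	static void
 *	count_thread(thread_t th __unused, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int nthreads = 0;
 *	task_act_iterate_wth_args(task, count_thread, &nthreads);
 */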

#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal *ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size = vm_map_adjusted_size(map);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &ptinfo->pti_resident_size);

	ptinfo->pti_policy = ((task != kernel_task)?
	    POLICY_TIMESHARE: POLICY_RR);

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	struct recount_times_mach term_times = recount_task_terminated_times(task);
	struct recount_times_mach total_times = recount_task_times(task);

	tinfo.threads_user = total_times.rtm_user - term_times.rtm_user;
	tinfo.threads_system = total_times.rtm_system - term_times.rtm_system;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_total_system = total_times.rtm_system;
	ptinfo->pti_total_user = total_times.rtm_user;

	ptinfo->pti_faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
	ptinfo->pti_pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
	ptinfo->pti_cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
	ptinfo->pti_messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
	ptinfo->pti_messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
	ptinfo->pti_syscalls_mach = (int32_t) MIN(task->syscalls_mach + syscalls_mach, INT32_MAX);
	ptinfo->pti_syscalls_unix = (int32_t) MIN(task->syscalls_unix + syscalls_unix, INT32_MAX);
	ptinfo->pti_csw = (int32_t) MIN(task->c_switch + cswitch, INT32_MAX);
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal *ptinfo, void *vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if (vpp != NULL) {
				bsd_threadcdir(get_bsdthread_info(thact), vpp, vidp);
			}
			bsd_getthreadname(get_bsdthread_info(thact), ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return err;
}

int
fill_taskthreadlist(task_t task, void *buffer, int thcount, bool thuniqueid)
{
	int numthr = 0;
	thread_t thact;
	uint64_t *uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount) {
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}
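
/*
 * Sizing note: fill_taskthreadlist() writes one uint64_t per thread and
 * returns the number of bytes written.  Hedged caller sketch (example
 * only, capacity chosen arbitrarily):
 *
 *	uint64_t ids[64];
 *	int bytes = fill_taskthreadlist(task, ids, 64, true);
 *	int nthreads = bytes / (int)sizeof(uint64_t);
 */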

int
fill_taskthreadschedinfo(task_t task, uint64_t thread_id, struct proc_threadschedinfo_internal *thread_sched_info)
{
	int err = 0;

	thread_t thread = current_thread();

	/*
	 * Looking up threads is pretty expensive and not realtime-safe
	 * right now, requiring locking the task and iterating over all
	 * threads. As long as that is the case, we officially only
	 * support getting this info for the current thread.
	 */
	if (task != current_task() || thread_id != thread->thread_id) {
		return -1;
	}

#if SCHED_HYGIENE_DEBUG
	absolutetime_to_nanoseconds(thread->machine.int_time_mt, &thread_sched_info->int_time_ns);
#else
	(void)thread;
	thread_sched_info->int_time_ns = 0;
#endif

	return err;
}

int
get_numthreads(task_t task)
{
	return task->thread_count;
}

/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	assert(task != TASK_NULL);
	task_lock(task);

	struct task_power_info_extra extra = { 0 };
	task_power_info_locked(task, &powerinfo, NULL, NULL, &extra);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;
	ri->ri_runnable_time = extra.runnable_time;
	ri->ri_cycles = extra.cycles;
	ri->ri_instructions = extra.instructions;
	ri->ri_pcycles = extra.pcycles;
	ri->ri_pinstructions = extra.pinstructions;
	ri->ri_user_ptime = extra.user_ptime;
	ri->ri_system_ptime = extra.system_ptime;
	ri->ri_energy_nj = extra.energy;
	ri->ri_penergy_nj = extra.penergy;
	ri->ri_secure_time_in_system = extra.secure_time;
	ri->ri_secure_ptime_in_system = extra.secure_ptime;

	ri->ri_phys_footprint = get_task_phys_footprint(task);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
	    (ledger_amount_t *)&ri->ri_resident_size);
	ri->ri_wired_size = get_task_wired_mem(task);

	ledger_get_balance(task->ledger, task_ledgers.neural_nofootprint_total,
	    (ledger_amount_t *)&ri->ri_neural_footprint);
	ri->ri_pageins = counter_load(&task->pageins);

	task_unlock(task);
	return 0;
}

void
fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
{
	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
}

int
fill_task_io_rusage(task_t task, rusage_info_current *ri)
{
	assert(task != TASK_NULL);
	task_lock(task);

	if (task->task_io_stats) {
		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
	} else {
		/* I/O stats unavailable */
		ri->ri_diskio_bytesread = 0;
		ri->ri_diskio_byteswritten = 0;
	}
	task_unlock(task);
	return 0;
}

int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Roll up the QoS time of all the threads to the task */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		thread_update_qos_cpu_time(thread);
	}
	ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return 0;
}

uint64_t
get_task_logical_writes(task_t task, bool external)
{
	assert(task != TASK_NULL);
	struct ledger_entry_info lei;
	int entry = external ? task_ledgers.logical_writes_to_external :
	    task_ledgers.logical_writes;

	task_lock(task);
	ledger_get_entry_info(task->ledger, entry, &lei);
	task_unlock(task);

	return lei.lei_balance;
}

uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)
{
	uint64_t dq_serialno_offset = 0;
	void *bsd_info = get_bsdtask_info(task);

	if (bsd_info) {
		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(bsd_info);
	}

	return dq_serialno_offset;
}

uint64_t
get_task_dispatchqueue_label_offset(task_t task)
{
	uint64_t dq_label_offset = 0;
	void *bsd_info = get_bsdtask_info(task);

	if (bsd_info) {
		dq_label_offset = get_dispatchqueue_label_offset_from_proc(bsd_info);
	}

	return dq_label_offset;
}

uint64_t
get_task_uniqueid(task_t task)
{
	void *bsd_info = get_bsdtask_info(task);

	if (bsd_info) {
		return proc_uniqueid_task(bsd_info, task);
	} else {
		return UINT64_MAX;
	}
}

int
get_task_version(task_t task)
{
	void *bsd_info = get_bsdtask_info(task);

	if (bsd_info) {
		return proc_pidversion(bsd_info);
	} else {
		return INT_MAX;
	}
}

#if CONFIG_MACF
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}

void
set_task_crash_label(task_t task, struct label *label)
{
	task->crash_label = label;
}
#endif /* CONFIG_MACF */

int
fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
{
	ipc_space_t space = task->itk_space;
	if (space == NULL) {
		return -1;
	}

	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		return -1;
	}

	*table_size = ipc_entry_table_count(is_active_table(space));
	*table_free = space->is_table_free;

	is_read_unlock(space);

	return 0;
}

int
get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
{
	int result = 0;
	void *bsd_info = NULL;

	task_lock(task);
	bsd_info = get_bsdtask_info(task);
	result = bsd_info ? proc_getcdhash(bsd_info, cdhash) : ESRCH;
	task_unlock(task);

	return result;
}

bool
current_thread_in_kernel_fault(void)
{
	if (current_thread()->recover) {
		return true;
	}
	return false;
}

/* moved from ubc_subr.c */
int
mach_to_bsd_errno(kern_return_t mach_err)
{
	switch (mach_err) {
	case KERN_SUCCESS:
		return 0;

	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
	case KERN_NOT_IN_SET:
	case KERN_INVALID_NAME:
	case KERN_INVALID_TASK:
	case KERN_INVALID_RIGHT:
	case KERN_INVALID_VALUE:
	case KERN_INVALID_CAPABILITY:
	case KERN_INVALID_HOST:
	case KERN_MEMORY_PRESENT:
	case KERN_INVALID_PROCESSOR_SET:
	case KERN_INVALID_POLICY:
	case KERN_ALREADY_WAITING:
	case KERN_DEFAULT_SET:
	case KERN_EXCEPTION_PROTECTED:
	case KERN_INVALID_LEDGER:
	case KERN_INVALID_MEMORY_CONTROL:
	case KERN_INVALID_SECURITY:
	case KERN_NOT_DEPRESSED:
	case KERN_LOCK_OWNED:
	case KERN_LOCK_OWNED_SELF:
		return EINVAL;

	case KERN_NOT_RECEIVER:
	case KERN_NO_ACCESS:
	case KERN_POLICY_STATIC:
		return EACCES;

	case KERN_NO_SPACE:
	case KERN_RESOURCE_SHORTAGE:
	case KERN_UREFS_OVERFLOW:
	case KERN_INVALID_OBJECT:
		return ENOMEM;

	case KERN_MEMORY_FAILURE:
	case KERN_MEMORY_ERROR:
	case KERN_PROTECTION_FAILURE:
		return EFAULT;

	case KERN_POLICY_LIMIT:
	case KERN_CODESIGN_ERROR:
	case KERN_DENIED:
		return EPERM;

	case KERN_ALREADY_IN_SET:
	case KERN_NAME_EXISTS:
	case KERN_RIGHT_EXISTS:
		return EEXIST;

	case KERN_ABORTED:
		return EINTR;

	case KERN_TERMINATED:
	case KERN_LOCK_SET_DESTROYED:
	case KERN_LOCK_UNSTABLE:
	case KERN_SEMAPHORE_DESTROYED:
	case KERN_NOT_FOUND:
	case KERN_NOT_WAITING:
		return ENOENT;

	case KERN_RPC_SERVER_TERMINATED:
		return ECONNRESET;

	case KERN_NOT_SUPPORTED:
		return ENOTSUP;

	case KERN_NODE_DOWN:
		return ENETDOWN;

	case KERN_OPERATION_TIMED_OUT:
		return ETIMEDOUT;

	default:
		return EIO; /* 5 == KERN_FAILURE */
	}
}
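
/*
 * Hedged usage sketch (example only; some_mach_call is hypothetical):
 *
 *	kern_return_t kr = some_mach_call();
 *	if (kr != KERN_SUCCESS) {
 *		return mach_to_bsd_errno(kr);
 *	}
 */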

/*
 * Return the Mach return value corresponding to a given BSD errno.
 */
kern_return_t
kern_return_for_errno(int bsd_errno)
{
	switch (bsd_errno) {
	case 0:
		return KERN_SUCCESS;

	case EIO:
	case EACCES:
	case ENOMEM:
	case EFAULT:
		return KERN_MEMORY_ERROR;

	case EINVAL:
		return KERN_INVALID_ARGUMENT;

	case ETIMEDOUT:
	case EBUSY:
		return KERN_OPERATION_TIMED_OUT;

	case ECONNRESET:
		return KERN_RPC_SERVER_TERMINATED;

	case ENOTSUP:
		return KERN_NOT_SUPPORTED;

	case ENETDOWN:
		return KERN_NODE_DOWN;

	case ENOENT:
		return KERN_NOT_FOUND;

	case EINTR:
		return KERN_ABORTED;

	case EPERM:
		return KERN_DENIED;

	case EEXIST:
		return KERN_ALREADY_IN_SET;

	default:
		return KERN_FAILURE;
	}
}
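
/*
 * Note that kern_return_for_errno() is not an exact inverse of
 * mach_to_bsd_errno(): EEXIST maps back to KERN_ALREADY_IN_SET here,
 * while mach_to_bsd_errno() also folds KERN_NAME_EXISTS and
 * KERN_RIGHT_EXISTS into EEXIST.
 */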