1 /*
2 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <mach/mach_types.h>
29 #include <mach/machine/vm_param.h>
30 #include <mach/task.h>
31
32 #include <kern/kern_types.h>
33 #include <kern/ledger.h>
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/spl.h>
38 #include <kern/ast.h>
39 #include <ipc/ipc_port.h>
40 #include <ipc/ipc_object.h>
41 #include <vm/vm_map.h>
42 #include <vm/vm_kern.h>
43 #include <vm/pmap.h>
44 #include <vm/vm_protos.h> /* last */
45 #include <sys/resource.h>
46 #include <sys/signal.h>
47 #include <sys/errno.h>
48 #include <sys/proc_require.h>
49
50 #if MONOTONIC
51 #include <kern/monotonic.h>
52 #include <machine/monotonic.h>
53 #endif /* MONOTONIC */
54
55 #include <machine/limits.h>
56 #include <sys/codesign.h> /* CS_CDHASH_LEN */
57
58 #undef thread_should_halt
59
60 /* BSD KERN COMPONENT INTERFACE */
61
62 extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
63
64 thread_t get_firstthread(task_t);
65 int get_task_userstop(task_t);
66 int get_thread_userstop(thread_t);
67 boolean_t current_thread_aborted(void);
68 void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
69 kern_return_t get_signalact(task_t, thread_t *, int);
70 int fill_task_rusage(task_t task, rusage_info_current *ri);
71 int fill_task_io_rusage(task_t task, rusage_info_current *ri);
72 int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
73 uint64_t get_task_logical_writes(task_t task, bool external);
74 void fill_task_billed_usage(task_t task, rusage_info_current *ri);
75 void task_bsdtask_kill(task_t);
76
77 extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
78 extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
79 extern uint64_t proc_uniqueid_task(void *p, void *t);
80 extern int proc_pidversion(void *p);
81 extern int proc_getcdhash(void *p, char *cdhash);
82
83 int mach_to_bsd_errno(kern_return_t mach_err);
84 kern_return_t bsd_to_mach_failure(int bsd_err);
85
86 #if MACH_BSD
87 extern void psignal(void *, int);
88 #endif
89
/*
 * Return the BSD proc structure attached to a task, or NULL if the
 * task has no proc.  The raw proc storage is validated via
 * proc_require() before the has-proc flag is consulted.
 */
void *
get_bsdtask_info(task_t t)
{
	void *proc_from_task = task_get_proc_raw(t);
	proc_require(proc_from_task, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_ALL);
	return task_has_proc(t) ? proc_from_task : NULL;
}
100
/*
 * Deliver SIGKILL to the BSD proc of the task, if one is attached.
 * A task with no proc (e.g. already reaped) is silently ignored.
 */
void
task_bsdtask_kill(task_t t)
{
	void * bsd_info = get_bsdtask_info(t);
	if (bsd_info != NULL) {
		psignal(bsd_info, SIGKILL);
	}
}
/*
 * Return the proc recorded in the thread's read-only data
 * (tro_proc), i.e. the BSD proc this thread belongs to.
 */
void *
get_bsdthreadtask_info(thread_t th)
{
	return get_thread_ro(th)->tro_proc;
}
117
/*
 * Attach (v != NULL) or detach (v == NULL) the BSD proc for a task.
 * Only toggles the has-proc flag: the proc storage itself is
 * co-allocated with the task, so any non-NULL pointer that is not
 * the task's own raw proc is a caller bug and panics.
 */
void
set_bsdtask_info(task_t t, void * v)
{
	void *proc_from_task = task_get_proc_raw(t);
	if (v == NULL) {
		task_clear_has_proc(t);
	} else {
		if (v != proc_from_task) {
			panic("set_bsdtask_info trying to set random bsd_info %p", v);
		}
		task_set_has_proc(t);
	}
}
134
/* Panic helper for a thread_ro whose back-pointer does not reference
 * its owning thread; marked __abortlike so callers treat it as noreturn. */
__abortlike
static void
__thread_ro_circularity_panic(thread_t th, thread_ro_t tro)
{
	panic("tro %p points back to %p instead of %p", tro, tro->tro_owner, th);
}
141
/* Fetch the thread's read-only data without integrity checks;
 * for hot paths that can tolerate a forged pointer. */
__attribute__((always_inline))
thread_ro_t
get_thread_ro_unchecked(thread_t th)
{
	return th->t_tro;
}
148
/*
 * Fetch the thread's read-only data with integrity checks: the
 * pointer must come from the thread_ro zone and its tro_owner
 * back-pointer must reference this thread, otherwise we panic.
 */
thread_ro_t
get_thread_ro(thread_t th)
{
	thread_ro_t tro = th->t_tro;

	zone_require_ro(ZONE_ID_THREAD_RO, sizeof(struct thread_ro), tro);
	if (tro->tro_owner != th) {
		__thread_ro_circularity_panic(th, tro);
	}
	return tro;
}
160
/* Unchecked read-only data for the current thread. */
__attribute__((always_inline))
thread_ro_t
current_thread_ro_unchecked(void)
{
	return get_thread_ro_unchecked(current_thread());
}
167
/* Validated read-only data for the current thread. */
thread_ro_t
current_thread_ro(void)
{
	return get_thread_ro(current_thread());
}
173
/*
 * Clear the tro_proc field in the thread's read-only data.  Must go
 * through zalloc_ro_clear_field() because the structure lives in
 * read-only zone memory.
 */
void
clear_thread_ro_proc(thread_t th)
{
	thread_ro_t tro = get_thread_ro(th);

	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_proc);
}
181
/*
 * Return the uthread co-allocated immediately after the Mach thread.
 * Relies on the allocation layout: struct uthread directly follows
 * struct thread in the same allocation (inverse of get_machthread).
 */
struct uthread *
get_bsdthread_info(thread_t th)
{
	return (struct uthread *)((uintptr_t)th + sizeof(struct thread));
}
187
/*
 * Return the Mach thread preceding a uthread in the same allocation;
 * exact inverse of get_bsdthread_info().
 */
thread_t
get_machthread(struct uthread *uth)
{
	return (struct thread *)((uintptr_t)uth - sizeof(struct thread));
}
193
/*
 * This is used to remember any FS error from VNOP_PAGEIN code when
 * invoked under vm_fault(). The value is an errno style value. It can
 * be retrieved by exception handlers using thread_get_state().
 */
void
set_thread_pagein_error(thread_t th, int error)
{
	assert(th == current_thread());
	/* First error wins: a new error is only recorded when none is
	 * pending; error == 0 always clears the pending value. */
	if (error == 0 || th->t_pagein_error == 0) {
		th->t_pagein_error = error;
	}
}
207
208 #if defined(__x86_64__)
209 /*
210 * Returns non-zero if the thread has a non-NULL task
211 * and that task has an LDT.
212 */
213 int
thread_task_has_ldt(thread_t th)214 thread_task_has_ldt(thread_t th)
215 {
216 task_t task = get_threadtask(th);
217 return task && task->i386_ldt != 0;
218 }
219 #endif /* __x86_64__ */
220
/*
 * XXX
 */
int get_thread_lock_count(thread_t th); /* forced forward */
/* Number of mutexes currently held by the thread. */
int
get_thread_lock_count(thread_t th)
{
	return th->mutex_count;
}
230
/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here. We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t
get_firstthread(task_t task)
{
	thread_t thread = (thread_t)(void *)queue_first(&task->threads);

	/* An empty thread list yields the queue head itself, not a thread. */
	if (queue_end(&task->threads, (queue_entry_t)thread)) {
		thread = THREAD_NULL;
	}

	/* Re-check activity after the read (see comment above). */
	if (!task->active) {
		return THREAD_NULL;
	}

	return thread;
}
252
/*
 * Find a thread in the task that is eligible to take a signal:
 * active and not unsafely aborted.  If setast is non-zero, the BSD
 * AST is set on the chosen thread so it will notice the signal.
 * Returns KERN_FAILURE if the task is inactive or no eligible thread
 * exists; otherwise KERN_SUCCESS with *result_out (if provided) set.
 */
kern_return_t
get_signalact(
	task_t                  task,
	thread_t                *result_out,
	int                     setast)
{
	kern_return_t result = KERN_SUCCESS;
	thread_t inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	/* Walk the thread list under the task lock; the selected
	 * thread's mutex is left held on loop exit so it cannot change
	 * state before we set the AST below. */
	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}
301
302
/*
 * Check whether a specific thread of the task can take a signal:
 * it must still be on the task's thread list, active, and not
 * unsafely aborted.  If so and setast is non-zero, set the BSD AST
 * on it.  Returns KERN_SUCCESS if eligible, KERN_FAILURE otherwise.
 */
kern_return_t
check_actforsig(
	task_t                  task,
	thread_t                thread,
	int                     setast)
{
	kern_return_t result = KERN_FAILURE;
	thread_t inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	/* Scan the list to confirm the thread still belongs to this
	 * task before taking its mutex and inspecting its state. */
	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	/* On success the thread mutex is still held from the loop. */
	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}
350
/* Accessor for the task's resource ledger. */
ledger_t
get_task_ledger(task_t t)
{
	return t->ledger;
}
356
/*
 * This is only safe to call from a thread executing in
 * in the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
vm_map_t
get_task_map(task_t t)
{
	return t->map;
}
368
/*
 * Take and return a reference on the task's VM map.  The task lock
 * is held across the reference so the map cannot be swapped and
 * freed concurrently.  Returns VM_MAP_NULL for a NULL or inactive
 * task; the caller owns the returned reference.
 */
vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference(m);
	task_unlock(t);
	return m;
}
388
/*
 * Accessor for the task's IPC space (port name table).
 */
ipc_space_t
get_task_ipcspace(task_t t)
{
	return t->itk_space;
}
397
/* Number of threads ("activations") currently in the task. */
int
get_task_numacts(task_t t)
{
	return t->thread_count;
}
403
/* does this machine need 64bit register set for signal handler */
int
is_64signalregset(void)
{
	/* The 64-bit register set is needed exactly when the current
	 * task uses 64-bit data. */
	return task_has_64Bit_data(current_task()) ? 1 : 0;
}
414
/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned. Also does a pmap switch if thread provided is current thread.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != get_threadtask(thread)) {
		panic("swap_task_map");
	}

	task_lock(task);
	/* Preemption must stay disabled across the map/pmap switch so we
	 * are not migrated between updating the pointers and switching
	 * the hardware translation state. */
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

	return old_map;
}
444
/*
 *
 * This is only safe to call from a thread executing in
 * in the task's context or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we go to return it here.
 */
pmap_t
get_task_pmap(task_t t)
{
	return t->map->pmap;
}
457
458 /*
459 *
460 */
461 uint64_t
get_task_resident_size(task_t task)462 get_task_resident_size(task_t task)
463 {
464 uint64_t val;
465
466 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
467 return val;
468 }
469
470 uint64_t
get_task_compressed(task_t task)471 get_task_compressed(task_t task)
472 {
473 uint64_t val;
474
475 ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t *) &val);
476 return val;
477 }
478
479 uint64_t
get_task_resident_max(task_t task)480 get_task_resident_max(task_t task)
481 {
482 uint64_t val;
483
484 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
485 return val;
486 }
487
/*
 * Get the balance for a given field in the task ledger.
 * Returns 0 if the entry is invalid.
 */
static uint64_t
get_task_ledger_balance(task_t task, int entry)
{
	/* balance stays 0 when ledger_get_balance() fails. */
	ledger_amount_t balance = 0;

	ledger_get_balance(task->ledger, entry, &balance);
	return balance;
}
500
501 uint64_t
get_task_purgeable_size(task_t task)502 get_task_purgeable_size(task_t task)
503 {
504 kern_return_t ret;
505 ledger_amount_t balance = 0;
506 uint64_t volatile_size = 0;
507
508 ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
509 if (ret != KERN_SUCCESS) {
510 return 0;
511 }
512
513 volatile_size += balance;
514
515 ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
516 if (ret != KERN_SUCCESS) {
517 return 0;
518 }
519
520 volatile_size += balance;
521
522 return volatile_size;
523 }
524
/*
 * Task physical footprint (the memory metric used for jetsam
 * limits), from the phys_footprint ledger entry.
 */
uint64_t
get_task_phys_footprint(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.phys_footprint);
}
533
534 #if CONFIG_LEDGER_INTERVAL_MAX
535 /*
536 *
537 */
538 uint64_t
get_task_phys_footprint_interval_max(task_t task,int reset)539 get_task_phys_footprint_interval_max(task_t task, int reset)
540 {
541 kern_return_t ret;
542 ledger_amount_t max;
543
544 ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);
545
546 if (KERN_SUCCESS == ret) {
547 return max;
548 }
549
550 return 0;
551 }
552 #endif /* CONFIG_LEDGER_INTERVAL_MAX */
553
554 /*
555 *
556 */
557 uint64_t
get_task_phys_footprint_lifetime_max(task_t task)558 get_task_phys_footprint_lifetime_max(task_t task)
559 {
560 kern_return_t ret;
561 ledger_amount_t max;
562
563 ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);
564
565 if (KERN_SUCCESS == ret) {
566 return max;
567 }
568
569 return 0;
570 }
571
572 /*
573 *
574 */
575 uint64_t
get_task_phys_footprint_limit(task_t task)576 get_task_phys_footprint_limit(task_t task)
577 {
578 kern_return_t ret;
579 ledger_amount_t max;
580
581 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
582 if (KERN_SUCCESS == ret) {
583 return max;
584 }
585
586 return 0;
587 }
588
/* Task internal (anonymous) memory, in bytes. */
uint64_t
get_task_internal(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal);
}

/* Compressed portion of the task's internal memory, in bytes. */
uint64_t
get_task_internal_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal_compressed);
}

/* Non-volatile purgeable memory held by the task, in bytes. */
uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
}

/* Compressed non-volatile purgeable memory, in bytes. */
uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
}

/* Memory accounted via the alternate-accounting ledger, in bytes. */
uint64_t
get_task_alternate_accounting(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
}

/* Compressed alternate-accounting memory, in bytes. */
uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
}

/* Memory consumed by the task's page tables, in bytes. */
uint64_t
get_task_page_table(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.page_table);
}
630
631 #if CONFIG_FREEZE
/* Bytes of the frozen task's memory that have been paged to swap. */
uint64_t
get_task_frozen_to_swap(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
}
637 #endif /* CONFIG_FREEZE */
638
/* Memory mapped into the task by IOKit, in bytes. */
uint64_t
get_task_iokit_mapped(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
}

/* Non-volatile memory attributed to networking, in bytes. */
uint64_t
get_task_network_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
}

/* Compressed non-volatile networking memory, in bytes. */
uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
}

/* Wired (non-pageable) memory held by the task, in bytes. */
uint64_t
get_task_wired_mem(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.wired_mem);
}
662
663 uint64_t
get_task_tagged_footprint(task_t task)664 get_task_tagged_footprint(task_t task)
665 {
666 kern_return_t ret;
667 ledger_amount_t credit, debit;
668
669 ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
670 if (KERN_SUCCESS == ret) {
671 return credit - debit;
672 }
673
674 return 0;
675 }
676
677 uint64_t
get_task_tagged_footprint_compressed(task_t task)678 get_task_tagged_footprint_compressed(task_t task)
679 {
680 kern_return_t ret;
681 ledger_amount_t credit, debit;
682
683 ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
684 if (KERN_SUCCESS == ret) {
685 return credit - debit;
686 }
687
688 return 0;
689 }
690
691 uint64_t
get_task_media_footprint(task_t task)692 get_task_media_footprint(task_t task)
693 {
694 kern_return_t ret;
695 ledger_amount_t credit, debit;
696
697 ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
698 if (KERN_SUCCESS == ret) {
699 return credit - debit;
700 }
701
702 return 0;
703 }
704
705 uint64_t
get_task_media_footprint_compressed(task_t task)706 get_task_media_footprint_compressed(task_t task)
707 {
708 kern_return_t ret;
709 ledger_amount_t credit, debit;
710
711 ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
712 if (KERN_SUCCESS == ret) {
713 return credit - debit;
714 }
715
716 return 0;
717 }
718
719 uint64_t
get_task_graphics_footprint(task_t task)720 get_task_graphics_footprint(task_t task)
721 {
722 kern_return_t ret;
723 ledger_amount_t credit, debit;
724
725 ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
726 if (KERN_SUCCESS == ret) {
727 return credit - debit;
728 }
729
730 return 0;
731 }
732
733
734 uint64_t
get_task_graphics_footprint_compressed(task_t task)735 get_task_graphics_footprint_compressed(task_t task)
736 {
737 kern_return_t ret;
738 ledger_amount_t credit, debit;
739
740 ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
741 if (KERN_SUCCESS == ret) {
742 return credit - debit;
743 }
744
745 return 0;
746 }
747
748 uint64_t
get_task_neural_footprint(task_t task)749 get_task_neural_footprint(task_t task)
750 {
751 kern_return_t ret;
752 ledger_amount_t credit, debit;
753
754 ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
755 if (KERN_SUCCESS == ret) {
756 return credit - debit;
757 }
758
759 return 0;
760 }
761
762 uint64_t
get_task_neural_footprint_compressed(task_t task)763 get_task_neural_footprint_compressed(task_t task)
764 {
765 kern_return_t ret;
766 ledger_amount_t credit, debit;
767
768 ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
769 if (KERN_SUCCESS == ret) {
770 return credit - debit;
771 }
772
773 return 0;
774 }
775
/* Accumulated CPU time charged to the task, from the cpu_time ledger. */
uint64_t
get_task_cpu_time(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.cpu_time);
}
781
/* Read the task's load tag (relaxed atomic load). */
uint32_t
get_task_loadTag(task_t task)
{
	return os_atomic_load(&task->loadTag, relaxed);
}

/* Set the task's load tag, returning the previous value
 * (relaxed atomic exchange). */
uint32_t
set_task_loadTag(task_t task, uint32_t loadTag)
{
	return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
}
793
794
/* Owning task of a thread, from its read-only data. */
task_t
get_threadtask(thread_t th)
{
	return get_threadtask_early(th);
}
800
/*
 * Like get_threadtask(), but tolerant of very early boot, when a
 * thread may not yet have its read-only data allocated; returns
 * TASK_NULL in that case instead of dereferencing a NULL t_tro.
 */
task_t
get_threadtask_early(thread_t th)
{
	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
		if (th == THREAD_NULL || th->t_tro == NULL) {
			return TASK_NULL;
		}
	}
	return get_threadtask(th);
}
811
/*
 * Lowest valid address in the map.
 */
vm_map_offset_t
get_map_min(
	vm_map_t        map)
{
	return vm_map_min(map);
}
821
/*
 * Highest valid address in the map.
 */
vm_map_offset_t
get_map_max(
	vm_map_t        map)
{
	return vm_map_max(map);
}
/* Adjusted virtual size of the map, in bytes. */
vm_map_size_t
get_vmmap_size(
	vm_map_t        map)
{
	return vm_map_size(map);
}
/* VM page size used by the task's map, in bytes. */
int
get_task_page_size(
	task_t          task)
{
	return vm_map_page_size(task->map);
}
843
844 #if CONFIG_COREDUMP
845
/*
 * Count the map entries of a submap that overlap [start, end),
 * recursing into nested submaps.  The map lock is skipped when
 * running under the kernel debugger (not_in_kdp == 0), where
 * blocking on locks is not possible.
 */
static int
get_vmsubmap_entries(
	vm_map_t        map,
	vm_object_offset_t      start,
	vm_object_offset_t      end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	/* Skip entries entirely below the window of interest. */
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
881
/*
 * Count all map entries in the map, recursing into submaps via
 * get_vmsubmap_entries().  Locking is skipped under kdp, as above.
 */
int
get_vmmap_entries(
	vm_map_t        map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
912 #endif /* CONFIG_COREDUMP */
913
/* Number of outstanding user-level stop requests against the task. */
int
get_task_userstop(
	task_t task)
{
	return task->user_stop_count;
}

/* Number of outstanding user-level stop requests against the thread. */
int
get_thread_userstop(
	thread_t th)
{
	return th->user_stop_count;
}

/* Whether the task has been suspended via pid_suspend(). */
boolean_t
get_task_pidsuspended(
	task_t task)
{
	return task->pidsuspended;
}

/* Whether the task is currently frozen (hibernated by the freezer). */
boolean_t
get_task_frozen(
	task_t task)
{
	return task->frozen;
}
941
/*
 * TRUE if the thread has an unsafe abort pending: the aborted bits
 * equal TH_SFLAG_ABORT exactly, i.e. aborted but NOT safely-aborted
 * (TH_SFLAG_ABORTSAFELY clear).
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}
948
/*
 * This routine is like thread_should_abort() above. It checks to
 * see if the current thread is aborted. But unlike above, it also
 * checks to see if thread is safely aborted. If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	/* Unsafe abort: report it only if the thread is interruptible. */
	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	/* Safe abort: consume it (clear the flags) under the thread lock,
	 * re-checking after the lock is taken. */
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
979
/*
 * Invoke func_callback(thread, func_arg) on every thread of the
 * task, holding the task lock for the duration; the callback must
 * therefore not block or re-take the task lock.
 */
void
task_act_iterate_wth_args(
	task_t                  task,
	void                    (*func_callback)(thread_t, void *),
	void                    *func_arg)
{
	thread_t inc;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
998
999 #include <sys/bsdtask_info.h>
1000
/*
 * Populate a proc_taskinfo_internal structure with the task's VM,
 * scheduling, IPC and syscall statistics.  Holds the task lock
 * throughout; per-thread counters are read under each thread's
 * sched lock at splsched.
 */
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size = vm_map_adjusted_size(map);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &ptinfo->pti_resident_size);

	ptinfo->pti_policy = ((task != kernel_task)?
	    POLICY_TIMESHARE: POLICY_RR);

	/* Accumulate per-thread counters; idle processor threads are
	 * excluded from the statistics. */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	/* Live thread times = task totals minus times already folded in
	 * from terminated threads. */
	struct recount_times_mach term_times = recount_task_terminated_times(task);
	struct recount_times_mach total_times = recount_task_times(task);

	tinfo.threads_user = total_times.rtm_user - term_times.rtm_user;
	tinfo.threads_system = total_times.rtm_system - term_times.rtm_system;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_total_system = total_times.rtm_system;
	ptinfo->pti_total_user = total_times.rtm_user;

	/* The pti_* fields are 32-bit; clamp 64-bit counters to INT32_MAX. */
	ptinfo->pti_faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
	ptinfo->pti_pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
	ptinfo->pti_cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
	ptinfo->pti_messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
	ptinfo->pti_messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
	ptinfo->pti_syscalls_mach = (int32_t) MIN(task->syscalls_mach + syscalls_mach, INT32_MAX);
	ptinfo->pti_syscalls_unix = (int32_t) MIN(task->syscalls_unix + syscalls_unix, INT32_MAX);
	ptinfo->pti_csw = (int32_t) MIN(task->c_switch + cswitch, INT32_MAX);
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
1068
/*
 * Fill a proc_threadinfo_internal for the thread of 'task'
 * identified by thaddr — interpreted as a unique thread id when
 * thuniqueid is true, else as the user-space cthread self pointer.
 * Optionally returns the thread's current directory via vpp/vidp.
 * Returns 0 on success, 1 if the thread was not found or
 * thread_info_internal failed.  Holds the task lock throughout.
 */
int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			/* Convert the secs/usecs time_value_t pairs to ns. */
			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if (vpp != NULL) {
				bsd_threadcdir(get_bsdthread_info(thact), vpp, vidp);
			}
			bsd_getthreadname(get_bsdthread_info(thact), ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return err;
}
1117
1118 int
fill_taskthreadlist(task_t task,void * buffer,int thcount,bool thuniqueid)1119 fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
1120 {
1121 int numthr = 0;
1122 thread_t thact;
1123 uint64_t * uptr;
1124 uint64_t thaddr;
1125
1126 uptr = (uint64_t *)buffer;
1127
1128 task_lock(task);
1129
1130 for (thact = (thread_t)(void *)queue_first(&task->threads);
1131 !queue_end(&task->threads, (queue_entry_t)thact);) {
1132 thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
1133 *uptr++ = thaddr;
1134 numthr++;
1135 if (numthr >= thcount) {
1136 goto out;
1137 }
1138 thact = (thread_t)(void *)queue_next(&thact->task_threads);
1139 }
1140
1141 out:
1142 task_unlock(task);
1143 return (int)(numthr * sizeof(uint64_t));
1144 }
1145
/*
 * Fill scheduler info for a thread identified by (task, thread_id).
 * Currently only supported for the calling thread (see comment
 * below); returns -1 for any other thread, 0 on success.
 */
int
fill_taskthreadschedinfo(task_t task, uint64_t thread_id, struct proc_threadschedinfo_internal *thread_sched_info)
{
	int err = 0;

	thread_t thread = current_thread();

	/*
	 * Looking up threads is pretty expensive and not realtime-safe
	 * right now, requiring locking the task and iterating over all
	 * threads. As long as that is the case, we officially only
	 * support getting this info for the current thread.
	 */
	if (task != current_task() || thread_id != thread->thread_id) {
		return -1;
	}

#if SCHED_HYGIENE_DEBUG
	absolutetime_to_nanoseconds(thread->machine.int_time_mt, &thread_sched_info->int_time_ns);
#else
	/* Interrupt-time accounting is only kept with SCHED_HYGIENE_DEBUG. */
	(void)thread;
	thread_sched_info->int_time_ns = 0;
#endif

	return err;
}
1172
/* Number of threads currently in the task (unlocked snapshot). */
int
get_numthreads(task_t task)
{
	return task->thread_count;
}
1178
/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	assert(task != TASK_NULL);
	task_lock(task);

	/* CPU time, wakeups, cycles/instructions and energy come from
	 * the consolidated task power info query. */
	struct task_power_info_extra extra = { 0 };
	task_power_info_locked(task, &powerinfo, NULL, NULL, &extra);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;
	ri->ri_runnable_time = extra.runnable_time;
	ri->ri_cycles = extra.cycles;
	ri->ri_instructions = extra.instructions;
	ri->ri_pcycles = extra.pcycles;
	ri->ri_pinstructions = extra.pinstructions;
	ri->ri_user_ptime = extra.user_ptime;
	ri->ri_system_ptime = extra.system_ptime;
	ri->ri_energy_nj = extra.energy;
	ri->ri_penergy_nj = extra.penergy;

	/* Memory figures come from the task ledgers. */
	ri->ri_phys_footprint = get_task_phys_footprint(task);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
	    (ledger_amount_t *)&ri->ri_resident_size);
	ri->ri_wired_size = get_task_wired_mem(task);

	ri->ri_pageins = counter_load(&task->pageins);

	task_unlock(task);
	return 0;
}
1217
1218 void
fill_task_billed_usage(task_t task __unused,rusage_info_current * ri)1219 fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
1220 {
1221 bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
1222 bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
1223 }
1224
1225 int
fill_task_io_rusage(task_t task,rusage_info_current * ri)1226 fill_task_io_rusage(task_t task, rusage_info_current *ri)
1227 {
1228 assert(task != TASK_NULL);
1229 task_lock(task);
1230
1231 if (task->task_io_stats) {
1232 ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
1233 ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
1234 } else {
1235 /* I/O Stats unavailable */
1236 ri->ri_diskio_bytesread = 0;
1237 ri->ri_diskio_byteswritten = 0;
1238 }
1239 task_unlock(task);
1240 return 0;
1241 }
1242
1243 int
fill_task_qos_rusage(task_t task,rusage_info_current * ri)1244 fill_task_qos_rusage(task_t task, rusage_info_current *ri)
1245 {
1246 thread_t thread;
1247
1248 assert(task != TASK_NULL);
1249 task_lock(task);
1250
1251 /* Rollup QoS time of all the threads to task */
1252 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1253 if (thread->options & TH_OPT_IDLE_THREAD) {
1254 continue;
1255 }
1256
1257 thread_update_qos_cpu_time(thread);
1258 }
1259 ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
1260 ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
1261 ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
1262 ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
1263 ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
1264 ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
1265 ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
1266
1267 task_unlock(task);
1268 return 0;
1269 }
1270
1271 uint64_t
get_task_logical_writes(task_t task,bool external)1272 get_task_logical_writes(task_t task, bool external)
1273 {
1274 assert(task != TASK_NULL);
1275 struct ledger_entry_info lei;
1276 int entry = external ? task_ledgers.logical_writes_to_external :
1277 task_ledgers.logical_writes;
1278
1279 task_lock(task);
1280 ledger_get_entry_info(task->ledger, entry, &lei);
1281 task_unlock(task);
1282
1283 return lei.lei_balance;
1284 }
1285
1286 uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)1287 get_task_dispatchqueue_serialno_offset(task_t task)
1288 {
1289 uint64_t dq_serialno_offset = 0;
1290 void *bsd_info = get_bsdtask_info(task);
1291
1292 if (bsd_info) {
1293 dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(bsd_info);
1294 }
1295
1296 return dq_serialno_offset;
1297 }
1298
1299 uint64_t
get_task_dispatchqueue_label_offset(task_t task)1300 get_task_dispatchqueue_label_offset(task_t task)
1301 {
1302 uint64_t dq_label_offset = 0;
1303 void *bsd_info = get_bsdtask_info(task);
1304
1305 if (bsd_info) {
1306 dq_label_offset = get_dispatchqueue_label_offset_from_proc(bsd_info);
1307 }
1308
1309 return dq_label_offset;
1310 }
1311
1312 uint64_t
get_task_uniqueid(task_t task)1313 get_task_uniqueid(task_t task)
1314 {
1315 void *bsd_info = get_bsdtask_info(task);
1316
1317 if (bsd_info) {
1318 return proc_uniqueid_task(bsd_info, task);
1319 } else {
1320 return UINT64_MAX;
1321 }
1322 }
1323
1324 int
get_task_version(task_t task)1325 get_task_version(task_t task)
1326 {
1327 void *bsd_info = get_bsdtask_info(task);
1328
1329 if (bsd_info) {
1330 return proc_pidversion(bsd_info);
1331 } else {
1332 return INT_MAX;
1333 }
1334 }
1335
#if CONFIG_MACF
/*
 * Return the MAC label attached to the task for crash reporting.
 * The raw pointer is returned as-is; no reference is taken here.
 */
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}

/*
 * Install a new crash-reporting MAC label on the task.  The previous
 * pointer is simply overwritten; releasing any old label is the
 * caller's responsibility.
 */
void
set_task_crash_label(task_t task, struct label *label)
{
	task->crash_label = label;
}
#endif
1349
1350 int
fill_taskipctableinfo(task_t task,uint32_t * table_size,uint32_t * table_free)1351 fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
1352 {
1353 ipc_space_t space = task->itk_space;
1354 if (space == NULL) {
1355 return -1;
1356 }
1357
1358 is_read_lock(space);
1359 if (!is_active(space)) {
1360 is_read_unlock(space);
1361 return -1;
1362 }
1363
1364 *table_size = ipc_entry_table_count(is_active_table(space));
1365 *table_free = space->is_table_free;
1366
1367 is_read_unlock(space);
1368
1369 return 0;
1370 }
1371
1372 int
get_task_cdhash(task_t task,char cdhash[static CS_CDHASH_LEN])1373 get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
1374 {
1375 int result = 0;
1376 void *bsd_info = NULL;
1377
1378 task_lock(task);
1379 bsd_info = get_bsdtask_info(task);
1380 result = bsd_info ? proc_getcdhash(bsd_info, cdhash) : ESRCH;
1381 task_unlock(task);
1382
1383 return result;
1384 }
1385
1386 /* moved from ubc_subr.c */
1387 int
mach_to_bsd_errno(kern_return_t mach_err)1388 mach_to_bsd_errno(kern_return_t mach_err)
1389 {
1390 switch (mach_err) {
1391 case KERN_SUCCESS:
1392 return 0;
1393
1394 case KERN_INVALID_ADDRESS:
1395 case KERN_INVALID_ARGUMENT:
1396 case KERN_NOT_IN_SET:
1397 case KERN_INVALID_NAME:
1398 case KERN_INVALID_TASK:
1399 case KERN_INVALID_RIGHT:
1400 case KERN_INVALID_VALUE:
1401 case KERN_INVALID_CAPABILITY:
1402 case KERN_INVALID_HOST:
1403 case KERN_MEMORY_PRESENT:
1404 case KERN_INVALID_PROCESSOR_SET:
1405 case KERN_INVALID_POLICY:
1406 case KERN_ALREADY_WAITING:
1407 case KERN_DEFAULT_SET:
1408 case KERN_EXCEPTION_PROTECTED:
1409 case KERN_INVALID_LEDGER:
1410 case KERN_INVALID_MEMORY_CONTROL:
1411 case KERN_INVALID_SECURITY:
1412 case KERN_NOT_DEPRESSED:
1413 case KERN_LOCK_OWNED:
1414 case KERN_LOCK_OWNED_SELF:
1415 return EINVAL;
1416
1417 case KERN_NOT_RECEIVER:
1418 case KERN_NO_ACCESS:
1419 case KERN_POLICY_STATIC:
1420 return EACCES;
1421
1422 case KERN_NO_SPACE:
1423 case KERN_RESOURCE_SHORTAGE:
1424 case KERN_UREFS_OVERFLOW:
1425 case KERN_INVALID_OBJECT:
1426 return ENOMEM;
1427
1428 case KERN_MEMORY_FAILURE:
1429 case KERN_MEMORY_ERROR:
1430 case KERN_PROTECTION_FAILURE:
1431 return EFAULT;
1432
1433 case KERN_POLICY_LIMIT:
1434 case KERN_CODESIGN_ERROR:
1435 case KERN_DENIED:
1436 return EPERM;
1437
1438 case KERN_ALREADY_IN_SET:
1439 case KERN_NAME_EXISTS:
1440 case KERN_RIGHT_EXISTS:
1441 return EEXIST;
1442
1443 case KERN_ABORTED:
1444 return EINTR;
1445
1446 case KERN_TERMINATED:
1447 case KERN_LOCK_SET_DESTROYED:
1448 case KERN_LOCK_UNSTABLE:
1449 case KERN_SEMAPHORE_DESTROYED:
1450 case KERN_NOT_FOUND:
1451 case KERN_NOT_WAITING:
1452 return ENOENT;
1453
1454 case KERN_RPC_SERVER_TERMINATED:
1455 return ECONNRESET;
1456
1457 case KERN_NOT_SUPPORTED:
1458 return ENOTSUP;
1459
1460 case KERN_NODE_DOWN:
1461 return ENETDOWN;
1462
1463 case KERN_OPERATION_TIMED_OUT:
1464 return ETIMEDOUT;
1465
1466 default:
1467 return EIO; /* 5 == KERN_FAILURE */
1468 }
1469 }
1470
1471 kern_return_t
bsd_to_mach_failure(int bsd_err)1472 bsd_to_mach_failure(int bsd_err)
1473 {
1474 switch (bsd_err) {
1475 case EIO:
1476 case EACCES:
1477 case ENOMEM:
1478 case EFAULT:
1479 return KERN_MEMORY_ERROR;
1480
1481 case EINVAL:
1482 return KERN_INVALID_ARGUMENT;
1483
1484 case ETIMEDOUT:
1485 case EBUSY:
1486 return KERN_OPERATION_TIMED_OUT;
1487
1488 case ECONNRESET:
1489 return KERN_RPC_SERVER_TERMINATED;
1490
1491 case ENOTSUP:
1492 return KERN_NOT_SUPPORTED;
1493
1494 case ENETDOWN:
1495 return KERN_NODE_DOWN;
1496
1497 case ENOENT:
1498 return KERN_NOT_FOUND;
1499
1500 case EINTR:
1501 return KERN_ABORTED;
1502
1503 case EPERM:
1504 return KERN_DENIED;
1505
1506 case EEXIST:
1507 return KERN_ALREADY_IN_SET;
1508
1509 default:
1510 return KERN_FAILURE;
1511 }
1512 }
1513