1 /*
2 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <mach/mach_types.h>
29 #include <mach/machine/vm_param.h>
30 #include <mach/task.h>
31
32 #include <kern/kern_types.h>
33 #include <kern/ledger.h>
34 #include <kern/processor.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/spl.h>
38 #include <kern/ast.h>
39 #include <ipc/ipc_port.h>
40 #include <ipc/ipc_object.h>
41 #include <vm/vm_map.h>
42 #include <vm/vm_kern.h>
43 #include <vm/pmap.h>
44 #include <vm/vm_protos.h> /* last */
45 #include <sys/resource.h>
46 #include <sys/signal.h>
47 #include <sys/errno.h>
48 #include <sys/proc_require.h>
49
50 #if MONOTONIC
51 #include <kern/monotonic.h>
52 #include <machine/monotonic.h>
53 #endif /* MONOTONIC */
54
55 #include <machine/limits.h>
56 #include <sys/codesign.h> /* CS_CDHASH_LEN */
57
58 #undef thread_should_halt
59
60 /* BSD KERN COMPONENT INTERFACE */
61
62 extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
63
64 thread_t get_firstthread(task_t);
65 int get_task_userstop(task_t);
66 int get_thread_userstop(thread_t);
67 boolean_t current_thread_aborted(void);
68 void task_act_iterate_wth_args_locked(task_t, void (*)(thread_t, void *), void *);
69 void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
70 kern_return_t get_signalact(task_t, thread_t *, int);
71 int fill_task_rusage(task_t task, rusage_info_current *ri);
72 int fill_task_io_rusage(task_t task, rusage_info_current *ri);
73 int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
74 uint64_t get_task_logical_writes(task_t task, bool external);
75 void fill_task_billed_usage(task_t task, rusage_info_current *ri);
76 void task_bsdtask_kill(task_t);
77
78 extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
79 extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
80 extern uint64_t proc_uniqueid_task(void *p, void *t);
81 extern int proc_pidversion(void *p);
82 extern int proc_getcdhash(void *p, char *cdhash);
83
84 int mach_to_bsd_errno(kern_return_t mach_err);
85 kern_return_t bsd_to_mach_failure(int bsd_err);
86
87 #if MACH_BSD
88 extern void psignal(void *, int);
89 #endif
90
91 /*
92 *
93 */
94 void *
get_bsdtask_info(task_t t)95 get_bsdtask_info(task_t t)
96 {
97 void *proc_from_task = task_get_proc_raw(t);
98 proc_require(proc_from_task, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_ALL);
99 return task_has_proc(t) ? proc_from_task : NULL;
100 }
101
102 void
task_bsdtask_kill(task_t t)103 task_bsdtask_kill(task_t t)
104 {
105 void * bsd_info = get_bsdtask_info(t);
106 if (bsd_info != NULL) {
107 psignal(bsd_info, SIGKILL);
108 }
109 }
110 /*
111 *
112 */
113 void *
get_bsdthreadtask_info(thread_t th)114 get_bsdthreadtask_info(thread_t th)
115 {
116 return get_thread_ro(th)->tro_proc;
117 }
118
119 /*
120 *
121 */
122 void
set_bsdtask_info(task_t t,void * v)123 set_bsdtask_info(task_t t, void * v)
124 {
125 void *proc_from_task = task_get_proc_raw(t);
126 if (v == NULL) {
127 task_clear_has_proc(t);
128 } else {
129 if (v != proc_from_task) {
130 panic("set_bsdtask_info trying to set random bsd_info %p", v);
131 }
132 task_set_has_proc(t);
133 }
134 }
135
/* Abort path for get_thread_ro(): a thread's read-only struct must point
 * back at its owning thread; anything else means memory corruption. */
__abortlike
static void
__thread_ro_circularity_panic(thread_t th, thread_ro_t tro)
{
	panic("tro %p points back to %p instead of %p", tro, tro->tro_owner, th);
}
142
/* Fast path: return the thread's read-only struct without any
 * integrity checks (compare get_thread_ro() below). */
__attribute__((always_inline))
thread_ro_t
get_thread_ro_unchecked(thread_t th)
{
	return th->t_tro;
}
149
/* Validated accessor for the thread's read-only struct: checks the
 * pointer is a live thread_ro zone element and that it points back at
 * its owning thread, panicking on any mismatch. */
thread_ro_t
get_thread_ro(thread_t th)
{
	thread_ro_t tro = th->t_tro;

	zone_require_ro(ZONE_ID_THREAD_RO, sizeof(struct thread_ro), tro);
	if (tro->tro_owner != th) {
		__thread_ro_circularity_panic(th, tro);
	}
	return tro;
}
161
/* Unchecked read-only struct of the current thread. */
__attribute__((always_inline))
thread_ro_t
current_thread_ro_unchecked(void)
{
	return get_thread_ro_unchecked(current_thread());
}
168
/* Validated read-only struct of the current thread. */
thread_ro_t
current_thread_ro(void)
{
	return get_thread_ro(current_thread());
}
174
/* Detach the BSD proc pointer from the thread's read-only struct
 * (read-only memory, so it must go through the zalloc_ro API). */
void
clear_thread_ro_proc(thread_t th)
{
	thread_ro_t tro = get_thread_ro(th);

	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_proc);
}
182
183 struct uthread *
get_bsdthread_info(thread_t th)184 get_bsdthread_info(thread_t th)
185 {
186 return (struct uthread *)((uintptr_t)th + sizeof(struct thread));
187 }
188
189 thread_t
get_machthread(struct uthread * uth)190 get_machthread(struct uthread *uth)
191 {
192 return (struct thread *)((uintptr_t)uth - sizeof(struct thread));
193 }
194
195 /*
196 * This is used to remember any FS error from VNOP_PAGEIN code when
197 * invoked under vm_fault(). The value is an errno style value. It can
198 * be retrieved by exception handlers using thread_get_state().
199 */
200 void
set_thread_pagein_error(thread_t th,int error)201 set_thread_pagein_error(thread_t th, int error)
202 {
203 assert(th == current_thread());
204 if (error == 0 || th->t_pagein_error == 0) {
205 th->t_pagein_error = error;
206 }
207 }
208
209 #if defined(__x86_64__)
210 /*
211 * Returns non-zero if the thread has a non-NULL task
212 * and that task has an LDT.
213 */
214 int
thread_task_has_ldt(thread_t th)215 thread_task_has_ldt(thread_t th)
216 {
217 task_t task = get_threadtask(th);
218 return task && task->i386_ldt != 0;
219 }
220 #endif /* __x86_64__ */
221
222 /*
223 * XXX
224 */
int get_thread_lock_count(thread_t th); /* forced forward */
/* Stubbed lock-held counter; see TODO below. */
int
get_thread_lock_count(thread_t th __unused)
{
	/*
	 * TODO: one day: resurrect counting locks held to disallow
	 * holding locks across upcalls.
	 *
	 * never worked on arm.
	 */
	return 0;
}
237
238 /*
239 * Returns a thread reference.
240 */
/* Return the first thread on the task's thread list with a reference
 * held, or THREAD_NULL if the task is inactive or has no threads.
 * Caller is responsible for dropping the reference. */
thread_t
get_firstthread(task_t task)
{
	thread_t thread = THREAD_NULL;
	task_lock(task);

	/* An inactive (terminating) task has no stable thread list. */
	if (!task->active) {
		task_unlock(task);
		return THREAD_NULL;
	}

	thread = (thread_t)(void *)queue_first(&task->threads);

	/* Empty list: queue_first() returned the queue head itself. */
	if (queue_end(&task->threads, (queue_entry_t)thread)) {
		task_unlock(task);
		return THREAD_NULL;
	}

	/* Take the ref under the task lock so the thread cannot be
	 * reaped before the caller sees it. */
	thread_reference(thread);
	task_unlock(task);
	return thread;
}
263
/*
 * Pick a thread in "task" suitable for signal delivery: active and not
 * terminating (aborted without the safe bit).  Optionally sets the BSD
 * AST on the chosen thread.  *result_out receives the thread (with its
 * mutex released) or THREAD_NULL.  Returns KERN_FAILURE if the task is
 * inactive or no eligible thread exists.
 */
kern_return_t
get_signalact(
	task_t task,
	thread_t *result_out,
	int setast)
{
	kern_return_t result = KERN_SUCCESS;
	thread_t inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		/* Thread mutex taken inside the task lock; held across the
		 * break so the chosen thread stays stable below. */
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}
312
313
/*
 * Check that a specific "thread" of "task" is eligible for signal
 * delivery (active, not terminating).  Optionally sets the BSD AST on
 * it.  Returns KERN_SUCCESS only when the thread was found on the
 * task's list and is eligible.
 */
kern_return_t
check_actforsig(
	task_t task,
	thread_t thread,
	int setast)
{
	kern_return_t result = KERN_FAILURE;
	thread_t inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			/* Mutex stays held on success; released below after
			 * the AST is set. */
			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}
361
/* Accessor for the task's resource ledger. */
ledger_t
get_task_ledger(task_t t)
{
	return t->ledger;
}
367
368 /*
369 * This is only safe to call from a thread executing in
370 * in the task's context or if the task is locked. Otherwise,
371 * the map could be switched for the task (and freed) before
372 * we go to return it here.
373 */
/* Borrowed (unreferenced) pointer to the task's VM map; see the safety
 * note in the comment above. */
vm_map_t
get_task_map(task_t t)
{
	return t->map;
}
379
/* Return the task's VM map with a reference held, or VM_MAP_NULL if the
 * task is NULL or inactive.  Caller must vm_map_deallocate(). */
vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	/* The map may be torn down once the task goes inactive. */
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	/* Reference taken under the task lock so the map cannot be
	 * swapped out and freed underneath us. */
	vm_map_reference(m);
	task_unlock(t);
	return m;
}
399
400 /*
401 *
402 */
/* Accessor for the task's IPC space. */
ipc_space_t
get_task_ipcspace(task_t t)
{
	return t->itk_space;
}
408
/* Number of threads ("activations") currently in the task. */
int
get_task_numacts(task_t t)
{
	return t->thread_count;
}
414
/* Does the current task need the 64-bit register set for its signal
 * handlers?  Returns 1 when the task uses 64-bit data, else 0. */
int
is_64signalregset(void)
{
	return task_has_64Bit_data(current_task()) ? 1 : 0;
}
425
426 /*
427 * Swap in a new map for the task/thread pair; the old map reference is
428 * returned. Also does a pmap switch if thread provided is current thread.
429 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	/* Only the current thread requires an immediate pmap switch. */
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != get_threadtask(thread)) {
		panic("swap_task_map");
	}

	task_lock(task);
	/* Keep the map/pmap swap atomic with respect to this CPU. */
	mp_disable_preemption();

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

	/* Caller inherits the reference previously held on the old map. */
	return old_map;
}
455
456 /*
457 *
458 * This is only safe to call from a thread executing in
459 * in the task's context or if the task is locked. Otherwise,
460 * the map could be switched for the task (and freed) before
461 * we go to return it here.
462 */
/* Borrowed pointer to the task map's physical map; same safety caveats
 * as the comment above. */
pmap_t
get_task_pmap(task_t t)
{
	return t->map->pmap;
}
468
469 /*
470 *
471 */
472 uint64_t
get_task_resident_size(task_t task)473 get_task_resident_size(task_t task)
474 {
475 uint64_t val;
476
477 ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
478 return val;
479 }
480
481 uint64_t
get_task_compressed(task_t task)482 get_task_compressed(task_t task)
483 {
484 uint64_t val;
485
486 ledger_get_balance(task->ledger, task_ledgers.internal_compressed, (ledger_amount_t *) &val);
487 return val;
488 }
489
490 uint64_t
get_task_resident_max(task_t task)491 get_task_resident_max(task_t task)
492 {
493 uint64_t val;
494
495 ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
496 return val;
497 }
498
499 /*
500 * Get the balance for a given field in the task ledger.
501 * Returns 0 if the entry is invalid.
502 */
static uint64_t
get_task_ledger_balance(task_t task, int entry)
{
	/* balance stays 0 when ledger_get_balance() fails, which is how
	 * invalid entries read back as 0 for callers. */
	ledger_amount_t balance = 0;

	ledger_get_balance(task->ledger, entry, &balance);
	return balance;
}
511
512 uint64_t
get_task_purgeable_size(task_t task)513 get_task_purgeable_size(task_t task)
514 {
515 kern_return_t ret;
516 ledger_amount_t balance = 0;
517 uint64_t volatile_size = 0;
518
519 ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
520 if (ret != KERN_SUCCESS) {
521 return 0;
522 }
523
524 volatile_size += balance;
525
526 ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
527 if (ret != KERN_SUCCESS) {
528 return 0;
529 }
530
531 volatile_size += balance;
532
533 return volatile_size;
534 }
535
536 /*
537 *
538 */
/* Balance of the task's phys_footprint ledger entry; 0 if invalid. */
uint64_t
get_task_phys_footprint(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.phys_footprint);
}
544
#if CONFIG_LEDGER_INTERVAL_MAX
/*
 * Peak phys_footprint over the current ledger interval; "reset" starts
 * a new interval.  Returns 0 if the entry is unavailable.
 */
uint64_t
get_task_phys_footprint_interval_max(task_t task, int reset)
{
	ledger_amount_t interval_max = 0;

	if (ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint,
	    &interval_max, reset) != KERN_SUCCESS) {
		return 0;
	}
	return interval_max;
}
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
564
565 /*
566 *
567 */
568 uint64_t
get_task_phys_footprint_lifetime_max(task_t task)569 get_task_phys_footprint_lifetime_max(task_t task)
570 {
571 kern_return_t ret;
572 ledger_amount_t max;
573
574 ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);
575
576 if (KERN_SUCCESS == ret) {
577 return max;
578 }
579
580 return 0;
581 }
582
583 /*
584 *
585 */
586 uint64_t
get_task_phys_footprint_limit(task_t task)587 get_task_phys_footprint_limit(task_t task)
588 {
589 kern_return_t ret;
590 ledger_amount_t max;
591
592 ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
593 if (KERN_SUCCESS == ret) {
594 return max;
595 }
596
597 return 0;
598 }
599
/* Balance of the task's "internal" ledger entry; 0 if invalid. */
uint64_t
get_task_internal(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal);
}
605
/* Balance of the "internal_compressed" ledger entry; 0 if invalid. */
uint64_t
get_task_internal_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.internal_compressed);
}
611
/* Balance of the "purgeable_nonvolatile" ledger entry; 0 if invalid. */
uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
}
617
/* Balance of the "purgeable_nonvolatile_compressed" ledger entry; 0 if invalid. */
uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
}
623
/* Balance of the "alternate_accounting" ledger entry; 0 if invalid. */
uint64_t
get_task_alternate_accounting(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
}
629
/* Balance of the "alternate_accounting_compressed" ledger entry; 0 if invalid. */
uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
}
635
/* Balance of the "page_table" ledger entry; 0 if invalid. */
uint64_t
get_task_page_table(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.page_table);
}
641
#if CONFIG_FREEZE
/* Balance of the "frozen_to_swap" ledger entry; 0 if invalid. */
uint64_t
get_task_frozen_to_swap(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
}
#endif /* CONFIG_FREEZE */
649
/* Balance of the "iokit_mapped" ledger entry; 0 if invalid. */
uint64_t
get_task_iokit_mapped(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
}
655
/* Balance of the "network_nonvolatile" ledger entry; 0 if invalid. */
uint64_t
get_task_network_nonvolatile(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
}
661
/* Balance of the "network_nonvolatile_compressed" ledger entry; 0 if invalid. */
uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
}
667
/* Balance of the "wired_mem" ledger entry; 0 if invalid. */
uint64_t
get_task_wired_mem(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.wired_mem);
}
673
674 uint64_t
get_task_tagged_footprint(task_t task)675 get_task_tagged_footprint(task_t task)
676 {
677 kern_return_t ret;
678 ledger_amount_t credit, debit;
679
680 ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
681 if (KERN_SUCCESS == ret) {
682 return credit - debit;
683 }
684
685 return 0;
686 }
687
688 uint64_t
get_task_tagged_footprint_compressed(task_t task)689 get_task_tagged_footprint_compressed(task_t task)
690 {
691 kern_return_t ret;
692 ledger_amount_t credit, debit;
693
694 ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
695 if (KERN_SUCCESS == ret) {
696 return credit - debit;
697 }
698
699 return 0;
700 }
701
702 uint64_t
get_task_media_footprint(task_t task)703 get_task_media_footprint(task_t task)
704 {
705 kern_return_t ret;
706 ledger_amount_t credit, debit;
707
708 ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
709 if (KERN_SUCCESS == ret) {
710 return credit - debit;
711 }
712
713 return 0;
714 }
715
716 uint64_t
get_task_media_footprint_compressed(task_t task)717 get_task_media_footprint_compressed(task_t task)
718 {
719 kern_return_t ret;
720 ledger_amount_t credit, debit;
721
722 ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
723 if (KERN_SUCCESS == ret) {
724 return credit - debit;
725 }
726
727 return 0;
728 }
729
730 uint64_t
get_task_graphics_footprint(task_t task)731 get_task_graphics_footprint(task_t task)
732 {
733 kern_return_t ret;
734 ledger_amount_t credit, debit;
735
736 ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
737 if (KERN_SUCCESS == ret) {
738 return credit - debit;
739 }
740
741 return 0;
742 }
743
744
745 uint64_t
get_task_graphics_footprint_compressed(task_t task)746 get_task_graphics_footprint_compressed(task_t task)
747 {
748 kern_return_t ret;
749 ledger_amount_t credit, debit;
750
751 ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
752 if (KERN_SUCCESS == ret) {
753 return credit - debit;
754 }
755
756 return 0;
757 }
758
759 uint64_t
get_task_neural_footprint(task_t task)760 get_task_neural_footprint(task_t task)
761 {
762 kern_return_t ret;
763 ledger_amount_t credit, debit;
764
765 ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
766 if (KERN_SUCCESS == ret) {
767 return credit - debit;
768 }
769
770 return 0;
771 }
772
773 uint64_t
get_task_neural_footprint_compressed(task_t task)774 get_task_neural_footprint_compressed(task_t task)
775 {
776 kern_return_t ret;
777 ledger_amount_t credit, debit;
778
779 ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
780 if (KERN_SUCCESS == ret) {
781 return credit - debit;
782 }
783
784 return 0;
785 }
786
/* Balance of the "cpu_time" ledger entry; 0 if invalid. */
uint64_t
get_task_cpu_time(task_t task)
{
	return get_task_ledger_balance(task, task_ledgers.cpu_time);
}
792
/* Relaxed atomic read of the task's load tag. */
uint32_t
get_task_loadTag(task_t task)
{
	return os_atomic_load(&task->loadTag, relaxed);
}
798
/* Atomically set the task's load tag; returns the previous value. */
uint32_t
set_task_loadTag(task_t task, uint32_t loadTag)
{
	return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
}
804
805
/* The task owning "th", from the thread's validated read-only struct. */
task_t
get_threadtask(thread_t th)
{
	return get_thread_ro(th)->tro_task;
}
811
task_t
get_threadtask_early(thread_t th)
{
	/* Very early in boot a thread may not have its read-only struct
	 * wired up yet; tolerate NULL in that window only. */
	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
		if (th == THREAD_NULL || th->t_tro == NULL) {
			return TASK_NULL;
		}
	}
	return get_threadtask(th);
}
822
823 /*
824 *
825 */
/* Minimum (start) offset of the map. */
vm_map_offset_t
get_map_min(
	vm_map_t map)
{
	return vm_map_min(map);
}
832
833 /*
834 *
835 */
/* Maximum (end) offset of the map. */
vm_map_offset_t
get_map_max(
	vm_map_t map)
{
	return vm_map_max(map);
}
/* Adjusted virtual size of the map. */
vm_map_size_t
get_vmmap_size(
	vm_map_t map)
{
	return vm_map_adjusted_size(map);
}
/* VM page size in effect for the task's map. */
int
get_task_page_size(
	task_t task)
{
	return vm_map_page_size(task->map);
}
854
855 #if CONFIG_COREDUMP
856
/* Count the map entries of "map" overlapping [start, end), recursing
 * into nested submaps.  Locks are skipped in kdp (debugger) context,
 * where blocking is not allowed. */
static int
get_vmsubmap_entries(
	vm_map_t map,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	/* Skip entries wholly before the window. */
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	/* Count entries in the window; a submap contributes the count of
	 * its own entries for the corresponding offset range. */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
892
/* Count all map entries of "map", recursing into submaps.  Locks are
 * skipped in kdp (debugger) context, where blocking is not allowed. */
int
get_vmmap_entries(
	vm_map_t map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
923 #endif /* CONFIG_COREDUMP */
924
/* Task-level user-stop (e.g. ptrace/suspend) count. */
int
get_task_userstop(
	task_t task)
{
	return task->user_stop_count;
}
931
/* Thread-level user-stop count. */
int
get_thread_userstop(
	thread_t th)
{
	return th->user_stop_count;
}
938
/* TRUE if the task is suspended via pid_suspend. */
boolean_t
get_task_pidsuspended(
	task_t task)
{
	return task->pidsuspended;
}
945
/* TRUE if the task has been frozen (memory freezer). */
boolean_t
get_task_frozen(
	task_t task)
{
	return task->frozen;
}
952
/* TRUE if the thread is aborted outright (not a safe abort). */
boolean_t
thread_should_abort(
	thread_t th)
{
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}
959
960 /*
961 * This routine is like thread_should_abort() above. It checks to
962 * see if the current thread is aborted. But unlike above, it also
963 * checks to see if thread is safely aborted. If so, it returns
964 * that fact, and clears the condition (safe aborts only should
965 * have a single effect, and a poll of the abort status
966 * qualifies.
967 */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	/* Aborted outright and interruptible: report it. */
	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	/* Safe abort is single-shot (see block comment above): clear it,
	 * re-checking the flag under the thread lock at splsched. */
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
990
991 /* Iterate over a task that is already protected by a held lock. */
992 void
task_act_iterate_wth_args_locked(task_t task,void (* func_callback)(thread_t,void *),void * func_arg)993 task_act_iterate_wth_args_locked(
994 task_t task,
995 void (*func_callback)(thread_t, void *),
996 void *func_arg)
997 {
998 for (thread_t inc = (thread_t)(void *)queue_first(&task->threads);
999 !queue_end(&task->threads, (queue_entry_t)inc);) {
1000 (void) (*func_callback)(inc, func_arg);
1001 inc = (thread_t)(void *)queue_next(&inc->task_threads);
1002 }
1003 }
1004
/* Locking wrapper around task_act_iterate_wth_args_locked(). */
void
task_act_iterate_wth_args(
	task_t task,
	void (*func_callback)(thread_t, void *),
	void *func_arg)
{
	task_lock(task);
	task_act_iterate_wth_args_locked(task, func_callback, func_arg);
	task_unlock(task);
}
1015
1016 #include <sys/bsdtask_info.h>
1017
/* Populate "ptinfo" with a snapshot of the task's VM, CPU-time, and
 * syscall/context-switch statistics, taken under the task lock. */
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size = vm_map_adjusted_size(map);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &ptinfo->pti_resident_size);

	ptinfo->pti_policy = ((task != kernel_task)?
	    POLICY_TIMESHARE: POLICY_RR);

	/* Accumulate per-thread counters; idle threads are excluded. */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		/* Per-thread counters are protected by the thread lock
		 * at splsched. */
		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	/* Live thread times = total task times minus times already folded
	 * in from terminated threads. */
	struct recount_times_mach term_times = recount_task_terminated_times(task);
	struct recount_times_mach total_times = recount_task_times(task);

	tinfo.threads_user = total_times.rtm_user - term_times.rtm_user;
	tinfo.threads_system = total_times.rtm_system - term_times.rtm_system;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_total_system = total_times.rtm_system;
	ptinfo->pti_total_user = total_times.rtm_user;

	/* 64-bit counters are clamped into the 32-bit proc_taskinfo fields. */
	ptinfo->pti_faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
	ptinfo->pti_pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
	ptinfo->pti_cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
	ptinfo->pti_messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
	ptinfo->pti_messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
	ptinfo->pti_syscalls_mach = (int32_t) MIN(task->syscalls_mach + syscalls_mach, INT32_MAX);
	ptinfo->pti_syscalls_unix = (int32_t) MIN(task->syscalls_unix + syscalls_unix, INT32_MAX);
	ptinfo->pti_csw = (int32_t) MIN(task->c_switch + cswitch, INT32_MAX);
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
1085
/*
 * Locate the thread of "task" identified by "thaddr" (the thread's
 * unique id when "thuniqueid" is set, otherwise its cthread_self value)
 * and fill "ptinfo" from its basic info plus scheduler priorities.
 * Optionally reports the thread's cdir vnode via vpp/vidp.
 * Returns 0 on success, 1 if the thread was not found or the info call
 * failed.
 */
int
fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)thact);) {
		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			/* time_value_t (sec/usec) converted to nanoseconds. */
			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->base_pri;
			ptinfo->pth_maxpriority = thact->max_priority;

			if (vpp != NULL) {
				bsd_threadcdir(get_bsdthread_info(thact), vpp, vidp);
			}
			bsd_getthreadname(get_bsdthread_info(thact), ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	/* Fell off the list: no matching thread. */
	err = 1;

out:
	task_unlock(task);
	return err;
}
1134
1135 int
fill_taskthreadlist(task_t task,void * buffer,int thcount,bool thuniqueid)1136 fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
1137 {
1138 int numthr = 0;
1139 thread_t thact;
1140 uint64_t * uptr;
1141 uint64_t thaddr;
1142
1143 uptr = (uint64_t *)buffer;
1144
1145 task_lock(task);
1146
1147 for (thact = (thread_t)(void *)queue_first(&task->threads);
1148 !queue_end(&task->threads, (queue_entry_t)thact);) {
1149 thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
1150 *uptr++ = thaddr;
1151 numthr++;
1152 if (numthr >= thcount) {
1153 goto out;
1154 }
1155 thact = (thread_t)(void *)queue_next(&thact->task_threads);
1156 }
1157
1158 out:
1159 task_unlock(task);
1160 return (int)(numthr * sizeof(uint64_t));
1161 }
1162
/* Fill scheduler info (currently interrupt time) for a thread.  Only
 * supported for the calling thread of the calling task -- see comment
 * below; returns -1 otherwise, 0 on success. */
int
fill_taskthreadschedinfo(task_t task, uint64_t thread_id, struct proc_threadschedinfo_internal *thread_sched_info)
{
	int err = 0;

	thread_t thread = current_thread();

	/*
	 * Looking up threads is pretty expensive and not realtime-safe
	 * right now, requiring locking the task and iterating over all
	 * threads. As long as that is the case, we officially only
	 * support getting this info for the current thread.
	 */
	if (task != current_task() || thread_id != thread->thread_id) {
		return -1;
	}

#if SCHED_HYGIENE_DEBUG
	absolutetime_to_nanoseconds(thread->machine.int_time_mt, &thread_sched_info->int_time_ns);
#else
	/* Interrupt-time accounting only exists with SCHED_HYGIENE_DEBUG. */
	(void)thread;
	thread_sched_info->int_time_ns = 0;
#endif

	return err;
}
1189
1190 int
get_numthreads(task_t task)1191 get_numthreads(task_t task)
1192 {
1193 return task->thread_count;
1194 }
1195
1196 /*
1197 * Gather the various pieces of info about the designated task,
1198 * and collect it all into a single rusage_info.
1199 */
1200 int
fill_task_rusage(task_t task,rusage_info_current * ri)1201 fill_task_rusage(task_t task, rusage_info_current *ri)
1202 {
1203 struct task_power_info powerinfo;
1204
1205 assert(task != TASK_NULL);
1206 task_lock(task);
1207
1208 struct task_power_info_extra extra = { 0 };
1209 task_power_info_locked(task, &powerinfo, NULL, NULL, &extra);
1210 ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
1211 ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
1212 ri->ri_user_time = powerinfo.total_user;
1213 ri->ri_system_time = powerinfo.total_system;
1214 ri->ri_runnable_time = extra.runnable_time;
1215 ri->ri_cycles = extra.cycles;
1216 ri->ri_instructions = extra.instructions;
1217 ri->ri_pcycles = extra.pcycles;
1218 ri->ri_pinstructions = extra.pinstructions;
1219 ri->ri_user_ptime = extra.user_ptime;
1220 ri->ri_system_ptime = extra.system_ptime;
1221 ri->ri_energy_nj = extra.energy;
1222 ri->ri_penergy_nj = extra.penergy;
1223
1224 ri->ri_phys_footprint = get_task_phys_footprint(task);
1225 ledger_get_balance(task->ledger, task_ledgers.phys_mem,
1226 (ledger_amount_t *)&ri->ri_resident_size);
1227 ri->ri_wired_size = get_task_wired_mem(task);
1228
1229 ri->ri_pageins = counter_load(&task->pageins);
1230
1231 task_unlock(task);
1232 return 0;
1233 }
1234
1235 void
fill_task_billed_usage(task_t task __unused,rusage_info_current * ri)1236 fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
1237 {
1238 bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
1239 bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
1240 }
1241
1242 int
fill_task_io_rusage(task_t task,rusage_info_current * ri)1243 fill_task_io_rusage(task_t task, rusage_info_current *ri)
1244 {
1245 assert(task != TASK_NULL);
1246 task_lock(task);
1247
1248 if (task->task_io_stats) {
1249 ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
1250 ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
1251 } else {
1252 /* I/O Stats unavailable */
1253 ri->ri_diskio_bytesread = 0;
1254 ri->ri_diskio_byteswritten = 0;
1255 }
1256 task_unlock(task);
1257 return 0;
1258 }
1259
1260 int
fill_task_qos_rusage(task_t task,rusage_info_current * ri)1261 fill_task_qos_rusage(task_t task, rusage_info_current *ri)
1262 {
1263 thread_t thread;
1264
1265 assert(task != TASK_NULL);
1266 task_lock(task);
1267
1268 /* Rollup QoS time of all the threads to task */
1269 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1270 if (thread->options & TH_OPT_IDLE_THREAD) {
1271 continue;
1272 }
1273
1274 thread_update_qos_cpu_time(thread);
1275 }
1276 ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
1277 ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
1278 ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
1279 ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
1280 ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
1281 ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
1282 ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
1283
1284 task_unlock(task);
1285 return 0;
1286 }
1287
1288 uint64_t
get_task_logical_writes(task_t task,bool external)1289 get_task_logical_writes(task_t task, bool external)
1290 {
1291 assert(task != TASK_NULL);
1292 struct ledger_entry_info lei;
1293 int entry = external ? task_ledgers.logical_writes_to_external :
1294 task_ledgers.logical_writes;
1295
1296 task_lock(task);
1297 ledger_get_entry_info(task->ledger, entry, &lei);
1298 task_unlock(task);
1299
1300 return lei.lei_balance;
1301 }
1302
1303 uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)1304 get_task_dispatchqueue_serialno_offset(task_t task)
1305 {
1306 uint64_t dq_serialno_offset = 0;
1307 void *bsd_info = get_bsdtask_info(task);
1308
1309 if (bsd_info) {
1310 dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(bsd_info);
1311 }
1312
1313 return dq_serialno_offset;
1314 }
1315
1316 uint64_t
get_task_dispatchqueue_label_offset(task_t task)1317 get_task_dispatchqueue_label_offset(task_t task)
1318 {
1319 uint64_t dq_label_offset = 0;
1320 void *bsd_info = get_bsdtask_info(task);
1321
1322 if (bsd_info) {
1323 dq_label_offset = get_dispatchqueue_label_offset_from_proc(bsd_info);
1324 }
1325
1326 return dq_label_offset;
1327 }
1328
1329 uint64_t
get_task_uniqueid(task_t task)1330 get_task_uniqueid(task_t task)
1331 {
1332 void *bsd_info = get_bsdtask_info(task);
1333
1334 if (bsd_info) {
1335 return proc_uniqueid_task(bsd_info, task);
1336 } else {
1337 return UINT64_MAX;
1338 }
1339 }
1340
1341 int
get_task_version(task_t task)1342 get_task_version(task_t task)
1343 {
1344 void *bsd_info = get_bsdtask_info(task);
1345
1346 if (bsd_info) {
1347 return proc_pidversion(bsd_info);
1348 } else {
1349 return INT_MAX;
1350 }
1351 }
1352
1353 #if CONFIG_MACF
1354 struct label *
get_task_crash_label(task_t task)1355 get_task_crash_label(task_t task)
1356 {
1357 return task->crash_label;
1358 }
1359
1360 void
set_task_crash_label(task_t task,struct label * label)1361 set_task_crash_label(task_t task, struct label *label)
1362 {
1363 task->crash_label = label;
1364 }
1365 #endif
1366
1367 int
fill_taskipctableinfo(task_t task,uint32_t * table_size,uint32_t * table_free)1368 fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
1369 {
1370 ipc_space_t space = task->itk_space;
1371 if (space == NULL) {
1372 return -1;
1373 }
1374
1375 is_read_lock(space);
1376 if (!is_active(space)) {
1377 is_read_unlock(space);
1378 return -1;
1379 }
1380
1381 *table_size = ipc_entry_table_count(is_active_table(space));
1382 *table_free = space->is_table_free;
1383
1384 is_read_unlock(space);
1385
1386 return 0;
1387 }
1388
1389 int
get_task_cdhash(task_t task,char cdhash[static CS_CDHASH_LEN])1390 get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
1391 {
1392 int result = 0;
1393 void *bsd_info = NULL;
1394
1395 task_lock(task);
1396 bsd_info = get_bsdtask_info(task);
1397 result = bsd_info ? proc_getcdhash(bsd_info, cdhash) : ESRCH;
1398 task_unlock(task);
1399
1400 return result;
1401 }
1402
1403 /* moved from ubc_subr.c */
/*
 * Translate a Mach kern_return_t into the closest BSD errno.
 * The mapping is many-to-one and therefore lossy; unknown codes
 * collapse to EIO.
 */
int
mach_to_bsd_errno(kern_return_t mach_err)
{
	switch (mach_err) {
	case KERN_SUCCESS:
		return 0;

	/* Bad arguments, names, rights, or policy values. */
	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
	case KERN_NOT_IN_SET:
	case KERN_INVALID_NAME:
	case KERN_INVALID_TASK:
	case KERN_INVALID_RIGHT:
	case KERN_INVALID_VALUE:
	case KERN_INVALID_CAPABILITY:
	case KERN_INVALID_HOST:
	case KERN_MEMORY_PRESENT:
	case KERN_INVALID_PROCESSOR_SET:
	case KERN_INVALID_POLICY:
	case KERN_ALREADY_WAITING:
	case KERN_DEFAULT_SET:
	case KERN_EXCEPTION_PROTECTED:
	case KERN_INVALID_LEDGER:
	case KERN_INVALID_MEMORY_CONTROL:
	case KERN_INVALID_SECURITY:
	case KERN_NOT_DEPRESSED:
	case KERN_LOCK_OWNED:
	case KERN_LOCK_OWNED_SELF:
		return EINVAL;

	/* Permission problems on otherwise valid objects. */
	case KERN_NOT_RECEIVER:
	case KERN_NO_ACCESS:
	case KERN_POLICY_STATIC:
		return EACCES;

	/* Resource exhaustion. */
	case KERN_NO_SPACE:
	case KERN_RESOURCE_SHORTAGE:
	case KERN_UREFS_OVERFLOW:
	case KERN_INVALID_OBJECT:
		return ENOMEM;

	/* Memory access failures. */
	case KERN_MEMORY_FAILURE:
	case KERN_MEMORY_ERROR:
	case KERN_PROTECTION_FAILURE:
		return EFAULT;

	/* Operation explicitly denied. */
	case KERN_POLICY_LIMIT:
	case KERN_CODESIGN_ERROR:
	case KERN_DENIED:
		return EPERM;

	/* Name/right/set membership already exists. */
	case KERN_ALREADY_IN_SET:
	case KERN_NAME_EXISTS:
	case KERN_RIGHT_EXISTS:
		return EEXIST;

	case KERN_ABORTED:
		return EINTR;

	/* Target object is gone or was never there. */
	case KERN_TERMINATED:
	case KERN_LOCK_SET_DESTROYED:
	case KERN_LOCK_UNSTABLE:
	case KERN_SEMAPHORE_DESTROYED:
	case KERN_NOT_FOUND:
	case KERN_NOT_WAITING:
		return ENOENT;

	case KERN_RPC_SERVER_TERMINATED:
		return ECONNRESET;

	case KERN_NOT_SUPPORTED:
		return ENOTSUP;

	case KERN_NODE_DOWN:
		return ENETDOWN;

	case KERN_OPERATION_TIMED_OUT:
		return ETIMEDOUT;

	default:
		return EIO; /* 5 == KERN_FAILURE */
	}
}
1487
/*
 * Translate a BSD errno into a Mach kern_return_t failure code.
 * Inverse direction of mach_to_bsd_errno(), but not a strict inverse:
 * the mapping is many-to-one in both directions (e.g. EIO, EACCES,
 * ENOMEM, and EFAULT all become KERN_MEMORY_ERROR here).  Unknown
 * errnos collapse to KERN_FAILURE.
 */
kern_return_t
bsd_to_mach_failure(int bsd_err)
{
	switch (bsd_err) {
	case EIO:
	case EACCES:
	case ENOMEM:
	case EFAULT:
		return KERN_MEMORY_ERROR;

	case EINVAL:
		return KERN_INVALID_ARGUMENT;

	case ETIMEDOUT:
	case EBUSY:
		return KERN_OPERATION_TIMED_OUT;

	case ECONNRESET:
		return KERN_RPC_SERVER_TERMINATED;

	case ENOTSUP:
		return KERN_NOT_SUPPORTED;

	case ENETDOWN:
		return KERN_NODE_DOWN;

	case ENOENT:
		return KERN_NOT_FOUND;

	case EINTR:
		return KERN_ABORTED;

	case EPERM:
		return KERN_DENIED;

	case EEXIST:
		return KERN_ALREADY_IN_SET;

	default:
		return KERN_FAILURE;
	}
}
1530