xref: /xnu-12377.61.12/osfmk/kern/bsd_kern.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/mach_types.h>
30 #include <mach/machine/vm_param.h>
31 #include <mach/task.h>
32 
33 #include <kern/kern_types.h>
34 #include <kern/ledger.h>
35 #include <kern/processor.h>
36 #include <kern/thread.h>
37 #include <kern/task.h>
38 #include <kern/spl.h>
39 #include <kern/ast.h>
40 #include <kern/monotonic.h>
41 #include <machine/monotonic.h>
42 #include <ipc/ipc_port.h>
43 #include <ipc/ipc_space.h>
44 #include <vm/vm_map_xnu.h>
45 #include <vm/vm_kern.h>
46 #include <vm/pmap.h>
47 #include <vm/vm_protos.h> /* last */
48 #include <sys/resource.h>
49 #include <sys/signal.h>
50 #include <sys/errno.h>
51 #include <sys/proc_require.h>
52 
53 #include <machine/limits.h>
54 #include <sys/codesign.h> /* CS_CDHASH_LEN */
55 
56 #undef thread_should_halt
57 
58 /* BSD KERN COMPONENT INTERFACE */
59 
60 extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
61 
62 thread_t get_firstthread(task_t);
63 int get_task_userstop(task_t);
64 int get_thread_userstop(thread_t);
65 boolean_t current_thread_aborted(void);
66 void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
67 kern_return_t get_signalact(task_t, thread_t *, int);
68 int fill_task_rusage(task_t task, rusage_info_current *ri);
69 int fill_task_io_rusage(task_t task, rusage_info_current *ri);
70 int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
71 uint64_t get_task_logical_writes(task_t task, bool external);
72 void fill_task_billed_usage(task_t task, rusage_info_current *ri);
73 void task_bsdtask_kill(task_t);
74 
75 extern uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p);
76 extern uint64_t get_dispatchqueue_label_offset_from_proc(void *p);
77 extern uint64_t proc_uniqueid_task(void *p, void *t);
78 extern int proc_pidversion(void *p);
79 extern int proc_getcdhash(void *p, char *cdhash);
80 
81 int mach_to_bsd_errno(kern_return_t mach_err);
82 kern_return_t kern_return_for_errno(int bsd_errno);
83 
84 #if MACH_BSD
85 extern void psignal(void *, int);
86 #endif
87 
88 /*
89  *
90  */
void  *
get_bsdtask_info(task_t t)
{
	/*
	 * Return the task's BSD proc pointer, or NULL if no proc is attached.
	 * The raw pointer is validated with proc_require() before the
	 * has-proc flag is consulted.
	 */
	void *proc_from_task = task_get_proc_raw(t);
	proc_require(proc_from_task, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_ALL);
	return task_has_proc(t) ? proc_from_task : NULL;
}
98 
99 void
task_bsdtask_kill(task_t t)100 task_bsdtask_kill(task_t t)
101 {
102 	void * bsd_info = get_bsdtask_info(t);
103 	if (bsd_info != NULL) {
104 		psignal(bsd_info, SIGKILL);
105 	}
106 }
107 /*
108  *
109  */
110 void *
get_bsdthreadtask_info(thread_t th)111 get_bsdthreadtask_info(thread_t th)
112 {
113 	return get_thread_ro(th)->tro_proc;
114 }
115 
116 /*
117  *
118  */
119 void
set_bsdtask_info(task_t t,void * v)120 set_bsdtask_info(task_t t, void * v)
121 {
122 	void *proc_from_task = task_get_proc_raw(t);
123 	if (v == NULL) {
124 		task_clear_has_proc(t);
125 	} else {
126 		if (v != proc_from_task) {
127 			panic("set_bsdtask_info trying to set random bsd_info %p", v);
128 		}
129 		task_set_has_proc(t);
130 	}
131 }
132 
__abortlike
static void
__thread_ro_circularity_panic(thread_t th, thread_ro_t tro)
{
	/* Fatal: a thread's read-only struct does not point back at it. */
	panic("tro %p points back to %p instead of %p", tro, tro->tro_owner, th);
}
139 
__attribute__((always_inline))
thread_ro_t
get_thread_ro_unchecked(thread_t th)
{
	/* Raw accessor: no zone or back-pointer validation. */
	return th->t_tro;
}
146 
thread_ro_t
get_thread_ro(thread_t th)
{
	/*
	 * Validated accessor for a thread's read-only struct: the pointer
	 * must come from the thread_ro zone and must point back at `th`.
	 */
	thread_ro_t tro = th->t_tro;

	zone_require_ro(ZONE_ID_THREAD_RO, sizeof(struct thread_ro), tro);
	if (tro->tro_owner != th) {
		__thread_ro_circularity_panic(th, tro);
	}
	return tro;
}
158 
__attribute__((always_inline))
thread_ro_t
current_thread_ro_unchecked(void)
{
	/* Unvalidated read-only struct for the calling thread. */
	return get_thread_ro_unchecked(current_thread());
}
165 
thread_ro_t
current_thread_ro(void)
{
	/* Validated read-only struct for the calling thread. */
	return get_thread_ro(current_thread());
}
171 
void
clear_thread_ro_proc(thread_t th)
{
	/* NULL out tro_proc via the read-only-zone write path. */
	thread_ro_t tro = get_thread_ro(th);

	zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_proc);
}
179 
struct uthread *
get_bsdthread_info(thread_t th)
{
	/* The uthread is co-allocated immediately after struct thread. */
	return (struct uthread *)((uintptr_t)th + sizeof(struct thread));
}
185 
thread_t
get_machthread(struct uthread *uth)
{
	/* Inverse of get_bsdthread_info(): step back over struct thread. */
	return (struct thread *)((uintptr_t)uth - sizeof(struct thread));
}
191 
192 /*
193  * This is used to remember any FS error from VNOP_PAGEIN code when
194  * invoked under vm_fault(). The value is an errno style value. It can
195  * be retrieved by exception handlers using thread_get_state().
196  */
197 void
set_thread_pagein_error(thread_t th,int error)198 set_thread_pagein_error(thread_t th, int error)
199 {
200 	assert(th == current_thread());
201 	if (error == 0 || th->t_pagein_error == 0) {
202 		th->t_pagein_error = error;
203 	}
204 }
205 
206 #if defined(__x86_64__)
207 /*
208  * Returns non-zero if the thread has a non-NULL task
209  * and that task has an LDT.
210  */
int
thread_task_has_ldt(thread_t th)
{
	/* Task may be NULL for early/special threads; treat as "no LDT". */
	task_t task = get_threadtask(th);
	return task && task->i386_ldt != 0;
}
217 #endif /* __x86_64__ */
218 
219 /*
220  * XXX
221  */
int get_thread_lock_count(thread_t th);         /* forced forward */
int
get_thread_lock_count(thread_t th __unused)
{
	/* Always 0: lock-hold counting is not implemented. */
	/*
	 * TODO: one day: resurect counting locks held to disallow
	 *       holding locks across upcalls.
	 *
	 *       never worked on arm.
	 */
	return 0;
}
234 
235 /*
236  * Returns a thread reference.
237  */
238 thread_t
get_firstthread(task_t task)239 get_firstthread(task_t task)
240 {
241 	thread_t thread = THREAD_NULL;
242 	task_lock(task);
243 
244 	if (!task->active) {
245 		task_unlock(task);
246 		return THREAD_NULL;
247 	}
248 
249 	thread = (thread_t)(void *)queue_first(&task->threads);
250 
251 	if (queue_end(&task->threads, (queue_entry_t)thread)) {
252 		task_unlock(task);
253 		return THREAD_NULL;
254 	}
255 
256 	thread_reference(thread);
257 	task_unlock(task);
258 	return thread;
259 }
260 
/*
 * Find a thread in `task` that can take a signal: active and not
 * unsafely aborted.  Optionally (setast != 0) post the BSD AST on it.
 * Returns KERN_SUCCESS with *result_out set, or KERN_FAILURE if the
 * task is dead or has no eligible thread.
 */
kern_return_t
get_signalact(
	task_t          task,
	thread_t        *result_out,
	int                     setast)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_t                inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	/* Scan the thread list; on a hit we hold that thread's mutex. */
	for (inc  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		thread_mtx_lock(inc);
		if (inc->active &&
		    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out) {
		*result_out = thread;
	}

	if (thread) {
		if (setast) {
			act_set_astbsd(thread);
		}

		/* Drop the mutex taken in the scan above. */
		thread_mtx_unlock(thread);
	} else {
		result = KERN_FAILURE;
	}

	task_unlock(task);

	return result;
}
309 
310 
/*
 * Check that `thread` is still a member of `task` and can currently
 * take a signal (active, not unsafely aborted); optionally (setast)
 * post the BSD AST on it.  Returns KERN_SUCCESS or KERN_FAILURE.
 */
kern_return_t
check_actforsig(
	task_t                  task,
	thread_t                thread,
	int                             setast)
{
	kern_return_t   result = KERN_FAILURE;
	thread_t                inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return KERN_FAILURE;
	}

	/* Walk the list to confirm membership before touching the thread. */
	for (inc  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			/* On success we break out still holding the mutex. */
			if (inc->active &&
			    (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast) {
			act_set_astbsd(thread);
		}

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return result;
}
358 
ledger_t
get_task_ledger(task_t t)
{
	/* Borrowed pointer to the task's resource ledger. */
	return t->ledger;
}
364 
365 /*
366  * This is only safe to call from a thread executing in
367  * in the task's context or if the task is locked. Otherwise,
368  * the map could be switched for the task (and freed) before
369  * we go to return it here.
370  */
vm_map_t
get_task_map(task_t t)
{
	/* Borrowed pointer; safe only per the caller constraints above. */
	return t->map;
}
376 
/*
 * Return a referenced map for the task (caller must deallocate),
 * or VM_MAP_NULL if the task is NULL or no longer active.
 */
vm_map_t
get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL) {
		return VM_MAP_NULL;
	}

	task_lock(t);
	if (!t->active) {
		/* Terminating task: its map may already be torn down. */
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	/* Take the ref under the task lock so the map cannot be swapped out. */
	vm_map_reference(m);
	task_unlock(t);
	return m;
}
396 
397 /*
398  *
399  */
ipc_space_t
get_task_ipcspace(task_t t)
{
	/* Borrowed pointer to the task's IPC space. */
	return t->itk_space;
}
405 
int
get_task_numacts(task_t t)
{
	/* Unsynchronized snapshot of the task's thread count. */
	return t->thread_count;
}
411 
412 /* does this machine need  64bit register set for signal handler */
int
is_64signalregset(void)
{
	/* 1 when the current task uses 64-bit data, else 0. */
	return task_has_64Bit_data(current_task()) ? 1 : 0;
}
422 
423 /*
424  * Swap in a new map for the task/thread pair; the old map reference is
425  * returned. Also does a pmap switch if thread provided is current thread.
426  */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;
	boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;

	if (task != get_threadtask(thread)) {
		panic("swap_task_map");
	}

	task_lock(task);
	/* Keep the map swap and pmap switch atomic on this CPU. */
	mp_disable_preemption();

	/* verify that the map has been activated if the task is enabled for IPC access and is not a corpse */
	assert(!task->ipc_active || task_is_a_corpse(task) || (map->owning_task == task));

	old_map = task->map;
	thread->map = task->map = map;
	vm_commit_pagezero_status(map);

	if (doswitch) {
		/* Current thread: switch the hardware pmap immediately. */
		PMAP_SWITCH_USER(thread, map, cpu_number());
	}
	mp_enable_preemption();
	task_unlock(task);

	/* The old map's reference is handed back to the caller. */
	return old_map;
}
455 
456 /*
457  *
458  * This is only safe to call from a thread executing in
459  * in the task's context or if the task is locked. Otherwise,
460  * the map could be switched for the task (and freed) before
461  * we go to return it here.
462  */
pmap_t
get_task_pmap(task_t t)
{
	/* Borrowed pmap; safe only per the caller constraints above. */
	return t->map->pmap;
}
468 
469 /*
470  *
471  */
472 uint64_t
get_task_resident_size(task_t task)473 get_task_resident_size(task_t task)
474 {
475 	uint64_t val;
476 
477 	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
478 	return val;
479 }
480 
481 uint64_t
get_task_resident_max(task_t task)482 get_task_resident_max(task_t task)
483 {
484 	uint64_t val;
485 
486 	ledger_get_lifetime_max(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &val);
487 	return val;
488 }
489 
490 /*
491  * Get the balance for a given field in the task ledger.
492  * Returns 0 if the entry is invalid.
493  */
static uint64_t
get_task_ledger_balance(task_t task, int entry)
{
	/* Balance stays 0 if `entry` is invalid (return value ignored). */
	ledger_amount_t balance = 0;

	ledger_get_balance(task->ledger, entry, &balance);
	return balance;
}
502 
uint64_t
get_task_compressed(task_t task)
{
	/* Sum of all compressed-memory ledger entries for the task. */
	uint64_t total = 0;

	total += get_task_ledger_balance(task, task_ledgers.internal_compressed);
	total += get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
	/* Alt. Acct. is doubled counted between the purgeable and internal ledgers */
	total -= get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
	total += get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
	total += get_task_ledger_balance(task, task_ledgers.tagged_footprint_compressed);
	total += get_task_ledger_balance(task, task_ledgers.media_footprint_compressed);
	total += get_task_ledger_balance(task, task_ledgers.graphics_footprint_compressed);
	total += get_task_ledger_balance(task, task_ledgers.neural_footprint_compressed);
#if !CONFIG_JETSAM
	/* Without jetsam, the nofootprint variants are accounted here too. */
	total += get_task_ledger_balance(task, task_ledgers.tagged_nofootprint_compressed);
	total += get_task_ledger_balance(task, task_ledgers.media_nofootprint_compressed);
	total += get_task_ledger_balance(task, task_ledgers.graphics_nofootprint_compressed);
	total += get_task_ledger_balance(task, task_ledgers.neural_nofootprint_compressed);
#endif
	return total;
}
525 
526 
527 uint64_t
get_task_purgeable_size(task_t task)528 get_task_purgeable_size(task_t task)
529 {
530 	kern_return_t ret;
531 	ledger_amount_t balance = 0;
532 	uint64_t volatile_size = 0;
533 
534 	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance);
535 	if (ret != KERN_SUCCESS) {
536 		return 0;
537 	}
538 
539 	volatile_size += balance;
540 
541 	ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance);
542 	if (ret != KERN_SUCCESS) {
543 		return 0;
544 	}
545 
546 	volatile_size += balance;
547 
548 	return volatile_size;
549 }
550 
551 /*
552  *
553  */
uint64_t
get_task_phys_footprint(task_t task)
{
	/* Current phys_footprint ledger balance. */
	return get_task_ledger_balance(task, task_ledgers.phys_footprint);
}
559 
560 #if CONFIG_LEDGER_INTERVAL_MAX
561 /*
562  *
563  */
564 uint64_t
get_task_phys_footprint_interval_max(task_t task,int reset)565 get_task_phys_footprint_interval_max(task_t task, int reset)
566 {
567 	kern_return_t ret;
568 	ledger_amount_t max;
569 
570 	ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset);
571 
572 	if (KERN_SUCCESS == ret) {
573 		return max;
574 	}
575 
576 	return 0;
577 }
578 #endif /* CONFIG_LEDGER_INTERVAL_MAX */
579 
580 /*
581  *
582  */
583 uint64_t
get_task_phys_footprint_lifetime_max(task_t task)584 get_task_phys_footprint_lifetime_max(task_t task)
585 {
586 	kern_return_t ret;
587 	ledger_amount_t max;
588 
589 	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max);
590 
591 	if (KERN_SUCCESS == ret) {
592 		return max;
593 	}
594 
595 	return 0;
596 }
597 
598 /*
599  *
600  */
601 uint64_t
get_task_phys_footprint_limit(task_t task)602 get_task_phys_footprint_limit(task_t task)
603 {
604 	kern_return_t ret;
605 	ledger_amount_t max;
606 
607 	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max);
608 	if (KERN_SUCCESS == ret) {
609 		return max;
610 	}
611 
612 	return 0;
613 }
614 
uint64_t
get_task_internal(task_t task)
{
	/* Balance of the internal memory ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.internal);
}
620 
uint64_t
get_task_internal_compressed(task_t task)
{
	/* Balance of the internal_compressed ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.internal_compressed);
}
626 
uint64_t
get_task_purgeable_nonvolatile(task_t task)
{
	/* Balance of the purgeable_nonvolatile ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile);
}
632 
uint64_t
get_task_purgeable_nonvolatile_compressed(task_t task)
{
	/* Balance of the purgeable_nonvolatile_compressed ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed);
}
638 
uint64_t
get_task_alternate_accounting(task_t task)
{
	/* Balance of the alternate_accounting ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting);
}
644 
uint64_t
get_task_alternate_accounting_compressed(task_t task)
{
	/* Balance of the alternate_accounting_compressed ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed);
}
650 
uint64_t
get_task_page_table(task_t task)
{
	/* Balance of the page_table ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.page_table);
}
656 
657 #if CONFIG_FREEZE
uint64_t
get_task_frozen_to_swap(task_t task)
{
	/* Balance of the frozen_to_swap ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.frozen_to_swap);
}
663 #endif /* CONFIG_FREEZE */
664 
uint64_t
get_task_iokit_mapped(task_t task)
{
	/* Balance of the iokit_mapped ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.iokit_mapped);
}
670 
uint64_t
get_task_network_nonvolatile(task_t task)
{
	/* Balance of the network_nonvolatile ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile);
}
676 
uint64_t
get_task_network_nonvolatile_compressed(task_t task)
{
	/* Balance of the network_nonvolatile_compressed ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed);
}
682 
uint64_t
get_task_wired_mem(task_t task)
{
	/* Balance of the wired_mem ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.wired_mem);
}
688 
689 uint64_t
get_task_tagged_footprint(task_t task)690 get_task_tagged_footprint(task_t task)
691 {
692 	kern_return_t ret;
693 	ledger_amount_t credit, debit;
694 
695 	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit);
696 	if (KERN_SUCCESS == ret) {
697 		return credit - debit;
698 	}
699 
700 	return 0;
701 }
702 
703 uint64_t
get_task_tagged_footprint_compressed(task_t task)704 get_task_tagged_footprint_compressed(task_t task)
705 {
706 	kern_return_t ret;
707 	ledger_amount_t credit, debit;
708 
709 	ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit);
710 	if (KERN_SUCCESS == ret) {
711 		return credit - debit;
712 	}
713 
714 	return 0;
715 }
716 
717 uint64_t
get_task_media_footprint(task_t task)718 get_task_media_footprint(task_t task)
719 {
720 	kern_return_t ret;
721 	ledger_amount_t credit, debit;
722 
723 	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit);
724 	if (KERN_SUCCESS == ret) {
725 		return credit - debit;
726 	}
727 
728 	return 0;
729 }
730 
731 uint64_t
get_task_media_footprint_compressed(task_t task)732 get_task_media_footprint_compressed(task_t task)
733 {
734 	kern_return_t ret;
735 	ledger_amount_t credit, debit;
736 
737 	ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit);
738 	if (KERN_SUCCESS == ret) {
739 		return credit - debit;
740 	}
741 
742 	return 0;
743 }
744 
745 uint64_t
get_task_graphics_footprint(task_t task)746 get_task_graphics_footprint(task_t task)
747 {
748 	kern_return_t ret;
749 	ledger_amount_t credit, debit;
750 
751 	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit);
752 	if (KERN_SUCCESS == ret) {
753 		return credit - debit;
754 	}
755 
756 	return 0;
757 }
758 
759 
760 uint64_t
get_task_graphics_footprint_compressed(task_t task)761 get_task_graphics_footprint_compressed(task_t task)
762 {
763 	kern_return_t ret;
764 	ledger_amount_t credit, debit;
765 
766 	ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit);
767 	if (KERN_SUCCESS == ret) {
768 		return credit - debit;
769 	}
770 
771 	return 0;
772 }
773 
774 uint64_t
get_task_neural_footprint(task_t task)775 get_task_neural_footprint(task_t task)
776 {
777 	kern_return_t ret;
778 	ledger_amount_t credit, debit;
779 
780 	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit);
781 	if (KERN_SUCCESS == ret) {
782 		return credit - debit;
783 	}
784 
785 	return 0;
786 }
787 
788 uint64_t
get_task_neural_footprint_compressed(task_t task)789 get_task_neural_footprint_compressed(task_t task)
790 {
791 	kern_return_t ret;
792 	ledger_amount_t credit, debit;
793 
794 	ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit);
795 	if (KERN_SUCCESS == ret) {
796 		return credit - debit;
797 	}
798 
799 	return 0;
800 }
801 
802 uint64_t
get_task_neural_nofootprint_total(task_t task)803 get_task_neural_nofootprint_total(task_t task)
804 {
805 	kern_return_t ret;
806 	ledger_amount_t credit, debit;
807 
808 	ret = ledger_get_entries(task->ledger, task_ledgers.neural_nofootprint_total, &credit, &debit);
809 	if (KERN_SUCCESS == ret) {
810 		return credit - debit;
811 	}
812 
813 	return 0;
814 }
815 
816 #if CONFIG_LEDGER_INTERVAL_MAX
817 uint64_t
get_task_neural_nofootprint_total_interval_max(task_t task,int reset)818 get_task_neural_nofootprint_total_interval_max(task_t task, int reset)
819 {
820 	kern_return_t ret;
821 	ledger_amount_t max;
822 
823 	ret = ledger_get_interval_max(task->ledger, task_ledgers.neural_nofootprint_total, &max, reset);
824 
825 	if (KERN_SUCCESS == ret) {
826 		return max;
827 	}
828 
829 	return 0;
830 }
831 #endif /* CONFIG_LEDGER_INTERVAL_MAX */
832 
833 uint64_t
get_task_neural_nofootprint_total_lifetime_max(task_t task)834 get_task_neural_nofootprint_total_lifetime_max(task_t task)
835 {
836 	kern_return_t ret;
837 	ledger_amount_t max;
838 
839 	ret = ledger_get_lifetime_max(task->ledger, task_ledgers.neural_nofootprint_total, &max);
840 
841 	if (KERN_SUCCESS == ret) {
842 		return max;
843 	}
844 
845 	return 0;
846 }
847 
uint64_t
get_task_cpu_time(task_t task)
{
	/* Balance of the cpu_time ledger entry. */
	return get_task_ledger_balance(task, task_ledgers.cpu_time);
}
853 
uint32_t
get_task_loadTag(task_t task)
{
	/* Relaxed atomic read of task->loadTag. */
	return os_atomic_load(&task->loadTag, relaxed);
}
859 
uint32_t
set_task_loadTag(task_t task, uint32_t loadTag)
{
	/* Atomic exchange; returns the previous load tag. */
	return os_atomic_xchg(&task->loadTag, loadTag, relaxed);
}
865 
866 
task_t
get_threadtask(thread_t th)
{
	/* Owning task, read from the validated read-only struct. */
	return get_thread_ro(th)->tro_task;
}
872 
task_t
get_threadtask_early(thread_t th)
{
	/*
	 * Early-boot-tolerant variant: before STARTUP_SUB_EARLY_BOOT a
	 * thread's read-only struct may not exist yet, so return
	 * TASK_NULL instead of dereferencing it.
	 */
	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
		if (th == THREAD_NULL || th->t_tro == NULL) {
			return TASK_NULL;
		}
	}
	return get_threadtask(th);
}
883 
884 /*
885  *
886  */
vm_map_offset_t
get_map_min(
	vm_map_t        map)
{
	/* Lowest valid address in the map. */
	return vm_map_min(map);
}
893 
894 /*
895  *
896  */
vm_map_offset_t
get_map_max(
	vm_map_t        map)
{
	/* Highest valid address in the map. */
	return vm_map_max(map);
}
vm_map_size_t
get_vmmap_size(
	vm_map_t        map)
{
	/* Adjusted virtual size of the map. */
	return vm_map_adjusted_size(map);
}
int
get_task_page_size(
	task_t task)
{
	/* VM page size of the task's map (may differ per task). */
	return vm_map_page_size(task->map);
}
915 
916 #if CONFIG_COREDUMP
917 
/*
 * Count map entries of `map` within [start, end), descending
 * recursively into submaps.
 */
static int
get_vmsubmap_entries(
	vm_map_t        map,
	vm_object_offset_t      start,
	vm_object_offset_t      end)
{
	int     total_entries = 0;
	vm_map_entry_t  entry;

	vmlp_api_start(GET_VMSUBMAP_ENTRIES);

	/* Debugger (kdp) context must not take locks. */
	if (not_in_kdp) {
		vm_map_lock_read(map);
	}
	vmlp_range_event(map, start, end - start);
	/* Skip entries entirely below `start`. */
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock_read(map);
	}
	vmlp_api_end(GET_VMSUBMAP_ENTRIES, total_entries);
	return total_entries;
}
957 
/*
 * Count all map entries in `map`, descending recursively into submaps.
 */
int
get_vmmap_entries(
	vm_map_t        map)
{
	int     total_entries = 0;
	vm_map_entry_t  entry;

	vmlp_api_start(GET_VMMAP_ENTRIES);

	/* Debugger (kdp) context must not take locks. */
	if (not_in_kdp) {
		vm_map_lock_read(map);
	}
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		vmlp_range_event_entry(map, entry);
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(VME_SUBMAP(entry),
			    VME_OFFSET(entry),
			    (VME_OFFSET(entry) +
			    entry->vme_end -
			    entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock_read(map);
	}
	vmlp_api_end(GET_VMMAP_ENTRIES, total_entries);
	return total_entries;
}
992 #endif /* CONFIG_COREDUMP */
993 
int
get_task_userstop(
	task_t task)
{
	/* Unsynchronized snapshot of the task's user stop count. */
	return task->user_stop_count;
}
1000 
int
get_thread_userstop(
	thread_t th)
{
	/* Unsynchronized snapshot of the thread's user stop count. */
	return th->user_stop_count;
}
1007 
boolean_t
get_task_pidsuspended(
	task_t task)
{
	/* TRUE when the task is suspended via pid_suspend state. */
	return task->pidsuspended;
}
1014 
boolean_t
get_task_frozen(
	task_t task)
{
	/* TRUE when the task is in the frozen state. */
	return task->frozen;
}
1021 
boolean_t
thread_should_abort(
	thread_t th)
{
	/* TRUE only for an unsafe abort: flags equal exactly TH_SFLAG_ABORT. */
	return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT;
}
1028 
1029 /*
1030  * This routine is like thread_should_abort() above.  It checks to
1031  * see if the current thread is aborted.  But unlike above, it also
1032  * checks to see if thread is safely aborted.  If so, it returns
1033  * that fact, and clears the condition (safe aborts only should
1034  * have a single effect, and a poll of the abort status
1035  * qualifies.
1036  */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	/* Unsafe abort on an interruptible thread: report it. */
	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
	    (th->options & TH_OPT_INTMASK) != THREAD_UNINT) {
		return TRUE;
	}
	/*
	 * Safe abort: clear it so it has a single effect.  Re-check under
	 * the thread lock, since the flag can change beneath us.
	 */
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		}
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
1059 
void
task_act_iterate_wth_args(
	task_t                  task,
	void                    (*func_callback)(thread_t, void *),
	void                    *func_arg)
{
	thread_t        inc;

	task_lock(task);

	/* Invoke the callback on each thread; the task lock is held
	 * across every callback invocation. */
	for (inc  = (thread_t)(void *)queue_first(&task->threads);
	    !queue_end(&task->threads, (queue_entry_t)inc);) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
1078 
1079 #include <sys/bsdtask_info.h>
1080 
/*
 * Populate a proc_taskinfo_internal snapshot (sizes, CPU times,
 * counters, scheduling info) for proc_info(2)-style consumers.
 */
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t   tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	task_lock(task);

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size  = vm_map_adjusted_size(map);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *) &ptinfo->pti_resident_size);

	ptinfo->pti_policy = ((task != kernel_task)?
	    POLICY_TIMESHARE: POLICY_RR);

	/* Accumulate per-thread stats; idle threads are skipped. */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN) {
			numrunning++;
		}
		cswitch += thread->c_switch;

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	/* Live-thread times = task totals minus terminated-thread totals. */
	struct recount_times_mach term_times = recount_task_terminated_times(task);
	struct recount_times_mach total_times = recount_task_times(task);

	tinfo.threads_user = total_times.rtm_user - term_times.rtm_user;
	tinfo.threads_system = total_times.rtm_system - term_times.rtm_system;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_total_system = total_times.rtm_system;
	ptinfo->pti_total_user = total_times.rtm_user;

	/* 64-bit counters are clamped into the 32-bit proc_taskinfo fields. */
	ptinfo->pti_faults = (int32_t) MIN(counter_load(&task->faults), INT32_MAX);
	ptinfo->pti_pageins = (int32_t) MIN(counter_load(&task->pageins), INT32_MAX);
	ptinfo->pti_cow_faults = (int32_t) MIN(counter_load(&task->cow_faults), INT32_MAX);
	ptinfo->pti_messages_sent = (int32_t) MIN(counter_load(&task->messages_sent), INT32_MAX);
	ptinfo->pti_messages_received = (int32_t) MIN(counter_load(&task->messages_received), INT32_MAX);
	ptinfo->pti_syscalls_mach = (int32_t) MIN(task->syscalls_mach + syscalls_mach, INT32_MAX);
	ptinfo->pti_syscalls_unix = (int32_t) MIN(task->syscalls_unix + syscalls_unix, INT32_MAX);
	ptinfo->pti_csw = (int32_t) MIN(task->c_switch + cswitch, INT32_MAX);
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
1148 
1149 int
fill_taskthreadinfo(task_t task,uint64_t thaddr,bool thuniqueid,struct proc_threadinfo_internal * ptinfo,void * vpp,int * vidp)1150 fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
1151 {
1152 	thread_t  thact;
1153 	int err = 0;
1154 	mach_msg_type_number_t count;
1155 	thread_basic_info_data_t basic_info;
1156 	kern_return_t kret;
1157 	uint64_t addr = 0;
1158 
1159 	task_lock(task);
1160 
1161 	for (thact  = (thread_t)(void *)queue_first(&task->threads);
1162 	    !queue_end(&task->threads, (queue_entry_t)thact);) {
1163 		addr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
1164 		if (addr == thaddr) {
1165 			count = THREAD_BASIC_INFO_COUNT;
1166 			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
1167 				err = 1;
1168 				goto out;
1169 			}
1170 			ptinfo->pth_user_time = (((uint64_t)basic_info.user_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.user_time.microseconds * NSEC_PER_USEC));
1171 			ptinfo->pth_system_time = (((uint64_t)basic_info.system_time.seconds * NSEC_PER_SEC) + ((uint64_t)basic_info.system_time.microseconds * NSEC_PER_USEC));
1172 
1173 			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
1174 			ptinfo->pth_policy = basic_info.policy;
1175 			ptinfo->pth_run_state = basic_info.run_state;
1176 			ptinfo->pth_flags = basic_info.flags;
1177 			ptinfo->pth_sleep_time = basic_info.sleep_time;
1178 			ptinfo->pth_curpri = thact->sched_pri;
1179 			ptinfo->pth_priority = thact->base_pri;
1180 			ptinfo->pth_maxpriority = thact->max_priority;
1181 
1182 			if (vpp != NULL) {
1183 				bsd_threadcdir(get_bsdthread_info(thact), vpp, vidp);
1184 			}
1185 			bsd_getthreadname(get_bsdthread_info(thact), ptinfo->pth_name);
1186 			err = 0;
1187 			goto out;
1188 		}
1189 		thact = (thread_t)(void *)queue_next(&thact->task_threads);
1190 	}
1191 	err = 1;
1192 
1193 out:
1194 	task_unlock(task);
1195 	return err;
1196 }
1197 
1198 int
fill_taskthreadlist(task_t task,void * buffer,int thcount,bool thuniqueid)1199 fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid)
1200 {
1201 	int numthr = 0;
1202 	thread_t thact;
1203 	uint64_t * uptr;
1204 	uint64_t  thaddr;
1205 
1206 	uptr = (uint64_t *)buffer;
1207 
1208 	task_lock(task);
1209 
1210 	for (thact  = (thread_t)(void *)queue_first(&task->threads);
1211 	    !queue_end(&task->threads, (queue_entry_t)thact);) {
1212 		thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self;
1213 		*uptr++ = thaddr;
1214 		numthr++;
1215 		if (numthr >= thcount) {
1216 			goto out;
1217 		}
1218 		thact = (thread_t)(void *)queue_next(&thact->task_threads);
1219 	}
1220 
1221 out:
1222 	task_unlock(task);
1223 	return (int)(numthr * sizeof(uint64_t));
1224 }
1225 
1226 int
fill_taskthreadschedinfo(task_t task,uint64_t thread_id,struct proc_threadschedinfo_internal * thread_sched_info)1227 fill_taskthreadschedinfo(task_t task, uint64_t thread_id, struct proc_threadschedinfo_internal *thread_sched_info)
1228 {
1229 	int err = 0;
1230 
1231 	thread_t thread = current_thread();
1232 
1233 	/*
1234 	 * Looking up threads is pretty expensive and not realtime-safe
1235 	 * right now, requiring locking the task and iterating over all
1236 	 * threads. As long as that is the case, we officially only
1237 	 * support getting this info for the current thread.
1238 	 */
1239 	if (task != current_task() || thread_id != thread->thread_id) {
1240 		return -1;
1241 	}
1242 
1243 #if SCHED_HYGIENE_DEBUG
1244 	absolutetime_to_nanoseconds(thread->machine.int_time_mt, &thread_sched_info->int_time_ns);
1245 #else
1246 	(void)thread;
1247 	thread_sched_info->int_time_ns = 0;
1248 #endif
1249 
1250 	return err;
1251 }
1252 
/* Return the task's current thread count (unlocked snapshot). */
int
get_numthreads(task_t task)
{
	return task->thread_count;
}
1258 
1259 /*
1260  * Gather the various pieces of info about the designated task,
1261  * and collect it all into a single rusage_info.
1262  */
/*
 * Populate 'ri' with the task's CPU, power, and memory usage.
 * The task is locked for the duration.  Always returns 0.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	assert(task != TASK_NULL);
	task_lock(task);

	/* CPU times, wakeup counts, and perf-counter totals in one pass. */
	struct task_power_info_extra extra = { 0 };
	task_power_info_locked(task, &powerinfo, NULL, NULL, &extra);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;
	ri->ri_runnable_time = extra.runnable_time;
	ri->ri_cycles = extra.cycles;
	ri->ri_instructions = extra.instructions;
	ri->ri_pcycles = extra.pcycles;
	ri->ri_pinstructions = extra.pinstructions;
	ri->ri_user_ptime = extra.user_ptime;
	ri->ri_system_ptime = extra.system_ptime;
	ri->ri_energy_nj = extra.energy;
	ri->ri_penergy_nj = extra.penergy;
	ri->ri_secure_time_in_system = extra.secure_time;
	ri->ri_secure_ptime_in_system = extra.secure_ptime;

	/* Memory: physical footprint, resident (ledger), wired, neural. */
	ri->ri_phys_footprint = get_task_phys_footprint(task);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
	    (ledger_amount_t *)&ri->ri_resident_size);
	ri->ri_wired_size = get_task_wired_mem(task);

	ledger_get_balance(task->ledger, task_ledgers.neural_nofootprint_total,
	    (ledger_amount_t *)&ri->ri_neural_footprint);
	ri->ri_pageins = counter_load(&task->pageins);

	task_unlock(task);
	return 0;
}
1301 
/*
 * Fill in the bank-ledger billed and serviced CPU time/energy for the
 * task.  The task lock is not taken here; the _safe bank accessors are
 * called directly.
 *
 * NOTE(review): the parameter is annotated __unused yet is passed to
 * both bank calls — the annotation looks stale; confirm before removing.
 */
void
fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
{
	bank_billed_balance_safe(task, &ri->ri_billed_system_time, &ri->ri_billed_energy);
	bank_serviced_balance_safe(task, &ri->ri_serviced_system_time, &ri->ri_serviced_energy);
}
1308 
1309 int
fill_task_io_rusage(task_t task,rusage_info_current * ri)1310 fill_task_io_rusage(task_t task, rusage_info_current *ri)
1311 {
1312 	assert(task != TASK_NULL);
1313 	task_lock(task);
1314 
1315 	if (task->task_io_stats) {
1316 		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
1317 		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
1318 	} else {
1319 		/* I/O Stats unavailable */
1320 		ri->ri_diskio_bytesread = 0;
1321 		ri->ri_diskio_byteswritten = 0;
1322 	}
1323 	task_unlock(task);
1324 	return 0;
1325 }
1326 
/*
 * Roll each live thread's effective-QoS CPU time up into the
 * task-level counters, then copy those counters into 'ri'.
 * The task is locked for the duration.  Always returns 0.
 */
int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Rollup QoS time of all the threads to task */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		/* Skip idle threads, matching the other iterators in this file. */
		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		thread_update_qos_cpu_time(thread);
	}
	ri->ri_cpu_time_qos_default = task->cpu_time_eqos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_eqos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_eqos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_eqos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return 0;
}
1354 
1355 uint64_t
get_task_logical_writes(task_t task,bool external)1356 get_task_logical_writes(task_t task, bool external)
1357 {
1358 	assert(task != TASK_NULL);
1359 	struct ledger_entry_info lei;
1360 	int entry = external ? task_ledgers.logical_writes_to_external :
1361 	    task_ledgers.logical_writes;
1362 
1363 	task_lock(task);
1364 	ledger_get_entry_info(task->ledger, entry, &lei);
1365 	task_unlock(task);
1366 
1367 	return lei.lei_balance;
1368 }
1369 
1370 uint64_t
get_task_dispatchqueue_serialno_offset(task_t task)1371 get_task_dispatchqueue_serialno_offset(task_t task)
1372 {
1373 	uint64_t dq_serialno_offset = 0;
1374 	void *bsd_info = get_bsdtask_info(task);
1375 
1376 	if (bsd_info) {
1377 		dq_serialno_offset = get_dispatchqueue_serialno_offset_from_proc(bsd_info);
1378 	}
1379 
1380 	return dq_serialno_offset;
1381 }
1382 
1383 uint64_t
get_task_dispatchqueue_label_offset(task_t task)1384 get_task_dispatchqueue_label_offset(task_t task)
1385 {
1386 	uint64_t dq_label_offset = 0;
1387 	void *bsd_info = get_bsdtask_info(task);
1388 
1389 	if (bsd_info) {
1390 		dq_label_offset = get_dispatchqueue_label_offset_from_proc(bsd_info);
1391 	}
1392 
1393 	return dq_label_offset;
1394 }
1395 
1396 uint64_t
get_task_uniqueid(task_t task)1397 get_task_uniqueid(task_t task)
1398 {
1399 	void *bsd_info = get_bsdtask_info(task);
1400 
1401 	if (bsd_info) {
1402 		return proc_uniqueid_task(bsd_info, task);
1403 	} else {
1404 		return UINT64_MAX;
1405 	}
1406 }
1407 
1408 int
get_task_version(task_t task)1409 get_task_version(task_t task)
1410 {
1411 	void *bsd_info = get_bsdtask_info(task);
1412 
1413 	if (bsd_info) {
1414 		return proc_pidversion(bsd_info);
1415 	} else {
1416 		return INT_MAX;
1417 	}
1418 }
1419 
1420 #if CONFIG_MACF
/* Return the MAC crash label attached to 'task'. */
struct label *
get_task_crash_label(task_t task)
{
	return task->crash_label;
}
1426 
/*
 * Attach 'label' as the task's MAC crash label.
 * NOTE(review): any previous label is overwritten without being
 * released here — presumably the caller owns that; verify at call sites.
 */
void
set_task_crash_label(task_t task, struct label *label)
{
	task->crash_label = label;
}
1432 #endif
1433 
1434 int
fill_taskipctableinfo(task_t task,uint32_t * table_size,uint32_t * table_free)1435 fill_taskipctableinfo(task_t task, uint32_t *table_size, uint32_t *table_free)
1436 {
1437 	ipc_space_t space = task->itk_space;
1438 	if (space == NULL) {
1439 		return -1;
1440 	}
1441 
1442 	is_read_lock(space);
1443 	if (!is_active(space)) {
1444 		is_read_unlock(space);
1445 		return -1;
1446 	}
1447 
1448 	*table_size = ipc_entry_table_count(is_active_table(space));
1449 	*table_free = space->is_table_free;
1450 
1451 	is_read_unlock(space);
1452 
1453 	return 0;
1454 }
1455 
1456 int
get_task_cdhash(task_t task,char cdhash[static CS_CDHASH_LEN])1457 get_task_cdhash(task_t task, char cdhash[static CS_CDHASH_LEN])
1458 {
1459 	int result = 0;
1460 	void *bsd_info = NULL;
1461 
1462 	task_lock(task);
1463 	bsd_info = get_bsdtask_info(task);
1464 	result = bsd_info ? proc_getcdhash(bsd_info, cdhash) : ESRCH;
1465 	task_unlock(task);
1466 
1467 	return result;
1468 }
1469 
1470 bool
current_thread_in_kernel_fault(void)1471 current_thread_in_kernel_fault(void)
1472 {
1473 	if (current_thread()->recover) {
1474 		return true;
1475 	}
1476 	return false;
1477 }
1478 
1479 /* moved from ubc_subr.c */
/*
 * Translate a Mach kern_return_t into the closest BSD errno.
 * The mapping is many-to-one (several Mach codes collapse onto
 * EINVAL, ENOMEM, ...), so it is not invertible — see
 * kern_return_for_errno() for the reverse direction.
 */
int
mach_to_bsd_errno(kern_return_t mach_err)
{
	switch (mach_err) {
	case KERN_SUCCESS:
		return 0;

	/* Bad arguments, names, rights, and policies of every flavor. */
	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
	case KERN_NOT_IN_SET:
	case KERN_INVALID_NAME:
	case KERN_INVALID_TASK:
	case KERN_INVALID_RIGHT:
	case KERN_INVALID_VALUE:
	case KERN_INVALID_CAPABILITY:
	case KERN_INVALID_HOST:
	case KERN_MEMORY_PRESENT:
	case KERN_INVALID_PROCESSOR_SET:
	case KERN_INVALID_POLICY:
	case KERN_ALREADY_WAITING:
	case KERN_DEFAULT_SET:
	case KERN_EXCEPTION_PROTECTED:
	case KERN_INVALID_LEDGER:
	case KERN_INVALID_MEMORY_CONTROL:
	case KERN_INVALID_SECURITY:
	case KERN_NOT_DEPRESSED:
	case KERN_LOCK_OWNED:
	case KERN_LOCK_OWNED_SELF:
		return EINVAL;

	/* Permission-style failures. */
	case KERN_NOT_RECEIVER:
	case KERN_NO_ACCESS:
	case KERN_POLICY_STATIC:
		return EACCES;

	/* Resource exhaustion. */
	case KERN_NO_SPACE:
	case KERN_RESOURCE_SHORTAGE:
	case KERN_UREFS_OVERFLOW:
	case KERN_INVALID_OBJECT:
		return ENOMEM;

	/* Memory access failures. */
	case KERN_MEMORY_FAILURE:
	case KERN_MEMORY_ERROR:
	case KERN_PROTECTION_FAILURE:
		return EFAULT;

	/* Operation denied by policy or code signing. */
	case KERN_POLICY_LIMIT:
	case KERN_CODESIGN_ERROR:
	case KERN_DENIED:
		return EPERM;

	/* Target already exists. */
	case KERN_ALREADY_IN_SET:
	case KERN_NAME_EXISTS:
	case KERN_RIGHT_EXISTS:
		return EEXIST;

	case KERN_ABORTED:
		return EINTR;

	/* Target object is gone or was never there. */
	case KERN_TERMINATED:
	case KERN_LOCK_SET_DESTROYED:
	case KERN_LOCK_UNSTABLE:
	case KERN_SEMAPHORE_DESTROYED:
	case KERN_NOT_FOUND:
	case KERN_NOT_WAITING:
		return ENOENT;

	case KERN_RPC_SERVER_TERMINATED:
		return ECONNRESET;

	case KERN_NOT_SUPPORTED:
		return ENOTSUP;

	case KERN_NODE_DOWN:
		return ENETDOWN;

	case KERN_OPERATION_TIMED_OUT:
		return ETIMEDOUT;

	default:
		return EIO; /* 5 == KERN_FAILURE */
	}
}
1563 
1564 /*
1565  * Return the mach return value corresponding to a given BSD errno.
1566  */
kern_return_t
kern_return_for_errno(int bsd_errno)
{
	switch (bsd_errno) {
	case 0:
		return KERN_SUCCESS;
	/*
	 * Memory-flavored errors all collapse onto KERN_MEMORY_ERROR;
	 * this is not the exact inverse of mach_to_bsd_errno() (e.g.
	 * EIO maps here, while KERN_FAILURE maps to EIO there).
	 */
	case EIO:
	case EACCES:
	case ENOMEM:
	case EFAULT:
		return KERN_MEMORY_ERROR;

	case EINVAL:
		return KERN_INVALID_ARGUMENT;

	case ETIMEDOUT:
	case EBUSY:
		return KERN_OPERATION_TIMED_OUT;

	case ECONNRESET:
		return KERN_RPC_SERVER_TERMINATED;

	case ENOTSUP:
		return KERN_NOT_SUPPORTED;

	case ENETDOWN:
		return KERN_NODE_DOWN;

	case ENOENT:
		return KERN_NOT_FOUND;

	case EINTR:
		return KERN_ABORTED;

	case EPERM:
		return KERN_DENIED;

	case EEXIST:
		return KERN_ALREADY_IN_SET;

	/* Anything unrecognized becomes the generic failure code. */
	default:
		return KERN_FAILURE;
	}
}
1611