/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/sysproto.h>

#include <security/audit/audit.h>

#include <machine/vmparam.h>

#include <mach/mach_types.h>
#include <mach/time_value.h>
#include <mach/task.h>
#include <mach/task_info.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/thread_act.h>  /* for thread_policy_set( ) */
#include <kern/thread.h>
#include <kern/policy_internal.h>

#include <kern/task.h>
#include <kern/clock.h>         /* for absolutetime_to_microtime() */
#include <netinet/in.h>         /* for TRAFFIC_MGT_SO_* */
#if CONFIG_FREEZE
#include <sys/kern_memorystatus_freeze.h> /* for memorystatus_freezer_mark_ui_transition */
#endif /* CONFIG_FREEZE */
#include <sys/socketvar.h>      /* for struct socket */
#if NECP
#include <net/necp.h>
#endif /* NECP */

#include <vm/vm_map_xnu.h>

#include <kern/assert.h>
#include <sys/resource.h>
#include <sys/resource_private.h>
#include <sys/priv.h>
#include <IOKit/IOBSD.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

static void proc_limitblock(proc_t p);
static void proc_limitunblock(proc_t p);
static void proc_limitupdate(proc_t p, bool unblock,
    void (^update)(struct plimit *plim));

static int donice(struct proc *curp, struct proc *chgp, int n);
static int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp);
static void do_background_socket(struct proc *p, thread_t thread);
static int do_background_thread(thread_t thread, int priority);
static int do_background_proc(struct proc *curp, struct proc *targetp, int priority);
static int set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority);
static int proc_set_darwin_role(proc_t curp, proc_t targetp, int priority);
static int proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority);
static int proc_set_game_mode(proc_t targetp, int priority);
static int proc_get_game_mode(proc_t targetp, int *priority);
static int proc_set_carplay_mode(proc_t targetp, int priority);
static int proc_get_carplay_mode(proc_t targetp, int *priority);
static int get_background_proc(struct proc *curp, struct proc *targetp, int *priority);

int fill_task_rusage(task_t task, rusage_info_current *ri);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task, bool external);

rlim_t maxdmap = MAXDSIZ;       /* XXX */
rlim_t maxsmap = MAXSSIZ - PAGE_MAX_SIZE;       /* XXX */

/* For plimit reference count */
os_refgrp_decl(, rlimit_refgrp, "plimit_refcnt", NULL);

static KALLOC_TYPE_DEFINE(plimit_zone, struct plimit, KT_DEFAULT);

/*
 * Limits on the number of open files per process, and the number
 * of child processes per process.
 *
 * Note: would be in kern/subr_param.c in FreeBSD.
 */
__private_extern__ int maxfilesperproc = OPEN_MAX;              /* per-proc open files limit */

SYSCTL_INT(_kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW | CTLFLAG_LOCKED,
    &maxprocperuid, 0, "Maximum processes allowed per userid" );

SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW | CTLFLAG_LOCKED,
    &maxfilesperproc, 0, "Maximum files allowed open per process" );

/* Args and fn for proc_iteration callback used in setpriority */
struct puser_nice_args {
	proc_t curp;
	int     prio;
	id_t    who;
	int *   foundp;
	int *   errorp;
};
static int puser_donice_callback(proc_t p, void * arg);


/* Args and fn for proc_iteration callback used in setpriority */
struct ppgrp_nice_args {
	proc_t curp;
	int     prio;
	int *   foundp;
	int *   errorp;
};
static int ppgrp_donice_callback(proc_t p, void * arg);

/*
 * Resource controls and accounting.
 */
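/*
 * getpriority() returns the lowest nice value (i.e. the highest scheduling
 * priority) among the processes selected by `which' and `who'; `low' is
 * seeded to PRIO_MAX + 1 as a "nothing found" sentinel.
 */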
int
getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval)
{
	struct proc *p;
	int low = PRIO_MAX + 1;
	kauth_cred_t my_cred;
	int refheld = 0;
	int error = 0;

	/* would also test (uap->who < 0), but id_t is unsigned */
	if (uap->who > 0x7fffffff) {
		return EINVAL;
	}

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			p = curp;
			low = p->p_nice;
		} else {
			p = proc_find(uap->who);
			if (p == 0) {
				break;
			}
			low = p->p_nice;
			proc_rele(p);
		}
		break;

	case PRIO_PGRP: {
		struct pgrp *pg = PGRP_NULL;

		if (uap->who == 0) {
			/* returns the pgrp to ref */
			pg = proc_pgrp(curp, NULL);
		} else if ((pg = pgrp_find(uap->who)) == PGRP_NULL) {
			break;
		}
		/* No need for iteration as it is a simple scan */
		pgrp_lock(pg);
		PGMEMBERS_FOREACH(pg, p) {
			if (p->p_nice < low) {
				low = p->p_nice;
			}
		}
		pgrp_unlock(pg);
		pgrp_rele(pg);
		break;
	}

	case PRIO_USER:
		if (uap->who == 0) {
			uap->who = kauth_cred_getuid(kauth_cred_get());
		}

		proc_list_lock();

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			my_cred = kauth_cred_proc_ref(p);
			if (kauth_cred_getuid(my_cred) == uap->who &&
			    p->p_nice < low) {
				low = p->p_nice;
			}
			kauth_cred_unref(&my_cred);
		}

		proc_list_unlock();

		break;

	case PRIO_DARWIN_THREAD:
		/* we currently only support the current thread */
		if (uap->who != 0) {
			return EINVAL;
		}

		low = proc_get_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_DARWIN_BG);

		break;

	case PRIO_DARWIN_PROCESS:
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = get_background_proc(curp, p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	case PRIO_DARWIN_ROLE:
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_get_darwin_role(curp, p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	case PRIO_DARWIN_GAME_MODE:
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}


		error = proc_get_game_mode(p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	case PRIO_DARWIN_CARPLAY_MODE:
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}


		error = proc_get_carplay_mode(p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	default:
		return EINVAL;
	}
	if (low == PRIO_MAX + 1) {
		return ESRCH;
	}
	*retval = low;
	return 0;
}

/* call back function used for proc iteration in PRIO_USER */
static int
puser_donice_callback(proc_t p, void * arg)
{
	int error, n;
	struct puser_nice_args * pun = (struct puser_nice_args *)arg;
	kauth_cred_t my_cred;

	my_cred = kauth_cred_proc_ref(p);
	if (kauth_cred_getuid(my_cred) == pun->who) {
		error = donice(pun->curp, p, pun->prio);
		if (pun->errorp != NULL) {
			*pun->errorp = error;
		}
		if (pun->foundp != NULL) {
			n = *pun->foundp;
			*pun->foundp = n + 1;
		}
	}
	kauth_cred_unref(&my_cred);

	return PROC_RETURNED;
}

/* call back function used for proc iteration in PRIO_PGRP */
static int
ppgrp_donice_callback(proc_t p, void * arg)
{
	int error;
	struct ppgrp_nice_args * pun = (struct ppgrp_nice_args *)arg;
	int n;

	error = donice(pun->curp, p, pun->prio);
	if (pun->errorp != NULL) {
		*pun->errorp = error;
	}
	if (pun->foundp != NULL) {
		n = *pun->foundp;
		*pun->foundp = n + 1;
	}

	return PROC_RETURNED;
}

/*
 * Returns:	0			Success
 *		EINVAL
 *		ESRCH
 *	donice:EPERM
 *	donice:EACCES
 */
/* ARGSUSED */
int
setpriority(struct proc *curp, struct setpriority_args *uap, int32_t *retval)
{
	struct proc *p;
	int found = 0, error = 0;
	int refheld = 0;

	AUDIT_ARG(cmd, uap->which);
	AUDIT_ARG(owner, uap->who, 0);
	AUDIT_ARG(value32, uap->prio);

	/* would also test (uap->who < 0), but id_t is unsigned */
	if (uap->who > 0x7fffffff) {
		return EINVAL;
	}

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == 0) {
				break;
			}
			refheld = 1;
		}
		error = donice(curp, p, uap->prio);
		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;

	case PRIO_PGRP: {
		struct pgrp *pg = PGRP_NULL;
		struct ppgrp_nice_args ppgrp;

		if (uap->who == 0) {
			pg = proc_pgrp(curp, NULL);
		} else if ((pg = pgrp_find(uap->who)) == PGRP_NULL) {
			break;
		}

		ppgrp.curp = curp;
		ppgrp.prio = uap->prio;
		ppgrp.foundp = &found;
		ppgrp.errorp = &error;

		pgrp_iterate(pg, ppgrp_donice_callback, (void *)&ppgrp, NULL);
		pgrp_rele(pg);

		break;
	}

	case PRIO_USER: {
		struct puser_nice_args punice;

		if (uap->who == 0) {
			uap->who = kauth_cred_getuid(kauth_cred_get());
		}

		punice.curp = curp;
		punice.prio = uap->prio;
		punice.who = uap->who;
		punice.foundp = &found;
		error = 0;
		punice.errorp = &error;
		proc_iterate(PROC_ALLPROCLIST, puser_donice_callback, (void *)&punice, NULL, NULL);

		break;
	}

	case PRIO_DARWIN_THREAD: {
		/* we currently only support the current thread */
		if (uap->who != 0) {
			return EINVAL;
		}

		error = do_background_thread(current_thread(), uap->prio);
		found++;
		break;
	}

	case PRIO_DARWIN_PROCESS: {
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == 0) {
				break;
			}
			refheld = 1;
		}

		error = do_background_proc(curp, p, uap->prio);

		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;
	}

	case PRIO_DARWIN_GPU: {
		if (uap->who == 0) {
			return EINVAL;
		}

		p = proc_find(uap->who);
		if (p == PROC_NULL) {
			break;
		}

		error = set_gpudeny_proc(curp, p, uap->prio);

		found++;
		proc_rele(p);
		break;
	}

	case PRIO_DARWIN_ROLE: {
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_set_darwin_role(curp, p, uap->prio);

		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;
	}

	case PRIO_DARWIN_GAME_MODE: {
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}


		error = proc_set_game_mode(p, uap->prio);

		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;
	}

	case PRIO_DARWIN_CARPLAY_MODE: {
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_set_carplay_mode(p, uap->prio);

		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;
	}

	default:
		return EINVAL;
	}
	if (found == 0) {
		return ESRCH;
	}
	if (error == EIDRM) {
		*retval = -2;
		error = 0;
	}
	return error;
}


/*
 * Returns:	0			Success
 *		EPERM
 *		EACCES
 *	mac_check_proc_sched:???
 */
static int
donice(struct proc *curp, struct proc *chgp, int n)
{
	int error = 0;
	kauth_cred_t ucred;
	kauth_cred_t my_cred;

	ucred = kauth_cred_proc_ref(curp);
	my_cred = kauth_cred_proc_ref(chgp);

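	/*
	 * A nice change is permitted when the caller is superuser, when the
	 * caller's real uid is root, or when either of the caller's uids
	 * matches the target's effective uid.
	 */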
	if (suser(ucred, NULL) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(my_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(my_cred)) {
		error = EPERM;
		goto out;
	}
	if (n > PRIO_MAX) {
		n = PRIO_MAX;
	}
	if (n < PRIO_MIN) {
		n = PRIO_MIN;
	}
	if (n < chgp->p_nice && suser(ucred, &curp->p_acflag)) {
		error = EACCES;
		goto out;
	}
#if CONFIG_MACF
	error = mac_proc_check_sched(curp, chgp);
	if (error) {
		goto out;
	}
#endif
	proc_lock(chgp);
	chgp->p_nice = (char)n;
	proc_unlock(chgp);
	(void)resetpriority(chgp);
out:
	kauth_cred_unref(&ucred);
	kauth_cred_unref(&my_cred);
	return error;
}

static int
set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority)
{
	int error = 0;
	kauth_cred_t ucred;
	kauth_cred_t target_cred;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	/* TODO: Entitlement instead of uid check */

	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
		error = EPERM;
		goto out;
	}

	if (curp == targetp) {
		error = EPERM;
		goto out;
	}

#if CONFIG_MACF
	error = mac_proc_check_sched(curp, targetp);
	if (error) {
		goto out;
	}
#endif

	switch (priority) {
	case PRIO_DARWIN_GPU_DENY:
		task_set_gpu_denied(proc_task(targetp), TRUE);
		break;
	case PRIO_DARWIN_GPU_ALLOW:
		task_set_gpu_denied(proc_task(targetp), FALSE);
		break;
	default:
		error = EINVAL;
		goto out;
	}

out:
	kauth_cred_unref(&target_cred);
	return error;
}

static int
proc_set_darwin_role(proc_t curp, proc_t targetp, int priority)
{
	int error = 0;
	uint32_t flagsp = 0;

	kauth_cred_t ucred, target_cred;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
		if (priv_check_cred(ucred, PRIV_SETPRIORITY_DARWIN_ROLE, 0) != 0) {
			error = EPERM;
			goto out;
		}
	}

	if (curp != targetp) {
#if CONFIG_MACF
		if ((error = mac_proc_check_sched(curp, targetp))) {
			goto out;
		}
#endif
	}

	proc_get_darwinbgstate(proc_task(targetp), &flagsp);
	if ((flagsp & PROC_FLAG_APPLICATION) != PROC_FLAG_APPLICATION) {
		error = ENOTSUP;
		goto out;
	}

	task_role_t role = TASK_UNSPECIFIED;

	if ((error = proc_darwin_role_to_task_role(priority, &role))) {
		goto out;
	}

	proc_set_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE,
	    TASK_POLICY_ROLE, role);

#if CONFIG_FREEZE
	if (priority == PRIO_DARWIN_ROLE_UI_FOCAL || priority == PRIO_DARWIN_ROLE_UI || priority == PRIO_DARWIN_ROLE_UI_NON_FOCAL) {
		memorystatus_freezer_mark_ui_transition(targetp);
	}
#endif /* CONFIG_FREEZE */

out:
	kauth_cred_unref(&target_cred);
	return error;
}

static int
proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority)
{
	int error = 0;
	int role = 0;

	kauth_cred_t ucred, target_cred;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
		error = EPERM;
		goto out;
	}

	if (curp != targetp) {
#if CONFIG_MACF
		if ((error = mac_proc_check_sched(curp, targetp))) {
			goto out;
		}
#endif
	}

	role = proc_get_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);

	*priority = proc_task_role_to_darwin_role(role);

out:
	kauth_cred_unref(&target_cred);
	return error;
}

#define SET_GAME_MODE_ENTITLEMENT "com.apple.private.set-game-mode"

static int
proc_set_game_mode(proc_t targetp, int priority)
{
	int error = 0;

	kauth_cred_t ucred, target_cred;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	boolean_t entitled = FALSE;
	entitled = IOCurrentTaskHasEntitlement(SET_GAME_MODE_ENTITLEMENT);
	if (!entitled) {
		error = EPERM;
		goto out;
	}

	/* Even with entitlement, non-root is only allowed to set same-user */
	if (!kauth_cred_issuser(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
		error = EPERM;
		goto out;
	}

	switch (priority) {
	case PRIO_DARWIN_GAME_MODE_OFF:
		task_set_game_mode(proc_task(targetp), false);
		break;
	case PRIO_DARWIN_GAME_MODE_ON:
		task_set_game_mode(proc_task(targetp), true);
		break;
	default:
		error = EINVAL;
		goto out;
	}

out:
	kauth_cred_unref(&target_cred);
	return error;
}

static int
proc_get_game_mode(proc_t targetp, int *priority)
{
	int error = 0;

	kauth_cred_t ucred, target_cred;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	boolean_t entitled = FALSE;
	entitled = IOCurrentTaskHasEntitlement(SET_GAME_MODE_ENTITLEMENT);

	/* Root is allowed to get without entitlement */
	if (!kauth_cred_issuser(ucred) && !entitled) {
		error = EPERM;
		goto out;
	}

	/* Even with entitlement, non-root is only allowed to see same-user */
	if (!kauth_cred_issuser(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
		error = EPERM;
		goto out;
	}

	if (task_get_game_mode(proc_task(targetp))) {
		*priority = PRIO_DARWIN_GAME_MODE_ON;
	} else {
		*priority = PRIO_DARWIN_GAME_MODE_OFF;
	}

out:
	kauth_cred_unref(&target_cred);
	return error;
}

#define SET_CARPLAY_MODE_ENTITLEMENT "com.apple.private.set-carplay-mode"

static int
proc_set_carplay_mode(proc_t targetp, int priority)
{
	int error = 0;

	kauth_cred_t ucred, target_cred;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	boolean_t entitled = FALSE;
	entitled = IOCurrentTaskHasEntitlement(SET_CARPLAY_MODE_ENTITLEMENT);
	if (!entitled) {
		error = EPERM;
		goto out;
	}

	/* Even with entitlement, non-root is only allowed to set same-user */
	if (!kauth_cred_issuser(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
		error = EPERM;
		goto out;
	}

	switch (priority) {
	case PRIO_DARWIN_CARPLAY_MODE_OFF:
		task_set_carplay_mode(proc_task(targetp), false);
		break;
	case PRIO_DARWIN_CARPLAY_MODE_ON:
		task_set_carplay_mode(proc_task(targetp), true);
		break;
	default:
		error = EINVAL;
		goto out;
	}

out:
	kauth_cred_unref(&target_cred);
	return error;
}

static int
proc_get_carplay_mode(proc_t targetp, int *priority)
{
	int error = 0;

	kauth_cred_t ucred, target_cred;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	boolean_t entitled = FALSE;
	entitled = IOCurrentTaskHasEntitlement(SET_CARPLAY_MODE_ENTITLEMENT);

	/* Root is allowed to get without entitlement */
	if (!kauth_cred_issuser(ucred) && !entitled) {
		error = EPERM;
		goto out;
	}

	/* Even with entitlement, non-root is only allowed to see same-user */
	if (!kauth_cred_issuser(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
		error = EPERM;
		goto out;
	}

	if (task_get_carplay_mode(proc_task(targetp))) {
		*priority = PRIO_DARWIN_CARPLAY_MODE_ON;
	} else {
		*priority = PRIO_DARWIN_CARPLAY_MODE_OFF;
	}

out:
	kauth_cred_unref(&target_cred);
	return error;
}

static int
get_background_proc(struct proc *curp, struct proc *targetp, int *priority)
{
	int external = 0;
	int error = 0;
	kauth_cred_t ucred, target_cred;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
		error = EPERM;
		goto out;
	}

	external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

	*priority = proc_get_task_policy(current_task(), external, TASK_POLICY_DARWIN_BG);

out:
	kauth_cred_unref(&target_cred);
	return error;
}

static int
do_background_proc(struct proc *curp, struct proc *targetp, int priority)
{
#if !CONFIG_MACF
#pragma unused(curp)
#endif
	int error = 0;
	kauth_cred_t ucred;
	kauth_cred_t target_cred;
	int external;
	int enable;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
		error = EPERM;
		goto out;
	}

#if CONFIG_MACF
	error = mac_proc_check_sched(curp, targetp);
	if (error) {
		goto out;
	}
#endif

	external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

	switch (priority) {
	case PRIO_DARWIN_BG:
		enable = TASK_POLICY_ENABLE;
		break;
	case PRIO_DARWIN_NONUI:
		/* ignored for compatibility */
		goto out;
	default:
		/* TODO: EINVAL if priority != 0 */
		enable = TASK_POLICY_DISABLE;
		break;
	}

	proc_set_task_policy(proc_task(targetp), external, TASK_POLICY_DARWIN_BG, enable);

out:
	kauth_cred_unref(&target_cred);
	return error;
}

static void
do_background_socket(struct proc *p, thread_t thread)
{
#if SOCKETS
	struct fileproc *fp;
	int              background = false;
#if NECP
	int              update_necp = false;
#endif /* NECP */

	if (thread != THREAD_NULL &&
	    get_threadtask(thread) != proc_task(p)) {
		return;
	}

	proc_fdlock(p);

	if (thread != THREAD_NULL) {
		background = proc_get_effective_thread_policy(thread, TASK_POLICY_ALL_SOCKETS_BG);
	} else {
		background = proc_get_effective_task_policy(proc_task(p), TASK_POLICY_ALL_SOCKETS_BG);
	}

	if (background) {
		/*
		 * For PRIO_DARWIN_PROCESS (thread is NULL), simply mark
		 * the sockets with the background flag.  There's nothing
		 * to do here for the PRIO_DARWIN_THREAD case.
		 */
		if (thread == THREAD_NULL) {
			fdt_foreach(fp, p) {
				if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) {
					struct socket *sockp = (struct socket *)fp_get_data(fp);
					socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
					sockp->so_background_thread = NULL;
				}
#if NECP
				else if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_NETPOLICY) {
					if (necp_set_client_as_background(p, fp, background)) {
						update_necp = true;
					}
				}
#endif /* NECP */
			}
		}
	} else {
		/* disable networking IO throttle.
		 * NOTE - It is a known limitation of the current design that we
		 * could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for
		 * sockets created by other threads within this process.
		 */
		fdt_foreach(fp, p) {
			struct socket *sockp;

			if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) {
				sockp = (struct socket *)fp_get_data(fp);
				/* skip if only clearing this thread's sockets */
				if ((thread) && (sockp->so_background_thread != thread)) {
					continue;
				}
				socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
				sockp->so_background_thread = NULL;
			}
#if NECP
			else if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_NETPOLICY) {
				if (necp_set_client_as_background(p, fp, background)) {
					update_necp = true;
				}
			}
#endif /* NECP */
		}
	}

	proc_fdunlock(p);

#if NECP
	if (update_necp) {
		necp_update_all_clients();
	}
#endif /* NECP */
#else
#pragma unused(p, thread)
#endif
}


/*
 * do_background_thread
 *
 * Requires: thread reference
 *
 * Returns:     0                       Success
 *              EPERM                   Tried to background while in vfork
 * XXX - todo - does this need a MACF hook?
 */
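/*
 * Reached via setpriority(2); a user-space sketch:
 *
 *     setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);  // background the current thread
 *     setpriority(PRIO_DARWIN_THREAD, 0, 0);               // restore the default policy
 */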
static int
do_background_thread(thread_t thread, int priority)
{
	int enable, external;
	int rv = 0;

	/* Backgrounding is unsupported for workq threads */
	if (thread_is_static_param(thread)) {
		return EPERM;
	}

	/* Not allowed to combine QoS and DARWIN_BG, doing so strips the QoS */
	if (thread_has_qos_policy(thread)) {
		thread_remove_qos_policy(thread);
		rv = EIDRM;
	}

	/* TODO: Fail if someone passes something besides 0 or PRIO_DARWIN_BG */
	enable   = (priority == PRIO_DARWIN_BG) ? TASK_POLICY_ENABLE   : TASK_POLICY_DISABLE;
	external = (current_thread() == thread) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

	proc_set_thread_policy(thread, external, TASK_POLICY_DARWIN_BG, enable);

	return rv;
}


/*
 * Returns:	0			Success
 *	copyin:EFAULT
 *	dosetrlimit:
 */
/* ARGSUSED */
int
setrlimit(struct proc *p, struct setrlimit_args *uap, __unused int32_t *retval)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, (caddr_t)&alim,
	    sizeof(struct rlimit)))) {
		return error;
	}

	return dosetrlimit(p, uap->which, &alim);
}

/*
 * Returns:	0			Success
 *		EINVAL
 *	suser:EPERM
 *
 * Notes:	EINVAL is returned both for invalid arguments, and in the
 *		case that the current usage (e.g. RLIMIT_STACK) is already
 *		in excess of the requested limit.
 */
static int
dosetrlimit(struct proc *p, u_int which, struct rlimit *newrlim)
{
	struct rlimit        rlim, stack_rlim = {.rlim_cur = 0, .rlim_max = 0};
	int                  error;
	kern_return_t        kr;

	/* Mask out POSIX flag, saved above */
	which &= ~_RLIMIT_POSIX_FLAG;

	/* Unknown resource */
	if (which >= RLIM_NLIMITS) {
		return EINVAL;
	}

	proc_lock(p);

	/* Only one thread is able to change the current process's rlimit values */
	proc_limitblock(p);

	/*
	 * Take a snapshot of the current rlimit values and read it throughout
	 * this routine. This minimizes the critical sections and allows other
	 * processes in the system to access the plimit while we are in the
	 * middle of this setrlimit call.
	 */
	rlim = smr_serialized_load(&p->p_limit)->pl_rlimit[which];

	proc_unlock(p);

	error = 0;
	/* Sanity check: new soft limit cannot exceed new hard limit */
	if (newrlim->rlim_cur > newrlim->rlim_max) {
		error = EINVAL;
	}
	/*
	 * Sanity check: only super-user may raise the hard limit.
	 * newrlim->rlim_cur > rlim.rlim_max implies that the call
	 * is increasing the hard limit as well.
	 */
	else if (newrlim->rlim_cur > rlim.rlim_max || newrlim->rlim_max > rlim.rlim_max) {
		/* suser() returns 0 if the calling thread is super user. */
		error = suser(kauth_cred_get(), &p->p_acflag);
	}

	if (error) {
		/* Invalid setrlimit request: EINVAL or EPERM */
		goto out;
	}

	/* We have the reader lock of the process's plimit so it's safe to read the rlimit values */
	switch (which) {
	case RLIMIT_CPU:
		if (newrlim->rlim_cur == RLIM_INFINITY) {
			task_vtimer_clear(proc_task(p), TASK_VTIMER_RLIM);
			timerclear(&p->p_rlim_cpu);
		} else {
			task_absolutetime_info_data_t   tinfo;
			mach_msg_type_number_t          count;
			struct timeval                  ttv, tv;
			clock_sec_t                     tv_sec;
			clock_usec_t                    tv_usec;

			count = TASK_ABSOLUTETIME_INFO_COUNT;
			task_info(proc_task(p), TASK_ABSOLUTETIME_INFO, (task_info_t)&tinfo, &count);
			absolutetime_to_microtime(tinfo.total_user + tinfo.total_system, &tv_sec, &tv_usec);
			ttv.tv_sec = tv_sec;
			ttv.tv_usec = tv_usec;

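			/*
			 * The remaining budget is the new limit minus the CPU
			 * time already consumed.  A positive remainder (re)arms
			 * the task's virtual timer below; otherwise the limit is
			 * already exceeded and SIGXCPU is delivered immediately.
			 */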
			tv.tv_sec = (newrlim->rlim_cur > __INT_MAX__ ? __INT_MAX__ : (__darwin_time_t)newrlim->rlim_cur);
			tv.tv_usec = 0;
			timersub(&tv, &ttv, &p->p_rlim_cpu);

			timerclear(&tv);
			if (timercmp(&p->p_rlim_cpu, &tv, >)) {
				task_vtimer_set(proc_task(p), TASK_VTIMER_RLIM);
			} else {
				task_vtimer_clear(proc_task(p), TASK_VTIMER_RLIM);

				timerclear(&p->p_rlim_cpu);

				psignal(p, SIGXCPU);
			}
		}
		break;

	case RLIMIT_DATA:
#if 00
		if (newrlim->rlim_cur > maxdmap) {
			newrlim->rlim_cur = maxdmap;
		}
		if (newrlim->rlim_max > maxdmap) {
			newrlim->rlim_max = maxdmap;
		}
#endif

		/* Over to Mach VM to validate the new data limit */
		if (vm_map_set_data_limit(current_map(), newrlim->rlim_cur) != KERN_SUCCESS) {
			/* The limit specified cannot be lowered because current usage is already higher than the limit. */
			error =  EINVAL;
			goto out;
		}
		break;

	case RLIMIT_STACK:
		if (p->p_lflag & P_LCUSTOM_STACK) {
			/* Process has a custom stack set - rlimit cannot be used to change it */
			error = EINVAL;
			goto out;
		}

		/*
		 * Note: the real stack size limit is enforced by maxsmap, not a process's RLIMIT_STACK.
		 *
		 * The kernel uses maxsmap to control the actual stack size limit. While we allow
		 * processes to set RLIMIT_STACK to RLIM_INFINITY (UNIX 03), accessing memory
		 * beyond the maxsmap will still trigger an exception.
		 *
		 * stack_rlim is used to store the user-defined RLIMIT_STACK values while we adjust
		 * the stack size using kernel limit (i.e. maxsmap).
		 */
		if (newrlim->rlim_cur > maxsmap ||
		    newrlim->rlim_max > maxsmap) {
			if (newrlim->rlim_cur > maxsmap) {
				stack_rlim.rlim_cur = newrlim->rlim_cur;
				newrlim->rlim_cur = maxsmap;
			}
			if (newrlim->rlim_max > maxsmap) {
				stack_rlim.rlim_max = newrlim->rlim_max;
				newrlim->rlim_max = maxsmap;
			}
		}

		/*
		 * rlim.rlim_cur/rlim_max could be arbitrarily large due to previous calls to setrlimit().
		 * Use the actual size for stack region adjustment.
		 */
		if (rlim.rlim_cur > maxsmap) {
			rlim.rlim_cur = maxsmap;
		}
		if (rlim.rlim_max > maxsmap) {
			rlim.rlim_max = maxsmap;
		}

		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (newrlim->rlim_cur > rlim.rlim_cur) {
			mach_vm_offset_t addr;
			mach_vm_size_t size;

			/* grow stack */
			size = newrlim->rlim_cur;
			if (round_page_overflow(size, &size)) {
				error = EINVAL;
				goto out;
			}
			size -= round_page_64(rlim.rlim_cur);

			addr = (mach_vm_offset_t)(p->user_stack - round_page_64(newrlim->rlim_cur));
			kr = mach_vm_protect(current_map(), addr, size, FALSE, VM_PROT_DEFAULT);
			if (kr != KERN_SUCCESS) {
				error = EINVAL;
				goto out;
			}
		} else if (newrlim->rlim_cur < rlim.rlim_cur) {
			mach_vm_offset_t addr;
			mach_vm_size_t size;
			uint64_t cur_sp;

			/* shrink stack */

			/*
			 * First check if new stack limit would agree
			 * with current stack usage.
			 * Get the current thread's stack pointer...
			 */
			cur_sp = thread_adjuserstack(current_thread(), 0);
			if (cur_sp <= p->user_stack &&
			    cur_sp > (p->user_stack - round_page_64(rlim.rlim_cur))) {
				/* stack pointer is in main stack */
				if (cur_sp <= (p->user_stack - round_page_64(newrlim->rlim_cur))) {
					/*
					 * New limit would cause current usage to be invalid:
					 * reject new limit.
					 */
					error =  EINVAL;
					goto out;
				}
			} else {
				/* not on the main stack: reject */
				error =  EINVAL;
				goto out;
			}

			size = round_page_64(rlim.rlim_cur);
			size -= round_page_64(newrlim->rlim_cur);

			addr = (mach_vm_offset_t)(p->user_stack - round_page_64(rlim.rlim_cur));

			kr = mach_vm_protect(current_map(), addr, size, FALSE, VM_PROT_NONE);
			if (kr != KERN_SUCCESS) {
				error =  EINVAL;
				goto out;
			}
		} else {
			/* no change ... */
		}

		/*
		 * We've adjusted the process's stack region. If the user-defined limit is greater
		 * than maxsmap, we need to reflect this change in rlimit interface.
		 */
		if (stack_rlim.rlim_cur != 0) {
			newrlim->rlim_cur = stack_rlim.rlim_cur;
		}
		if (stack_rlim.rlim_max != 0) {
			newrlim->rlim_max = stack_rlim.rlim_max;
		}
		break;

	case RLIMIT_NOFILE:
		/*
		 * Nothing to be done here; the sanity checks were already performed before
		 * entering this switch block. The real NOFILE limit enforced by the kernel
		 * is capped at MIN(RLIMIT_NOFILE, maxfilesperproc).
		 */
		break;

	case RLIMIT_AS:
		/* Over to Mach VM to validate the new address space limit */
		if (vm_map_set_size_limit(current_map(), newrlim->rlim_cur) != KERN_SUCCESS) {
			/* The limit specified cannot be lowered because current usage is already higher than the limit. */
			error =  EINVAL;
			goto out;
		}
		break;

	case RLIMIT_NPROC:
		/*
		 * Only root can set to the maxproc limits, as it is
		 * systemwide resource; all others are limited to
		 * maxprocperuid (presumably less than maxproc).
		 */
		if (kauth_cred_issuser(kauth_cred_get())) {
			if (newrlim->rlim_cur > (rlim_t)maxproc) {
				newrlim->rlim_cur = maxproc;
			}
			if (newrlim->rlim_max > (rlim_t)maxproc) {
				newrlim->rlim_max = maxproc;
			}
		} else {
			if (newrlim->rlim_cur > (rlim_t)maxprocperuid) {
				newrlim->rlim_cur = maxprocperuid;
			}
			if (newrlim->rlim_max > (rlim_t)maxprocperuid) {
				newrlim->rlim_max = maxprocperuid;
			}
		}
		break;

	case RLIMIT_MEMLOCK:
		/*
		 * Tell the Mach VM layer about the new limit value.
		 */
		newrlim->rlim_cur = (vm_size_t)newrlim->rlim_cur;
		vm_map_set_user_wire_limit(current_map(), (vm_size_t)newrlim->rlim_cur);
		break;
	} /* switch... */

	/* Everything checks out and we are now ready to update the rlimit */
	error = 0;

out:

	if (error == 0) {
		/*
		 * COW the current plimit if it's shared, otherwise update it in place.
		 * Finally unblock other threads wishing to change plimit.
		 */
		proc_limitupdate(p, true, ^(struct plimit *plim) {
			plim->pl_rlimit[which] = *newrlim;
		});
	} else {
		/*
		 * This setrlimit has failed, just leave the plimit as is and unblock other
		 * threads wishing to change plimit.
		 */
		proc_lock(p);
		proc_limitunblock(p);
		proc_unlock(p);
	}

	return error;
}

/* ARGSUSED */
int
getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval)
{
	struct rlimit lim = {};

	/*
	 * Take out flag now in case we need to use it to trigger variant
	 * behaviour later.
	 */
	uap->which &= ~_RLIMIT_POSIX_FLAG;

	if (uap->which >= RLIM_NLIMITS) {
		return EINVAL;
	}
	lim = proc_limitget(p, uap->which);
	return copyout((caddr_t)&lim,
	           uap->rlp, sizeof(struct rlimit));
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
/* No lock on proc is held for this.. */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *ip)
{
	task_t                  task;

	timerclear(up);
	timerclear(sp);
	if (ip != NULL) {
		timerclear(ip);
	}

	task = proc_task(p);
	if (task) {
		mach_task_basic_info_data_t tinfo;
		task_thread_times_info_data_t ttimesinfo;
		task_events_info_data_t teventsinfo;
		mach_msg_type_number_t task_info_count, task_ttimes_count;
		mach_msg_type_number_t task_events_count;
		struct timeval ut, st;

		task_info_count = MACH_TASK_BASIC_INFO_COUNT;
		task_info(task, MACH_TASK_BASIC_INFO,
		    (task_info_t)&tinfo, &task_info_count);
		ut.tv_sec = tinfo.user_time.seconds;
		ut.tv_usec = tinfo.user_time.microseconds;
		st.tv_sec = tinfo.system_time.seconds;
		st.tv_usec = tinfo.system_time.microseconds;
		timeradd(&ut, up, up);
		timeradd(&st, sp, sp);

		task_ttimes_count = TASK_THREAD_TIMES_INFO_COUNT;
		task_info(task, TASK_THREAD_TIMES_INFO,
		    (task_info_t)&ttimesinfo, &task_ttimes_count);

		ut.tv_sec = ttimesinfo.user_time.seconds;
		ut.tv_usec = ttimesinfo.user_time.microseconds;
		st.tv_sec = ttimesinfo.system_time.seconds;
		st.tv_usec = ttimesinfo.system_time.microseconds;
		timeradd(&ut, up, up);
		timeradd(&st, sp, sp);

		task_events_count = TASK_EVENTS_INFO_COUNT;
		task_info(task, TASK_EVENTS_INFO,
		    (task_info_t)&teventsinfo, &task_events_count);

		/*
		 * No need to lock "p":  this does not need to be
		 * completely consistent, right ?
		 */
		p->p_stats->p_ru.ru_minflt = (teventsinfo.faults -
		    teventsinfo.pageins);
		p->p_stats->p_ru.ru_majflt = teventsinfo.pageins;
		p->p_stats->p_ru.ru_nivcsw = (teventsinfo.csw -
		    p->p_stats->p_ru.ru_nvcsw);
		if (p->p_stats->p_ru.ru_nivcsw < 0) {
			p->p_stats->p_ru.ru_nivcsw = 0;
		}

		p->p_stats->p_ru.ru_maxrss = (long)tinfo.resident_size_max;
	}
}

__private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
__private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);

/* ARGSUSED */
int
getrusage(struct proc *p, struct getrusage_args *uap, __unused int32_t *retval)
{
	struct rusage *rup, rubuf;
	struct user64_rusage rubuf64 = {};
	struct user32_rusage rubuf32 = {};
	size_t retsize = sizeof(rubuf);                 /* default: 32 bits */
	caddr_t retbuf = (caddr_t)&rubuf;               /* default: 32 bits */
	struct timeval utime;
	struct timeval stime;


	switch (uap->who) {
	case RUSAGE_SELF:
		calcru(p, &utime, &stime, NULL);
		proc_lock(p);
		rup = &p->p_stats->p_ru;
		rup->ru_utime = utime;
		rup->ru_stime = stime;

		rubuf = *rup;
		proc_unlock(p);

		break;

	case RUSAGE_CHILDREN:
		proc_lock(p);
		rup = &p->p_stats->p_cru;
		rubuf = *rup;
		proc_unlock(p);
		break;

	default:
		return EINVAL;
	}
	if (IS_64BIT_PROCESS(p)) {
		retsize = sizeof(rubuf64);
		retbuf = (caddr_t)&rubuf64;
		munge_user64_rusage(&rubuf, &rubuf64);
	} else {
		retsize = sizeof(rubuf32);
		retbuf = (caddr_t)&rubuf32;
		munge_user32_rusage(&rubuf, &rubuf32);
	}

	return copyout(retbuf, uap->rusage, retsize);
}

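/*
 * Accumulate the statistics of ru2 into ru: the time values are summed,
 * ru_maxrss takes the maximum, and every remaining long-sized field in
 * the range ru_first..ru_last is added pairwise.
 */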
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	long i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss) {
		ru->ru_maxrss = ru2->ru_maxrss;
	}
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--) {
		*ip++ += *ip2++;
	}
}

/*
 * Add the rusage stats of the child to the parent.
 *
 * It adds the rusage statistics of the child process, and of all that
 * child's children, to its parent.
 *
 * Note: the proc lock of the parent should be held while calling this function.
 */
void
update_rusage_info_child(struct rusage_info_child *ri, rusage_info_current *ri_current)
{
	ri->ri_child_user_time += (ri_current->ri_user_time +
	    ri_current->ri_child_user_time);
	ri->ri_child_system_time += (ri_current->ri_system_time +
	    ri_current->ri_child_system_time);
	ri->ri_child_pkg_idle_wkups += (ri_current->ri_pkg_idle_wkups +
	    ri_current->ri_child_pkg_idle_wkups);
	ri->ri_child_interrupt_wkups += (ri_current->ri_interrupt_wkups +
	    ri_current->ri_child_interrupt_wkups);
	ri->ri_child_pageins += (ri_current->ri_pageins +
	    ri_current->ri_child_pageins);
	ri->ri_child_elapsed_abstime += ((ri_current->ri_proc_exit_abstime -
	    ri_current->ri_proc_start_abstime) + ri_current->ri_child_elapsed_abstime);
}

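/*
 * SMR callback: actually free a plimit once no hazard-referenced reader
 * can still observe it.
 */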
static void
proc_limit_free(smr_node_t node)
{
	struct plimit *plimit = __container_of(node, struct plimit, pl_node);

	zfree(plimit_zone, plimit);
}

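/*
 * Drop a reference on a plimit; the final release defers the free through
 * SMR so lock-free readers are never left with a dangling pointer.
 */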
static void
proc_limit_release(struct plimit *plimit)
{
	if (os_ref_release(&plimit->pl_refcnt) == 0) {
		smr_proc_task_call(&plimit->pl_node, sizeof(*plimit), proc_limit_free);
	}
}

/*
 * Read the soft limit of the specified resource.  This is a lock-free
 * read: an SMR hazard section pins the current plimit just long enough
 * to copy the field out.
 */
rlim_t
proc_limitgetcur(proc_t p, int which)
{
	rlim_t rlim_cur;

	assert(p);
	assert(which < RLIM_NLIMITS);

	smr_proc_task_enter();
	rlim_cur = smr_entered_load(&p->p_limit)->pl_rlimit[which].rlim_cur;
	smr_proc_task_leave();

	return rlim_cur;
}

/*
 * Handle the commonly requested limit that must be clamped with maxfilesperproc.
 */
int
proc_limitgetcur_nofile(struct proc *p)
{
	rlim_t lim = proc_limitgetcur(p, RLIMIT_NOFILE);

	return (int)MIN(lim, maxfilesperproc);
}

/*
 * Write the soft limit of the specified resource. This is an internal
 * function used only by proc_exit to update RLIMIT_FSIZE in place
 * without invoking setrlimit.
 */
void
proc_limitsetcur_fsize(proc_t p, rlim_t value)
{
	proc_limitupdate(p, false, ^(struct plimit *plimit) {
		plimit->pl_rlimit[RLIMIT_FSIZE].rlim_cur = value;
	});
}

struct rlimit
proc_limitget(proc_t p, int which)
{
	struct rlimit lim;

	assert(which < RLIM_NLIMITS);

	smr_proc_task_enter();
	lim = smr_entered_load(&p->p_limit)->pl_rlimit[which];
	smr_proc_task_leave();

	return lim;
}

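/*
 * At fork, the child shares the parent's plimit by taking a reference;
 * a private copy is only made later, copy-on-write, by proc_limitupdate().
 */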
void
proc_limitfork(proc_t parent, proc_t child)
{
	struct plimit *plim;

	proc_lock(parent);
	plim = smr_serialized_load(&parent->p_limit);
	os_ref_retain(&plim->pl_refcnt);
	proc_unlock(parent);

	smr_init_store(&child->p_limit, plim);
}

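/*
 * Called at process teardown: detach the plimit from the proc and drop
 * the reference it held.
 */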
void
proc_limitdrop(proc_t p)
{
	struct plimit *plimit = NULL;

	proc_lock(p);
	plimit = smr_serialized_load(&p->p_limit);
	smr_clear_store(&p->p_limit);
	proc_unlock(p);

	proc_limit_release(plimit);
}

/*
 * proc_limitblock/unblock are used to serialize access to plimit
 * from concurrent threads within the same process.
 * Callers must hold the proc lock on entry; they return with
 * the proc lock still held.
 */
static void
proc_limitblock(proc_t p)
{
	lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);

	while (p->p_lflag & P_LLIMCHANGE) {
		p->p_lflag |= P_LLIMWAIT;
		msleep(&p->p_limit, &p->p_mlock, 0, "proc_limitblock", NULL);
	}
	p->p_lflag |= P_LLIMCHANGE;
}

/*
 * Callers must hold the proc lock on entry; they return with
 * the proc lock still held.
 */
static void
proc_limitunblock(proc_t p)
{
	lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);

	p->p_lflag &= ~P_LLIMCHANGE;
	if (p->p_lflag & P_LLIMWAIT) {
		p->p_lflag &= ~P_LLIMWAIT;
		wakeup(&p->p_limit);
	}
}

/*
 * Perform an rlimit update (as defined by the arbitrary `update` function).
 *
 * Because plimits are accessed without holding any locks,
 * with only a hazard reference, the struct plimit is always
 * copied, updated, and replaced, to implement a const value type.
 */
static void
proc_limitupdate(proc_t p, bool unblock, void (^update)(struct plimit *))
{
	struct plimit  *cur_plim;
	struct plimit  *copy_plim;

	copy_plim = zalloc_flags(plimit_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	proc_lock(p);

	cur_plim = smr_serialized_load(&p->p_limit);

	os_ref_init_count(&copy_plim->pl_refcnt, &rlimit_refgrp, 1);
	bcopy(cur_plim->pl_rlimit, copy_plim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);

	update(copy_plim);

	smr_serialized_store(&p->p_limit, copy_plim);

	if (unblock) {
		proc_limitunblock(p);
	}
	proc_unlock(p);

	proc_limit_release(cur_plim);
}

static int
iopolicysys_disk(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
static int
iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
static int
iopolicysys_vfs_atime_updates(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
static int
iopolicysys_vfs_statfs_no_data_volume(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
static int
iopolicysys_vfs_trigger_resolve(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
static int
iopolicysys_vfs_ignore_content_protection(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
static int
iopolicysys_vfs_ignore_node_permissions(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *ipo_param);
static int
iopolicysys_vfs_skip_mtime_update(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
static int
iopolicysys_vfs_allow_lowspace_writes(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
static int
iopolicysys_vfs_disallow_rw_for_o_evtonly(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
static int iopolicysys_vfs_altlink(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
static int iopolicysys_vfs_nocache_write_fs_blksize(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);

/*
 * iopolicysys
 *
 * Description:	System call MUX for use in manipulating I/O policy attributes of the current process or thread
 *
 * Parameters:	cmd				Policy command
 *		arg				Pointer to policy arguments
 *
 * Returns:	0				Success
 *		EINVAL				Invalid command or invalid policy arguments
 *
 */
iopolicysys(struct proc * p,struct iopolicysys_args * uap,int32_t * retval)1872 iopolicysys(struct proc *p, struct iopolicysys_args *uap, int32_t *retval)
1873 {
1874 	int     error = 0;
1875 	struct _iopol_param_t iop_param;
1876 
1877 	if ((error = copyin(uap->arg, &iop_param, sizeof(iop_param))) != 0) {
1878 		goto out;
1879 	}
1880 
1881 	switch (iop_param.iop_iotype) {
1882 	case IOPOL_TYPE_DISK:
1883 		error = iopolicysys_disk(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1884 		if (error == EIDRM) {
1885 			*retval = -2;
1886 			error = 0;
1887 		}
1888 		if (error) {
1889 			goto out;
1890 		}
1891 		break;
1892 	case IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY:
1893 		error = iopolicysys_vfs_hfs_case_sensitivity(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1894 		if (error) {
1895 			goto out;
1896 		}
1897 		break;
1898 	case IOPOL_TYPE_VFS_ATIME_UPDATES:
1899 		error = iopolicysys_vfs_atime_updates(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1900 		if (error) {
1901 			goto out;
1902 		}
1903 		break;
1904 	case IOPOL_TYPE_VFS_MATERIALIZE_DATALESS_FILES:
1905 		error = iopolicysys_vfs_materialize_dataless_files(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1906 		if (error) {
1907 			goto out;
1908 		}
1909 		break;
1910 	case IOPOL_TYPE_VFS_STATFS_NO_DATA_VOLUME:
1911 		error = iopolicysys_vfs_statfs_no_data_volume(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1912 		if (error) {
1913 			goto out;
1914 		}
1915 		break;
1916 	case IOPOL_TYPE_VFS_TRIGGER_RESOLVE:
1917 		error = iopolicysys_vfs_trigger_resolve(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1918 		if (error) {
1919 			goto out;
1920 		}
1921 		break;
1922 	case IOPOL_TYPE_VFS_IGNORE_CONTENT_PROTECTION:
1923 		error = iopolicysys_vfs_ignore_content_protection(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1924 		if (error) {
1925 			goto out;
1926 		}
1927 		break;
1928 	case IOPOL_TYPE_VFS_IGNORE_PERMISSIONS:
1929 		error = iopolicysys_vfs_ignore_node_permissions(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1930 		if (error) {
1931 			goto out;
1932 		}
1933 		break;
1934 	case IOPOL_TYPE_VFS_SKIP_MTIME_UPDATE:
1935 		error = iopolicysys_vfs_skip_mtime_update(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1936 		if (error) {
1937 			goto out;
1938 		}
1939 		break;
1940 	case IOPOL_TYPE_VFS_ALLOW_LOW_SPACE_WRITES:
1941 		error = iopolicysys_vfs_allow_lowspace_writes(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1942 		if (error) {
1943 			goto out;
1944 		}
1945 		break;
1946 	case IOPOL_TYPE_VFS_DISALLOW_RW_FOR_O_EVTONLY:
1947 		error = iopolicysys_vfs_disallow_rw_for_o_evtonly(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1948 		if (error) {
1949 			goto out;
1950 		}
1951 		break;
1952 	case IOPOL_TYPE_VFS_ALTLINK:
1953 		error = iopolicysys_vfs_altlink(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1954 		if (error) {
1955 			goto out;
1956 		}
1957 		break;
1958 	case IOPOL_TYPE_VFS_NOCACHE_WRITE_FS_BLKSIZE:
1959 		error = iopolicysys_vfs_nocache_write_fs_blksize(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1960 		if (error) {
1961 			goto out;
1962 		}
1963 		break;
1964 
1965 	default:
1966 		error = EINVAL;
1967 		goto out;
1968 	}
1969 
1970 	/* Individual iotype handlers are expected to update iop_param, if requested with a GET command */
1971 	if (uap->cmd == IOPOL_CMD_GET) {
1972 		error = copyout((caddr_t)&iop_param, uap->arg, sizeof(iop_param));
1973 		if (error) {
1974 			goto out;
1975 		}
1976 	}
1977 
1978 out:
1979 	return error;
1980 }
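
/*
 * Userspace sketch (illustrative, assuming the libc wrappers declared
 * in <sys/resource.h>): setiopolicy_np()/getiopolicy_np() funnel into
 * this system call and marshal struct _iopol_param_t on the caller's
 * behalf, e.g.:
 *
 *	if (setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS,
 *	    IOPOL_THROTTLE) == -1) {
 *		err(1, "setiopolicy_np");
 *	}
 *	int pol = getiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS);
 */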

static int
iopolicysys_disk(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int                     error = 0;
	thread_t        thread;
	int                     policy_flavor;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		thread = THREAD_NULL;
		policy_flavor = TASK_POLICY_IOPOL;
		break;

	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		policy_flavor = TASK_POLICY_IOPOL;

		/* Combining QoS with a (non-PASSIVE) IO policy is not allowed; doing so strips the QoS */
		if (cmd == IOPOL_CMD_SET && thread_has_qos_policy(thread)) {
			switch (policy) {
			case IOPOL_DEFAULT:
			case IOPOL_PASSIVE:
				break;
			case IOPOL_UTILITY:
			case IOPOL_THROTTLE:
			case IOPOL_IMPORTANT:
			case IOPOL_STANDARD:
				if (!thread_is_static_param(thread)) {
					thread_remove_qos_policy(thread);
					/*
					 * This is not an error case; it returns a marker to
					 * user-space indicating that the thread was stripped
					 * of its QoS class.
					 */
					error = EIDRM;
					break;
				}
				OS_FALLTHROUGH;
			default:
				error = EINVAL;
				goto out;
			}
		}
		break;

	case IOPOL_SCOPE_DARWIN_BG:
#if !defined(XNU_TARGET_OS_OSX)
		/* Not supported on platforms other than macOS, where BG is always IOPOL_THROTTLE */
		error = ENOTSUP;
		goto out;
#else /* !defined(XNU_TARGET_OS_OSX) */
		thread = THREAD_NULL;
		policy_flavor = TASK_POLICY_DARWIN_BG_IOPOL;
		break;
#endif /* !defined(XNU_TARGET_OS_OSX) */

	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_DEFAULT:
			if (scope == IOPOL_SCOPE_DARWIN_BG) {
				/* the current default BG throttle level is UTILITY */
				policy = IOPOL_UTILITY;
			} else {
				policy = IOPOL_IMPORTANT;
			}
			break;
		case IOPOL_UTILITY:
		/* fall-through */
		case IOPOL_THROTTLE:
			/* These levels are OK */
			break;
		case IOPOL_IMPORTANT:
		/* fall-through */
		case IOPOL_STANDARD:
		/* fall-through */
		case IOPOL_PASSIVE:
			if (scope == IOPOL_SCOPE_DARWIN_BG) {
				/* These levels are invalid for BG */
				error = EINVAL;
				goto out;
			} else {
				/* OK for other scopes */
			}
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (thread != THREAD_NULL) {
			proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor, policy);
		} else {
			proc_set_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = proc_get_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor);
		} else {
			policy = proc_get_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor);
		}
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL;         /* unknown command */
		break;
	}

out:
	return error;
}
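
/*
 * Marker example (illustrative): a thread that holds a QoS class and
 * sets a non-PASSIVE disk policy on itself has that QoS class stripped;
 * the EIDRM translation in iopolicysys() above reports -2 via *retval,
 * which presumably surfaces as a -2 return from setiopolicy_np(), so
 * callers can detect the strip:
 *
 *	int rv = setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD,
 *	    IOPOL_THROTTLE);
 *	if (rv == -2) {
 *		// QoS class was stripped; the policy was still applied
 *	}
 */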

static int
iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int                     error = 0;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		/* Only process OK */
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
		/* fall-through */
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
			/* These policies are OK */
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (0 == kauth_cred_issuser(kauth_cred_get())) {
			/* If it's a non-root process, it needs to have the entitlement to set the policy */
			boolean_t entitled = FALSE;
			entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
			if (!entitled) {
				error = EPERM;
				goto out;
			}
		}

		switch (policy) {
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
			OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY), &p->p_vfs_iopolicy);
			break;
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
			OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY, &p->p_vfs_iopolicy);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		break;
	case IOPOL_CMD_GET:
		iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY)
		    ? IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE
		    : IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT;
		break;
	default:
		error = EINVAL;         /* unknown command */
		break;
	}

out:
	return error;
}

static inline int
get_thread_atime_policy(struct uthread *ut)
{
	return (ut->uu_flag & UT_ATIME_UPDATE) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT;
}

static inline void
set_thread_atime_policy(struct uthread *ut, int policy)
{
	if (policy == IOPOL_ATIME_UPDATES_OFF) {
		ut->uu_flag |= UT_ATIME_UPDATE;
	} else {
		ut->uu_flag &= ~UT_ATIME_UPDATE;
	}
}

static inline void
set_task_atime_policy(struct proc *p, int policy)
{
	if (policy == IOPOL_ATIME_UPDATES_OFF) {
		OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES, &p->p_vfs_iopolicy);
	} else {
		OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES), &p->p_vfs_iopolicy);
	}
}

static inline int
get_task_atime_policy(struct proc *p)
{
	return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT;
}

static int
iopolicysys_vfs_atime_updates(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int                     error = 0;
	thread_t                thread;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		break;
	case IOPOL_SCOPE_PROCESS:
		thread = THREAD_NULL;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_ATIME_UPDATES_DEFAULT:
		case IOPOL_ATIME_UPDATES_OFF:
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (thread != THREAD_NULL) {
			set_thread_atime_policy(get_bsdthread_info(thread), policy);
		} else {
			set_task_atime_policy(p, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = get_thread_atime_policy(get_bsdthread_info(thread));
		} else {
			policy = get_task_atime_policy(p);
		}
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL;         /* unknown command */
		break;
	}

out:
	return error;
}

static inline int
get_thread_materialize_policy(struct uthread *ut)
{
	if (ut->uu_flag & UT_NSPACE_NODATALESSFAULTS) {
		return IOPOL_MATERIALIZE_DATALESS_FILES_OFF;
	} else if (ut->uu_flag & UT_NSPACE_FORCEDATALESSFAULTS) {
		return IOPOL_MATERIALIZE_DATALESS_FILES_ON;
	}
	/* Default thread behavior is "inherit process behavior". */
	return IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT;
}

static inline void
set_thread_materialize_policy(struct uthread *ut, int policy)
{
	if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_OFF) {
		ut->uu_flag &= ~UT_NSPACE_FORCEDATALESSFAULTS;
		ut->uu_flag |= UT_NSPACE_NODATALESSFAULTS;
	} else if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_ON) {
		ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS;
		ut->uu_flag |= UT_NSPACE_FORCEDATALESSFAULTS;
	} else {
		ut->uu_flag &= ~(UT_NSPACE_NODATALESSFAULTS | UT_NSPACE_FORCEDATALESSFAULTS);
	}
}

static inline void
set_proc_materialize_policy(struct proc *p, int policy)
{
	if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT) {
		/*
		 * Caller has specified "use the default policy".
		 * The default policy is to NOT materialize dataless
		 * files.
		 */
		policy = IOPOL_MATERIALIZE_DATALESS_FILES_OFF;
	}
	if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_ON) {
		OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES, &p->p_vfs_iopolicy);
	} else {
		OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES), &p->p_vfs_iopolicy);
	}
}

static int
get_proc_materialize_policy(struct proc *p)
{
	return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES) ? IOPOL_MATERIALIZE_DATALESS_FILES_ON : IOPOL_MATERIALIZE_DATALESS_FILES_OFF;
}

int
iopolicysys_vfs_materialize_dataless_files(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int                     error = 0;
	thread_t                thread;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		break;
	case IOPOL_SCOPE_PROCESS:
		thread = THREAD_NULL;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT:
		case IOPOL_MATERIALIZE_DATALESS_FILES_OFF:
		case IOPOL_MATERIALIZE_DATALESS_FILES_ON:
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (thread != THREAD_NULL) {
			set_thread_materialize_policy(get_bsdthread_info(thread), policy);
		} else {
			set_proc_materialize_policy(p, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = get_thread_materialize_policy(get_bsdthread_info(thread));
		} else {
			policy = get_proc_materialize_policy(p);
		}
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL;         /* unknown command */
		break;
	}

out:
	return error;
}
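
/*
 * Scope-interaction sketch (illustrative): the thread-scoped policy
 * takes precedence, and a thread-scoped DEFAULT means "inherit the
 * process policy" (see get_thread_materialize_policy() above), e.g.:
 *
 *	setiopolicy_np(IOPOL_TYPE_VFS_MATERIALIZE_DATALESS_FILES,
 *	    IOPOL_SCOPE_PROCESS, IOPOL_MATERIALIZE_DATALESS_FILES_ON);
 *	setiopolicy_np(IOPOL_TYPE_VFS_MATERIALIZE_DATALESS_FILES,
 *	    IOPOL_SCOPE_THREAD, IOPOL_MATERIALIZE_DATALESS_FILES_OFF);
 *	// the calling thread now suppresses dataless materialization
 *	// while the rest of the process continues to force it
 */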

static int
iopolicysys_vfs_statfs_no_data_volume(struct proc *p __unused, int cmd,
    int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		/* Only process OK */
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT:
		/* fall-through */
		case IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME:
			/* These policies are OK */
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (0 == kauth_cred_issuser(kauth_cred_get())) {
			/*
			 * If it's a non-root process, it needs to have the
			 * entitlement to set the policy (note that this reuses
			 * the case-sensitivity entitlement string).
			 */
			boolean_t entitled = FALSE;
			entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
			if (!entitled) {
				error = EPERM;
				goto out;
			}
		}

		switch (policy) {
		case IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT:
			OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME), &p->p_vfs_iopolicy);
			break;
		case IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME:
			OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME, &p->p_vfs_iopolicy);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		break;
	case IOPOL_CMD_GET:
		iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME)
		    ? IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME
		    : IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT;
		break;
	default:
		error = EINVAL;         /* unknown command */
		break;
	}

out:
	return error;
}

static int
iopolicysys_vfs_trigger_resolve(struct proc *p __unused, int cmd,
    int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		/* Only process OK */
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT:
		/* fall-through */
		case IOPOL_VFS_TRIGGER_RESOLVE_OFF:
			/* These policies are OK */
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		switch (policy) {
		case IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT:
			OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE), &p->p_vfs_iopolicy);
			break;
		case IOPOL_VFS_TRIGGER_RESOLVE_OFF:
			OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE, &p->p_vfs_iopolicy);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		break;
	case IOPOL_CMD_GET:
		iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE)
		    ? IOPOL_VFS_TRIGGER_RESOLVE_OFF
		    : IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT;
		break;
	default:
		error = EINVAL;         /* unknown command */
		break;
	}

out:
	return error;
}

static int
iopolicysys_vfs_ignore_content_protection(struct proc *p, int cmd, int scope,
    int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		/* Only process OK */
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_VFS_CONTENT_PROTECTION_DEFAULT:
			OS_FALLTHROUGH;
		case IOPOL_VFS_CONTENT_PROTECTION_IGNORE:
			/* These policies are OK */
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (0 == kauth_cred_issuser(kauth_cred_get())) {
			/*
			 * If it's a non-root process, it needs to have the
			 * entitlement to set the policy (note that this reuses
			 * the case-sensitivity entitlement string).
			 */
			boolean_t entitled = FALSE;
			entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
			if (!entitled) {
				error = EPERM;
				goto out;
			}
		}

		switch (policy) {
		case IOPOL_VFS_CONTENT_PROTECTION_DEFAULT:
			os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION, relaxed);
			break;
		case IOPOL_VFS_CONTENT_PROTECTION_IGNORE:
			os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION, relaxed);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		break;
	case IOPOL_CMD_GET:
		iop_param->iop_policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION)
		    ? IOPOL_VFS_CONTENT_PROTECTION_IGNORE
		    : IOPOL_VFS_CONTENT_PROTECTION_DEFAULT;
		break;
	default:
		error = EINVAL;         /* unknown command */
		break;
	}

out:
	return error;
}

#define AUTHORIZED_ACCESS_ENTITLEMENT \
	"com.apple.private.vfs.authorized-access"
int
iopolicysys_vfs_ignore_node_permissions(struct proc *p, int cmd, int scope,
    int policy, __unused struct _iopol_param_t *iop_param)
{
	int error = EINVAL;

	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		break;
	default:
		goto out;
	}

	switch (cmd) {
	case IOPOL_CMD_GET:
		policy = os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS ?
		    IOPOL_VFS_IGNORE_PERMISSIONS_ON : IOPOL_VFS_IGNORE_PERMISSIONS_OFF;
		iop_param->iop_policy = policy;
		goto out_ok;
	case IOPOL_CMD_SET:
		/* SET is handled after the switch */
		break;
	default:
		goto out;
	}

	if (!IOCurrentTaskHasEntitlement(AUTHORIZED_ACCESS_ENTITLEMENT)) {
		error = EPERM;
		goto out;
	}

	switch (policy) {
	case IOPOL_VFS_IGNORE_PERMISSIONS_OFF:
		os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS, relaxed);
		break;
	case IOPOL_VFS_IGNORE_PERMISSIONS_ON:
		os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS, relaxed);
		break;
	default:
		/* unrecognized policy values are ignored and report success */
		break;
	}

out_ok:
	error = 0;
out:
	return error;
}

static inline void
set_thread_skip_mtime_policy(struct uthread *ut, int policy)
{
	if (policy == IOPOL_VFS_SKIP_MTIME_UPDATE_ON) {
		os_atomic_or(&ut->uu_flag, UT_SKIP_MTIME_UPDATE, relaxed);
	} else {
		os_atomic_andnot(&ut->uu_flag, UT_SKIP_MTIME_UPDATE, relaxed);
	}
}

static inline int
get_thread_skip_mtime_policy(struct uthread *ut)
{
	return (os_atomic_load(&ut->uu_flag, relaxed) & UT_SKIP_MTIME_UPDATE) ?
	       IOPOL_VFS_SKIP_MTIME_UPDATE_ON : IOPOL_VFS_SKIP_MTIME_UPDATE_OFF;
}

static inline void
set_proc_skip_mtime_policy(struct proc *p, int policy)
{
	if (policy == IOPOL_VFS_SKIP_MTIME_UPDATE_ON) {
		os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_SKIP_MTIME_UPDATE, relaxed);
	} else {
		os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_SKIP_MTIME_UPDATE, relaxed);
	}
}

static inline int
get_proc_skip_mtime_policy(struct proc *p)
{
	return (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_SKIP_MTIME_UPDATE) ?
	       IOPOL_VFS_SKIP_MTIME_UPDATE_ON : IOPOL_VFS_SKIP_MTIME_UPDATE_OFF;
}

#define SKIP_MTIME_UPDATE_ENTITLEMENT \
	"com.apple.private.vfs.skip-mtime-updates"
int
iopolicysys_vfs_skip_mtime_update(struct proc *p, int cmd, int scope,
    int policy, __unused struct _iopol_param_t *iop_param)
{
	thread_t thread;
	int error = 0;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		break;
	case IOPOL_SCOPE_PROCESS:
		thread = THREAD_NULL;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_VFS_SKIP_MTIME_UPDATE_ON:
		case IOPOL_VFS_SKIP_MTIME_UPDATE_OFF:
			if (!IOCurrentTaskHasEntitlement(SKIP_MTIME_UPDATE_ENTITLEMENT)) {
				error = EPERM;
				goto out;
			}
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (thread != THREAD_NULL) {
			set_thread_skip_mtime_policy(get_bsdthread_info(thread), policy);
		} else {
			set_proc_skip_mtime_policy(p, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = get_thread_skip_mtime_policy(get_bsdthread_info(thread));
		} else {
			policy = get_proc_skip_mtime_policy(p);
		}
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL;         /* unknown command */
		break;
	}

out:
	return error;
}

#define ALLOW_LOW_SPACE_WRITES_ENTITLEMENT \
	"com.apple.private.vfs.allow-low-space-writes"
static int
iopolicysys_vfs_allow_lowspace_writes(struct proc *p, int cmd, int scope,
    int policy, __unused struct _iopol_param_t *iop_param)
{
	int error = EINVAL;

	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		break;
	default:
		goto out;
	}

	switch (cmd) {
	case IOPOL_CMD_GET:
		policy = os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES ?
		    IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_ON : IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_OFF;
		iop_param->iop_policy = policy;
		goto out_ok;
	case IOPOL_CMD_SET:
		/* SET is handled after the switch */
		break;
	default:
		break;
	}

	if (!IOCurrentTaskHasEntitlement(ALLOW_LOW_SPACE_WRITES_ENTITLEMENT)) {
		error = EPERM;
		goto out;
	}

	switch (policy) {
	case IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_OFF:
		os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES, relaxed);
		break;
	case IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_ON:
		os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES, relaxed);
		break;
	default:
		break;
	}

out_ok:
	error = 0;
out:
	return error;
}

static int
iopolicysys_vfs_disallow_rw_for_o_evtonly(struct proc *p, int cmd, int scope,
    int policy, __unused struct _iopol_param_t *iop_param)
{
	int error = EINVAL;

	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		break;
	default:
		goto out;
	}

	switch (cmd) {
	case IOPOL_CMD_GET:
		policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) &
		    P_VFS_IOPOLICY_DISALLOW_RW_FOR_O_EVTONLY) ?
		    IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_ON :
		    IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_DEFAULT;
		iop_param->iop_policy = policy;
		goto out_ok;
	case IOPOL_CMD_SET:
		break;
	default:
		goto out;
	}

	/* Once set, we don't allow the process to clear it. */
	switch (policy) {
	case IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_ON:
		os_atomic_or(&p->p_vfs_iopolicy,
		    P_VFS_IOPOLICY_DISALLOW_RW_FOR_O_EVTONLY, relaxed);
		break;
	default:
		goto out;
	}

out_ok:
	error = 0;
out:
	return error;
}

static int
iopolicysys_vfs_altlink(struct proc *p, int cmd, int scope, int policy,
    struct _iopol_param_t *iop_param)
{
	if (scope != IOPOL_SCOPE_PROCESS) {
		return EINVAL;
	}

	if (cmd == IOPOL_CMD_GET) {
		policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_ALTLINK) ?
		    IOPOL_VFS_ALTLINK_ENABLED : IOPOL_VFS_ALTLINK_DISABLED;
		iop_param->iop_policy = policy;
		return 0;
	}

	/* Once set, we don't allow the process to clear it. */
	if (policy == IOPOL_VFS_ALTLINK_ENABLED) {
		os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALTLINK, relaxed);
		return 0;
	}

	return EINVAL;
}

static int
iopolicysys_vfs_nocache_write_fs_blksize(struct proc *p, int cmd, int scope, int policy,
    struct _iopol_param_t *iop_param)
{
	thread_t thread;

	switch (scope) {
	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		break;
	case IOPOL_SCOPE_PROCESS:
		thread = THREAD_NULL;
		break;
	default:
		return EINVAL;
	}

	if (cmd == IOPOL_CMD_GET) {
		if (thread != THREAD_NULL) {
			struct uthread *ut = get_bsdthread_info(thread);
			policy = ut->uu_flag & UT_FS_BLKSIZE_NOCACHE_WRITES ?
			    IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_ON : IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_DEFAULT;
		} else {
			policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_NOCACHE_WRITE_FS_BLKSIZE) ?
			    IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_ON : IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_DEFAULT;
		}
		iop_param->iop_policy = policy;
		return 0;
	}

	/* Once set, we don't allow the process or thread to clear it. */
	if ((cmd == IOPOL_CMD_SET) && (policy == IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_ON)) {
		if (thread != THREAD_NULL) {
			struct uthread *ut = get_bsdthread_info(thread);
			ut->uu_flag |= UT_FS_BLKSIZE_NOCACHE_WRITES;
		} else {
			os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_NOCACHE_WRITE_FS_BLKSIZE, relaxed);
		}
		return 0;
	}

	return EINVAL;
}

void
proc_apply_task_networkbg(int pid, thread_t thread)
{
	proc_t p = proc_find(pid);

	if (p != PROC_NULL) {
		do_background_socket(p, thread);
		proc_rele(p);
	}
}

void
gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor)
{
	struct rusage_info_child *ri_child;

	assert(p->p_stats != NULL);
	memset(ru, 0, sizeof(*ru));
	/*
	 * Each newer flavor is a superset of the previous one, so the
	 * cases deliberately cascade from newest to oldest.
	 */
	switch (flavor) {
	case RUSAGE_INFO_V6:
		ru->ri_neural_footprint = get_task_neural_nofootprint_total(proc_task(p));
		ru->ri_lifetime_max_neural_footprint = get_task_neural_nofootprint_total_lifetime_max(proc_task(p));
#if CONFIG_LEDGER_INTERVAL_MAX
		ru->ri_interval_max_neural_footprint = get_task_neural_nofootprint_total_interval_max(proc_task(p), FALSE);
#endif
		/* Any P-specific resource counters are captured in fill_task_rusage. */
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V5:
#if __has_feature(ptrauth_calls)
		if (vm_shared_region_is_reslide(proc_task(p))) {
			ru->ri_flags |= RU_PROC_RUNS_RESLIDE;
		}
#endif /* __has_feature(ptrauth_calls) */
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V4:
		ru->ri_logical_writes = get_task_logical_writes(proc_task(p), false);
		ru->ri_lifetime_max_phys_footprint = get_task_phys_footprint_lifetime_max(proc_task(p));
#if CONFIG_LEDGER_INTERVAL_MAX
		ru->ri_interval_max_phys_footprint = get_task_phys_footprint_interval_max(proc_task(p), FALSE);
#endif
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V3:
		fill_task_qos_rusage(proc_task(p), ru);
		fill_task_billed_usage(proc_task(p), ru);
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V2:
		fill_task_io_rusage(proc_task(p), ru);
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V1:
		/*
		 * p->p_stats->ri_child statistics are protected under proc lock.
		 */
		proc_lock(p);

		ri_child = &(p->p_stats->ri_child);
		ru->ri_child_user_time = ri_child->ri_child_user_time;
		ru->ri_child_system_time = ri_child->ri_child_system_time;
		ru->ri_child_pkg_idle_wkups = ri_child->ri_child_pkg_idle_wkups;
		ru->ri_child_interrupt_wkups = ri_child->ri_child_interrupt_wkups;
		ru->ri_child_pageins = ri_child->ri_child_pageins;
		ru->ri_child_elapsed_abstime = ri_child->ri_child_elapsed_abstime;

		proc_unlock(p);
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V0:
		proc_getexecutableuuid(p, (unsigned char *)&ru->ri_uuid, sizeof(ru->ri_uuid));
		fill_task_rusage(proc_task(p), ru);
		ru->ri_proc_start_abstime = p->p_stats->ps_start;
	}
}

int
proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie)
{
	rusage_info_current ri_current = {};

	size_t size = 0;

	switch (flavor) {
	case RUSAGE_INFO_V0:
		size = sizeof(struct rusage_info_v0);
		break;

	case RUSAGE_INFO_V1:
		size = sizeof(struct rusage_info_v1);
		break;

	case RUSAGE_INFO_V2:
		size = sizeof(struct rusage_info_v2);
		break;

	case RUSAGE_INFO_V3:
		size = sizeof(struct rusage_info_v3);
		break;

	case RUSAGE_INFO_V4:
		size = sizeof(struct rusage_info_v4);
		break;

	case RUSAGE_INFO_V5:
		size = sizeof(struct rusage_info_v5);
		break;

	case RUSAGE_INFO_V6:
		size = sizeof(struct rusage_info_v6);
		break;
	default:
		return EINVAL;
	}

	if (size == 0) {
		return EINVAL;
	}

	/*
	 * If the process has already exited, return the cached rusage info
	 * from the zombie proc. Otherwise, collect it from the live task.
	 */
	if (p->p_ru) {
		return copyout(&p->p_ru->ri, buffer, size);
	} else {
		gather_rusage_info(p, &ri_current, flavor);
		ri_current.ri_proc_exit_abstime = 0;
		return copyout(&ri_current, buffer, size);
	}
}
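
/*
 * Userspace sketch (illustrative, assuming the libproc wrapper
 * proc_pid_rusage() from <libproc.h>): the flavor selects how much of
 * the rusage_info cascade above is filled in and copied out, e.g.:
 *
 *	struct rusage_info_v4 ri;
 *	if (proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t *)&ri) == 0) {
 *		printf("lifetime max footprint: %llu\n",
 *		    ri.ri_lifetime_max_phys_footprint);
 *	}
 */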

static int
mach_to_bsd_rv(int mach_rv)
{
	int bsd_rv = 0;

	switch (mach_rv) {
	case KERN_SUCCESS:
		bsd_rv = 0;
		break;
	case KERN_INVALID_ARGUMENT:
		bsd_rv = EINVAL;
		break;
	default:
		panic("unknown error %#x", mach_rv);
	}

	return bsd_rv;
}

/*
 * Resource limit controls
 *
 * uap->flavor available flavors:
 *
 *     RLIMIT_WAKEUPS_MONITOR
 *     RLIMIT_CPU_USAGE_MONITOR
 *     RLIMIT_THREAD_CPULIMITS
 *     RLIMIT_FOOTPRINT_INTERVAL
 */
int
proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *uap, __unused int32_t *retval)
{
	proc_t  targetp;
	int     error = 0;
	struct  proc_rlimit_control_wakeupmon wakeupmon_args;
	uint32_t cpumon_flags;
	uint32_t cpulimits_flags;
	kauth_cred_t my_cred, target_cred;
#if CONFIG_LEDGER_INTERVAL_MAX
	uint32_t footprint_interval_flags;
	uint64_t interval_max_footprint;
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

	/* -1 implicitly means our own process (perhaps even the current thread for per-thread attributes) */
	if (uap->pid == -1) {
		targetp = proc_self();
	} else {
		targetp = proc_find(uap->pid);
	}

	/* proc_self() can return NULL for an exiting process */
	if (targetp == PROC_NULL) {
		return ESRCH;
	}

	my_cred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) &&
	    kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred)) {
		proc_rele(targetp);
		kauth_cred_unref(&target_cred);
		return EACCES;
	}

	switch (uap->flavor) {
	case RLIMIT_WAKEUPS_MONITOR:
		if ((error = copyin(uap->arg, &wakeupmon_args, sizeof(wakeupmon_args))) != 0) {
			break;
		}
		if ((error = mach_to_bsd_rv(task_wakeups_monitor_ctl(proc_task(targetp), &wakeupmon_args.wm_flags,
		    &wakeupmon_args.wm_rate))) != 0) {
			break;
		}
		error = copyout(&wakeupmon_args, uap->arg, sizeof(wakeupmon_args));
		break;
	case RLIMIT_CPU_USAGE_MONITOR:
		cpumon_flags = (uint32_t)uap->arg; // XXX temporarily stashing flags in argp (12592127)
		error = mach_to_bsd_rv(task_cpu_usage_monitor_ctl(proc_task(targetp), &cpumon_flags));
		break;
	case RLIMIT_THREAD_CPULIMITS:
		cpulimits_flags = (uint32_t)uap->arg; // only need a limited set of bits, pass in void * argument

		if (uap->pid != -1) {
			error = EINVAL;
			break;
		}

		uint8_t percent = 0;
		uint32_t ms_refill = 0;
		uint64_t ns_refill;

		percent = (uint8_t)(cpulimits_flags & 0xffU);           /* low 8 bits for percent */
		ms_refill = (cpulimits_flags >> 8) & 0xffffff;          /* next 24 bits represent ms refill value */
		if (percent >= 100 || percent == 0) {
			error = EINVAL;
			break;
		}

		ns_refill = ((uint64_t)ms_refill) * NSEC_PER_MSEC;

		error = mach_to_bsd_rv(thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ns_refill));
		break;

#if CONFIG_LEDGER_INTERVAL_MAX
	case RLIMIT_FOOTPRINT_INTERVAL:
		footprint_interval_flags = (uint32_t)uap->arg; // XXX temporarily stashing flags in argp (12592127)
		/*
		 * There is currently only one option for this flavor.
		 */
		if ((footprint_interval_flags & FOOTPRINT_INTERVAL_RESET) == 0) {
			error = EINVAL;
			break;
		}
		/*
		 * Reset both the phys-footprint and neural-footprint interval
		 * maxima; the returned (pre-reset) maxima are discarded.
		 */
		interval_max_footprint = get_task_phys_footprint_interval_max(proc_task(targetp), TRUE);
		interval_max_footprint = get_task_neural_nofootprint_total_interval_max(proc_task(targetp), TRUE);
		break;

#endif /* CONFIG_LEDGER_INTERVAL_MAX */
	default:
		error = EINVAL;
		break;
	}

	proc_rele(targetp);
	kauth_cred_unref(&target_cred);

	/*
	 * Return value from this function becomes errno to userland caller.
	 */
	return error;
}
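
/*
 * Encoding sketch for RLIMIT_THREAD_CPULIMITS (illustrative, assuming
 * the private userspace prototype for proc_rlimit_control()): the
 * uap->arg pointer is reused as a bit-field, with the CPU percentage in
 * the low 8 bits and the refill period in milliseconds in the next 24.
 * Limiting the calling thread to 50% of a CPU over a 100ms window would
 * look like:
 *
 *	uint32_t flags = (100u << 8) | 50u;	// ms_refill = 100, percent = 50
 *	proc_rlimit_control(-1, RLIMIT_THREAD_CPULIMITS,
 *	    (void *)(uintptr_t)flags);
 */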

/*
 * Return the current amount of CPU consumed by this thread (in either user or kernel mode)
 */
int
thread_selfusage(struct proc *p __unused, struct thread_selfusage_args *uap __unused, uint64_t *retval)
{
	uint64_t runtime;

	runtime = thread_get_runtime_self();
	*retval = runtime;

	return 0;
}
3158