/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *    The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)kern_resource.c    8.5 (Berkeley) 1/21/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/sysproto.h>

#include <security/audit/audit.h>

#include <machine/vmparam.h>

#include <mach/mach_types.h>
#include <mach/time_value.h>
#include <mach/task.h>
#include <mach/task_info.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/thread_act.h>    /* for thread_policy_set( ) */
#include <kern/thread.h>
#include <kern/policy_internal.h>

#include <kern/task.h>
#include <kern/clock.h>         /* for absolutetime_to_microtime() */
#include <netinet/in.h>         /* for TRAFFIC_MGT_SO_* */
#if CONFIG_FREEZE
#include <sys/kern_memorystatus_freeze.h> /* for memorystatus_freezer_mark_ui_transition */
#endif /* CONFIG_FREEZE */
#include <sys/socketvar.h>      /* for struct socket */
#if NECP
#include <net/necp.h>
#endif /* NECP */

#include <vm/vm_map_xnu.h>

#include <kern/assert.h>
#include <sys/resource.h>
#include <sys/resource_private.h>
#include <sys/priv.h>
#include <IOKit/IOBSD.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

static void proc_limitblock(proc_t p);
static void proc_limitunblock(proc_t p);
static void proc_limitupdate(proc_t p, bool unblock,
    void (^update)(struct plimit *plim));

static int donice(struct proc *curp, struct proc *chgp, int n);
static int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp);
static void do_background_socket(struct proc *p, thread_t thread);
static int do_background_thread(thread_t thread, int priority);
static int do_background_proc(struct proc *curp, struct proc *targetp, int priority);
static int set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority);
static int proc_set_darwin_role(proc_t curp, proc_t targetp, int priority);
static int proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority);
static int proc_set_game_mode(proc_t targetp, int priority);
static int proc_get_game_mode(proc_t targetp, int *priority);
static int proc_set_carplay_mode(proc_t targetp, int priority);
static int proc_get_carplay_mode(proc_t targetp, int *priority);
static int get_background_proc(struct proc *curp, struct proc *targetp, int *priority);

int fill_task_rusage(task_t task, rusage_info_current *ri);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task, bool external);

rlim_t maxdmap = MAXDSIZ;    /* XXX */
rlim_t maxsmap = MAXSSIZ - PAGE_MAX_SIZE;    /* XXX */

/* For plimit reference count */
os_refgrp_decl(, rlimit_refgrp, "plimit_refcnt", NULL);

static KALLOC_TYPE_DEFINE(plimit_zone, struct plimit, KT_DEFAULT);

/*
 * Limits on the number of open files per process, and the number
 * of child processes per process.
 *
 * Note: would be in kern/subr_param.c in FreeBSD.
 */
__private_extern__ int maxfilesperproc = OPEN_MAX;    /* per-proc open files limit */

SYSCTL_INT(_kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW | CTLFLAG_LOCKED,
    &maxprocperuid, 0, "Maximum processes allowed per userid" );

SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW | CTLFLAG_LOCKED,
    &maxfilesperproc, 0, "Maximum files allowed open per process" );

/* Args and fn for proc_iteration callback used in setpriority */
struct puser_nice_args {
    proc_t curp;
    int prio;
    id_t who;
    int * foundp;
    int * errorp;
};
static int puser_donice_callback(proc_t p, void * arg);


/* Args and fn for proc_iteration callback used in setpriority */
struct ppgrp_nice_args {
    proc_t curp;
    int prio;
    int * foundp;
    int * errorp;
};
static int ppgrp_donice_callback(proc_t p, void * arg);

/*
 * Resource controls and accounting.
 */
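/*
 * getpriority
 *
 * Looks up the nice value for the process(es) selected by "which"/"who"
 * and returns the lowest (i.e. most favorable) value found.  A sentinel
 * of PRIO_MAX + 1 detects the "no matching process" case, which is
 * reported as ESRCH.  The PRIO_DARWIN_* flavors return the corresponding
 * Darwin policy state instead of a nice value.
 */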
int
getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval)
{
    struct proc *p;
    int low = PRIO_MAX + 1;
    kauth_cred_t my_cred;
    int refheld = 0;
    int error = 0;

    /* would also test (uap->who < 0), but id_t is unsigned */
    if (uap->who > 0x7fffffff) {
        return EINVAL;
    }

    switch (uap->which) {
    case PRIO_PROCESS:
        if (uap->who == 0) {
            p = curp;
            low = p->p_nice;
        } else {
            p = proc_find(uap->who);
            if (p == 0) {
                break;
            }
            low = p->p_nice;
            proc_rele(p);
        }
        break;

    case PRIO_PGRP: {
        struct pgrp *pg = PGRP_NULL;

        if (uap->who == 0) {
            /* returns the pgrp to ref */
            pg = proc_pgrp(curp, NULL);
        } else if ((pg = pgrp_find(uap->who)) == PGRP_NULL) {
            break;
        }
        /* No need for iteration as it is a simple scan */
        pgrp_lock(pg);
        PGMEMBERS_FOREACH(pg, p) {
            if (p->p_nice < low) {
                low = p->p_nice;
            }
        }
        pgrp_unlock(pg);
        pgrp_rele(pg);
        break;
    }

    case PRIO_USER:
        if (uap->who == 0) {
            uap->who = kauth_cred_getuid(kauth_cred_get());
        }

        proc_list_lock();

        for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
            my_cred = kauth_cred_proc_ref(p);
            if (kauth_cred_getuid(my_cred) == uap->who &&
                p->p_nice < low) {
                low = p->p_nice;
            }
            kauth_cred_unref(&my_cred);
        }

        proc_list_unlock();

        break;

    case PRIO_DARWIN_THREAD:
        /* we currently only support the current thread */
        if (uap->who != 0) {
            return EINVAL;
        }

        low = proc_get_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_DARWIN_BG);

        break;

    case PRIO_DARWIN_PROCESS:
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = get_background_proc(curp, p, &low);

        if (refheld) {
            proc_rele(p);
        }
        if (error) {
            return error;
        }
        break;

    case PRIO_DARWIN_ROLE:
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = proc_get_darwin_role(curp, p, &low);

        if (refheld) {
            proc_rele(p);
        }
        if (error) {
            return error;
        }
        break;

    case PRIO_DARWIN_GAME_MODE:
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = proc_get_game_mode(p, &low);

        if (refheld) {
            proc_rele(p);
        }
        if (error) {
            return error;
        }
        break;

    case PRIO_DARWIN_CARPLAY_MODE:
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = proc_get_carplay_mode(p, &low);

        if (refheld) {
            proc_rele(p);
        }
        if (error) {
            return error;
        }
        break;

    default:
        return EINVAL;
    }
    if (low == PRIO_MAX + 1) {
        return ESRCH;
    }
    *retval = low;
    return 0;
}

/* call back function used for proc iteration in PRIO_USER */
static int
puser_donice_callback(proc_t p, void * arg)
{
    int error, n;
    struct puser_nice_args * pun = (struct puser_nice_args *)arg;
    kauth_cred_t my_cred;

    my_cred = kauth_cred_proc_ref(p);
    if (kauth_cred_getuid(my_cred) == pun->who) {
        error = donice(pun->curp, p, pun->prio);
        if (pun->errorp != NULL) {
            *pun->errorp = error;
        }
        if (pun->foundp != NULL) {
            n = *pun->foundp;
            *pun->foundp = n + 1;
        }
    }
    kauth_cred_unref(&my_cred);

    return PROC_RETURNED;
}

/* call back function used for proc iteration in PRIO_PGRP */
static int
ppgrp_donice_callback(proc_t p, void * arg)
{
    int error;
    struct ppgrp_nice_args * pun = (struct ppgrp_nice_args *)arg;
    int n;

    error = donice(pun->curp, p, pun->prio);
    if (pun->errorp != NULL) {
        *pun->errorp = error;
    }
    if (pun->foundp != NULL) {
        n = *pun->foundp;
        *pun->foundp = n + 1;
    }

    return PROC_RETURNED;
}

/*
 * Returns:    0            Success
 *        EINVAL
 *        ESRCH
 *    donice:EPERM
 *    donice:EACCES
 */
/* ARGSUSED */
int
setpriority(struct proc *curp, struct setpriority_args *uap, int32_t *retval)
{
    struct proc *p;
    int found = 0, error = 0;
    int refheld = 0;

    AUDIT_ARG(cmd, uap->which);
    AUDIT_ARG(owner, uap->who, 0);
    AUDIT_ARG(value32, uap->prio);

    /* would also test (uap->who < 0), but id_t is unsigned */
    if (uap->who > 0x7fffffff) {
        return EINVAL;
    }

    switch (uap->which) {
    case PRIO_PROCESS:
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == 0) {
                break;
            }
            refheld = 1;
        }
        error = donice(curp, p, uap->prio);
        found++;
        if (refheld != 0) {
            proc_rele(p);
        }
        break;

    case PRIO_PGRP: {
        struct pgrp *pg = PGRP_NULL;
        struct ppgrp_nice_args ppgrp;

        if (uap->who == 0) {
            pg = proc_pgrp(curp, NULL);
        } else if ((pg = pgrp_find(uap->who)) == PGRP_NULL) {
            break;
        }

        ppgrp.curp = curp;
        ppgrp.prio = uap->prio;
        ppgrp.foundp = &found;
        ppgrp.errorp = &error;

        pgrp_iterate(pg, ppgrp_donice_callback, (void *)&ppgrp, NULL);
        pgrp_rele(pg);

        break;
    }

    case PRIO_USER: {
        struct puser_nice_args punice;

        if (uap->who == 0) {
            uap->who = kauth_cred_getuid(kauth_cred_get());
        }

        punice.curp = curp;
        punice.prio = uap->prio;
        punice.who = uap->who;
        punice.foundp = &found;
        error = 0;
        punice.errorp = &error;
        proc_iterate(PROC_ALLPROCLIST, puser_donice_callback, (void *)&punice, NULL, NULL);

        break;
    }

    case PRIO_DARWIN_THREAD: {
        /* we currently only support the current thread */
        if (uap->who != 0) {
            return EINVAL;
        }

        error = do_background_thread(current_thread(), uap->prio);
        found++;
        break;
    }

    case PRIO_DARWIN_PROCESS: {
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == 0) {
                break;
            }
            refheld = 1;
        }

        error = do_background_proc(curp, p, uap->prio);

        found++;
        if (refheld != 0) {
            proc_rele(p);
        }
        break;
    }

    case PRIO_DARWIN_GPU: {
        if (uap->who == 0) {
            return EINVAL;
        }

        p = proc_find(uap->who);
        if (p == PROC_NULL) {
            break;
        }

        error = set_gpudeny_proc(curp, p, uap->prio);

        found++;
        proc_rele(p);
        break;
    }

    case PRIO_DARWIN_ROLE: {
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = proc_set_darwin_role(curp, p, uap->prio);

        found++;
        if (refheld != 0) {
            proc_rele(p);
        }
        break;
    }

    case PRIO_DARWIN_GAME_MODE: {
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = proc_set_game_mode(p, uap->prio);

        found++;
        if (refheld != 0) {
            proc_rele(p);
        }
        break;
    }

    case PRIO_DARWIN_CARPLAY_MODE: {
        if (uap->who == 0) {
            p = curp;
        } else {
            p = proc_find(uap->who);
            if (p == PROC_NULL) {
                break;
            }
            refheld = 1;
        }

        error = proc_set_carplay_mode(p, uap->prio);

        found++;
        if (refheld != 0) {
            proc_rele(p);
        }
        break;
    }

    default:
        return EINVAL;
    }
    if (found == 0) {
        return ESRCH;
    }
    if (error == EIDRM) {
        *retval = -2;
        error = 0;
    }
    return error;
}


/*
 * Returns:    0            Success
 *        EPERM
 *        EACCES
 *    mac_check_proc_sched:???
 */
static int
donice(struct proc *curp, struct proc *chgp, int n)
{
    int error = 0;
    kauth_cred_t ucred;
    kauth_cred_t my_cred;

    ucred = kauth_cred_proc_ref(curp);
    my_cred = kauth_cred_proc_ref(chgp);

    if (suser(ucred, NULL) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(my_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(my_cred)) {
        error = EPERM;
        goto out;
    }
    if (n > PRIO_MAX) {
        n = PRIO_MAX;
    }
    if (n < PRIO_MIN) {
        n = PRIO_MIN;
    }
    if (n < chgp->p_nice && suser(ucred, &curp->p_acflag)) {
        error = EACCES;
        goto out;
    }
#if CONFIG_MACF
    error = mac_proc_check_sched(curp, chgp);
    if (error) {
        goto out;
    }
#endif
    proc_lock(chgp);
    chgp->p_nice = (char)n;
    proc_unlock(chgp);
    (void)resetpriority(chgp);
out:
    kauth_cred_unref(&ucred);
    kauth_cred_unref(&my_cred);
    return error;
}

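/*
 * set_gpudeny_proc
 *
 * Grants or revokes the target process's access to the GPU
 * (PRIO_DARWIN_GPU_DENY / PRIO_DARWIN_GPU_ALLOW).  The caller must be
 * root or match the target's uid/ruid, may not target itself, and is
 * subject to the MAC scheduling check.
 */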
static int
set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority)
{
    int error = 0;
    kauth_cred_t ucred;
    kauth_cred_t target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    /* TODO: Entitlement instead of uid check */

    if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    if (curp == targetp) {
        error = EPERM;
        goto out;
    }

#if CONFIG_MACF
    error = mac_proc_check_sched(curp, targetp);
    if (error) {
        goto out;
    }
#endif

    switch (priority) {
    case PRIO_DARWIN_GPU_DENY:
        task_set_gpu_denied(proc_task(targetp), TRUE);
        break;
    case PRIO_DARWIN_GPU_ALLOW:
        task_set_gpu_denied(proc_task(targetp), FALSE);
        break;
    default:
        error = EINVAL;
        goto out;
    }

out:
    kauth_cred_unref(&target_cred);
    return error;
}

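/*
 * proc_set_darwin_role
 *
 * Translates a PRIO_DARWIN_ROLE_* value into a task role and applies it
 * to the target task.  Only processes marked as applications can have
 * their role changed (ENOTSUP otherwise); callers that fail the uid
 * checks need the PRIV_SETPRIORITY_DARWIN_ROLE privilege.
 */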
static int
proc_set_darwin_role(proc_t curp, proc_t targetp, int priority)
{
    int error = 0;
    uint32_t flagsp = 0;

    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
        if (priv_check_cred(ucred, PRIV_SETPRIORITY_DARWIN_ROLE, 0) != 0) {
            error = EPERM;
            goto out;
        }
    }

    if (curp != targetp) {
#if CONFIG_MACF
        if ((error = mac_proc_check_sched(curp, targetp))) {
            goto out;
        }
#endif
    }

    proc_get_darwinbgstate(proc_task(targetp), &flagsp);
    if ((flagsp & PROC_FLAG_APPLICATION) != PROC_FLAG_APPLICATION) {
        error = ENOTSUP;
        goto out;
    }

    task_role_t role = TASK_UNSPECIFIED;

    if ((error = proc_darwin_role_to_task_role(priority, &role))) {
        goto out;
    }

    proc_set_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE,
        TASK_POLICY_ROLE, role);

#if CONFIG_FREEZE
    if (priority == PRIO_DARWIN_ROLE_UI_FOCAL || priority == PRIO_DARWIN_ROLE_UI || priority == PRIO_DARWIN_ROLE_UI_NON_FOCAL) {
        memorystatus_freezer_mark_ui_transition(targetp);
    }
#endif /* CONFIG_FREEZE */

out:
    kauth_cred_unref(&target_cred);
    return error;
}

static int
proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority)
{
    int error = 0;
    int role = 0;

    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    if (curp != targetp) {
#if CONFIG_MACF
        if ((error = mac_proc_check_sched(curp, targetp))) {
            goto out;
        }
#endif
    }

    role = proc_get_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);

    *priority = proc_task_role_to_darwin_role(role);

out:
    kauth_cred_unref(&target_cred);
    return error;
}

#define SET_GAME_MODE_ENTITLEMENT "com.apple.private.set-game-mode"

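/*
 * Game mode set/get.
 *
 * Setting always requires SET_GAME_MODE_ENTITLEMENT; reading requires
 * either root or the entitlement.  Even with the entitlement, non-root
 * callers may only operate on processes with the same uid.
 */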
static int
proc_set_game_mode(proc_t targetp, int priority)
{
    int error = 0;

    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    boolean_t entitled = FALSE;
    entitled = IOCurrentTaskHasEntitlement(SET_GAME_MODE_ENTITLEMENT);
    if (!entitled) {
        error = EPERM;
        goto out;
    }

    /* Even with entitlement, non-root is only allowed to set same-user */
    if (!kauth_cred_issuser(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    switch (priority) {
    case PRIO_DARWIN_GAME_MODE_OFF:
        task_set_game_mode(proc_task(targetp), false);
        break;
    case PRIO_DARWIN_GAME_MODE_ON:
        task_set_game_mode(proc_task(targetp), true);
        break;
    default:
        error = EINVAL;
        goto out;
    }

out:
    kauth_cred_unref(&target_cred);
    return error;
}

static int
proc_get_game_mode(proc_t targetp, int *priority)
{
    int error = 0;

    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    boolean_t entitled = FALSE;
    entitled = IOCurrentTaskHasEntitlement(SET_GAME_MODE_ENTITLEMENT);

    /* Root is allowed to get without entitlement */
    if (!kauth_cred_issuser(ucred) && !entitled) {
        error = EPERM;
        goto out;
    }

    /* Even with entitlement, non-root is only allowed to see same-user */
    if (!kauth_cred_issuser(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    if (task_get_game_mode(proc_task(targetp))) {
        *priority = PRIO_DARWIN_GAME_MODE_ON;
    } else {
        *priority = PRIO_DARWIN_GAME_MODE_OFF;
    }

out:
    kauth_cred_unref(&target_cred);
    return error;
}

#define SET_CARPLAY_MODE_ENTITLEMENT "com.apple.private.set-carplay-mode"

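/*
 * CarPlay mode set/get.
 *
 * Mirrors the game mode policy above: setting requires
 * SET_CARPLAY_MODE_ENTITLEMENT, reading requires root or the
 * entitlement, and non-root callers are restricted to processes with
 * the same uid.
 */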
static int
proc_set_carplay_mode(proc_t targetp, int priority)
{
    int error = 0;

    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    boolean_t entitled = FALSE;
    entitled = IOCurrentTaskHasEntitlement(SET_CARPLAY_MODE_ENTITLEMENT);
    if (!entitled) {
        error = EPERM;
        goto out;
    }

    /* Even with entitlement, non-root is only allowed to set same-user */
    if (!kauth_cred_issuser(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    switch (priority) {
    case PRIO_DARWIN_CARPLAY_MODE_OFF:
        task_set_carplay_mode(proc_task(targetp), false);
        break;
    case PRIO_DARWIN_CARPLAY_MODE_ON:
        task_set_carplay_mode(proc_task(targetp), true);
        break;
    default:
        error = EINVAL;
        goto out;
    }

out:
    kauth_cred_unref(&target_cred);
    return error;
}

static int
proc_get_carplay_mode(proc_t targetp, int *priority)
{
    int error = 0;

    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    boolean_t entitled = FALSE;
    entitled = IOCurrentTaskHasEntitlement(SET_CARPLAY_MODE_ENTITLEMENT);

    /* Root is allowed to get without entitlement */
    if (!kauth_cred_issuser(ucred) && !entitled) {
        error = EPERM;
        goto out;
    }

    /* Even with entitlement, non-root is only allowed to see same-user */
    if (!kauth_cred_issuser(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    if (task_get_carplay_mode(proc_task(targetp))) {
        *priority = PRIO_DARWIN_CARPLAY_MODE_ON;
    } else {
        *priority = PRIO_DARWIN_CARPLAY_MODE_OFF;
    }

out:
    kauth_cred_unref(&target_cred);
    return error;
}

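/*
 * get_background_proc/do_background_proc
 *
 * Read or set the DARWIN_BG policy for a process.  The policy is
 * applied as "internal" when a process targets itself and as
 * "external" when it targets another process, so the two intents are
 * tracked independently by the task policy engine.
 */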
static int
get_background_proc(struct proc *curp, struct proc *targetp, int *priority)
{
    int external = 0;
    int error = 0;
    kauth_cred_t ucred, target_cred;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

    external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

    *priority = proc_get_task_policy(current_task(), external, TASK_POLICY_DARWIN_BG);

out:
    kauth_cred_unref(&target_cred);
    return error;
}

static int
do_background_proc(struct proc *curp, struct proc *targetp, int priority)
{
#if !CONFIG_MACF
#pragma unused(curp)
#endif
    int error = 0;
    kauth_cred_t ucred;
    kauth_cred_t target_cred;
    int external;
    int enable;

    ucred = kauth_cred_get();
    target_cred = kauth_cred_proc_ref(targetp);

    if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
        kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
        error = EPERM;
        goto out;
    }

#if CONFIG_MACF
    error = mac_proc_check_sched(curp, targetp);
    if (error) {
        goto out;
    }
#endif

    external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

    switch (priority) {
    case PRIO_DARWIN_BG:
        enable = TASK_POLICY_ENABLE;
        break;
    case PRIO_DARWIN_NONUI:
        /* ignored for compatibility */
        goto out;
    default:
        /* TODO: EINVAL if priority != 0 */
        enable = TASK_POLICY_DISABLE;
        break;
    }

    proc_set_task_policy(proc_task(targetp), external, TASK_POLICY_DARWIN_BG, enable);

out:
    kauth_cred_unref(&target_cred);
    return error;
}

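/*
 * do_background_socket
 *
 * Walks the process's file table and brings its sockets (and NECP
 * clients) in line with the current effective ALL_SOCKETS_BG policy:
 * marking every socket with TRAFFIC_MGT_SO_BACKGROUND when the process
 * goes background, or clearing the flag (for the whole process, or just
 * for sockets backgrounded by one thread) when it returns to foreground.
 */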
static void
do_background_socket(struct proc *p, thread_t thread)
{
#if SOCKETS
    struct fileproc *fp;
    int background = false;
#if NECP
    int update_necp = false;
#endif /* NECP */

    if (thread != THREAD_NULL &&
        get_threadtask(thread) != proc_task(p)) {
        return;
    }

    proc_fdlock(p);

    if (thread != THREAD_NULL) {
        background = proc_get_effective_thread_policy(thread, TASK_POLICY_ALL_SOCKETS_BG);
    } else {
        background = proc_get_effective_task_policy(proc_task(p), TASK_POLICY_ALL_SOCKETS_BG);
    }

    if (background) {
        /*
         * For PRIO_DARWIN_PROCESS (thread is NULL), simply mark
         * the sockets with the background flag.  There's nothing
         * to do here for the PRIO_DARWIN_THREAD case.
         */
        if (thread == THREAD_NULL) {
            fdt_foreach(fp, p) {
                if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) {
                    struct socket *sockp = (struct socket *)fp_get_data(fp);
                    socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
                    sockp->so_background_thread = NULL;
                }
#if NECP
                else if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_NETPOLICY) {
                    if (necp_set_client_as_background(p, fp, background)) {
                        update_necp = true;
                    }
                }
#endif /* NECP */
            }
        }
    } else {
        /* disable networking IO throttle.
         * NOTE - It is a known limitation of the current design that we
         * could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for
         * sockets created by other threads within this process.
         */
        fdt_foreach(fp, p) {
            struct socket *sockp;

            if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) {
                sockp = (struct socket *)fp_get_data(fp);
                /* skip if only clearing this thread's sockets */
                if ((thread) && (sockp->so_background_thread != thread)) {
                    continue;
                }
                socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
                sockp->so_background_thread = NULL;
            }
#if NECP
            else if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_NETPOLICY) {
                if (necp_set_client_as_background(p, fp, background)) {
                    update_necp = true;
                }
            }
#endif /* NECP */
        }
    }

    proc_fdunlock(p);

#if NECP
    if (update_necp) {
        necp_update_all_clients();
    }
#endif /* NECP */
#else
#pragma unused(p, thread)
#endif
}


/*
 * do_background_thread
 *
 * Requires: thread reference
 *
 * Returns:     0                       Success
 *              EPERM                   Tried to background while in vfork
 * XXX - todo - does this need a MACF hook?
 */
static int
do_background_thread(thread_t thread, int priority)
{
    int enable, external;
    int rv = 0;

    /* Backgrounding is unsupported for workq threads */
    if (thread_is_static_param(thread)) {
        return EPERM;
    }

    /* Not allowed to combine QoS and DARWIN_BG, doing so strips the QoS */
    if (thread_has_qos_policy(thread)) {
        thread_remove_qos_policy(thread);
        rv = EIDRM;
    }

    /* TODO: Fail if someone passes something besides 0 or PRIO_DARWIN_BG */
    enable = (priority == PRIO_DARWIN_BG) ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE;
    external = (current_thread() == thread) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

    proc_set_thread_policy(thread, external, TASK_POLICY_DARWIN_BG, enable);

    return rv;
}


/*
 * Returns:    0            Success
 *    copyin:EFAULT
 *    dosetrlimit:
 */
/* ARGSUSED */
int
setrlimit(struct proc *p, struct setrlimit_args *uap, __unused int32_t *retval)
{
    struct rlimit alim;
    int error;

    if ((error = copyin(uap->rlp, (caddr_t)&alim,
        sizeof(struct rlimit)))) {
        return error;
    }

    return dosetrlimit(p, uap->which, &alim);
}

/*
 * Returns:    0            Success
 *        EINVAL
 *    suser:EPERM
 *
 * Notes:    EINVAL is returned both for invalid arguments, and in the
 *        case that the current usage (e.g. RLIMIT_STACK) is already
 *        in excess of the requested limit.
 */
static int
dosetrlimit(struct proc *p, u_int which, struct rlimit *newrlim)
{
    struct rlimit rlim, stack_rlim = {.rlim_cur = 0, .rlim_max = 0};
    int error;
    kern_return_t kr;

    /* Mask out POSIX flag, saved above */
    which &= ~_RLIMIT_POSIX_FLAG;

    /* Unknown resource */
    if (which >= RLIM_NLIMITS) {
        return EINVAL;
    }

    proc_lock(p);

    /* Only one thread is able to change the current process's rlimit values */
    proc_limitblock(p);

    /*
     * Take a snapshot of the current rlimit values and read this throughout
     * this routine.  This minimizes the critical sections and allows other
     * processes in the system to access the plimit while we are in the
     * middle of this setrlimit call.
     */
    rlim = smr_serialized_load(&p->p_limit)->pl_rlimit[which];

    proc_unlock(p);

    error = 0;
    /* Sanity check: new soft limit cannot exceed new hard limit */
    if (newrlim->rlim_cur > newrlim->rlim_max) {
        error = EINVAL;
    }
    /*
     * Sanity check: only super-user may raise the hard limit.
     * newrlim->rlim_cur > rlim.rlim_max implies that the call
     * is increasing the hard limit as well.
     */
    else if (newrlim->rlim_cur > rlim.rlim_max || newrlim->rlim_max > rlim.rlim_max) {
        /* suser() returns 0 if the calling thread is super user. */
        error = suser(kauth_cred_get(), &p->p_acflag);
    }

    if (error) {
        /* Invalid setrlimit request: EINVAL or EPERM */
        goto out;
    }

    /* We have the reader lock of the process's plimit so it's safe to read the rlimit values */
    switch (which) {
    case RLIMIT_CPU:
        if (newrlim->rlim_cur == RLIM_INFINITY) {
            task_vtimer_clear(proc_task(p), TASK_VTIMER_RLIM);
            timerclear(&p->p_rlim_cpu);
        } else {
            task_absolutetime_info_data_t tinfo;
            mach_msg_type_number_t count;
            struct timeval ttv, tv;
            clock_sec_t tv_sec;
            clock_usec_t tv_usec;

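            /*
             * Convert the CPU time this task has already consumed
             * into a timeval, then compute the time remaining under
             * the new limit: p_rlim_cpu = new limit - time consumed.
             * A positive remainder arms the CPU-limit vtimer; a
             * non-positive one means the limit is already exceeded,
             * so deliver SIGXCPU immediately.
             */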
            count = TASK_ABSOLUTETIME_INFO_COUNT;
            task_info(proc_task(p), TASK_ABSOLUTETIME_INFO, (task_info_t)&tinfo, &count);
            absolutetime_to_microtime(tinfo.total_user + tinfo.total_system, &tv_sec, &tv_usec);
            ttv.tv_sec = tv_sec;
            ttv.tv_usec = tv_usec;

            tv.tv_sec = (newrlim->rlim_cur > __INT_MAX__ ? __INT_MAX__ : (__darwin_time_t)newrlim->rlim_cur);
            tv.tv_usec = 0;
            timersub(&tv, &ttv, &p->p_rlim_cpu);

            timerclear(&tv);
            if (timercmp(&p->p_rlim_cpu, &tv, >)) {
                task_vtimer_set(proc_task(p), TASK_VTIMER_RLIM);
            } else {
                task_vtimer_clear(proc_task(p), TASK_VTIMER_RLIM);

                timerclear(&p->p_rlim_cpu);

                psignal(p, SIGXCPU);
            }
        }
        break;

    case RLIMIT_DATA:
#if 00
        if (newrlim->rlim_cur > maxdmap) {
            newrlim->rlim_cur = maxdmap;
        }
        if (newrlim->rlim_max > maxdmap) {
            newrlim->rlim_max = maxdmap;
        }
#endif

        /* Over to Mach VM to validate the new data limit */
        if (vm_map_set_data_limit(current_map(), newrlim->rlim_cur) != KERN_SUCCESS) {
            /* The limit specified cannot be lowered because current usage is already higher than the limit. */
            error = EINVAL;
            goto out;
        }
        break;

    case RLIMIT_STACK:
        if (p->p_lflag & P_LCUSTOM_STACK) {
            /* Process has a custom stack set - rlimit cannot be used to change it */
            error = EINVAL;
            goto out;
        }

        /*
         * Note: the real stack size limit is enforced by maxsmap, not a process's RLIMIT_STACK.
         *
         * The kernel uses maxsmap to control the actual stack size limit.  While we allow
         * processes to set RLIMIT_STACK to RLIM_INFINITY (UNIX 03), accessing memory
         * beyond maxsmap will still trigger an exception.
         *
         * stack_rlim is used to store the user-defined RLIMIT_STACK values while we adjust
         * the stack size using the kernel limit (i.e. maxsmap).
         */
        if (newrlim->rlim_cur > maxsmap ||
            newrlim->rlim_max > maxsmap) {
            if (newrlim->rlim_cur > maxsmap) {
                stack_rlim.rlim_cur = newrlim->rlim_cur;
                newrlim->rlim_cur = maxsmap;
            }
            if (newrlim->rlim_max > maxsmap) {
                stack_rlim.rlim_max = newrlim->rlim_max;
                newrlim->rlim_max = maxsmap;
            }
        }

        /*
         * rlim.rlim_cur could be arbitrarily large due to previous calls to setrlimit().
         * Use the actual size for stack region adjustment.
         */
        if (rlim.rlim_cur > maxsmap) {
            rlim.rlim_cur = maxsmap;
        }

        /*
         * Stack is allocated to the max at exec time with only
         * "rlim_cur" bytes accessible.  If stack limit is going
         * up make more accessible, if going down make inaccessible.
         */
        if (newrlim->rlim_cur > rlim.rlim_cur) {
            mach_vm_offset_t addr;
            mach_vm_size_t size;

            /* grow stack */
            size = newrlim->rlim_cur;
            if (round_page_overflow(size, &size)) {
                error = EINVAL;
                goto out;
            }
            size -= round_page_64(rlim.rlim_cur);

            addr = (mach_vm_offset_t)(p->user_stack - round_page_64(newrlim->rlim_cur));
            kr = mach_vm_protect(current_map(), addr, size, FALSE, VM_PROT_DEFAULT);
            if (kr != KERN_SUCCESS) {
                error = EINVAL;
                goto out;
            }
        } else if (newrlim->rlim_cur < rlim.rlim_cur) {
            mach_vm_offset_t addr;
            mach_vm_size_t size;
            uint64_t cur_sp;

            /* shrink stack */

            /*
             * First check if new stack limit would agree
             * with current stack usage.
             * Get the current thread's stack pointer...
             */
            cur_sp = thread_adjuserstack(current_thread(), 0);
            if (cur_sp <= p->user_stack &&
                cur_sp > (p->user_stack - round_page_64(rlim.rlim_cur))) {
                /* stack pointer is in main stack */
                if (cur_sp <= (p->user_stack - round_page_64(newrlim->rlim_cur))) {
                    /*
                     * New limit would cause current usage to be invalid:
                     * reject new limit.
                     */
                    error = EINVAL;
                    goto out;
                }
            } else {
                /* not on the main stack: reject */
                error = EINVAL;
                goto out;
            }

            size = round_page_64(rlim.rlim_cur);
            size -= round_page_64(newrlim->rlim_cur);

            addr = (mach_vm_offset_t)(p->user_stack - round_page_64(rlim.rlim_cur));

            kr = mach_vm_protect(current_map(), addr, size, FALSE, VM_PROT_NONE);
            if (kr != KERN_SUCCESS) {
                error = EINVAL;
                goto out;
            }
        } else {
            /* no change ... */
        }

        /*
         * We've adjusted the process's stack region.  If the user-defined limit is greater
         * than maxsmap, we need to reflect this change in the rlimit interface.
         */
        if (stack_rlim.rlim_cur != 0) {
            newrlim->rlim_cur = stack_rlim.rlim_cur;
        }
        if (stack_rlim.rlim_max != 0) {
            newrlim->rlim_max = stack_rlim.rlim_max;
        }
        break;

    case RLIMIT_NOFILE:
        /*
         * Nothing to be done here as we already performed the sanity checks before entering the switch code block.
         * The real NOFILE limit enforced by the kernel is capped at MIN(RLIMIT_NOFILE, maxfilesperproc).
         */
        break;

    case RLIMIT_AS:
        /* Over to Mach VM to validate the new address space limit */
        if (vm_map_set_size_limit(current_map(), newrlim->rlim_cur) != KERN_SUCCESS) {
            /* The limit specified cannot be lowered because current usage is already higher than the limit. */
            error = EINVAL;
            goto out;
        }
        break;

    case RLIMIT_NPROC:
        /*
         * Only root can set to the maxproc limits, as it is a
         * systemwide resource; all others are limited to
         * maxprocperuid (presumably less than maxproc).
         */
        if (kauth_cred_issuser(kauth_cred_get())) {
            if (newrlim->rlim_cur > (rlim_t)maxproc) {
                newrlim->rlim_cur = maxproc;
            }
            if (newrlim->rlim_max > (rlim_t)maxproc) {
                newrlim->rlim_max = maxproc;
            }
        } else {
            if (newrlim->rlim_cur > (rlim_t)maxprocperuid) {
                newrlim->rlim_cur = maxprocperuid;
            }
            if (newrlim->rlim_max > (rlim_t)maxprocperuid) {
                newrlim->rlim_max = maxprocperuid;
            }
        }
        break;

    case RLIMIT_MEMLOCK:
        /*
         * Tell the Mach VM layer about the new limit value.
         */
        newrlim->rlim_cur = (vm_size_t)newrlim->rlim_cur;
        vm_map_set_user_wire_limit(current_map(), (vm_size_t)newrlim->rlim_cur);
        break;
    } /* switch... */

    /* Everything checks out and we are now ready to update the rlimit */
    error = 0;

out:

    if (error == 0) {
        /*
         * COW the current plimit if it's shared, otherwise update it in place.
         * Finally unblock other threads wishing to change plimit.
         */
        proc_limitupdate(p, true, ^(struct plimit *plim) {
            plim->pl_rlimit[which] = *newrlim;
        });
    } else {
        /*
         * This setrlimit has failed, just leave the plimit as is and unblock other
         * threads wishing to change plimit.
         */
        proc_lock(p);
        proc_limitunblock(p);
        proc_unlock(p);
    }

    return error;
}

/* ARGSUSED */
int
getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval)
{
    struct rlimit lim = {};

    /*
     * Take out flag now in case we need to use it to trigger variant
     * behaviour later.
     */
    uap->which &= ~_RLIMIT_POSIX_FLAG;

    if (uap->which >= RLIM_NLIMITS) {
        return EINVAL;
    }
    lim = proc_limitget(p, uap->which);
    return copyout((caddr_t)&lim,
               uap->rlp, sizeof(struct rlimit));
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
/* No lock on proc is held for this.. */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *ip)
{
    task_t task;

    timerclear(up);
    timerclear(sp);
    if (ip != NULL) {
        timerclear(ip);
    }

    task = proc_task(p);
    if (task) {
        mach_task_basic_info_data_t tinfo;
        task_thread_times_info_data_t ttimesinfo;
        task_events_info_data_t teventsinfo;
        mach_msg_type_number_t task_info_count, task_ttimes_count;
        mach_msg_type_number_t task_events_count;
        struct timeval ut, st;

        task_info_count = MACH_TASK_BASIC_INFO_COUNT;
        task_info(task, MACH_TASK_BASIC_INFO,
            (task_info_t)&tinfo, &task_info_count);
        ut.tv_sec = tinfo.user_time.seconds;
        ut.tv_usec = tinfo.user_time.microseconds;
        st.tv_sec = tinfo.system_time.seconds;
        st.tv_usec = tinfo.system_time.microseconds;
        timeradd(&ut, up, up);
        timeradd(&st, sp, sp);

        task_ttimes_count = TASK_THREAD_TIMES_INFO_COUNT;
        task_info(task, TASK_THREAD_TIMES_INFO,
            (task_info_t)&ttimesinfo, &task_ttimes_count);

        ut.tv_sec = ttimesinfo.user_time.seconds;
        ut.tv_usec = ttimesinfo.user_time.microseconds;
        st.tv_sec = ttimesinfo.system_time.seconds;
        st.tv_usec = ttimesinfo.system_time.microseconds;
        timeradd(&ut, up, up);
        timeradd(&st, sp, sp);

        task_events_count = TASK_EVENTS_INFO_COUNT;
        task_info(task, TASK_EVENTS_INFO,
            (task_info_t)&teventsinfo, &task_events_count);

        /*
         * No need to lock "p": this does not need to be
         * completely consistent, right ?
         */
        p->p_stats->p_ru.ru_minflt = (teventsinfo.faults -
            teventsinfo.pageins);
        p->p_stats->p_ru.ru_majflt = teventsinfo.pageins;
        p->p_stats->p_ru.ru_nivcsw = (teventsinfo.csw -
            p->p_stats->p_ru.ru_nvcsw);
        if (p->p_stats->p_ru.ru_nivcsw < 0) {
            p->p_stats->p_ru.ru_nivcsw = 0;
        }

        p->p_stats->p_ru.ru_maxrss = (long)tinfo.resident_size_max;
    }
}

__private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
__private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);

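/*
 * getrusage
 *
 * Returns a snapshot of p_ru (RUSAGE_SELF, with ru_utime/ru_stime
 * refreshed from the Mach task via calcru) or p_cru (RUSAGE_CHILDREN),
 * munged into the 32-bit or 64-bit userspace rusage layout as needed.
 */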
/* ARGSUSED */
int
getrusage(struct proc *p, struct getrusage_args *uap, __unused int32_t *retval)
{
    struct rusage *rup, rubuf;
    struct user64_rusage rubuf64 = {};
    struct user32_rusage rubuf32 = {};
    size_t retsize = sizeof(rubuf); /* default: 32 bits */
    caddr_t retbuf = (caddr_t)&rubuf; /* default: 32 bits */
    struct timeval utime;
    struct timeval stime;


    switch (uap->who) {
    case RUSAGE_SELF:
        calcru(p, &utime, &stime, NULL);
        proc_lock(p);
        rup = &p->p_stats->p_ru;
        rup->ru_utime = utime;
        rup->ru_stime = stime;

        rubuf = *rup;
        proc_unlock(p);

        break;

    case RUSAGE_CHILDREN:
        proc_lock(p);
        rup = &p->p_stats->p_cru;
        rubuf = *rup;
        proc_unlock(p);
        break;

    default:
        return EINVAL;
    }
    if (IS_64BIT_PROCESS(p)) {
        retsize = sizeof(rubuf64);
        retbuf = (caddr_t)&rubuf64;
        munge_user64_rusage(&rubuf, &rubuf64);
    } else {
        retsize = sizeof(rubuf32);
        retbuf = (caddr_t)&rubuf32;
        munge_user32_rusage(&rubuf, &rubuf32);
    }

    return copyout(retbuf, uap->rusage, retsize);
}

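/*
 * ruadd
 *
 * Accumulates ru2 into ru: the utime/stime timevals are added, maxrss
 * takes the larger of the two values, and every remaining long-sized
 * field between ru_first and ru_last is summed pairwise.
 */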
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
    long *ip, *ip2;
    long i;

    timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
    timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
    if (ru->ru_maxrss < ru2->ru_maxrss) {
        ru->ru_maxrss = ru2->ru_maxrss;
    }
    ip = &ru->ru_first; ip2 = &ru2->ru_first;
    for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--) {
        *ip++ += *ip2++;
    }
}

/*
 * Add the rusage stats of the child to the parent.
 *
 * It adds the rusage statistics of the child process and the statistics
 * of all its children to its parent.
 *
 * Note: the proc lock of the parent should be held while calling this function.
 */
void
update_rusage_info_child(struct rusage_info_child *ri, rusage_info_current *ri_current)
{
    ri->ri_child_user_time += (ri_current->ri_user_time +
        ri_current->ri_child_user_time);
    ri->ri_child_system_time += (ri_current->ri_system_time +
        ri_current->ri_child_system_time);
    ri->ri_child_pkg_idle_wkups += (ri_current->ri_pkg_idle_wkups +
        ri_current->ri_child_pkg_idle_wkups);
    ri->ri_child_interrupt_wkups += (ri_current->ri_interrupt_wkups +
        ri_current->ri_child_interrupt_wkups);
    ri->ri_child_pageins += (ri_current->ri_pageins +
        ri_current->ri_child_pageins);
    ri->ri_child_elapsed_abstime += ((ri_current->ri_proc_exit_abstime -
        ri_current->ri_proc_start_abstime) + ri_current->ri_child_elapsed_abstime);
}

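/*
 * plimit reference management.
 *
 * Readers access p_limit without locks under SMR, so the final
 * reference release must not free the plimit immediately; instead the
 * zfree is deferred through smr_proc_task_call() until all current
 * readers have left their critical sections.
 */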
static void
proc_limit_free(smr_node_t node)
{
    struct plimit *plimit = __container_of(node, struct plimit, pl_node);

    zfree(plimit_zone, plimit);
}

static void
proc_limit_release(struct plimit *plimit)
{
    if (os_ref_release(&plimit->pl_refcnt) == 0) {
        smr_proc_task_call(&plimit->pl_node, sizeof(*plimit), proc_limit_free);
    }
}

/*
 * Read the soft limit of the specified resource.
 */
rlim_t
proc_limitgetcur(proc_t p, int which)
{
    rlim_t rlim_cur;

    assert(p);
    assert(which < RLIM_NLIMITS);

    smr_proc_task_enter();
    rlim_cur = smr_entered_load(&p->p_limit)->pl_rlimit[which].rlim_cur;
    smr_proc_task_leave();

    return rlim_cur;
}

/*
 * Handle the commonly asked limit that needs to be clamped with maxfilesperproc.
 */
int
proc_limitgetcur_nofile(struct proc *p)
{
    rlim_t lim = proc_limitgetcur(p, RLIMIT_NOFILE);

    return (int)MIN(lim, maxfilesperproc);
}

/*
 * Write the soft limit of the specified resource.  This is an internal
 * function used only by proc_exit to update RLIMIT_FSIZE in
 * place without invoking setrlimit.
 */
void
proc_limitsetcur_fsize(proc_t p, rlim_t value)
{
    proc_limitupdate(p, false, ^(struct plimit *plimit) {
        plimit->pl_rlimit[RLIMIT_FSIZE].rlim_cur = value;
    });
}

struct rlimit
proc_limitget(proc_t p, int which)
{
    struct rlimit lim;

    assert(which < RLIM_NLIMITS);

    smr_proc_task_enter();
    lim = smr_entered_load(&p->p_limit)->pl_rlimit[which];
    smr_proc_task_leave();

    return lim;
}

void
proc_limitfork(proc_t parent, proc_t child)
{
    struct plimit *plim;

    proc_lock(parent);
    plim = smr_serialized_load(&parent->p_limit);
    os_ref_retain(&plim->pl_refcnt);
    proc_unlock(parent);

    smr_init_store(&child->p_limit, plim);
}

void
proc_limitdrop(proc_t p)
{
    struct plimit *plimit = NULL;

    proc_lock(p);
    plimit = smr_serialized_load(&p->p_limit);
    smr_clear_store(&p->p_limit);
    proc_unlock(p);

    proc_limit_release(plimit);
}

/*
 * proc_limitblock/unblock are used to serialize access to plimit
 * from concurrent threads within the same process.
 * Callers must be holding the proc lock to enter, return with
 * the proc lock locked
 */
static void
proc_limitblock(proc_t p)
{
    lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);

    while (p->p_lflag & P_LLIMCHANGE) {
        p->p_lflag |= P_LLIMWAIT;
        msleep(&p->p_limit, &p->p_mlock, 0, "proc_limitblock", NULL);
    }
    p->p_lflag |= P_LLIMCHANGE;
}

/*
 * Callers must be holding the proc lock to enter, return with
 * the proc lock locked
 */
static void
proc_limitunblock(proc_t p)
{
    lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);

    p->p_lflag &= ~P_LLIMCHANGE;
    if (p->p_lflag & P_LLIMWAIT) {
        p->p_lflag &= ~P_LLIMWAIT;
        wakeup(&p->p_limit);
    }
}

/*
 * Perform an rlimit update (as defined by the arbitrary `update` function).
 *
 * Because plimits are accessed without holding any locks,
 * with only a hazard reference, the struct plimit is always
 * copied, updated, and replaced, to implement a const value type.
 */
static void
proc_limitupdate(proc_t p, bool unblock, void (^update)(struct plimit *))
{
    struct plimit *cur_plim;
    struct plimit *copy_plim;

    copy_plim = zalloc_flags(plimit_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);

    proc_lock(p);

    cur_plim = smr_serialized_load(&p->p_limit);

    os_ref_init_count(&copy_plim->pl_refcnt, &rlimit_refgrp, 1);
    bcopy(cur_plim->pl_rlimit, copy_plim->pl_rlimit,
        sizeof(struct rlimit) * RLIM_NLIMITS);

    update(copy_plim);

    smr_serialized_store(&p->p_limit, copy_plim);

    if (unblock) {
        proc_limitunblock(p);
    }
    proc_unlock(p);

    proc_limit_release(cur_plim);
}
1832
1833 static int
1834 iopolicysys_disk(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1835 static int
1836 iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1837 static int
1838 iopolicysys_vfs_atime_updates(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1839 static int
1840 iopolicysys_vfs_statfs_no_data_volume(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1841 static int
1842 iopolicysys_vfs_trigger_resolve(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1843 static int
1844 iopolicysys_vfs_ignore_content_protection(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1845 static int
1846 iopolicysys_vfs_ignore_node_permissions(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *ipo_param);
1847 static int
1848 iopolicysys_vfs_skip_mtime_update(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1849 static int
1850 iopolicysys_vfs_allow_lowspace_writes(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1851 static int
1852 iopolicysys_vfs_disallow_rw_for_o_evtonly(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1853 static int iopolicysys_vfs_altlink(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1854 static int iopolicysys_vfs_nocache_write_fs_blksize(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1855
1856 /*
1857 * iopolicysys
1858 *
1859 * Description: System call MUX for use in manipulating I/O policy attributes of the current process or thread
1860 *
1861 * Parameters: cmd Policy command
1862 * arg Pointer to policy arguments
1863 *
1864 * Returns: 0 Success
1865 * EINVAL Invalid command or invalid policy arguments
1866 *
1867 */
1868 int
iopolicysys(struct proc * p,struct iopolicysys_args * uap,int32_t * retval)1869 iopolicysys(struct proc *p, struct iopolicysys_args *uap, int32_t *retval)
1870 {
1871 int error = 0;
1872 struct _iopol_param_t iop_param;
1873
1874 if ((error = copyin(uap->arg, &iop_param, sizeof(iop_param))) != 0) {
1875 goto out;
1876 }
1877
1878 switch (iop_param.iop_iotype) {
1879 case IOPOL_TYPE_DISK:
1880 error = iopolicysys_disk(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error == EIDRM) {
			*retval = -2;
			error = 0;
		}
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY:
		error = iopolicysys_vfs_hfs_case_sensitivity(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_ATIME_UPDATES:
		error = iopolicysys_vfs_atime_updates(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_MATERIALIZE_DATALESS_FILES:
		error = iopolicysys_vfs_materialize_dataless_files(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_STATFS_NO_DATA_VOLUME:
		error = iopolicysys_vfs_statfs_no_data_volume(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_TRIGGER_RESOLVE:
		error = iopolicysys_vfs_trigger_resolve(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_IGNORE_CONTENT_PROTECTION:
		error = iopolicysys_vfs_ignore_content_protection(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_IGNORE_PERMISSIONS:
		error = iopolicysys_vfs_ignore_node_permissions(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_SKIP_MTIME_UPDATE:
		error = iopolicysys_vfs_skip_mtime_update(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_ALLOW_LOW_SPACE_WRITES:
		error = iopolicysys_vfs_allow_lowspace_writes(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_DISALLOW_RW_FOR_O_EVTONLY:
		error = iopolicysys_vfs_disallow_rw_for_o_evtonly(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_ALTLINK:
		error = iopolicysys_vfs_altlink(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;
	case IOPOL_TYPE_VFS_NOCACHE_WRITE_FS_BLKSIZE:
		error = iopolicysys_vfs_nocache_write_fs_blksize(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
		if (error) {
			goto out;
		}
		break;

	default:
		error = EINVAL;
		goto out;
	}

	/* Individual iotype handlers are expected to update iop_param when invoked with a GET command */
	if (uap->cmd == IOPOL_CMD_GET) {
		error = copyout((caddr_t)&iop_param, uap->arg, sizeof(iop_param));
		if (error) {
			goto out;
		}
	}

out:
	return error;
}

static int
iopolicysys_disk(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;
	thread_t thread;
	int policy_flavor;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		thread = THREAD_NULL;
		policy_flavor = TASK_POLICY_IOPOL;
		break;

	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		policy_flavor = TASK_POLICY_IOPOL;

		/* Combining QoS with a (non-PASSIVE) IO policy is not allowed; doing so strips the QoS */
		if (cmd == IOPOL_CMD_SET && thread_has_qos_policy(thread)) {
			switch (policy) {
			case IOPOL_DEFAULT:
			case IOPOL_PASSIVE:
				break;
			case IOPOL_UTILITY:
			case IOPOL_THROTTLE:
			case IOPOL_IMPORTANT:
			case IOPOL_STANDARD:
				if (!thread_is_static_param(thread)) {
					thread_remove_qos_policy(thread);
					/*
					 * This is not an error; it returns a marker to user space
					 * indicating that the thread was stripped of its QoS class.
					 */
					error = EIDRM;
					break;
				}
				OS_FALLTHROUGH;
			default:
				error = EINVAL;
				goto out;
			}
		}
		break;

	case IOPOL_SCOPE_DARWIN_BG:
#if !defined(XNU_TARGET_OS_OSX)
		/* Not supported on non-macOS platforms, where BG is always IOPOL_THROTTLE */
		error = ENOTSUP;
		goto out;
#else /* !defined(XNU_TARGET_OS_OSX) */
		thread = THREAD_NULL;
		policy_flavor = TASK_POLICY_DARWIN_BG_IOPOL;
		break;
#endif /* !defined(XNU_TARGET_OS_OSX) */

	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_DEFAULT:
			if (scope == IOPOL_SCOPE_DARWIN_BG) {
				/* the current default BG throttle level is UTILITY */
				policy = IOPOL_UTILITY;
			} else {
				policy = IOPOL_IMPORTANT;
			}
			break;
		case IOPOL_UTILITY:
		/* fall-through */
		case IOPOL_THROTTLE:
			/* These levels are OK */
			break;
		case IOPOL_IMPORTANT:
		/* fall-through */
		case IOPOL_STANDARD:
		/* fall-through */
		case IOPOL_PASSIVE:
			if (scope == IOPOL_SCOPE_DARWIN_BG) {
				/* These levels are invalid for BG */
				error = EINVAL;
				goto out;
			} else {
				/* OK for other scopes */
			}
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (thread != THREAD_NULL) {
			proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor, policy);
		} else {
			proc_set_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = proc_get_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor);
		} else {
			policy = proc_get_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor);
		}
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL; /* unknown command */
		break;
	}

out:
	return error;
}
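
/*
 * Illustrative only, not part of the original sources: the QoS interaction
 * above is visible from user space. A thread that already carries a QoS
 * class and then requests a non-PASSIVE disk policy has that QoS class
 * removed, and the EIDRM marker set above surfaces as a -2 return from
 * iopolicysys(). A sketch, assuming the pthread QoS and iopolicy wrappers:
 *
 *	pthread_set_qos_class_self_np(QOS_CLASS_UTILITY, 0);
 *	// Requesting IOPOL_THROTTLE on this thread strips its QoS class;
 *	// the call may report the -2 "QoS stripped" marker instead of 0.
 *	int rv = setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD, IOPOL_THROTTLE);
 *
 * Note also that on SET, IOPOL_DEFAULT is remapped: to IOPOL_UTILITY for
 * IOPOL_SCOPE_DARWIN_BG and to IOPOL_IMPORTANT for the other scopes.
 */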

static int
iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		/* Only process OK */
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
		/* fall-through */
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
			/* These policies are OK */
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (0 == kauth_cred_issuser(kauth_cred_get())) {
			/* If it's a non-root process, it needs to have the entitlement to set the policy */
			boolean_t entitled = FALSE;
			entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
			if (!entitled) {
				error = EPERM;
				goto out;
			}
		}

		switch (policy) {
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
			OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY), &p->p_vfs_iopolicy);
			break;
		case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
			OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY, &p->p_vfs_iopolicy);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		break;
	case IOPOL_CMD_GET:
		iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY)
		    ? IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE
		    : IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT;
		break;
	default:
		error = EINVAL; /* unknown command */
		break;
	}

out:
	return error;
}

static inline int
get_thread_atime_policy(struct uthread *ut)
{
	return (ut->uu_flag & UT_ATIME_UPDATE) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT;
}

static inline void
set_thread_atime_policy(struct uthread *ut, int policy)
{
	if (policy == IOPOL_ATIME_UPDATES_OFF) {
		ut->uu_flag |= UT_ATIME_UPDATE;
	} else {
		ut->uu_flag &= ~UT_ATIME_UPDATE;
	}
}

static inline void
set_task_atime_policy(struct proc *p, int policy)
{
	if (policy == IOPOL_ATIME_UPDATES_OFF) {
		OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES, &p->p_vfs_iopolicy);
	} else {
		OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES), &p->p_vfs_iopolicy);
	}
}

static inline int
get_task_atime_policy(struct proc *p)
{
	return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT;
}

static int
iopolicysys_vfs_atime_updates(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;
	thread_t thread;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		break;
	case IOPOL_SCOPE_PROCESS:
		thread = THREAD_NULL;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_ATIME_UPDATES_DEFAULT:
		case IOPOL_ATIME_UPDATES_OFF:
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (thread != THREAD_NULL) {
			set_thread_atime_policy(get_bsdthread_info(thread), policy);
		} else {
			set_task_atime_policy(p, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = get_thread_atime_policy(get_bsdthread_info(thread));
		} else {
			policy = get_task_atime_policy(p);
		}
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL; /* unknown command */
		break;
	}

out:
	return error;
}

static inline int
get_thread_materialize_policy(struct uthread *ut)
{
	if (ut->uu_flag & UT_NSPACE_NODATALESSFAULTS) {
		return IOPOL_MATERIALIZE_DATALESS_FILES_OFF;
	} else if (ut->uu_flag & UT_NSPACE_FORCEDATALESSFAULTS) {
		return IOPOL_MATERIALIZE_DATALESS_FILES_ON;
	}
	/* Default thread behavior is "inherit process behavior". */
	return IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT;
}

static inline void
set_thread_materialize_policy(struct uthread *ut, int policy)
{
	if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_OFF) {
		ut->uu_flag &= ~UT_NSPACE_FORCEDATALESSFAULTS;
		ut->uu_flag |= UT_NSPACE_NODATALESSFAULTS;
	} else if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_ON) {
		ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS;
		ut->uu_flag |= UT_NSPACE_FORCEDATALESSFAULTS;
	} else {
		ut->uu_flag &= ~(UT_NSPACE_NODATALESSFAULTS | UT_NSPACE_FORCEDATALESSFAULTS);
	}
}

static inline void
set_proc_materialize_policy(struct proc *p, int policy)
{
	if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT) {
		/*
		 * Caller has specified "use the default policy".
		 * The default policy is to NOT materialize dataless
		 * files.
		 */
		policy = IOPOL_MATERIALIZE_DATALESS_FILES_OFF;
	}
	if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_ON) {
		OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES, &p->p_vfs_iopolicy);
	} else {
		OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES), &p->p_vfs_iopolicy);
	}
}

static int
get_proc_materialize_policy(struct proc *p)
{
	return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES) ? IOPOL_MATERIALIZE_DATALESS_FILES_ON : IOPOL_MATERIALIZE_DATALESS_FILES_OFF;
}

int
iopolicysys_vfs_materialize_dataless_files(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;
	thread_t thread;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		break;
	case IOPOL_SCOPE_PROCESS:
		thread = THREAD_NULL;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT:
		case IOPOL_MATERIALIZE_DATALESS_FILES_OFF:
		case IOPOL_MATERIALIZE_DATALESS_FILES_ON:
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (thread != THREAD_NULL) {
			set_thread_materialize_policy(get_bsdthread_info(thread), policy);
		} else {
			set_proc_materialize_policy(p, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = get_thread_materialize_policy(get_bsdthread_info(thread));
		} else {
			policy = get_proc_materialize_policy(p);
		}
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL; /* unknown command */
		break;
	}

out:
	return error;
}

static int
iopolicysys_vfs_statfs_no_data_volume(struct proc *p __unused, int cmd,
    int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		/* Only process OK */
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT:
		/* fall-through */
		case IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME:
			/* These policies are OK */
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (0 == kauth_cred_issuser(kauth_cred_get())) {
			/* If it's a non-root process, it needs to have the entitlement to set the policy */
			boolean_t entitled = FALSE;
			entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
			if (!entitled) {
				error = EPERM;
				goto out;
			}
		}

		switch (policy) {
		case IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT:
			OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME), &p->p_vfs_iopolicy);
			break;
		case IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME:
			OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME, &p->p_vfs_iopolicy);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		break;
	case IOPOL_CMD_GET:
		iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME)
		    ? IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME
		    : IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT;
		break;
	default:
		error = EINVAL; /* unknown command */
		break;
	}

out:
	return error;
}

static int
iopolicysys_vfs_trigger_resolve(struct proc *p __unused, int cmd,
    int scope, int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		/* Only process OK */
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT:
		/* fall-through */
		case IOPOL_VFS_TRIGGER_RESOLVE_OFF:
			/* These policies are OK */
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		switch (policy) {
		case IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT:
			OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE), &p->p_vfs_iopolicy);
			break;
		case IOPOL_VFS_TRIGGER_RESOLVE_OFF:
			OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE, &p->p_vfs_iopolicy);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		break;
	case IOPOL_CMD_GET:
		iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE)
		    ? IOPOL_VFS_TRIGGER_RESOLVE_OFF
		    : IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT;
		break;
	default:
		error = EINVAL; /* unknown command */
		break;
	}

out:
	return error;
}

static int
iopolicysys_vfs_ignore_content_protection(struct proc *p, int cmd, int scope,
    int policy, struct _iopol_param_t *iop_param)
{
	int error = 0;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		/* Only process OK */
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_VFS_CONTENT_PROTECTION_DEFAULT:
			OS_FALLTHROUGH;
		case IOPOL_VFS_CONTENT_PROTECTION_IGNORE:
			/* These policies are OK */
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (0 == kauth_cred_issuser(kauth_cred_get())) {
			/* If it's a non-root process, it needs to have the entitlement to set the policy */
			boolean_t entitled = FALSE;
			entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
			if (!entitled) {
				error = EPERM;
				goto out;
			}
		}

		switch (policy) {
		case IOPOL_VFS_CONTENT_PROTECTION_DEFAULT:
			os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION, relaxed);
			break;
		case IOPOL_VFS_CONTENT_PROTECTION_IGNORE:
			os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION, relaxed);
			break;
		default:
			error = EINVAL;
			goto out;
		}

		break;
	case IOPOL_CMD_GET:
		iop_param->iop_policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION)
		    ? IOPOL_VFS_CONTENT_PROTECTION_IGNORE
		    : IOPOL_VFS_CONTENT_PROTECTION_DEFAULT;
		break;
	default:
		error = EINVAL; /* unknown command */
		break;
	}

out:
	return error;
}

#define AUTHORIZED_ACCESS_ENTITLEMENT \
	"com.apple.private.vfs.authorized-access"
int
iopolicysys_vfs_ignore_node_permissions(struct proc *p, int cmd, int scope,
    int policy, __unused struct _iopol_param_t *iop_param)
{
	int error = EINVAL;

	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		break;
	default:
		goto out;
	}

	switch (cmd) {
	case IOPOL_CMD_GET:
		policy = os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS ?
		    IOPOL_VFS_IGNORE_PERMISSIONS_ON : IOPOL_VFS_IGNORE_PERMISSIONS_OFF;
		iop_param->iop_policy = policy;
		goto out_ok;
	case IOPOL_CMD_SET:
		/* SET is handled after the switch */
		break;
	default:
		goto out;
	}

	if (!IOCurrentTaskHasEntitlement(AUTHORIZED_ACCESS_ENTITLEMENT)) {
		error = EPERM;
		goto out;
	}

	switch (policy) {
	case IOPOL_VFS_IGNORE_PERMISSIONS_OFF:
		os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS, relaxed);
		break;
	case IOPOL_VFS_IGNORE_PERMISSIONS_ON:
		os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS, relaxed);
		break;
	default:
		break;
	}

out_ok:
	error = 0;
out:
	return error;
}

static inline void
set_thread_skip_mtime_policy(struct uthread *ut, int policy)
{
	if (policy == IOPOL_VFS_SKIP_MTIME_UPDATE_ON) {
		os_atomic_or(&ut->uu_flag, UT_SKIP_MTIME_UPDATE, relaxed);
	} else {
		os_atomic_andnot(&ut->uu_flag, UT_SKIP_MTIME_UPDATE, relaxed);
	}
}

static inline int
get_thread_skip_mtime_policy(struct uthread *ut)
{
	return (os_atomic_load(&ut->uu_flag, relaxed) & UT_SKIP_MTIME_UPDATE) ?
	       IOPOL_VFS_SKIP_MTIME_UPDATE_ON : IOPOL_VFS_SKIP_MTIME_UPDATE_OFF;
}

static inline void
set_proc_skip_mtime_policy(struct proc *p, int policy)
{
	if (policy == IOPOL_VFS_SKIP_MTIME_UPDATE_ON) {
		os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_SKIP_MTIME_UPDATE, relaxed);
	} else {
		os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_SKIP_MTIME_UPDATE, relaxed);
	}
}

static inline int
get_proc_skip_mtime_policy(struct proc *p)
{
	return (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_SKIP_MTIME_UPDATE) ?
	       IOPOL_VFS_SKIP_MTIME_UPDATE_ON : IOPOL_VFS_SKIP_MTIME_UPDATE_OFF;
}

#define SKIP_MTIME_UPDATE_ENTITLEMENT \
	"com.apple.private.vfs.skip-mtime-updates"
int
iopolicysys_vfs_skip_mtime_update(struct proc *p, int cmd, int scope,
    int policy, __unused struct _iopol_param_t *iop_param)
{
	thread_t thread;
	int error = 0;

	/* Validate scope */
	switch (scope) {
	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		break;
	case IOPOL_SCOPE_PROCESS:
		thread = THREAD_NULL;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
		case IOPOL_VFS_SKIP_MTIME_UPDATE_ON:
		case IOPOL_VFS_SKIP_MTIME_UPDATE_OFF:
			if (!IOCurrentTaskHasEntitlement(SKIP_MTIME_UPDATE_ENTITLEMENT)) {
				error = EPERM;
				goto out;
			}
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	/* Perform command */
	switch (cmd) {
	case IOPOL_CMD_SET:
		if (thread != THREAD_NULL) {
			set_thread_skip_mtime_policy(get_bsdthread_info(thread), policy);
		} else {
			set_proc_skip_mtime_policy(p, policy);
		}
		break;
	case IOPOL_CMD_GET:
		if (thread != THREAD_NULL) {
			policy = get_thread_skip_mtime_policy(get_bsdthread_info(thread));
		} else {
			policy = get_proc_skip_mtime_policy(p);
		}
		iop_param->iop_policy = policy;
		break;
	default:
		error = EINVAL; /* unknown command */
		break;
	}

out:
	return error;
}

#define ALLOW_LOW_SPACE_WRITES_ENTITLEMENT \
	"com.apple.private.vfs.allow-low-space-writes"
static int
iopolicysys_vfs_allow_lowspace_writes(struct proc *p, int cmd, int scope,
    int policy, __unused struct _iopol_param_t *iop_param)
{
	int error = EINVAL;

	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		break;
	default:
		goto out;
	}

	switch (cmd) {
	case IOPOL_CMD_GET:
		policy = os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES ?
		    IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_ON : IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_OFF;
		iop_param->iop_policy = policy;
		goto out_ok;
	case IOPOL_CMD_SET:
		break;
	default:
		break;
	}

	if (!IOCurrentTaskHasEntitlement(ALLOW_LOW_SPACE_WRITES_ENTITLEMENT)) {
		error = EPERM;
		goto out;
	}

	switch (policy) {
	case IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_OFF:
		os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES, relaxed);
		break;
	case IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_ON:
		os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES, relaxed);
		break;
	default:
		break;
	}

out_ok:
	error = 0;
out:
	return error;
}

static int
iopolicysys_vfs_disallow_rw_for_o_evtonly(struct proc *p, int cmd, int scope,
    int policy, __unused struct _iopol_param_t *iop_param)
{
	int error = EINVAL;

	switch (scope) {
	case IOPOL_SCOPE_PROCESS:
		break;
	default:
		goto out;
	}

	switch (cmd) {
	case IOPOL_CMD_GET:
		policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) &
		    P_VFS_IOPOLICY_DISALLOW_RW_FOR_O_EVTONLY) ?
		    IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_ON :
		    IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_DEFAULT;
		iop_param->iop_policy = policy;
		goto out_ok;
	case IOPOL_CMD_SET:
		break;
	default:
		goto out;
	}

	/* Once set, we don't allow the process to clear it. */
	switch (policy) {
	case IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_ON:
		os_atomic_or(&p->p_vfs_iopolicy,
		    P_VFS_IOPOLICY_DISALLOW_RW_FOR_O_EVTONLY, relaxed);
		break;
	default:
		goto out;
	}

out_ok:
	error = 0;
out:
	return error;
}

static int
iopolicysys_vfs_altlink(struct proc *p, int cmd, int scope, int policy,
    struct _iopol_param_t *iop_param)
{
	if (scope != IOPOL_SCOPE_PROCESS) {
		return EINVAL;
	}

	if (cmd == IOPOL_CMD_GET) {
		policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_ALTLINK) ?
		    IOPOL_VFS_ALTLINK_ENABLED : IOPOL_VFS_ALTLINK_DISABLED;
		iop_param->iop_policy = policy;
		return 0;
	}

	/* Once set, we don't allow the process to clear it. */
	if (policy == IOPOL_VFS_ALTLINK_ENABLED) {
		os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALTLINK, relaxed);
		return 0;
	}

	return EINVAL;
}

static int
iopolicysys_vfs_nocache_write_fs_blksize(struct proc *p, int cmd, int scope, int policy,
    struct _iopol_param_t *iop_param)
{
	thread_t thread;

	switch (scope) {
	case IOPOL_SCOPE_THREAD:
		thread = current_thread();
		break;
	case IOPOL_SCOPE_PROCESS:
		thread = THREAD_NULL;
		break;
	default:
		return EINVAL;
	}

	if (cmd == IOPOL_CMD_GET) {
		if (thread != THREAD_NULL) {
			struct uthread *ut = get_bsdthread_info(thread);
			policy = ut->uu_flag & UT_FS_BLKSIZE_NOCACHE_WRITES ?
			    IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_ON : IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_DEFAULT;
		} else {
			policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_NOCACHE_WRITE_FS_BLKSIZE) ?
			    IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_ON : IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_DEFAULT;
		}
		iop_param->iop_policy = policy;
		return 0;
	}

	/* Once set, we don't allow the process or thread to clear it. */
	if ((cmd == IOPOL_CMD_SET) && (policy == IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_ON)) {
		if (thread != THREAD_NULL) {
			struct uthread *ut = get_bsdthread_info(thread);
			ut->uu_flag |= UT_FS_BLKSIZE_NOCACHE_WRITES;
		} else {
			os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_NOCACHE_WRITE_FS_BLKSIZE, relaxed);
		}
		return 0;
	}

	return EINVAL;
}

void
proc_apply_task_networkbg(int pid, thread_t thread)
{
	proc_t p = proc_find(pid);

	if (p != PROC_NULL) {
		do_background_socket(p, thread);
		proc_rele(p);
	}
}

void
gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor)
{
	struct rusage_info_child *ri_child;

	assert(p->p_stats != NULL);
	memset(ru, 0, sizeof(*ru));
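	/*
	 * The cases below intentionally cascade via OS_FALLTHROUGH: a request
	 * for flavor RUSAGE_INFO_Vn also fills in every field defined by the
	 * older flavors, so V6 falls through V5, V4, ... all the way to V0.
	 */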
	switch (flavor) {
	case RUSAGE_INFO_V6:
		ru->ri_neural_footprint = get_task_neural_nofootprint_total(proc_task(p));
		ru->ri_lifetime_max_neural_footprint = get_task_neural_nofootprint_total_lifetime_max(proc_task(p));
#if CONFIG_LEDGER_INTERVAL_MAX
		ru->ri_interval_max_neural_footprint = get_task_neural_nofootprint_total_interval_max(proc_task(p), FALSE);
#endif
		/* Any P-specific resource counters are captured in fill_task_rusage. */
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V5:
#if __has_feature(ptrauth_calls)
		if (vm_shared_region_is_reslide(proc_task(p))) {
			ru->ri_flags |= RU_PROC_RUNS_RESLIDE;
		}
#endif /* __has_feature(ptrauth_calls) */
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V4:
		ru->ri_logical_writes = get_task_logical_writes(proc_task(p), false);
		ru->ri_lifetime_max_phys_footprint = get_task_phys_footprint_lifetime_max(proc_task(p));
#if CONFIG_LEDGER_INTERVAL_MAX
		ru->ri_interval_max_phys_footprint = get_task_phys_footprint_interval_max(proc_task(p), FALSE);
#endif
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V3:
		fill_task_qos_rusage(proc_task(p), ru);
		fill_task_billed_usage(proc_task(p), ru);
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V2:
		fill_task_io_rusage(proc_task(p), ru);
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V1:
		/*
		 * p->p_stats->ri_child statistics are protected under proc lock.
		 */
		proc_lock(p);

		ri_child = &(p->p_stats->ri_child);
		ru->ri_child_user_time = ri_child->ri_child_user_time;
		ru->ri_child_system_time = ri_child->ri_child_system_time;
		ru->ri_child_pkg_idle_wkups = ri_child->ri_child_pkg_idle_wkups;
		ru->ri_child_interrupt_wkups = ri_child->ri_child_interrupt_wkups;
		ru->ri_child_pageins = ri_child->ri_child_pageins;
		ru->ri_child_elapsed_abstime = ri_child->ri_child_elapsed_abstime;

		proc_unlock(p);
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V0:
		proc_getexecutableuuid(p, (unsigned char *)&ru->ri_uuid, sizeof(ru->ri_uuid));
		fill_task_rusage(proc_task(p), ru);
		ru->ri_proc_start_abstime = p->p_stats->ps_start;
	}
}

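/*
 * Illustrative only, not part of the original sources: user space typically
 * reaches this through the libproc wrapper proc_pid_rusage(3). A minimal
 * sketch, assuming the documented wrapper signature:
 *
 *	#include <libproc.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	rusage_info_current ri;
 *	// Fetch the V4 flavor for our own pid; newer flavors are supersets.
 *	if (proc_pid_rusage(getpid(), RUSAGE_INFO_V4, (rusage_info_t *)&ri) == 0)
 *		printf("phys footprint: %llu bytes\n", ri.ri_phys_footprint);
 */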
int
proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie)
{
	rusage_info_current ri_current = {};

	size_t size = 0;

	switch (flavor) {
	case RUSAGE_INFO_V0:
		size = sizeof(struct rusage_info_v0);
		break;

	case RUSAGE_INFO_V1:
		size = sizeof(struct rusage_info_v1);
		break;

	case RUSAGE_INFO_V2:
		size = sizeof(struct rusage_info_v2);
		break;

	case RUSAGE_INFO_V3:
		size = sizeof(struct rusage_info_v3);
		break;

	case RUSAGE_INFO_V4:
		size = sizeof(struct rusage_info_v4);
		break;

	case RUSAGE_INFO_V5:
		size = sizeof(struct rusage_info_v5);
		break;

	case RUSAGE_INFO_V6:
		size = sizeof(struct rusage_info_v6);
		break;
	default:
		return EINVAL;
	}

	if (size == 0) {
		return EINVAL;
	}

	/*
	 * If exit-time usage was already cached (zombie proc), return that;
	 * otherwise collect the info from the still-live task.
	 */
	if (p->p_ru) {
		return copyout(&p->p_ru->ri, buffer, size);
	} else {
		gather_rusage_info(p, &ri_current, flavor);
		ri_current.ri_proc_exit_abstime = 0;
		return copyout(&ri_current, buffer, size);
	}
}

static int
mach_to_bsd_rv(int mach_rv)
{
	int bsd_rv = 0;

	switch (mach_rv) {
	case KERN_SUCCESS:
		bsd_rv = 0;
		break;
	case KERN_INVALID_ARGUMENT:
		bsd_rv = EINVAL;
		break;
	default:
		panic("unknown error %#x", mach_rv);
	}

	return bsd_rv;
}

/*
 * Resource limit controls
 *
 * uap->flavor available flavors:
 *
 *	RLIMIT_WAKEUPS_MONITOR
 *	RLIMIT_CPU_USAGE_MONITOR
 *	RLIMIT_THREAD_CPULIMITS
 *	RLIMIT_FOOTPRINT_INTERVAL
 */
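/*
 * Illustrative only, not part of the original sources: a hedged sketch of
 * arming the wakeups monitor via RLIMIT_WAKEUPS_MONITOR, assuming a user-space
 * stub with the kernel's (pid, flavor, arg) signature and the WAKEMON_* and
 * struct proc_rlimit_control_wakeupmon definitions from <sys/resource.h>:
 *
 *	struct proc_rlimit_control_wakeupmon mon = {
 *		.wm_flags = WAKEMON_ENABLE,
 *		.wm_rate = 150,		// wakeups per second
 *	};
 *	// pid -1 targets the calling process (see below).
 *	proc_rlimit_control(-1, RLIMIT_WAKEUPS_MONITOR, &mon);
 */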
int
proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *uap, __unused int32_t *retval)
{
	proc_t targetp;
	int error = 0;
	struct proc_rlimit_control_wakeupmon wakeupmon_args;
	uint32_t cpumon_flags;
	uint32_t cpulimits_flags;
	kauth_cred_t my_cred, target_cred;
#if CONFIG_LEDGER_INTERVAL_MAX
	uint32_t footprint_interval_flags;
	uint64_t interval_max_footprint;
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

	/* -1 implicitly means our own process (perhaps even the current thread for per-thread attributes) */
	if (uap->pid == -1) {
		targetp = proc_self();
	} else {
		targetp = proc_find(uap->pid);
	}

	/* proc_self() can return NULL for an exiting process */
	if (targetp == PROC_NULL) {
		return ESRCH;
	}

	my_cred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) &&
	    kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred)) {
		proc_rele(targetp);
		kauth_cred_unref(&target_cred);
		return EACCES;
	}

	switch (uap->flavor) {
	case RLIMIT_WAKEUPS_MONITOR:
		if ((error = copyin(uap->arg, &wakeupmon_args, sizeof(wakeupmon_args))) != 0) {
			break;
		}
		if ((error = mach_to_bsd_rv(task_wakeups_monitor_ctl(proc_task(targetp), &wakeupmon_args.wm_flags,
		    &wakeupmon_args.wm_rate))) != 0) {
			break;
		}
		error = copyout(&wakeupmon_args, uap->arg, sizeof(wakeupmon_args));
		break;
	case RLIMIT_CPU_USAGE_MONITOR:
		cpumon_flags = (uint32_t)uap->arg; // XXX temporarily stashing flags in argp (12592127)
		error = mach_to_bsd_rv(task_cpu_usage_monitor_ctl(proc_task(targetp), &cpumon_flags));
		break;
	case RLIMIT_THREAD_CPULIMITS:
		cpulimits_flags = (uint32_t)uap->arg; // only need a limited set of bits, pass in void * argument

		if (uap->pid != -1) {
			error = EINVAL;
			break;
		}

		uint8_t percent = 0;
		uint32_t ms_refill = 0;
		uint64_t ns_refill;

		percent = (uint8_t)(cpulimits_flags & 0xffU);  /* low 8 bits for percent */
		ms_refill = (cpulimits_flags >> 8) & 0xffffff; /* next 24 bits represent ms refill value */
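		/*
		 * Worked example of this packing: a 50% limit with a 100 ms
		 * refill period is encoded as (100 << 8) | 50 == 0x6432.
		 */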
		if (percent >= 100 || percent == 0) {
			error = EINVAL;
			break;
		}

		ns_refill = ((uint64_t)ms_refill) * NSEC_PER_MSEC;

		error = mach_to_bsd_rv(thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ns_refill));
		break;

#if CONFIG_LEDGER_INTERVAL_MAX
	case RLIMIT_FOOTPRINT_INTERVAL:
		footprint_interval_flags = (uint32_t)uap->arg; // XXX temporarily stashing flags in argp (12592127)
		/*
		 * There is currently only one option for this flavor.
		 */
		if ((footprint_interval_flags & FOOTPRINT_INTERVAL_RESET) == 0) {
			error = EINVAL;
			break;
		}
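		/*
		 * The return values are intentionally discarded: passing TRUE
		 * asks the ledger code to reset the interval maxima as a side
		 * effect of the read.
		 */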
		interval_max_footprint = get_task_phys_footprint_interval_max(proc_task(targetp), TRUE);
		interval_max_footprint = get_task_neural_nofootprint_total_interval_max(proc_task(targetp), TRUE);
		break;

#endif /* CONFIG_LEDGER_INTERVAL_MAX */
	default:
		error = EINVAL;
		break;
	}

	proc_rele(targetp);
	kauth_cred_unref(&target_cred);

	/*
	 * The return value from this function becomes errno for the userland caller.
	 */
	return error;
}

/*
 * Return the current amount of CPU time consumed by this thread (in either user or kernel mode)
 */
int
thread_selfusage(struct proc *p __unused, struct thread_selfusage_args *uap __unused, uint64_t *retval)
{
	uint64_t runtime;

	runtime = thread_get_runtime_self();
	*retval = runtime;

	return 0;
}