1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1982, 1986, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/sysctl.h>
78 #include <sys/kernel.h>
79 #include <sys/file_internal.h>
80 #include <sys/resourcevar.h>
81 #include <sys/malloc.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount_internal.h>
85 #include <sys/sysproto.h>
86
87 #include <security/audit/audit.h>
88
89 #include <machine/vmparam.h>
90
91 #include <mach/mach_types.h>
92 #include <mach/time_value.h>
93 #include <mach/task.h>
94 #include <mach/task_info.h>
95 #include <mach/vm_map.h>
96 #include <mach/mach_vm.h>
97 #include <mach/thread_act.h> /* for thread_policy_set( ) */
98 #include <kern/thread.h>
99 #include <kern/policy_internal.h>
100
101 #include <kern/task.h>
102 #include <kern/clock.h> /* for absolutetime_to_microtime() */
103 #include <netinet/in.h> /* for TRAFFIC_MGT_SO_* */
104 #if CONFIG_FREEZE
105 #include <sys/kern_memorystatus_freeze.h> /* for memorystatus_freeze_mark_ui_transition */
106 #endif /* CONFIG_FREEZE */
107 #include <sys/kern_memorystatus_xnu.h> /* for memorystatus_get_proc_is_managed */
108 #include <sys/socketvar.h> /* for struct socket */
109 #if NECP
110 #include <net/necp.h>
111 #endif /* NECP */
112
113 #include <vm/vm_map_xnu.h>
114
115 #include <kern/assert.h>
116 #include <sys/resource.h>
117 #include <sys/resource_private.h>
118 #include <sys/priv.h>
119 #include <IOKit/IOBSD.h>
120
121 #if CONFIG_MACF
122 #include <security/mac_framework.h>
123 #endif
124
/* plimit update serialization helpers (block readers while limits change) */
static void proc_limitblock(proc_t p);
static void proc_limitunblock(proc_t p);
static void proc_limitupdate(proc_t p, bool unblock,
    void (^update)(struct plimit *plim));

/* worker routines behind the getpriority()/setpriority() cases below */
static int donice(struct proc *curp, struct proc *chgp, int n);
static int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp);
static void do_background_socket(struct proc *p, thread_t thread);
static int do_background_thread(thread_t thread, int priority);
static int do_background_proc(struct proc *curp, struct proc *targetp, int priority);
static int proc_set_gpurole(struct proc *curp, struct proc *targetp, int priority);
static int proc_get_gpurole(proc_t targetp, int *priority);
static int proc_set_darwin_role(proc_t curp, proc_t targetp, int priority);
static int proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority);
static int proc_set_game_mode(proc_t targetp, int priority);
static int proc_get_game_mode(proc_t targetp, int *priority);
static int proc_set_carplay_mode(proc_t targetp, int priority);
static int proc_get_carplay_mode(proc_t targetp, int *priority);
static int proc_set_runaway_mitigation(proc_t targetp, int priority);
static int proc_get_runaway_mitigation(proc_t targetp, int *priority);
static int get_background_proc(struct proc *curp, struct proc *targetp, int *priority);

/* rusage fill routines; non-static, consumed outside this file */
int fill_task_rusage(task_t task, rusage_info_current *ri);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
uint64_t get_task_logical_writes(task_t task, bool external);

rlim_t maxdmap = MAXDSIZ;       /* XXX */
rlim_t maxsmap = MAXSSIZ - PAGE_MAX_SIZE;       /* XXX */

/* For plimit reference count */
os_refgrp_decl(, rlimit_refgrp, "plimit_refcnt", NULL);

static KALLOC_TYPE_DEFINE(plimit_zone, struct plimit, KT_DEFAULT);

/*
 * Limits on the number of open files per process, and the number
 * of child processes per process.
 *
 * Note: would be in kern/subr_param.c in FreeBSD.
 */
__private_extern__ int maxfilesperproc = OPEN_MAX;      /* per-proc open files limit */

SYSCTL_INT(_kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW | CTLFLAG_LOCKED,
    &maxprocperuid, 0, "Maximum processes allowed per userid" );

SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW | CTLFLAG_LOCKED,
    &maxfilesperproc, 0, "Maximum files allowed open per process" );

/* Args and fn for proc_iteration callback used in setpriority */
struct puser_nice_args {
	proc_t curp;    /* calling process */
	int prio;       /* nice value to apply */
	id_t who;       /* uid whose processes are targeted */
	int * foundp;   /* out: incremented per matching process */
	int * errorp;   /* out: last error returned by donice() */
};
static int puser_donice_callback(proc_t p, void * arg);


/* Args and fn for proc_iteration callback used in setpriority */
struct ppgrp_nice_args {
	proc_t curp;    /* calling process */
	int prio;       /* nice value to apply */
	int * foundp;   /* out: incremented per pgrp member visited */
	int * errorp;   /* out: last error returned by donice() */
};
static int ppgrp_donice_callback(proc_t p, void * arg);
194
195 /*
196 * Resource controls and accounting.
197 */
/*
 * getpriority(2) system call.
 *
 * Return, via *retval, the nice value (classic BSD cases) or a Darwin
 * policy value (PRIO_DARWIN_* cases) for the entity named by
 * uap->which / uap->who.  who == 0 generally means "the caller".
 *
 * Returns:	0		Success
 *		EINVAL		Unsupported `which`, or `who` out of range
 *		ESRCH		No matching process / process group found
 *	proc_get_*:???		Errors from the Darwin policy getters
 */
int
getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval)
{
	struct proc *p;
	int low = PRIO_MAX + 1;	/* sentinel: "nothing found" -> ESRCH below */
	kauth_cred_t my_cred;
	int refheld = 0;	/* set when `p` carries a proc_find() reference */
	int error = 0;

	/* would also test (uap->who < 0), but id_t is unsigned */
	if (uap->who > 0x7fffffff) {
		return EINVAL;
	}

	switch (uap->which) {
	case PRIO_PROCESS:
		/* nice value of a single process; who == 0 is the caller */
		if (uap->who == 0) {
			p = curp;
			low = p->p_nice;
		} else {
			p = proc_find(uap->who);
			if (p == 0) {
				break;	/* `low` stays at sentinel -> ESRCH */
			}
			low = p->p_nice;
			proc_rele(p);
		}
		break;

	case PRIO_PGRP: {
		/* lowest nice value among all members of a process group */
		struct pgrp *pg = PGRP_NULL;

		if (uap->who == 0) {
			/* returns the pgrp to ref */
			pg = proc_pgrp(curp, NULL);
		} else if ((pg = pgrp_find(uap->who)) == PGRP_NULL) {
			break;
		}
		/* No need for iteration as it is a simple scan */
		pgrp_lock(pg);
		PGMEMBERS_FOREACH(pg, p) {
			if (p->p_nice < low) {
				low = p->p_nice;
			}
		}
		pgrp_unlock(pg);
		pgrp_rele(pg);
		break;
	}

	case PRIO_USER:
		/* lowest nice value among all processes owned by uid `who` */
		if (uap->who == 0) {
			uap->who = kauth_cred_getuid(kauth_cred_get());
		}

		proc_list_lock();

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			my_cred = kauth_cred_proc_ref(p);
			if (kauth_cred_getuid(my_cred) == uap->who &&
			    p->p_nice < low) {
				low = p->p_nice;
			}
			kauth_cred_unref(&my_cred);
		}

		proc_list_unlock();

		break;

	case PRIO_DARWIN_THREAD:
		/* we currently only support the current thread */
		if (uap->who != 0) {
			return EINVAL;
		}

		low = proc_get_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_DARWIN_BG);

		break;

	case PRIO_DARWIN_PROCESS:
		/* Darwin background state of a process */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = get_background_proc(curp, p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	case PRIO_DARWIN_ROLE:
		/* task role (UI focal / non-focal / ...) of a process */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_get_darwin_role(curp, p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	case PRIO_DARWIN_GAME_MODE:
		/* whether Game Mode is enabled for the target task */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_get_game_mode(p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	case PRIO_DARWIN_CARPLAY_MODE:
		/* whether CarPlay mode is enabled for the target task */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_get_carplay_mode(p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	case PRIO_DARWIN_GPU:
		/* GPU role of the target task */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_get_gpurole(p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	case PRIO_DARWIN_RUNAWAY_MITIGATION:
		/* whether runaway mitigation is applied to the target task */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_get_runaway_mitigation(p, &low);

		if (refheld) {
			proc_rele(p);
		}
		if (error) {
			return error;
		}
		break;

	default:
		return EINVAL;
	}
	/* sentinel untouched: no matching process/pgrp was found */
	if (low == PRIO_MAX + 1) {
		return ESRCH;
	}
	*retval = low;
	return 0;
}
417
418 /* call back function used for proc iteration in PRIO_USER */
419 static int
puser_donice_callback(proc_t p,void * arg)420 puser_donice_callback(proc_t p, void * arg)
421 {
422 int error, n;
423 struct puser_nice_args * pun = (struct puser_nice_args *)arg;
424 kauth_cred_t my_cred;
425
426 my_cred = kauth_cred_proc_ref(p);
427 if (kauth_cred_getuid(my_cred) == pun->who) {
428 error = donice(pun->curp, p, pun->prio);
429 if (pun->errorp != NULL) {
430 *pun->errorp = error;
431 }
432 if (pun->foundp != NULL) {
433 n = *pun->foundp;
434 *pun->foundp = n + 1;
435 }
436 }
437 kauth_cred_unref(&my_cred);
438
439 return PROC_RETURNED;
440 }
441
442 /* call back function used for proc iteration in PRIO_PGRP */
443 static int
ppgrp_donice_callback(proc_t p,void * arg)444 ppgrp_donice_callback(proc_t p, void * arg)
445 {
446 int error;
447 struct ppgrp_nice_args * pun = (struct ppgrp_nice_args *)arg;
448 int n;
449
450 error = donice(pun->curp, p, pun->prio);
451 if (pun->errorp != NULL) {
452 *pun->errorp = error;
453 }
454 if (pun->foundp != NULL) {
455 n = *pun->foundp;
456 *pun->foundp = n + 1;
457 }
458
459 return PROC_RETURNED;
460 }
461
/*
 * setpriority(2) system call.
 *
 * Set the nice value (classic BSD cases) or apply a Darwin policy
 * (PRIO_DARWIN_* cases) to the entity named by uap->which / uap->who.
 *
 * Returns:	0	Success
 *		EINVAL
 *		ESRCH
 *	donice:EPERM
 *	donice:EACCES
 */
/* ARGSUSED */
int
setpriority(struct proc *curp, struct setpriority_args *uap, int32_t *retval)
{
	struct proc *p;
	int found = 0, error = 0;	/* `found` counts processes acted upon */
	int refheld = 0;		/* set when `p` carries a proc_find() ref */

	AUDIT_ARG(cmd, uap->which);
	AUDIT_ARG(owner, uap->who, 0);
	AUDIT_ARG(value32, uap->prio);

	/* would also test (uap->who < 0), but id_t is unsigned */
	if (uap->who > 0x7fffffff) {
		return EINVAL;
	}

	switch (uap->which) {
	case PRIO_PROCESS:
		/* classic nice value of one process; who == 0 is the caller */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == 0) {
				break;	/* found stays 0 -> ESRCH */
			}
			refheld = 1;
		}
		error = donice(curp, p, uap->prio);
		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;

	case PRIO_PGRP: {
		/* renice every member of a process group via callback */
		struct pgrp *pg = PGRP_NULL;
		struct ppgrp_nice_args ppgrp;

		if (uap->who == 0) {
			pg = proc_pgrp(curp, NULL);
		} else if ((pg = pgrp_find(uap->who)) == PGRP_NULL) {
			break;
		}

		ppgrp.curp = curp;
		ppgrp.prio = uap->prio;
		ppgrp.foundp = &found;
		ppgrp.errorp = &error;

		pgrp_iterate(pg, ppgrp_donice_callback, (void *)&ppgrp, NULL);
		pgrp_rele(pg);

		break;
	}

	case PRIO_USER: {
		/* renice every process owned by uid `who` via callback */
		struct puser_nice_args punice;

		if (uap->who == 0) {
			uap->who = kauth_cred_getuid(kauth_cred_get());
		}

		punice.curp = curp;
		punice.prio = uap->prio;
		punice.who = uap->who;
		punice.foundp = &found;
		error = 0;
		punice.errorp = &error;
		proc_iterate(PROC_ALLPROCLIST, puser_donice_callback, (void *)&punice, NULL, NULL);

		break;
	}

	case PRIO_DARWIN_THREAD: {
		/* we currently only support the current thread */
		if (uap->who != 0) {
			return EINVAL;
		}

		error = do_background_thread(current_thread(), uap->prio);
		found++;
		break;
	}

	case PRIO_DARWIN_PROCESS: {
		/* set/clear the Darwin background policy on a process */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == 0) {
				break;
			}
			refheld = 1;
		}

		error = do_background_proc(curp, p, uap->prio);

		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;
	}

	case PRIO_DARWIN_GPU: {
		/* set the GPU role; self-targeting via who == 0 is rejected */
		if (uap->who == 0) {
			return EINVAL;
		}

		p = proc_find(uap->who);
		if (p == PROC_NULL) {
			break;
		}

		error = proc_set_gpurole(curp, p, uap->prio);

		found++;
		proc_rele(p);
		break;
	}

	case PRIO_DARWIN_ROLE: {
		/* set the task role (UI focal / non-focal / ...) */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_set_darwin_role(curp, p, uap->prio);

		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;
	}

	case PRIO_DARWIN_GAME_MODE: {
		/* toggle Game Mode on the target task */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_set_game_mode(p, uap->prio);

		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;
	}

	case PRIO_DARWIN_CARPLAY_MODE: {
		/* toggle CarPlay mode on the target task */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_set_carplay_mode(p, uap->prio);

		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;
	}

	case PRIO_DARWIN_RUNAWAY_MITIGATION: {
		/* toggle runaway mitigation on the target task */
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL) {
				break;
			}
			refheld = 1;
		}

		error = proc_set_runaway_mitigation(p, uap->prio);

		found++;
		if (refheld != 0) {
			proc_rele(p);
		}
		break;
	}

	default:
		return EINVAL;
	}
	if (found == 0) {
		return ESRCH;
	}
	/*
	 * NOTE(review): EIDRM from the policy layer appears to be remapped
	 * to a -2 return value for userspace — confirm against the
	 * do_background_thread() contract.
	 */
	if (error == EIDRM) {
		*retval = -2;
		error = 0;
	}
	return error;
}
684
685
686 /*
687 * Returns: 0 Success
688 * EPERM
689 * EACCES
690 * mac_check_proc_sched:???
691 */
692 static int
donice(struct proc * curp,struct proc * chgp,int n)693 donice(struct proc *curp, struct proc *chgp, int n)
694 {
695 int error = 0;
696 kauth_cred_t ucred;
697 kauth_cred_t my_cred;
698
699 ucred = kauth_cred_proc_ref(curp);
700 my_cred = kauth_cred_proc_ref(chgp);
701
702 if (suser(ucred, NULL) && kauth_cred_getruid(ucred) &&
703 kauth_cred_getuid(ucred) != kauth_cred_getuid(my_cred) &&
704 kauth_cred_getruid(ucred) != kauth_cred_getuid(my_cred)) {
705 error = EPERM;
706 goto out;
707 }
708 if (n > PRIO_MAX) {
709 n = PRIO_MAX;
710 }
711 if (n < PRIO_MIN) {
712 n = PRIO_MIN;
713 }
714 if (n < chgp->p_nice && suser(ucred, &curp->p_acflag)) {
715 error = EACCES;
716 goto out;
717 }
718 #if CONFIG_MACF
719 error = mac_proc_check_sched(curp, chgp);
720 if (error) {
721 goto out;
722 }
723 #endif
724 proc_lock(chgp);
725 chgp->p_nice = (char)n;
726 proc_unlock(chgp);
727 (void)resetpriority(chgp);
728 out:
729 kauth_cred_unref(&ucred);
730 kauth_cred_unref(&my_cred);
731 return error;
732 }
733
734 #define SET_GPU_ROLE_ENTITLEMENT "com.apple.private.set-gpu-role"
735
736 static int
proc_set_gpurole(struct proc * curp,struct proc * targetp,int priority)737 proc_set_gpurole(struct proc *curp, struct proc *targetp, int priority)
738 {
739 int error = 0;
740 kauth_cred_t ucred;
741 kauth_cred_t target_cred;
742
743 ucred = kauth_cred_get();
744 target_cred = kauth_cred_proc_ref(targetp);
745
746 boolean_t entitled = FALSE;
747 entitled = IOCurrentTaskHasEntitlement(SET_GPU_ROLE_ENTITLEMENT);
748 if (!entitled) {
749 error = EPERM;
750 goto out;
751 }
752
753 if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
754 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
755 kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
756 error = EPERM;
757 goto out;
758 }
759
760 if (curp == targetp) {
761 error = EPERM;
762 goto out;
763 }
764
765 #if CONFIG_MACF
766 error = mac_proc_check_sched(curp, targetp);
767 if (error) {
768 goto out;
769 }
770 #endif
771
772 switch (priority) {
773 case PRIO_DARWIN_GPU_UNKNOWN:
774 case PRIO_DARWIN_GPU_ALLOW:
775 case PRIO_DARWIN_GPU_DENY:
776 case PRIO_DARWIN_GPU_BACKGROUND:
777 case PRIO_DARWIN_GPU_UTILITY:
778 case PRIO_DARWIN_GPU_UI_NON_FOCAL:
779 case PRIO_DARWIN_GPU_UI:
780 case PRIO_DARWIN_GPU_UI_FOCAL:
781 task_set_gpu_role(proc_task(targetp),
782 (darwin_gpu_role_t)priority);
783 break;
784 default:
785 error = EINVAL;
786 goto out;
787 }
788
789 out:
790 kauth_cred_unref(&target_cred);
791 return error;
792 }
793
794 static int
proc_get_gpurole(proc_t targetp,int * priority)795 proc_get_gpurole(proc_t targetp, int *priority)
796 {
797 int error = 0;
798
799 kauth_cred_t ucred, target_cred;
800
801 ucred = kauth_cred_get();
802 target_cred = kauth_cred_proc_ref(targetp);
803
804 boolean_t entitled = FALSE;
805 entitled = IOCurrentTaskHasEntitlement(SET_GPU_ROLE_ENTITLEMENT);
806
807 /* Root is allowed to get without entitlement */
808 if (!kauth_cred_issuser(ucred) && !entitled) {
809 error = EPERM;
810 goto out;
811 }
812
813 /* Even with entitlement, non-root is only alllowed to see same-user */
814 if (!kauth_cred_issuser(ucred) &&
815 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
816 error = EPERM;
817 goto out;
818 }
819
820 darwin_gpu_role_t gpurole = task_get_gpu_role(proc_task(targetp));
821
822 *priority = gpurole;
823
824 out:
825 kauth_cred_unref(&target_cred);
826 return error;
827 }
828
829
/*
 * Set the task role (TASK_POLICY_ROLE) of `targetp` from a userspace
 * PRIO_DARWIN_ROLE_* value, on behalf of `curp`.
 *
 * Returns:	0	Success
 *		EPERM	Cred mismatch and no SETPRIORITY_DARWIN_ROLE privilege
 *		ENOTSUP	Target is not marked as an application
 *	proc_darwin_role_to_task_role:EINVAL (unrecognized role value)
 *	mac_proc_check_sched:???
 */
static int
proc_set_darwin_role(proc_t curp, proc_t targetp, int priority)
{
	int error = 0;
	uint32_t flagsp = 0;

	kauth_cred_t ucred, target_cred;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	/*
	 * Non-root callers with a nonzero real uid may only act on same-uid
	 * targets, unless they hold the SETPRIORITY_DARWIN_ROLE privilege.
	 */
	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
		if (priv_check_cred(ucred, PRIV_SETPRIORITY_DARWIN_ROLE, 0) != 0) {
			error = EPERM;
			goto out;
		}
	}

	/* MACF check is skipped when a process targets itself. */
	if (curp != targetp) {
#if CONFIG_MACF
		if ((error = mac_proc_check_sched(curp, targetp))) {
			goto out;
		}
#endif
	}

	/* Roles are only supported for processes marked as applications. */
	proc_get_darwinbgstate(proc_task(targetp), &flagsp);
	if ((flagsp & PROC_FLAG_APPLICATION) != PROC_FLAG_APPLICATION) {
		error = ENOTSUP;
		goto out;
	}

	task_role_t role = TASK_UNSPECIFIED;

	/* Translate the userspace role constant into a task role. */
	if ((error = proc_darwin_role_to_task_role(priority, &role))) {
		goto out;
	}

	proc_set_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE,
	    TASK_POLICY_ROLE, role);

#if CONFIG_FREEZE
	/* Tell the freezer about transitions into a UI role. */
	if (priority == PRIO_DARWIN_ROLE_UI_FOCAL || priority == PRIO_DARWIN_ROLE_UI || priority == PRIO_DARWIN_ROLE_UI_NON_FOCAL) {
		memorystatus_freezer_mark_ui_transition(targetp);
	}
#endif /* CONFIG_FREEZE */

out:
	kauth_cred_unref(&target_cred);
	return error;
}
883
884 static int
proc_get_darwin_role(proc_t curp,proc_t targetp,int * priority)885 proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority)
886 {
887 int error = 0;
888 int role = 0;
889
890 kauth_cred_t ucred, target_cred;
891
892 ucred = kauth_cred_get();
893 target_cred = kauth_cred_proc_ref(targetp);
894
895 if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
896 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
897 kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
898 error = EPERM;
899 goto out;
900 }
901
902 if (curp != targetp) {
903 #if CONFIG_MACF
904 if ((error = mac_proc_check_sched(curp, targetp))) {
905 goto out;
906 }
907 #endif
908 }
909
910 role = proc_get_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
911
912 *priority = proc_task_role_to_darwin_role(role);
913
914 out:
915 kauth_cred_unref(&target_cred);
916 return error;
917 }
918
919 #define SET_GAME_MODE_ENTITLEMENT "com.apple.private.set-game-mode"
920
921 static int
proc_set_game_mode(proc_t targetp,int priority)922 proc_set_game_mode(proc_t targetp, int priority)
923 {
924 int error = 0;
925
926 kauth_cred_t ucred, target_cred;
927
928 ucred = kauth_cred_get();
929 target_cred = kauth_cred_proc_ref(targetp);
930
931 boolean_t entitled = FALSE;
932 entitled = IOCurrentTaskHasEntitlement(SET_GAME_MODE_ENTITLEMENT);
933 if (!entitled) {
934 error = EPERM;
935 goto out;
936 }
937
938 /* Even with entitlement, non-root is only alllowed to set same-user */
939 if (!kauth_cred_issuser(ucred) &&
940 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
941 error = EPERM;
942 goto out;
943 }
944
945 switch (priority) {
946 case PRIO_DARWIN_GAME_MODE_OFF:
947 task_set_game_mode(proc_task(targetp), false);
948 break;
949 case PRIO_DARWIN_GAME_MODE_ON:
950 task_set_game_mode(proc_task(targetp), true);
951 break;
952 default:
953 error = EINVAL;
954 goto out;
955 }
956
957 out:
958 kauth_cred_unref(&target_cred);
959 return error;
960 }
961
962 static int
proc_get_game_mode(proc_t targetp,int * priority)963 proc_get_game_mode(proc_t targetp, int *priority)
964 {
965 int error = 0;
966
967 kauth_cred_t ucred, target_cred;
968
969 ucred = kauth_cred_get();
970 target_cred = kauth_cred_proc_ref(targetp);
971
972 boolean_t entitled = FALSE;
973 entitled = IOCurrentTaskHasEntitlement(SET_GAME_MODE_ENTITLEMENT);
974
975 /* Root is allowed to get without entitlement */
976 if (!kauth_cred_issuser(ucred) && !entitled) {
977 error = EPERM;
978 goto out;
979 }
980
981 /* Even with entitlement, non-root is only alllowed to see same-user */
982 if (!kauth_cred_issuser(ucred) &&
983 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
984 error = EPERM;
985 goto out;
986 }
987
988 if (task_get_game_mode(proc_task(targetp))) {
989 *priority = PRIO_DARWIN_GAME_MODE_ON;
990 } else {
991 *priority = PRIO_DARWIN_GAME_MODE_OFF;
992 }
993
994 out:
995 kauth_cred_unref(&target_cred);
996 return error;
997 }
998
999 #define SET_CARPLAY_MODE_ENTITLEMENT "com.apple.private.set-carplay-mode"
1000
1001 static int
proc_set_carplay_mode(proc_t targetp,int priority)1002 proc_set_carplay_mode(proc_t targetp, int priority)
1003 {
1004 int error = 0;
1005
1006 kauth_cred_t ucred, target_cred;
1007
1008 ucred = kauth_cred_get();
1009 target_cred = kauth_cred_proc_ref(targetp);
1010
1011 boolean_t entitled = FALSE;
1012 entitled = IOCurrentTaskHasEntitlement(SET_CARPLAY_MODE_ENTITLEMENT);
1013 if (!entitled) {
1014 error = EPERM;
1015 goto out;
1016 }
1017
1018 /* Even with entitlement, non-root is only alllowed to set same-user */
1019 if (!kauth_cred_issuser(ucred) &&
1020 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
1021 error = EPERM;
1022 goto out;
1023 }
1024
1025 switch (priority) {
1026 case PRIO_DARWIN_CARPLAY_MODE_OFF:
1027 task_set_carplay_mode(proc_task(targetp), false);
1028 break;
1029 case PRIO_DARWIN_CARPLAY_MODE_ON:
1030 task_set_carplay_mode(proc_task(targetp), true);
1031 break;
1032 default:
1033 error = EINVAL;
1034 goto out;
1035 }
1036
1037 out:
1038 kauth_cred_unref(&target_cred);
1039 return error;
1040 }
1041
1042 static int
proc_get_carplay_mode(proc_t targetp,int * priority)1043 proc_get_carplay_mode(proc_t targetp, int *priority)
1044 {
1045 int error = 0;
1046
1047 kauth_cred_t ucred, target_cred;
1048
1049 ucred = kauth_cred_get();
1050 target_cred = kauth_cred_proc_ref(targetp);
1051
1052 boolean_t entitled = FALSE;
1053 entitled = IOCurrentTaskHasEntitlement(SET_CARPLAY_MODE_ENTITLEMENT);
1054
1055 /* Root is allowed to get without entitlement */
1056 if (!kauth_cred_issuser(ucred) && !entitled) {
1057 error = EPERM;
1058 goto out;
1059 }
1060
1061 /* Even with entitlement, non-root is only alllowed to see same-user */
1062 if (!kauth_cred_issuser(ucred) &&
1063 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
1064 error = EPERM;
1065 goto out;
1066 }
1067
1068 if (task_get_carplay_mode(proc_task(targetp))) {
1069 *priority = PRIO_DARWIN_CARPLAY_MODE_ON;
1070 } else {
1071 *priority = PRIO_DARWIN_CARPLAY_MODE_OFF;
1072 }
1073
1074 out:
1075 kauth_cred_unref(&target_cred);
1076 return error;
1077 }
1078
1079 #define RUNAWAY_MITIGATION_ENTITLEMENT "com.apple.private.runaway-mitigation"
1080
1081 /* Boot arg to allow RunningBoard-managed processes to be mitigated */
1082 static TUNABLE(bool, allow_managed_mitigation, "allow_managed_mitigation", false);
1083
1084 static int
proc_set_runaway_mitigation(proc_t targetp,int priority)1085 proc_set_runaway_mitigation(proc_t targetp, int priority)
1086 {
1087 int error = 0;
1088
1089 kauth_cred_t ucred, target_cred;
1090
1091 ucred = kauth_cred_get();
1092 target_cred = kauth_cred_proc_ref(targetp);
1093
1094 boolean_t entitled = FALSE;
1095 entitled = IOCurrentTaskHasEntitlement(RUNAWAY_MITIGATION_ENTITLEMENT);
1096 if (!entitled) {
1097 error = EPERM;
1098 goto out;
1099 }
1100
1101 /* Even with entitlement, non-root is only alllowed to set same-user */
1102 if (!kauth_cred_issuser(ucred) &&
1103 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
1104 error = EPERM;
1105 goto out;
1106 }
1107
1108 switch (priority) {
1109 case PRIO_DARWIN_RUNAWAY_MITIGATION_OFF:
1110 printf("%s[%d] disabling runaway mitigation on %s[%d]\n",
1111 proc_best_name(current_proc()), proc_selfpid(),
1112 proc_best_name(targetp), proc_getpid(targetp));
1113
1114 proc_set_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE,
1115 TASK_POLICY_RUNAWAY_MITIGATION, TASK_POLICY_DISABLE);
1116 break;
1117
1118 case PRIO_DARWIN_RUNAWAY_MITIGATION_ON:
1119 /*
1120 * RunningBoard-managed processes are not mitigatable - they should be
1121 * managed through RunningBoard-level interfaces instead.
1122 * Set the boot arg allow_managed_mitigation=1 to allow this.
1123 */
1124 if (memorystatus_get_proc_is_managed(targetp) && !allow_managed_mitigation) {
1125 printf("%s[%d] blocked from disabling runaway mitigation on RunningBoard managed process %s[%d]\n",
1126 proc_best_name(current_proc()), proc_selfpid(),
1127 proc_best_name(targetp), proc_getpid(targetp));
1128
1129 error = ENOTSUP;
1130 goto out;
1131 }
1132
1133 proc_set_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE,
1134 TASK_POLICY_RUNAWAY_MITIGATION, TASK_POLICY_ENABLE);
1135
1136 printf("%s[%d] enabling runaway mitigation on %s[%d]\n",
1137 proc_best_name(current_proc()), proc_selfpid(),
1138 proc_best_name(targetp), proc_getpid(targetp));
1139 break;
1140
1141 default:
1142 error = EINVAL;
1143 goto out;
1144 }
1145
1146 out:
1147 kauth_cred_unref(&target_cred);
1148 return error;
1149 }
1150
1151 static int
proc_get_runaway_mitigation(proc_t targetp,int * priority)1152 proc_get_runaway_mitigation(proc_t targetp, int *priority)
1153 {
1154 int error = 0;
1155
1156 kauth_cred_t ucred, target_cred;
1157
1158 ucred = kauth_cred_get();
1159 target_cred = kauth_cred_proc_ref(targetp);
1160
1161 boolean_t entitled = FALSE;
1162 entitled = IOCurrentTaskHasEntitlement(RUNAWAY_MITIGATION_ENTITLEMENT);
1163
1164 /* Root is allowed to get without entitlement */
1165 if (!kauth_cred_issuser(ucred) && !entitled) {
1166 error = EPERM;
1167 goto out;
1168 }
1169
1170 /* Even with entitlement, non-root is only alllowed to see same-user */
1171 if (!kauth_cred_issuser(ucred) &&
1172 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred)) {
1173 error = EPERM;
1174 goto out;
1175 }
1176
1177 if (proc_get_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE, TASK_POLICY_RUNAWAY_MITIGATION)) {
1178 *priority = PRIO_DARWIN_RUNAWAY_MITIGATION_ON;
1179 } else {
1180 *priority = PRIO_DARWIN_RUNAWAY_MITIGATION_OFF;
1181 }
1182
1183 out:
1184 kauth_cred_unref(&target_cred);
1185 return error;
1186 }
1187
1188
1189 static int
get_background_proc(struct proc * curp,struct proc * targetp,int * priority)1190 get_background_proc(struct proc *curp, struct proc *targetp, int *priority)
1191 {
1192 int external = 0;
1193 int error = 0;
1194 kauth_cred_t ucred, target_cred;
1195
1196 ucred = kauth_cred_get();
1197 target_cred = kauth_cred_proc_ref(targetp);
1198
1199 if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
1200 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
1201 kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
1202 error = EPERM;
1203 goto out;
1204 }
1205
1206 external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
1207
1208 *priority = proc_get_task_policy(current_task(), external, TASK_POLICY_DARWIN_BG);
1209
1210 out:
1211 kauth_cred_unref(&target_cred);
1212 return error;
1213 }
1214
1215 static int
do_background_proc(struct proc * curp,struct proc * targetp,int priority)1216 do_background_proc(struct proc *curp, struct proc *targetp, int priority)
1217 {
1218 #if !CONFIG_MACF
1219 #pragma unused(curp)
1220 #endif
1221 int error = 0;
1222 kauth_cred_t ucred;
1223 kauth_cred_t target_cred;
1224 int external;
1225 int enable;
1226
1227 ucred = kauth_cred_get();
1228 target_cred = kauth_cred_proc_ref(targetp);
1229
1230 if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
1231 kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
1232 kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
1233 error = EPERM;
1234 goto out;
1235 }
1236
1237 #if CONFIG_MACF
1238 error = mac_proc_check_sched(curp, targetp);
1239 if (error) {
1240 goto out;
1241 }
1242 #endif
1243
1244 external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
1245
1246 switch (priority) {
1247 case PRIO_DARWIN_BG:
1248 enable = TASK_POLICY_ENABLE;
1249 break;
1250 case PRIO_DARWIN_NONUI:
1251 /* ignored for compatibility */
1252 goto out;
1253 default:
1254 /* TODO: EINVAL if priority != 0 */
1255 enable = TASK_POLICY_DISABLE;
1256 break;
1257 }
1258
1259 proc_set_task_policy(proc_task(targetp), external, TASK_POLICY_DARWIN_BG, enable);
1260
1261 out:
1262 kauth_cred_unref(&target_cred);
1263 return error;
1264 }
1265
/*
 * do_background_socket - propagate the effective DARWIN_BG socket policy
 * of a process (thread == THREAD_NULL) or a single thread onto the open
 * sockets / NECP clients in p's file table, setting or clearing
 * TRAFFIC_MGT_SO_BACKGROUND so networking I/O throttling matches the
 * scheduling policy.  The whole file-table walk runs under the fdlock.
 */
static void
do_background_socket(struct proc *p, thread_t thread)
{
#if SOCKETS
	struct fileproc *fp;
	int background = false;
#if NECP
	/* set when any NECP client changed state, so we notify after unlock */
	int update_necp = false;
#endif /* NECP */

	/* A thread's sockets are only adjusted if the thread belongs to p. */
	if (thread != THREAD_NULL &&
	    get_threadtask(thread) != proc_task(p)) {
		return;
	}

	proc_fdlock(p);

	/* Effective BG state: per-thread for THREAD scope, per-task otherwise. */
	if (thread != THREAD_NULL) {
		background = proc_get_effective_thread_policy(thread, TASK_POLICY_ALL_SOCKETS_BG);
	} else {
		background = proc_get_effective_task_policy(proc_task(p), TASK_POLICY_ALL_SOCKETS_BG);
	}

	if (background) {
		/*
		 * For PRIO_DARWIN_PROCESS (thread is NULL), simply mark
		 * the sockets with the background flag. There's nothing
		 * to do here for the PRIO_DARWIN_THREAD case.
		 */
		if (thread == THREAD_NULL) {
			fdt_foreach(fp, p) {
				if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) {
					struct socket *sockp = (struct socket *)fp_get_data(fp);
					socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
					/* process-wide marking: no owning thread */
					sockp->so_background_thread = NULL;
				}
#if NECP
				else if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_NETPOLICY) {
					if (necp_set_client_as_background(p, fp, background)) {
						update_necp = true;
					}
				}
#endif /* NECP */
			}
		}
	} else {
		/* disable networking IO throttle.
		 * NOTE - It is a known limitation of the current design that we
		 * could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for
		 * sockets created by other threads within this process.
		 */
		fdt_foreach(fp, p) {
			struct socket *sockp;

			if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) {
				sockp = (struct socket *)fp_get_data(fp);
				/* skip if only clearing this thread's sockets */
				if ((thread) && (sockp->so_background_thread != thread)) {
					continue;
				}
				socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
				sockp->so_background_thread = NULL;
			}
#if NECP
			else if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_NETPOLICY) {
				if (necp_set_client_as_background(p, fp, background)) {
					update_necp = true;
				}
			}
#endif /* NECP */
		}
	}

	proc_fdunlock(p);

#if NECP
	/* Notify NECP only after dropping the fdlock. */
	if (update_necp) {
		necp_update_all_clients();
	}
#endif /* NECP */
#else
#pragma unused(p, thread)
#endif
}
1350
1351
1352 /*
1353 * do_background_thread
1354 *
1355 * Requires: thread reference
1356 *
1357 * Returns: 0 Success
1358 * EPERM Tried to background while in vfork
1359 * XXX - todo - does this need a MACF hook?
1360 */
1361 static int
do_background_thread(thread_t thread,int priority)1362 do_background_thread(thread_t thread, int priority)
1363 {
1364 int enable, external;
1365 int rv = 0;
1366
1367 /* Backgrounding is unsupported for workq threads */
1368 if (thread_is_static_param(thread)) {
1369 return EPERM;
1370 }
1371
1372 /* Not allowed to combine QoS and DARWIN_BG, doing so strips the QoS */
1373 if (thread_has_qos_policy(thread)) {
1374 thread_remove_qos_policy(thread);
1375 rv = EIDRM;
1376 }
1377
1378 /* TODO: Fail if someone passes something besides 0 or PRIO_DARWIN_BG */
1379 enable = (priority == PRIO_DARWIN_BG) ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE;
1380 external = (current_thread() == thread) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
1381
1382 proc_set_thread_policy(thread, external, TASK_POLICY_DARWIN_BG, enable);
1383
1384 return rv;
1385 }
1386
1387
1388 /*
1389 * Returns: 0 Success
1390 * copyin:EFAULT
1391 * dosetrlimit:
1392 */
1393 /* ARGSUSED */
1394 int
setrlimit(struct proc * p,struct setrlimit_args * uap,__unused int32_t * retval)1395 setrlimit(struct proc *p, struct setrlimit_args *uap, __unused int32_t *retval)
1396 {
1397 struct rlimit alim;
1398 int error;
1399
1400 if ((error = copyin(uap->rlp, (caddr_t)&alim,
1401 sizeof(struct rlimit)))) {
1402 return error;
1403 }
1404
1405 return dosetrlimit(p, uap->which, &alim);
1406 }
1407
1408 /*
1409 * Returns: 0 Success
1410 * EINVAL
1411 * suser:EPERM
1412 *
1413 * Notes: EINVAL is returned both for invalid arguments, and in the
1414 * case that the current usage (e.g. RLIMIT_STACK) is already
1415 * in excess of the requested limit.
1416 */
1417 static int
dosetrlimit(struct proc * p,u_int which,struct rlimit * newrlim)1418 dosetrlimit(struct proc *p, u_int which, struct rlimit *newrlim)
1419 {
1420 struct rlimit rlim, stack_rlim = {.rlim_cur = 0, .rlim_max = 0};
1421 int error;
1422 kern_return_t kr;
1423
1424 /* Mask out POSIX flag, saved above */
1425 which &= ~_RLIMIT_POSIX_FLAG;
1426
1427 /* Unknown resource */
1428 if (which >= RLIM_NLIMITS) {
1429 return EINVAL;
1430 }
1431
1432 proc_lock(p);
1433
1434 /* Only one thread is able to change the current process's rlimit values */
1435 proc_limitblock(p);
1436
1437 /*
1438 * Take a snapshot of the current rlimit values and read this throughout
1439 * this routine. This minimizes the critical sections and allow other
1440 * processes in the system to access the plimit while we are in the
1441 * middle of this setrlimit call.
1442 */
1443 rlim = smr_serialized_load(&p->p_limit)->pl_rlimit[which];
1444
1445 proc_unlock(p);
1446
1447 error = 0;
1448 /* Sanity check: new soft limit cannot exceed new hard limit */
1449 if (newrlim->rlim_cur > newrlim->rlim_max) {
1450 error = EINVAL;
1451 }
1452 /*
1453 * Sanity check: only super-user may raise the hard limit.
1454 * newrlim->rlim_cur > rlim.rlim_max implies that the call
1455 * is increasing the hard limit as well.
1456 */
1457 else if (newrlim->rlim_cur > rlim.rlim_max || newrlim->rlim_max > rlim.rlim_max) {
1458 /* suser() returns 0 if the calling thread is super user. */
1459 error = suser(kauth_cred_get(), &p->p_acflag);
1460 }
1461
1462 if (error) {
1463 /* Invalid setrlimit request: EINVAL or EPERM */
1464 goto out;
1465 }
1466
1467 /* We have the reader lock of the process's plimit so it's safe to read the rlimit values */
1468 switch (which) {
1469 case RLIMIT_CPU:
1470 if (newrlim->rlim_cur == RLIM_INFINITY) {
1471 task_vtimer_clear(proc_task(p), TASK_VTIMER_RLIM);
1472 timerclear(&p->p_rlim_cpu);
1473 } else {
1474 task_absolutetime_info_data_t tinfo;
1475 mach_msg_type_number_t count;
1476 struct timeval ttv, tv;
1477 clock_sec_t tv_sec;
1478 clock_usec_t tv_usec;
1479
1480 count = TASK_ABSOLUTETIME_INFO_COUNT;
1481 task_info(proc_task(p), TASK_ABSOLUTETIME_INFO, (task_info_t)&tinfo, &count);
1482 absolutetime_to_microtime(tinfo.total_user + tinfo.total_system, &tv_sec, &tv_usec);
1483 ttv.tv_sec = tv_sec;
1484 ttv.tv_usec = tv_usec;
1485
1486 tv.tv_sec = (newrlim->rlim_cur > __INT_MAX__ ? __INT_MAX__ : (__darwin_time_t)newrlim->rlim_cur);
1487 tv.tv_usec = 0;
1488 timersub(&tv, &ttv, &p->p_rlim_cpu);
1489
1490 timerclear(&tv);
1491 if (timercmp(&p->p_rlim_cpu, &tv, >)) {
1492 task_vtimer_set(proc_task(p), TASK_VTIMER_RLIM);
1493 } else {
1494 task_vtimer_clear(proc_task(p), TASK_VTIMER_RLIM);
1495
1496 timerclear(&p->p_rlim_cpu);
1497
1498 psignal(p, SIGXCPU);
1499 }
1500 }
1501 break;
1502
1503 case RLIMIT_DATA:
1504 #if 00
1505 if (newrlim->rlim_cur > maxdmap) {
1506 newrlim->rlim_cur = maxdmap;
1507 }
1508 if (newrlim->rlim_max > maxdmap) {
1509 newrlim->rlim_max = maxdmap;
1510 }
1511 #endif
1512
1513 /* Over to Mach VM to validate the new data limit */
1514 if (vm_map_set_data_limit(current_map(), newrlim->rlim_cur) != KERN_SUCCESS) {
1515 /* The limit specified cannot be lowered because current usage is already higher than the limit. */
1516 error = EINVAL;
1517 goto out;
1518 }
1519 break;
1520
1521 case RLIMIT_STACK:
1522 if (p->p_lflag & P_LCUSTOM_STACK) {
1523 /* Process has a custom stack set - rlimit cannot be used to change it */
1524 error = EINVAL;
1525 goto out;
1526 }
1527
1528 /*
1529 * Note: the real stack size limit is enforced by maxsmap, not a process's RLIMIT_STACK.
1530 *
1531 * The kernel uses maxsmap to control the actual stack size limit. While we allow
1532 * processes to set RLIMIT_STACK to RLIM_INFINITY (UNIX 03), accessing memory
1533 * beyond the maxsmap will still trigger an exception.
1534 *
1535 * stack_rlim is used to store the user-defined RLIMIT_STACK values while we adjust
1536 * the stack size using kernel limit (i.e. maxsmap).
1537 */
1538 if (newrlim->rlim_cur > maxsmap ||
1539 newrlim->rlim_max > maxsmap) {
1540 if (newrlim->rlim_cur > maxsmap) {
1541 stack_rlim.rlim_cur = newrlim->rlim_cur;
1542 newrlim->rlim_cur = maxsmap;
1543 }
1544 if (newrlim->rlim_max > maxsmap) {
1545 stack_rlim.rlim_max = newrlim->rlim_max;
1546 newrlim->rlim_max = maxsmap;
1547 }
1548 }
1549
1550 /*
1551 * rlim.rlim_cur/rlim_max could be arbitrarily large due to previous calls to setrlimit().
1552 * Use the actual size for stack region adjustment.
1553 */
1554 if (rlim.rlim_cur > maxsmap) {
1555 rlim.rlim_cur = maxsmap;
1556 }
1557 if (rlim.rlim_max > maxsmap) {
1558 rlim.rlim_max = maxsmap;
1559 }
1560
1561 /*
1562 * Stack is allocated to the max at exec time with only
1563 * "rlim_cur" bytes accessible. If stack limit is going
1564 * up make more accessible, if going down make inaccessible.
1565 */
1566 if (newrlim->rlim_cur > rlim.rlim_cur) {
1567 mach_vm_offset_t addr;
1568 mach_vm_size_t size;
1569
1570 /* grow stack */
1571 size = newrlim->rlim_cur;
1572 if (round_page_overflow(size, &size)) {
1573 error = EINVAL;
1574 goto out;
1575 }
1576 size -= round_page_64(rlim.rlim_cur);
1577
1578 addr = (mach_vm_offset_t)(p->user_stack - round_page_64(newrlim->rlim_cur));
1579 kr = mach_vm_protect(current_map(), addr, size, FALSE, VM_PROT_DEFAULT);
1580 if (kr != KERN_SUCCESS) {
1581 error = EINVAL;
1582 goto out;
1583 }
1584 } else if (newrlim->rlim_cur < rlim.rlim_cur) {
1585 mach_vm_offset_t addr;
1586 mach_vm_size_t size;
1587 uint64_t cur_sp;
1588
1589 /* shrink stack */
1590
1591 /*
1592 * First check if new stack limit would agree
1593 * with current stack usage.
1594 * Get the current thread's stack pointer...
1595 */
1596 cur_sp = thread_adjuserstack(current_thread(), 0);
1597 if (cur_sp <= p->user_stack &&
1598 cur_sp > (p->user_stack - round_page_64(rlim.rlim_cur))) {
1599 /* stack pointer is in main stack */
1600 if (cur_sp <= (p->user_stack - round_page_64(newrlim->rlim_cur))) {
1601 /*
1602 * New limit would cause current usage to be invalid:
1603 * reject new limit.
1604 */
1605 error = EINVAL;
1606 goto out;
1607 }
1608 } else {
1609 /* not on the main stack: reject */
1610 error = EINVAL;
1611 goto out;
1612 }
1613
1614 size = round_page_64(rlim.rlim_cur);
1615 size -= round_page_64(rlim.rlim_cur);
1616
1617 addr = (mach_vm_offset_t)(p->user_stack - round_page_64(rlim.rlim_cur));
1618
1619 kr = mach_vm_protect(current_map(), addr, size, FALSE, VM_PROT_NONE);
1620 if (kr != KERN_SUCCESS) {
1621 error = EINVAL;
1622 goto out;
1623 }
1624 } else {
1625 /* no change ... */
1626 }
1627
1628 /*
1629 * We've adjusted the process's stack region. If the user-defined limit is greater
1630 * than maxsmap, we need to reflect this change in rlimit interface.
1631 */
1632 if (stack_rlim.rlim_cur != 0) {
1633 newrlim->rlim_cur = stack_rlim.rlim_cur;
1634 }
1635 if (stack_rlim.rlim_max != 0) {
1636 newrlim->rlim_max = stack_rlim.rlim_max;
1637 }
1638 break;
1639
1640 case RLIMIT_NOFILE:
1641 /*
1642 * Nothing to be done here as we already performed the sanity checks before entering the switch code block.
1643 * The real NOFILE limits enforced by the kernel is capped at MIN(RLIMIT_NOFILE, maxfilesperproc)
1644 */
1645 break;
1646
1647 case RLIMIT_AS:
1648 /* Over to Mach VM to validate the new address space limit */
1649 if (vm_map_set_size_limit(current_map(), newrlim->rlim_cur) != KERN_SUCCESS) {
1650 /* The limit specified cannot be lowered because current usage is already higher than the limit. */
1651 error = EINVAL;
1652 goto out;
1653 }
1654 break;
1655
1656 case RLIMIT_NPROC:
1657 /*
1658 * Only root can set to the maxproc limits, as it is
1659 * systemwide resource; all others are limited to
1660 * maxprocperuid (presumably less than maxproc).
1661 */
1662 if (kauth_cred_issuser(kauth_cred_get())) {
1663 if (newrlim->rlim_cur > (rlim_t)maxproc) {
1664 newrlim->rlim_cur = maxproc;
1665 }
1666 if (newrlim->rlim_max > (rlim_t)maxproc) {
1667 newrlim->rlim_max = maxproc;
1668 }
1669 } else {
1670 if (newrlim->rlim_cur > (rlim_t)maxprocperuid) {
1671 newrlim->rlim_cur = maxprocperuid;
1672 }
1673 if (newrlim->rlim_max > (rlim_t)maxprocperuid) {
1674 newrlim->rlim_max = maxprocperuid;
1675 }
1676 }
1677 break;
1678
1679 case RLIMIT_MEMLOCK:
1680 /*
1681 * Tell the Mach VM layer about the new limit value.
1682 */
1683 newrlim->rlim_cur = (vm_size_t)newrlim->rlim_cur;
1684 vm_map_set_user_wire_limit(current_map(), (vm_size_t)newrlim->rlim_cur);
1685 break;
1686 } /* switch... */
1687
1688 /* Everything checks out and we are now ready to update the rlimit */
1689 error = 0;
1690
1691 out:
1692
1693 if (error == 0) {
1694 /*
1695 * COW the current plimit if it's shared, otherwise update it in place.
1696 * Finally unblock other threads wishing to change plimit.
1697 */
1698 proc_limitupdate(p, true, ^(struct plimit *plim) {
1699 plim->pl_rlimit[which] = *newrlim;
1700 });
1701 } else {
1702 /*
1703 * This setrlimit has failed, just leave the plimit as is and unblock other
1704 * threads wishing to change plimit.
1705 */
1706 proc_lock(p);
1707 proc_limitunblock(p);
1708 proc_unlock(p);
1709 }
1710
1711 return error;
1712 }
1713
1714 /* ARGSUSED */
1715 int
getrlimit(struct proc * p,struct getrlimit_args * uap,__unused int32_t * retval)1716 getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval)
1717 {
1718 struct rlimit lim = {};
1719
1720 /*
1721 * Take out flag now in case we need to use it to trigger variant
1722 * behaviour later.
1723 */
1724 uap->which &= ~_RLIMIT_POSIX_FLAG;
1725
1726 if (uap->which >= RLIM_NLIMITS) {
1727 return EINVAL;
1728 }
1729 lim = proc_limitget(p, uap->which);
1730 return copyout((caddr_t)&lim, uap->rlp, sizeof(struct rlimit));
1731 }
1732
1733 static struct timeval
_absolutetime_to_timeval(uint64_t abstime)1734 _absolutetime_to_timeval(uint64_t abstime)
1735 {
1736 clock_sec_t sec;
1737 clock_usec_t usec;
1738 absolutetime_to_microtime(abstime, &sec, &usec);
1739 return (struct timeval){
1740 .tv_sec = sec,
1741 .tv_usec = usec,
1742 };
1743 }
1744
1745 /*
1746 * Transform the running time and tick information in proc p into user,
1747 * system, and interrupt time usage.
1748 */
1749 void
calcru(struct proc * p,struct timeval * up,struct timeval * sp,struct timeval * ip)1750 calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *ip)
1751 {
1752 task_t task;
1753
1754 if (ip != NULL) {
1755 timerclear(ip);
1756 }
1757
1758 task = proc_task(p);
1759 if (task) {
1760 mach_task_basic_info_data_t tinfo;
1761 mach_msg_type_number_t task_info_count;
1762 mach_msg_type_number_t task_events_count;
1763 task_events_info_data_t teventsinfo;
1764 struct recount_times_mach times;
1765
1766 task_info_count = MACH_TASK_BASIC_INFO_COUNT;
1767 task_info(task, MACH_TASK_BASIC_INFO,
1768 (task_info_t)&tinfo, &task_info_count);
1769 task_events_count = TASK_EVENTS_INFO_COUNT;
1770 task_info(task, TASK_EVENTS_INFO,
1771 (task_info_t)&teventsinfo, &task_events_count);
1772
1773 times = recount_task_times(task);
1774 *up = _absolutetime_to_timeval(times.rtm_user);
1775 *sp = _absolutetime_to_timeval(times.rtm_system);
1776
1777 /*
1778 * No lock is held here, but it's only a consistency issue for non-
1779 * getrusage(2) callers of this function.
1780 */
1781 p->p_stats->p_ru.ru_minflt = teventsinfo.faults -
1782 teventsinfo.pageins;
1783 p->p_stats->p_ru.ru_majflt = teventsinfo.pageins;
1784 p->p_stats->p_ru.ru_nivcsw = teventsinfo.csw -
1785 p->p_stats->p_ru.ru_nvcsw;
1786 if (p->p_stats->p_ru.ru_nivcsw < 0) {
1787 p->p_stats->p_ru.ru_nivcsw = 0;
1788 }
1789
1790 p->p_stats->p_ru.ru_maxrss = (long)tinfo.resident_size_max;
1791 } else {
1792 timerclear(up);
1793 timerclear(sp);
1794 }
1795 }
1796
1797 __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
1798 __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
1799
1800 /* ARGSUSED */
1801 int
getrusage(struct proc * p,struct getrusage_args * uap,__unused int32_t * retval)1802 getrusage(struct proc *p, struct getrusage_args *uap, __unused int32_t *retval)
1803 {
1804 struct rusage *rup, rubuf;
1805 struct user64_rusage rubuf64 = {};
1806 struct user32_rusage rubuf32 = {};
1807 size_t retsize = sizeof(rubuf); /* default: 32 bits */
1808 caddr_t retbuf = (caddr_t)&rubuf; /* default: 32 bits */
1809 struct timeval utime;
1810 struct timeval stime;
1811
1812 switch (uap->who) {
1813 case RUSAGE_SELF:
1814 calcru(p, &utime, &stime, NULL);
1815 proc_lock(p);
1816 rup = &p->p_stats->p_ru;
1817 rup->ru_utime = utime;
1818 rup->ru_stime = stime;
1819
1820 rubuf = *rup;
1821 proc_unlock(p);
1822
1823 break;
1824
1825 case RUSAGE_CHILDREN:
1826 proc_lock(p);
1827 rup = &p->p_stats->p_cru;
1828 rubuf = *rup;
1829 proc_unlock(p);
1830 break;
1831
1832 default:
1833 return EINVAL;
1834 }
1835 if (IS_64BIT_PROCESS(p)) {
1836 retsize = sizeof(rubuf64);
1837 retbuf = (caddr_t)&rubuf64;
1838 munge_user64_rusage(&rubuf, &rubuf64);
1839 } else {
1840 retsize = sizeof(rubuf32);
1841 retbuf = (caddr_t)&rubuf32;
1842 munge_user32_rusage(&rubuf, &rubuf32);
1843 }
1844
1845 return copyout(retbuf, uap->rusage, retsize);
1846 }
1847
1848 void
ruadd(struct rusage * ru,struct rusage * ru2)1849 ruadd(struct rusage *ru, struct rusage *ru2)
1850 {
1851 long *ip, *ip2;
1852 long i;
1853
1854 timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
1855 timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
1856 if (ru->ru_maxrss < ru2->ru_maxrss) {
1857 ru->ru_maxrss = ru2->ru_maxrss;
1858 }
1859 ip = &ru->ru_first; ip2 = &ru2->ru_first;
1860 for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--) {
1861 *ip++ += *ip2++;
1862 }
1863 }
1864
1865 /*
1866 * Add the rusage stats of child in parent.
1867 *
1868 * It adds rusage statistics of child process and statistics of all its
1869 * children to its parent.
1870 *
1871 * Note: proc lock of parent should be held while calling this function.
1872 */
1873 void
update_rusage_info_child(struct rusage_info_child * ri,rusage_info_current * ri_current)1874 update_rusage_info_child(struct rusage_info_child *ri, rusage_info_current *ri_current)
1875 {
1876 ri->ri_child_user_time += (ri_current->ri_user_time +
1877 ri_current->ri_child_user_time);
1878 ri->ri_child_system_time += (ri_current->ri_system_time +
1879 ri_current->ri_child_system_time);
1880 ri->ri_child_pkg_idle_wkups += (ri_current->ri_pkg_idle_wkups +
1881 ri_current->ri_child_pkg_idle_wkups);
1882 ri->ri_child_interrupt_wkups += (ri_current->ri_interrupt_wkups +
1883 ri_current->ri_child_interrupt_wkups);
1884 ri->ri_child_pageins += (ri_current->ri_pageins +
1885 ri_current->ri_child_pageins);
1886 ri->ri_child_elapsed_abstime += ((ri_current->ri_proc_exit_abstime -
1887 ri_current->ri_proc_start_abstime) + ri_current->ri_child_elapsed_abstime);
1888 }
1889
1890 static void
proc_limit_free(smr_node_t node)1891 proc_limit_free(smr_node_t node)
1892 {
1893 struct plimit *plimit = __container_of(node, struct plimit, pl_node);
1894
1895 zfree(plimit_zone, plimit);
1896 }
1897
1898 static void
proc_limit_release(struct plimit * plimit)1899 proc_limit_release(struct plimit *plimit)
1900 {
1901 if (os_ref_release(&plimit->pl_refcnt) == 0) {
1902 smr_proc_task_call(&plimit->pl_node, sizeof(*plimit), proc_limit_free);
1903 }
1904 }
1905
1906 /*
1907 * Reading soft limit from specified resource.
1908 */
1909 rlim_t
proc_limitgetcur(proc_t p,int which)1910 proc_limitgetcur(proc_t p, int which)
1911 {
1912 rlim_t rlim_cur;
1913
1914 assert(p);
1915 assert(which < RLIM_NLIMITS);
1916
1917 smr_proc_task_enter();
1918 rlim_cur = smr_entered_load(&p->p_limit)->pl_rlimit[which].rlim_cur;
1919 smr_proc_task_leave();
1920
1921 return rlim_cur;
1922 }
1923
1924 /*
1925 * Handle commonly asked limit that needs to be clamped with maxfilesperproc.
1926 */
1927 int
proc_limitgetcur_nofile(struct proc * p)1928 proc_limitgetcur_nofile(struct proc *p)
1929 {
1930 rlim_t lim = proc_limitgetcur(p, RLIMIT_NOFILE);
1931
1932 return (int)MIN(lim, maxfilesperproc);
1933 }
1934
1935 /*
1936 * Writing soft limit to specified resource. This is an internal function
1937 * used only by proc_exit to update RLIMIT_FSIZE in
1938 * place without invoking setrlimit.
1939 */
1940 void
proc_limitsetcur_fsize(proc_t p,rlim_t value)1941 proc_limitsetcur_fsize(proc_t p, rlim_t value)
1942 {
1943 proc_limitupdate(p, false, ^(struct plimit *plimit) {
1944 plimit->pl_rlimit[RLIMIT_FSIZE].rlim_cur = value;
1945 });
1946 }
1947
1948 struct rlimit
proc_limitget(proc_t p,int which)1949 proc_limitget(proc_t p, int which)
1950 {
1951 struct rlimit lim;
1952
1953 assert(which < RLIM_NLIMITS);
1954
1955 smr_proc_task_enter();
1956 lim = smr_entered_load(&p->p_limit)->pl_rlimit[which];
1957 smr_proc_task_leave();
1958
1959 return lim;
1960 }
1961
1962 void
proc_limitfork(proc_t parent,proc_t child)1963 proc_limitfork(proc_t parent, proc_t child)
1964 {
1965 struct plimit *plim;
1966
1967 proc_lock(parent);
1968 plim = smr_serialized_load(&parent->p_limit);
1969 os_ref_retain(&plim->pl_refcnt);
1970 proc_unlock(parent);
1971
1972 smr_init_store(&child->p_limit, plim);
1973 }
1974
1975 void
proc_limitdrop(proc_t p)1976 proc_limitdrop(proc_t p)
1977 {
1978 struct plimit *plimit = NULL;
1979
1980 proc_lock(p);
1981 plimit = smr_serialized_load(&p->p_limit);
1982 smr_clear_store(&p->p_limit);
1983 proc_unlock(p);
1984
1985 proc_limit_release(plimit);
1986 }
1987
1988 /*
1989 * proc_limitblock/unblock are used to serialize access to plimit
1990 * from concurrent threads within the same process.
1991 * Callers must be holding the proc lock to enter, return with
1992 * the proc lock locked
1993 */
1994 static void
proc_limitblock(proc_t p)1995 proc_limitblock(proc_t p)
1996 {
1997 lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
1998
1999 while (p->p_lflag & P_LLIMCHANGE) {
2000 p->p_lflag |= P_LLIMWAIT;
2001 msleep(&p->p_limit, &p->p_mlock, 0, "proc_limitblock", NULL);
2002 }
2003 p->p_lflag |= P_LLIMCHANGE;
2004 }
2005
2006 /*
2007 * Callers must be holding the proc lock to enter, return with
2008 * the proc lock locked
2009 */
2010 static void
proc_limitunblock(proc_t p)2011 proc_limitunblock(proc_t p)
2012 {
2013 lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
2014
2015 p->p_lflag &= ~P_LLIMCHANGE;
2016 if (p->p_lflag & P_LLIMWAIT) {
2017 p->p_lflag &= ~P_LLIMWAIT;
2018 wakeup(&p->p_limit);
2019 }
2020 }
2021
2022 /*
2023 * Perform an rlimit update (as defined by the arbitrary `update` function).
2024 *
2025 * Because plimits are accessed without holding any locks,
2026 * with only a hazard reference, the struct plimit is always
2027 * copied, updated, and replaced, to implement a const value type.
2028 */
2029 static void
2030 proc_limitupdate(proc_t p, bool unblock, void (^update)(struct plimit *))
2031 {
2032 struct plimit *cur_plim;
2033 struct plimit *copy_plim;
2034
2035 copy_plim = zalloc_flags(plimit_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2036
2037 proc_lock(p);
2038
2039 cur_plim = smr_serialized_load(&p->p_limit);
2040
2041 os_ref_init_count(©_plim->pl_refcnt, &rlimit_refgrp, 1);
2042 bcopy(cur_plim->pl_rlimit, copy_plim->pl_rlimit,
2043 sizeof(struct rlimit) * RLIM_NLIMITS);
2044
2045 update(copy_plim);
2046
2047 smr_serialized_store(&p->p_limit, copy_plim);
2048
2049 if (unblock) {
2050 proc_limitunblock(p);
2051 }
2052 proc_unlock(p);
2053
2054 proc_limit_release(cur_plim);
2055 }
2056
2057 static int
2058 iopolicysys_disk(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2059 static int
2060 iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2061 static int
2062 iopolicysys_vfs_atime_updates(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2063 static int
2064 iopolicysys_vfs_statfs_no_data_volume(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2065 static int
2066 iopolicysys_vfs_trigger_resolve(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2067 static int
2068 iopolicysys_vfs_ignore_content_protection(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2069 static int
2070 iopolicysys_vfs_ignore_node_permissions(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *ipo_param);
2071 static int
2072 iopolicysys_vfs_skip_mtime_update(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2073 static int
2074 iopolicysys_vfs_allow_lowspace_writes(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2075 static int
2076 iopolicysys_vfs_disallow_rw_for_o_evtonly(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2077 static int iopolicysys_vfs_altlink(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2078 static int iopolicysys_vfs_nocache_write_fs_blksize(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2079 static int
2080 iopolicysys_vfs_support_long_paths(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2081 static int
2082 iopolicysys_vfs_entitled_reserve_access(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
2083
2084 /*
2085 * iopolicysys
2086 *
2087 * Description: System call MUX for use in manipulating I/O policy attributes of the current process or thread
2088 *
2089 * Parameters: cmd Policy command
2090 * arg Pointer to policy arguments
2091 *
2092 * Returns: 0 Success
2093 * EINVAL Invalid command or invalid policy arguments
2094 *
2095 */
2096 int
iopolicysys(struct proc * p,struct iopolicysys_args * uap,int32_t * retval)2097 iopolicysys(struct proc *p, struct iopolicysys_args *uap, int32_t *retval)
2098 {
2099 int error = 0;
2100 struct _iopol_param_t iop_param;
2101
2102 if ((error = copyin(uap->arg, &iop_param, sizeof(iop_param))) != 0) {
2103 goto out;
2104 }
2105
2106 #if CONFIG_MACF
2107 error = mac_proc_check_iopolicysys(p, kauth_cred_get(),
2108 uap->cmd,
2109 iop_param.iop_iotype,
2110 iop_param.iop_scope,
2111 iop_param.iop_policy);
2112 if (error) {
2113 return error;
2114 }
2115 #endif
2116
2117 switch (iop_param.iop_iotype) {
2118 case IOPOL_TYPE_DISK:
2119 error = iopolicysys_disk(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2120 if (error == EIDRM) {
2121 *retval = -2;
2122 error = 0;
2123 }
2124 if (error) {
2125 goto out;
2126 }
2127 break;
2128 case IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY:
2129 error = iopolicysys_vfs_hfs_case_sensitivity(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2130 if (error) {
2131 goto out;
2132 }
2133 break;
2134 case IOPOL_TYPE_VFS_ATIME_UPDATES:
2135 error = iopolicysys_vfs_atime_updates(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2136 if (error) {
2137 goto out;
2138 }
2139 break;
2140 case IOPOL_TYPE_VFS_MATERIALIZE_DATALESS_FILES:
2141 error = iopolicysys_vfs_materialize_dataless_files(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2142 if (error) {
2143 goto out;
2144 }
2145 break;
2146 case IOPOL_TYPE_VFS_STATFS_NO_DATA_VOLUME:
2147 error = iopolicysys_vfs_statfs_no_data_volume(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2148 if (error) {
2149 goto out;
2150 }
2151 break;
2152 case IOPOL_TYPE_VFS_TRIGGER_RESOLVE:
2153 error = iopolicysys_vfs_trigger_resolve(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2154 if (error) {
2155 goto out;
2156 }
2157 break;
2158 case IOPOL_TYPE_VFS_IGNORE_CONTENT_PROTECTION:
2159 error = iopolicysys_vfs_ignore_content_protection(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2160 if (error) {
2161 goto out;
2162 }
2163 break;
2164 case IOPOL_TYPE_VFS_IGNORE_PERMISSIONS:
2165 error = iopolicysys_vfs_ignore_node_permissions(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2166 if (error) {
2167 goto out;
2168 }
2169 break;
2170 case IOPOL_TYPE_VFS_SKIP_MTIME_UPDATE:
2171 error = iopolicysys_vfs_skip_mtime_update(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2172 if (error) {
2173 goto out;
2174 }
2175 break;
2176 case IOPOL_TYPE_VFS_ALLOW_LOW_SPACE_WRITES:
2177 error = iopolicysys_vfs_allow_lowspace_writes(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2178 if (error) {
2179 goto out;
2180 }
2181 break;
2182 case IOPOL_TYPE_VFS_DISALLOW_RW_FOR_O_EVTONLY:
2183 error = iopolicysys_vfs_disallow_rw_for_o_evtonly(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2184 if (error) {
2185 goto out;
2186 }
2187 break;
2188 case IOPOL_TYPE_VFS_ALTLINK:
2189 error = iopolicysys_vfs_altlink(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2190 if (error) {
2191 goto out;
2192 }
2193 break;
2194 case IOPOL_TYPE_VFS_NOCACHE_WRITE_FS_BLKSIZE:
2195 error = iopolicysys_vfs_nocache_write_fs_blksize(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2196 if (error) {
2197 goto out;
2198 }
2199 break;
2200 case IOPOL_TYPE_VFS_SUPPORT_LONG_PATHS:
2201 error = iopolicysys_vfs_support_long_paths(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2202 if (error) {
2203 goto out;
2204 }
2205 break;
2206 case IOPOL_TYPE_VFS_ENTITLED_RESERVE_ACCESS:
2207 error = iopolicysys_vfs_entitled_reserve_access(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
2208 if (error) {
2209 goto out;
2210 }
2211 break;
2212
2213 default:
2214 error = EINVAL;
2215 goto out;
2216 }
2217
2218 /* Individual iotype handlers are expected to update iop_param, if requested with a GET command */
2219 if (uap->cmd == IOPOL_CMD_GET) {
2220 error = copyout((caddr_t)&iop_param, uap->arg, sizeof(iop_param));
2221 if (error) {
2222 goto out;
2223 }
2224 }
2225
2226 out:
2227 return error;
2228 }
2229
/*
 * iopolicysys_disk
 *
 * Handle IOPOL_TYPE_DISK get/set for the requested scope: the calling
 * process, the calling thread, or the process's DARWIN_BG override.
 * The value is stored/read through the task/thread policy engine
 * (TASK_POLICY_IOPOL or TASK_POLICY_DARWIN_BG_IOPOL).
 *
 * Returns:
 *   0       success
 *   EINVAL  unknown scope, policy, or command
 *   ENOTSUP IOPOL_SCOPE_DARWIN_BG on non-macOS platforms
 *   EIDRM   SET succeeded but stripped the thread's QoS class; this is a
 *           marker for user-space, not a failure
 */
static int
iopolicysys_disk(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
    int error = 0;
    thread_t thread;
    int policy_flavor;

    /* Validate scope */
    switch (scope) {
    case IOPOL_SCOPE_PROCESS:
        thread = THREAD_NULL;
        policy_flavor = TASK_POLICY_IOPOL;
        break;

    case IOPOL_SCOPE_THREAD:
        thread = current_thread();
        policy_flavor = TASK_POLICY_IOPOL;

        /* Not allowed to combine QoS and (non-PASSIVE) IO policy, doing so strips the QoS */
        if (cmd == IOPOL_CMD_SET && thread_has_qos_policy(thread)) {
            switch (policy) {
            case IOPOL_DEFAULT:
            case IOPOL_PASSIVE:
                break;
            case IOPOL_UTILITY:
            case IOPOL_THROTTLE:
            case IOPOL_IMPORTANT:
            case IOPOL_STANDARD:
                if (!thread_is_static_param(thread)) {
                    thread_remove_qos_policy(thread);
                    /*
                     * This is not an error case, this is to return a marker to user-space that
                     * we stripped the thread of its QoS class.
                     */
                    error = EIDRM;
                    break;
                }
                /* A thread with static QoS params cannot be stripped: reject the request */
                OS_FALLTHROUGH;
            default:
                error = EINVAL;
                goto out;
            }
        }
        break;

    case IOPOL_SCOPE_DARWIN_BG:
#if !defined(XNU_TARGET_OS_OSX)
        /* We don't want this on platforms outside of macOS as BG is always IOPOL_THROTTLE */
        error = ENOTSUP;
        goto out;
#else /* !defined(XNU_TARGET_OS_OSX) */
        thread = THREAD_NULL;
        policy_flavor = TASK_POLICY_DARWIN_BG_IOPOL;
        break;
#endif /* !defined(XNU_TARGET_OS_OSX) */

    default:
        error = EINVAL;
        goto out;
    }

    /* Validate policy (SET only; GET accepts any incoming value) */
    if (cmd == IOPOL_CMD_SET) {
        switch (policy) {
        case IOPOL_DEFAULT:
            if (scope == IOPOL_SCOPE_DARWIN_BG) {
                /* the current default BG throttle level is UTILITY */
                policy = IOPOL_UTILITY;
            } else {
                policy = IOPOL_IMPORTANT;
            }
            break;
        case IOPOL_UTILITY:
            /* fall-through */
        case IOPOL_THROTTLE:
            /* These levels are OK */
            break;
        case IOPOL_IMPORTANT:
            /* fall-through */
        case IOPOL_STANDARD:
            /* fall-through */
        case IOPOL_PASSIVE:
            if (scope == IOPOL_SCOPE_DARWIN_BG) {
                /* These levels are invalid for BG */
                error = EINVAL;
                goto out;
            } else {
                /* OK for other scopes */
            }
            break;
        default:
            error = EINVAL;
            goto out;
        }
    }

    /* Perform command */
    switch (cmd) {
    case IOPOL_CMD_SET:
        if (thread != THREAD_NULL) {
            proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor, policy);
        } else {
            proc_set_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor, policy);
        }
        break;
    case IOPOL_CMD_GET:
        if (thread != THREAD_NULL) {
            policy = proc_get_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor);
        } else {
            policy = proc_get_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor);
        }
        iop_param->iop_policy = policy;
        break;
    default:
        error = EINVAL; /* unknown command */
        break;
    }

out:
    return error;
}
2351
2352 static int
iopolicysys_vfs_hfs_case_sensitivity(struct proc * p,int cmd,int scope,int policy,struct _iopol_param_t * iop_param)2353 iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
2354 {
2355 int error = 0;
2356
2357 /* Validate scope */
2358 switch (scope) {
2359 case IOPOL_SCOPE_PROCESS:
2360 /* Only process OK */
2361 break;
2362 default:
2363 error = EINVAL;
2364 goto out;
2365 }
2366
2367 /* Validate policy */
2368 if (cmd == IOPOL_CMD_SET) {
2369 switch (policy) {
2370 case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
2371 /* fall-through */
2372 case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
2373 /* These policies are OK */
2374 break;
2375 default:
2376 error = EINVAL;
2377 goto out;
2378 }
2379 }
2380
2381 /* Perform command */
2382 switch (cmd) {
2383 case IOPOL_CMD_SET:
2384 if (0 == kauth_cred_issuser(kauth_cred_get())) {
2385 /* If it's a non-root process, it needs to have the entitlement to set the policy */
2386 boolean_t entitled = FALSE;
2387 entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
2388 if (!entitled) {
2389 error = EPERM;
2390 goto out;
2391 }
2392 }
2393
2394 switch (policy) {
2395 case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
2396 OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY), &p->p_vfs_iopolicy);
2397 break;
2398 case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
2399 OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY, &p->p_vfs_iopolicy);
2400 break;
2401 default:
2402 error = EINVAL;
2403 goto out;
2404 }
2405
2406 break;
2407 case IOPOL_CMD_GET:
2408 iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY)
2409 ? IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE
2410 : IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT;
2411 break;
2412 default:
2413 error = EINVAL; /* unknown command */
2414 break;
2415 }
2416
2417 out:
2418 return error;
2419 }
2420
2421 static inline int
get_thread_atime_policy(struct uthread * ut)2422 get_thread_atime_policy(struct uthread *ut)
2423 {
2424 return (ut->uu_flag & UT_ATIME_UPDATE) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT;
2425 }
2426
2427 static inline void
set_thread_atime_policy(struct uthread * ut,int policy)2428 set_thread_atime_policy(struct uthread *ut, int policy)
2429 {
2430 if (policy == IOPOL_ATIME_UPDATES_OFF) {
2431 ut->uu_flag |= UT_ATIME_UPDATE;
2432 } else {
2433 ut->uu_flag &= ~UT_ATIME_UPDATE;
2434 }
2435 }
2436
2437 static inline void
set_task_atime_policy(struct proc * p,int policy)2438 set_task_atime_policy(struct proc *p, int policy)
2439 {
2440 if (policy == IOPOL_ATIME_UPDATES_OFF) {
2441 OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES, &p->p_vfs_iopolicy);
2442 } else {
2443 OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES), &p->p_vfs_iopolicy);
2444 }
2445 }
2446
2447 static inline int
get_task_atime_policy(struct proc * p)2448 get_task_atime_policy(struct proc *p)
2449 {
2450 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT;
2451 }
2452
2453 static int
iopolicysys_vfs_atime_updates(struct proc * p __unused,int cmd,int scope,int policy,struct _iopol_param_t * iop_param)2454 iopolicysys_vfs_atime_updates(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
2455 {
2456 int error = 0;
2457 thread_t thread;
2458
2459 /* Validate scope */
2460 switch (scope) {
2461 case IOPOL_SCOPE_THREAD:
2462 thread = current_thread();
2463 break;
2464 case IOPOL_SCOPE_PROCESS:
2465 thread = THREAD_NULL;
2466 break;
2467 default:
2468 error = EINVAL;
2469 goto out;
2470 }
2471
2472 /* Validate policy */
2473 if (cmd == IOPOL_CMD_SET) {
2474 switch (policy) {
2475 case IOPOL_ATIME_UPDATES_DEFAULT:
2476 case IOPOL_ATIME_UPDATES_OFF:
2477 break;
2478 default:
2479 error = EINVAL;
2480 goto out;
2481 }
2482 }
2483
2484 /* Perform command */
2485 switch (cmd) {
2486 case IOPOL_CMD_SET:
2487 if (thread != THREAD_NULL) {
2488 set_thread_atime_policy(get_bsdthread_info(thread), policy);
2489 } else {
2490 set_task_atime_policy(p, policy);
2491 }
2492 break;
2493 case IOPOL_CMD_GET:
2494 if (thread != THREAD_NULL) {
2495 policy = get_thread_atime_policy(get_bsdthread_info(thread));
2496 } else {
2497 policy = get_task_atime_policy(p);
2498 }
2499 iop_param->iop_policy = policy;
2500 break;
2501 default:
2502 error = EINVAL; /* unknown command */
2503 break;
2504 }
2505
2506 out:
2507 return error;
2508 }
2509
2510 static inline int
get_thread_materialize_policy(struct uthread * ut)2511 get_thread_materialize_policy(struct uthread *ut)
2512 {
2513 if (ut->uu_flag & UT_NSPACE_NODATALESSFAULTS) {
2514 return IOPOL_MATERIALIZE_DATALESS_FILES_OFF;
2515 } else if (ut->uu_flag & UT_NSPACE_FORCEDATALESSFAULTS) {
2516 return IOPOL_MATERIALIZE_DATALESS_FILES_ON;
2517 }
2518 /* Default thread behavior is "inherit process behavior". */
2519 return IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT;
2520 }
2521
2522 static inline void
set_thread_materialize_policy(struct uthread * ut,int policy)2523 set_thread_materialize_policy(struct uthread *ut, int policy)
2524 {
2525 if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_OFF) {
2526 ut->uu_flag &= ~UT_NSPACE_FORCEDATALESSFAULTS;
2527 ut->uu_flag |= UT_NSPACE_NODATALESSFAULTS;
2528 } else if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_ON) {
2529 ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS;
2530 ut->uu_flag |= UT_NSPACE_FORCEDATALESSFAULTS;
2531 } else {
2532 ut->uu_flag &= ~(UT_NSPACE_NODATALESSFAULTS | UT_NSPACE_FORCEDATALESSFAULTS);
2533 }
2534 }
2535
2536 static inline void
set_proc_materialize_policy(struct proc * p,int policy)2537 set_proc_materialize_policy(struct proc *p, int policy)
2538 {
2539 if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT) {
2540 /*
2541 * Caller has specified "use the default policy".
2542 * The default policy is to NOT materialize dataless
2543 * files.
2544 */
2545 policy = IOPOL_MATERIALIZE_DATALESS_FILES_OFF;
2546 }
2547 if (policy == IOPOL_MATERIALIZE_DATALESS_FILES_ON) {
2548 OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES, &p->p_vfs_iopolicy);
2549 } else {
2550 OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES), &p->p_vfs_iopolicy);
2551 }
2552 }
2553
2554 static int
get_proc_materialize_policy(struct proc * p)2555 get_proc_materialize_policy(struct proc *p)
2556 {
2557 return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES) ? IOPOL_MATERIALIZE_DATALESS_FILES_ON : IOPOL_MATERIALIZE_DATALESS_FILES_OFF;
2558 }
2559
2560 int
iopolicysys_vfs_materialize_dataless_files(struct proc * p __unused,int cmd,int scope,int policy,struct _iopol_param_t * iop_param)2561 iopolicysys_vfs_materialize_dataless_files(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
2562 {
2563 int error = 0;
2564 thread_t thread;
2565
2566 /* Validate scope */
2567 switch (scope) {
2568 case IOPOL_SCOPE_THREAD:
2569 thread = current_thread();
2570 break;
2571 case IOPOL_SCOPE_PROCESS:
2572 thread = THREAD_NULL;
2573 break;
2574 default:
2575 error = EINVAL;
2576 goto out;
2577 }
2578
2579 /* Validate policy */
2580 if (cmd == IOPOL_CMD_SET) {
2581 switch (policy) {
2582 case IOPOL_MATERIALIZE_DATALESS_FILES_DEFAULT:
2583 case IOPOL_MATERIALIZE_DATALESS_FILES_OFF:
2584 case IOPOL_MATERIALIZE_DATALESS_FILES_ON:
2585 break;
2586 default:
2587 error = EINVAL;
2588 goto out;
2589 }
2590 }
2591
2592 /* Perform command */
2593 switch (cmd) {
2594 case IOPOL_CMD_SET:
2595 if (thread != THREAD_NULL) {
2596 set_thread_materialize_policy(get_bsdthread_info(thread), policy);
2597 } else {
2598 set_proc_materialize_policy(p, policy);
2599 }
2600 break;
2601 case IOPOL_CMD_GET:
2602 if (thread != THREAD_NULL) {
2603 policy = get_thread_materialize_policy(get_bsdthread_info(thread));
2604 } else {
2605 policy = get_proc_materialize_policy(p);
2606 }
2607 iop_param->iop_policy = policy;
2608 break;
2609 default:
2610 error = EINVAL; /* unknown command */
2611 break;
2612 }
2613
2614 out:
2615 return error;
2616 }
2617
2618 static int
iopolicysys_vfs_statfs_no_data_volume(struct proc * p __unused,int cmd,int scope,int policy,struct _iopol_param_t * iop_param)2619 iopolicysys_vfs_statfs_no_data_volume(struct proc *p __unused, int cmd,
2620 int scope, int policy, struct _iopol_param_t *iop_param)
2621 {
2622 int error = 0;
2623
2624 /* Validate scope */
2625 switch (scope) {
2626 case IOPOL_SCOPE_PROCESS:
2627 /* Only process OK */
2628 break;
2629 default:
2630 error = EINVAL;
2631 goto out;
2632 }
2633
2634 /* Validate policy */
2635 if (cmd == IOPOL_CMD_SET) {
2636 switch (policy) {
2637 case IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT:
2638 /* fall-through */
2639 case IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME:
2640 /* These policies are OK */
2641 break;
2642 default:
2643 error = EINVAL;
2644 goto out;
2645 }
2646 }
2647
2648 /* Perform command */
2649 switch (cmd) {
2650 case IOPOL_CMD_SET:
2651 if (0 == kauth_cred_issuser(kauth_cred_get())) {
2652 /* If it's a non-root process, it needs to have the entitlement to set the policy */
2653 boolean_t entitled = FALSE;
2654 entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
2655 if (!entitled) {
2656 error = EPERM;
2657 goto out;
2658 }
2659 }
2660
2661 switch (policy) {
2662 case IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT:
2663 OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME), &p->p_vfs_iopolicy);
2664 break;
2665 case IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME:
2666 OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME, &p->p_vfs_iopolicy);
2667 break;
2668 default:
2669 error = EINVAL;
2670 goto out;
2671 }
2672
2673 break;
2674 case IOPOL_CMD_GET:
2675 iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME)
2676 ? IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME
2677 : IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT;
2678 break;
2679 default:
2680 error = EINVAL; /* unknown command */
2681 break;
2682 }
2683
2684 out:
2685 return error;
2686 }
2687
2688 static int
iopolicysys_vfs_trigger_resolve(struct proc * p __unused,int cmd,int scope,int policy,struct _iopol_param_t * iop_param)2689 iopolicysys_vfs_trigger_resolve(struct proc *p __unused, int cmd,
2690 int scope, int policy, struct _iopol_param_t *iop_param)
2691 {
2692 int error = 0;
2693
2694 /* Validate scope */
2695 switch (scope) {
2696 case IOPOL_SCOPE_PROCESS:
2697 /* Only process OK */
2698 break;
2699 default:
2700 error = EINVAL;
2701 goto out;
2702 }
2703
2704 /* Validate policy */
2705 if (cmd == IOPOL_CMD_SET) {
2706 switch (policy) {
2707 case IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT:
2708 /* fall-through */
2709 case IOPOL_VFS_TRIGGER_RESOLVE_OFF:
2710 /* These policies are OK */
2711 break;
2712 default:
2713 error = EINVAL;
2714 goto out;
2715 }
2716 }
2717
2718 /* Perform command */
2719 switch (cmd) {
2720 case IOPOL_CMD_SET:
2721 switch (policy) {
2722 case IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT:
2723 OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE), &p->p_vfs_iopolicy);
2724 break;
2725 case IOPOL_VFS_TRIGGER_RESOLVE_OFF:
2726 OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE, &p->p_vfs_iopolicy);
2727 break;
2728 default:
2729 error = EINVAL;
2730 goto out;
2731 }
2732
2733 break;
2734 case IOPOL_CMD_GET:
2735 iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE)
2736 ? IOPOL_VFS_TRIGGER_RESOLVE_OFF
2737 : IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT;
2738 break;
2739 default:
2740 error = EINVAL; /* unknown command */
2741 break;
2742 }
2743
2744 out:
2745 return error;
2746 }
2747
/*
 * iopolicysys_vfs_ignore_content_protection
 *
 * Get/set the per-process policy that lets the process ignore content
 * protection restrictions.  Process scope only; a non-root caller must
 * hold the entitlement checked below.
 *
 * Returns 0, EINVAL (bad scope/policy/cmd), or EPERM (missing entitlement).
 */
static int
iopolicysys_vfs_ignore_content_protection(struct proc *p, int cmd, int scope,
    int policy, struct _iopol_param_t *iop_param)
{
    int error = 0;

    /* Validate scope */
    switch (scope) {
    case IOPOL_SCOPE_PROCESS:
        /* Only process OK */
        break;
    default:
        error = EINVAL;
        goto out;
    }

    /* Validate policy */
    if (cmd == IOPOL_CMD_SET) {
        switch (policy) {
        case IOPOL_VFS_CONTENT_PROTECTION_DEFAULT:
            OS_FALLTHROUGH;
        case IOPOL_VFS_CONTENT_PROTECTION_IGNORE:
            /* These policies are OK */
            break;
        default:
            error = EINVAL;
            goto out;
        }
    }

    /* Perform command */
    switch (cmd) {
    case IOPOL_CMD_SET:
        if (0 == kauth_cred_issuser(kauth_cred_get())) {
            /* If it's a non-root process, it needs to have the entitlement to set the policy */
            boolean_t entitled = FALSE;
            /*
             * NOTE(review): this checks the case_sensitivity entitlement,
             * which looks copy-pasted from
             * iopolicysys_vfs_hfs_case_sensitivity.  Confirm whether a
             * content-protection-specific entitlement was intended before
             * changing the string (it is ABI for entitled binaries).
             */
            entitled = IOCurrentTaskHasEntitlement("com.apple.private.iopol.case_sensitivity");
            if (!entitled) {
                error = EPERM;
                goto out;
            }
        }

        switch (policy) {
        case IOPOL_VFS_CONTENT_PROTECTION_DEFAULT:
            os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION, relaxed);
            break;
        case IOPOL_VFS_CONTENT_PROTECTION_IGNORE:
            os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION, relaxed);
            break;
        default:
            error = EINVAL;
            goto out;
        }

        break;
    case IOPOL_CMD_GET:
        iop_param->iop_policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION)
            ? IOPOL_VFS_CONTENT_PROTECTION_IGNORE
            : IOPOL_VFS_CONTENT_PROTECTION_DEFAULT;
        break;
    default:
        error = EINVAL; /* unknown command */
        break;
    }

out:
    return error;
}
2817
2818 static int
get_proc_vfs_ignore_permissions_policy(struct proc * p)2819 get_proc_vfs_ignore_permissions_policy(struct proc *p)
2820 {
2821 return os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS ?
2822 IOPOL_VFS_IGNORE_PERMISSIONS_ON : IOPOL_VFS_IGNORE_PERMISSIONS_OFF;
2823 }
2824
2825 static int
get_thread_vfs_ignore_permissions_policy(thread_t thread)2826 get_thread_vfs_ignore_permissions_policy(thread_t thread)
2827 {
2828 struct uthread *ut = get_bsdthread_info(thread);
2829
2830 return (ut->uu_flag & UT_IGNORE_NODE_PERMISSIONS) ?
2831 IOPOL_VFS_IGNORE_PERMISSIONS_ON : IOPOL_VFS_IGNORE_PERMISSIONS_OFF;
2832 }
2833
2834 static void
set_proc_vfs_ignore_permissions_policy(struct proc * p,int policy)2835 set_proc_vfs_ignore_permissions_policy(struct proc *p, int policy)
2836 {
2837 switch (policy) {
2838 case IOPOL_VFS_IGNORE_PERMISSIONS_OFF:
2839 os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS, relaxed);
2840 break;
2841 case IOPOL_VFS_IGNORE_PERMISSIONS_ON:
2842 os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_NODE_PERMISSIONS, relaxed);
2843 break;
2844 default:
2845 break;
2846 }
2847 }
2848
2849 static void
set_thread_vfs_ignore_permissions_policy(thread_t thread,int policy)2850 set_thread_vfs_ignore_permissions_policy(thread_t thread, int policy)
2851 {
2852 struct uthread *ut = get_bsdthread_info(thread);
2853
2854 switch (policy) {
2855 case IOPOL_VFS_IGNORE_PERMISSIONS_OFF:
2856 ut->uu_flag &= ~UT_IGNORE_NODE_PERMISSIONS;
2857 break;
2858 case IOPOL_VFS_IGNORE_PERMISSIONS_ON:
2859 ut->uu_flag |= UT_IGNORE_NODE_PERMISSIONS;
2860 break;
2861 default:
2862 break;
2863 }
2864 }
2865
2866 #define AUTHORIZED_ACCESS_ENTITLEMENT \
2867 "com.apple.private.vfs.authorized-access"
2868 int
iopolicysys_vfs_ignore_node_permissions(struct proc * p,int cmd,int scope,int policy,__unused struct _iopol_param_t * iop_param)2869 iopolicysys_vfs_ignore_node_permissions(struct proc *p, int cmd, int scope,
2870 int policy, __unused struct _iopol_param_t *iop_param)
2871 {
2872 int error = EINVAL;
2873 thread_t thread = THREAD_NULL;
2874
2875 switch (scope) {
2876 case IOPOL_SCOPE_THREAD:
2877 thread = current_thread();
2878 break;
2879 case IOPOL_SCOPE_PROCESS:
2880 break;
2881 default:
2882 goto out;
2883 }
2884
2885 switch (cmd) {
2886 case IOPOL_CMD_GET:
2887 if (thread != THREAD_NULL) {
2888 policy = get_thread_vfs_ignore_permissions_policy(thread);
2889 } else {
2890 policy = get_proc_vfs_ignore_permissions_policy(p);
2891 }
2892 iop_param->iop_policy = policy;
2893 goto out_ok;
2894 case IOPOL_CMD_SET:
2895 /* SET is handled after the switch */
2896 break;
2897 default:
2898 goto out;
2899 }
2900
2901 if (!IOCurrentTaskHasEntitlement(AUTHORIZED_ACCESS_ENTITLEMENT)) {
2902 error = EPERM;
2903 goto out;
2904 }
2905
2906 if (thread != THREAD_NULL) {
2907 set_thread_vfs_ignore_permissions_policy(thread, policy);
2908 } else {
2909 set_proc_vfs_ignore_permissions_policy(p, policy);
2910 }
2911
2912 out_ok:
2913 error = 0;
2914 out:
2915 return error;
2916 }
2917
2918 static inline void
set_thread_skip_mtime_policy(struct uthread * ut,int policy)2919 set_thread_skip_mtime_policy(struct uthread *ut, int policy)
2920 {
2921 os_atomic_andnot(&ut->uu_flag, UT_SKIP_MTIME_UPDATE |
2922 UT_SKIP_MTIME_UPDATE_IGNORE, relaxed);
2923
2924 if (policy == IOPOL_VFS_SKIP_MTIME_UPDATE_ON) {
2925 os_atomic_or(&ut->uu_flag, UT_SKIP_MTIME_UPDATE, relaxed);
2926 } else if (policy == IOPOL_VFS_SKIP_MTIME_UPDATE_IGNORE) {
2927 os_atomic_or(&ut->uu_flag, UT_SKIP_MTIME_UPDATE_IGNORE, relaxed);
2928 }
2929 }
2930
2931 static inline int
get_thread_skip_mtime_policy(struct uthread * ut)2932 get_thread_skip_mtime_policy(struct uthread *ut)
2933 {
2934 return (os_atomic_load(&ut->uu_flag, relaxed) & UT_SKIP_MTIME_UPDATE) ?
2935 IOPOL_VFS_SKIP_MTIME_UPDATE_ON : IOPOL_VFS_SKIP_MTIME_UPDATE_OFF;
2936 }
2937
2938 static inline void
set_proc_skip_mtime_policy(struct proc * p,int policy)2939 set_proc_skip_mtime_policy(struct proc *p, int policy)
2940 {
2941 if (policy == IOPOL_VFS_SKIP_MTIME_UPDATE_ON) {
2942 os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_SKIP_MTIME_UPDATE, relaxed);
2943 } else {
2944 os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_SKIP_MTIME_UPDATE, relaxed);
2945 }
2946 }
2947
2948 static inline int
get_proc_skip_mtime_policy(struct proc * p)2949 get_proc_skip_mtime_policy(struct proc *p)
2950 {
2951 return (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_SKIP_MTIME_UPDATE) ?
2952 IOPOL_VFS_SKIP_MTIME_UPDATE_ON : IOPOL_VFS_SKIP_MTIME_UPDATE_OFF;
2953 }
2954
2955 #define SKIP_MTIME_UPDATE_ENTITLEMENT \
2956 "com.apple.private.vfs.skip-mtime-updates"
2957 int
iopolicysys_vfs_skip_mtime_update(struct proc * p,int cmd,int scope,int policy,__unused struct _iopol_param_t * iop_param)2958 iopolicysys_vfs_skip_mtime_update(struct proc *p, int cmd, int scope,
2959 int policy, __unused struct _iopol_param_t *iop_param)
2960 {
2961 thread_t thread;
2962 int error = 0;
2963
2964 /* Validate scope */
2965 switch (scope) {
2966 case IOPOL_SCOPE_THREAD:
2967 thread = current_thread();
2968 break;
2969 case IOPOL_SCOPE_PROCESS:
2970 thread = THREAD_NULL;
2971 break;
2972 default:
2973 error = EINVAL;
2974 goto out;
2975 }
2976
2977 /* Validate policy */
2978 if (cmd == IOPOL_CMD_SET) {
2979 switch (policy) {
2980 case IOPOL_VFS_SKIP_MTIME_UPDATE_ON:
2981 case IOPOL_VFS_SKIP_MTIME_UPDATE_OFF:
2982 case IOPOL_VFS_SKIP_MTIME_UPDATE_IGNORE:
2983 if (!IOCurrentTaskHasEntitlement(SKIP_MTIME_UPDATE_ENTITLEMENT)) {
2984 error = EPERM;
2985 goto out;
2986 }
2987 break;
2988 default:
2989 error = EINVAL;
2990 goto out;
2991 }
2992 }
2993
2994 /* Perform command */
2995 switch (cmd) {
2996 case IOPOL_CMD_SET:
2997 if (thread != THREAD_NULL) {
2998 set_thread_skip_mtime_policy(get_bsdthread_info(thread), policy);
2999 } else {
3000 /*
3001 * The 'IOPOL_VFS_SKIP_MTIME_UPDATE_IGNORE' policy is only
3002 * applicable for thread.
3003 */
3004 if (policy == IOPOL_VFS_SKIP_MTIME_UPDATE_IGNORE) {
3005 error = EINVAL;
3006 goto out;
3007 }
3008 set_proc_skip_mtime_policy(p, policy);
3009 }
3010 break;
3011 case IOPOL_CMD_GET:
3012 if (thread != THREAD_NULL) {
3013 policy = get_thread_skip_mtime_policy(get_bsdthread_info(thread));
3014 } else {
3015 policy = get_proc_skip_mtime_policy(p);
3016 }
3017 iop_param->iop_policy = policy;
3018 break;
3019 default:
3020 error = EINVAL; /* unknown command */
3021 break;
3022 }
3023
3024 out:
3025 return error;
3026 }
3027
3028 #define ALLOW_LOW_SPACE_WRITES_ENTITLEMENT \
3029 "com.apple.private.vfs.allow-low-space-writes"
3030 static int
iopolicysys_vfs_allow_lowspace_writes(struct proc * p,int cmd,int scope,int policy,__unused struct _iopol_param_t * iop_param)3031 iopolicysys_vfs_allow_lowspace_writes(struct proc *p, int cmd, int scope,
3032 int policy, __unused struct _iopol_param_t *iop_param)
3033 {
3034 int error = EINVAL;
3035
3036 switch (scope) {
3037 case IOPOL_SCOPE_PROCESS:
3038 break;
3039 default:
3040 goto out;
3041 }
3042
3043 switch (cmd) {
3044 case IOPOL_CMD_GET:
3045 policy = os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES ?
3046 IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_ON : IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_OFF;
3047 iop_param->iop_policy = policy;
3048 goto out_ok;
3049 case IOPOL_CMD_SET:
3050 break;
3051 default:
3052 break;
3053 }
3054
3055 if (!IOCurrentTaskHasEntitlement(ALLOW_LOW_SPACE_WRITES_ENTITLEMENT)) {
3056 error = EPERM;
3057 goto out;
3058 }
3059
3060 switch (policy) {
3061 case IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_OFF:
3062 os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES, relaxed);
3063 break;
3064 case IOPOL_VFS_ALLOW_LOW_SPACE_WRITES_ON:
3065 os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALLOW_LOW_SPACE_WRITES, relaxed);
3066 break;
3067 default:
3068 break;
3069 }
3070
3071 out_ok:
3072 error = 0;
3073 out:
3074 return error;
3075 }
3076
3077 #define DISALLOW_RW_FOR_O_EVTONLY_ENTITLEMENT \
3078 "com.apple.private.vfs.disallow-rw-for-o-evtonly"
3079
3080 static int
iopolicysys_vfs_disallow_rw_for_o_evtonly(struct proc * p,int cmd,int scope,int policy,__unused struct _iopol_param_t * iop_param)3081 iopolicysys_vfs_disallow_rw_for_o_evtonly(struct proc *p, int cmd, int scope,
3082 int policy, __unused struct _iopol_param_t *iop_param)
3083 {
3084 int error = EINVAL;
3085
3086 switch (scope) {
3087 case IOPOL_SCOPE_PROCESS:
3088 break;
3089 default:
3090 goto out;
3091 }
3092
3093 switch (cmd) {
3094 case IOPOL_CMD_GET:
3095 policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) &
3096 P_VFS_IOPOLICY_DISALLOW_RW_FOR_O_EVTONLY) ?
3097 IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_ON :
3098 IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_DEFAULT;
3099 iop_param->iop_policy = policy;
3100 goto out_ok;
3101 case IOPOL_CMD_SET:
3102 break;
3103 default:
3104 goto out;
3105 }
3106
3107 if (!IOCurrentTaskHasEntitlement(DISALLOW_RW_FOR_O_EVTONLY_ENTITLEMENT)) {
3108 error = EPERM;
3109 goto out;
3110 }
3111
3112 /* Once set, we don't allow the process to clear it. */
3113 switch (policy) {
3114 case IOPOL_VFS_DISALLOW_RW_FOR_O_EVTONLY_ON:
3115 os_atomic_or(&p->p_vfs_iopolicy,
3116 P_VFS_IOPOLICY_DISALLOW_RW_FOR_O_EVTONLY, relaxed);
3117 break;
3118 default:
3119 goto out;
3120 }
3121
3122 out_ok:
3123 error = 0;
3124 out:
3125 return error;
3126 }
3127
3128 static int
iopolicysys_vfs_altlink(struct proc * p,int cmd,int scope,int policy,struct _iopol_param_t * iop_param)3129 iopolicysys_vfs_altlink(struct proc *p, int cmd, int scope, int policy,
3130 struct _iopol_param_t *iop_param)
3131 {
3132 if (scope != IOPOL_SCOPE_PROCESS) {
3133 return EINVAL;
3134 }
3135
3136 if (cmd == IOPOL_CMD_GET) {
3137 policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_ALTLINK) ?
3138 IOPOL_VFS_ALTLINK_ENABLED : IOPOL_VFS_ALTLINK_DISABLED;
3139 iop_param->iop_policy = policy;
3140 return 0;
3141 }
3142
3143 /* Once set, we don't allow the process to clear it. */
3144 if (policy == IOPOL_VFS_ALTLINK_ENABLED) {
3145 os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ALTLINK, relaxed);
3146 return 0;
3147 }
3148
3149 return EINVAL;
3150 }
3151
3152 static int
iopolicysys_vfs_nocache_write_fs_blksize(struct proc * p,int cmd,int scope,int policy,struct _iopol_param_t * iop_param)3153 iopolicysys_vfs_nocache_write_fs_blksize(struct proc *p, int cmd, int scope, int policy,
3154 struct _iopol_param_t *iop_param)
3155 {
3156 if (scope != IOPOL_SCOPE_PROCESS) {
3157 return EINVAL;
3158 }
3159
3160 if (cmd == IOPOL_CMD_GET) {
3161 policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_NOCACHE_WRITE_FS_BLKSIZE) ?
3162 IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_ON : IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_DEFAULT;
3163 iop_param->iop_policy = policy;
3164 return 0;
3165 }
3166
3167 /* Once set, we don't allow the process to clear it. */
3168 if (policy == IOPOL_VFS_NOCACHE_WRITE_FS_BLKSIZE_ON) {
3169 os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_NOCACHE_WRITE_FS_BLKSIZE, relaxed);
3170 return 0;
3171 }
3172
3173 return EINVAL;
3174 }
3175
3176 static inline void
set_thread_support_long_paths(struct uthread * ut,int policy)3177 set_thread_support_long_paths(struct uthread *ut, int policy)
3178 {
3179 if (policy == IOPOL_VFS_SUPPORT_LONG_PATHS_ON) {
3180 os_atomic_or(&ut->uu_flag, UT_SUPPORT_LONG_PATHS, relaxed);
3181 } else {
3182 os_atomic_andnot(&ut->uu_flag, UT_SUPPORT_LONG_PATHS, relaxed);
3183 }
3184 }
3185
3186 static inline int
get_thread_support_long_paths(struct uthread * ut)3187 get_thread_support_long_paths(struct uthread *ut)
3188 {
3189 return (os_atomic_load(&ut->uu_flag, relaxed) & UT_SUPPORT_LONG_PATHS) ?
3190 IOPOL_VFS_SUPPORT_LONG_PATHS_ON : IOPOL_VFS_SUPPORT_LONG_PATHS_DEFAULT;
3191 }
3192
3193 static inline void
set_proc_support_long_paths(struct proc * p,int policy)3194 set_proc_support_long_paths(struct proc *p, int policy)
3195 {
3196 if (policy == IOPOL_VFS_SUPPORT_LONG_PATHS_ON) {
3197 os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_SUPPORT_LONG_PATHS, relaxed);
3198 } else {
3199 os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_SUPPORT_LONG_PATHS, relaxed);
3200 }
3201 }
3202
3203 static inline int
get_proc_support_long_paths(struct proc * p)3204 get_proc_support_long_paths(struct proc *p)
3205 {
3206 return (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_SUPPORT_LONG_PATHS) ?
3207 IOPOL_VFS_SUPPORT_LONG_PATHS_ON : IOPOL_VFS_SUPPORT_LONG_PATHS_DEFAULT;
3208 }
3209
3210 #define SUPPORT_LONG_PATHS_ENTITLEMENT \
3211 "com.apple.private.vfs.support-long-paths"
3212
3213 static int
iopolicysys_vfs_support_long_paths(struct proc * p,int cmd,int scope,int policy,struct _iopol_param_t * iop_param)3214 iopolicysys_vfs_support_long_paths(struct proc *p, int cmd, int scope,
3215 int policy, struct _iopol_param_t *iop_param)
3216 {
3217 thread_t thread;
3218 int error = 0;
3219
3220 /* Validate scope */
3221 switch (scope) {
3222 case IOPOL_SCOPE_THREAD:
3223 thread = current_thread();
3224 break;
3225 case IOPOL_SCOPE_PROCESS:
3226 thread = THREAD_NULL;
3227 break;
3228 default:
3229 error = EINVAL;
3230 goto out;
3231 }
3232
3233 /* Validate policy */
3234 if (cmd == IOPOL_CMD_SET) {
3235 switch (policy) {
3236 case IOPOL_VFS_SUPPORT_LONG_PATHS_DEFAULT:
3237 case IOPOL_VFS_SUPPORT_LONG_PATHS_ON:
3238 if (!IOCurrentTaskHasEntitlement(SUPPORT_LONG_PATHS_ENTITLEMENT)) {
3239 error = EPERM;
3240 goto out;
3241 }
3242 break;
3243 default:
3244 error = EINVAL;
3245 goto out;
3246 }
3247 }
3248
3249 /* Perform command */
3250 switch (cmd) {
3251 case IOPOL_CMD_SET:
3252 if (thread != THREAD_NULL) {
3253 set_thread_support_long_paths(get_bsdthread_info(thread), policy);
3254 } else {
3255 set_proc_support_long_paths(p, policy);
3256 }
3257 break;
3258 case IOPOL_CMD_GET:
3259 if (thread != THREAD_NULL) {
3260 policy = get_thread_support_long_paths(get_bsdthread_info(thread));
3261 } else {
3262 policy = get_proc_support_long_paths(p);
3263 }
3264 iop_param->iop_policy = policy;
3265 break;
3266 default:
3267 error = EINVAL; /* unknown command */
3268 break;
3269 }
3270
3271 out:
3272 return error;
3273 }
3274
3275 #define ENTITLED_RESERVE_ACCESS_ENTITLEMENT \
3276 "com.apple.private.vfs.entitled-reserve-access"
3277 static int
iopolicysys_vfs_entitled_reserve_access(struct proc * p,int cmd,int scope,int policy,struct _iopol_param_t * iop_param)3278 iopolicysys_vfs_entitled_reserve_access(struct proc *p, int cmd, int scope,
3279 int policy, struct _iopol_param_t *iop_param)
3280 {
3281 struct uthread *ut;
3282
3283 switch (scope) {
3284 case IOPOL_SCOPE_THREAD:
3285 ut = get_bsdthread_info(current_thread());
3286 break;
3287 case IOPOL_SCOPE_PROCESS:
3288 ut = NULL;
3289 break;
3290 default:
3291 return EINVAL;
3292 }
3293
3294 if (cmd == IOPOL_CMD_GET) {
3295 if (scope == IOPOL_SCOPE_THREAD) {
3296 policy = (os_atomic_load(&ut->uu_flag, relaxed) & UT_FS_ENTITLED_RESERVE_ACCESS) ?
3297 IOPOL_VFS_ENTITLED_RESERVE_ACCESS_ON : IOPOL_VFS_ENTITLED_RESERVE_ACCESS_OFF;
3298 } else {
3299 policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_ENTITLED_RESERVE_ACCESS) ?
3300 IOPOL_VFS_ENTITLED_RESERVE_ACCESS_ON : IOPOL_VFS_ENTITLED_RESERVE_ACCESS_OFF;
3301 }
3302 iop_param->iop_policy = policy;
3303 return 0;
3304 }
3305
3306 if (cmd != IOPOL_CMD_SET) {
3307 return EINVAL;
3308 }
3309
3310 if (!IOCurrentTaskHasEntitlement(ENTITLED_RESERVE_ACCESS_ENTITLEMENT)) {
3311 return EPERM;
3312 }
3313
3314 switch (policy) {
3315 case IOPOL_VFS_ENTITLED_RESERVE_ACCESS_OFF:
3316 if (scope == IOPOL_SCOPE_THREAD) {
3317 os_atomic_andnot(&ut->uu_flag, UT_FS_ENTITLED_RESERVE_ACCESS, relaxed);
3318 } else {
3319 os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ENTITLED_RESERVE_ACCESS, relaxed);
3320 }
3321 break;
3322 case IOPOL_VFS_ENTITLED_RESERVE_ACCESS_ON:
3323 if (scope == IOPOL_SCOPE_THREAD) {
3324 os_atomic_or(&ut->uu_flag, UT_FS_ENTITLED_RESERVE_ACCESS, relaxed);
3325 } else {
3326 os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_ENTITLED_RESERVE_ACCESS, relaxed);
3327 }
3328 break;
3329 default:
3330 return EINVAL;
3331 }
3332
3333 return 0;
3334 }
3335
3336 void
proc_apply_task_networkbg(int pid,thread_t thread)3337 proc_apply_task_networkbg(int pid, thread_t thread)
3338 {
3339 proc_t p = proc_find(pid);
3340
3341 if (p != PROC_NULL) {
3342 do_background_socket(p, thread);
3343 proc_rele(p);
3344 }
3345 }
3346
/*
 * Populate *ru with resource-usage statistics for proc p.
 *
 * flavor selects how much of the structure is filled in (RUSAGE_INFO_V0
 * through RUSAGE_INFO_V6).  The switch deliberately falls through from the
 * newest flavor down to V0, so each case contributes only the fields that
 * were introduced by that version; *ru is zeroed up front so any fields
 * beyond the requested flavor read as 0.  ru must point to storage large
 * enough for rusage_info_current (the newest layout) since the whole
 * structure is memset here.
 */
void
gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor)
{
	struct rusage_info_child *ri_child;

	assert(p->p_stats != NULL);
	memset(ru, 0, sizeof(*ru));
	switch (flavor) {
	case RUSAGE_INFO_V6:
		/* V6: neural-engine footprint counters. */
		ru->ri_neural_footprint = get_task_neural_nofootprint_total(proc_task(p));
		ru->ri_lifetime_max_neural_footprint = get_task_neural_nofootprint_total_lifetime_max(proc_task(p));
#if CONFIG_LEDGER_INTERVAL_MAX
		/* FALSE: read the interval max without resetting it. */
		ru->ri_interval_max_neural_footprint = get_task_neural_nofootprint_total_interval_max(proc_task(p), FALSE);
#endif
		/* Any P-specific resource counters are captured in fill_task_rusage. */
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V5:
#if __has_feature(ptrauth_calls)
		/* V5: report whether the shared region was re-slid for this task. */
		if (vm_shared_region_is_reslide(proc_task(p))) {
			ru->ri_flags |= RU_PROC_RUNS_RESLIDE;
		}
#endif /* __has_feature(ptrauth_calls) */
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V4:
		/* V4: logical writes and physical-footprint high-water marks. */
		ru->ri_logical_writes = get_task_logical_writes(proc_task(p), false);
		ru->ri_lifetime_max_phys_footprint = get_task_phys_footprint_lifetime_max(proc_task(p));
#if CONFIG_LEDGER_INTERVAL_MAX
		/* FALSE: read the interval max without resetting it. */
		ru->ri_interval_max_phys_footprint = get_task_phys_footprint_interval_max(proc_task(p), FALSE);
#endif
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V3:
		/* V3: QoS and billed-CPU usage. */
		fill_task_qos_rusage(proc_task(p), ru);
		fill_task_billed_usage(proc_task(p), ru);
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V2:
		/* V2: disk I/O statistics. */
		fill_task_io_rusage(proc_task(p), ru);
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V1:
		/*
		 * V1: accumulated statistics of reaped children.
		 * p->p_stats->ri_child statistics are protected under proc lock.
		 */
		proc_lock(p);

		ri_child = &(p->p_stats->ri_child);
		ru->ri_child_user_time = ri_child->ri_child_user_time;
		ru->ri_child_system_time = ri_child->ri_child_system_time;
		ru->ri_child_pkg_idle_wkups = ri_child->ri_child_pkg_idle_wkups;
		ru->ri_child_interrupt_wkups = ri_child->ri_child_interrupt_wkups;
		ru->ri_child_pageins = ri_child->ri_child_pageins;
		ru->ri_child_elapsed_abstime = ri_child->ri_child_elapsed_abstime;

		proc_unlock(p);
		OS_FALLTHROUGH;

	case RUSAGE_INFO_V0:
		/* V0: executable UUID, task-level usage, process start time. */
		proc_getexecutableuuid(p, (unsigned char *)&ru->ri_uuid, sizeof(ru->ri_uuid));
		fill_task_rusage(proc_task(p), ru);
		ru->ri_proc_start_abstime = p->p_stats->ps_start;
	}
}
3412
3413 int
proc_get_rusage(proc_t p,int flavor,user_addr_t buffer,__unused int is_zombie)3414 proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie)
3415 {
3416 rusage_info_current ri_current = {};
3417
3418 size_t size = 0;
3419
3420 switch (flavor) {
3421 case RUSAGE_INFO_V0:
3422 size = sizeof(struct rusage_info_v0);
3423 break;
3424
3425 case RUSAGE_INFO_V1:
3426 size = sizeof(struct rusage_info_v1);
3427 break;
3428
3429 case RUSAGE_INFO_V2:
3430 size = sizeof(struct rusage_info_v2);
3431 break;
3432
3433 case RUSAGE_INFO_V3:
3434 size = sizeof(struct rusage_info_v3);
3435 break;
3436
3437 case RUSAGE_INFO_V4:
3438 size = sizeof(struct rusage_info_v4);
3439 break;
3440
3441 case RUSAGE_INFO_V5:
3442 size = sizeof(struct rusage_info_v5);
3443 break;
3444
3445 case RUSAGE_INFO_V6:
3446 size = sizeof(struct rusage_info_v6);
3447 break;
3448 default:
3449 return EINVAL;
3450 }
3451
3452 if (size == 0) {
3453 return EINVAL;
3454 }
3455
3456 /*
3457 * If task is still alive, collect info from the live task itself.
3458 * Otherwise, look to the cached info in the zombie proc.
3459 */
3460 if (p->p_ru) {
3461 return copyout(&p->p_ru->ri, buffer, size);
3462 } else {
3463 gather_rusage_info(p, &ri_current, flavor);
3464 ri_current.ri_proc_exit_abstime = 0;
3465 return copyout(&ri_current, buffer, size);
3466 }
3467 }
3468
3469 static int
mach_to_bsd_rv(int mach_rv)3470 mach_to_bsd_rv(int mach_rv)
3471 {
3472 int bsd_rv = 0;
3473
3474 switch (mach_rv) {
3475 case KERN_SUCCESS:
3476 bsd_rv = 0;
3477 break;
3478 case KERN_INVALID_ARGUMENT:
3479 bsd_rv = EINVAL;
3480 break;
3481 default:
3482 panic("unknown error %#x", mach_rv);
3483 }
3484
3485 return bsd_rv;
3486 }
3487
/*
 * Resource limit controls (proc_rlimit_control(2)).
 *
 * uap->flavor available flavors:
 *
 * RLIMIT_WAKEUPS_MONITOR    (no longer supported; silently accepted)
 * RLIMIT_CPU_USAGE_MONITOR
 * RLIMIT_THREAD_CPULIMITS   (current thread only; uap->pid must be -1)
 * RLIMIT_FOOTPRINT_INTERVAL (only with CONFIG_LEDGER_INTERVAL_MAX)
 *
 * The return value of this function becomes the errno seen by the
 * userland caller.
 */
int
proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *uap, __unused int32_t *retval)
{
	proc_t targetp;
	int error = 0;
	uint32_t cpumon_flags;
	uint32_t cpulimits_flags;
	kauth_cred_t my_cred, target_cred;
#if CONFIG_LEDGER_INTERVAL_MAX
	uint32_t footprint_interval_flags;
	uint64_t interval_max_footprint;
#endif /* CONFIG_LEDGER_INTERVAL_MAX */

	/* -1 implicitly means our own process (perhaps even the current thread for per-thread attributes) */
	if (uap->pid == -1) {
		targetp = proc_self();
	} else {
		targetp = proc_find(uap->pid);
	}

	/* proc_self() can return NULL for an exiting process */
	if (targetp == PROC_NULL) {
		return ESRCH;
	}

	my_cred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	/*
	 * Permission check: allowed if the caller is superuser, has real
	 * uid 0, or shares an effective/real uid with the target.
	 */
	if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) &&
	    kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred)) {
		proc_rele(targetp);
		kauth_cred_unref(&target_cred);
		return EACCES;
	}

	switch (uap->flavor) {
	case RLIMIT_WAKEUPS_MONITOR:
		// Ignore requests silently here, no longer supported.
		error = 0;
		break;
	case RLIMIT_CPU_USAGE_MONITOR:
		cpumon_flags = (uint32_t)uap->arg; // XXX temporarily stashing flags in argp (12592127)
		error = mach_to_bsd_rv(task_cpu_usage_monitor_ctl(proc_task(targetp), &cpumon_flags));
		break;
	case RLIMIT_THREAD_CPULIMITS:
		cpulimits_flags = (uint32_t)uap->arg; // only need a limited set of bits, pass in void * argument

		/* Per-thread limits apply only to the calling thread. */
		if (uap->pid != -1) {
			error = EINVAL;
			break;
		}

		uint8_t percent = 0;
		uint32_t ms_refill = 0;
		uint64_t ns_refill;

		percent = (uint8_t)(cpulimits_flags & 0xffU);   /* low 8 bits for percent */
		ms_refill = (cpulimits_flags >> 8) & 0xffffff;  /* next 24 bits represent ms refill value */
		/* Reject 0% and >= 100% — only partial CPU limits make sense. */
		if (percent >= 100 || percent == 0) {
			error = EINVAL;
			break;
		}

		ns_refill = ((uint64_t)ms_refill) * NSEC_PER_MSEC;

		error = mach_to_bsd_rv(thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ns_refill));
		break;

#if CONFIG_LEDGER_INTERVAL_MAX
	case RLIMIT_FOOTPRINT_INTERVAL:
		footprint_interval_flags = (uint32_t)uap->arg; // XXX temporarily stashing flags in argp (12592127)
		/*
		 * There is currently only one option for this flavor.
		 */
		if ((footprint_interval_flags & FOOTPRINT_INTERVAL_RESET) == 0) {
			error = EINVAL;
			break;
		}
		/*
		 * Both calls are made for their reset side effect (the TRUE
		 * argument); the returned previous maxima are intentionally
		 * discarded — the second assignment overwrites the first.
		 */
		interval_max_footprint = get_task_phys_footprint_interval_max(proc_task(targetp), TRUE);
		interval_max_footprint = get_task_neural_nofootprint_total_interval_max(proc_task(targetp), TRUE);
		break;

#endif /* CONFIG_LEDGER_INTERVAL_MAX */
	default:
		error = EINVAL;
		break;
	}

	/* Drop the references taken above on every path. */
	proc_rele(targetp);
	kauth_cred_unref(&target_cred);

	/*
	 * Return value from this function becomes errno to userland caller.
	 */
	return error;
}
3595
3596 /*
3597 * Return the current amount of CPU consumed by this thread (in either user or kernel mode)
3598 */
3599 int
thread_selfusage(struct proc * p __unused,struct thread_selfusage_args * uap __unused,uint64_t * retval)3600 thread_selfusage(struct proc *p __unused, struct thread_selfusage_args *uap __unused, uint64_t *retval)
3601 {
3602 uint64_t runtime;
3603
3604 runtime = thread_get_runtime_self();
3605 *retval = runtime;
3606
3607 return 0;
3608 }
3609