1 /*
2 * Copyright (c) 2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/filedesc.h>
32 #include <sys/kernel.h>
33 #include <sys/file_internal.h>
34 #include <sys/guarded.h>
35 #include <sys/sysproto.h>
36 #include <sys/vnode.h>
37 #include <sys/vnode_internal.h>
38 #include <sys/uio_internal.h>
39 #include <sys/ubc_internal.h>
40 #include <vfs/vfs_support.h>
41 #include <security/audit/audit.h>
42 #include <sys/syscall.h>
43 #include <sys/kauth.h>
44 #include <sys/kdebug.h>
45 #include <stdbool.h>
46 #include <vm/vm_protos.h>
47 #include <libkern/section_keywords.h>
48
49 #include <kern/kalloc.h>
50 #include <kern/task.h>
51 #include <kern/exc_guard.h>
52
53 #if CONFIG_MACF && CONFIG_VNGUARD
54 #include <security/mac.h>
55 #include <security/mac_framework.h>
56 #include <security/mac_policy.h>
57 #include <pexpert/pexpert.h>
58 #include <sys/sysctl.h>
59 #include <sys/reason.h>
60 #endif
61
62 #define f_flag fp_glob->fg_flag
63 extern int writev_uio(struct proc *p, int fd, user_addr_t user_iovp,
64 int iovcnt, off_t offset, int flags, guardid_t *puguard,
65 user_ssize_t *retval);
66 extern int write_internal(struct proc *p, int fd, user_addr_t buf,
67 user_size_t nbyte, off_t offset, int flags, guardid_t *puguard,
68 user_ssize_t *retval);
69 extern int exit_with_guard_exception(void *p, mach_exception_data_type_t code,
70 mach_exception_data_type_t subcode);
71 /*
72 * Experimental guarded file descriptor support.
73 */
74
75 kern_return_t task_exception_notify(exception_type_t exception,
76 mach_exception_data_type_t code, mach_exception_data_type_t subcode);
77
78 #define GUARD_REQUIRED (GUARD_DUP)
79 #define GUARD_ALL (GUARD_REQUIRED | \
80 (GUARD_CLOSE | GUARD_SOCKET_IPC | GUARD_FILEPORT | GUARD_WRITE))
81
82 static KALLOC_TYPE_DEFINE(fp_guard_zone, struct fileproc_guard, KT_DEFAULT);
83
/*
 * Creation-time arguments threaded through to guarded_fileproc_init()
 * by the guarded_* entry points (guarded_open_np, guarded_kqueue_np,
 * falloc_guarded, ...).
 */
struct gfp_crarg {
	guardid_t gca_guard;    /* caller-supplied guard value; callers reject zero */
	uint16_t gca_attrs;     /* GUARD_* attribute bits to enforce; never zero */
};
88
89 static struct fileproc_guard *
guarded_fileproc_alloc(guardid_t guard)90 guarded_fileproc_alloc(guardid_t guard)
91 {
92 struct fileproc_guard *fpg;
93
94 fpg = zalloc_flags(fp_guard_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
95 fpg->fpg_guard = guard;
96 return fpg;
97 }
98
99 static void
guarded_fileproc_init(struct fileproc * fp,void * initarg)100 guarded_fileproc_init(struct fileproc *fp, void *initarg)
101 {
102 struct gfp_crarg *arg = initarg;
103
104 assert(arg->gca_attrs);
105 fp->fp_guard = guarded_fileproc_alloc(arg->gca_guard);
106 fp->fp_guard_attrs = arg->gca_attrs;
107 }
108
109 /*
110 * This is called from fdt_fork(),
111 * where it needs to copy a guarded
112 * fd to the new shadow proc.
113 */
114 void
guarded_fileproc_copy_guard(struct fileproc * ofp,struct fileproc * nfp)115 guarded_fileproc_copy_guard(struct fileproc *ofp, struct fileproc *nfp)
116 {
117 struct gfp_crarg arg = {
118 .gca_guard = ofp->fp_guard->fpg_guard,
119 .gca_attrs = ofp->fp_guard_attrs
120 };
121 guarded_fileproc_init(nfp, &arg);
122 }
123
/*
 * This is called from fileproc_free(),
 * which is why it is safe to call
 * without holding the proc_fdlock.
 *
 * Strips the guard from a fileproc: clears the attribute bits,
 * restores fp_wset from the guard record, then returns the record
 * to the zone.  NOTE(review): the fp_wset restore suggests fp_guard
 * and fp_wset overlay the same storage in struct fileproc, with the
 * select set stashed in fpg_wset while guarded — confirm against
 * the fileproc definition.
 */
void
guarded_fileproc_unguard(struct fileproc *fp)
{
	struct fileproc_guard *fpg = fp->fp_guard;

	fp->fp_guard_attrs = 0;
	fp->fp_wset = fpg->fpg_wset;

	zfree(fp_guard_zone, fpg);
}
139
140 static int
fp_lookup_guarded_locked(proc_t p,int fd,guardid_t guard,struct fileproc ** fpp)141 fp_lookup_guarded_locked(proc_t p, int fd, guardid_t guard,
142 struct fileproc **fpp)
143 {
144 int error;
145 struct fileproc *fp;
146
147 if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {
148 return error;
149 }
150
151 if (fp->fp_guard_attrs == 0) {
152 (void) fp_drop(p, fd, fp, 1);
153 return EINVAL;
154 }
155
156 if (guard != fp->fp_guard->fpg_guard) {
157 (void) fp_drop(p, fd, fp, 1);
158 return EPERM; /* *not* a mismatch exception */
159 }
160
161 *fpp = fp;
162 return 0;
163 }
164
165 int
fp_lookup_guarded(proc_t p,int fd,guardid_t guard,struct fileproc ** fpp,int locked)166 fp_lookup_guarded(proc_t p, int fd, guardid_t guard,
167 struct fileproc **fpp, int locked)
168 {
169 int error;
170
171 if (!locked) {
172 proc_fdlock_spin(p);
173 }
174
175 error = fp_lookup_guarded_locked(p, fd, guard, fpp);
176
177 if (!locked) {
178 proc_fdunlock(p);
179 }
180
181 return error;
182 }
183
184 /*
185 * Expected use pattern:
186 *
187 * if (fp_isguarded(fp, GUARD_CLOSE)) {
188 * error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
189 * proc_fdunlock(p);
190 * return error;
191 * }
192 */
193 int
fp_isguarded(struct fileproc * fp,u_int attrs)194 fp_isguarded(struct fileproc *fp, u_int attrs)
195 {
196 return fp->fp_guard_attrs && (fp->fp_guard_attrs & attrs) == attrs;
197 }
198
199 extern char *proc_name_address(void *p);
200
201 int
fp_guard_exception(proc_t p,int fd,struct fileproc * fp,u_int flavor)202 fp_guard_exception(proc_t p, int fd, struct fileproc *fp, u_int flavor)
203 {
204 /* all fp guard fields protected via proc_fdlock() */
205 proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
206
207 mach_exception_code_t code = 0;
208 EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_FD);
209 EXC_GUARD_ENCODE_FLAVOR(code, flavor);
210 EXC_GUARD_ENCODE_TARGET(code, fd);
211 mach_exception_subcode_t subcode = fp->fp_guard->fpg_guard;
212
213 assert(fp->fp_guard_attrs);
214
215 thread_t t = current_thread();
216 thread_guard_violation(t, code, subcode, TRUE);
217 return EPERM;
218 }
219
220 /*
221 * (Invoked before returning to userland from the syscall handler.)
222 */
223 void
fd_guard_ast(thread_t __unused t,mach_exception_code_t code,mach_exception_subcode_t subcode)224 fd_guard_ast(
225 thread_t __unused t,
226 mach_exception_code_t code,
227 mach_exception_subcode_t subcode)
228 {
229 /*
230 * Check if anyone has registered for Synchronous EXC_GUARD, if yes then,
231 * deliver it synchronously and then kill the process, else kill the process
232 * and deliver the exception via EXC_CORPSE_NOTIFY.
233 */
234 if (task_exception_notify(EXC_GUARD, code, subcode) == KERN_SUCCESS) {
235 psignal(current_proc(), SIGKILL);
236 } else {
237 exit_with_guard_exception(current_proc(), code, subcode);
238 }
239 }
240
241 /*
242 * Experimental guarded file descriptor SPIs
243 */
244
245 /*
246 * int guarded_open_np(const char *pathname, int flags,
247 * const guardid_t *guard, u_int guardflags, ...);
248 *
249 * In this initial implementation, GUARD_DUP must be specified.
250 * GUARD_CLOSE, GUARD_SOCKET_IPC and GUARD_FILEPORT are optional.
251 *
252 * If GUARD_DUP wasn't specified, then we'd have to do the (extra) work
253 * to allow dup-ing a descriptor to inherit the guard onto the new
254 * descriptor. (Perhaps GUARD_DUP behaviours should just always be true
255 * for a guarded fd? Or, more sanely, all the dup operations should
256 * just always propagate the guard?)
257 *
258 * Guarded descriptors are always close-on-exec, and GUARD_CLOSE
259 * requires close-on-fork; O_CLOEXEC must be set in flags.
260 * This setting is immutable; attempts to clear the flag will
261 * cause a guard exception.
262 *
263 * XXX It's somewhat broken that change_fdguard_np() can completely
264 * remove the guard and thus revoke down the immutability
265 * promises above. Ick.
266 */
267 int
guarded_open_np(proc_t p,struct guarded_open_np_args * uap,int32_t * retval)268 guarded_open_np(proc_t p, struct guarded_open_np_args *uap, int32_t *retval)
269 {
270 if ((uap->flags & O_CLOEXEC) == 0) {
271 return EINVAL;
272 }
273
274 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
275 ((uap->guardflags & ~GUARD_ALL) != 0)) {
276 return EINVAL;
277 }
278
279 int error;
280 struct gfp_crarg crarg = {
281 .gca_attrs = (uint16_t)uap->guardflags
282 };
283
284 if ((error = copyin(uap->guard,
285 &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) {
286 return error;
287 }
288
289 /*
290 * Disallow certain guard values -- is zero enough?
291 */
292 if (crarg.gca_guard == 0) {
293 return EINVAL;
294 }
295
296 struct vnode_attr va;
297 struct nameidata nd;
298 vfs_context_t ctx = vfs_context_current();
299 int cmode;
300
301 VATTR_INIT(&va);
302 cmode = ((uap->mode & ~p->p_fd.fd_cmask) & ALLPERMS) & ~S_ISTXT;
303 VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
304
305 NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
306 uap->path, ctx);
307
308 return open1(ctx, &nd, uap->flags | O_CLOFORK, &va,
309 guarded_fileproc_init, &crarg, retval, AUTH_OPEN_NOAUTHFD);
310 }
311
312 /*
313 * int guarded_open_dprotected_np(const char *pathname, int flags,
314 * const guardid_t *guard, u_int guardflags, int dpclass, int dpflags, ...);
315 *
316 * This SPI is extension of guarded_open_np() to include dataprotection class on creation
317 * in "dpclass" and dataprotection flags 'dpflags'. Otherwise behaviors are same as in
318 * guarded_open_np()
319 */
320 int
guarded_open_dprotected_np(proc_t p,struct guarded_open_dprotected_np_args * uap,int32_t * retval)321 guarded_open_dprotected_np(proc_t p, struct guarded_open_dprotected_np_args *uap, int32_t *retval)
322 {
323 if ((uap->flags & O_CLOEXEC) == 0) {
324 return EINVAL;
325 }
326
327 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
328 ((uap->guardflags & ~GUARD_ALL) != 0)) {
329 return EINVAL;
330 }
331
332 int error;
333 struct gfp_crarg crarg = {
334 .gca_attrs = (uint16_t)uap->guardflags
335 };
336
337 if ((error = copyin(uap->guard,
338 &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) {
339 return error;
340 }
341
342 /*
343 * Disallow certain guard values -- is zero enough?
344 */
345 if (crarg.gca_guard == 0) {
346 return EINVAL;
347 }
348
349 struct vnode_attr va;
350 struct nameidata nd;
351 vfs_context_t ctx = vfs_context_current();
352 int cmode;
353
354 VATTR_INIT(&va);
355 cmode = ((uap->mode & ~p->p_fd.fd_cmask) & ALLPERMS) & ~S_ISTXT;
356 VATTR_SET(&va, va_mode, cmode & ACCESSPERMS);
357
358 NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
359 uap->path, ctx);
360
361 /*
362 * Initialize the extra fields in vnode_attr to pass down dataprotection
363 * extra fields.
364 * 1. target cprotect class.
365 * 2. set a flag to mark it as requiring open-raw-encrypted semantics.
366 */
367 if (uap->flags & O_CREAT) {
368 VATTR_SET(&va, va_dataprotect_class, uap->dpclass);
369 }
370
371 if (uap->dpflags & (O_DP_GETRAWENCRYPTED | O_DP_GETRAWUNENCRYPTED)) {
372 if (uap->flags & (O_RDWR | O_WRONLY)) {
373 /* Not allowed to write raw encrypted bytes */
374 return EINVAL;
375 }
376 if (uap->dpflags & O_DP_GETRAWENCRYPTED) {
377 VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
378 }
379 if (uap->dpflags & O_DP_GETRAWUNENCRYPTED) {
380 VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWUNENCRYPTED);
381 }
382 }
383
384 return open1(ctx, &nd, uap->flags | O_CLOFORK, &va,
385 guarded_fileproc_init, &crarg, retval, AUTH_OPEN_NOAUTHFD);
386 }
387
388 /*
389 * int guarded_kqueue_np(const guardid_t *guard, u_int guardflags);
390 *
391 * Create a guarded kqueue descriptor with guardid and guardflags.
392 *
393 * Same restrictions on guardflags as for guarded_open_np().
394 * All kqueues are -always- close-on-exec and close-on-fork by themselves
395 * and are not sendable.
396 */
397 int
guarded_kqueue_np(proc_t p,struct guarded_kqueue_np_args * uap,int32_t * retval)398 guarded_kqueue_np(proc_t p, struct guarded_kqueue_np_args *uap, int32_t *retval)
399 {
400 if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
401 ((uap->guardflags & ~GUARD_ALL) != 0)) {
402 return EINVAL;
403 }
404
405 int error;
406 struct gfp_crarg crarg = {
407 .gca_attrs = (uint16_t)uap->guardflags
408 };
409
410 if ((error = copyin(uap->guard,
411 &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) {
412 return error;
413 }
414
415 if (crarg.gca_guard == 0) {
416 return EINVAL;
417 }
418
419 return kqueue_internal(p, guarded_fileproc_init, &crarg, retval);
420 }
421
/*
 * int guarded_close_np(int fd, const guardid_t *guard);
 *
 * Close a guarded file descriptor.  The caller must present the
 * matching guardid: an unguarded fd fails with EINVAL, a guard
 * mismatch with EPERM (both via fp_lookup_guarded()).
 */
int
guarded_close_np(proc_t p, struct guarded_close_np_args *uap,
    __unused int32_t *retval)
{
	struct fileproc *fp;
	int fd = uap->fd;
	int error;
	guardid_t uguard;

	AUDIT_SYSCLOSE(p, fd);

	/* fetch the user-supplied guard value */
	if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
		return error;
	}

	proc_fdlock(p);
	/* verify fd is guarded with exactly this guardid (takes an iocount) */
	if ((error = fp_lookup_guarded(p, fd, uguard, &fp, 1)) != 0) {
		proc_fdunlock(p);
		return error;
	}
	/* release the lookup iocount before closing */
	fp_drop(p, fd, fp, 1);
	/* fp_close_and_unlock() drops the fdlock before returning */
	return fp_close_and_unlock(p, fd, fp, 0);
}
448
449 /*
450 * int
451 * change_fdguard_np(int fd, const guardid_t *guard, u_int guardflags,
452 * const guardid_t *nguard, u_int nguardflags, int *fdflagsp);
453 *
454 * Given a file descriptor, atomically exchange <guard, guardflags> for
455 * a new guard <nguard, nguardflags>, returning the previous fd
456 * flags (see fcntl:F_SETFD) in *fdflagsp.
457 *
458 * This syscall can be used to either (a) add a new guard to an existing
459 * unguarded file descriptor (b) remove the old guard from an existing
460 * guarded file descriptor or (c) change the guard (guardid and/or
461 * guardflags) on a guarded file descriptor.
462 *
463 * If 'guard' is NULL, fd must be unguarded at entry. If the call completes
464 * successfully the fd will be guarded with <nguard, nguardflags>.
465 *
466 * Guarding a file descriptor has some side-effects on the "fp_flags"
467 * associated with the descriptor - in particular FD_CLOEXEC is
468 * forced ON unconditionally, and FD_CLOFORK is forced ON by GUARD_CLOSE.
469 * Callers who wish to subsequently restore the state of the fd should save
470 * the value of *fdflagsp after a successful invocation.
471 *
472 * If 'nguard' is NULL, fd must be guarded at entry, <guard, guardflags>
473 * must match with what's already guarding the descriptor, and the
474 * result will be to completely remove the guard.
475 *
476 * If the descriptor is guarded, and neither 'guard' nor 'nguard' is NULL
477 * and <guard, guardflags> matches what's already guarding the descriptor,
478 * then <nguard, nguardflags> becomes the new guard. In this case, even if
479 * the GUARD_CLOSE flag is being cleared, it is still possible to continue
480 * to keep FD_CLOFORK on the descriptor by passing FD_CLOFORK via fdflagsp.
481 *
482 * (File descriptors whose underlying fileglobs are marked FG_CONFINED are
483 * still close-on-fork, regardless of the setting of FD_CLOFORK.)
484 *
485 * Example 1: Guard an unguarded descriptor during a set of operations,
486 * then restore the original state of the descriptor.
487 *
488 * int sav_flags = 0;
489 * change_fdguard_np(fd, NULL, 0, &myguard, GUARD_CLOSE, &sav_flags);
490 * // do things with now guarded 'fd'
491 * change_fdguard_np(fd, &myguard, GUARD_CLOSE, NULL, 0, &sav_flags);
492 * // fd now unguarded.
493 *
494 * Example 2: Change the guard of a guarded descriptor during a set of
495 * operations, then restore the original state of the descriptor.
496 *
497 * int sav_flags = (gdflags & GUARD_CLOSE) ? FD_CLOFORK : 0;
498 * change_fdguard_np(fd, &gd, gdflags, &myguard, GUARD_CLOSE, &sav_flags);
499 * // do things with 'fd' with a different guard
500 * change_fdguard_np(fd, &myg, GUARD_CLOSE, &gd, gdflags, &sav_flags);
501 * // back to original guarded state
502 *
503 * XXX This SPI is too much of a chainsaw and should be revised.
504 */
505
int
change_fdguard_np(proc_t p, struct change_fdguard_np_args *uap,
    __unused int32_t *retval)
{
	struct fileproc_guard *fpg = NULL;
	struct fileproc *fp;
	int fd = uap->fd;
	int error;
	guardid_t oldg = 0, newg = 0;
	int nfdflags = 0;

	/* copy in whichever of the three optional user pointers are set */
	if (0 != uap->guard &&
	    0 != (error = copyin(uap->guard, &oldg, sizeof(oldg)))) {
		return error;         /* can't copyin current guard */
	}
	if (0 != uap->nguard &&
	    0 != (error = copyin(uap->nguard, &newg, sizeof(newg)))) {
		return error;         /* can't copyin new guard */
	}
	if (0 != uap->fdflagsp &&
	    0 != (error = copyin(uap->fdflagsp, &nfdflags, sizeof(nfdflags)))) {
		return error;         /* can't copyin new fdflags */
	}

	/*
	 * Transitioning unguarded -> guarded: preallocate the guard
	 * record before taking the fdlock, since allocation may block.
	 * If it goes unused it is freed at the bottom.
	 */
	if (oldg == 0 && newg) {
		fpg = guarded_fileproc_alloc(newg);
	}

	proc_fdlock(p);

	if ((error = fp_lookup(p, fd, &fp, 1)) != 0) {
		proc_fdunlock(p);
		return error;
	}

	/*
	 * Report the current FD_CLOEXEC/FD_CLOFORK state first.  The
	 * fdlock must be dropped around the copyout; the fp is pinned
	 * by the lookup iocount across the window.
	 */
	if (0 != uap->fdflagsp) {
		int ofl = 0;
		if (fp->fp_flags & FP_CLOEXEC) {
			ofl |= FD_CLOEXEC;
		}
		if (fp->fp_flags & FP_CLOFORK) {
			ofl |= FD_CLOFORK;
		}
		proc_fdunlock(p);
		if (0 != (error = copyout(&ofl, uap->fdflagsp, sizeof(ofl)))) {
			proc_fdlock(p);
			goto dropout; /* can't copyout old fdflags */
		}
		proc_fdlock(p);
	}

	/*
	 * Validate the "old guard" arguments against the fd's current
	 * guard state: a guarded fd requires a matching non-zero guard,
	 * an unguarded fd requires none at all.
	 */
	if (fp->fp_guard_attrs) {
		if (0 == uap->guard || 0 == uap->guardflags) {
			error = EINVAL; /* missing guard! */
		} else if (0 == oldg) {
			error = EPERM; /* guardids cannot be zero */
		}
	} else {
		if (0 != uap->guard || 0 != uap->guardflags) {
			error = EINVAL; /* guard provided, but none needed! */
		}
	}

	if (0 != error) {
		goto dropout;
	}

	if (0 != uap->nguard) {
		/*
		 * There's a new guard in town.
		 */
		if (0 == newg) {
			error = EINVAL; /* guards cannot contain zero */
		} else if (((uap->nguardflags & GUARD_REQUIRED) != GUARD_REQUIRED) ||
		    ((uap->nguardflags & ~GUARD_ALL) != 0)) {
			error = EINVAL; /* must have valid attributes too */
		}
		if (0 != error) {
			goto dropout;
		}

		if (fp->fp_guard_attrs) {
			/*
			 * Replace old guard with new guard
			 */
			if (oldg == fp->fp_guard->fpg_guard &&
			    uap->guardflags == fp->fp_guard_attrs) {
				/*
				 * Must match existing guard + attributes
				 * before we'll swap them to new ones, managing
				 * fdflags "side-effects" as we go. Note that
				 * userland can request FD_CLOFORK semantics.
				 */
				if (fp->fp_guard_attrs & GUARD_CLOSE) {
					fp->fp_flags &= ~FP_CLOFORK;
				}
				fp->fp_guard->fpg_guard = newg;
				fp->fp_guard_attrs = (uint16_t)uap->nguardflags;
				if ((fp->fp_guard_attrs & GUARD_CLOSE) ||
				    (nfdflags & FD_CLOFORK)) {
					fp->fp_flags |= FP_CLOFORK;
				}
				/* FG_CONFINED enforced regardless */
			} else {
				error = EPERM;
			}
		} else {
			/*
			 * Add a guard to a previously unguarded descriptor
			 * (only supported for these descriptor types).
			 */
			switch (FILEGLOB_DTYPE(fp->fp_glob)) {
			case DTYPE_VNODE:
			case DTYPE_PIPE:
			case DTYPE_SOCKET:
			case DTYPE_KQUEUE:
			case DTYPE_NETPOLICY:
				break;
			default:
				error = ENOTSUP;
				goto dropout;
			}

			/* install the preallocated record; stash fp_wset in it */
			fp->fp_guard_attrs = (uint16_t)uap->nguardflags;
			fpg->fpg_wset = fp->fp_wset;
			fp->fp_guard = fpg;
			fpg = NULL; /* ownership transferred to fp */
			if (fp->fp_guard_attrs & GUARD_CLOSE) {
				fp->fp_flags |= FP_CLOFORK;
			}
			fp->fp_flags |= FP_CLOEXEC;
		}
	} else {
		if (fp->fp_guard_attrs) {
			/*
			 * Remove the guard altogether.
			 */
			if (0 != uap->nguardflags) {
				error = EINVAL;
				goto dropout;
			}

			if (oldg != fp->fp_guard->fpg_guard ||
			    uap->guardflags != fp->fp_guard_attrs) {
				error = EPERM;
				goto dropout;
			}

			/* detach the record; it is freed after the lock drops */
			assert(fpg == NULL);
			fp->fp_guard_attrs = 0;
			fpg = fp->fp_guard;
			fp->fp_wset = fpg->fpg_wset;

			/* reset flags, honoring the caller's requested fdflags */
			fp->fp_flags &= ~(FP_CLOEXEC | FP_CLOFORK);
			if (nfdflags & FD_CLOFORK) {
				fp->fp_flags |= FP_CLOFORK;
			}
			if (nfdflags & FD_CLOEXEC) {
				fp->fp_flags |= FP_CLOEXEC;
			}
		} else {
			/*
			 * Not already guarded, and no new guard?
			 */
			error = EINVAL;
		}
	}

dropout:
	(void) fp_drop(p, fd, fp, 1);
	proc_fdunlock(p);

	/* free the unused preallocation or the detached record */
	if (fpg) {
		zfree(fp_guard_zone, fpg);
	}
	return error;
}
682
683 /*
684 * user_ssize_t guarded_write_np(int fd, const guardid_t *guard,
685 * user_addr_t cbuf, user_ssize_t nbyte);
686 *
687 * Initial implementation of guarded writes.
688 */
689 int
guarded_write_np(struct proc * p,struct guarded_write_np_args * uap,user_ssize_t * retval)690 guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t *retval)
691 {
692 int error;
693 guardid_t uguard;
694
695 AUDIT_ARG(fd, uap->fd);
696
697 if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
698 return error;
699 }
700
701 return write_internal(p, uap->fd, uap->cbuf, uap->nbyte, 0, 0, &uguard, retval);
702 }
703
704 /*
705 * user_ssize_t guarded_pwrite_np(int fd, const guardid_t *guard,
706 * user_addr_t buf, user_size_t nbyte, off_t offset);
707 *
708 * Initial implementation of guarded pwrites.
709 */
710 int
guarded_pwrite_np(struct proc * p,struct guarded_pwrite_np_args * uap,user_ssize_t * retval)711 guarded_pwrite_np(struct proc *p, struct guarded_pwrite_np_args *uap, user_ssize_t *retval)
712 {
713 int error;
714 guardid_t uguard;
715
716 AUDIT_ARG(fd, uap->fd);
717
718 if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
719 return error;
720 }
721
722 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_guarded_pwrite_np) | DBG_FUNC_NONE),
723 uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
724
725 return write_internal(p, uap->fd, uap->buf, uap->nbyte, uap->offset, FOF_OFFSET,
726 &uguard, retval);
727 }
728
729 /*
730 * user_ssize_t guarded_writev_np(int fd, const guardid_t *guard,
731 * struct iovec *iovp, u_int iovcnt);
732 *
733 * Initial implementation of guarded writev.
734 *
735 */
736 int
guarded_writev_np(struct proc * p,struct guarded_writev_np_args * uap,user_ssize_t * retval)737 guarded_writev_np(struct proc *p, struct guarded_writev_np_args *uap, user_ssize_t *retval)
738 {
739 int error;
740 guardid_t uguard;
741
742 AUDIT_ARG(fd, uap->fd);
743
744 if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
745 return error;
746 }
747
748 return writev_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, &uguard, retval);
749 }
750
751 /*
752 * int falloc_guarded(struct proc *p, struct fileproc **fp, int *fd,
753 * vfs_context_t ctx, const guardid_t *guard, u_int attrs);
754 *
755 * This SPI is the guarded variant of falloc(). It borrows the same
756 * restrictions as those used by the rest of the guarded_* routines.
757 */
758 int
falloc_guarded(struct proc * p,struct fileproc ** fp,int * fd,vfs_context_t ctx,const guardid_t * guard,u_int attrs)759 falloc_guarded(struct proc *p, struct fileproc **fp, int *fd,
760 vfs_context_t ctx, const guardid_t *guard, u_int attrs)
761 {
762 struct gfp_crarg crarg;
763
764 if (((attrs & GUARD_REQUIRED) != GUARD_REQUIRED) ||
765 ((attrs & ~GUARD_ALL) != 0) || (*guard == 0)) {
766 return EINVAL;
767 }
768
769 bzero(&crarg, sizeof(crarg));
770 crarg.gca_guard = *guard;
771 crarg.gca_attrs = (uint16_t)attrs;
772
773 return falloc_withinit(p, fp, fd, ctx, guarded_fileproc_init, &crarg);
774 }
775
776 #if CONFIG_MACF && CONFIG_VNGUARD
777
778 /*
779 * Guarded vnodes
780 *
781 * Uses MAC hooks to guard operations on vnodes in the system. Given an fd,
782 * add data to the label on the fileglob and the vnode it points at.
783 * The data contains a pointer to the fileglob, the set of attributes to
784 * guard, a guard value for uniquification, and the pid of the process
785 * who set the guard up in the first place.
786 *
787 * The fd must have been opened read/write, and the underlying
788 * fileglob is FG_CONFINED so that there's no ambiguity about the
789 * owning process.
790 *
791 * When there's a callback for a vnode operation of interest (rename, unlink,
792 * etc.) check to see if the guard permits that operation, and if not
793 * take an action e.g. log a message or generate a crash report.
794 *
795 * The label is removed from the vnode and the fileglob when the fileglob
796 * is closed.
797 *
798 * The initial action to be taken can be specified by a boot arg (vnguard=0x42)
799 * and change via the "kern.vnguard.flags" sysctl.
800 */
801
struct vng_owner;

struct vng_info { /* lives on the vnode label */
	guardid_t vgi_guard;    /* guard value presented at set time */
	unsigned vgi_attrs;     /* VNG_* attribute bits being enforced */
	TAILQ_HEAD(, vng_owner) vgi_owners;     /* all owners sharing this guard */
};

struct vng_owner { /* lives on the fileglob label */
	proc_t vgo_p;           /* process that established this ownership */
	struct vng_info *vgo_vgi;       /* back-pointer to the shared vnode guard */
	TAILQ_ENTRY(vng_owner) vgo_link;        /* linkage on vgi_owners */
};
815
816 static struct vng_info *
new_vgi(unsigned attrs,guardid_t guard)817 new_vgi(unsigned attrs, guardid_t guard)
818 {
819 struct vng_info *vgi = kalloc_type(struct vng_info, Z_WAITOK);
820 vgi->vgi_guard = guard;
821 vgi->vgi_attrs = attrs;
822 TAILQ_INIT(&vgi->vgi_owners);
823 return vgi;
824 }
825
826 static struct vng_owner *
new_vgo(proc_t p)827 new_vgo(proc_t p)
828 {
829 struct vng_owner *vgo = kalloc_type(struct vng_owner, Z_WAITOK | Z_ZERO);
830 vgo->vgo_p = p;
831 return vgo;
832 }
833
834 static void
vgi_add_vgo(struct vng_info * vgi,struct vng_owner * vgo)835 vgi_add_vgo(struct vng_info *vgi, struct vng_owner *vgo)
836 {
837 vgo->vgo_vgi = vgi;
838 TAILQ_INSERT_HEAD(&vgi->vgi_owners, vgo, vgo_link);
839 }
840
841 static boolean_t
vgi_remove_vgo(struct vng_info * vgi,struct vng_owner * vgo)842 vgi_remove_vgo(struct vng_info *vgi, struct vng_owner *vgo)
843 {
844 TAILQ_REMOVE(&vgi->vgi_owners, vgo, vgo_link);
845 vgo->vgo_vgi = NULL;
846 return TAILQ_EMPTY(&vgi->vgi_owners);
847 }
848
849 static void
free_vgi(struct vng_info * vgi)850 free_vgi(struct vng_info *vgi)
851 {
852 assert(TAILQ_EMPTY(&vgi->vgi_owners));
853 #if DEVELOP || DEBUG
854 memset(vgi, 0xbeadfade, sizeof(*vgi));
855 #endif
856 kfree_type(struct vng_info, vgi);
857 }
858
859 static void
free_vgo(struct vng_owner * vgo)860 free_vgo(struct vng_owner *vgo)
861 {
862 #if DEVELOP || DEBUG
863 memset(vgo, 0x2bedf1d0, sizeof(*vgo));
864 #endif
865 kfree_type(struct vng_owner, vgo);
866 }
867
868 static int label_slot;
869 static LCK_GRP_DECLARE(llock_grp, VNG_POLICY_NAME);
870 static LCK_RW_DECLARE(llock, &llock_grp);
871
872 static __inline void *
vng_lbl_get(struct label * label)873 vng_lbl_get(struct label *label)
874 {
875 lck_rw_assert(&llock, LCK_RW_ASSERT_HELD);
876 void *data;
877 if (NULL == label) {
878 data = NULL;
879 } else {
880 data = (void *)mac_label_get(label, label_slot);
881 }
882 return data;
883 }
884
885 static __inline struct vng_info *
vng_lbl_get_withattr(struct label * label,unsigned attrmask)886 vng_lbl_get_withattr(struct label *label, unsigned attrmask)
887 {
888 struct vng_info *vgi = vng_lbl_get(label);
889 assert(NULL == vgi || (vgi->vgi_attrs & ~VNG_ALL) == 0);
890 if (NULL != vgi && 0 == (vgi->vgi_attrs & attrmask)) {
891 vgi = NULL;
892 }
893 return vgi;
894 }
895
/*
 * Store vnguard data into a MAC label slot.  Caller must hold llock
 * exclusively; the label must exist.
 */
static __inline void
vng_lbl_set(struct label *label, void *data)
{
	assert(NULL != label);
	lck_rw_assert(&llock, LCK_RW_ASSERT_EXCLUSIVE);
	mac_label_set(label, label_slot, (intptr_t)data);
}
903
/*
 * VNG_SYSC_GET_ATTR handler: given an fd and the guard value, report
 * the attributes currently guarded on the underlying vnode in
 * vga->vga_attrs (zero if the vnode is unguarded).  EPERM on a guard
 * mismatch; EBADF if the fd is not a regular, mounted vnode.
 */
static int
vnguard_sysc_getguardattr(proc_t p, struct vnguard_getattr *vga)
{
	const int fd = vga->vga_fd;

	/* a zero guard value is never valid */
	if (0 == vga->vga_guard) {
		return EINVAL;
	}

	int error;
	struct fileproc *fp;
	if (0 != (error = fp_lookup(p, fd, &fp, 0))) {
		return error;
	}
	do {
		/* only regular files on a mounted filesystem qualify */
		struct fileglob *fg = fp->fp_glob;
		if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		struct vnode *vp = fg_get_data(fg);
		if (!vnode_isreg(vp) || NULL == vp->v_mount) {
			error = EBADF;
			break;
		}
		/* take an iocount on the vnode for the label inspection */
		error = vnode_getwithref(vp);
		if (0 != error) {
			break;
		}

		vga->vga_attrs = 0;

		/* shared lock: read-only walk of the label data */
		lck_rw_lock_shared(&llock);

		if (NULL != mac_vnode_label(vp)) {
			const struct vng_info *vgi = vng_lbl_get(mac_vnode_label(vp));
			if (NULL != vgi) {
				if (vgi->vgi_guard != vga->vga_guard) {
					error = EPERM;
				} else {
					vga->vga_attrs = vgi->vgi_attrs;
				}
			}
		}

		lck_rw_unlock_shared(&llock);
		vnode_put(vp);
	} while (0);

	fp_drop(p, fd, fp, 0);
	return error;
}
956
/*
 * VNG_SYSC_SET_GUARD handler: install (or join) a vnode guard for the
 * file behind fd.  The fd must be open read/write on a regular file
 * of a confined fileglob (FG_CONFINED), so the owning process is
 * unambiguous.  If the vnode is already guarded, the guard value must
 * match and attributes may only be narrowed (SQLite workaround below).
 */
static int
vnguard_sysc_setguard(proc_t p, const struct vnguard_set *vns)
{
	const int fd = vns->vns_fd;

	/* attrs must be non-empty, known bits only; guard must be non-zero */
	if ((vns->vns_attrs & ~VNG_ALL) != 0 ||
	    0 == vns->vns_attrs || 0 == vns->vns_guard) {
		return EINVAL;
	}

	int error;
	struct fileproc *fp;
	if (0 != (error = fp_lookup(p, fd, &fp, 0))) {
		return error;
	}
	do {
		/*
		 * To avoid trivial DoS, insist that the caller
		 * has read/write access to the file.
		 */
		if ((FREAD | FWRITE) != (fp->f_flag & (FREAD | FWRITE))) {
			error = EBADF;
			break;
		}
		struct fileglob *fg = fp->fp_glob;
		if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		/*
		 * Confinement means there's only one fd pointing at
		 * this fileglob, and will always be associated with
		 * this pid.
		 */
		if (0 == (FG_CONFINED & fg->fg_lflags)) {
			error = EBADF;
			break;
		}
		struct vnode *vp = fg_get_data(fg);
		if (!vnode_isreg(vp) || NULL == vp->v_mount) {
			error = EBADF;
			break;
		}
		/* take an iocount for the label manipulation below */
		error = vnode_getwithref(vp);
		if (0 != error) {
			break;
		}

		/* Ensure the target vnode -has- a label */
		struct vfs_context *ctx = vfs_context_current();
		mac_vnode_label_update(ctx, vp, NULL);

		/* preallocate both records before taking the lock */
		struct vng_info *nvgi = new_vgi(vns->vns_attrs, vns->vns_guard);
		struct vng_owner *nvgo = new_vgo(p);

		lck_rw_lock_exclusive(&llock);

		do {
			/*
			 * A vnode guard is associated with one or more
			 * fileglobs in one or more processes.
			 */
			struct vng_info *vgi = vng_lbl_get(mac_vnode_label(vp));
			struct vng_owner *vgo = fg->fg_vgo;

			if (NULL == vgi) {
				/* vnode unguarded, add the first guard */
				if (NULL != vgo) {
					panic("vnguard label on fileglob "
					    "but not vnode");
				}
				/* add a kusecount so we can unlabel later */
				error = vnode_ref_ext(vp, O_EVTONLY, 0);
				if (0 == error) {
					/* add the guard */
					vgi_add_vgo(nvgi, nvgo);
					vng_lbl_set(mac_vnode_label(vp), nvgi);
					fg->fg_vgo = nvgo;
				} else {
					free_vgo(nvgo);
					free_vgi(nvgi);
				}
			} else {
				/* vnode already guarded: join it instead */
				free_vgi(nvgi);
				if (vgi->vgi_guard != vns->vns_guard) {
					error = EPERM; /* guard mismatch */
				} else if (vgi->vgi_attrs != vns->vns_attrs) {
					/*
					 * Temporary workaround for older versions of SQLite:
					 * allow newer guard attributes to be silently cleared.
					 */
					const unsigned mask = ~(VNG_WRITE_OTHER | VNG_TRUNC_OTHER);
					if ((vgi->vgi_attrs & mask) == (vns->vns_attrs & mask)) {
						vgi->vgi_attrs &= vns->vns_attrs;
					} else {
						error = EACCES; /* attr mismatch */
					}
				}
				/* if this fileglob already owns a share, keep it */
				if (0 != error || NULL != vgo) {
					free_vgo(nvgo);
					break;
				}
				/* record shared ownership */
				vgi_add_vgo(vgi, nvgo);
				fg->fg_vgo = nvgo;
			}
		} while (0);

		lck_rw_unlock_exclusive(&llock);
		vnode_put(vp);
	} while (0);

	fp_drop(p, fd, fp, 0);
	return error;
}
1073
1074 static int
vng_policy_syscall(proc_t p,int cmd,user_addr_t arg)1075 vng_policy_syscall(proc_t p, int cmd, user_addr_t arg)
1076 {
1077 int error = EINVAL;
1078
1079 switch (cmd) {
1080 case VNG_SYSC_PING:
1081 if (0 == arg) {
1082 error = 0;
1083 }
1084 break;
1085 case VNG_SYSC_SET_GUARD: {
1086 struct vnguard_set vns;
1087 error = copyin(arg, (void *)&vns, sizeof(vns));
1088 if (error) {
1089 break;
1090 }
1091 error = vnguard_sysc_setguard(p, &vns);
1092 break;
1093 }
1094 case VNG_SYSC_GET_ATTR: {
1095 struct vnguard_getattr vga;
1096 error = copyin(arg, (void *)&vga, sizeof(vga));
1097 if (error) {
1098 break;
1099 }
1100 error = vnguard_sysc_getguardattr(p, &vga);
1101 if (error) {
1102 break;
1103 }
1104 error = copyout((void *)&vga, arg, sizeof(vga));
1105 break;
1106 }
1107 default:
1108 break;
1109 }
1110 return error;
1111 }
1112
/*
 * This is called just before the fileglob disappears in fg_free().
 * Take the exclusive lock: no other thread can add or remove
 * a vng_info to any vnode in the system.
 */
void
vng_file_label_destroy(struct fileglob *fg)
{
	struct vng_owner *lvgo = fg->fg_vgo;
	struct vng_info *vgi = NULL;

	if (lvgo) {
		lck_rw_lock_exclusive(&llock);
		/* detach the owner from the fileglob while holding the lock */
		fg->fg_vgo = NULL;
		vgi = lvgo->vgo_vgi;
		assert(vgi);
		if (vgi_remove_vgo(vgi, lvgo)) {
			/* that was the last reference */
			vgi->vgi_attrs = 0;
			if (DTYPE_VNODE == FILEGLOB_DTYPE(fg)) {
				struct vnode *vp = fg_get_data(fg);
				int error = vnode_getwithref(vp);
				if (0 == error) {
					/*
					 * Clear the guard label, then drop the
					 * lock BEFORE releasing the vnode: the
					 * release path may block.
					 */
					vng_lbl_set(mac_vnode_label(vp), 0);
					lck_rw_unlock_exclusive(&llock);
					/* may trigger VNOP_INACTIVE */
					vnode_rele_ext(vp, O_EVTONLY, 0);
					vnode_put(vp);
					free_vgi(vgi);
					free_vgo(lvgo);
					return;
				}
			}
		}
		/*
		 * Either other owners remain, or the vnode iocount could not
		 * be taken (the label stays; NOTE(review): presumably cleaned
		 * up on a later destroy/reclaim — confirm against callers).
		 */
		lck_rw_unlock_exclusive(&llock);
		free_vgo(lvgo);
	}
}
1151
1152 static os_reason_t
vng_reason_from_pathname(const char * path,uint32_t pathlen)1153 vng_reason_from_pathname(const char *path, uint32_t pathlen)
1154 {
1155 os_reason_t r = os_reason_create(OS_REASON_GUARD, GUARD_REASON_VNODE);
1156 if (NULL == r) {
1157 return r;
1158 }
1159 /*
1160 * If the pathname is very long, just keep the trailing part
1161 */
1162 const uint32_t pathmax = 3 * EXIT_REASON_USER_DESC_MAX_LEN / 4;
1163 if (pathlen > pathmax) {
1164 path += (pathlen - pathmax);
1165 pathlen = pathmax;
1166 }
1167 uint32_t rsize = kcdata_estimate_required_buffer_size(1, pathlen);
1168 if (0 == os_reason_alloc_buffer(r, rsize)) {
1169 struct kcdata_descriptor *kcd = &r->osr_kcd_descriptor;
1170 mach_vm_address_t addr;
1171 if (kcdata_get_memory_addr(kcd,
1172 EXIT_REASON_USER_DESC, pathlen, &addr) == KERN_SUCCESS) {
1173 kcdata_memcpy(kcd, addr, path, pathlen);
1174 return r;
1175 }
1176 }
1177 os_reason_free(r);
1178 return OS_REASON_NULL;
1179 }
1180
/* bitmask of kVNG_POLICY_* flags selecting enforcement behavior (settable via boot-arg) */
static int vng_policy_flags;
1182
/*
 * Note: if an EXC_GUARD is generated, llock will be dropped and
 * subsequently reacquired by this routine. Data derived from
 * any label in the caller should be regenerated.
 */
static int
vng_guard_violation(const struct vng_info *vgi,
    unsigned opval, vnode_t vp)
{
	int retval = 0;

	if (vng_policy_flags & kVNG_POLICY_EPERM) {
		/* deny the operation */
		retval = EPERM;
	}

	if (vng_policy_flags & (kVNG_POLICY_LOGMSG | kVNG_POLICY_UPRINTMSG)) {
		/* log a message */
		const char *op;
		switch (opval) {
		case VNG_RENAME_FROM:
			op = "rename-from";
			break;
		case VNG_RENAME_TO:
			op = "rename-to";
			break;
		case VNG_UNLINK:
			op = "unlink";
			break;
		case VNG_LINK:
			op = "link";
			break;
		case VNG_EXCHDATA:
			op = "exchdata";
			break;
		case VNG_WRITE_OTHER:
			op = "write";
			break;
		case VNG_TRUNC_OTHER:
			op = "truncate";
			break;
		default:
			op = "(unknown)";
			break;
		}

		const char *nm = vnode_getname(vp);
		proc_t p = current_proc();
		const struct vng_owner *vgo;
		/* emit one message per process that owns a guard on this vnode */
		TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
			const char fmt[] =
			    "%s[%d]: %s%s: '%s' guarded by %s[%d] (0x%llx)\n";

			if (vng_policy_flags & kVNG_POLICY_LOGMSG) {
				/* kernel log */
				printf(fmt,
				    proc_name_address(p), proc_pid(p), op,
				    0 != retval ? " denied" : "",
				    NULL != nm ? nm : "(unknown)",
				    proc_name_address(vgo->vgo_p),
				    proc_pid(vgo->vgo_p), vgi->vgi_guard);
			}
			if (vng_policy_flags & kVNG_POLICY_UPRINTMSG) {
				/* offending process's controlling terminal */
				uprintf(fmt,
				    proc_name_address(p), proc_pid(p), op,
				    0 != retval ? " denied" : "",
				    NULL != nm ? nm : "(unknown)",
				    proc_name_address(vgo->vgo_p),
				    proc_pid(vgo->vgo_p), vgi->vgi_guard);
			}
		}
		if (NULL != nm) {
			vnode_putname(nm);
		}
	}

	if (vng_policy_flags & (kVNG_POLICY_EXC | kVNG_POLICY_EXC_CORPSE)) {
		/* EXC_GUARD exception */
		const struct vng_owner *vgo = TAILQ_FIRST(&vgi->vgi_owners);
		pid_t pid = vgo ? proc_pid(vgo->vgo_p) : 0;
		mach_exception_code_t code;
		mach_exception_subcode_t subcode;

		/* encode type/flavor/guarding-pid into the exception code */
		code = 0;
		EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_VN);
		EXC_GUARD_ENCODE_FLAVOR(code, opval);
		EXC_GUARD_ENCODE_TARGET(code, pid);
		subcode = vgi->vgi_guard;

		/* exception delivery can block: drop the label lock (see note above) */
		lck_rw_unlock_shared(&llock);

		if (vng_policy_flags & kVNG_POLICY_EXC_CORPSE) {
			char *path;
			int len = MAXPATHLEN;

			path = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);

			os_reason_t r = NULL;
			/* attach the vnode's path to the corpse, if resolvable */
			vn_getpath(vp, path, &len);
			if (*path && len) {
				r = vng_reason_from_pathname(path, len);
			}
			task_violated_guard(code, subcode, r, TRUE); /* not fatal */
			if (NULL != r) {
				os_reason_free(r);
			}

			zfree(ZV_NAMEI, path);
		} else {
			/* fatal: delivered via AST on return to user space */
			thread_t t = current_thread();
			thread_guard_violation(t, code, subcode, TRUE);
		}

		lck_rw_lock_shared(&llock);
	} else if (vng_policy_flags & kVNG_POLICY_SIGKILL) {
		proc_t p = current_proc();
		psignal(p, SIGKILL);
	}

	return retval;
}
1303
1304 /*
1305 * A fatal vnode guard was tripped on this thread.
1306 *
1307 * (Invoked before returning to userland from the syscall handler.)
1308 */
1309 void
vn_guard_ast(thread_t __unused t,mach_exception_data_type_t code,mach_exception_data_type_t subcode)1310 vn_guard_ast(thread_t __unused t,
1311 mach_exception_data_type_t code, mach_exception_data_type_t subcode)
1312 {
1313 /*
1314 * Check if anyone has registered for Synchronous EXC_GUARD, if yes then,
1315 * deliver it synchronously and then kill the process, else kill the process
1316 * and deliver the exception via EXC_CORPSE_NOTIFY.
1317 */
1318 if (task_exception_notify(EXC_GUARD, code, subcode) == KERN_SUCCESS) {
1319 psignal(current_proc(), SIGKILL);
1320 } else {
1321 exit_with_guard_exception(current_proc(), code, subcode);
1322 }
1323 }
1324
1325 /*
1326 * vnode callbacks
1327 */
1328
1329 static int
vng_vnode_check_rename(kauth_cred_t __unused cred,struct vnode * __unused dvp,struct label * __unused dlabel,struct vnode * vp,struct label * label,struct componentname * __unused cnp,struct vnode * __unused tdvp,struct label * __unused tdlabel,struct vnode * tvp,struct label * tlabel,struct componentname * __unused tcnp)1330 vng_vnode_check_rename(kauth_cred_t __unused cred,
1331 struct vnode *__unused dvp, struct label *__unused dlabel,
1332 struct vnode *vp, struct label *label,
1333 struct componentname *__unused cnp,
1334 struct vnode *__unused tdvp, struct label *__unused tdlabel,
1335 struct vnode *tvp, struct label *tlabel,
1336 struct componentname *__unused tcnp)
1337 {
1338 int error = 0;
1339 if (NULL != label || NULL != tlabel) {
1340 lck_rw_lock_shared(&llock);
1341 const struct vng_info *vgi =
1342 vng_lbl_get_withattr(label, VNG_RENAME_FROM);
1343 if (NULL != vgi) {
1344 error = vng_guard_violation(vgi, VNG_RENAME_FROM, vp);
1345 }
1346 if (0 == error) {
1347 vgi = vng_lbl_get_withattr(tlabel, VNG_RENAME_TO);
1348 if (NULL != vgi) {
1349 error = vng_guard_violation(vgi,
1350 VNG_RENAME_TO, tvp);
1351 }
1352 }
1353 lck_rw_unlock_shared(&llock);
1354 }
1355 return error;
1356 }
1357
1358 static int
vng_vnode_check_link(kauth_cred_t __unused cred,struct vnode * __unused dvp,struct label * __unused dlabel,struct vnode * vp,struct label * label,struct componentname * __unused cnp)1359 vng_vnode_check_link(kauth_cred_t __unused cred,
1360 struct vnode *__unused dvp, struct label *__unused dlabel,
1361 struct vnode *vp, struct label *label, struct componentname *__unused cnp)
1362 {
1363 int error = 0;
1364 if (NULL != label) {
1365 lck_rw_lock_shared(&llock);
1366 const struct vng_info *vgi =
1367 vng_lbl_get_withattr(label, VNG_LINK);
1368 if (vgi) {
1369 error = vng_guard_violation(vgi, VNG_LINK, vp);
1370 }
1371 lck_rw_unlock_shared(&llock);
1372 }
1373 return error;
1374 }
1375
1376 static int
vng_vnode_check_unlink(kauth_cred_t __unused cred,struct vnode * __unused dvp,struct label * __unused dlabel,struct vnode * vp,struct label * label,struct componentname * __unused cnp)1377 vng_vnode_check_unlink(kauth_cred_t __unused cred,
1378 struct vnode *__unused dvp, struct label *__unused dlabel,
1379 struct vnode *vp, struct label *label, struct componentname *__unused cnp)
1380 {
1381 int error = 0;
1382 if (NULL != label) {
1383 lck_rw_lock_shared(&llock);
1384 const struct vng_info *vgi =
1385 vng_lbl_get_withattr(label, VNG_UNLINK);
1386 if (vgi) {
1387 error = vng_guard_violation(vgi, VNG_UNLINK, vp);
1388 }
1389 lck_rw_unlock_shared(&llock);
1390 }
1391 return error;
1392 }
1393
1394 /*
1395 * Only check violations for writes performed by "other processes"
1396 */
1397 static int
vng_vnode_check_write(kauth_cred_t __unused actv_cred,kauth_cred_t __unused file_cred,struct vnode * vp,struct label * label)1398 vng_vnode_check_write(kauth_cred_t __unused actv_cred,
1399 kauth_cred_t __unused file_cred, struct vnode *vp, struct label *label)
1400 {
1401 int error = 0;
1402 if (NULL != label) {
1403 lck_rw_lock_shared(&llock);
1404 const struct vng_info *vgi =
1405 vng_lbl_get_withattr(label, VNG_WRITE_OTHER);
1406 if (vgi) {
1407 proc_t p = current_proc();
1408 const struct vng_owner *vgo;
1409 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1410 if (vgo->vgo_p == p) {
1411 goto done;
1412 }
1413 }
1414 error = vng_guard_violation(vgi, VNG_WRITE_OTHER, vp);
1415 }
1416 done:
1417 lck_rw_unlock_shared(&llock);
1418 }
1419 return error;
1420 }
1421
1422 /*
1423 * Only check violations for truncates performed by "other processes"
1424 */
1425 static int
vng_vnode_check_truncate(kauth_cred_t __unused actv_cred,kauth_cred_t __unused file_cred,struct vnode * vp,struct label * label)1426 vng_vnode_check_truncate(kauth_cred_t __unused actv_cred,
1427 kauth_cred_t __unused file_cred, struct vnode *vp,
1428 struct label *label)
1429 {
1430 int error = 0;
1431 if (NULL != label) {
1432 lck_rw_lock_shared(&llock);
1433 const struct vng_info *vgi =
1434 vng_lbl_get_withattr(label, VNG_TRUNC_OTHER);
1435 if (vgi) {
1436 proc_t p = current_proc();
1437 const struct vng_owner *vgo;
1438 TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) {
1439 if (vgo->vgo_p == p) {
1440 goto done;
1441 }
1442 }
1443 error = vng_guard_violation(vgi, VNG_TRUNC_OTHER, vp);
1444 }
1445 done:
1446 lck_rw_unlock_shared(&llock);
1447 }
1448 return error;
1449 }
1450
1451 static int
vng_vnode_check_exchangedata(kauth_cred_t __unused cred,struct vnode * fvp,struct label * flabel,struct vnode * svp,struct label * slabel)1452 vng_vnode_check_exchangedata(kauth_cred_t __unused cred,
1453 struct vnode *fvp, struct label *flabel,
1454 struct vnode *svp, struct label *slabel)
1455 {
1456 int error = 0;
1457 if (NULL != flabel || NULL != slabel) {
1458 lck_rw_lock_shared(&llock);
1459 const struct vng_info *vgi =
1460 vng_lbl_get_withattr(flabel, VNG_EXCHDATA);
1461 if (NULL != vgi) {
1462 error = vng_guard_violation(vgi, VNG_EXCHDATA, fvp);
1463 }
1464 if (0 == error) {
1465 vgi = vng_lbl_get_withattr(slabel, VNG_EXCHDATA);
1466 if (NULL != vgi) {
1467 error = vng_guard_violation(vgi,
1468 VNG_EXCHDATA, svp);
1469 }
1470 }
1471 lck_rw_unlock_shared(&llock);
1472 }
1473 return error;
1474 }
1475
1476 /* Intercept open-time truncations (by "other") of a guarded vnode */
1477
1478 static int
vng_vnode_check_open(kauth_cred_t cred,struct vnode * vp,struct label * label,int acc_mode)1479 vng_vnode_check_open(kauth_cred_t cred,
1480 struct vnode *vp, struct label *label, int acc_mode)
1481 {
1482 if (0 == (acc_mode & O_TRUNC)) {
1483 return 0;
1484 }
1485 return vng_vnode_check_truncate(cred, NULL, vp, label);
1486 }
1487
1488 /*
1489 * Configuration gorp
1490 */
1491
/* MAC policy operations implemented by the vnode-guard policy */
SECURITY_READ_ONLY_EARLY(static struct mac_policy_ops) vng_policy_ops = {
	.mpo_vnode_check_link = vng_vnode_check_link,
	.mpo_vnode_check_unlink = vng_vnode_check_unlink,
	.mpo_vnode_check_rename = vng_vnode_check_rename,
	.mpo_vnode_check_write = vng_vnode_check_write,
	.mpo_vnode_check_truncate = vng_vnode_check_truncate,
	.mpo_vnode_check_exchangedata = vng_vnode_check_exchangedata,
	.mpo_vnode_check_open = vng_vnode_check_open,

	/* private syscall used to set/query guards (VNG_SYSC_*) */
	.mpo_policy_syscall = vng_policy_syscall,
};
1503
/* label name(s) this policy registers with the MAC framework */
static const char *vng_labelnames[] = {
	"vnguard",
};

/* element count of a fixed-size array (do not use on pointers) */
#define ACOUNT(arr) ((unsigned)(sizeof (arr) / sizeof (arr[0])))

/* registration descriptor handed to mac_policy_register() */
SECURITY_READ_ONLY_LATE(static struct mac_policy_conf) vng_policy_conf = {
	.mpc_name = VNG_POLICY_NAME,
	.mpc_fullname = "Guarded vnode policy",
	.mpc_field_off = &label_slot,
	.mpc_labelnames = vng_labelnames,
	.mpc_labelname_count = ACOUNT(vng_labelnames),
	.mpc_ops = &vng_policy_ops,
	.mpc_loadtime_flags = 0,
	.mpc_runtime_flags = 0
};

/* handle assigned by mac_policy_register() in vnguard_policy_init() */
SECURITY_READ_ONLY_LATE(static mac_policy_handle_t) vng_policy_handle;
1522
1523 void
vnguard_policy_init(void)1524 vnguard_policy_init(void)
1525 {
1526 if (0 == PE_i_can_has_debugger(NULL)) {
1527 return;
1528 }
1529 vng_policy_flags = kVNG_POLICY_LOGMSG |
1530 kVNG_POLICY_EXC_CORPSE | kVNG_POLICY_UPRINTMSG;
1531 PE_parse_boot_argn("vnguard", &vng_policy_flags, sizeof(vng_policy_flags));
1532 if (vng_policy_flags) {
1533 mac_policy_register(&vng_policy_conf, &vng_policy_handle, NULL);
1534 }
1535 }
1536
#if DEBUG || DEVELOPMENT
#include <sys/sysctl.h>

/* kern.vnguard.flags: runtime read/write access to vng_policy_flags */
SYSCTL_DECL(_kern_vnguard);
SYSCTL_NODE(_kern, OID_AUTO, vnguard, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "vnguard");
SYSCTL_INT(_kern_vnguard, OID_AUTO, flags, CTLFLAG_RW | CTLFLAG_LOCKED,
    &vng_policy_flags, 0, "vnguard policy flags");
#endif
1545
1546 #endif /* CONFIG_MACF && CONFIG_VNGUARD */
1547