1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/proc_internal.h>
82 #include <sys/kauth.h>
83 #include <sys/mount_internal.h>
84 #include <sys/time.h>
85 #include <sys/lock.h>
86 #include <sys/vnode.h>
87 #include <sys/vnode_internal.h>
88 #include <sys/stat.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
91 #include <sys/buf_internal.h>
92 #include <sys/errno.h>
93 #include <kern/kalloc.h>
94 #include <sys/uio_internal.h>
95 #include <sys/uio.h>
96 #include <sys/domain.h>
97 #include <sys/mbuf.h>
98 #include <sys/syslog.h>
99 #include <sys/ubc_internal.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/filedesc.h>
103 #include <sys/event.h>
104 #include <sys/kdebug.h>
105 #include <sys/kauth.h>
106 #include <sys/user.h>
107 #include <sys/systm.h>
108 #include <sys/kern_memorystatus.h>
109 #include <sys/lockf.h>
110 #include <sys/reboot.h>
111 #include <miscfs/fifofs/fifo.h>
112
113 #include <nfs/nfs_conf.h>
114
115 #include <string.h>
116 #include <machine/machine_routines.h>
117
118 #include <kern/assert.h>
119 #include <mach/kern_return.h>
120 #include <kern/thread.h>
121 #include <kern/sched_prim.h>
122
123 #include <miscfs/specfs/specdev.h>
124
125 #include <mach/mach_types.h>
126 #include <mach/memory_object_types.h>
127 #include <mach/memory_object_control.h>
128
129 #include <kern/kalloc.h> /* kalloc()/kfree() */
130 #include <kern/clock.h> /* delay_for_interval() */
131 #include <libkern/OSAtomic.h> /* OSAddAtomic() */
132 #include <os/atomic_private.h>
133 #if defined(XNU_TARGET_OS_OSX)
134 #include <console/video_console.h>
135 #endif
136
137 #ifdef CONFIG_IOCOUNT_TRACE
138 #include <libkern/OSDebug.h>
139 #endif
140
141 #include <vm/vm_protos.h> /* vnode_pager_vrele() */
142
143 #if CONFIG_MACF
144 #include <security/mac_framework.h>
145 #endif
146
147 #include <vfs/vfs_disk_conditioner.h>
148 #include <libkern/section_keywords.h>
149
150 static LCK_GRP_DECLARE(vnode_lck_grp, "vnode");
151 static LCK_ATTR_DECLARE(vnode_lck_attr, 0, 0);
152
153 #if CONFIG_TRIGGERS
154 static LCK_GRP_DECLARE(trigger_vnode_lck_grp, "trigger_vnode");
155 static LCK_ATTR_DECLARE(trigger_vnode_lck_attr, 0, 0);
156 #endif
157
158 extern lck_mtx_t mnt_list_mtx_lock;
159
160 ZONE_DECLARE(specinfo_zone, "specinfo",
161 sizeof(struct specinfo), ZC_ZFREE_CLEARMEM);
162
163 ZONE_DECLARE(vnode_zone, "vnodes",
164 sizeof(struct vnode), ZC_NOGC | ZC_ZFREE_CLEARMEM);
165
166 enum vtype iftovt_tab[16] = {
167 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
168 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
169 };
170 int vttoif_tab[9] = {
171 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
172 S_IFSOCK, S_IFIFO, S_IFMT,
173 };
174
175 /* XXX These should be in a BSD accessible Mach header, but aren't. */
176 extern void memory_object_mark_used(
177 memory_object_control_t control);
178
179 extern void memory_object_mark_unused(
180 memory_object_control_t control,
181 boolean_t rage);
182
183 extern void memory_object_mark_io_tracking(
184 memory_object_control_t control);
185
/* XXX next prototype should be from <nfs/nfs.h> */
187 extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);
188
189 extern int paniclog_append_noflush(const char *format, ...);
190
/* XXX next prototype should be from <libsa/stdlib.h> but conflicts libkern */
192 __private_extern__ void qsort(
193 void * array,
194 size_t nmembers,
195 size_t member_size,
196 int (*)(const void *, const void *));
197
198 __private_extern__ void vntblinit(void);
199 __private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t,
200 enum uio_seg, int);
201
202 static void vnode_list_add(vnode_t);
203 static void vnode_async_list_add(vnode_t);
204 static void vnode_list_remove(vnode_t);
205 static void vnode_list_remove_locked(vnode_t);
206
207 static void vnode_abort_advlocks(vnode_t);
208 static errno_t vnode_drain(vnode_t);
209 static void vgone(vnode_t, int flags);
210 static void vclean(vnode_t vp, int flag);
211 static void vnode_reclaim_internal(vnode_t, int, int, int);
212
213 static void vnode_dropiocount(vnode_t);
214
215 static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
216 static int vnode_reload(vnode_t);
217
218 static int unmount_callback(mount_t, __unused void *);
219
220 static void insmntque(vnode_t vp, mount_t mp);
221 static int mount_getvfscnt(void);
222 static int mount_fillfsids(fsid_t *, int );
223 static void vnode_iterate_setup(mount_t);
224 int vnode_umount_preflight(mount_t, vnode_t, int);
225 static int vnode_iterate_prepare(mount_t);
226 static int vnode_iterate_reloadq(mount_t);
227 static void vnode_iterate_clear(mount_t);
228 static mount_t vfs_getvfs_locked(fsid_t *);
229 static int vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp,
230 struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx);
231 static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx);
232
233 errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);
234
235 #ifdef CONFIG_IOCOUNT_TRACE
236 static void record_vp(vnode_t vp, int count);
237 static TUNABLE(int, bootarg_vnode_iocount_trace, "vnode_iocount_trace", 0);
238 static TUNABLE(int, bootarg_uthread_iocount_trace, "uthread_iocount_trace", 0);
239 #endif /* CONFIG_IOCOUNT_TRACE */
240
241 #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
242 static TUNABLE(bool, bootarg_no_vnode_jetsam, "-no_vnode_jetsam", false);
243 #endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */
244
245 static TUNABLE(bool, bootarg_no_vnode_drain, "-no_vnode_drain", false);
246
247 boolean_t root_is_CF_drive = FALSE;
248
249 #if CONFIG_TRIGGERS
250 static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, boolean_t external);
251 static void vnode_resolver_detach(vnode_t);
252 #endif
253
254 TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
255 TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */
256 TAILQ_HEAD(async_work_lst, vnode) vnode_async_work_list;
257
258
259 TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */
260 struct timeval rage_tv;
261 int rage_limit = 0;
262 int ragevnodes = 0;
263
264 int deadvnodes_low = 0;
265 int deadvnodes_high = 0;
266
267 uint64_t newvnode = 0;
268 uint64_t newvnode_nodead = 0;
269
270 static int vfs_unmountall_started = 0;
271
272 #define RAGE_LIMIT_MIN 100
273 #define RAGE_TIME_LIMIT 5
274
275 /*
276 * ROSV definitions
277 * NOTE: These are shadowed from PlatformSupport definitions, but XNU
278 * builds standalone.
279 */
280 #define PLATFORM_DATA_VOLUME_MOUNT_POINT "/System/Volumes/Data"
281
282 /*
283 * These could be in PlatformSupport but aren't yet
284 */
285 #define PLATFORM_PREBOOT_VOLUME_MOUNT_POINT "/System/Volumes/Preboot"
286 #define PLATFORM_RECOVERY_VOLUME_MOUNT_POINT "/System/Volumes/Recovery"
287
288 #if CONFIG_MOUNT_VM
289 #define PLATFORM_VM_VOLUME_MOUNT_POINT "/System/Volumes/VM"
290 #endif
291
292 struct mntlist mountlist; /* mounted filesystem list */
293 static int nummounts = 0;
294
295 static int print_busy_vnodes = 0; /* print out busy vnodes */
296
297 #if DIAGNOSTIC
298 #define VLISTCHECK(fun, vp, list) \
299 if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
300 panic("%s: %s vnode not on %slist", (fun), (list), (list));
301 #else
302 #define VLISTCHECK(fun, vp, list)
303 #endif /* DIAGNOSTIC */
304
305 #define VLISTNONE(vp) \
306 do { \
307 (vp)->v_freelist.tqe_next = (struct vnode *)0; \
308 (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \
309 } while(0)
310
311 #define VONLIST(vp) \
312 ((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
313
314 /* remove a vnode from free vnode list */
315 #define VREMFREE(fun, vp) \
316 do { \
317 VLISTCHECK((fun), (vp), "free"); \
318 TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \
319 VLISTNONE((vp)); \
320 freevnodes--; \
321 } while(0)
322
323
324 /* remove a vnode from dead vnode list */
325 #define VREMDEAD(fun, vp) \
326 do { \
327 VLISTCHECK((fun), (vp), "dead"); \
328 TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \
329 VLISTNONE((vp)); \
330 vp->v_listflag &= ~VLIST_DEAD; \
331 deadvnodes--; \
332 } while(0)
333
334
335 /* remove a vnode from async work vnode list */
336 #define VREMASYNC_WORK(fun, vp) \
337 do { \
338 VLISTCHECK((fun), (vp), "async_work"); \
339 TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist); \
340 VLISTNONE((vp)); \
341 vp->v_listflag &= ~VLIST_ASYNC_WORK; \
342 async_work_vnodes--; \
343 } while(0)
344
345
346 /* remove a vnode from rage vnode list */
347 #define VREMRAGE(fun, vp) \
348 do { \
349 if ( !(vp->v_listflag & VLIST_RAGE)) \
350 panic("VREMRAGE: vp not on rage list"); \
351 VLISTCHECK((fun), (vp), "rage"); \
352 TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \
353 VLISTNONE((vp)); \
354 vp->v_listflag &= ~VLIST_RAGE; \
355 ragevnodes--; \
356 } while(0)
357
358 static void async_work_continue(void);
359 static void vn_laundry_continue(void);
360
361 /*
362 * Initialize the vnode management data structures.
363 */
364 __private_extern__ void
vntblinit(void)365 vntblinit(void)
366 {
367 thread_t thread = THREAD_NULL;
368
369 TAILQ_INIT(&vnode_free_list);
370 TAILQ_INIT(&vnode_rage_list);
371 TAILQ_INIT(&vnode_dead_list);
372 TAILQ_INIT(&vnode_async_work_list);
373 TAILQ_INIT(&mountlist);
374
375 microuptime(&rage_tv);
376 rage_limit = desiredvnodes / 100;
377
378 if (rage_limit < RAGE_LIMIT_MIN) {
379 rage_limit = RAGE_LIMIT_MIN;
380 }
381
382 deadvnodes_low = (desiredvnodes) / 100;
383 if (deadvnodes_low > 300) {
384 deadvnodes_low = 300;
385 }
386 deadvnodes_high = deadvnodes_low * 2;
387
388 /*
389 * create worker threads
390 */
391 kernel_thread_start((thread_continue_t)async_work_continue, NULL, &thread);
392 thread_deallocate(thread);
393 kernel_thread_start((thread_continue_t)vn_laundry_continue, NULL, &thread);
394 thread_deallocate(thread);
395 }
396
397 /* the timeout is in 10 msecs */
398 int
vnode_waitforwrites(vnode_t vp,int output_target,int slpflag,int slptimeout,const char * msg)399 vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg)
400 {
401 int error = 0;
402 struct timespec ts;
403
404 if (output_target < 0) {
405 return EINVAL;
406 }
407
408 KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);
409
410 if (vp->v_numoutput > output_target) {
411 slpflag |= PDROP;
412
413 vnode_lock_spin(vp);
414
415 while ((vp->v_numoutput > output_target) && error == 0) {
416 if (output_target) {
417 vp->v_flag |= VTHROTTLED;
418 } else {
419 vp->v_flag |= VBWAIT;
420 }
421
422 ts.tv_sec = (slptimeout / 100);
423 ts.tv_nsec = (slptimeout % 1000) * 10 * NSEC_PER_USEC * 1000;
424 error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);
425
426 vnode_lock_spin(vp);
427 }
428 vnode_unlock(vp);
429 }
430 KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);
431
432 return error;
433 }
434
435
/*
 * Account for the start of a write I/O on a vnode by atomically
 * incrementing v_numoutput; paired with vnode_writedone().
 */
void
vnode_startwrite(vnode_t vp)
{
	OSAddAtomic(1, &vp->v_numoutput);
}
441
442
443 void
vnode_writedone(vnode_t vp)444 vnode_writedone(vnode_t vp)
445 {
446 if (vp) {
447 int need_wakeup = 0;
448
449 OSAddAtomic(-1, &vp->v_numoutput);
450
451 vnode_lock_spin(vp);
452
453 if (vp->v_numoutput < 0) {
454 panic("vnode_writedone: numoutput < 0");
455 }
456
457 if ((vp->v_flag & VTHROTTLED)) {
458 vp->v_flag &= ~VTHROTTLED;
459 need_wakeup = 1;
460 }
461 if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
462 vp->v_flag &= ~VBWAIT;
463 need_wakeup = 1;
464 }
465 vnode_unlock(vp);
466
467 if (need_wakeup) {
468 wakeup((caddr_t)&vp->v_numoutput);
469 }
470 }
471 }
472
473
474
475 int
vnode_hasdirtyblks(vnode_t vp)476 vnode_hasdirtyblks(vnode_t vp)
477 {
478 struct cl_writebehind *wbp;
479
480 /*
481 * Not taking the buf_mtx as there is little
482 * point doing it. Even if the lock is taken the
483 * state can change right after that. If their
484 * needs to be a synchronization, it must be driven
485 * by the caller
486 */
487 if (vp->v_dirtyblkhd.lh_first) {
488 return 1;
489 }
490
491 if (!UBCINFOEXISTS(vp)) {
492 return 0;
493 }
494
495 wbp = vp->v_ubcinfo->cl_wbehind;
496
497 if (wbp && (wbp->cl_number || wbp->cl_scmap)) {
498 return 1;
499 }
500
501 return 0;
502 }
503
504 int
vnode_hascleanblks(vnode_t vp)505 vnode_hascleanblks(vnode_t vp)
506 {
507 /*
508 * Not taking the buf_mtx as there is little
509 * point doing it. Even if the lock is taken the
510 * state can change right after that. If their
511 * needs to be a synchronization, it must be driven
512 * by the caller
513 */
514 if (vp->v_cleanblkhd.lh_first) {
515 return 1;
516 }
517 return 0;
518 }
519
/*
 * Flag the mount as being iterated (MNT_LITER).  Called with the
 * mount lock held; cleared again by vnode_iterate_clear().
 */
void
vnode_iterate_setup(mount_t mp)
{
	mp->mnt_lflag |= MNT_LITER;
}
525
526 int
vnode_umount_preflight(mount_t mp,vnode_t skipvp,int flags)527 vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
528 {
529 vnode_t vp;
530 int ret = 0;
531
532 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
533 if (vp->v_type == VDIR) {
534 continue;
535 }
536 if (vp == skipvp) {
537 continue;
538 }
539 if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) {
540 continue;
541 }
542 if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
543 continue;
544 }
545 if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG)) {
546 continue;
547 }
548
549 /* Look for busy vnode */
550 if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) {
551 ret = 1;
552 if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
553 vprint("vnode_umount_preflight - busy vnode", vp);
554 } else {
555 return ret;
556 }
557 } else if (vp->v_iocount > 0) {
558 /* Busy if iocount is > 0 for more than 3 seconds */
559 tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz);
560 if (vp->v_iocount > 0) {
561 ret = 1;
562 if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
563 vprint("vnode_umount_preflight - busy vnode", vp);
564 } else {
565 return ret;
566 }
567 }
568 continue;
569 }
570 }
571
572 return ret;
573 }
574
/*
 * This routine prepares iteration by moving all the vnodes to worker queue
 * called with mount lock held.
 *
 * Returns 0 if the mount has no vnodes, 1 once the worker queue is set up.
 */
int
vnode_iterate_prepare(mount_t mp)
{
	vnode_t vp;

	if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
		/* nothing to do */
		return 0;
	}

	/*
	 * Splice the entire mnt_vnodelist onto mnt_workerqueue by hand:
	 * repoint the first element's back-link, then copy the head and
	 * tail pointers over wholesale.
	 */
	vp = TAILQ_FIRST(&mp->mnt_vnodelist);
	vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
	mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
	mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;

	/*
	 * The mount's main list starts empty; vnodes created during the
	 * iteration accumulate in mnt_newvnodes (merged back by
	 * vnode_iterate_reloadq()).
	 */
	TAILQ_INIT(&mp->mnt_vnodelist);
	if (mp->mnt_newvnodes.tqh_first != NULL) {
		panic("vnode_iterate_prepare: newvnode when entering vnode");
	}
	TAILQ_INIT(&mp->mnt_newvnodes);

	return 1;
}
602
603
/*
 * Merge the leftover worker-queue entries and any vnodes created during
 * iteration back into the mount's vnode list.  Called with mount lock held.
 * Returns 1 if new vnodes were moved onto the list, 0 otherwise.
 */
int
vnode_iterate_reloadq(mount_t mp)
{
	int moved = 0;

	/* add the remaining entries in workerq to the end of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		struct vnode * mvp;
		mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);

		/* Joining the workerque entities to mount vnode list */
		if (mvp) {
			/* Non-empty list: chain the worker queue after the last element. */
			mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
		} else {
			/* Empty list: the worker queue becomes the whole list. */
			mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
		}
		mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
		mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
		TAILQ_INIT(&mp->mnt_workerqueue);
	}

	/* add the newvnodes to the head of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
		struct vnode * nlvp;
		nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);

		mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
		nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
		if (mp->mnt_vnodelist.tqh_first) {
			mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
		} else {
			mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
		}
		mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
		TAILQ_INIT(&mp->mnt_newvnodes);
		moved = 1;
	}

	return moved;
}
645
646
/*
 * Clear the iteration-in-progress flag set by vnode_iterate_setup().
 * Called with the mount lock held.
 */
void
vnode_iterate_clear(mount_t mp)
{
	mp->mnt_lflag &= ~MNT_LITER;
}
652
653 #if defined(__x86_64__)
654
655 #include <i386/panic_hooks.h>
656
/*
 * Context captured for the panic hook while vnode_iterate() runs:
 * the mount being iterated and the vnode currently being visited.
 */
struct vnode_iterate_panic_hook {
	panic_hook_t hook;     /* embedded hook registered via panic_hook() */
	mount_t mp;            /* mount under iteration */
	struct vnode *vp;      /* vnode currently being processed, or NULL */
};
662
/*
 * Panic hook installed around the vnode_iterate() loop: logs the
 * physical-range info for the mount and the current vnode, then dumps
 * the memory surrounding the mount structure into the panic log.
 *
 * NOTE(review): 'phys' (uint64_t) and the range fields are printed
 * with %p; the sizes line up on LP64 but the specifiers look
 * mismatched — confirm against paniclog_append_noflush's format
 * handling.
 */
static void
vnode_iterate_panic_hook(panic_hook_t *hook_)
{
	struct vnode_iterate_panic_hook *hook = (struct vnode_iterate_panic_hook *)hook_;
	panic_phys_range_t range;
	uint64_t phys;

	if (panic_phys_range_before(hook->mp, &phys, &range)) {
		paniclog_append_noflush("mp = %p, phys = %p, prev (%p: %p-%p)\n",
		    hook->mp, phys, range.type, range.phys_start,
		    range.phys_start + range.len);
	} else {
		paniclog_append_noflush("mp = %p, phys = %p, prev (!)\n", hook->mp, phys);
	}

	if (panic_phys_range_before(hook->vp, &phys, &range)) {
		paniclog_append_noflush("vp = %p, phys = %p, prev (%p: %p-%p)\n",
		    hook->vp, phys, range.type, range.phys_start,
		    range.phys_start + range.len);
	} else {
		paniclog_append_noflush("vp = %p, phys = %p, prev (!)\n", hook->vp, phys);
	}
	/* Dump one page before the mount struct plus the following two pages. */
	panic_dump_mem((void *)(((vm_offset_t)hook->mp - 4096) & ~4095), 12288);
}
687 #endif /* defined(__x86_64__) */
688
/*
 * Iterate over every vnode on a mount, invoking 'callout' with an
 * iocount held on each live vnode.  The callout's return value
 * (VNODE_RETURNED / VNODE_CLAIMED, optionally *_DONE) dictates whether
 * the iocount is dropped here and whether iteration stops early.
 */
int
vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
    void *arg)
{
	struct vnode *vp;
	int vid, retval;
	int ret = 0;

	/*
	 * The mount iterate mutex is held for the duration of the iteration.
	 * This can be done by a state flag on the mount structure but we can
	 * run into priority inversion issues sometimes.
	 * Using a mutex allows us to benefit from the priority donation
	 * mechanisms in the kernel for locks. This mutex should never be
	 * acquired in spin mode and it should be acquired before attempting to
	 * acquire the mount lock.
	 */
	mount_iterate_lock(mp);

	mount_lock(mp);

	vnode_iterate_setup(mp);

	/* If it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		mount_iterate_unlock(mp);
		return ret;
	}

#if defined(__x86_64__)
	struct vnode_iterate_panic_hook hook;
	hook.mp = mp;
	hook.vp = NULL;
	panic_hook(&hook.hook, vnode_iterate_panic_hook);
#endif
	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
#if defined(__x86_64__)
		hook.vp = vp;
#endif
		/* Move each visited vnode back onto the regular list as we go. */
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
		vid = vp->v_id;
		/* Skip dead/typeless vnodes and ones that moved to another mount. */
		if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
			continue;
		}
		mount_unlock(mp);

		/* Take an iocount, validated against the v_id captured above. */
		if (vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) {
			mount_lock(mp);
			continue;
		}
		if (flags & VNODE_RELOAD) {
			/*
			 * we're reloading the filesystem
			 * cast out any inactive vnodes...
			 */
			if (vnode_reload(vp)) {
				/* vnode will be recycled on the refcount drop */
				vnode_put(vp);
				mount_lock(mp);
				continue;
			}
		}

		retval = callout(vp, arg);

		switch (retval) {
		case VNODE_RETURNED:
		case VNODE_RETURNED_DONE:
			/* RETURNED: the callout left the iocount for us to drop. */
			vnode_put(vp);
			if (retval == VNODE_RETURNED_DONE) {
				mount_lock(mp);
				ret = 0;
				goto out;
			}
			break;

		case VNODE_CLAIMED_DONE:
			/* CLAIMED: the callout took over the iocount. */
			mount_lock(mp);
			ret = 0;
			goto out;
		case VNODE_CLAIMED:
		default:
			break;
		}
		mount_lock(mp);
	}

out:
#if defined(__x86_64__)
	panic_unhook(&hook.hook);
#endif
	/* Put unvisited worker-queue entries and any new vnodes back. */
	(void)vnode_iterate_reloadq(mp);
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	mount_iterate_unlock(mp);
	return ret;
}
793
/* Take the mount's rename lock (mnt_renamelock). */
void
mount_lock_renames(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_renamelock);
}
799
/* Release the mount's rename lock (mnt_renamelock). */
void
mount_unlock_renames(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_renamelock);
}
805
/* Take the mount's iteration lock; serializes vnode_iterate() over this mount. */
void
mount_iterate_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_iter_lock);
}
811
/* Release the mount's iteration lock. */
void
mount_iterate_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_iter_lock);
}
817
/* Take the mount mutex (mnt_mlock). */
void
mount_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_mlock);
}
823
/* Take the mount mutex (mnt_mlock) in spin mode. */
void
mount_lock_spin(mount_t mp)
{
	lck_mtx_lock_spin(&mp->mnt_mlock);
}
829
/* Release the mount mutex (mnt_mlock). */
void
mount_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_mlock);
}
835
836
837 void
mount_ref(mount_t mp,int locked)838 mount_ref(mount_t mp, int locked)
839 {
840 if (!locked) {
841 mount_lock_spin(mp);
842 }
843
844 mp->mnt_count++;
845
846 if (!locked) {
847 mount_unlock(mp);
848 }
849 }
850
851
852 void
mount_drop(mount_t mp,int locked)853 mount_drop(mount_t mp, int locked)
854 {
855 if (!locked) {
856 mount_lock_spin(mp);
857 }
858
859 mp->mnt_count--;
860
861 if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN)) {
862 wakeup(&mp->mnt_lflag);
863 }
864
865 if (!locked) {
866 mount_unlock(mp);
867 }
868 }
869
870
871 int
mount_iterref(mount_t mp,int locked)872 mount_iterref(mount_t mp, int locked)
873 {
874 int retval = 0;
875
876 if (!locked) {
877 mount_list_lock();
878 }
879 if (mp->mnt_iterref < 0) {
880 retval = 1;
881 } else {
882 mp->mnt_iterref++;
883 }
884 if (!locked) {
885 mount_list_unlock();
886 }
887 return retval;
888 }
889
890 int
mount_isdrained(mount_t mp,int locked)891 mount_isdrained(mount_t mp, int locked)
892 {
893 int retval;
894
895 if (!locked) {
896 mount_list_lock();
897 }
898 if (mp->mnt_iterref < 0) {
899 retval = 1;
900 } else {
901 retval = 0;
902 }
903 if (!locked) {
904 mount_list_unlock();
905 }
906 return retval;
907 }
908
/*
 * Drop an iteration reference taken by mount_iterref() and wake any
 * thread waiting in mount_iterdrain().
 */
void
mount_iterdrop(mount_t mp)
{
	mount_list_lock();
	mp->mnt_iterref--;
	wakeup(&mp->mnt_iterref);
	mount_list_unlock();
}
917
/*
 * Wait for all outstanding iteration references to drain, then park
 * mnt_iterref at -1 so further mount_iterref() calls fail.  Undone by
 * mount_iterreset().
 */
void
mount_iterdrain(mount_t mp)
{
	mount_list_lock();
	while (mp->mnt_iterref) {
		msleep((caddr_t)&mp->mnt_iterref, &mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
	}
	/* mount iterations drained */
	mp->mnt_iterref = -1;
	mount_list_unlock();
}
/*
 * Re-enable iteration on a mount previously drained by
 * mount_iterdrain() (mnt_iterref parked at -1).
 */
void
mount_iterreset(mount_t mp)
{
	mount_list_lock();
	if (mp->mnt_iterref == -1) {
		mp->mnt_iterref = 0;
	}
	mount_list_unlock();
}
938
/*
 * Wait for the mount's reference count (mnt_count) to drain to zero.
 * Always called with mount lock held; MNT_LDRAIN is set for the
 * duration so mount_drop() knows to wake us.
 */
int
mount_refdrain(mount_t mp)
{
	if (mp->mnt_lflag & MNT_LDRAIN) {
		panic("already in drain");
	}
	mp->mnt_lflag |= MNT_LDRAIN;

	while (mp->mnt_count) {
		msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);
	}

	/* With no references left there must be no vnodes on the mount. */
	if (mp->mnt_vnodelist.tqh_first != NULL) {
		panic("mount_refdrain: dangling vnode");
	}

	mp->mnt_lflag &= ~MNT_LDRAIN;

	return 0;
}
960
/* Tags the mount point as not supporting extended readdir for NFS exports */
void
mount_set_noreaddirext(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_DENY_READDIREXT;
	mount_unlock(mp);
}
969
/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * Returns 0 with a shared hold on mnt_rwlock (released by
 * vfs_unbusy()), or ENOENT if the mount is dead or being unmounted.
 */
int
vfs_busy(mount_t mp, int flags)
{
restart:
	/* Dead mounts can never be busied. */
	if (mp->mnt_lflag & MNT_LDEAD) {
		return ENOENT;
	}

	mount_lock(mp);

	if (mp->mnt_lflag & MNT_LUNMOUNT) {
		if (flags & LK_NOWAIT || mp->mnt_lflag & MNT_LDEAD) {
			mount_unlock(mp);
			return ENOENT;
		}

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		mp->mnt_lflag |= MNT_LWAIT;
		/* PDROP: msleep releases mnt_mlock before returning. */
		msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
		return ENOENT;
	}

	mount_unlock(mp);

	lck_rw_lock_shared(&mp->mnt_rwlock);

	/*
	 * Until we are granted the rwlock, it's possible for the mount point to
	 * change state, so re-evaluate before granting the vfs_busy.
	 */
	if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
		lck_rw_done(&mp->mnt_rwlock);
		goto restart;
	}
	return 0;
}
1015
/*
 * Free a busy filesystem: release the hold on mnt_rwlock taken by
 * vfs_busy().
 */
void
vfs_unbusy(mount_t mp)
{
	lck_rw_done(&mp->mnt_rwlock);
}
1024
1025
1026
/*
 * Undo vfs_rootmountalloc_internal() after a failed root mount
 * attempt: drop the vfstable reference, release the busy state,
 * destroy the mount's locks (and MAC label) and free it.
 */
static void
vfs_rootmountfailed(mount_t mp)
{
	mount_list_lock();
	mp->mnt_vtable->vfc_refcount--;
	mount_list_unlock();

	vfs_unbusy(mp);

	mount_lock_destroy(mp);

#if CONFIG_MACF
	mac_mount_label_destroy(mp);
#endif

	zfree(mount_zone, mp);
}
1044
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 *
 * Returns the new mount, already marked busy (see vfs_busy() call
 * below) and with a reference taken on the vfstable entry.
 */
static mount_t
vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
{
	mount_t mp;

	mp = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO);
	/* Initialize the default IO constraints */
	mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
	mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
	mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
	mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
	mp->mnt_devblocksize = DEV_BSIZE;
	mp->mnt_alignmentmask = PAGE_MASK;
	mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
	mp->mnt_ioscale = 1;
	mp->mnt_ioflags = 0;
	mp->mnt_realrootvp = NULLVP;
	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
	mp->mnt_throttle_mask = LOWPRI_MAX_NUM_DEV - 1;
	mp->mnt_devbsdunit = 0;

	/* Hand the mount back to the caller already busied. */
	mount_lock_init(mp);
	(void)vfs_busy(mp, LK_NOWAIT);

	TAILQ_INIT(&mp->mnt_vnodelist);
	TAILQ_INIT(&mp->mnt_workerqueue);
	TAILQ_INIT(&mp->mnt_newvnodes);

	mp->mnt_vtable = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	/* The root mount starts read-only with MNT_ROOTFS set. */
	mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
	mp->mnt_vnodecovered = NULLVP;
	//mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;

	mount_list_lock();
	vfsp->vfc_refcount++;
	mount_list_unlock();

	strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
	mp->mnt_vfsstat.f_mntonname[0] = '/';
	/* XXX const poisoning layering violation */
	(void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);

#if CONFIG_MACF
	mac_mount_label_init(mp);
	mac_mount_label_associate(vfs_context_kernel(), mp);
#endif
	return mp;
}
1101
1102 errno_t
vfs_rootmountalloc(const char * fstypename,const char * devname,mount_t * mpp)1103 vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
1104 {
1105 struct vfstable *vfsp;
1106
1107 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
1108 if (!strncmp(vfsp->vfc_name, fstypename,
1109 sizeof(vfsp->vfc_name))) {
1110 break;
1111 }
1112 }
1113 if (vfsp == NULL) {
1114 return ENODEV;
1115 }
1116
1117 *mpp = vfs_rootmountalloc_internal(vfsp, devname);
1118
1119 if (*mpp) {
1120 return 0;
1121 }
1122
1123 return ENOMEM;
1124 }
1125
1126 #define DBG_MOUNTROOT (FSDBG_CODE(DBG_MOUNT, 0))
1127
1128 /*
1129 * Find an appropriate filesystem to use for the root. If a filesystem
1130 * has not been preselected, walk through the list of known filesystems
1131 * trying those that have mountroot routines, and try them until one
1132 * works or we have tried them all.
1133 */
1134 extern int (*mountroot)(void);
1135
1136 int
vfs_mountroot(void)1137 vfs_mountroot(void)
1138 {
1139 #if CONFIG_MACF
1140 struct vnode *vp;
1141 #endif
1142 struct vfstable *vfsp;
1143 vfs_context_t ctx = vfs_context_kernel();
1144 struct vfs_attr vfsattr;
1145 int error;
1146 mount_t mp;
1147 vnode_t bdevvp_rootvp;
1148
1149 KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_START);
1150 if (mountroot != NULL) {
1151 /*
1152 * used for netboot which follows a different set of rules
1153 */
1154 error = (*mountroot)();
1155
1156 KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 0);
1157 return error;
1158 }
1159 if ((error = bdevvp(rootdev, &rootvp))) {
1160 printf("vfs_mountroot: can't setup bdevvp\n");
1161
1162 KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 1);
1163 return error;
1164 }
1165 /*
1166 * 4951998 - code we call in vfc_mountroot may replace rootvp
1167 * so keep a local copy for some house keeping.
1168 */
1169 bdevvp_rootvp = rootvp;
1170
1171 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
1172 if (vfsp->vfc_mountroot == NULL
1173 && !ISSET(vfsp->vfc_vfsflags, VFC_VFSCANMOUNTROOT)) {
1174 continue;
1175 }
1176
1177 mp = vfs_rootmountalloc_internal(vfsp, "root_device");
1178 mp->mnt_devvp = rootvp;
1179
1180 if (vfsp->vfc_mountroot) {
1181 error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx);
1182 } else {
1183 error = VFS_MOUNT(mp, rootvp, 0, ctx);
1184 }
1185
1186 if (!error) {
1187 if (bdevvp_rootvp != rootvp) {
1188 /*
1189 * rootvp changed...
1190 * bump the iocount and fix up mnt_devvp for the
1191 * new rootvp (it will already have a usecount taken)...
1192 * drop the iocount and the usecount on the orignal
1193 * since we are no longer going to use it...
1194 */
1195 vnode_getwithref(rootvp);
1196 mp->mnt_devvp = rootvp;
1197
1198 vnode_rele(bdevvp_rootvp);
1199 vnode_put(bdevvp_rootvp);
1200 }
1201 mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;
1202
1203 vfs_unbusy(mp);
1204
1205 mount_list_add(mp);
1206
1207 /*
1208 * cache the IO attributes for the underlying physical media...
1209 * an error return indicates the underlying driver doesn't
1210 * support all the queries necessary... however, reasonable
1211 * defaults will have been set, so no reason to bail or care
1212 */
1213 vfs_init_io_attributes(rootvp, mp);
1214
1215 if (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) {
1216 root_is_CF_drive = TRUE;
1217 }
1218
1219 /*
1220 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
1221 */
1222 if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
1223 mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
1224 }
1225 if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
1226 mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
1227 }
1228
1229 #if defined(XNU_TARGET_OS_OSX)
1230 uint32_t speed;
1231
1232 if (MNTK_VIRTUALDEV & mp->mnt_kern_flag) {
1233 speed = 128;
1234 } else if (disk_conditioner_mount_is_ssd(mp)) {
1235 speed = 7 * 256;
1236 } else {
1237 speed = 256;
1238 }
1239 vc_progress_setdiskspeed(speed);
1240 #endif /* XNU_TARGET_OS_OSX */
1241 /*
1242 * Probe root file system for additional features.
1243 */
1244 (void)VFS_START(mp, 0, ctx);
1245
1246 VFSATTR_INIT(&vfsattr);
1247 VFSATTR_WANTED(&vfsattr, f_capabilities);
1248 if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
1249 VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
1250 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
1251 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
1252 mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
1253 }
1254 #if NAMEDSTREAMS
1255 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
1256 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
1257 mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
1258 }
1259 #endif
1260 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
1261 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
1262 mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
1263 }
1264
1265 if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS) &&
1266 (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) {
1267 mp->mnt_kern_flag |= MNTK_DIR_HARDLINKS;
1268 }
1269 }
1270
1271 /*
1272 * get rid of iocount reference returned
1273 * by bdevvp (or picked up by us on the substitued
1274 * rootvp)... it (or we) will have also taken
1275 * a usecount reference which we want to keep
1276 */
1277 vnode_put(rootvp);
1278
1279 #if CONFIG_MACF
1280 if ((vfs_flags(mp) & MNT_MULTILABEL) == 0) {
1281 KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 2);
1282 return 0;
1283 }
1284
1285 error = VFS_ROOT(mp, &vp, ctx);
1286 if (error) {
1287 printf("%s() VFS_ROOT() returned %d\n",
1288 __func__, error);
1289 dounmount(mp, MNT_FORCE, 0, ctx);
1290 goto fail;
1291 }
1292 error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
1293 /*
1294 * get rid of reference provided by VFS_ROOT
1295 */
1296 vnode_put(vp);
1297
1298 if (error) {
1299 printf("%s() vnode_label() returned %d\n",
1300 __func__, error);
1301 dounmount(mp, MNT_FORCE, 0, ctx);
1302 goto fail;
1303 }
1304 #endif
1305 KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 3);
1306 return 0;
1307 }
1308 vfs_rootmountfailed(mp);
1309 #if CONFIG_MACF
1310 fail:
1311 #endif
1312 if (error != EINVAL) {
1313 printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
1314 }
1315 }
1316 KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error ? error : ENODEV, 4);
1317 return ENODEV;
1318 }
1319
/*
 * vfs_iterate() callback: flush all name-cache entries for this mount.
 */
static int
cache_purge_callback(mount_t mp, __unused void * arg)
{
	cache_purgevfs(mp);
	return VFS_RETURNED;
}
1326
1327 extern lck_rw_t rootvnode_rw_lock;
1328 extern void set_rootvnode(vnode_t);
1329
1330
/*
 * vfs_iterate() callback: after the root pivot, re-derive f_mntonname from
 * the (relocated) covered vnode so mount paths reflect the new namespace.
 * "/" and "/dev" are left alone — they keep their absolute paths across
 * the pivot.  Note sizeof("...") includes the NUL, so these are exact
 * string matches, not prefix matches.
 */
static int
mntonname_fixup_callback(mount_t mp, __unused void *arg)
{
	int error = 0;

	if ((strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/", sizeof("/")) == 0) ||
	    (strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/dev", sizeof("/dev")) == 0)) {
		return 0;
	}

	/* Hold the mount busy while rewriting its mnt_vfsstat path. */
	if ((error = vfs_busy(mp, LK_NOWAIT))) {
		printf("vfs_busy failed with %d for %s\n", error, mp->mnt_vfsstat.f_mntonname);
		return -1;
	}

	int pathlen = MAXPATHLEN;
	if ((error = vn_getpath_ext(mp->mnt_vnodecovered, NULL, mp->mnt_vfsstat.f_mntonname, &pathlen, VN_GETPATH_FSENTER))) {
		printf("vn_getpath_ext failed with %d for mnt_vnodecovered of %s\n", error, mp->mnt_vfsstat.f_mntonname);
	}

	vfs_unbusy(mp);

	return error;
}
1355
/*
 * vfs_iterate() callback: clear MNTK_BACKS_ROOT on every mount.  Used by
 * vfs_switch_root(); the caller re-sets the flag where appropriate after
 * the pivot (see the vfs_switch_root block comment).
 */
static int
clear_mntk_backs_root_callback(mount_t mp, __unused void *arg)
{
	lck_rw_lock_exclusive(&mp->mnt_rwlock);
	mp->mnt_kern_flag &= ~MNTK_BACKS_ROOT;
	lck_rw_done(&mp->mnt_rwlock);
	return VFS_RETURNED;
}
1364
1365 static int
verify_incoming_rootfs(vnode_t * incoming_rootvnodep,vfs_context_t ctx,vfs_switch_root_flags_t flags)1366 verify_incoming_rootfs(vnode_t *incoming_rootvnodep, vfs_context_t ctx,
1367 vfs_switch_root_flags_t flags)
1368 {
1369 mount_t mp;
1370 vnode_t tdp;
1371 vnode_t incoming_rootvnode_with_iocount = *incoming_rootvnodep;
1372 vnode_t incoming_rootvnode_with_usecount = NULLVP;
1373 int error = 0;
1374
1375 if (vnode_vtype(incoming_rootvnode_with_iocount) != VDIR) {
1376 printf("Incoming rootfs path not a directory\n");
1377 error = ENOTDIR;
1378 goto done;
1379 }
1380
1381 /*
1382 * Before we call VFS_ROOT, we have to let go of the iocount already
1383 * acquired, but before doing that get a usecount.
1384 */
1385 vnode_ref_ext(incoming_rootvnode_with_iocount, 0, VNODE_REF_FORCE);
1386 incoming_rootvnode_with_usecount = incoming_rootvnode_with_iocount;
1387 vnode_lock_spin(incoming_rootvnode_with_usecount);
1388 if ((mp = incoming_rootvnode_with_usecount->v_mount)) {
1389 mp->mnt_crossref++;
1390 vnode_unlock(incoming_rootvnode_with_usecount);
1391 } else {
1392 vnode_unlock(incoming_rootvnode_with_usecount);
1393 printf("Incoming rootfs root vnode does not have associated mount\n");
1394 error = ENOTDIR;
1395 goto done;
1396 }
1397
1398 if (vfs_busy(mp, LK_NOWAIT)) {
1399 printf("Incoming rootfs root vnode mount is busy\n");
1400 error = ENOENT;
1401 goto out;
1402 }
1403
1404 vnode_put(incoming_rootvnode_with_iocount);
1405 incoming_rootvnode_with_iocount = NULLVP;
1406
1407 error = VFS_ROOT(mp, &tdp, ctx);
1408
1409 if (error) {
1410 printf("Could not get rootvnode of incoming rootfs\n");
1411 } else if (tdp != incoming_rootvnode_with_usecount) {
1412 vnode_put(tdp);
1413 tdp = NULLVP;
1414 printf("Incoming rootfs root vnode mount is is not a mountpoint\n");
1415 error = EINVAL;
1416 goto out_busy;
1417 } else {
1418 incoming_rootvnode_with_iocount = tdp;
1419 tdp = NULLVP;
1420 }
1421
1422 if ((flags & VFSSR_VIRTUALDEV_PROHIBITED) != 0) {
1423 lck_rw_lock_shared(&mp->mnt_rwlock);
1424 if (mp->mnt_flag & MNTK_VIRTUALDEV) {
1425 error = ENODEV;
1426 }
1427 lck_rw_done(&mp->mnt_rwlock);
1428 if (error) {
1429 printf("Incoming rootfs is backed by a virtual device; cannot switch to it");
1430 goto out_busy;
1431 }
1432 }
1433
1434 out_busy:
1435 vfs_unbusy(mp);
1436
1437 out:
1438 vnode_lock(incoming_rootvnode_with_usecount);
1439 mp->mnt_crossref--;
1440 if (mp->mnt_crossref < 0) {
1441 panic("mount cross refs -ve");
1442 }
1443 vnode_unlock(incoming_rootvnode_with_usecount);
1444
1445 done:
1446 if (incoming_rootvnode_with_usecount) {
1447 vnode_rele(incoming_rootvnode_with_usecount);
1448 incoming_rootvnode_with_usecount = NULLVP;
1449 }
1450
1451 if (error && incoming_rootvnode_with_iocount) {
1452 vnode_put(incoming_rootvnode_with_iocount);
1453 incoming_rootvnode_with_iocount = NULLVP;
1454 }
1455
1456 *incoming_rootvnodep = incoming_rootvnode_with_iocount;
1457 return error;
1458 }
1459
1460 /*
1461 * vfs_switch_root()
1462 *
1463 * Move the current root volume, and put a different volume at the root.
1464 *
1465 * incoming_vol_old_path: This is the path where the incoming root volume
1466 * is mounted when this function begins.
1467 * outgoing_vol_new_path: This is the path where the outgoing root volume
1468 * will be mounted when this function (successfully) ends.
1469 * Note: Do not use a leading slash.
1470 *
1471 * Volumes mounted at several fixed points (including /dev) will be preserved
1472 * at the same absolute path. That means they will move within the folder
1473 * hierarchy during the pivot operation. For example, /dev before the pivot
1474 * will be at /dev after the pivot.
1475 *
1476 * If any filesystem has MNTK_BACKS_ROOT set, it will be cleared. If the
1477 * incoming root volume is actually a disk image backed by some other
1478 * filesystem, it is the caller's responsibility to re-set MNTK_BACKS_ROOT
1479 * as appropriate.
1480 */
int
vfs_switch_root(const char *incoming_vol_old_path,
    const char *outgoing_vol_new_path,
    vfs_switch_root_flags_t flags)
{
	// grumble grumble
#define countof(x) (sizeof(x) / sizeof(x[0]))

	/* Book-keeping for a mount that must survive the pivot in place. */
	struct preserved_mount {
		vnode_t pm_rootvnode;       /* root vnode of the mount (NULL => skip) */
		mount_t pm_mount;           /* the mount itself */
		vnode_t pm_new_covered_vp;  /* its mountpoint dir on the incoming root */
		vnode_t pm_old_covered_vp;  /* its mountpoint dir on the outgoing root */
		const char *pm_path;        /* path relative to the root, no leading / */
	};

	vfs_context_t ctx = vfs_context_kernel();
	vnode_t incoming_rootvnode = NULLVP;
	vnode_t outgoing_vol_new_covered_vp = NULLVP;
	vnode_t incoming_vol_old_covered_vp = NULLVP;
	mount_t outgoing = NULL;
	mount_t incoming = NULL;

	struct preserved_mount devfs = { NULLVP, NULL, NULLVP, NULLVP, "dev" };
	struct preserved_mount preboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Preboot" };
	struct preserved_mount recovery = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Recovery" };
	struct preserved_mount vm = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/VM" };
	struct preserved_mount update = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Update" };
	struct preserved_mount iscPreboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/iSCPreboot" };
	struct preserved_mount hardware = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Hardware" };
	struct preserved_mount xarts = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/xarts" };
	struct preserved_mount factorylogs = { NULLVP, NULL, NULLVP, NULLVP, "FactoryLogs" };
	struct preserved_mount idiags = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Diags" };

	struct preserved_mount *preserved[10];
	preserved[0] = &devfs;
	preserved[1] = &preboot;
	preserved[2] = &recovery;
	preserved[3] = &vm;
	preserved[4] = &update;
	preserved[5] = &iscPreboot;
	preserved[6] = &hardware;
	preserved[7] = &xarts;
	preserved[8] = &factorylogs;
	preserved[9] = &idiags;

	int error;

	printf("%s : shuffling mount points : %s <-> / <-> %s\n", __FUNCTION__, incoming_vol_old_path, outgoing_vol_new_path);

	if (outgoing_vol_new_path[0] == '/') {
		// I should have written this to be more helpful and just advance the pointer forward past the slash
		printf("Do not use a leading slash in outgoing_vol_new_path\n");
		return EINVAL;
	}

	// Set incoming_rootvnode.
	// Find the vnode representing the mountpoint of the new root
	// filesystem. That will be the new root directory.
	error = vnode_lookup(incoming_vol_old_path, 0, &incoming_rootvnode, ctx);
	if (error) {
		printf("Incoming rootfs root vnode not found\n");
		error = ENOENT;
		goto done;
	}

	/*
	 * This function drops the iocount and sets the vnode to NULL on error.
	 */
	error = verify_incoming_rootfs(&incoming_rootvnode, ctx, flags);
	if (error) {
		goto done;
	}

	/*
	 * Set outgoing_vol_new_covered_vp.
	 * Find the vnode representing the future mountpoint of the old
	 * root filesystem, inside the directory incoming_rootvnode.
	 * Right now it's at "/incoming_vol_old_path/outgoing_vol_new_path".
	 * soon it will become "/oldrootfs_path_after", which will be covered.
	 */
	error = vnode_lookupat(outgoing_vol_new_path, 0, &outgoing_vol_new_covered_vp, ctx, incoming_rootvnode);
	if (error) {
		printf("Outgoing rootfs path not found, abandoning / switch, error = %d\n", error);
		error = ENOENT;
		goto done;
	}
	if (vnode_vtype(outgoing_vol_new_covered_vp) != VDIR) {
		printf("Outgoing rootfs path is not a directory, abandoning / switch\n");
		error = ENOTDIR;
		goto done;
	}

	/*
	 * Find the preserved mounts - see if they are mounted. Get their root
	 * vnode if they are. If they aren't, leave rootvnode NULL which will
	 * be the signal to ignore this mount later on.
	 *
	 * Also get preserved mounts' new_covered_vp.
	 * Find the node representing the folder "dev" inside the directory newrootvnode.
	 * Right now it's at "/incoming_vol_old_path/dev".
	 * Soon it will become /dev, which will be covered by the devfs mountpoint.
	 */
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];

		error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_rootvnode, ctx, rootvnode);
		if (error) {
			printf("skipping preserved mountpoint because not found or error: %d: %s\n", error, pmi->pm_path);
			// not fatal. try the next one in the list.
			continue;
		}
		bool is_mountpoint = false;
		vnode_lock_spin(pmi->pm_rootvnode);
		if ((pmi->pm_rootvnode->v_flag & VROOT) != 0) {
			is_mountpoint = true;
		}
		vnode_unlock(pmi->pm_rootvnode);
		if (!is_mountpoint) {
			printf("skipping preserved mountpoint because not a mountpoint: %s\n", pmi->pm_path);
			vnode_put(pmi->pm_rootvnode);
			pmi->pm_rootvnode = NULLVP;
			// not fatal. try the next one in the list.
			continue;
		}

		error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_new_covered_vp, ctx, incoming_rootvnode);
		if (error) {
			printf("preserved new mount directory not found or error: %d: %s\n", error, pmi->pm_path);
			error = ENOENT;
			goto done;
		}
		if (vnode_vtype(pmi->pm_new_covered_vp) != VDIR) {
			printf("preserved new mount directory not directory: %s\n", pmi->pm_path);
			error = ENOTDIR;
			goto done;
		}

		printf("will preserve mountpoint across pivot: /%s\n", pmi->pm_path);
	}

	/*
	 * --
	 * At this point, everything has been prepared and all error conditions
	 * have been checked. We check everything we can before this point;
	 * from now on we start making destructive changes, and we can't stop
	 * until we reach the end.
	 * ----
	 */

	/* this usecount is transferred to the mnt_vnodecovered */
	vnode_ref_ext(outgoing_vol_new_covered_vp, 0, VNODE_REF_FORCE);
	/* this usecount is transferred to set_rootvnode */
	vnode_ref_ext(incoming_rootvnode, 0, VNODE_REF_FORCE);


	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		/* this usecount is transferred to the mnt_vnodecovered */
		vnode_ref_ext(pmi->pm_new_covered_vp, 0, VNODE_REF_FORCE);

		/* The new_covered_vp is a mountpoint from now on. */
		vnode_lock_spin(pmi->pm_new_covered_vp);
		pmi->pm_new_covered_vp->v_flag |= VMOUNT;
		vnode_unlock(pmi->pm_new_covered_vp);
	}

	/* The outgoing_vol_new_covered_vp is a mountpoint from now on. */
	vnode_lock_spin(outgoing_vol_new_covered_vp);
	outgoing_vol_new_covered_vp->v_flag |= VMOUNT;
	vnode_unlock(outgoing_vol_new_covered_vp);


	/*
	 * Identify the mount_ts of the mounted filesystems that are being
	 * manipulated: outgoing rootfs, incoming rootfs, and the preserved
	 * mounts.
	 */
	outgoing = rootvnode->v_mount;
	incoming = incoming_rootvnode->v_mount;
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		pmi->pm_mount = pmi->pm_rootvnode->v_mount;
	}

	/* Block concurrent vnode_lookups for the remainder of the pivot. */
	lck_rw_lock_exclusive(&rootvnode_rw_lock);

	/* Setup incoming as the new rootfs */
	lck_rw_lock_exclusive(&incoming->mnt_rwlock);
	incoming_vol_old_covered_vp = incoming->mnt_vnodecovered;
	incoming->mnt_vnodecovered = NULLVP;
	strlcpy(incoming->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
	incoming->mnt_flag |= MNT_ROOTFS;
	lck_rw_done(&incoming->mnt_rwlock);

	/*
	 * The preserved mountpoints will now be moved to
	 * incoming_rootnode/pm_path, and then by the end of the function,
	 * since incoming_rootnode is going to /, the preserved mounts
	 * will end up back at /pm_path
	 */
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		lck_rw_lock_exclusive(&pmi->pm_mount->mnt_rwlock);
		pmi->pm_old_covered_vp = pmi->pm_mount->mnt_vnodecovered;
		pmi->pm_mount->mnt_vnodecovered = pmi->pm_new_covered_vp;
		vnode_lock_spin(pmi->pm_new_covered_vp);
		pmi->pm_new_covered_vp->v_mountedhere = pmi->pm_mount;
		vnode_unlock(pmi->pm_new_covered_vp);
		lck_rw_done(&pmi->pm_mount->mnt_rwlock);
	}

	/*
	 * The old root volume now covers outgoing_vol_new_covered_vp
	 * on the new root volume. Remove the ROOTFS marker.
	 * Now it is to be found at outgoing_vol_new_path
	 */
	lck_rw_lock_exclusive(&outgoing->mnt_rwlock);
	outgoing->mnt_vnodecovered = outgoing_vol_new_covered_vp;
	strlcpy(outgoing->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
	strlcat(outgoing->mnt_vfsstat.f_mntonname, outgoing_vol_new_path, MAXPATHLEN);
	outgoing->mnt_flag &= ~MNT_ROOTFS;
	vnode_lock_spin(outgoing_vol_new_covered_vp);
	outgoing_vol_new_covered_vp->v_mountedhere = outgoing;
	vnode_unlock(outgoing_vol_new_covered_vp);
	lck_rw_done(&outgoing->mnt_rwlock);

	if (!(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV) &&
	    (TAILQ_FIRST(&mountlist) == outgoing)) {
		vfs_setmntsystem(outgoing);
	}

	/*
	 * Finally, remove the mount_t linkage from the previously covered
	 * vnodes on the old root volume. These were incoming_vol_old_path,
	 * and each preserved mounts's "/pm_path". The filesystems previously
	 * mounted there have already been moved away.
	 */
	vnode_lock_spin(incoming_vol_old_covered_vp);
	incoming_vol_old_covered_vp->v_flag &= ~VMOUNT;
	incoming_vol_old_covered_vp->v_mountedhere = NULL;
	vnode_unlock(incoming_vol_old_covered_vp);

	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		vnode_lock_spin(pmi->pm_old_covered_vp);
		pmi->pm_old_covered_vp->v_flag &= ~VMOUNT;
		pmi->pm_old_covered_vp->v_mountedhere = NULL;
		vnode_unlock(pmi->pm_old_covered_vp);
	}

	/*
	 * Clear the name cache since many cached names are now invalid.
	 */
	vfs_iterate(0 /* flags */, cache_purge_callback, NULL);

	/*
	 * Actually change the rootvnode! And finally drop the lock that
	 * prevents concurrent vnode_lookups.
	 */
	set_rootvnode(incoming_rootvnode);
	lck_rw_unlock_exclusive(&rootvnode_rw_lock);

	if (!(incoming->mnt_kern_flag & MNTK_VIRTUALDEV) &&
	    !(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV)) {
		/*
		 * Switch the order of mount structures in the mountlist, new root
		 * mount moves to the head of the list followed by /dev and the other
		 * preserved mounts then all the preexisting mounts (old rootfs + any
		 * others)
		 */
		mount_list_lock();
		for (size_t i = 0; i < countof(preserved); i++) {
			struct preserved_mount *pmi = preserved[i];
			if (pmi->pm_rootvnode == NULLVP) {
				continue;
			}

			TAILQ_REMOVE(&mountlist, pmi->pm_mount, mnt_list);
			TAILQ_INSERT_HEAD(&mountlist, pmi->pm_mount, mnt_list);
		}
		TAILQ_REMOVE(&mountlist, incoming, mnt_list);
		TAILQ_INSERT_HEAD(&mountlist, incoming, mnt_list);
		mount_list_unlock();
	}

	/*
	 * Fixups across all volumes
	 */
	vfs_iterate(0 /* flags */, mntonname_fixup_callback, NULL);
	vfs_iterate(0 /* flags */, clear_mntk_backs_root_callback, NULL);

	error = 0;

done:
	/* Drop all per-mount references taken during the lookup phase. */
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];

		if (pmi->pm_rootvnode) {
			vnode_put(pmi->pm_rootvnode);
		}
		if (pmi->pm_new_covered_vp) {
			vnode_put(pmi->pm_new_covered_vp);
		}
		if (pmi->pm_old_covered_vp) {
			vnode_rele(pmi->pm_old_covered_vp);
		}
	}

	if (outgoing_vol_new_covered_vp) {
		vnode_put(outgoing_vol_new_covered_vp);
	}

	if (incoming_vol_old_covered_vp) {
		vnode_rele(incoming_vol_old_covered_vp);
	}

	if (incoming_rootvnode) {
		vnode_put(incoming_rootvnode);
	}

	printf("%s : done shuffling mount points with error: %d\n", __FUNCTION__, error);
	return error;
}
1821
1822 /*
1823 * Mount the Recovery volume of a container
1824 */
int
vfs_mount_recovery(void)
{
#if CONFIG_MOUNT_PREBOOTRECOVERY
	int error = 0;

	/* Hold an iocount on the root vnode across the kernel_mount call. */
	error = vnode_get(rootvnode);
	if (error) {
		/* root must be mounted first */
		printf("vnode_get(rootvnode) failed with error %d\n", error);
		return error;
	}

	char recoverypath[] = PLATFORM_RECOVERY_VOLUME_MOUNT_POINT; /* !const because of internal casting */

	/* Mount the recovery volume, using the same fstype as the root fs. */
	printf("attempting kernel mount for recovery volume... \n");
	error = kernel_mount(rootvnode->v_mount->mnt_vfsstat.f_fstypename, NULLVP, NULLVP,
	    recoverypath, (rootvnode->v_mount), 0, 0, (KERNEL_MOUNT_RECOVERYVOL), vfs_context_kernel());

	if (error) {
		printf("Failed to mount recovery volume (%d)\n", error);
	} else {
		printf("mounted recovery volume\n");
	}

	vnode_put(rootvnode);
	return error;
#else
	/* Recovery-volume mounting not configured for this platform. */
	return 0;
#endif
}
1857
1858 /*
1859 * Lookup a mount point by filesystem identifier.
1860 */
1861
/*
 * Look up a mount by fsid without holding the mount list lock
 * (no iteration reference is taken).
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	return mount_list_lookupby_fsid(fsid, 0, 0);
}
1867
/*
 * Look up a mount by fsid; the caller already holds the mount list lock.
 */
static struct mount *
vfs_getvfs_locked(fsid_t *fsid)
{
	return mount_list_lookupby_fsid(fsid, 1, 0);
}
1873
1874 struct mount *
vfs_getvfs_by_mntonname(char * path)1875 vfs_getvfs_by_mntonname(char *path)
1876 {
1877 mount_t retmp = (mount_t)0;
1878 mount_t mp;
1879
1880 mount_list_lock();
1881 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1882 if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
1883 sizeof(mp->mnt_vfsstat.f_mntonname))) {
1884 retmp = mp;
1885 if (mount_iterref(retmp, 1)) {
1886 retmp = NULL;
1887 }
1888 goto out;
1889 }
1890 }
1891 out:
1892 mount_list_unlock();
1893 return retmp;
1894 }
1895
1896 /* generation number for creation of new fsids */
1897 u_short mntid_gen = 0;
1898 /*
1899 * Get a new unique fsid
1900 */
1901 void
vfs_getnewfsid(struct mount * mp)1902 vfs_getnewfsid(struct mount *mp)
1903 {
1904 fsid_t tfsid;
1905 int mtype;
1906
1907 mount_list_lock();
1908
1909 /* generate a new fsid */
1910 mtype = mp->mnt_vtable->vfc_typenum;
1911 if (++mntid_gen == 0) {
1912 mntid_gen++;
1913 }
1914 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1915 tfsid.val[1] = mtype;
1916
1917 while (vfs_getvfs_locked(&tfsid)) {
1918 if (++mntid_gen == 0) {
1919 mntid_gen++;
1920 }
1921 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1922 }
1923
1924 mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
1925 mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
1926 mount_list_unlock();
1927 }
1928
1929 /*
1930 * Routines having to do with the management of the vnode table.
1931 */
1932 extern int(**dead_vnodeop_p)(void *);
1933 long numvnodes, freevnodes, deadvnodes, async_work_vnodes;
1934
1935
1936 int async_work_timed_out = 0;
1937 int async_work_handled = 0;
1938 int dead_vnode_wanted = 0;
1939 int dead_vnode_waited = 0;
1940
1941 /*
1942 * Move a vnode from one mount queue to another.
1943 */
static void
insmntque(vnode_t vp, mount_t mp)
{
	mount_t lmp;
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
		if ((vp->v_lflag & VNAMED_MOUNT) == 0) {
			panic("insmntque: vp not in mount vnode list");
		}
		vp->v_lflag &= ~VNAMED_MOUNT;

		mount_lock_spin(lmp);

		mount_drop(lmp, 1);

		/*
		 * tqe_next == NULL means vp is (at most) the tail of one of
		 * the three per-mount lists; probe each tail to find which
		 * one and use the full TAILQ_REMOVE.  Otherwise vp is in the
		 * middle of a list and can be unlinked directly below.
		 */
		if (vp->v_mntvnodes.tqe_next == NULL) {
			if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
			} else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
			} else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
			}
		} else {
			/* Manual mid-list unlink (works regardless of which list). */
			vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
			*vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
		}
		vp->v_mntvnodes.tqe_next = NULL;
		vp->v_mntvnodes.tqe_prev = NULL;
		mount_unlock(lmp);
		return;
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL) {
		mount_lock_spin(mp);
		if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0)) {
			panic("vp already in mount list");
		}
		/*
		 * While an iteration is in progress (MNT_LITER) new vnodes go
		 * on mnt_newvnodes so the iterator's view stays stable.
		 */
		if (mp->mnt_lflag & MNT_LITER) {
			TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
		}
		if (vp->v_lflag & VNAMED_MOUNT) {
			panic("insmntque: vp already in mount vnode list");
		}
		vp->v_lflag |= VNAMED_MOUNT;
		mount_ref(mp, 1);
		mount_unlock(mp);
	}
}
2000
2001
2002 /*
2003 * Create a vnode for a block device.
2004 * Used for root filesystem, argdev, and swap areas.
2005 * Also used for memory file system special devices.
2006 */
int
bdevvp(dev_t dev, vnode_t *vpp)
{
	vnode_t nvp;
	int error;
	struct vnode_fsparam vfsp;
	struct vfs_context context;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return ENODEV;
	}

	context.vc_thread = current_thread();
	context.vc_ucred = FSCRED;

	/* Build a VBLK special vnode not attached to any mount. */
	vfsp.vnfs_mp = (struct mount *)0;
	vfsp.vnfs_vtype = VBLK;
	vfsp.vnfs_str = "bdevvp";
	vfsp.vnfs_dvp = NULL;
	vfsp.vnfs_fsnode = NULL;
	vfsp.vnfs_cnp = NULL;
	vfsp.vnfs_vops = spec_vnodeop_p;
	vfsp.vnfs_rdev = dev;
	vfsp.vnfs_filesize = 0;

	/* Keep this vnode out of the name cache. */
	vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;

	vfsp.vnfs_marksystem = 0;
	vfsp.vnfs_markroot = 0;

	if ((error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp))) {
		*vpp = NULLVP;
		return error;
	}
	vnode_lock_spin(nvp);
	nvp->v_flag |= VBDEVVP;
	nvp->v_tag = VT_NON;    /* set this to VT_NON so during aliasing it can be replaced */
	vnode_unlock(nvp);
	/*
	 * Any failure past this point panics: callers (e.g. root mounting)
	 * cannot proceed without this vnode.  The return statements after
	 * each panic are unreachable and exist for form only.
	 */
	if ((error = vnode_ref(nvp))) {
		panic("bdevvp failed: vnode_ref");
		return error;
	}
	if ((error = VNOP_FSYNC(nvp, MNT_WAIT, &context))) {
		panic("bdevvp failed: fsync");
		return error;
	}
	if ((error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0))) {
		panic("bdevvp failed: invalidateblks");
		return error;
	}

#if CONFIG_MACF
	/*
	 * XXXMAC: We can't put a MAC check here, the system will
	 * panic without this vnode.
	 */
#endif /* MAC */

	if ((error = VNOP_OPEN(nvp, FREAD, &context))) {
		panic("bdevvp failed: open");
		return error;
	}
	*vpp = nvp;

	return 0;
}
2074
2075 /*
2076 * Check to see if the new vnode represents a special device
2077 * for which we already have a vnode (either because of
2078 * bdevvp() or because of a different vnode representing
2079 * the same block device). If such an alias exists, deallocate
2080 * the existing contents and return the aliased vnode. The
2081 * caller is responsible for filling it with its new contents.
2082 */
static vnode_t
checkalias(struct vnode *nvp, dev_t nvp_rdev)
{
	struct vnode *vp;
	struct vnode **vpp;
	struct specinfo *sin = NULL;
	int vid = 0;

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	SPECHASH_LOCK();

	/* Scan the spec hash chain for a vnode with the same dev and type. */
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
			vid = vp->v_id;
			break;
		}
	}
	SPECHASH_UNLOCK();

	if (vp) {
found_alias:
		/* vid check catches the vnode being recycled since we unlocked. */
		if (vnode_getwithvid(vp, vid)) {
			goto loop;
		}
		/*
		 * Termination state is checked in vnode_getwithvid
		 */
		vnode_lock(vp);

		/*
		 * Alias, but not in use, so flush it out.
		 */
		if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_put_locked(vp);
			vnode_unlock(vp);
			goto loop;
		}
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		/* nvp becomes the (or an additional) vnode for this device. */
		if (sin == NULL) {
			sin = zalloc_flags(specinfo_zone, Z_WAITOK | Z_ZERO);
		} else {
			bzero(sin, sizeof(struct specinfo));
		}

		nvp->v_specinfo = sin;
		nvp->v_rdev = nvp_rdev;
		nvp->v_specflags = 0;
		nvp->v_speclastr = -1;
		nvp->v_specinfo->si_opencount = 0;
		nvp->v_specinfo->si_initted = 0;
		nvp->v_specinfo->si_throttleable = 0;

		SPECHASH_LOCK();

		/* We dropped the lock, someone could have added */
		if (vp == NULLVP) {
			for (vp = *vpp; vp; vp = vp->v_specnext) {
				if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
					vid = vp->v_id;
					SPECHASH_UNLOCK();
					goto found_alias;
				}
			}
		}

		/* Link nvp at the head of the hash chain. */
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		*vpp = nvp;

		if (vp != NULLVP) {
			/* Both vnodes now refer to the same device. */
			nvp->v_specflags |= SI_ALIASED;
			vp->v_specflags |= SI_ALIASED;
			SPECHASH_UNLOCK();
			vnode_put_locked(vp);
			vnode_unlock(vp);
		} else {
			SPECHASH_UNLOCK();
		}

		/* NULL return: caller keeps using nvp. */
		return NULLVP;
	}

	/* Existing vnode is being reused; free the unneeded specinfo. */
	if (sin) {
		zfree(specinfo_zone, sin);
	}

	if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0) {
		return vp;
	}

	panic("checkalias with VT_NON vp that shouldn't: %p", vp);

	return vp;
}
2180
2181
2182 /*
2183 * Get a reference on a particular vnode and lock it if requested.
2184 * If the vnode was on the inactive list, remove it from the list.
2185 * If the vnode was on the free list, remove it from the list and
2186 * move it to inactive list as needed.
2187 * The vnode lock bit is set if the vnode is being eliminated in
2188 * vgone. The process is awakened when the transition is completed,
2189 * and an error returned to indicate that the vnode is no longer
2190 * usable (possibly having been changed to a new file system type).
2191 */
2192 int
vget_internal(vnode_t vp,int vid,int vflags)2193 vget_internal(vnode_t vp, int vid, int vflags)
2194 {
2195 int error = 0;
2196
2197 vnode_lock_spin(vp);
2198
2199 if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0)) {
2200 /*
2201 * vnode to be returned only if it has writers opened
2202 */
2203 error = EINVAL;
2204 } else {
2205 error = vnode_getiocount(vp, vid, vflags);
2206 }
2207
2208 vnode_unlock(vp);
2209
2210 return error;
2211 }
2212
2213 /*
2214 * Returns: 0 Success
2215 * ENOENT No such file or directory [terminating]
2216 */
int
vnode_ref(vnode_t vp)
{
	/* plain usecount reference: no open-mode bits, no force flag */
	return vnode_ref_ext(vp, 0, 0);
}
2222
2223 /*
2224 * Returns: 0 Success
2225 * ENOENT No such file or directory [terminating]
2226 */
int
vnode_ref_ext(vnode_t vp, int fmode, int flags)
{
	int error = 0;

	vnode_lock_spin(vp);

	/*
	 * once all the current call sites have been fixed to insure they have
	 * taken an iocount, we can toughen this assert up and insist that the
	 * iocount is non-zero... a non-zero usecount doesn't insure correctness
	 */
	if (vp->v_iocount <= 0 && vp->v_usecount <= 0) {
		panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
	}

	/*
	 * if you are the owner of drain/termination, can acquire usecount
	 */
	if ((flags & VNODE_REF_FORCE) == 0) {
		if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
			if (vp->v_owner != current_thread()) {
				error = ENOENT;
				goto out;
			}
		}
	}

	/* Enable atomic ops on v_usecount without the vnode lock */
	os_atomic_inc(&vp->v_usecount, relaxed);

	if (fmode & FWRITE) {
		/* opened for writing: track in v_writecount as well */
		if (++vp->v_writecount <= 0) {
			panic("vnode_ref_ext: v_writecount");
		}
	}
	if (fmode & O_EVTONLY) {
		/* event-only opens are counted separately in v_kusecount */
		if (++vp->v_kusecount <= 0) {
			panic("vnode_ref_ext: v_kusecount");
		}
	}
	if (vp->v_flag & VRAGE) {
		struct uthread *ut;

		ut = current_uthread();

		if (!(current_proc()->p_lflag & P_LRAGE_VNODES) &&
		    !(ut->uu_flag & UT_RAGE_VNODES)) {
			/*
			 * a 'normal' process accessed this vnode
			 * so make sure its no longer marked
			 * for rapid aging... also, make sure
			 * it gets removed from the rage list...
			 * when v_usecount drops back to 0, it
			 * will be put back on the real free list
			 */
			vp->v_flag &= ~VRAGE;
			vp->v_references = 0;
			vnode_list_remove(vp);
		}
	}
	if (vp->v_usecount == 1 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
		/* first usecount on a regular file: notify VM that its pager is in use */
		if (vp->v_ubcinfo) {
			vnode_lock_convert(vp);
			memory_object_mark_used(vp->v_ubcinfo->ui_control);
		}
	}
out:
	vnode_unlock(vp);

	return error;
}
2299
2300
2301 boolean_t
vnode_on_reliable_media(vnode_t vp)2302 vnode_on_reliable_media(vnode_t vp)
2303 {
2304 mount_t mp = vp->v_mount;
2305
2306 /*
2307 * A NULL mountpoint would imply it's not attached to a any filesystem.
2308 * This can only happen with a vnode created by bdevvp(). We'll consider
2309 * those as not unreliable as the primary use of this function is determine
2310 * which vnodes are to be handed off to the async cleaner thread for
2311 * reclaim.
2312 */
2313 if (!mp || (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV) && (mp->mnt_flag & MNT_LOCAL))) {
2314 return TRUE;
2315 }
2316
2317 return FALSE;
2318 }
2319
2320 static void
vnode_async_list_add_locked(vnode_t vp)2321 vnode_async_list_add_locked(vnode_t vp)
2322 {
2323 if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
2324 panic("vnode_async_list_add: %p is in wrong state", vp);
2325 }
2326
2327 TAILQ_INSERT_HEAD(&vnode_async_work_list, vp, v_freelist);
2328 vp->v_listflag |= VLIST_ASYNC_WORK;
2329
2330 async_work_vnodes++;
2331 }
2332
2333 static void
vnode_async_list_add(vnode_t vp)2334 vnode_async_list_add(vnode_t vp)
2335 {
2336 vnode_list_lock();
2337
2338 vnode_async_list_add_locked(vp);
2339
2340 vnode_list_unlock();
2341
2342 wakeup(&vnode_async_work_list);
2343 }
2344
2345
2346 /*
2347 * put the vnode on appropriate free list.
2348 * called with vnode LOCKED
2349 */
static void
vnode_list_add(vnode_t vp)
{
	boolean_t need_dead_wakeup = FALSE;

#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif

again:

	/*
	 * if it is already on a list or non zero references return
	 */
	if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE)) {
		return;
	}

	/*
	 * In vclean, we might have deferred ditching locked buffers
	 * because something was still referencing them (indicated by
	 * usecount).  We can ditch them now.
	 */
	if (ISSET(vp->v_lflag, VL_DEAD)
	    && (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))) {
		++vp->v_iocount;	// Probably not necessary, but harmless
#ifdef CONFIG_IOCOUNT_TRACE
		record_vp(vp, 1);
#endif
		vnode_unlock(vp);
		buf_invalidateblks(vp, BUF_INVALIDATE_LOCKED, 0, 0);
		vnode_lock(vp);
		vnode_dropiocount(vp);
		/* we dropped the vnode lock above, so re-verify state from the top */
		goto again;
	}

	vnode_list_lock();

	if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
		/*
		 * add the new guy to the appropriate end of the RAGE list
		 */
		if ((vp->v_flag & VAGE)) {
			TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
		} else {
			TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
		}

		vp->v_listflag |= VLIST_RAGE;
		ragevnodes++;

		/*
		 * reset the timestamp for the last inserted vp on the RAGE
		 * queue to let new_vnode know that its not ok to start stealing
		 * from this list... as long as we're actively adding to this list
		 * we'll push out the vnodes we want to donate to the real free list
		 * once we stop pushing, we'll let some time elapse before we start
		 * stealing them in the new_vnode routine
		 */
		microuptime(&rage_tv);
	} else {
		/*
		 * if VL_DEAD, insert it at head of the dead list
		 * else insert at tail of LRU list or at head if VAGE is set
		 */
		if ((vp->v_lflag & VL_DEAD)) {
			TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
			vp->v_listflag |= VLIST_DEAD;
			deadvnodes++;

			if (dead_vnode_wanted) {
				/* someone is waiting for a dead vnode; wake one waiter below */
				dead_vnode_wanted--;
				need_dead_wakeup = TRUE;
			}
		} else if ((vp->v_flag & VAGE)) {
			/* aged vnodes go to the head of the free list for earlier reuse */
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
			vp->v_flag &= ~VAGE;
			freevnodes++;
		} else {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			freevnodes++;
		}
	}
	vnode_list_unlock();

	/* wakeup is done after dropping the list lock */
	if (need_dead_wakeup == TRUE) {
		wakeup_one((caddr_t)&dead_vnode_wanted);
	}
}
2439
2440
2441 /*
2442 * remove the vnode from appropriate free list.
2443 * called with vnode LOCKED and
2444 * the list lock held
2445 */
2446 static void
vnode_list_remove_locked(vnode_t vp)2447 vnode_list_remove_locked(vnode_t vp)
2448 {
2449 if (VONLIST(vp)) {
2450 /*
2451 * the v_listflag field is
2452 * protected by the vnode_list_lock
2453 */
2454 if (vp->v_listflag & VLIST_RAGE) {
2455 VREMRAGE("vnode_list_remove", vp);
2456 } else if (vp->v_listflag & VLIST_DEAD) {
2457 VREMDEAD("vnode_list_remove", vp);
2458 } else if (vp->v_listflag & VLIST_ASYNC_WORK) {
2459 VREMASYNC_WORK("vnode_list_remove", vp);
2460 } else {
2461 VREMFREE("vnode_list_remove", vp);
2462 }
2463 }
2464 }
2465
2466
2467 /*
2468 * remove the vnode from appropriate free list.
2469 * called with vnode LOCKED
2470 */
2471 static void
vnode_list_remove(vnode_t vp)2472 vnode_list_remove(vnode_t vp)
2473 {
2474 #if DIAGNOSTIC
2475 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
2476 #endif
2477 /*
2478 * we want to avoid taking the list lock
2479 * in the case where we're not on the free
2480 * list... this will be true for most
2481 * directories and any currently in use files
2482 *
2483 * we're guaranteed that we can't go from
2484 * the not-on-list state to the on-list
2485 * state since we hold the vnode lock...
2486 * all calls to vnode_list_add are done
2487 * under the vnode lock... so we can
2488 * check for that condition (the prevelant one)
2489 * without taking the list lock
2490 */
2491 if (VONLIST(vp)) {
2492 vnode_list_lock();
2493 /*
2494 * however, we're not guaranteed that
2495 * we won't go from the on-list state
2496 * to the not-on-list state until we
2497 * hold the vnode_list_lock... this
2498 * is due to "new_vnode" removing vnodes
2499 * from the free list uder the list_lock
2500 * w/o the vnode lock... so we need to
2501 * check again whether we're currently
2502 * on the free list
2503 */
2504 vnode_list_remove_locked(vp);
2505
2506 vnode_list_unlock();
2507 }
2508 }
2509
2510
void
vnode_rele(vnode_t vp)
{
	/* drop a plain usecount reference: no open-mode bits, no dont_reenter, vnode not locked */
	vnode_rele_internal(vp, 0, 0, 0);
}
2516
2517
void
vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
{
	/* drop a usecount taken with open-mode bits (FWRITE/O_EVTONLY); vnode not locked */
	vnode_rele_internal(vp, fmode, dont_reenter, 0);
}
2523
2524
void
vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
{
	int32_t old_usecount;

	if (!locked) {
		vnode_lock_spin(vp);
	}
#if DIAGNOSTIC
	else {
		lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
	}
#endif
	/* Enable atomic ops on v_usecount without the vnode lock */
	old_usecount = os_atomic_dec_orig(&vp->v_usecount, relaxed);
	if (old_usecount < 1) {
		/*
		 * Because we allow atomic ops on usecount (in lookup only, under
		 * specific conditions of already having a usecount) it is
		 * possible that when the vnode is examined, its usecount is
		 * different than what will be printed in this panic message.
		 */
		panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.",
		    vp, old_usecount - 1, vp->v_tag, vp->v_type, vp->v_flag);
	}

	if (fmode & FWRITE) {
		/* balance the ++v_writecount done in vnode_ref_ext */
		if (--vp->v_writecount < 0) {
			panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
		}
	}
	if (fmode & O_EVTONLY) {
		/* balance the ++v_kusecount done in vnode_ref_ext */
		if (--vp->v_kusecount < 0) {
			panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
		}
	}
	/* kernel (event-only) references can never exceed total usecount */
	if (vp->v_kusecount > vp->v_usecount) {
		panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
	}

	if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
		/*
		 * vnode is still busy... if we're the last
		 * usecount, mark for a future call to VNOP_INACTIVE
		 * when the iocount finally drops to 0
		 */
		if (vp->v_usecount == 0) {
			vp->v_lflag |= VL_NEEDINACTIVE;
			vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
		}
		goto done;
	}
	vp->v_flag &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);

	if (ISSET(vp->v_lflag, VL_TERMINATE | VL_DEAD) || dont_reenter) {
		/*
		 * vnode is being cleaned, or
		 * we've requested that we don't reenter
		 * the filesystem on this release...in
		 * the latter case, we'll mark the vnode aged
		 */
		if (dont_reenter) {
			if (!(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM))) {
				vp->v_lflag |= VL_NEEDINACTIVE;

				/* unreliable media or dirty vnodes go to the async cleaner */
				if (vnode_on_reliable_media(vp) == FALSE || vp->v_flag & VISDIRTY) {
					vnode_async_list_add(vp);
					goto done;
				}
			}
			vp->v_flag |= VAGE;
		}
		vnode_list_add(vp);

		goto done;
	}
	/*
	 * at this point both the iocount and usecount
	 * are zero
	 * pick up an iocount so that we can call
	 * VNOP_INACTIVE with the vnode lock unheld
	 */
	vp->v_iocount++;
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	vp->v_lflag &= ~VL_NEEDINACTIVE;
	vnode_unlock(vp);

	VNOP_INACTIVE(vp, vfs_context_current());

	vnode_lock_spin(vp);
	/*
	 * because we dropped the vnode lock to call VNOP_INACTIVE
	 * the state of the vnode may have changed... we may have
	 * picked up an iocount, usecount or the MARKTERM may have
	 * been set... we need to reevaluate the reference counts
	 * to determine if we can call vnode_reclaim_internal at
	 * this point... if the reference counts are up, we'll pick
	 * up the MARKTERM state when they get subsequently dropped
	 */
	if ((vp->v_iocount == 1) && (vp->v_usecount == 0) &&
	    ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
		struct uthread *ut;

		ut = current_uthread();

		if (ut->uu_defer_reclaims) {
			/* thread asked to defer reclaims: chain vp on its per-thread list */
			vp->v_defer_reclaimlist = ut->uu_vreclaims;
			ut->uu_vreclaims = vp;
			goto done;
		}
		vnode_lock_convert(vp);
		vnode_reclaim_internal(vp, 1, 1, 0);
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);
done:
	if (vp->v_usecount == 0 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
		/* last usecount on a regular file: tell VM the pager is no longer in use */
		if (vp->v_ubcinfo) {
			vnode_lock_convert(vp);
			memory_object_mark_unused(vp->v_ubcinfo->ui_control, (vp->v_flag & VRAGE) == VRAGE);
		}
	}
	if (!locked) {
		vnode_unlock(vp);
	}
	return;
}
2654
2655 /*
2656 * Remove any vnodes in the vnode table belonging to mount point mp.
2657 *
2658 * If MNT_NOFORCE is specified, there should not be any active ones,
2659 * return error if any are found (nb: this is a user error, not a
2660 * system error). If MNT_FORCE is specified, detach any active vnodes
2661 * that are found.
2662 */
2663
int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vnode *vp;
	int busy = 0;		/* count of vnodes we could not flush */
	int reclaimed = 0;	/* count of vnodes reclaimed this pass */
	int retval;
	unsigned int vid;
	bool first_try = true;

	/*
	 * See comments in vnode_iterate() for the rationale for this lock
	 */
	mount_iterate_lock(mp);

	mount_lock(mp);
	vnode_iterate_setup(mp);
	/*
	 * On regular unmounts(not forced) do a
	 * quick check for vnodes to be in use. This
	 * preserves the caching of vnodes. automounter
	 * tries unmounting every so often to see whether
	 * it is still busy or not.
	 */
	if (((flags & FORCECLOSE) == 0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
		if (vnode_umount_preflight(mp, skipvp, flags)) {
			vnode_iterate_clear(mp);
			mount_unlock(mp);
			mount_iterate_unlock(mp);
			return EBUSY;
		}
	}
loop:
	/* If it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		mount_iterate_unlock(mp);
		return retval;
	}

	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		/* move each vnode from the worker queue back onto the mount list */
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);

		if ((vp->v_mount != mp) || (vp == skipvp)) {
			continue;
		}
		/* capture the id before dropping the mount lock so we can revalidate */
		vid = vp->v_id;
		mount_unlock(mp);

		vnode_lock_spin(vp);

		// If vnode is already terminating, wait for it...
		while (vp->v_id == vid && ISSET(vp->v_lflag, VL_TERMINATE)) {
			vp->v_lflag |= VL_TERMWANT;
			msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vflush", NULL);
		}

		/* recycled or already dead while we waited: move on */
		if ((vp->v_id != vid) || ISSET(vp->v_lflag, VL_DEAD)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}

		/*
		 * If requested, skip over vnodes marked VSYSTEM.
		 * Skip over all vnodes marked VNOFLUSH.
		 */
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
		    (vp->v_flag & VNOFLUSH))) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VSWAP.
		 */
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VROOT.
		 */
		if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If the real usecount is 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (((vp->v_usecount == 0) ||
		    ((vp->v_usecount - vp->v_kusecount) == 0))) {
			vnode_lock_convert(vp);
			vp->v_iocount++;	/* so that drain waits for other iocounts */
#ifdef CONFIG_IOCOUNT_TRACE
			record_vp(vp, 1);
#endif
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_dropiocount(vp);
			vnode_list_add(vp);
			vnode_unlock(vp);

			reclaimed++;
			mount_lock(mp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			vnode_lock_convert(vp);

			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vp->v_iocount++;	/* so that drain waits for other iocounts */
#ifdef CONFIG_IOCOUNT_TRACE
				record_vp(vp, 1);
#endif
				vnode_abort_advlocks(vp);
				vnode_reclaim_internal(vp, 1, 1, 0);
				vnode_dropiocount(vp);
				vnode_list_add(vp);
				vnode_unlock(vp);
			} else {
				/* spec vnode: clean it but keep it usable as an anonymous device */
				vclean(vp, 0);
				vp->v_lflag &= ~VL_DEAD;
				vp->v_op = spec_vnodeop_p;
				vp->v_flag |= VDEVFLUSH;
				vnode_unlock(vp);
			}
			mount_lock(mp);
			continue;
		}

		/* log vnodes blocking unforced unmounts */
		if (print_busy_vnodes && first_try && ((flags & FORCECLOSE) == 0)) {
			vprint("vflush - busy vnode", vp);
		}

		vnode_unlock(vp);
		mount_lock(mp);
		busy++;
	}

	/* At this point the worker queue is completed */
	if (busy && ((flags & FORCECLOSE) == 0) && reclaimed) {
		/* we made progress last pass; retry the busy vnodes once more */
		busy = 0;
		reclaimed = 0;
		(void)vnode_iterate_reloadq(mp);
		first_try = false;
		/* returned with mount lock held */
		goto loop;
	}

	/* if new vnodes were created in between retry the reclaim */
	if (vnode_iterate_reloadq(mp) != 0) {
		if (!(busy && ((flags & FORCECLOSE) == 0))) {
			first_try = false;
			goto loop;
		}
	}
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	mount_iterate_unlock(mp);

	if (busy && ((flags & FORCECLOSE) == 0)) {
		return EBUSY;
	}
	return 0;
}
2854
long num_recycledvnodes = 0;	/* total vnodes put through vclean (bumped atomically) */
2856 /*
2857 * Disassociate the underlying file system from a vnode.
2858 * The vnode lock is held on entry.
2859 */
static void
vclean(vnode_t vp, int flags)
{
	vfs_context_t ctx = vfs_context_current();
	int active;
	int need_inactive;
	int already_terminating;
	int clflags = 0;	/* flags passed down to VNOP_CLOSE */
#if NAMEDSTREAMS
	int is_namedstream;
#endif

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	active = vp->v_usecount;

	/*
	 * just in case we missed sending a needed
	 * VNOP_INACTIVE, we'll do it now
	 */
	need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);

	vp->v_lflag &= ~VL_NEEDINACTIVE;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	already_terminating = (vp->v_lflag & VL_TERMINATE);

	vp->v_lflag |= VL_TERMINATE;

#if NAMEDSTREAMS
	is_namedstream = vnode_isnamedstream(vp);
#endif

	vnode_unlock(vp);

	OSAddAtomicLong(1, &num_recycledvnodes);

	if (flags & DOCLOSE) {
		clflags |= IO_NDELAY;
	}
	if (flags & REVOKEALL) {
		clflags |= IO_REVOKE;
	}

#if CONFIG_MACF
	if (vp->v_mount) {
		/*
		 * It is possible for bdevvp vnodes to not have a mount
		 * pointer. It's fine to let it get reclaimed without
		 * notifying.
		 */
		mac_vnode_notify_reclaim(vp);
	}
#endif

	if (active && (flags & DOCLOSE)) {
		VNOP_CLOSE(vp, clflags, ctx);
	}

	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE) {
#if CONFIG_NFS_CLIENT
		/* NFS has its own buffer invalidation path */
		if (vp->v_tag == VT_NFS) {
			nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
		} else
#endif /* CONFIG_NFS_CLIENT */
		{
			VNOP_FSYNC(vp, MNT_WAIT, ctx);

			/*
			 * If the vnode is still in use (by the journal for
			 * example) we don't want to invalidate locked buffers
			 * here. In that case, either the journal will tidy them
			 * up, or we will deal with it when the usecount is
			 * finally released in vnode_rele_internal.
			 */
			buf_invalidateblks(vp, BUF_WRITE_DATA | (active ? 0 : BUF_INVALIDATE_LOCKED), 0, 0);
		}
		if (UBCINFOEXISTS(vp)) {
			/*
			 * Clean the pages in VM.
			 */
			(void)ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
		}
	}
	if (active || need_inactive) {
		VNOP_INACTIVE(vp, ctx);
	}

#if NAMEDSTREAMS
	if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
		vnode_t pvp = vp->v_parent;

		/* Delete the shadow stream file before we reclaim its vnode */
		if (vnode_isshadow(vp)) {
			vnode_relenamedstream(pvp, vp);
		}

		/*
		 * No more streams associated with the parent.  We
		 * have a ref on it, so its identity is stable.
		 * If the parent is on an opaque volume, then we need to know
		 * whether it has associated named streams.
		 */
		if (vfs_authopaque(pvp->v_mount)) {
			vnode_lock_spin(pvp);
			pvp->v_lflag &= ~VL_HASSTREAMS;
			vnode_unlock(pvp);
		}
	}
#endif

	/*
	 * Destroy ubc named reference
	 * cluster_release is done on this path
	 * along with dropping the reference on the ucred
	 * (and in the case of forced unmount of an mmap-ed file,
	 * the ubc reference on the vnode is dropped here too).
	 */
	ubc_destroy_named(vp);

#if CONFIG_TRIGGERS
	/*
	 * cleanup trigger info from vnode (if any)
	 */
	if (vp->v_resolve) {
		vnode_resolver_detach(vp);
	}
#endif

#if CONFIG_IO_COMPRESSION_STATS
	if ((vp->io_compression_stats)) {
		vnode_iocs_record_and_free(vp);
	}
#endif /* CONFIG_IO_COMPRESSION_STATS */

	/*
	 * Reclaim the vnode.
	 */
	if (VNOP_RECLAIM(vp, ctx)) {
		panic("vclean: cannot reclaim");
	}

	// make sure the name & parent ptrs get cleaned out!
	vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE | VNODE_UPDATE_PURGEFIRMLINK);

	vnode_lock(vp);

	/*
	 * Remove the vnode from any mount list it might be on.  It is not
	 * safe to do this any earlier because unmount needs to wait for
	 * any vnodes to terminate and it cannot do that if it cannot find
	 * them.
	 */
	insmntque(vp, (struct mount *)0);

	/* point the vnode at the dead filesystem ops */
	vp->v_mount = dead_mountp;
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_data = NULL;

	vp->v_lflag |= VL_DEAD;
	vp->v_flag &= ~VISDIRTY;

	if (already_terminating == 0) {
		vp->v_lflag &= ~VL_TERMINATE;
		/*
		 * Done with purge, notify sleepers of the grim news.
		 */
		if (vp->v_lflag & VL_TERMWANT) {
			vp->v_lflag &= ~VL_TERMWANT;
			wakeup(&vp->v_lflag);
		}
	}
}
3044
3045 /*
3046 * Eliminate all activity associated with the requested vnode
3047 * and with all vnodes aliased to the requested vnode.
3048 */
int
#if DIAGNOSTIC
vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
#else
vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
#endif
{
	struct vnode *vq;
	int vid;

#if DIAGNOSTIC
	if ((flags & REVOKEALL) == 0) {
		panic("vnop_revoke");
	}
#endif

	if (vnode_isaliased(vp)) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * return an immediate error
		 */
		if (vp->v_lflag & VL_TERMINATE) {
			return ENOENT;
		}

		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		SPECHASH_LOCK();
		while ((vp->v_specflags & SI_ALIASED)) {
			/* walk the hash chain looking for other vnodes on the same dev */
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq) {
					continue;
				}
				vid = vq->v_id;
				SPECHASH_UNLOCK();
				/* alias got recycled while unlocked: restart the chain walk */
				if (vnode_getwithvid(vq, vid)) {
					SPECHASH_LOCK();
					break;
				}
				vnode_lock(vq);
				if (!(vq->v_lflag & VL_TERMINATE)) {
					vnode_reclaim_internal(vq, 1, 1, 0);
				}
				vnode_put_locked(vq);
				vnode_unlock(vq);
				SPECHASH_LOCK();
				break;
			}
		}
		SPECHASH_UNLOCK();
	}
	vnode_lock(vp);
	if (vp->v_lflag & VL_TERMINATE) {
		vnode_unlock(vp);
		return ENOENT;
	}
	/* finally reclaim vp itself */
	vnode_reclaim_internal(vp, 1, 0, REVOKEALL);
	vnode_unlock(vp);

	return 0;
}
3113
3114 /*
3115 * Recycle an unused vnode to the front of the free list.
3116 * Release the passed interlock if the vnode will be recycled.
3117 */
3118 int
vnode_recycle(struct vnode * vp)3119 vnode_recycle(struct vnode *vp)
3120 {
3121 vnode_lock_spin(vp);
3122
3123 if (vp->v_iocount || vp->v_usecount) {
3124 vp->v_lflag |= VL_MARKTERM;
3125 vnode_unlock(vp);
3126 return 0;
3127 }
3128 vnode_lock_convert(vp);
3129 vnode_reclaim_internal(vp, 1, 0, 0);
3130
3131 vnode_unlock(vp);
3132
3133 return 1;
3134 }
3135
3136 static int
vnode_reload(vnode_t vp)3137 vnode_reload(vnode_t vp)
3138 {
3139 vnode_lock_spin(vp);
3140
3141 if ((vp->v_iocount > 1) || vp->v_usecount) {
3142 vnode_unlock(vp);
3143 return 0;
3144 }
3145 if (vp->v_iocount <= 0) {
3146 panic("vnode_reload with no iocount %d", vp->v_iocount);
3147 }
3148
3149 /* mark for release when iocount is dopped */
3150 vp->v_lflag |= VL_MARKTERM;
3151 vnode_unlock(vp);
3152
3153 return 1;
3154 }
3155
3156
static void
vgone(vnode_t vp, int flags)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * Clean out the filesystem specific data.
	 * vclean also takes care of removing the
	 * vnode from any mount list it might be on
	 */
	vclean(vp, flags | DOCLOSE);

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		SPECHASH_LOCK();
		/* unlink vp from its hash chain */
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp) {
					continue;
				}
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL) {
				panic("missing bdev");
			}
		}
		if (vp->v_specflags & SI_ALIASED) {
			/*
			 * count the remaining aliases on the chain; if only one
			 * is left (vx) it is no longer aliased
			 */
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type) {
					continue;
				}
				if (vx) {
					break;
				}
				vx = vq;
			}
			if (vx == NULL) {
				panic("missing alias");
			}
			if (vq == NULL) {
				vx->v_specflags &= ~SI_ALIASED;
			}
			vp->v_specflags &= ~SI_ALIASED;
		}
		SPECHASH_UNLOCK();
		{
			/* free the specinfo now that vp is off the chain */
			struct specinfo *tmp = vp->v_specinfo;
			vp->v_specinfo = NULL;
			zfree(specinfo_zone, tmp);
		}
	}
}
3218
3219 /*
3220 * Lookup a vnode by device number.
3221 */
int
check_mountedon(dev_t dev, enum vtype type, int *errorp)
{
	vnode_t vp;
	int rc = 0;
	int vid;

loop:
	SPECHASH_LOCK();
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type) {
			continue;
		}
		/* capture the id so we can detect recycling after dropping the lock */
		vid = vp->v_id;
		SPECHASH_UNLOCK();
		if (vnode_getwithvid(vp, vid)) {
			/* vnode was recycled underneath us: start over */
			goto loop;
		}
		vnode_lock_spin(vp);
		if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
			/* busy beyond our own iocount: check whether it is mounted on */
			vnode_unlock(vp);
			if ((*errorp = vfs_mountedon(vp)) != 0) {
				rc = 1;
			}
		} else {
			vnode_unlock(vp);
		}
		vnode_put(vp);
		return rc;
	}
	SPECHASH_UNLOCK();
	return 0;
}
3255
3256 /*
3257 * Calculate the total number of references to a special device.
3258 */
int
vcount(vnode_t vp)
{
	vnode_t vq, vnext;
	int count;
	int vid;

	/* non-spec vnodes: report usecount excluding kernel (event-only) refs */
	if (!vnode_isspec(vp)) {
		return vp->v_usecount - vp->v_kusecount;
	}

loop:
	/* unaliased spec vnode: its own open count is the total */
	if (!vnode_isaliased(vp)) {
		return vp->v_specinfo->si_opencount;
	}
	count = 0;

	SPECHASH_LOCK();
	/*
	 * Grab first vnode and its vid.
	 */
	vq = *vp->v_hashchain;
	vid = vq ? vq->v_id : 0;

	SPECHASH_UNLOCK();

	while (vq) {
		/*
		 * Attempt to get the vnode outside the SPECHASH lock.
		 */
		if (vnode_getwithvid(vq, vid)) {
			/* chain member got recycled: restart the whole walk */
			goto loop;
		}
		vnode_lock(vq);

		if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
			if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
				/*
				 * Alias, but not in use, so flush it out.
				 */
				vnode_reclaim_internal(vq, 1, 1, 0);
				vnode_put_locked(vq);
				vnode_unlock(vq);
				goto loop;
			}
			/* accumulate the open count of every alias of this device */
			count += vq->v_specinfo->si_opencount;
		}
		vnode_unlock(vq);

		SPECHASH_LOCK();
		/*
		 * must do this with the reference still held on 'vq'
		 * so that it can't be destroyed while we're poking
		 * through v_specnext
		 */
		vnext = vq->v_specnext;
		vid = vnext ? vnext->v_id : 0;

		SPECHASH_UNLOCK();

		vnode_put(vq);

		vq = vnext;
	}

	return count;
}
3326
3327 int prtactive = 0; /* 1 => print out reclaim of active vnodes */
3328
3329 /*
3330 * Print out a description of a vnode.
3331 */
3332 static const char *typename[] =
3333 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
3334
3335 void
vprint(const char * label,struct vnode * vp)3336 vprint(const char *label, struct vnode *vp)
3337 {
3338 char sbuf[64];
3339
3340 if (label != NULL) {
3341 printf("%s: ", label);
3342 }
3343 printf("name %s type %s, usecount %d, writecount %d\n",
3344 vp->v_name, typename[vp->v_type],
3345 vp->v_usecount, vp->v_writecount);
3346 sbuf[0] = '\0';
3347 if (vp->v_flag & VROOT) {
3348 strlcat(sbuf, "|VROOT", sizeof(sbuf));
3349 }
3350 if (vp->v_flag & VTEXT) {
3351 strlcat(sbuf, "|VTEXT", sizeof(sbuf));
3352 }
3353 if (vp->v_flag & VSYSTEM) {
3354 strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
3355 }
3356 if (vp->v_flag & VNOFLUSH) {
3357 strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
3358 }
3359 if (vp->v_flag & VBWAIT) {
3360 strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
3361 }
3362 if (vnode_isaliased(vp)) {
3363 strlcat(sbuf, "|VALIASED", sizeof(sbuf));
3364 }
3365 if (sbuf[0] != '\0') {
3366 printf("vnode flags (%s\n", &sbuf[1]);
3367 }
3368 }
3369
3370
3371 int
vn_getpath(struct vnode * vp,char * pathbuf,int * len)3372 vn_getpath(struct vnode *vp, char *pathbuf, int *len)
3373 {
3374 return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
3375 }
3376
/*
 * Like vn_getpath(), but build_path() may re-enter the file system
 * (no BUILDPATH_NO_FS_ENTER).  See the deadlock discussion below.
 */
int
vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
{
	int buflen = *len;

	return build_path(vp, pathbuf, buflen, len, 0, vfs_context_current());
}
3382
3383 /*
3384 * vn_getpath_fsenter_with_parent will reenter the file system to fine the path of the
3385 * vnode. It requires that there are IO counts on both the vnode and the directory vnode.
3386 *
3387 * vn_getpath_fsenter is called by MAC hooks to authorize operations for every thing, but
3388 * unlink, rmdir and rename. For these operation the MAC hook calls vn_getpath. This presents
3389 * problems where if the path can not be found from the name cache, those operations can
3390 * erroneously fail with EPERM even though the call should succeed. When removing or moving
3391 * file system objects with operations such as unlink or rename, those operations need to
3392 * take IO counts on the target and containing directory. Calling vn_getpath_fsenter from a
3393 * MAC hook from these operations during forced unmount operations can lead to dead
3394 * lock. This happens when the operation starts, IO counts are taken on the containing
3395 * directories and targets. Before the MAC hook is called a forced unmount from another
3396 * thread takes place and blocks on the on going operation's directory vnode in vdrain.
3397 * After which, the MAC hook gets called and calls vn_getpath_fsenter. vn_getpath_fsenter
3398 * is called with the understanding that there is an IO count on the target. If in
3399 * build_path the directory vnode is no longer in the cache, then the parent object id via
3400 * vnode_getattr from the target is obtain and used to call VFS_VGET to get the parent
3401 * vnode. The file system's VFS_VGET then looks up by inode in its hash and tries to get
3402 * an IO count. But VFS_VGET "sees" the directory vnode is in vdrain and can block
3403 * depending on which version and how it calls the vnode_get family of interfaces.
3404 *
3405 * N.B. A reasonable interface to use is vnode_getwithvid. This interface was modified to
3406 * call vnode_getiocount with VNODE_DRAINO, so it will happily get an IO count and not
3407 * cause issues, but there is no guarantee that all or any file systems are doing that.
3408 *
3409 * vn_getpath_fsenter_with_parent can enter the file system safely since there is a known
3410 * IO count on the directory vnode by calling build_path_with_parent.
3411 */
3412
3413 int
vn_getpath_fsenter_with_parent(struct vnode * dvp,struct vnode * vp,char * pathbuf,int * len)3414 vn_getpath_fsenter_with_parent(struct vnode *dvp, struct vnode *vp, char *pathbuf, int *len)
3415 {
3416 return build_path_with_parent(vp, dvp, pathbuf, *len, len, NULL, 0, vfs_context_current());
3417 }
3418
3419 int
vn_getpath_ext(struct vnode * vp,struct vnode * dvp,char * pathbuf,int * len,int flags)3420 vn_getpath_ext(struct vnode *vp, struct vnode *dvp, char *pathbuf, int *len, int flags)
3421 {
3422 int bpflags = (flags & VN_GETPATH_FSENTER) ? 0 : BUILDPATH_NO_FS_ENTER;
3423
3424 if (flags && (flags != VN_GETPATH_FSENTER)) {
3425 if (flags & VN_GETPATH_NO_FIRMLINK) {
3426 bpflags |= BUILDPATH_NO_FIRMLINK;
3427 }
3428 if (flags & VN_GETPATH_VOLUME_RELATIVE) {
3429 bpflags |= (BUILDPATH_VOLUME_RELATIVE | BUILDPATH_NO_FIRMLINK);
3430 }
3431 if (flags & VN_GETPATH_NO_PROCROOT) {
3432 bpflags |= BUILDPATH_NO_PROCROOT;
3433 }
3434 }
3435
3436 return build_path_with_parent(vp, dvp, pathbuf, *len, len, NULL, bpflags, vfs_context_current());
3437 }
3438
3439 int
vn_getpath_no_firmlink(struct vnode * vp,char * pathbuf,int * len)3440 vn_getpath_no_firmlink(struct vnode *vp, char *pathbuf, int *len)
3441 {
3442 return vn_getpath_ext(vp, NULLVP, pathbuf, len, VN_GETPATH_NO_FIRMLINK);
3443 }
3444
3445 int
vn_getpath_ext_with_mntlen(struct vnode * vp,struct vnode * dvp,char * pathbuf,size_t * len,size_t * mntlen,int flags)3446 vn_getpath_ext_with_mntlen(struct vnode *vp, struct vnode *dvp, char *pathbuf, size_t *len, size_t *mntlen, int flags)
3447 {
3448 int bpflags = (flags & VN_GETPATH_FSENTER) ? 0 : BUILDPATH_NO_FS_ENTER;
3449 int local_len;
3450 int error;
3451
3452 if (*len > INT_MAX) {
3453 return EINVAL;
3454 }
3455
3456 local_len = *len;
3457
3458 if (flags && (flags != VN_GETPATH_FSENTER)) {
3459 if (flags & VN_GETPATH_NO_FIRMLINK) {
3460 bpflags |= BUILDPATH_NO_FIRMLINK;
3461 }
3462 if (flags & VN_GETPATH_VOLUME_RELATIVE) {
3463 bpflags |= (BUILDPATH_VOLUME_RELATIVE | BUILDPATH_NO_FIRMLINK);
3464 }
3465 if (flags & VN_GETPATH_NO_PROCROOT) {
3466 bpflags |= BUILDPATH_NO_PROCROOT;
3467 }
3468 }
3469
3470 error = build_path_with_parent(vp, dvp, pathbuf, local_len, &local_len, mntlen, bpflags, vfs_context_current());
3471
3472 if (local_len >= 0 && local_len <= (int)*len) {
3473 *len = (size_t)local_len;
3474 }
3475
3476 return error;
3477 }
3478
3479 int
vn_getcdhash(struct vnode * vp,off_t offset,unsigned char * cdhash)3480 vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
3481 {
3482 return ubc_cs_getcdhash(vp, offset, cdhash);
3483 }
3484
3485
3486 static char *extension_table = NULL;
3487 static int nexts;
3488 static int max_ext_width;
3489
/*
 * qsort() comparator for the package-extension table: orders entries by
 * extension length (ascending).  Uses explicit comparisons instead of
 * subtracting size_t values, which would wrap on a negative difference
 * before the cast to int.
 */
static int
extension_cmp(const void *a, const void *b)
{
	size_t la = strlen((const char *)a);
	size_t lb = strlen((const char *)b);

	if (la < lb) {
		return -1;
	}
	if (la > lb) {
		return 1;
	}
	return 0;
}
3495
3496
3497 //
3498 // This is the api LaunchServices uses to inform the kernel
3499 // the list of package extensions to ignore.
3500 //
// Internally we keep the list sorted by the length of the
// extension (shortest to longest). We sort the
3503 // list of extensions so that we can speed up our searches
3504 // when comparing file names -- we only compare extensions
3505 // that could possibly fit into the file name, not all of
3506 // them (i.e. a short 8 character name can't have an 8
3507 // character extension).
3508 //
3509 extern lck_mtx_t pkg_extensions_lck;
3510
/*
 * Install a new package-extension table copied in from user space.
 * Entries are fixed-width (maxwidth bytes each), sorted by length.
 */
__private_extern__ int
set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
{
	char *new_exts, *old_exts;
	int old_nentries = 0, old_maxwidth = 0;
	int error;

	/* Bound the table dimensions; this also keeps nentries * maxwidth small. */
	if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
		return EINVAL;
	}


	// allocate one byte extra so we can guarantee null termination
	new_exts = kalloc_data((nentries * maxwidth) + 1, Z_WAITOK);
	if (new_exts == NULL) {
		return ENOMEM;
	}

	error = copyin(data, new_exts, nentries * maxwidth);
	if (error) {
		kfree_data(new_exts, (nentries * maxwidth) + 1);
		return error;
	}

	new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block

	qsort(new_exts, nentries, maxwidth, extension_cmp);

	/* Swap the new table in under the lock; free the old one afterwards. */
	lck_mtx_lock(&pkg_extensions_lck);

	old_exts = extension_table;
	old_nentries = nexts;
	old_maxwidth = max_ext_width;
	extension_table = new_exts;
	nexts = nentries;
	max_ext_width = maxwidth;

	lck_mtx_unlock(&pkg_extensions_lck);

	/* NOTE(review): on first install old_exts is NULL with size 1; assumes
	 * kfree_data() tolerates that -- confirm. */
	kfree_data(old_exts, (old_nentries * old_maxwidth) + 1);

	return 0;
}
3554
3555
3556 int
is_package_name(const char * name,int len)3557 is_package_name(const char *name, int len)
3558 {
3559 int i;
3560 size_t extlen;
3561 const char *ptr, *name_ext;
3562
3563 // if the name is less than 3 bytes it can't be of the
3564 // form A.B and if it begins with a "." then it is also
3565 // not a package.
3566 if (len <= 3 || name[0] == '.') {
3567 return 0;
3568 }
3569
3570 name_ext = NULL;
3571 for (ptr = name; *ptr != '\0'; ptr++) {
3572 if (*ptr == '.') {
3573 name_ext = ptr;
3574 }
3575 }
3576
3577 // if there is no "." extension, it can't match
3578 if (name_ext == NULL) {
3579 return 0;
3580 }
3581
3582 // advance over the "."
3583 name_ext++;
3584
3585 lck_mtx_lock(&pkg_extensions_lck);
3586
3587 // now iterate over all the extensions to see if any match
3588 ptr = &extension_table[0];
3589 for (i = 0; i < nexts; i++, ptr += max_ext_width) {
3590 extlen = strlen(ptr);
3591 if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
3592 // aha, a match!
3593 lck_mtx_unlock(&pkg_extensions_lck);
3594 return 1;
3595 }
3596 }
3597
3598 lck_mtx_unlock(&pkg_extensions_lck);
3599
3600 // if we get here, no extension matched
3601 return 0;
3602 }
3603
/*
 * Scan each component of the absolute path 'path' and set *component to
 * the index of the first component that is a package name, leaving it
 * at -1 if none is.  NOTE: mutates 'path' in place -- the separator
 * after each examined component is overwritten with NUL.
 */
int
vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
{
	char *ptr, *end;
	int comp = 0;

	if (pathlen < 0) {
		return EINVAL;
	}

	*component = -1;
	if (*path != '/') {
		return EINVAL;
	}

	end = path + 1;
	while (end < path + pathlen && *end != '\0') {
		/* skip any run of '/' separators */
		while (end < path + pathlen && *end == '/' && *end != '\0') {
			end++;
		}

		ptr = end;

		/* advance to the end of this component */
		while (end < path + pathlen && *end != '/' && *end != '\0') {
			end++;
		}

		if (end > path + pathlen) {
			// hmm, string wasn't null terminated
			return EINVAL;
		}

		/* NOTE(review): if the scan stops exactly at path+pathlen, this
		 * stores to path[pathlen]; callers appear to size the buffer to
		 * include the terminator -- confirm. */
		*end = '\0';
		if (is_package_name(ptr, (int)(end - ptr))) {
			*component = comp;
			break;
		}

		end++;
		comp++;
	}

	return 0;
}
3648
3649 /*
3650 * Determine if a name is inappropriate for a searchfs query.
3651 * This list consists of /System currently.
3652 */
3653
/*
 * Return 1 if (name, len) exactly matches a name that searchfs queries
 * should not use (currently only "System"), 0 otherwise, or EINVAL for
 * a negative length.
 */
int
vn_searchfs_inappropriate_name(const char *name, int len)
{
	const char *blocked[] = { "System" };
	int blocked_len[] = { 6 };
	size_t idx;

	if (len < 0) {
		return EINVAL;
	}

	for (idx = 0; idx < sizeof(blocked) / sizeof(blocked[0]); idx++) {
		if (len != blocked_len[idx]) {
			continue;
		}
		/* strlen+1 forces the terminating NUL to match too (exact match). */
		if (strncmp(name, blocked[idx], strlen(blocked[idx]) + 1) == 0) {
			return 1;
		}
	}

	/* nothing matched */
	return 0;
}
3674
3675 /*
3676 * Top level filesystem related information gathering.
3677 */
3678 extern unsigned int vfs_nummntops;
3679
3680 /*
3681 * The VFS_NUMMNTOPS shouldn't be at name[1] since
3682 * is a VFS generic variable. Since we no longer support
3683 * VT_UFS, we reserve its value to support this sysctl node.
3684 *
3685 * It should have been:
3686 * name[0]: VFS_GENERIC
3687 * name[1]: VFS_NUMMNTOPS
3688 */
3689 SYSCTL_INT(_vfs, VFS_NUMMNTOPS, nummntops,
3690 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
3691 &vfs_nummntops, 0, "");
3692
int
vfs_sysctl(int *name __unused, u_int namelen __unused,
    user_addr_t oldp __unused, size_t *oldlenp __unused,
    user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused);

/*
 * Legacy top-level VFS sysctl entry point.  All supported selectors are
 * handled elsewhere (see vfs_sysctl_node / sysctl_vfs_ctlbyfsid below),
 * so this stub always fails.
 */
int
vfs_sysctl(int *name __unused, u_int namelen __unused,
    user_addr_t oldp __unused, size_t *oldlenp __unused,
    user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused)
{
	return EINVAL;
}
3705
3706
3707 //
3708 // The following code disallows specific sysctl's that came through
3709 // the direct sysctl interface (vfs_sysctl_node) instead of the newer
3710 // sysctl_vfs_ctlbyfsid() interface. We can not allow these selectors
3711 // through vfs_sysctl_node() because it passes the user's oldp pointer
3712 // directly to the file system which (for these selectors) casts it
3713 // back to a struct sysctl_req and then proceed to use SYSCTL_IN()
3714 // which jumps through an arbitrary function pointer. When called
3715 // through the sysctl_vfs_ctlbyfsid() interface this does not happen
3716 // and so it's safe.
3717 //
3718 // Unfortunately we have to pull in definitions from AFP and SMB and
3719 // perform explicit name checks on the file system to determine if
3720 // these selectors are being used.
3721 //
3722
3723 #define AFPFS_VFS_CTL_GETID 0x00020001
3724 #define AFPFS_VFS_CTL_NETCHANGE 0x00020002
3725 #define AFPFS_VFS_CTL_VOLCHANGE 0x00020003
3726
3727 #define SMBFS_SYSCTL_REMOUNT 1
3728 #define SMBFS_SYSCTL_REMOUNT_INFO 2
3729 #define SMBFS_SYSCTL_GET_SERVER_SHARE 3
3730
3731
3732 static int
is_bad_sysctl_name(struct vfstable * vfsp,int selector_name)3733 is_bad_sysctl_name(struct vfstable *vfsp, int selector_name)
3734 {
3735 switch (selector_name) {
3736 case VFS_CTL_QUERY:
3737 case VFS_CTL_TIMEO:
3738 case VFS_CTL_NOLOCKS:
3739 case VFS_CTL_NSTATUS:
3740 case VFS_CTL_SADDR:
3741 case VFS_CTL_DISC:
3742 case VFS_CTL_SERVERINFO:
3743 return 1;
3744
3745 default:
3746 break;
3747 }
3748
3749 // the more complicated check for some of SMB's special values
3750 if (strcmp(vfsp->vfc_name, "smbfs") == 0) {
3751 switch (selector_name) {
3752 case SMBFS_SYSCTL_REMOUNT:
3753 case SMBFS_SYSCTL_REMOUNT_INFO:
3754 case SMBFS_SYSCTL_GET_SERVER_SHARE:
3755 return 1;
3756 }
3757 } else if (strcmp(vfsp->vfc_name, "afpfs") == 0) {
3758 switch (selector_name) {
3759 case AFPFS_VFS_CTL_GETID:
3760 case AFPFS_VFS_CTL_NETCHANGE:
3761 case AFPFS_VFS_CTL_VOLCHANGE:
3762 return 1;
3763 }
3764 }
3765
3766 //
3767 // If we get here we passed all the checks so the selector is ok
3768 //
3769 return 0;
3770 }
3771
3772
/*
 * Dispatch an old-style per-filesystem sysctl to the owning file
 * system's vfs_sysctl entry point, refusing the selectors that are
 * unsafe through this path (see comment block above).
 */
int vfs_sysctl_node SYSCTL_HANDLER_ARGS
{
	int *name, namelen;
	struct vfstable *vfsp;
	int error;
	int fstypenum;

	fstypenum = oidp->oid_number;
	name = arg1;
	namelen = arg2;

	/* all sysctl names at this level should have at least one name slot for the FS */
	if (namelen < 1) {
		return EISDIR; /* overloaded */
	}
	/*
	 * Find the file system by type number and pin it with a refcount so
	 * it can't be unregistered while we call into it.
	 */
	mount_list_lock();
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_typenum == fstypenum) {
			vfsp->vfc_refcount++;
			break;
		}
	}
	mount_list_unlock();

	if (vfsp == NULL) {
		return ENOTSUP;
	}

	if (is_bad_sysctl_name(vfsp, name[0])) {
		printf("vfs: bad selector 0x%.8x for old-style sysctl().  use the sysctl-by-fsid interface instead\n", name[0]);
		error = EPERM;
	} else {
		error = (vfsp->vfc_vfsops->vfs_sysctl)(name, namelen,
		    req->oldptr, &req->oldlen, req->newptr, req->newlen,
		    vfs_context_current());
	}

	/* Drop the pin taken above. */
	mount_list_lock();
	vfsp->vfc_refcount--;
	mount_list_unlock();

	return error;
}
3816
3817 /*
3818 * Check to see if a filesystem is mounted on a block device.
3819 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	SPECHASH_LOCK();
	if (vp->v_specflags & SI_MOUNTEDON) {
		error = EBUSY;
		goto out;
	}
	/*
	 * An aliased device is also busy if any alias with the same device
	 * number and type is mounted on.
	 */
	if (vp->v_specflags & SI_ALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type) {
				continue;
			}
			if (vq->v_specflags & SI_MOUNTEDON) {
				error = EBUSY;
				break;
			}
		}
	}
out:
	SPECHASH_UNLOCK();
	return error;
}
3847
/* Accumulator passed to unmount_callback() by vfs_unmountall(). */
struct unmount_info {
	int u_errs; // Total failed unmounts
	int u_busy; // EBUSY failed unmounts
	int u_count; // Total volumes iterated
	int u_only_non_system; // Non-zero: skip root/system volumes
};
3854
/*
 * vfs_iterate() callback: force-unmount one mount point, honoring the
 * only-non-system filter, and accumulate statistics in 'arg'.
 */
static int
unmount_callback(mount_t mp, void *arg)
{
	int error;
	char *mntname;
	struct unmount_info *uip = arg;

	uip->u_count++;

	/* Copy the mount point name: 'mp' may be torn down by dounmount(). */
	mntname = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);
	strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);

	if (uip->u_only_non_system
	    && ((mp->mnt_flag & MNT_ROOTFS) || (mp->mnt_kern_flag & MNTK_SYSTEM))) { //MNTK_BACKS_ROOT
		printf("unmount(%d) %s skipped\n", uip->u_only_non_system, mntname);
		mount_iterdrop(mp); // VFS_ITERATE_CB_DROPREF
	} else {
		printf("unmount(%d) %s\n", uip->u_only_non_system, mntname);

		/*
		 * Trade the iterator's reference for a real mount ref before
		 * dropping it; dounmount() consumes that reference.
		 */
		mount_ref(mp, 0);
		mount_iterdrop(mp); // VFS_ITERATE_CB_DROPREF
		error = dounmount(mp, MNT_FORCE, 1, vfs_context_current());
		if (error) {
			uip->u_errs++;
			printf("Unmount of %s failed (%d)\n", mntname ? mntname:"?", error);
			if (error == EBUSY) {
				uip->u_busy++;
			}
		}
	}
	zfree(ZV_NAMEI, mntname);

	return VFS_RETURNED;
}
3889
3890 /*
3891 * Unmount all filesystems. The list is traversed in reverse order
3892 * of mounting to avoid dependencies.
3893 * Busy mounts are retried.
3894 */
__private_extern__ void
vfs_unmountall(int only_non_system)
{
	int mounts, sec = 1;
	struct unmount_info ui;

	vfs_unmountall_started = 1;
	printf("vfs_unmountall(%ssystem) start\n", only_non_system ? "non" : "");

retry:
	ui.u_errs = ui.u_busy = ui.u_count = 0;
	ui.u_only_non_system = only_non_system;
	// avoid vfs_iterate deadlock in dounmount(), use VFS_ITERATE_CB_DROPREF
	vfs_iterate(VFS_ITERATE_CB_DROPREF | VFS_ITERATE_TAIL_FIRST, unmount_callback, &ui);
	mounts = mount_getvfscnt();
	if (mounts == 0) {
		return;
	}
	if (ui.u_busy > 0) { // Busy mounts - wait & retry
		/* Exponential backoff: 1s, 2s, ... giving up after the 32s pass. */
		tsleep(&nummounts, PVFS, "busy mount", sec * hz);
		sec *= 2;
		if (sec <= 32) {
			goto retry;
		}
		printf("Unmounting timed out\n");
	} else if (ui.u_count < mounts) {
		// If the vfs_iterate missed mounts in progress - wait a bit
		tsleep(&nummounts, PVFS, "missed mount", 2 * hz);
	}

	printf("vfs_unmountall(%ssystem) end\n", only_non_system ? "non" : "");
}
3927
3928 /*
3929 * This routine is called from vnode_pager_deallocate out of the VM
3930 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
3931 * on a vnode that has a UBCINFO
3932 */
__private_extern__ void
vnode_pager_vrele(vnode_t vp)
{
	struct ubc_info *uip;

	vnode_lock_spin(vp);

	vp->v_lflag &= ~VNAMED_UBC;
	if (vp->v_usecount != 0) {
		/*
		 * At the eleventh hour, just before the ubcinfo is
		 * destroyed, ensure the ubc-specific v_usecount
		 * reference has gone.  We use v_usecount != 0 as a hint;
		 * ubc_unmap() does nothing if there's no mapping.
		 *
		 * This case is caused by coming here via forced unmount,
		 * versus the usual vm_object_deallocate() path.
		 * In the forced unmount case, ubc_destroy_named()
		 * releases the pager before memory_object_last_unmap()
		 * can be called.
		 */
		vnode_unlock(vp);
		ubc_unmap(vp);
		vnode_lock_spin(vp);
	}

	/* Detach the ubc_info under the vnode lock, then free it unlocked. */
	uip = vp->v_ubcinfo;
	vp->v_ubcinfo = UBC_INFO_NULL;

	vnode_unlock(vp);

	ubc_info_deallocate(uip);
}
3966
3967
3968 #include <sys/disk.h>
3969
3970 u_int32_t rootunit = (u_int32_t)-1;
3971
3972 #if CONFIG_IOSCHED
3973 extern int lowpri_throttle_enabled;
3974 extern int iosched_enabled;
3975 #endif
3976
/*
 * Populate a mount point's I/O attributes (transfer limits, segment
 * limits, alignment, queue depth, feature flags) by interrogating the
 * underlying block device via DKIOC* ioctls.
 */
errno_t
vfs_init_io_attributes(vnode_t devvp, mount_t mp)
{
	int error;
	off_t readblockcnt = 0;
	off_t writeblockcnt = 0;
	off_t readmaxcnt = 0;
	off_t writemaxcnt = 0;
	off_t readsegcnt = 0;
	off_t writesegcnt = 0;
	off_t readsegsize = 0;
	off_t writesegsize = 0;
	off_t alignment = 0;
	u_int32_t minsaturationbytecount = 0;
	u_int32_t ioqueue_depth = 0;
	u_int32_t blksize;
	u_int64_t temp;
	u_int32_t features;
	u_int64_t location = 0;
	vfs_context_t ctx = vfs_context_current();
	dk_corestorage_info_t cs_info;
	boolean_t cs_present = FALSE;
	int isssd = 0;
	int isvirtual = 0;


	VNOP_IOCTL(devvp, DKIOCGETTHROTTLEMASK, (caddr_t)&mp->mnt_throttle_mask, 0, NULL);
	/*
	 * as a reasonable approximation, only use the lowest bit of the mask
	 * to generate a disk unit number
	 */
	mp->mnt_devbsdunit = num_trailing_0(mp->mnt_throttle_mask);

	if (devvp == rootvp) {
		rootunit = mp->mnt_devbsdunit;
	}

	if (mp->mnt_devbsdunit == rootunit) {
		/*
		 * this mount point exists on the same device as the root
		 * partition, so it comes under the hard throttle control...
		 * this is true even for the root mount point itself
		 */
		mp->mnt_kern_flag |= MNTK_ROOTDEV;
	}
	/*
	 * force the spec device to re-cache
	 * the underlying block size in case
	 * the filesystem overrode the initial value
	 */
	set_fsblocksize(devvp);


	if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
	    (caddr_t)&blksize, 0, ctx))) {
		return error;
	}

	mp->mnt_devblocksize = blksize;

	/*
	 * set the maximum possible I/O size
	 * this may get clipped to a smaller value
	 * based on which constraints are being advertised
	 * and if those advertised constraints result in a smaller
	 * limit for a given I/O
	 */
	mp->mnt_maxreadcnt = MAX_UPL_SIZE_BYTES;
	mp->mnt_maxwritecnt = MAX_UPL_SIZE_BYTES;

	/* These two probes are advisory: failure leaves the flags unset. */
	if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
		if (isvirtual) {
			mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
			mp->mnt_flag |= MNT_REMOVABLE;
		}
	}
	if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
		if (isssd) {
			mp->mnt_kern_flag |= MNTK_SSD;
		}
	}
	/*
	 * Query the device's advertised transfer constraints; any failure
	 * from here on aborts initialization with the ioctl's error.
	 */
	if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
	    (caddr_t)&features, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
	    (caddr_t)&readblockcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
	    (caddr_t)&writeblockcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
	    (caddr_t)&readmaxcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
	    (caddr_t)&writemaxcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
	    (caddr_t)&readsegcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
	    (caddr_t)&writesegcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
	    (caddr_t)&readsegsize, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
	    (caddr_t)&writesegsize, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
	    (caddr_t)&alignment, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
	    (caddr_t)&ioqueue_depth, 0, ctx))) {
		return error;
	}

	/*
	 * Clip the default limits to what the device advertised; a zero
	 * answer from the device means "no constraint".
	 */
	if (readmaxcnt) {
		mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX :(uint32_t) readmaxcnt;
	}

	if (readblockcnt) {
		temp = readblockcnt * blksize;
		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;

		if (temp < mp->mnt_maxreadcnt) {
			mp->mnt_maxreadcnt = (u_int32_t)temp;
		}
	}

	if (writemaxcnt) {
		mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : (uint32_t)writemaxcnt;
	}

	if (writeblockcnt) {
		temp = writeblockcnt * blksize;
		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;

		if (temp < mp->mnt_maxwritecnt) {
			mp->mnt_maxwritecnt = (u_int32_t)temp;
		}
	}

	/* Segment counts default to one page per segment of the byte limit. */
	if (readsegcnt) {
		temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
	} else {
		temp = mp->mnt_maxreadcnt / PAGE_SIZE;

		if (temp > UINT16_MAX) {
			temp = UINT16_MAX;
		}
	}
	mp->mnt_segreadcnt = (u_int16_t)temp;

	if (writesegcnt) {
		temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
	} else {
		temp = mp->mnt_maxwritecnt / PAGE_SIZE;

		if (temp > UINT16_MAX) {
			temp = UINT16_MAX;
		}
	}
	mp->mnt_segwritecnt = (u_int16_t)temp;

	if (readsegsize) {
		temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
	} else {
		temp = mp->mnt_maxreadcnt;
	}
	mp->mnt_maxsegreadsize = (u_int32_t)temp;

	if (writesegsize) {
		temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
	} else {
		temp = mp->mnt_maxwritecnt;
	}
	mp->mnt_maxsegwritesize = (u_int32_t)temp;

	/* Store the alignment requirement as a mask, capped at a page. */
	if (alignment) {
		temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
	} else {
		temp = 0;
	}
	mp->mnt_alignmentmask = (uint32_t)temp;


	if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH) {
		temp = ioqueue_depth;
	} else {
		temp = MNT_DEFAULT_IOQUEUE_DEPTH;
	}

	mp->mnt_ioqueue_depth = (uint32_t)temp;
	mp->mnt_ioscale = MNT_IOSCALE(mp->mnt_ioqueue_depth);

	if (mp->mnt_ioscale > 1) {
		printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
	}

	if (features & DK_FEATURE_FORCE_UNIT_ACCESS) {
		mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
	}

	if (VNOP_IOCTL(devvp, DKIOCGETIOMINSATURATIONBYTECOUNT, (caddr_t)&minsaturationbytecount, 0, ctx) == 0) {
		mp->mnt_minsaturationbytecount = minsaturationbytecount;
	} else {
		mp->mnt_minsaturationbytecount = 0;
	}

	if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, ctx) == 0) {
		cs_present = TRUE;
	}

	if (features & DK_FEATURE_UNMAP) {
		mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED;

		if (cs_present == TRUE) {
			mp->mnt_ioflags |= MNT_IOFLAGS_CSUNMAP_SUPPORTED;
		}
	}
	if (cs_present == TRUE) {
		/*
		 * for now we'll use the following test as a proxy for
		 * the underlying drive being FUSION in nature
		 */
		if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) {
			mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
		}
	} else {
		/* Check for APFS Fusion */
		dk_apfs_flavour_t flavour;
		if ((VNOP_IOCTL(devvp, DKIOCGETAPFSFLAVOUR, (caddr_t)&flavour, 0, ctx) == 0) &&
		    (flavour == DK_APFS_FUSION)) {
			mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
		}
	}

	if (VNOP_IOCTL(devvp, DKIOCGETLOCATION, (caddr_t)&location, 0, ctx) == 0) {
		if (location & DK_LOCATION_EXTERNAL) {
			mp->mnt_ioflags |= MNT_IOFLAGS_PERIPHERAL_DRIVE;
			mp->mnt_flag |= MNT_REMOVABLE;
		}
	}

#if CONFIG_IOSCHED
	if (iosched_enabled && (features & DK_FEATURE_PRIORITY)) {
		mp->mnt_ioflags |= MNT_IOFLAGS_IOSCHED_SUPPORTED;
		throttle_info_disable_throttle(mp->mnt_devbsdunit, (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) != 0);
	}
#endif /* CONFIG_IOSCHED */
	return error;
}
4249
4250 static struct klist fs_klist;
4251 static LCK_GRP_DECLARE(fs_klist_lck_grp, "fs_klist");
4252 static LCK_MTX_DECLARE(fs_klist_lock, &fs_klist_lck_grp);
4253
void
vfs_event_init(void)
{
	/* Initialize the knote list used for file system event delivery
	 * (see vfs_event_signal). */
	klist_init(&fs_klist);
}
4259
/*
 * Post a VQ_* event for the file system identified by 'fsid', updating
 * the mount's (non)responsiveness state for VQ_DEAD/VQ_NOTRESP and then
 * broadcasting the event to all registered knotes.
 */
void
vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data)
{
	if (event == VQ_DEAD || event == VQ_NOTRESP) {
		struct mount *mp = vfs_getvfs(fsid);
		if (mp) {
			mount_lock_spin(mp);
			if (data) {
				mp->mnt_kern_flag &= ~MNT_LNOTRESP;     // Now responding
			} else {
				mp->mnt_kern_flag |= MNT_LNOTRESP;      // Not responding
			}
			mount_unlock(mp);
		}
	}

	lck_mtx_lock(&fs_klist_lock);
	KNOTE(&fs_klist, event);
	lck_mtx_unlock(&fs_klist_lock);
}
4280
4281 /*
4282 * return the number of mounted filesystems.
4283 */
/* Thin sysctl-facing wrapper around mount_getvfscnt(). */
static int
sysctl_vfs_getvfscnt(void)
{
	return mount_getvfscnt();
}
4289
4290
4291 static int
mount_getvfscnt(void)4292 mount_getvfscnt(void)
4293 {
4294 int ret;
4295
4296 mount_list_lock();
4297 ret = nummounts;
4298 mount_list_unlock();
4299 return ret;
4300 }
4301
4302
4303
4304 static int
mount_fillfsids(fsid_t * fsidlst,int count)4305 mount_fillfsids(fsid_t *fsidlst, int count)
4306 {
4307 struct mount *mp;
4308 int actual = 0;
4309
4310 actual = 0;
4311 mount_list_lock();
4312 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4313 if (actual < count) {
4314 fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
4315 actual++;
4316 }
4317 }
4318 mount_list_unlock();
4319 return actual;
4320 }
4321
4322 /*
4323 * fill in the array of fsid_t's up to a max of 'count', the actual
4324 * number filled in will be set in '*actual'. If there are more fsid_t's
4325 * than room in fsidlst then ENOMEM will be returned and '*actual' will
4326 * have the actual count.
4327 * having *actual filled out even in the error case is depended upon.
4328 */
4329 static int
sysctl_vfs_getvfslist(fsid_t * fsidlst,unsigned long count,unsigned long * actual)4330 sysctl_vfs_getvfslist(fsid_t *fsidlst, unsigned long count, unsigned long *actual)
4331 {
4332 struct mount *mp;
4333
4334 *actual = 0;
4335 mount_list_lock();
4336 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4337 (*actual)++;
4338 if (*actual <= count) {
4339 fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
4340 }
4341 }
4342 mount_list_unlock();
4343 return *actual <= count ? 0 : ENOMEM;
4344 }
4345
/*
 * Read-only sysctl handler returning the list of mounted fsids.
 * Retries if a mount appears between sizing and copying the list.
 */
static int
sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	unsigned long actual;
	int error;
	size_t space;
	fsid_t *fsidlst;

	/* This is a readonly node. */
	if (req->newptr != USER_ADDR_NULL) {
		return EPERM;
	}

	/* they are querying us so just return the space required. */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
		return 0;
	}
again:
	/*
	 * Retrieve an accurate count of the amount of space required to copy
	 * out all the fsids in the system.
	 */
	space = req->oldlen;
	req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);

	/* they didn't give us enough space. */
	if (space < req->oldlen) {
		return ENOMEM;
	}

	fsidlst = kalloc_data(req->oldlen, Z_WAITOK | Z_ZERO);
	if (fsidlst == NULL) {
		return ENOMEM;
	}

	error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
	    &actual);
	/*
	 * If we get back ENOMEM, then another mount has been added while we
	 * slept in malloc above. If this is the case then try again.
	 */
	if (error == ENOMEM) {
		kfree_data(fsidlst, req->oldlen);
		req->oldlen = space;
		goto again;
	}
	if (error == 0) {
		error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
	}
	kfree_data(fsidlst, req->oldlen);
	return error;
}
4400
4401 /*
4402 * Do a sysctl by fsid.
4403 */
static int
sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
	union union_vfsidctl vc;
	struct mount *mp;
	struct vfsstatfs *sp;
	int *name, namelen;
	int flags = 0;
	int error = 0, gotref = 0;
	vfs_context_t ctx = vfs_context_current();
	proc_t p = req->p;      /* XXX req->p != current_proc()? */
	boolean_t is_64_bit;
	union {
		struct statfs64 sfs64;
		struct user64_statfs osfs64;
		struct user32_statfs osfs32;
	} *sfsbuf;

	if (req->newptr == USER_ADDR_NULL) {
		error = EINVAL;
		goto out;
	}

	name = arg1;
	namelen = arg2;
	is_64_bit = proc_is64bit(p);

	error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
	if (error) {
		goto out;
	}
	if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
		error = EINVAL;
		goto out;
	}
	mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
	if (mp == NULL) {
		error = ENOENT;
		goto out;
	}
	/* we now hold an iter reference on mp; dropped at 'out' unless handed off */
	gotref = 1;
	/* reset so that the fs specific code can fetch it. */
	req->newidx = 0;
	/*
	 * Note if this is a VFS_CTL then we pass the actual sysctl req
	 * in for "oldp" so that the lower layer can DTRT and use the
	 * SYSCTL_IN/OUT routines.
	 */
	if (mp->mnt_op->vfs_sysctl != NULL) {
		if (is_64_bit) {
			if (vfs_64bitready(mp)) {
				error = mp->mnt_op->vfs_sysctl(name, namelen,
				    CAST_USER_ADDR_T(req),
				    NULL, USER_ADDR_NULL, 0,
				    ctx);
			} else {
				error = ENOTSUP;
			}
		} else {
			error = mp->mnt_op->vfs_sysctl(name, namelen,
			    CAST_USER_ADDR_T(req),
			    NULL, USER_ADDR_NULL, 0,
			    ctx);
		}
		if (error != ENOTSUP) {
			goto out;
		}
	}
	/* Filesystem declined (ENOTSUP): handle the generic operations here. */
	switch (name[0]) {
	case VFS_CTL_UMOUNT:
#if CONFIG_MACF
		error = mac_mount_check_umount(ctx, mp);
		if (error != 0) {
			goto out;
		}
#endif
		req->newidx = 0;
		if (is_64_bit) {
			req->newptr = vc.vc64.vc_ptr;
			req->newlen = (size_t)vc.vc64.vc_len;
		} else {
			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
			req->newlen = vc.vc32.vc_len;
		}
		error = SYSCTL_IN(req, &flags, sizeof(flags));
		if (error) {
			break;
		}

		/* swap the iter reference for a real mount ref before unmounting */
		mount_ref(mp, 0);
		mount_iterdrop(mp);
		gotref = 0;
		/* safedounmount consumes a ref */
		error = safedounmount(mp, flags, ctx);
		break;
	case VFS_CTL_OSTATFS:
	case VFS_CTL_STATFS64:
#if CONFIG_MACF
		error = mac_mount_check_stat(ctx, mp);
		if (error != 0) {
			break;
		}
#endif
		req->newidx = 0;
		if (is_64_bit) {
			req->newptr = vc.vc64.vc_ptr;
			req->newlen = (size_t)vc.vc64.vc_len;
		} else {
			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
			req->newlen = vc.vc32.vc_len;
		}
		error = SYSCTL_IN(req, &flags, sizeof(flags));
		if (error) {
			break;
		}
		sp = &mp->mnt_vfsstat;
		/* refresh cached stats unless the caller asked for a fast, possibly-stale read */
		if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
		    (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT))) {
			goto out;
		}

		sfsbuf = kalloc_type(typeof(*sfsbuf), Z_WAITOK);

		if (name[0] == VFS_CTL_STATFS64) {
			struct statfs64 *sfs = &sfsbuf->sfs64;

			vfs_get_statfs64(mp, sfs);
			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		} else if (is_64_bit) {
			struct user64_statfs *sfs = &sfsbuf->osfs64;

			bzero(sfs, sizeof(*sfs));
			sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;
			sfs->f_bsize = (user64_long_t)sp->f_bsize;
			sfs->f_iosize = (user64_long_t)sp->f_iosize;
			sfs->f_blocks = (user64_long_t)sp->f_blocks;
			sfs->f_bfree = (user64_long_t)sp->f_bfree;
			sfs->f_bavail = (user64_long_t)sp->f_bavail;
			sfs->f_files = (user64_long_t)sp->f_files;
			sfs->f_ffree = (user64_long_t)sp->f_ffree;
			sfs->f_fsid = sp->f_fsid;
			sfs->f_owner = sp->f_owner;
#ifdef CONFIG_NFS_CLIENT
			if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
				strlcpy(&sfs->f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN);
			} else
#endif /* CONFIG_NFS_CLIENT */
			{
				strlcpy(sfs->f_fstypename, sp->f_fstypename, MFSNAMELEN);
			}
			strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
			strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);

			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		} else {
			struct user32_statfs *sfs = &sfsbuf->osfs32;
			long temp;

			bzero(sfs, sizeof(*sfs));
			sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;

			/*
			 * It's possible for there to be more than 2^^31 blocks in the filesystem, so we
			 * have to fudge the numbers here in that case. We inflate the blocksize in order
			 * to reflect the filesystem size as best we can.
			 */
			if (sp->f_blocks > INT_MAX) {
				int shift;

				/*
				 * Work out how far we have to shift the block count down to make it fit.
				 * Note that it's possible to have to shift so far that the resulting
				 * blocksize would be unreportably large. At that point, we will clip
				 * any values that don't fit.
				 *
				 * For safety's sake, we also ensure that f_iosize is never reported as
				 * being smaller than f_bsize.
				 */
				for (shift = 0; shift < 32; shift++) {
					if ((sp->f_blocks >> shift) <= INT_MAX) {
						break;
					}
					if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX) {
						break;
					}
				}
#define __SHIFT_OR_CLIP(x, s)   ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
				sfs->f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
				sfs->f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
				sfs->f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
#undef __SHIFT_OR_CLIP
				sfs->f_bsize = (user32_long_t)(sp->f_bsize << shift);
				temp = lmax(sp->f_iosize, sp->f_bsize);
				if (temp > INT32_MAX) {
					error = EINVAL;
					kfree_type(typeof(*sfsbuf), sfsbuf);
					goto out;
				}
				sfs->f_iosize = (user32_long_t)temp;
			} else {
				sfs->f_bsize = (user32_long_t)sp->f_bsize;
				sfs->f_iosize = (user32_long_t)sp->f_iosize;
				sfs->f_blocks = (user32_long_t)sp->f_blocks;
				sfs->f_bfree = (user32_long_t)sp->f_bfree;
				sfs->f_bavail = (user32_long_t)sp->f_bavail;
			}
			sfs->f_files = (user32_long_t)sp->f_files;
			sfs->f_ffree = (user32_long_t)sp->f_ffree;
			sfs->f_fsid = sp->f_fsid;
			sfs->f_owner = sp->f_owner;

#ifdef CONFIG_NFS_CLIENT
			if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
				strlcpy(&sfs->f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN);
			} else
#endif /* CONFIG_NFS_CLIENT */
			{
				strlcpy(sfs->f_fstypename, sp->f_fstypename, MFSNAMELEN);
			}
			strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
			strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);

			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		}
		kfree_type(typeof(*sfsbuf), sfsbuf);
		break;
	default:
		error = ENOTSUP;
		goto out;
	}
out:
	if (gotref != 0) {
		mount_iterdrop(mp);
	}
	return error;
}
4643
static int filt_fsattach(struct knote *kn, struct kevent_qos_s *kev);
static void filt_fsdetach(struct knote *kn);
static int filt_fsevent(struct knote *kn, long hint);
static int filt_fstouch(struct knote *kn, struct kevent_qos_s *kev);
static int filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev);

/* Filter operations backing EVFILT_FS knotes (events fan out via fs_klist). */
SECURITY_READ_ONLY_EARLY(struct filterops) fs_filtops = {
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent,
	.f_touch = filt_fstouch,
	.f_process = filt_fsprocess,
};
4656
/*
 * Attach a knote to the global fs_klist so it receives VFS events.
 * Always succeeds.
 */
static int
filt_fsattach(struct knote *kn, __unused struct kevent_qos_s *kev)
{
	kn->kn_flags |= EV_CLEAR; /* automatic */
	kn->kn_sdata = 0;         /* incoming data is ignored */

	lck_mtx_lock(&fs_klist_lock);
	KNOTE_ATTACH(&fs_klist, kn);
	lck_mtx_unlock(&fs_klist_lock);

	/*
	 * filter only sees future events,
	 * so it can't be fired already.
	 */
	return 0;
}
4673
/* Remove a knote from the global fs_klist. */
static void
filt_fsdetach(struct knote *kn)
{
	lck_mtx_lock(&fs_klist_lock);
	KNOTE_DETACH(&fs_klist, kn);
	lck_mtx_unlock(&fs_klist_lock);
}
4681
4682 static int
filt_fsevent(struct knote * kn,long hint)4683 filt_fsevent(struct knote *kn, long hint)
4684 {
4685 /*
4686 * Backwards compatibility:
4687 * Other filters would do nothing if kn->kn_sfflags == 0
4688 */
4689
4690 if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
4691 kn->kn_fflags |= hint;
4692 }
4693
4694 return kn->kn_fflags != 0;
4695 }
4696
/*
 * Update a knote's subscription mask from a new kevent and report
 * whether events are already pending under the new mask.
 */
static int
filt_fstouch(struct knote *kn, struct kevent_qos_s *kev)
{
	int res;

	lck_mtx_lock(&fs_klist_lock);

	kn->kn_sfflags = kev->fflags;

	/*
	 * the above filter function sets bits even if nobody is looking for them.
	 * Just preserve those bits even in the new mask is more selective
	 * than before.
	 *
	 * For compatibility with previous implementations, we leave kn_fflags
	 * as they were before.
	 */
	//if (kn->kn_sfflags)
	//	kn->kn_fflags &= kn->kn_sfflags;
	res = (kn->kn_fflags != 0);

	lck_mtx_unlock(&fs_klist_lock);

	return res;
}
4722
4723 static int
filt_fsprocess(struct knote * kn,struct kevent_qos_s * kev)4724 filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev)
4725 {
4726 int res = 0;
4727
4728 lck_mtx_lock(&fs_klist_lock);
4729 if (kn->kn_fflags) {
4730 knote_fill_kevent(kn, kev, 0);
4731 res = 1;
4732 }
4733 lck_mtx_unlock(&fs_klist_lock);
4734 return res;
4735 }
4736
4737 static int
sysctl_vfs_noremotehang(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)4738 sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
4739 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4740 {
4741 int out, error;
4742 pid_t pid;
4743 proc_t p;
4744
4745 /* We need a pid. */
4746 if (req->newptr == USER_ADDR_NULL) {
4747 return EINVAL;
4748 }
4749
4750 error = SYSCTL_IN(req, &pid, sizeof(pid));
4751 if (error) {
4752 return error;
4753 }
4754
4755 p = proc_find(pid < 0 ? -pid : pid);
4756 if (p == NULL) {
4757 return ESRCH;
4758 }
4759
4760 /*
4761 * Fetching the value is ok, but we only fetch if the old
4762 * pointer is given.
4763 */
4764 if (req->oldptr != USER_ADDR_NULL) {
4765 out = !((p->p_flag & P_NOREMOTEHANG) == 0);
4766 proc_rele(p);
4767 error = SYSCTL_OUT(req, &out, sizeof(out));
4768 return error;
4769 }
4770
4771 /* cansignal offers us enough security. */
4772 if (p != req->p && proc_suser(req->p) != 0) {
4773 proc_rele(p);
4774 return EPERM;
4775 }
4776
4777 if (pid < 0) {
4778 OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
4779 } else {
4780 OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
4781 }
4782 proc_rele(p);
4783
4784 return 0;
4785 }
4786
4787 static int
4788 sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS
4789 {
4790 int *name, namelen;
4791 struct vfstable *vfsp;
4792 struct vfsconf vfsc = {};
4793
4794 (void)oidp;
4795 name = arg1;
4796 namelen = arg2;
4797
4798 if (namelen < 1) {
4799 return EISDIR;
4800 } else if (namelen > 1) {
4801 return ENOTDIR;
4802 }
4803
4804 mount_list_lock();
4805 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
4806 if (vfsp->vfc_typenum == name[0]) {
4807 break;
4808 }
4809 }
4810
4811 if (vfsp == NULL) {
4812 mount_list_unlock();
4813 return ENOTSUP;
4814 }
4815
4816 vfsc.vfc_reserved1 = 0;
4817 bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
4818 vfsc.vfc_typenum = vfsp->vfc_typenum;
4819 vfsc.vfc_refcount = vfsp->vfc_refcount;
4820 vfsc.vfc_flags = vfsp->vfc_flags;
4821 vfsc.vfc_reserved2 = 0;
4822 vfsc.vfc_reserved3 = 0;
4823
4824 mount_list_unlock();
4825 return SYSCTL_OUT(req, &vfsc, sizeof(struct vfsconf));
4826 }
4827
/* the vfs.generic. branch. */
SYSCTL_EXTENSIBLE_NODE(_vfs, VFS_GENERIC, generic,
    CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs generic hinge");
/* retrieve a list of mounted filesystem fsid_t */
SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
/* perform operations on filesystem via fsid_t */
SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED,
    sysctl_vfs_ctlbyfsid, "ctlbyfsid");
SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY,
    NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
SYSCTL_INT(_vfs_generic, VFS_MAXTYPENUM, maxtypenum,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &maxvfstypenum, 0, "");
SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout_seconds, 0, "");
/* look up a vfsconf entry by filesystem type number */
SYSCTL_NODE(_vfs_generic, VFS_CONF, conf,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_vfs_generic_conf, "");
#if DEVELOPMENT || DEBUG
SYSCTL_INT(_vfs_generic, OID_AUTO, print_busy_vnodes,
    CTLTYPE_INT | CTLFLAG_RW,
    &print_busy_vnodes, 0,
    "VFS log busy vnodes blocking unmount");
#endif

/* Indicate that the root file system unmounted cleanly */
static int vfs_root_unmounted_cleanly = 0;
SYSCTL_INT(_vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &vfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly");
4857
/* Record that the root filesystem was unmounted cleanly (exported via sysctl above). */
void
vfs_set_root_unmounted_cleanly(void)
{
	vfs_root_unmounted_cleanly = 1;
}
4863
4864 /*
4865 * Print vnode state.
4866 */
/*
 * Print vnode state (counts, flags, mount info) to the kernel log,
 * prefixed by a printf-style message.  Pointers are obfuscated with
 * VM_KERNEL_ADDRPERM before printing.
 */
void
vn_print_state(struct vnode *vp, const char *fmt, ...)
{
	va_list ap;
	char perm_str[] = "(VM_KERNEL_ADDRPERM pointer)";
	char fs_name[MFSNAMELEN];

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("vp 0x%0llx %s: ", (uint64_t)VM_KERNEL_ADDRPERM(vp), perm_str);
	printf("tag %d, type %d\n", vp->v_tag, vp->v_type);
	/* Counts .. */
	printf("    iocount %d, usecount %d, kusecount %d references %d\n",
	    vp->v_iocount, vp->v_usecount, vp->v_kusecount, vp->v_references);
	printf("    writecount %d, numoutput %d\n", vp->v_writecount,
	    vp->v_numoutput);
	/* Flags */
	printf("    flag 0x%x, lflag 0x%x, listflag 0x%x\n", vp->v_flag,
	    vp->v_lflag, vp->v_listflag);

	/* a NULL or dead mount is reported as "deadfs" */
	if (vp->v_mount == NULL || vp->v_mount == dead_mountp) {
		strlcpy(fs_name, "deadfs", MFSNAMELEN);
	} else {
		vfs_name(vp->v_mount, fs_name);
	}

	printf("    v_data 0x%0llx %s\n",
	    (vp->v_data ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_data) : 0),
	    perm_str);
	printf("    v_mount 0x%0llx %s vfs_name %s\n",
	    (vp->v_mount ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_mount) : 0),
	    perm_str, fs_name);
}
4901
4902 long num_reusedvnodes = 0;
4903
4904
/*
 * Try to reclaim 'vp' for reuse.  Called with the vnode list lock held
 * (it is dropped here via vnode_list_unlock after the vnode is removed
 * from its list).
 *
 * want_vp   - caller wants the reclaimed vnode returned for reuse
 * can_defer - unreliable/dirty vnodes may be pushed to the async work
 *             list instead of being reclaimed synchronously
 * *deferred - set to 1 when the vnode was handed to the async thread
 *
 * Returns the reclaimed vnode (locked) when want_vp is set and the
 * reclaim succeeded, otherwise NULLVP (lost race, deferred, or the
 * caller didn't want it back).
 */
static vnode_t
process_vp(vnode_t vp, int want_vp, bool can_defer, int *deferred)
{
	unsigned int vpid;

	*deferred = 0;

	vpid = vp->v_id;

	vnode_list_remove_locked(vp);

	vnode_list_unlock();

	vnode_lock_spin(vp);

	/*
	 * We could wait for the vnode_lock after removing the vp from the freelist
	 * and the vid is bumped only at the very end of reclaim. So it is possible
	 * that we are looking at a vnode that is being terminated. If so skip it.
	 */
	if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
	    VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
		/*
		 * we lost the race between dropping the list lock
		 * and picking up the vnode_lock... someone else
		 * used this vnode and it is now in a new state
		 */
		vnode_unlock(vp);

		return NULLVP;
	}
	if ((vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE) {
		/*
		 * we did a vnode_rele_ext that asked for
		 * us not to reenter the filesystem during
		 * the release even though VL_NEEDINACTIVE was
		 * set... we'll do it here by doing a
		 * vnode_get/vnode_put
		 *
		 * pick up an iocount so that we can call
		 * vnode_put and drive the VNOP_INACTIVE...
		 * vnode_put will either leave us off
		 * the freelist if a new ref comes in,
		 * or put us back on the end of the freelist
		 * or recycle us if we were marked for termination...
		 * so we'll just go grab a new candidate
		 */
		vp->v_iocount++;
#ifdef CONFIG_IOCOUNT_TRACE
		record_vp(vp, 1);
#endif
		vnode_put_locked(vp);
		vnode_unlock(vp);

		return NULLVP;
	}
	/*
	 * Checks for anyone racing us for recycle
	 */
	if (vp->v_type != VBAD) {
		if ((want_vp || can_defer) && (vnode_on_reliable_media(vp) == FALSE || (vp->v_flag & VISDIRTY))) {
			/* unreliable/dirty: punt to the async work thread */
			vnode_async_list_add(vp);
			vnode_unlock(vp);

			*deferred = 1;

			return NULLVP;
		}
		if (vp->v_lflag & VL_DEAD) {
			panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
		}

		vnode_lock_convert(vp);
		(void)vnode_reclaim_internal(vp, 1, want_vp, 0);

		if (want_vp) {
			/* sanity checks: a freshly reclaimed vnode must be fully detached */
			if ((VONLIST(vp))) {
				panic("new_vnode(%p): vp on list", vp);
			}
			if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
			    (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH))) {
				panic("new_vnode(%p): free vnode still referenced", vp);
			}
			if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0)) {
				panic("new_vnode(%p): vnode seems to be on mount list", vp);
			}
			if (!LIST_EMPTY(&vp->v_nclinks) || !TAILQ_EMPTY(&vp->v_ncchildren)) {
				panic("new_vnode(%p): vnode still hooked into the name cache", vp);
			}
		} else {
			vnode_unlock(vp);
			vp = NULLVP;
		}
	}
	return vp;
}
5001
/*
 * Body of the async-work kernel thread: loop forever pulling vnodes
 * off vnode_async_work_list and reclaiming them via process_vp().
 * Sleeps on the (empty) queue and restarts itself as a continuation.
 */
__attribute__((noreturn))
static void
async_work_continue(void)
{
	struct async_work_lst *q;
	int deferred;
	vnode_t vp;

	q = &vnode_async_work_list;

	for (;;) {
		vnode_list_lock();

		if (TAILQ_EMPTY(q)) {
			assert_wait(q, (THREAD_UNINT));

			vnode_list_unlock();

			thread_block((thread_continue_t)async_work_continue);

			continue;
		}
		async_work_handled++;

		vp = TAILQ_FIRST(q);

		/* want_vp=0, can_defer=false: reclaim in place, never hand back */
		vp = process_vp(vp, 0, false, &deferred);

		if (vp != NULLVP) {
			panic("found VBAD vp (%p) on async queue", vp);
		}
	}
}
5035
5036 __attribute__((noreturn))
5037 static void
vn_laundry_continue(void)5038 vn_laundry_continue(void)
5039 {
5040 struct freelst *free_q;
5041 struct ragelst *rage_q;
5042 int deferred;
5043 vnode_t vp;
5044 bool rage_q_empty;
5045 bool free_q_empty;
5046
5047
5048 free_q = &vnode_free_list;
5049 rage_q = &vnode_rage_list;
5050
5051 for (;;) {
5052 vnode_list_lock();
5053
5054 free_q_empty = TAILQ_EMPTY(free_q);
5055 rage_q_empty = TAILQ_EMPTY(rage_q);
5056
5057 if (!rage_q_empty && !free_q_empty) {
5058 struct timeval current_tv;
5059
5060 microuptime(¤t_tv);
5061 if (ragevnodes < rage_limit &&
5062 ((current_tv.tv_sec - rage_tv.tv_sec) < RAGE_TIME_LIMIT)) {
5063 rage_q_empty = true;
5064 }
5065 }
5066
5067 if (deadvnodes >= deadvnodes_high ||
5068 (rage_q_empty && free_q_empty) ||
5069 numvnodes < desiredvnodes) {
5070 assert_wait(free_q, (THREAD_UNINT));
5071
5072 vnode_list_unlock();
5073
5074 thread_block((thread_continue_t)vn_laundry_continue);
5075
5076 continue;
5077 }
5078
5079 if (!rage_q_empty) {
5080 vp = TAILQ_FIRST(rage_q);
5081 } else {
5082 vp = TAILQ_FIRST(free_q);
5083 }
5084
5085 vp = process_vp(vp, 0, true, &deferred);
5086 }
5087 }
5088
5089 static inline void
wakeup_laundry_thread()5090 wakeup_laundry_thread()
5091 {
5092 if ((deadvnodes < deadvnodes_low) &&
5093 /* Minimum number of free vnodes the thread should act on */
5094 ((freevnodes + ragevnodes) > 10)) {
5095 wakeup(&vnode_free_list);
5096 }
5097 }
5098
5099 static int
new_vnode(vnode_t * vpp)5100 new_vnode(vnode_t *vpp)
5101 {
5102 vnode_t vp;
5103 uint32_t retries = 0, max_retries = 100; /* retry incase of tablefull */
5104 uint32_t bdevvp_vnodes = 0;
5105 int force_alloc = 0, walk_count = 0;
5106 boolean_t need_reliable_vp = FALSE;
5107 int deferred;
5108 struct timeval initial_tv;
5109 struct timeval current_tv;
5110 proc_t curproc = current_proc();
5111
5112 initial_tv.tv_sec = 0;
5113 retry:
5114 vp = NULLVP;
5115
5116 vnode_list_lock();
5117 newvnode++;
5118
5119 if (need_reliable_vp == TRUE) {
5120 async_work_timed_out++;
5121 }
5122
5123 if ((numvnodes - deadvnodes) < desiredvnodes || force_alloc) {
5124 struct timespec ts;
5125
5126 if (!TAILQ_EMPTY(&vnode_dead_list)) {
5127 /*
5128 * Can always reuse a dead one
5129 */
5130 vp = TAILQ_FIRST(&vnode_dead_list);
5131 if (numvnodes >= desiredvnodes) {
5132 wakeup_laundry_thread();
5133 }
5134 goto steal_this_vp;
5135 }
5136 /*
5137 * no dead vnodes available... if we're under
5138 * the limit, we'll create a new vnode
5139 */
5140 numvnodes++;
5141 if (numvnodes >= desiredvnodes) {
5142 wakeup_laundry_thread();
5143 }
5144 vnode_list_unlock();
5145
5146 vp = zalloc_flags(vnode_zone, Z_WAITOK | Z_ZERO);
5147 VLISTNONE(vp); /* avoid double queue removal */
5148 lck_mtx_init(&vp->v_lock, &vnode_lck_grp, &vnode_lck_attr);
5149
5150 TAILQ_INIT(&vp->v_ncchildren);
5151
5152 klist_init(&vp->v_knotes);
5153 nanouptime(&ts);
5154 vp->v_id = (uint32_t)ts.tv_nsec;
5155 vp->v_flag = VSTANDARD;
5156
5157 #if CONFIG_MACF
5158 if (mac_vnode_label_init_needed(vp)) {
5159 mac_vnode_label_init(vp);
5160 }
5161 #endif /* MAC */
5162
5163 #if CONFIG_IOCOUNT_TRACE
5164 if (__improbable(bootarg_vnode_iocount_trace)) {
5165 vp->v_iocount_trace = (vnode_iocount_trace_t)kalloc_data(
5166 IOCOUNT_TRACE_MAX_TYPES * sizeof(struct vnode_iocount_trace),
5167 Z_WAITOK | Z_ZERO);
5168 }
5169 #endif /* CONFIG_IOCOUNT_TRACE */
5170
5171 vp->v_iocount = 1;
5172 goto done;
5173 }
5174
5175 wakeup_laundry_thread();
5176
5177 microuptime(¤t_tv);
5178
5179 #define MAX_WALK_COUNT 1000
5180
5181 if (!TAILQ_EMPTY(&vnode_rage_list) &&
5182 (ragevnodes >= rage_limit ||
5183 (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
5184 TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
5185 if (!(vp->v_listflag & VLIST_RAGE)) {
5186 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
5187 }
5188
5189 // if we're a dependency-capable process, skip vnodes that can
5190 // cause recycling deadlocks. (i.e. this process is diskimages
5191 // helper and the vnode is in a disk image). Querying the
5192 // mnt_kern_flag for the mount's virtual device status
5193 // is safer than checking the mnt_dependent_process, which
5194 // may not be updated if there are multiple devnode layers
5195 // in between the disk image and the final consumer.
5196
5197 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
5198 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
5199 /*
5200 * if need_reliable_vp == TRUE, then we've already sent one or more
5201 * non-reliable vnodes to the async thread for processing and timed
5202 * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
5203 * mechanism to first scan for a reliable vnode before forcing
5204 * a new vnode to be created
5205 */
5206 if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
5207 break;
5208 }
5209 }
5210
5211 // don't iterate more than MAX_WALK_COUNT vnodes to
5212 // avoid keeping the vnode list lock held for too long.
5213
5214 if (walk_count++ > MAX_WALK_COUNT) {
5215 vp = NULL;
5216 break;
5217 }
5218 }
5219 }
5220
5221 if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
5222 /*
5223 * Pick the first vp for possible reuse
5224 */
5225 walk_count = 0;
5226 TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
5227 // if we're a dependency-capable process, skip vnodes that can
5228 // cause recycling deadlocks. (i.e. this process is diskimages
5229 // helper and the vnode is in a disk image). Querying the
5230 // mnt_kern_flag for the mount's virtual device status
5231 // is safer than checking the mnt_dependent_process, which
5232 // may not be updated if there are multiple devnode layers
5233 // in between the disk image and the final consumer.
5234
5235 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
5236 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
5237 /*
5238 * if need_reliable_vp == TRUE, then we've already sent one or more
5239 * non-reliable vnodes to the async thread for processing and timed
5240 * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
5241 * mechanism to first scan for a reliable vnode before forcing
5242 * a new vnode to be created
5243 */
5244 if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
5245 break;
5246 }
5247 }
5248
5249 // don't iterate more than MAX_WALK_COUNT vnodes to
5250 // avoid keeping the vnode list lock held for too long.
5251
5252 if (walk_count++ > MAX_WALK_COUNT) {
5253 vp = NULL;
5254 break;
5255 }
5256 }
5257 }
5258
5259 //
5260 // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
5261 // then we're trying to create a vnode on behalf of a
5262 // process like diskimages-helper that has file systems
5263 // mounted on top of itself (and thus we can't reclaim
5264 // vnodes in the file systems on top of us). if we can't
5265 // find a vnode to reclaim then we'll just have to force
5266 // the allocation.
5267 //
5268 if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
5269 force_alloc = 1;
5270 vnode_list_unlock();
5271 goto retry;
5272 }
5273
5274 if (vp == NULL) {
5275 /*
5276 * we've reached the system imposed maximum number of vnodes
5277 * but there isn't a single one available
5278 * wait a bit and then retry... if we can't get a vnode
5279 * after our target number of retries, than log a complaint
5280 */
5281 if (++retries <= max_retries) {
5282 vnode_list_unlock();
5283 delay_for_interval(1, 1000 * 1000);
5284 goto retry;
5285 }
5286
5287 vnode_list_unlock();
5288 tablefull("vnode");
5289 log(LOG_EMERG, "%d desired, %ld numvnodes, "
5290 "%ld free, %ld dead, %ld async, %d rage %d bdevvp\n",
5291 desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes, bdevvp_vnodes);
5292 #if CONFIG_JETSAM
5293
5294 #if DEVELOPMENT || DEBUG
5295 if (bootarg_no_vnode_jetsam) {
5296 panic("vnode table is full");
5297 }
5298 #endif /* DEVELOPMENT || DEBUG */
5299
5300 /*
5301 * Running out of vnodes tends to make a system unusable. Start killing
5302 * processes that jetsam knows are killable.
5303 */
5304 if (memorystatus_kill_on_vnode_limit() == FALSE) {
5305 /*
5306 * If jetsam can't find any more processes to kill and there
5307 * still aren't any free vnodes, panic. Hopefully we'll get a
5308 * panic log to tell us why we ran out.
5309 */
5310 panic("vnode table is full");
5311 }
5312
5313 /*
5314 * Now that we've killed someone, wait a bit and continue looking
5315 * (with fewer retries before trying another kill).
5316 */
5317 delay_for_interval(3, 1000 * 1000);
5318 retries = 0;
5319 max_retries = 10;
5320 goto retry;
5321 #endif
5322
5323 *vpp = NULL;
5324 return ENFILE;
5325 }
5326 newvnode_nodead++;
5327 steal_this_vp:
5328 if ((vp = process_vp(vp, 1, true, &deferred)) == NULLVP) {
5329 if (deferred) {
5330 int elapsed_msecs;
5331 struct timeval elapsed_tv;
5332
5333 if (initial_tv.tv_sec == 0) {
5334 microuptime(&initial_tv);
5335 }
5336
5337 vnode_list_lock();
5338
5339 dead_vnode_waited++;
5340 dead_vnode_wanted++;
5341
5342 /*
5343 * note that we're only going to explicitly wait 10ms
5344 * for a dead vnode to become available, since even if one
5345 * isn't available, a reliable vnode might now be available
5346 * at the head of the VRAGE or free lists... if so, we
5347 * can satisfy the new_vnode request with less latency then waiting
5348 * for the full 100ms duration we're ultimately willing to tolerate
5349 */
5350 assert_wait_timeout((caddr_t)&dead_vnode_wanted, (THREAD_INTERRUPTIBLE), 10000, NSEC_PER_USEC);
5351
5352 vnode_list_unlock();
5353
5354 thread_block(THREAD_CONTINUE_NULL);
5355
5356 microuptime(&elapsed_tv);
5357
5358 timevalsub(&elapsed_tv, &initial_tv);
5359 elapsed_msecs = (int)(elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000);
5360
5361 if (elapsed_msecs >= 100) {
5362 /*
5363 * we've waited long enough... 100ms is
5364 * somewhat arbitrary for this case, but the
5365 * normal worst case latency used for UI
5366 * interaction is 100ms, so I've chosen to
5367 * go with that.
5368 *
5369 * setting need_reliable_vp to TRUE
5370 * forces us to find a reliable vnode
5371 * that we can process synchronously, or
5372 * to create a new one if the scan for
5373 * a reliable one hits the scan limit
5374 */
5375 need_reliable_vp = TRUE;
5376 }
5377 }
5378 goto retry;
5379 }
5380 OSAddAtomicLong(1, &num_reusedvnodes);
5381
5382
5383 #if CONFIG_MACF
5384 /*
5385 * We should never see VL_LABELWAIT or VL_LABEL here.
5386 * as those operations hold a reference.
5387 */
5388 assert((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
5389 assert((vp->v_lflag & VL_LABEL) != VL_LABEL);
5390 if (vp->v_lflag & VL_LABELED || mac_vnode_label(vp) != NULL) {
5391 vnode_lock_convert(vp);
5392 mac_vnode_label_recycle(vp);
5393 } else if (mac_vnode_label_init_needed(vp)) {
5394 vnode_lock_convert(vp);
5395 mac_vnode_label_init(vp);
5396 }
5397
5398 #endif /* MAC */
5399
5400 vp->v_iocount = 1;
5401 vp->v_lflag = 0;
5402 vp->v_writecount = 0;
5403 vp->v_references = 0;
5404 vp->v_iterblkflags = 0;
5405 vp->v_flag = VSTANDARD;
5406 /* vbad vnodes can point to dead_mountp */
5407 vp->v_mount = NULL;
5408 vp->v_defer_reclaimlist = (vnode_t)0;
5409
5410 vnode_unlock(vp);
5411
5412 done:
5413 *vpp = vp;
5414
5415 return 0;
5416 }
5417
/* Acquire the vnode's mutex. */
void
vnode_lock(vnode_t vp)
{
	lck_mtx_lock(&vp->v_lock);
}
5423
/* Acquire the vnode's mutex in spin mode (short hold times only). */
void
vnode_lock_spin(vnode_t vp)
{
	lck_mtx_lock_spin(&vp->v_lock);
}
5429
/*
 * Release the per-vnode mutex.
 */
void
vnode_unlock(vnode_t vp)
{
	lck_mtx_unlock(&vp->v_lock);
}
5435
5436
5437
/*
 * Take an iocount reference on a vnode.  Convenience wrapper that
 * acquires and drops the vnode lock around vnode_get_locked().
 * Returns 0 on success or ENOENT if the vnode is terminating/dead
 * with no outstanding iocounts.
 */
int
vnode_get(struct vnode *vp)
{
	int error;

	vnode_lock_spin(vp);
	error = vnode_get_locked(vp);
	vnode_unlock(vp);

	return error;
}
5449
/*
 * Take an iocount on a vnode whose lock is already held by the caller.
 *
 * Returns ENOENT if the vnode has no outstanding iocounts and is being
 * terminated or is already dead — no new references may be created at
 * that point.
 */
int
vnode_get_locked(struct vnode *vp)
{
#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
		return ENOENT;
	}

	/* an overflowing iocount indicates a reference leak somewhere */
	if (os_add_overflow(vp->v_iocount, 1, &vp->v_iocount)) {
		panic("v_iocount overflow");
	}

#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	return 0;
}
5469
5470 /*
5471 * vnode_getwithvid() cuts in line in front of a vnode drain (that is,
5472 * while the vnode is draining, but at no point after that) to prevent
5473 * deadlocks when getting vnodes from filesystem hashes while holding
5474 * resources that may prevent other iocounts from being released.
5475 */
/*
 * Take an iocount, failing (ENOENT) if 'vid' no longer matches the
 * vnode's identity; does not block behind a vnode drain (VNODE_DRAINO),
 * see the comment above.
 */
int
vnode_getwithvid(vnode_t vp, uint32_t vid)
{
	return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID | VNODE_DRAINO));
}
5481
5482 /*
5483 * vnode_getwithvid_drainok() is like vnode_getwithvid(), but *does* block behind a vnode
5484 * drain; it exists for use in the VFS name cache, where we really do want to block behind
5485 * vnode drain to prevent holding off an unmount.
5486 */
/*
 * Identity-checked iocount acquisition that, unlike vnode_getwithvid(),
 * is willing to block behind a vnode drain (no VNODE_DRAINO flag).
 */
int
vnode_getwithvid_drainok(vnode_t vp, uint32_t vid)
{
	return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID));
}
5492
/*
 * Take an iocount on a vnode the caller already holds a reference on;
 * no identity (vid) check and no special drain/dead handling flags.
 */
int
vnode_getwithref(vnode_t vp)
{
	return vget_internal(vp, 0, 0);
}
5498
5499
/*
 * Take an iocount unconditionally (VNODE_ALWAYS), even if the vnode is
 * being drained, suspended, or terminated.
 */
__private_extern__ int
vnode_getalways(vnode_t vp)
{
	return vget_internal(vp, 0, VNODE_ALWAYS);
}
5505
/*
 * Pager variant of vnode_getalways(): also sets VNODE_PAGER, which
 * skips the reference-count "un-age" accounting in vnode_getiocount().
 */
__private_extern__ int
vnode_getalways_from_pager(vnode_t vp)
{
	return vget_internal(vp, 0, VNODE_ALWAYS | VNODE_PAGER);
}
5511
/*
 * Mark a vnode dead: detach it from any mount and filesystem private
 * data, and point its operations at the deadfs vector so any further
 * use fails gracefully rather than touching stale state.
 */
static inline void
vn_set_dead(vnode_t vp)
{
	vp->v_mount = NULL;
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_data = NULL;
	vp->v_type = VBAD;
	vp->v_lflag |= VL_DEAD;
}
5522
/*
 * Drop an iocount with the vnode lock held (spin mode is sufficient on
 * entry).  When this is the last reference, may run VNOP_INACTIVE
 * (temporarily dropping the lock) and, if the vnode is marked for
 * termination, either reclaim it synchronously or — when called from
 * the pager — defer the reclaim to the async cleaner thread.
 * Always returns 0.
 */
static int
vnode_put_internal_locked(vnode_t vp, bool from_pager)
{
	vfs_context_t ctx = vfs_context_current();      /* hoist outside loop */

#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
retry:
	if (vp->v_iocount < 1) {
		panic("vnode_put(%p): iocount < 1", vp);
	}

	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
		/* not the last reference; just drop ours and return */
		vnode_dropiocount(vp);
		return 0;
	}

	if (((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE)) {
		vp->v_lflag &= ~VL_NEEDINACTIVE;
		vnode_unlock(vp);

		VNOP_INACTIVE(vp, ctx);

		vnode_lock_spin(vp);
		/*
		 * because we had to drop the vnode lock before calling
		 * VNOP_INACTIVE, the state of this vnode may have changed...
		 * we may pick up both VL_MARKTERM and either
		 * an iocount or a usecount while in the VNOP_INACTIVE call
		 * we don't want to call vnode_reclaim_internal on a vnode
		 * that has active references on it... so loop back around
		 * and reevaluate the state
		 */
		goto retry;
	}
	vp->v_lflag &= ~VL_NEEDINACTIVE;

	if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
		if (from_pager) {
			/*
			 * We can't initiate reclaim when called from the pager
			 * because it will deadlock with itself so we hand it
			 * off to the async cleaner thread.
			 */
			if (VONLIST(vp)) {
				if (!(vp->v_listflag & VLIST_ASYNC_WORK)) {
					vnode_list_lock();
					vnode_list_remove_locked(vp);
					vnode_async_list_add_locked(vp);
					vnode_list_unlock();
				}
				wakeup(&vnode_async_work_list);
			} else {
				vnode_async_list_add(vp);
			}
		} else {
			/* need the full mutex before sleeping inside reclaim */
			vnode_lock_convert(vp);
			vnode_reclaim_internal(vp, 1, 1, 0);
		}
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);

	return 0;
}
5589
/*
 * Drop an iocount; the caller already holds the vnode lock.
 */
int
vnode_put_locked(vnode_t vp)
{
	return vnode_put_internal_locked(vp, false);
}
5595
5596 int
vnode_put(vnode_t vp)5597 vnode_put(vnode_t vp)
5598 {
5599 int retval;
5600
5601 vnode_lock_spin(vp);
5602 retval = vnode_put_internal_locked(vp, false);
5603 vnode_unlock(vp);
5604
5605 return retval;
5606 }
5607
/*
 * Drop an iocount on behalf of the pager.  Passes from_pager=true so
 * that any pending reclaim is deferred to the async cleaner thread
 * instead of being initiated here (which would deadlock with paging).
 */
int
vnode_put_from_pager(vnode_t vp)
{
	int retval;

	vnode_lock_spin(vp);
	/* Cannot initiate reclaim while paging */
	retval = vnode_put_internal_locked(vp, true);
	vnode_unlock(vp);

	return retval;
}
5620
/*
 * Is the vnode in use by more than 'refcnt' other holders?
 * Unlocked convenience wrapper around vnode_isinuse_locked().
 */
int
vnode_isinuse(vnode_t vp, int refcnt)
{
	return vnode_isinuse_locked(vp, refcnt, 0);
}
5627
/*
 * Return the current usecount (unlocked snapshot; may be stale by the
 * time the caller examines it).
 */
int
vnode_usecount(vnode_t vp)
{
	return vp->v_usecount;
}
5633
/*
 * Return the current iocount (unlocked snapshot; may be stale by the
 * time the caller examines it).
 */
int
vnode_iocount(vnode_t vp)
{
	return vp->v_iocount;
}
5639
5640 int
vnode_isinuse_locked(vnode_t vp,int refcnt,int locked)5641 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
5642 {
5643 int retval = 0;
5644
5645 if (!locked) {
5646 vnode_lock_spin(vp);
5647 }
5648 if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
5649 retval = 1;
5650 goto out;
5651 }
5652 if (vp->v_type == VREG) {
5653 retval = ubc_isinuse_locked(vp, refcnt, 1);
5654 }
5655
5656 out:
5657 if (!locked) {
5658 vnode_unlock(vp);
5659 }
5660 return retval;
5661 }
5662
5663 kauth_cred_t
vnode_cred(vnode_t vp)5664 vnode_cred(vnode_t vp)
5665 {
5666 if (vp->v_cred) {
5667 return kauth_cred_require(vp->v_cred);
5668 }
5669
5670 return NULL;
5671 }
5672
5673
/*
 * Resume a vnode previously suspended with vnode_suspend().  Only the
 * suspending thread (recorded in v_owner) may clear the suspension;
 * for any other caller this is a no-op.  Wakes threads sleeping on
 * v_iocount.  Always returns 0.
 *
 * NOTE(review): v_lflag and v_owner are tested before the vnode lock
 * is taken; presumably benign since only the owner can pass the check —
 * confirm.
 */
errno_t
vnode_resume(vnode_t vp)
{
	if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
		vnode_lock_spin(vp);
		vp->v_lflag &= ~VL_SUSPENDED;
		vp->v_owner = NULL;
		vnode_unlock(vp);

		wakeup(&vp->v_iocount);
	}
	return 0;
}
5688
/* suspend vnode_t
 * Please do not use on more than one vnode at a time as it may
 * cause deadlocks.
 * xxx should we explicitly prevent this from happening?
 */
5694
/*
 * Suspend a vnode: set VL_SUSPENDED and record the calling thread as
 * v_owner.  Returns EBUSY if a suspension is already visible.
 *
 * NOTE(review): if v_owner is already set (e.g. a drain in progress),
 * we return 0 without acquiring ownership — callers cannot distinguish
 * this from success; confirm that is intended.
 */
errno_t
vnode_suspend(vnode_t vp)
{
	if (vp->v_lflag & VL_SUSPENDED) {
		return EBUSY;
	}

	vnode_lock_spin(vp);

	/*
	 * xxx is this sufficient to check if a vnode_drain is
	 * in progress?
	 */

	if (vp->v_owner == NULL) {
		vp->v_lflag |= VL_SUSPENDED;
		vp->v_owner = current_thread();
	}
	vnode_unlock(vp);

	return 0;
}
5717
5718 /*
5719 * Release any blocked locking requests on the vnode.
5720 * Used for forced-unmounts.
5721 *
5722 * XXX What about network filesystems?
5723 */
/*
 * Abort blocked advisory-lock requests on the vnode, but only when the
 * filesystem uses the VFS-local advisory lock implementation
 * (VLOCKLOCAL); otherwise the locks live elsewhere and are untouched.
 */
static void
vnode_abort_advlocks(vnode_t vp)
{
	if (vp->v_flag & VLOCKLOCAL) {
		lf_abort_advlocks(vp);
	}
}
5731
5732
/*
 * Wait until all other iocount holders release their references
 * (i.e. until v_iocount drops to the caller's own 1).  Called with the
 * vnode lock held.  Sets VL_DRAIN for the duration and records the
 * current thread in v_owner so that the draining thread itself can
 * still acquire iocounts (see vnode_getiocount).
 */
static errno_t
vnode_drain(vnode_t vp)
{
	if (vp->v_lflag & VL_DRAIN) {
		panic("vnode_drain: recursive drain");
		return ENOENT;
	}
	vp->v_lflag |= VL_DRAIN;
	vp->v_owner = current_thread();

	while (vp->v_iocount > 1) {
		if (bootarg_no_vnode_drain) {
			/* bounded sleep so leaked iocounts can't hang us forever */
			struct timespec ts = {.tv_sec = 10, .tv_nsec = 0};
			int error;

			if (vfs_unmountall_started) {
				/* shutting down: shorten the wait */
				ts.tv_sec = 1;
			}

			error = msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain_with_timeout", &ts);

			/* Try to deal with leaked iocounts under bootarg and shutting down */
			if (vp->v_iocount > 1 && error == EWOULDBLOCK &&
			    ts.tv_sec == 1 && vp->v_numoutput == 0) {
				/* forcibly discard the leaked references */
				vp->v_iocount = 1;
				break;
			}
		} else {
			msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
		}
	}

	vp->v_lflag &= ~VL_DRAIN;

	return 0;
}
5769
5770
5771 /*
5772 * if the number of recent references via vnode_getwithvid or vnode_getwithref
5773 * exceeds this threshold, than 'UN-AGE' the vnode by removing it from
5774 * the LRU list if it's currently on it... once the iocount and usecount both drop
5775 * to 0, it will get put back on the end of the list, effectively making it younger
5776 * this allows us to keep actively referenced vnodes in the list without having
5777 * to constantly remove and add to the list each time a vnode w/o a usecount is
5778 * referenced which costs us taking and dropping a global lock twice.
5779 * However, if the vnode is marked DIRTY, we want to pull it out much earlier
5780 */
5781 #define UNAGE_THRESHHOLD 25
5782 #define UNAGE_DIRTYTHRESHHOLD 6
5783
/*
 * Acquire an iocount on 'vp', honoring the VNODE_* behavior flags.
 * Called with the vnode lock held (spin mode OK; we convert before
 * sleeping).
 *
 * vflags:
 *   VNODE_NODEAD    - fail (ENOENT) if the vnode is dead
 *   VNODE_NOSUSPEND - fail (ENOENT) if the vnode is suspended
 *   VNODE_ALWAYS    - take the iocount even during drain/suspend/terminate
 *   VNODE_DRAINO    - do not block behind a vnode drain
 *   VNODE_WITHID    - validate 'vid' against v_id after any sleeping
 *   VNODE_PAGER     - caller is the pager; skip the un-age accounting
 */
errno_t
vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
{
	int nodead = vflags & VNODE_NODEAD;
	int nosusp = vflags & VNODE_NOSUSPEND;
	int always = vflags & VNODE_ALWAYS;
	int beatdrain = vflags & VNODE_DRAINO;
	int withvid = vflags & VNODE_WITHID;
	int forpager = vflags & VNODE_PAGER;

	for (;;) {
		int sleepflg = 0;

		/*
		 * if it is a dead vnode with deadfs
		 */
		if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
			return ENOENT;
		}
		/*
		 * will return VL_DEAD ones
		 */
		if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0) {
			break;
		}
		/*
		 * if suspended vnodes are to be failed
		 */
		if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
			return ENOENT;
		}
		/*
		 * if you are the owner of drain/suspend/termination, you can
		 * acquire the iocount.
		 * check for VL_TERMINATE; it does not set owner
		 */
		if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
		    (vp->v_owner == current_thread())) {
			break;
		}

		if (always != 0) {
			break;
		}

		/*
		 * If this vnode is getting drained, there are some cases where
		 * we can't block or, in case of tty vnodes, want to be
		 * interruptible.
		 */
		if (vp->v_lflag & VL_DRAIN) {
			/*
			 * In some situations, we want to get an iocount
			 * even if the vnode is draining to prevent deadlock,
			 * e.g. if we're in the filesystem, potentially holding
			 * resources that could prevent other iocounts from
			 * being released.
			 */
			if (beatdrain) {
				break;
			}
			/*
			 * Don't block if the vnode's mount point is unmounting as
			 * we may be the thread the unmount is itself waiting on
			 * Only callers who pass in vids (at this point, we've already
			 * handled nosusp and nodead) are expecting error returns
			 * from this function, so we can only return errors for
			 * those. ENODEV is intended to inform callers that the call
			 * failed because an unmount is in progress.
			 */
			if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount)) {
				return ENODEV;
			}

			if (vnode_istty(vp)) {
				/* allow signals to interrupt the wait on ttys */
				sleepflg = PCATCH;
			}
		}

		/* promote the spin lock before sleeping */
		vnode_lock_convert(vp);

		if (vp->v_lflag & VL_TERMINATE) {
			int error;

			vp->v_lflag |= VL_TERMWANT;

			error = msleep(&vp->v_lflag, &vp->v_lock,
			    (PVFS | sleepflg), "vnode getiocount", NULL);
			if (error) {
				return error;
			}
		} else {
			msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
		}
	}
	/* we may have slept above; the identity (v_id) may have changed */
	if (withvid && vid != vp->v_id) {
		return ENOENT;
	}
	if (!forpager && (++vp->v_references >= UNAGE_THRESHHOLD ||
	    (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD))) {
		/* heavily referenced: pull it off the LRU to "un-age" it */
		vp->v_references = 0;
		vnode_list_remove(vp);
	}
	vp->v_iocount++;
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	return 0;
}
5892
/*
 * Drop one iocount with the vnode lock held.  If a drain or suspension
 * is pending and this was the last contested reference, wake any
 * threads sleeping on v_iocount (see vnode_drain / vnode_getiocount).
 */
static void
vnode_dropiocount(vnode_t vp)
{
	if (vp->v_iocount < 1) {
		panic("vnode_dropiocount(%p): v_iocount < 1", vp);
	}

	vp->v_iocount--;
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, -1);
#endif
	if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1)) {
		wakeup(&vp->v_iocount);
	}
}
5908
5909
/*
 * Reclaim a vnode (unlocked entry point, no reuse, no flags).
 */
void
vnode_reclaim(struct vnode * vp)
{
	vnode_reclaim_internal(vp, 0, 0, 0);
}
5915
/*
 * Reclaim a vnode: mark it VL_TERMINATE, drain all other iocounts,
 * have the filesystem disassociate from it (vgone), and give it a new
 * identity (v_id) so stale vnode_getwithvid() callers fail.
 *
 * locked - nonzero if the caller already holds the vnode lock
 * reuse  - nonzero if the caller will reuse the vnode itself; skips
 *          putting it back on a free/dead list at the end
 * flags  - passed to vgone(); REVOKEALL additionally forces tty readers
 *          to give up their iocounts via TIOCREVOKE
 */
__private_extern__
void
vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
{
	int isfifo = 0;
	bool clear_tty_revoke = false;

	if (!locked) {
		vnode_lock(vp);
	}

	if (vp->v_lflag & VL_TERMINATE) {
		panic("vnode reclaim in progress");
	}
	vp->v_lflag |= VL_TERMINATE;

	vn_clearunionwait(vp, 1);

	/*
	 * We have to force any terminals in reads to return and give up
	 * their iocounts. It's important to do this after VL_TERMINATE
	 * has been set to ensure new reads are blocked while the
	 * revoke is in progress.
	 */
	if (vnode_istty(vp) && (flags & REVOKEALL) && (vp->v_iocount > 1)) {
		vnode_unlock(vp);
		VNOP_IOCTL(vp, TIOCREVOKE, (caddr_t)NULL, 0, vfs_context_kernel());
		clear_tty_revoke = true;
		vnode_lock(vp);
	}

	vnode_drain(vp);

	if (clear_tty_revoke) {
		vnode_unlock(vp);
		VNOP_IOCTL(vp, TIOCREVOKECLEAR, (caddr_t)NULL, 0, vfs_context_kernel());
		vnode_lock(vp);
	}

	/* capture before v_type is overwritten with VBAD below */
	isfifo = (vp->v_type == VFIFO);

	if (vp->v_type != VBAD) {
		vgone(vp, flags);               /* clean and reclaim the vnode */
	}
	/*
	 * give the vnode a new identity so that vnode_getwithvid will fail
	 * on any stale cache accesses...
	 * grab the list_lock so that if we're in "new_vnode"
	 * behind the list_lock trying to steal this vnode, the v_id is stable...
	 * once new_vnode drops the list_lock, it will block trying to take
	 * the vnode lock until we release it... at that point it will evaluate
	 * whether the v_vid has changed
	 * also need to make sure that the vnode isn't on a list where "new_vnode"
	 * can find it after the v_id has been bumped until we are completely done
	 * with the vnode (i.e. putting it back on a list has to be the very last
	 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
	 * are holding an io_count on the vnode... they need to drop the io_count
	 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
	 * they are completely done with the vnode
	 */
	vnode_list_lock();

	vnode_list_remove_locked(vp);
	vp->v_id++;

	vnode_list_unlock();

	if (isfifo) {
		struct fifoinfo * fip;

		fip = vp->v_fifoinfo;
		vp->v_fifoinfo = NULL;
		kfree_type(struct fifoinfo, fip);
	}
	vp->v_type = VBAD;

	/* by this point the filesystem must have fully let go of the vnode */
	if (vp->v_data) {
		panic("vnode_reclaim_internal: cleaned vnode isn't");
	}
	if (vp->v_numoutput) {
		panic("vnode_reclaim_internal: clean vnode has pending I/O's");
	}
	if (UBCINFOEXISTS(vp)) {
		panic("vnode_reclaim_internal: ubcinfo not cleaned");
	}
	if (vp->v_parent) {
		panic("vnode_reclaim_internal: vparent not removed");
	}
	if (vp->v_name) {
		panic("vnode_reclaim_internal: vname not removed");
	}

	vp->v_socket = NULL;

	vp->v_lflag &= ~VL_TERMINATE;
	vp->v_owner = NULL;

#if CONFIG_IOCOUNT_TRACE
	if (__improbable(bootarg_vnode_iocount_trace)) {
		/* reset the per-vnode iocount trace buffer for the next user */
		bzero(vp->v_iocount_trace,
		    IOCOUNT_TRACE_MAX_TYPES * sizeof(struct vnode_iocount_trace));
	}
#endif /* CONFIG_IOCOUNT_TRACE */

	KNOTE(&vp->v_knotes, NOTE_REVOKE);

	/* Make sure that when we reuse the vnode, no knotes left over */
	klist_init(&vp->v_knotes);

	if (vp->v_lflag & VL_TERMWANT) {
		vp->v_lflag &= ~VL_TERMWANT;
		wakeup(&vp->v_lflag);
	}
	if (!reuse) {
		/*
		 * make sure we get on the
		 * dead list if appropriate
		 */
		vnode_list_add(vp);
	}
	if (!locked) {
		vnode_unlock(vp);
	}
}
6040
/*
 * Common worker for vnode_create / vnode_create_empty / vnode_initialize.
 *
 * flavor/size - must be VNCREATE_FLAVOR/VCREATESIZE (or the trigger
 *               variant, which falls through to the normal create)
 * data        - struct vnode_fsparam (or vnode_trigger_param) supplied
 *               by the filesystem
 * vpp         - in: an existing empty vnode (from vnode_create_empty)
 *               or NULLVP to allocate a fresh one; out: the vnode with
 *               an iocount
 * init_vnode  - when 0, just allocate and return a dead placeholder
 *               vnode; when 1, fully initialize from 'data'
 */
static int
vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp,
    int init_vnode)
{
	int error;
	int insert = 1;
	int existing_vnode;
	vnode_t vp;
	vnode_t nvp;
	vnode_t dvp;
	struct  uthread *ut;
	struct componentname *cnp;
	struct vnode_fsparam *param = (struct vnode_fsparam *)data;
#if CONFIG_TRIGGERS
	struct vnode_trigger_param *tinfo = NULL;
#endif
	if (*vpp) {
		vp = *vpp;
		*vpp = NULLVP;
		existing_vnode = 1;
	} else {
		existing_vnode = 0;
	}

	if (init_vnode) {
		/* Do quick sanity check on the parameters. */
		if ((param == NULL) || (param->vnfs_vtype == VBAD)) {
			error = EINVAL;
			goto error_out;
		}

#if CONFIG_TRIGGERS
		if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) {
			tinfo = (struct vnode_trigger_param *)data;

			/* Validate trigger vnode input */
			if ((param->vnfs_vtype != VDIR) ||
			    (tinfo->vnt_resolve_func == NULL) ||
			    (tinfo->vnt_flags & ~VNT_VALID_MASK)) {
				error = EINVAL;
				goto error_out;
			}
			/* Fall through a normal create (params will be the same) */
			flavor = VNCREATE_FLAVOR;
			size = VCREATESIZE;
		}
#endif
		if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE)) {
			error = EINVAL;
			goto error_out;
		}
	}

	if (!existing_vnode) {
		if ((error = new_vnode(&vp))) {
			return error;
		}
		if (!init_vnode) {
			/* Make it so that it can be released by a vnode_put() */
			vn_set_dead(vp);
			*vpp = vp;
			return 0;
		}
	} else {
		/*
		 * A vnode obtained by vnode_create_empty has been passed to
		 * vnode_initialize - Unset VL_DEAD set by vn_set_dead. After
		 * this point, it is set back on any error.
		 *
		 * N.B. vnode locking - We make the same assumptions as the
		 * "unsplit" vnode_create did - i.e. it is safe to update the
		 * vnode's fields without the vnode lock. This vnode has been
		 * out and about with the filesystem and hopefully nothing
		 * was done to the vnode between the vnode_create_empty and
		 * now when it has come in through vnode_initialize.
		 */
		vp->v_lflag &= ~VL_DEAD;
	}

	dvp = param->vnfs_dvp;
	cnp = param->vnfs_cnp;

	vp->v_op = param->vnfs_vops;
	vp->v_type = (uint16_t)param->vnfs_vtype;
	vp->v_data = param->vnfs_fsnode;

	if (param->vnfs_markroot) {
		vp->v_flag |= VROOT;
	}
	if (param->vnfs_marksystem) {
		vp->v_flag |= VSYSTEM;
	}
	if (vp->v_type == VREG) {
		/* regular files need UBC backing sized to the file */
		error = ubc_info_init_withsize(vp, param->vnfs_filesize);
		if (error) {
#ifdef CONFIG_IOCOUNT_TRACE
			record_vp(vp, 1);
#endif
			vn_set_dead(vp);

			vnode_put(vp);
			return error;
		}
		if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED) {
			memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control);
		}
	}
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif

#if CONFIG_FIRMLINKS
	vp->v_fmlink = NULLVP;
#endif
	vp->v_flag &= ~VFMLINKTARGET;

#if CONFIG_TRIGGERS
	/*
	 * For trigger vnodes, attach trigger info to vnode
	 */
	if ((vp->v_type == VDIR) && (tinfo != NULL)) {
		/*
		 * Note: has a side effect of incrementing trigger count on the
		 * mount if successful, which we would need to undo on a
		 * subsequent failure.
		 */
#ifdef CONFIG_IOCOUNT_TRACE
		record_vp(vp, -1);
#endif
		error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE);
		if (error) {
			printf("vnode_create: vnode_resolver_create() err %d\n", error);
			vn_set_dead(vp);
#ifdef CONFIG_IOCOUNT_TRACE
			record_vp(vp, 1);
#endif
			vnode_put(vp);
			return error;
		}
	}
#endif
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		vp->v_tag = VT_DEVFS;           /* callers will reset if needed (bdevvp) */

		if ((nvp = checkalias(vp, param->vnfs_rdev))) {
			/*
			 * if checkalias returns a vnode, it will be locked
			 *
			 * first get rid of the unneeded vnode we acquired
			 */
			vp->v_data = NULL;
			vp->v_op = spec_vnodeop_p;
			vp->v_type = VBAD;
			vp->v_lflag = VL_DEAD;
			vp->v_data = NULL;
			vp->v_tag = VT_NON;
			vnode_put(vp);

			/*
			 * switch to aliased vnode and finish
			 * preparing it
			 */
			vp = nvp;

			vclean(vp, 0);
			vp->v_op = param->vnfs_vops;
			vp->v_type = (uint16_t)param->vnfs_vtype;
			vp->v_data = param->vnfs_fsnode;
			vp->v_lflag = 0;
			vp->v_mount = NULL;
			insmntque(vp, param->vnfs_mp);
			insert = 0;
			vnode_unlock(vp);
		}

		if (VCHR == vp->v_type) {
			u_int maj = major(vp->v_rdev);

			/* flag tty character devices so tty-specific paths trigger */
			if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY) {
				vp->v_flag |= VISTTY;
			}
		}
	}

	if (vp->v_type == VFIFO) {
		struct fifoinfo *fip;

		fip = kalloc_type(struct fifoinfo, Z_WAITOK | Z_ZERO);
		vp->v_fifoinfo = fip;
	}
	/* The file systems must pass the address of the location where
	 * they store the vnode pointer. When we add the vnode into the mount
	 * list and name cache they become discoverable. So the file system node
	 * must have the connection to vnode setup by then
	 */
	*vpp = vp;

	/* Add fs named reference. */
	if (param->vnfs_flags & VNFS_ADDFSREF) {
		vp->v_lflag |= VNAMED_FSHASH;
	}
	if (param->vnfs_mp) {
		if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL) {
			vp->v_flag |= VLOCKLOCAL;
		}
		if (insert) {
			if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
				panic("insmntque: vp on the free list");
			}

			/*
			 * enter in mount vnode list
			 */
			insmntque(vp, param->vnfs_mp);
		}
	}
	if (dvp && vnode_ref(dvp) == 0) {
		/* hold the parent directory for the life of this vnode */
		vp->v_parent = dvp;
	}
	if (cnp) {
		if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
			/*
			 * enter into name cache
			 * we've got the info to enter it into the name cache now
			 * cache_enter_create will pick up an extra reference on
			 * the name entered into the string cache
			 */
			vp->v_name = cache_enter_create(dvp, vp, cnp);
		} else {
			vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
		}

		if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED) {
			vp->v_flag |= VISUNION;
		}
	}
	if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
		/*
		 * this vnode is being created as cacheable in the name cache
		 * this allows us to re-enter it in the cache
		 */
		vp->v_flag |= VNCACHEABLE;
	}
	ut = current_uthread();

	if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
	    (ut->uu_flag & (UT_RAGE_VNODES | UT_KERN_RAGE_VNODES))) {
		/*
		 * process has indicated that it wants any
		 * vnodes created on its behalf to be rapidly
		 * aged to reduce the impact on the cached set
		 * of vnodes
		 *
		 * if UT_KERN_RAGE_VNODES is set, then the
		 * kernel internally wants vnodes to be rapidly
		 * aged, even if the process hasn't requested
		 * this
		 */
		vp->v_flag |= VRAGE;
	}

#if CONFIG_SECLUDED_MEMORY
	switch (secluded_for_filecache) {
	case 0:
		/*
		 * secluded_for_filecache == 0:
		 * + no file contents in secluded pool
		 */
		break;
	case 1:
		/*
		 * secluded_for_filecache == 1:
		 * + no files from /
		 * + files from /Applications/ are OK
		 * + files from /Applications/Camera are not OK
		 * + no files that are open for write
		 */
		if (vnode_vtype(vp) == VREG &&
		    vnode_mount(vp) != NULL &&
		    (!(vfs_flags(vnode_mount(vp)) & MNT_ROOTFS))) {
			/* not from root filesystem: eligible for secluded pages */
			memory_object_mark_eligible_for_secluded(
				ubc_getobject(vp, UBC_FLAGS_NONE),
				TRUE);
		}
		break;
	case 2:
		/*
		 * secluded_for_filecache == 2:
		 * + all read-only files OK, except:
		 *   + dyld_shared_cache_arm64*
		 *   + Camera
		 *   + mediaserverd
		 */
		if (vnode_vtype(vp) == VREG) {
			memory_object_mark_eligible_for_secluded(
				ubc_getobject(vp, UBC_FLAGS_NONE),
				TRUE);
		}
		break;
	default:
		break;
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	return 0;

error_out:
	if (existing_vnode) {
		vnode_put(vp);
	}
	return error;
}
6354
6355 /* USAGE:
6356 * The following api creates a vnode and associates all the parameter specified in vnode_fsparam
6357 * structure and returns a vnode handle with a reference. device aliasing is handled here so checkalias
6358 * is obsoleted by this.
6359 */
/*
 * Allocate and fully initialize a vnode from the filesystem-supplied
 * parameters in 'data'; returns it in *vpp holding an iocount.
 */
int
vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
{
	*vpp = NULLVP;
	return vnode_create_internal(flavor, size, data, vpp, 1);
}
6366
/*
 * Allocate a bare vnode, marked dead, holding an iocount.  The caller
 * either finishes it later via vnode_initialize() or releases it with
 * vnode_put().
 */
int
vnode_create_empty(vnode_t *vpp)
{
	*vpp = NULLVP;
	return vnode_create_internal(VNCREATE_FLAVOR, VCREATESIZE, NULL,
	           vpp, 0);
}
6374
/*
 * Finish initializing a vnode previously obtained from
 * vnode_create_empty(), using the filesystem parameters in 'data'.
 * On DEVELOPMENT/DEBUG kernels, first assert the vnode is still in the
 * pristine state vnode_create_empty left it in.
 */
int
vnode_initialize(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
{
	if (*vpp == NULLVP) {
		panic("NULL vnode passed to vnode_initialize");
	}
#if DEVELOPMENT || DEBUG
	/*
	 * We lock to check that vnode is fit for unlocked use in
	 * vnode_create_internal.
	 */
	vnode_lock_spin(*vpp);
	VNASSERT(((*vpp)->v_iocount == 1), *vpp,
	    ("vnode_initialize : iocount not 1, is %d", (*vpp)->v_iocount));
	VNASSERT(((*vpp)->v_usecount == 0), *vpp,
	    ("vnode_initialize : usecount not 0, is %d", (*vpp)->v_usecount));
	VNASSERT(((*vpp)->v_lflag & VL_DEAD), *vpp,
	    ("vnode_initialize : v_lflag does not have VL_DEAD, is 0x%x",
	    (*vpp)->v_lflag));
	VNASSERT(((*vpp)->v_data == NULL), *vpp,
	    ("vnode_initialize : v_data not NULL"));
	vnode_unlock(*vpp);
#endif
	return vnode_create_internal(flavor, size, data, vpp, 1);
}
6400
/*
 * Add the filesystem "named" reference (VNAMED_FSHASH) to a vnode;
 * panics if one is already present or if the vnode is on the free list.
 * Always returns 0.
 */
int
vnode_addfsref(vnode_t vp)
{
	vnode_lock_spin(vp);
	if (vp->v_lflag & VNAMED_FSHASH) {
		panic("add_fsref: vp already has named reference");
	}
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
		panic("addfsref: vp on the free list");
	}
	vp->v_lflag |= VNAMED_FSHASH;
	vnode_unlock(vp);
	return 0;
}
/*
 * Remove the filesystem "named" reference (VNAMED_FSHASH); panics if
 * none is present.  Always returns 0.
 */
int
vnode_removefsref(vnode_t vp)
{
	vnode_lock_spin(vp);
	if ((vp->v_lflag & VNAMED_FSHASH) == 0) {
		panic("remove_fsref: no named reference");
	}
	vp->v_lflag &= ~VNAMED_FSHASH;
	vnode_unlock(vp);
	return 0;
}
6426
6427
/*
 * Invoke 'callout(mp, arg)' for every mounted filesystem.
 *
 * We first snapshot the list of fsids, then re-look-up each mount by
 * fsid with an iteration reference; mounts that have disappeared (or
 * are dead/unmounting, unless VFS_ITERATE_NOSKIP_UNMOUNT) are skipped.
 *
 * flags:
 *   VFS_ITERATE_TAIL_FIRST     - iterate newest-to-oldest instead of
 *                                the default oldest-first order
 *   VFS_ITERATE_CB_DROPREF     - the callback drops the iteration ref
 *                                itself (mp may be gone on return)
 *   VFS_ITERATE_NOSKIP_UNMOUNT - also visit mounts mid-unmount
 *
 * The callout's return value controls iteration: VFS_RETURNED_DONE and
 * VFS_CLAIMED_DONE stop early (returning 0); anything else continues.
 */
int
vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg)
{
	mount_t mp;
	int ret = 0;
	fsid_t * fsid_list;
	int count, actualcount, i;
	void * allocmem;
	int indx_start, indx_stop, indx_incr;
	int cb_dropref = (flags & VFS_ITERATE_CB_DROPREF);
	int noskip_unmount = (flags & VFS_ITERATE_NOSKIP_UNMOUNT);

	count = mount_getvfscnt();
	count += 10;            /* slack in case mounts appear before the fill */

	fsid_list = kalloc_data(count * sizeof(fsid_t), Z_WAITOK);
	allocmem = (void *)fsid_list;

	actualcount = mount_fillfsids(fsid_list, count);

	/*
	 * Establish the iteration direction
	 * VFS_ITERATE_TAIL_FIRST overrides default head first order (oldest first)
	 */
	if (flags & VFS_ITERATE_TAIL_FIRST) {
		indx_start = actualcount - 1;
		indx_stop = -1;
		indx_incr = -1;
	} else { /* Head first by default */
		indx_start = 0;
		indx_stop = actualcount;
		indx_incr = 1;
	}

	for (i = indx_start; i != indx_stop; i += indx_incr) {
		/* obtain the mount point with iteration reference */
		mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);

		if (mp == (struct mount *)0) {
			/* mount vanished since the snapshot; skip it */
			continue;
		}
		mount_lock(mp);
		if ((mp->mnt_lflag & MNT_LDEAD) ||
		    (!noskip_unmount && (mp->mnt_lflag & MNT_LUNMOUNT))) {
			mount_unlock(mp);
			mount_iterdrop(mp);
			continue;
		}
		mount_unlock(mp);

		/* iterate over all the vnodes */
		ret = callout(mp, arg);

		/*
		 * Drop the iterref here if the callback didn't do it.
		 * Note: If cb_dropref is set the mp may no longer exist.
		 */
		if (!cb_dropref) {
			mount_iterdrop(mp);
		}

		switch (ret) {
		case VFS_RETURNED:
		case VFS_RETURNED_DONE:
			if (ret == VFS_RETURNED_DONE) {
				ret = 0;
				goto out;
			}
			break;

		case VFS_CLAIMED_DONE:
			ret = 0;
			goto out;
		case VFS_CLAIMED:
		default:
			break;
		}
		ret = 0;
	}

out:
	kfree_data(allocmem, count * sizeof(fsid_t));
	return ret;
}
6512
6513 /*
6514 * Update the vfsstatfs structure in the mountpoint.
6515 * MAC: Parameter eventtype added, indicating whether the event that
6516 * triggered this update came from user space, via a system call
6517 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
6518 */
/*
 * Refresh mp->mnt_vfsstat from the filesystem via vfs_getattr().
 * Only fields the FS reports as supported are overwritten; missing
 * f_bsize/f_iosize fall back to the device block size and a 1MB
 * default, respectively.  See the block comment above for the MAC
 * 'eventtype' semantics.
 */
int
vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
{
	struct vfs_attr va;
	int error;

	/*
	 * Request the attributes we want to propagate into
	 * the per-mount vfsstat structure.
	 */
	VFSATTR_INIT(&va);
	VFSATTR_WANTED(&va, f_iosize);
	VFSATTR_WANTED(&va, f_blocks);
	VFSATTR_WANTED(&va, f_bfree);
	VFSATTR_WANTED(&va, f_bavail);
	VFSATTR_WANTED(&va, f_bused);
	VFSATTR_WANTED(&va, f_files);
	VFSATTR_WANTED(&va, f_ffree);
	VFSATTR_WANTED(&va, f_bsize);
	VFSATTR_WANTED(&va, f_fssubtype);

	if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
		KAUTH_DEBUG("STAT - filesystem returned error %d", error);
		return error;
	}
#if CONFIG_MACF
	if (eventtype == VFS_USER_EVENT) {
		error = mac_mount_check_getattr(ctx, mp, &va);
		if (error != 0) {
			return error;
		}
	}
#endif
	/*
	 * Unpack into the per-mount structure.
	 *
	 * We only overwrite these fields, which are likely to change:
	 *	f_blocks
	 *	f_bfree
	 *	f_bavail
	 *	f_bused
	 *	f_files
	 *	f_ffree
	 *
	 * And these which are not, but which the FS has no other way
	 * of providing to us:
	 *	f_bsize
	 *	f_iosize
	 *	f_fssubtype
	 *
	 */
	if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
		/* 4822056 - protect against malformed server mount */
		mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
	} else {
		mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize;	/* default from the device block size */
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
		mp->mnt_vfsstat.f_iosize = va.f_iosize;
	} else {
		mp->mnt_vfsstat.f_iosize = 1024 * 1024;	/* 1MB sensible I/O size */
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_blocks)) {
		mp->mnt_vfsstat.f_blocks = va.f_blocks;
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_bfree)) {
		mp->mnt_vfsstat.f_bfree = va.f_bfree;
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_bavail)) {
		mp->mnt_vfsstat.f_bavail = va.f_bavail;
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_bused)) {
		mp->mnt_vfsstat.f_bused = va.f_bused;
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_files)) {
		mp->mnt_vfsstat.f_files = va.f_files;
	}
	if (VFSATTR_IS_SUPPORTED(&va, f_ffree)) {
		mp->mnt_vfsstat.f_ffree = va.f_ffree;
	}

	/* this is unlikely to change, but has to be queried for */
	if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype)) {
		mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
	}

	return 0;
}
6607
6608 int
mount_list_add(mount_t mp)6609 mount_list_add(mount_t mp)
6610 {
6611 int res;
6612
6613 mount_list_lock();
6614 if (get_system_inshutdown() != 0) {
6615 res = -1;
6616 } else {
6617 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
6618 nummounts++;
6619 res = 0;
6620 }
6621 mount_list_unlock();
6622
6623 return res;
6624 }
6625
6626 void
mount_list_remove(mount_t mp)6627 mount_list_remove(mount_t mp)
6628 {
6629 mount_list_lock();
6630 TAILQ_REMOVE(&mountlist, mp, mnt_list);
6631 nummounts--;
6632 mp->mnt_list.tqe_next = NULL;
6633 mp->mnt_list.tqe_prev = NULL;
6634 mount_list_unlock();
6635 }
6636
/*
 * Look up a mount by its volfs id (f_fsid.val[0]), considering only
 * mounts that advertise MNTK_PATH_FROM_ID and are not being unmounted.
 *
 * If 'withref' is set, an iteration reference is taken on the match
 * while the mount list lock is still held; the mount is then busied
 * (vfs_busy, non-blocking) after the lock is dropped, and the
 * iteration reference is dropped again before returning.  A failure
 * at either step makes the lookup return NULL.
 */
mount_t
mount_lookupby_volfsid(int volfs_id, int withref)
{
	mount_t cur_mount = (mount_t)0;
	mount_t mp;

	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
		    (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
		    (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
			cur_mount = mp;
			if (withref) {
				if (mount_iterref(cur_mount, 1)) {
					/* could not get an iter ref; report no match */
					cur_mount = (mount_t)0;
					mount_list_unlock();
					goto out;
				}
			}
			break;
		}
	}
	mount_list_unlock();
	if (withref && (cur_mount != (mount_t)0)) {
		mp = cur_mount;
		/* busy the mount outside the list lock; don't wait */
		if (vfs_busy(mp, LK_NOWAIT) != 0) {
			cur_mount = (mount_t)0;
		}
		/* the iter ref was only needed to keep mp alive across vfs_busy */
		mount_iterdrop(mp);
	}
out:
	return cur_mount;
}
6670
6671 mount_t
mount_list_lookupby_fsid(fsid_t * fsid,int locked,int withref)6672 mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
6673 {
6674 mount_t retmp = (mount_t)0;
6675 mount_t mp;
6676
6677 if (!locked) {
6678 mount_list_lock();
6679 }
6680 TAILQ_FOREACH(mp, &mountlist, mnt_list)
6681 if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
6682 mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
6683 retmp = mp;
6684 if (withref) {
6685 if (mount_iterref(retmp, 1)) {
6686 retmp = (mount_t)0;
6687 }
6688 }
6689 goto out;
6690 }
6691 out:
6692 if (!locked) {
6693 mount_list_unlock();
6694 }
6695 return retmp;
6696 }
6697
/*
 * Resolve 'path' to a vnode, returning it with an iocount in '*vpp'.
 *
 * Parameters:	path		NUL-terminated kernel-space path.
 *		flags		VNODE_LOOKUP_* flags controlling symlink
 *				following and mount-point crossing.
 *		vpp		Out: the resolved vnode on success.
 *		ctx		VFS context; must not be NULL.
 *		start_dvp	Optional starting directory for relative
 *				paths (ignored for absolute paths).
 *
 * Returns:	0		Success
 *		EINVAL		NULL context
 *		namei:???	Lookup failure
 */
errno_t
vnode_lookupat(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx,
    vnode_t start_dvp)
{
	struct nameidata *ndp;
	int error = 0;
	u_int32_t ndflags = 0;

	if (ctx == NULL) {
		return EINVAL;
	}

	/* nameidata is large; heap-allocate rather than burdening the stack */
	ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);

	if (flags & VNODE_LOOKUP_NOFOLLOW) {
		ndflags = NOFOLLOW;
	} else {
		ndflags = FOLLOW;
	}

	if (flags & VNODE_LOOKUP_NOCROSSMOUNT) {
		ndflags |= NOCROSSMOUNT;
	}

	if (flags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
		ndflags |= CN_NBMOUNTLOOK;
	}

	/* XXX AUDITVNPATH1 needed ? */
	NDINIT(ndp, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(path), ctx);

	/* relative lookup: start from the caller-supplied directory */
	if (start_dvp && (path[0] != '/')) {
		ndp->ni_dvp = start_dvp;
		ndp->ni_cnd.cn_flags |= USEDVP;
	}

	if ((error = namei(ndp))) {
		goto out_free;
	}

	ndp->ni_cnd.cn_flags &= ~USEDVP;

	*vpp = ndp->ni_vp;
	nameidone(ndp);

out_free:
	kfree_type(struct nameidata, ndp);
	return error;
}
6748
/*
 * Resolve 'path' to a vnode; convenience wrapper around
 * vnode_lookupat() with no starting directory vnode.
 */
errno_t
vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
{
	return vnode_lookupat(path, flags, vpp, ctx, NULLVP);
}
6754
/*
 * Open 'path', returning the opened vnode with an iocount in '*vpp'.
 *
 * Parameters:	path	NUL-terminated kernel-space path.
 *		fmode	Open mode (FREAD/FWRITE/O_* flags); O_NOFOLLOW
 *			here also forces VNODE_LOOKUP_NOFOLLOW.
 *		cmode	Creation mode bits, passed through to vn_open.
 *		flags	VNODE_LOOKUP_* lookup-behavior flags.
 *		vpp	Out: opened vnode on success, NULL on failure.
 *		ctx	VFS context; NULL falls back to the current
 *			thread's context (tolerated, see below).
 *
 * Returns:	0		Success
 *		vn_open:???	Open/lookup failure
 */
errno_t
vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
{
	struct nameidata *ndp = NULL;
	int error;
	u_int32_t ndflags = 0;
	int lflags = flags;

	if (ctx == NULL) {		/* XXX technically an error */
		ctx = vfs_context_current();
	}

	ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);

	/* O_NOFOLLOW in the open mode implies no-follow at lookup time too */
	if (fmode & O_NOFOLLOW) {
		lflags |= VNODE_LOOKUP_NOFOLLOW;
	}

	if (lflags & VNODE_LOOKUP_NOFOLLOW) {
		ndflags = NOFOLLOW;
	} else {
		ndflags = FOLLOW;
	}

	if (lflags & VNODE_LOOKUP_NOCROSSMOUNT) {
		ndflags |= NOCROSSMOUNT;
	}

	if (lflags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
		ndflags |= CN_NBMOUNTLOOK;
	}

	/* XXX AUDITVNPATH1 needed ? */
	NDINIT(ndp, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(path), ctx);

	if ((error = vn_open(ndp, fmode, cmode))) {
		*vpp = NULL;
	} else {
		*vpp = ndp->ni_vp;
	}

	kfree_type(struct nameidata, ndp);
	return error;
}
6800
6801 errno_t
vnode_close(vnode_t vp,int flags,vfs_context_t ctx)6802 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
6803 {
6804 int error;
6805
6806 if (ctx == NULL) {
6807 ctx = vfs_context_current();
6808 }
6809
6810 error = vn_close(vp, flags, ctx);
6811 vnode_put(vp);
6812 return error;
6813 }
6814
6815 errno_t
vnode_mtime(vnode_t vp,struct timespec * mtime,vfs_context_t ctx)6816 vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx)
6817 {
6818 struct vnode_attr va;
6819 int error;
6820
6821 VATTR_INIT(&va);
6822 VATTR_WANTED(&va, va_modify_time);
6823 error = vnode_getattr(vp, &va, ctx);
6824 if (!error) {
6825 *mtime = va.va_modify_time;
6826 }
6827 return error;
6828 }
6829
6830 errno_t
vnode_flags(vnode_t vp,uint32_t * flags,vfs_context_t ctx)6831 vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx)
6832 {
6833 struct vnode_attr va;
6834 int error;
6835
6836 VATTR_INIT(&va);
6837 VATTR_WANTED(&va, va_flags);
6838 error = vnode_getattr(vp, &va, ctx);
6839 if (!error) {
6840 *flags = va.va_flags;
6841 }
6842 return error;
6843 }
6844
6845 /*
6846 * Returns: 0 Success
6847 * vnode_getattr:???
6848 */
6849 errno_t
vnode_size(vnode_t vp,off_t * sizep,vfs_context_t ctx)6850 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
6851 {
6852 struct vnode_attr va;
6853 int error;
6854
6855 VATTR_INIT(&va);
6856 VATTR_WANTED(&va, va_data_size);
6857 error = vnode_getattr(vp, &va, ctx);
6858 if (!error) {
6859 *sizep = va.va_data_size;
6860 }
6861 return error;
6862 }
6863
6864 errno_t
vnode_setsize(vnode_t vp,off_t size,int ioflag,vfs_context_t ctx)6865 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
6866 {
6867 struct vnode_attr va;
6868
6869 VATTR_INIT(&va);
6870 VATTR_SET(&va, va_data_size, size);
6871 va.va_vaflags = ioflag & 0xffff;
6872 return vnode_setattr(vp, &va, ctx);
6873 }
6874
/*
 * Mark the vnode dirty (set VISDIRTY) under the vnode spinlock.
 * Always returns 0.
 */
int
vnode_setdirty(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VISDIRTY;
	vnode_unlock(vp);
	return 0;
}
6883
/*
 * Clear the vnode's dirty marker (VISDIRTY) under the vnode spinlock.
 * Always returns 0.
 */
int
vnode_cleardirty(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VISDIRTY;
	vnode_unlock(vp);
	return 0;
}
6892
6893 int
vnode_isdirty(vnode_t vp)6894 vnode_isdirty(vnode_t vp)
6895 {
6896 int dirty;
6897
6898 vnode_lock_spin(vp);
6899 dirty = (vp->v_flag & VISDIRTY) ? 1 : 0;
6900 vnode_unlock(vp);
6901
6902 return dirty;
6903 }
6904
6905 static int
vn_create_reg(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,struct vnode_attr * vap,uint32_t flags,int fmode,uint32_t * statusp,vfs_context_t ctx)6906 vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
6907 {
6908 /* Only use compound VNOP for compound operation */
6909 if (vnode_compound_open_available(dvp) && ((flags & VN_CREATE_DOOPEN) != 0)) {
6910 *vpp = NULLVP;
6911 return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, O_CREAT, fmode, statusp, vap, ctx);
6912 } else {
6913 return VNOP_CREATE(dvp, vpp, &ndp->ni_cnd, vap, ctx);
6914 }
6915 }
6916
6917 /*
6918 * Create a filesystem object of arbitrary type with arbitrary attributes in
6919 * the spevied directory with the specified name.
6920 *
6921 * Parameters: dvp Pointer to the vnode of the directory
6922 * in which to create the object.
6923 * vpp Pointer to the area into which to
6924 * return the vnode of the created object.
6925 * cnp Component name pointer from the namei
6926 * data structure, containing the name to
6927 * use for the create object.
6928 * vap Pointer to the vnode_attr structure
6929 * describing the object to be created,
6930 * including the type of object.
6931 * flags VN_* flags controlling ACL inheritance
6932 * and whether or not authorization is to
6933 * be required for the operation.
6934 *
6935 * Returns: 0 Success
6936 * !0 errno value
6937 *
 * Implicit:	*vpp		Contains the vnode of the object that
 *				was created, if successful.
 *		*cnp		May be modified by the underlying VFS.
 *		*vap		May be modified by the underlying VFS;
 *				it may also have been modified by ACL
 *				inheritance or attribute defaulting, so
 *				the caller's copy of the attributes may
 *				be modified, even if the operation is
 *				unsuccessful.
 *
6948 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
6949 *
6950 * Modification of '*cnp' and '*vap' by the underlying VFS is
6951 * strongly discouraged.
6952 *
6953 * XXX: This function is a 'vn_*' function; it belongs in vfs_vnops.c
6954 *
6955 * XXX: We should enummerate the possible errno values here, and where
6956 * in the code they originated.
6957 */
6958 errno_t
vn_create(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,struct vnode_attr * vap,uint32_t flags,int fmode,uint32_t * statusp,vfs_context_t ctx)6959 vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
6960 {
6961 errno_t error, old_error;
6962 vnode_t vp = (vnode_t)0;
6963 boolean_t batched;
6964 struct componentname *cnp;
6965 uint32_t defaulted;
6966
6967 cnp = &ndp->ni_cnd;
6968 error = 0;
6969 batched = namei_compound_available(dvp, ndp) ? TRUE : FALSE;
6970
6971 KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr);
6972
6973 if (flags & VN_CREATE_NOINHERIT) {
6974 vap->va_vaflags |= VA_NOINHERIT;
6975 }
6976 if (flags & VN_CREATE_NOAUTH) {
6977 vap->va_vaflags |= VA_NOAUTH;
6978 }
6979 /*
6980 * Handle ACL inheritance, initialize vap.
6981 */
6982 error = vn_attribute_prepare(dvp, vap, &defaulted, ctx);
6983 if (error) {
6984 return error;
6985 }
6986
6987 if (vap->va_type != VREG && (fmode != 0 || (flags & VN_CREATE_DOOPEN) || statusp)) {
6988 panic("Open parameters, but not a regular file.");
6989 }
6990 if ((fmode != 0) && ((flags & VN_CREATE_DOOPEN) == 0)) {
6991 panic("Mode for open, but not trying to open...");
6992 }
6993
6994
6995 /*
6996 * Create the requested node.
6997 */
6998 switch (vap->va_type) {
6999 case VREG:
7000 error = vn_create_reg(dvp, vpp, ndp, vap, flags, fmode, statusp, ctx);
7001 break;
7002 case VDIR:
7003 error = vn_mkdir(dvp, vpp, ndp, vap, ctx);
7004 break;
7005 case VSOCK:
7006 case VFIFO:
7007 case VBLK:
7008 case VCHR:
7009 error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
7010 break;
7011 default:
7012 panic("vnode_create: unknown vtype %d", vap->va_type);
7013 }
7014 if (error != 0) {
7015 KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error);
7016 goto out;
7017 }
7018
7019 vp = *vpp;
7020 old_error = error;
7021
7022 /*
7023 * If some of the requested attributes weren't handled by the VNOP,
7024 * use our fallback code.
7025 */
7026 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap) && *vpp) {
7027 KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl);
7028 error = vnode_setattr_fallback(*vpp, vap, ctx);
7029 }
7030
7031 #if CONFIG_MACF
7032 if ((error == 0) && !(flags & VN_CREATE_NOLABEL)) {
7033 error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
7034 }
7035 #endif
7036
7037 if ((error != 0) && (vp != (vnode_t)0)) {
7038 /* If we've done a compound open, close */
7039 if (batched && (old_error == 0) && (vap->va_type == VREG)) {
7040 VNOP_CLOSE(vp, fmode, ctx);
7041 }
7042
7043 /* Need to provide notifications if a create succeeded */
7044 if (!batched) {
7045 *vpp = (vnode_t) 0;
7046 vnode_put(vp);
7047 vp = NULLVP;
7048 }
7049 }
7050
7051 /*
7052 * For creation VNOPs, this is the equivalent of
7053 * lookup_handle_found_vnode.
7054 */
7055 if (kdebug_enable && *vpp) {
7056 kdebug_lookup(*vpp, cnp);
7057 }
7058
7059 out:
7060 vn_attribute_cleanup(vap, defaulted);
7061
7062 return error;
7063 }
7064
7065 static kauth_scope_t vnode_scope;
7066 static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
7067 uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
7068 static int vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
7069 vnode_t vp, vnode_t dvp, int *errorp);
7070
/*
 * Context block threaded through the vnode authorization code.
 * Caches ownership/group evaluation results in 'flags' (validity
 * tracked bit-for-bit in 'flags_valid') so repeated checks against
 * the same vnode/directory pair are cheap.
 */
typedef struct _vnode_authorize_context {
	vnode_t vp;			/* vnode being authorized */
	struct vnode_attr *vap;		/* attributes of vp */
	vnode_t dvp;			/* parent directory, if relevant */
	struct vnode_attr *dvap;	/* attributes of dvp */
	vfs_context_t ctx;		/* caller's VFS context */
	int flags;			/* cached _VAC_* results */
	int flags_valid;		/* which 'flags' bits are valid */
#define _VAC_IS_OWNER		(1<<0)	/* caller owns vp */
#define _VAC_IN_GROUP		(1<<1)	/* caller in vp's group */
#define _VAC_IS_DIR_OWNER	(1<<2)	/* caller owns dvp */
#define _VAC_IN_DIR_GROUP	(1<<3)	/* caller in dvp's group */
#define _VAC_NO_VNODE_POINTERS	(1<<4)	/* vp/dvp pointers not usable */
} *vauth_ctx;
7085
/*
 * Register the vnode authorization scope (KAUTH_SCOPE_VNODE) with
 * kauth, installing vnode_authorize_callback as its listener.
 */
void
vnode_authorize_init(void)
{
	vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
}
7091
7092 #define VATTR_PREPARE_DEFAULTED_UID 0x1
7093 #define VATTR_PREPARE_DEFAULTED_GID 0x2
7094 #define VATTR_PREPARE_DEFAULTED_MODE 0x4
7095
/*
 * Prepare creation attributes: apply ACL inheritance from 'dvp' and
 * default any unset uid/gid/mode via vnode_authattr_new_internal.
 *
 * On success, '*defaulted_fieldsp' records (VATTR_PREPARE_DEFAULTED_*)
 * which fields were defaulted here, so vn_attribute_cleanup() can
 * undo them.  If an inherited ACL replaces a caller-supplied one, the
 * original is stashed in vap->va_base_acl for later restoration.
 *
 * Returns 0 on success; on failure the attributes are cleaned up
 * before the error is returned.
 */
int
vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
{
	kauth_acl_t nacl = NULL, oacl = NULL;
	int error;

	/*
	 * Handle ACL inheritance.
	 */
	if (!(vap->va_vaflags & VA_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
		/* save the original filesec */
		if (VATTR_IS_ACTIVE(vap, va_acl)) {
			oacl = vap->va_acl;
		}

		vap->va_acl = NULL;
		if ((error = kauth_acl_inherit(dvp,
		    oacl,
		    &nacl,
		    vap->va_type == VDIR,
		    ctx)) != 0) {
			KAUTH_DEBUG("%p    CREATE - error %d processing inheritance", dvp, error);
			return error;
		}

		/*
		 * If the generated ACL is NULL, then we can save ourselves some effort
		 * by clearing the active bit.
		 */
		if (nacl == NULL) {
			VATTR_CLEAR_ACTIVE(vap, va_acl);
		} else {
			/* remember the caller's ACL so cleanup can restore it */
			vap->va_base_acl = oacl;
			VATTR_SET(vap, va_acl, nacl);
		}
	}

	error = vnode_authattr_new_internal(dvp, vap, (vap->va_vaflags & VA_NOAUTH), defaulted_fieldsp, ctx);
	if (error) {
		vn_attribute_cleanup(vap, *defaulted_fieldsp);
	}

	return error;
}
7140
/*
 * Undo the work of vn_attribute_prepare(): restore the caller's
 * original ACL (if one was saved in va_base_acl), free the inherited
 * ACL where we own it, and clear any uid/gid/mode fields that were
 * defaulted (per 'defaulted_fields').
 */
void
vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields)
{
	/*
	 * If the caller supplied a filesec in vap, it has been replaced
	 * now by the post-inheritance copy.  We need to put the original back
	 * and free the inherited product.
	 */
	kauth_acl_t nacl, oacl;

	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		nacl = vap->va_acl;
		oacl = vap->va_base_acl;

		if (oacl) {
			/* restore the caller's original ACL */
			VATTR_SET(vap, va_acl, oacl);
			vap->va_base_acl = NULL;
		} else {
			VATTR_CLEAR_ACTIVE(vap, va_acl);
		}

		if (nacl != NULL) {
			/*
			 * Only free the ACL buffer if 'VA_FILESEC_ACL' is not set as it
			 * should be freed by the caller or it is a post-inheritance copy.
			 */
			if (!(vap->va_vaflags & VA_FILESEC_ACL) ||
			    (oacl != NULL && nacl != oacl)) {
				kauth_acl_free(nacl);
			}
		}
	}

	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_MODE) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_mode);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_GID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_UID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
	}

	return;
}
7186
/*
 * Authorize removal of 'vp' from directory 'dvp'.
 *
 * Refuses directory unlink unless the filesystem advertises
 * MNTK_DIR_HARDLINKS, then runs the MAC unlink check (when built with
 * CONFIG_MACF) followed by the kauth DELETE check.
 *
 * Returns 0 if the operation is permitted, otherwise an errno.
 */
int
vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved)
{
#if !CONFIG_MACF
#pragma unused(cnp)
#endif
	int error = 0;

	/*
	 * Normally, unlinking of directories is not supported.
	 * However, some file systems may have limited support.
	 */
	if ((vp->v_type == VDIR) &&
	    !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) {
		return EPERM;   /* POSIX */
	}

	/* authorize the delete operation */
#if CONFIG_MACF
	if (!error) {
		error = mac_vnode_check_unlink(ctx, dvp, vp, cnp);
	}
#endif /* MAC */
	if (!error) {
		error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
	}

	return error;
}
7216
/*
 * Authorize opening an existing vnode 'vp' with open mode 'fmode'.
 *
 * Performs type/flag sanity checks (O_DIRECTORY vs non-directory,
 * sockets, O_NOFOLLOW on symlinks, writes to directories, trailing
 * slash on non-directories), the MAC open check, then the kauth
 * read/write/append authorization derived from 'fmode'.
 *
 * Returns 0 if the open is permitted, otherwise an errno.
 */
int
vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved)
{
	/* Open of existing case */
	kauth_action_t action;
	int error = 0;
	if (cnp->cn_ndp == NULL) {
		panic("NULL ndp");
	}
	if (reserved != NULL) {
		panic("reserved not NULL.");
	}

#if CONFIG_MACF
	/* XXX may do duplicate work here, but ignore that for now (idempotent) */
	if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) {
		error = vnode_label(vnode_mount(vp), NULL, vp, NULL, 0, ctx);
		if (error) {
			return error;
		}
	}
#endif

	if ((fmode & O_DIRECTORY) && vp->v_type != VDIR) {
		return ENOTDIR;
	}

	if (vp->v_type == VSOCK && vp->v_tag != VT_FDESC) {
		return EOPNOTSUPP;      /* Operation not supported on socket */
	}

	if (vp->v_type == VLNK && (fmode & O_NOFOLLOW) != 0) {
		return ELOOP;   /* O_NOFOLLOW was specified and the target is a symbolic link */
	}

	/* disallow write operations on directories */
	if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) {
		return EISDIR;
	}

	/* a path ending in '/' may only name a directory */
	if ((cnp->cn_ndp->ni_flag & NAMEI_TRAILINGSLASH)) {
		if (vp->v_type != VDIR) {
			return ENOTDIR;
		}
	}

#if CONFIG_MACF
	/* If a file being opened is a shadow file containing
	 * namedstream data, ignore the macf checks because it
	 * is a kernel internal file and access should always
	 * be allowed.
	 */
	if (!(vnode_isshadow(vp) && vnode_isnamedstream(vp))) {
		error = mac_vnode_check_open(ctx, vp, fmode);
		if (error) {
			return error;
		}
	}
#endif

	/* compute action to be authorized */
	action = 0;
	if (fmode & FREAD) {
		action |= KAUTH_VNODE_READ_DATA;
	}
	if (fmode & (FWRITE | O_TRUNC)) {
		/*
		 * If we are writing, appending, and not truncating,
		 * indicate that we are appending so that if the
		 * UF_APPEND or SF_APPEND bits are set, we do not deny
		 * the open.
		 */
		if ((fmode & O_APPEND) && !(fmode & O_TRUNC)) {
			action |= KAUTH_VNODE_APPEND_DATA;
		} else {
			action |= KAUTH_VNODE_WRITE_DATA;
		}
	}
	error = vnode_authorize(vp, NULL, action, ctx);
#if NAMEDSTREAMS
	if (error == EACCES) {
		/*
		 * Shadow files may exist on-disk with a different UID/GID
		 * than that of the current context.  Verify that this file
		 * is really a shadow file.  If it was created successfully
		 * then it should be authorized.
		 */
		if (vnode_isshadow(vp) && vnode_isnamedstream(vp)) {
			error = vnode_verifynamedstream(vp);
		}
	}
#endif

	return error;
}
7312
/*
 * Authorize creation of a new file named by 'cnp' in directory 'dvp'.
 *
 * Validates the creation path (only when the lookup was left
 * unfinished, i.e. a compound VNOP will complete it), runs the MAC
 * create check, then requires KAUTH_VNODE_ADD_FILE on the directory.
 *
 * Returns 0 if creation is permitted, otherwise an errno.
 */
int
vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
{
#if !CONFIG_MACF
#pragma unused(vap)
#endif
	/* Creation case */
	int error;

	if (cnp->cn_ndp == NULL) {
		panic("NULL cn_ndp");
	}
	if (reserved != NULL) {
		panic("reserved not NULL.");
	}

	/* Only validate path for creation if we didn't do a complete lookup */
	if (cnp->cn_ndp->ni_flag & NAMEI_UNFINISHED) {
		error = lookup_validate_creation_path(cnp->cn_ndp);
		if (error) {
			return error;
		}
	}

#if CONFIG_MACF
	error = mac_vnode_check_create(ctx, dvp, cnp, vap);
	if (error) {
		return error;
	}
#endif /* CONFIG_MACF */

	return vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx);
}
7346
/*
 * Authorize a plain rename; wrapper around vn_authorize_renamex()
 * with no rename flags.
 */
int
vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
    struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
    vfs_context_t ctx, void *reserved)
{
	return vn_authorize_renamex(fdvp, fvp, fcnp, tdvp, tvp, tcnp, ctx, 0, reserved);
}
7354
/*
 * Authorize a rename with flags; wrapper around
 * vn_authorize_renamex_with_paths() with no source/target paths
 * (suppresses the WILL_RENAME fileop notifications).
 */
int
vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
    struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
    vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
{
	return vn_authorize_renamex_with_paths(fdvp, fvp, fcnp, NULL, tdvp, tvp, tcnp, NULL, ctx, flags, reserved);
}
7362
/*
 * Authorize renaming 'fvp' (named 'fcnp' in 'fdvp') to 'tcnp' in
 * 'tdvp', possibly replacing 'tvp'.  With VFS_RENAME_SWAP the two
 * objects are exchanged and both directions are authorized.
 *
 * The checks run in three phases: MAC rename checks, miscellaneous
 * sanity checks (type compatibility, no renaming "."/"..", target not
 * a descendant of source), and finally the kauth checks (DELETE on
 * the old name(s), ADD_FILE/ADD_SUBDIRECTORY on the receiving
 * directory, plus ADD_SUBDIRECTORY on a directory whose ".." entry
 * must be rewritten).  'from_path'/'to_path', when supplied, trigger
 * KAUTH_FILEOP_WILL_RENAME fileop notifications whose results are
 * deliberately ignored.
 *
 * Returns 0 if the rename is permitted, otherwise an errno.
 */
int
vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path,
    struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path,
    vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
{
	int error = 0;
	int moving = 0;
	bool swap = flags & VFS_RENAME_SWAP;

	if (reserved != NULL) {
		panic("Passed something other than NULL as reserved field!");
	}

	/*
	 * Avoid renaming "." and "..".
	 *
	 * XXX No need to check for this in the FS.  We should always have the leaves
	 * in VFS in this case.
	 */
	if (fvp->v_type == VDIR &&
	    ((fdvp == fvp) ||
	    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
	    ((fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT))) {
		error = EINVAL;
		goto out;
	}

	if (tvp == NULLVP && vnode_compound_rename_available(tdvp)) {
		error = lookup_validate_creation_path(tcnp->cn_ndp);
		if (error) {
			goto out;
		}
	}

	/***** <MACF> *****/
#if CONFIG_MACF
	error = mac_vnode_check_rename(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
	if (error) {
		goto out;
	}
	if (swap) {
		/* a swap is a rename in both directions; check the reverse too */
		error = mac_vnode_check_rename(ctx, tdvp, tvp, tcnp, fdvp, fvp, fcnp);
		if (error) {
			goto out;
		}
	}
#endif
	/***** </MACF> *****/

	/***** <MiscChecks> *****/
	if (tvp != NULL) {
		if (!swap) {
			/* cannot replace a directory with a file, or vice versa */
			if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
				error = ENOTDIR;
				goto out;
			} else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
				error = EISDIR;
				goto out;
			}
		}
	} else if (swap) {
		/*
		 * Caller should have already checked this and returned
		 * ENOENT.  If we send back ENOENT here, caller will retry
		 * which isn't what we want so we send back EINVAL here
		 * instead.
		 */
		error = EINVAL;
		goto out;
	}

	if (fvp == tdvp) {
		error = EINVAL;
		goto out;
	}

	/*
	 * The following edge case is caught here:
	 * (to cannot be a descendent of from)
	 *
	 *       o fdvp
	 *      /
	 *     /
	 *    o fvp
	 *     \
	 *      \
	 *       o tdvp
	 *      /
	 *     /
	 *    o tvp
	 */
	if (tdvp->v_parent == fvp) {
		error = EINVAL;
		goto out;
	}

	if (swap && fdvp->v_parent == tvp) {
		error = EINVAL;
		goto out;
	}
	/***** </MiscChecks> *****/

	/***** <Kauth> *****/

	/*
	 * As part of the Kauth step, we call out to allow 3rd-party
	 * fileop notification of "about to rename".  This is needed
	 * in the event that 3rd-parties need to know that the DELETE
	 * authorization is actually part of a rename.  It's important
	 * that we guarantee that the DELETE call-out will always be
	 * made if the WILL_RENAME call-out is made.  Another fileop
	 * call-out will be performed once the operation is completed.
	 * We can ignore the result of kauth_authorize_fileop().
	 *
	 * N.B. We are passing the vnode and *both* paths to each
	 * call; kauth_authorize_fileop() extracts the "from" path
	 * when posting a KAUTH_FILEOP_WILL_RENAME notification.
	 * As such, we only post these notifications if all of the
	 * information we need is provided.
	 */

	if (swap) {
		kauth_action_t f = 0, t = 0;

		/*
		 * Directories changing parents need ...ADD_SUBDIR...  to
		 * permit changing ".."
		 */
		if (fdvp != tdvp) {
			if (vnode_isdir(fvp)) {
				f = KAUTH_VNODE_ADD_SUBDIRECTORY;
			}
			if (vnode_isdir(tvp)) {
				t = KAUTH_VNODE_ADD_SUBDIRECTORY;
			}
		}
		if (to_path != NULL) {
			kauth_authorize_fileop(vfs_context_ucred(ctx),
			    KAUTH_FILEOP_WILL_RENAME,
			    (uintptr_t)fvp,
			    (uintptr_t)to_path);
		}
		error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | f, ctx);
		if (error) {
			goto out;
		}
		if (from_path != NULL) {
			kauth_authorize_fileop(vfs_context_ucred(ctx),
			    KAUTH_FILEOP_WILL_RENAME,
			    (uintptr_t)tvp,
			    (uintptr_t)from_path);
		}
		error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE | t, ctx);
		if (error) {
			goto out;
		}
		/* each directory receives the other object's type */
		f = vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
		t = vnode_isdir(tvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
		if (fdvp == tdvp) {
			error = vnode_authorize(fdvp, NULL, f | t, ctx);
		} else {
			error = vnode_authorize(fdvp, NULL, t, ctx);
			if (error) {
				goto out;
			}
			error = vnode_authorize(tdvp, NULL, f, ctx);
		}
		if (error) {
			goto out;
		}
	} else {
		error = 0;
		if ((tvp != NULL) && vnode_isdir(tvp)) {
			if (tvp != fdvp) {
				moving = 1;
			}
		} else if (tdvp != fdvp) {
			moving = 1;
		}

		/*
		 * must have delete rights to remove the old name even in
		 * the simple case of fdvp == tdvp.
		 *
		 * If fvp is a directory, and we are changing it's parent,
		 * then we also need rights to rewrite its ".." entry as well.
		 */
		if (to_path != NULL) {
			kauth_authorize_fileop(vfs_context_ucred(ctx),
			    KAUTH_FILEOP_WILL_RENAME,
			    (uintptr_t)fvp,
			    (uintptr_t)to_path);
		}
		if (vnode_isdir(fvp)) {
			if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
				goto out;
			}
		} else {
			if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0) {
				goto out;
			}
		}
		if (moving) {
			/* moving into tdvp or tvp, must have rights to add */
			if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp,
			    NULL,
			    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE,
			    ctx)) != 0) {
				goto out;
			}
		} else {
			/* node staying in same directory, must be allowed to add new name */
			if ((error = vnode_authorize(fdvp, NULL,
			    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0) {
				goto out;
			}
		}
		/* overwriting tvp */
		if ((tvp != NULL) && !vnode_isdir(tvp) &&
		    ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) {
			goto out;
		}
	}

	/***** </Kauth> *****/

	/* XXX more checks? */
out:
	return error;
}
7593
/*
 * Authorize creation of a subdirectory named by 'cnp' in 'dvp'.
 *
 * Validates the creation path when a compound mkdir VNOP is
 * available, runs the MAC create check, then requires
 * KAUTH_VNODE_ADD_SUBDIRECTORY on the parent directory.
 *
 * Returns 0 if the mkdir is permitted, otherwise an errno.
 */
int
vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
{
#if !CONFIG_MACF
#pragma unused(vap)
#endif
	int error;

	if (reserved != NULL) {
		panic("reserved not NULL in vn_authorize_mkdir()");
	}

	/* XXX A hack for now, to make shadow files work */
	if (cnp->cn_ndp == NULL) {
		return 0;
	}

	if (vnode_compound_mkdir_available(dvp)) {
		error = lookup_validate_creation_path(cnp->cn_ndp);
		if (error) {
			goto out;
		}
	}

#if CONFIG_MACF
	error = mac_vnode_check_create(ctx,
	    dvp, cnp, vap);
	if (error) {
		goto out;
	}
#endif

	/* authorize addition of a directory to the parent */
	if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
		goto out;
	}

out:
	return error;
}
7634
/*
 * Authorize removal of directory 'vp' from its parent 'dvp'.
 *
 * Rejects non-directories (ENOTDIR) and "rmdir ." (EINVAL), runs the
 * MAC unlink check, then requires KAUTH_VNODE_DELETE on the target.
 *
 * Returns 0 if the rmdir is permitted, otherwise an errno.
 */
int
vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved)
{
#if CONFIG_MACF
	int error;
#else
#pragma unused(cnp)
#endif
	if (reserved != NULL) {
		panic("Non-NULL reserved argument to vn_authorize_rmdir()");
	}

	if (vp->v_type != VDIR) {
		/*
		 * rmdir only deals with directories
		 */
		return ENOTDIR;
	}

	if (dvp == vp) {
		/*
		 * No rmdir "." please.
		 */
		return EINVAL;
	}

#if CONFIG_MACF
	error = mac_vnode_check_unlink(ctx, dvp,
	    vp, cnp);
	if (error) {
		return error;
	}
#endif

	return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
}
7671
7672 /*
7673 * Authorizer for directory cloning. This does not use vnodes but instead
7674 * uses prefilled vnode attributes from the filesystem.
7675 *
7676 * The same function is called to set up the attributes required, perform the
7677 * authorization and cleanup (if required)
7678 */
int
vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action,
    struct vnode_attr *dvap, __unused vnode_t sdvp, mount_t mp,
    dir_clone_authorizer_op_t vattr_op, uint32_t flags, vfs_context_t ctx,
    __unused void *reserved)
{
	int error;
	int is_suser = vfs_context_issuser(ctx);

	if (vattr_op == OP_VATTR_SETUP) {
		/*
		 * Setup phase: mark the attributes the caller must have the
		 * filesystem fill in before calling back to authorize.
		 */
		VATTR_INIT(vap);

		/*
		 * When ACL inheritence is implemented, both vap->va_acl and
		 * dvap->va_acl will be required (even as superuser).
		 */
		VATTR_WANTED(vap, va_type);
		VATTR_WANTED(vap, va_mode);
		VATTR_WANTED(vap, va_flags);
		VATTR_WANTED(vap, va_uid);
		VATTR_WANTED(vap, va_gid);
		if (dvap) {
			VATTR_INIT(dvap);
			VATTR_WANTED(dvap, va_flags);
		}

		if (!is_suser) {
			/*
			 * If not superuser, we have to evaluate ACLs and
			 * need the target directory gid to set the initial
			 * gid of the new object.
			 */
			VATTR_WANTED(vap, va_acl);
			if (dvap) {
				VATTR_WANTED(dvap, va_gid);
			}
		} else if (dvap && (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
			/* superuser not copying ownership still needs the parent gid */
			VATTR_WANTED(dvap, va_gid);
		}
		return 0;
	} else if (vattr_op == OP_VATTR_CLEANUP) {
		return 0; /* Nothing to do for now */
	}

	/* dvap isn't used for authorization */
	error = vnode_attr_authorize(vap, NULL, mp, action, ctx);

	if (error) {
		return error;
	}

	/*
	 * vn_attribute_prepare should be able to accept attributes as well as
	 * vnodes but for now we do this inline.
	 */
	if (!is_suser || (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
		/*
		 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit
		 * owner is set, that owner takes ownership of all new files.
		 */
		if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
		    (mp->mnt_fsowner != KAUTH_UID_NONE)) {
			VATTR_SET(vap, va_uid, mp->mnt_fsowner);
		} else {
			/* default owner is current user */
			VATTR_SET(vap, va_uid,
			    kauth_cred_getuid(vfs_context_ucred(ctx)));
		}

		if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
		    (mp->mnt_fsgroup != KAUTH_GID_NONE)) {
			VATTR_SET(vap, va_gid, mp->mnt_fsgroup);
		} else {
			/*
			 * default group comes from parent object,
			 * fallback to current user
			 */
			/*
			 * NOTE(review): dvap is dereferenced here (and in the
			 * va_flags inheritance below) without a NULL check;
			 * presumably callers on this path always supply parent
			 * directory attributes — TODO confirm against callers.
			 */
			if (VATTR_IS_SUPPORTED(dvap, va_gid)) {
				VATTR_SET(vap, va_gid, dvap->va_gid);
			} else {
				VATTR_SET(vap, va_gid,
				    kauth_cred_getgid(vfs_context_ucred(ctx)));
			}
		}
	}

	/* Inherit SF_RESTRICTED bit from destination directory only */
	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		VATTR_SET(vap, va_flags,
		    ((vap->va_flags & ~(UF_DATAVAULT | SF_RESTRICTED)))); /* Turn off from source */
		if (VATTR_IS_ACTIVE(dvap, va_flags)) {
			VATTR_SET(vap, va_flags,
			    vap->va_flags | (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
		}
	} else if (VATTR_IS_ACTIVE(dvap, va_flags)) {
		VATTR_SET(vap, va_flags, (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
	}

	return 0;
}
7779
7780
7781 /*
7782 * Authorize an operation on a vnode.
7783 *
7784 * This is KPI, but here because it needs vnode_scope.
7785 *
7786 * Returns: 0 Success
7787 * kauth_authorize_action:EPERM ...
7788 * xlate => EACCES Permission denied
7789 * kauth_authorize_action:0 Success
7790 * kauth_authorize_action: Depends on callback return; this is
7791 * usually only vnode_authorize_callback(),
7792 * but may include other listerners, if any
7793 * exist.
7794 * EROFS
7795 * EACCES
7796 * EPERM
7797 * ???
7798 */
7799 int
vnode_authorize(vnode_t vp,vnode_t dvp,kauth_action_t action,vfs_context_t ctx)7800 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
7801 {
7802 int error, result;
7803
7804 /*
7805 * We can't authorize against a dead vnode; allow all operations through so that
7806 * the correct error can be returned.
7807 */
7808 if (vp->v_type == VBAD) {
7809 return 0;
7810 }
7811
7812 error = 0;
7813 result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
7814 (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
7815 if (result == EPERM) { /* traditional behaviour */
7816 result = EACCES;
7817 }
7818 /* did the lower layers give a better error return? */
7819 if ((result != 0) && (error != 0)) {
7820 return error;
7821 }
7822 return result;
7823 }
7824
7825 /*
7826 * Test for vnode immutability.
7827 *
7828 * The 'append' flag is set when the authorization request is constrained
7829 * to operations which only request the right to append to a file.
7830 *
7831 * The 'ignore' flag is set when an operation modifying the immutability flags
7832 * is being authorized. We check the system securelevel to determine which
7833 * immutability flags we can ignore.
7834 */
7835 static int
vnode_immutable(struct vnode_attr * vap,int append,int ignore)7836 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
7837 {
7838 int mask;
7839
7840 /* start with all bits precluding the operation */
7841 mask = IMMUTABLE | APPEND;
7842
7843 /* if appending only, remove the append-only bits */
7844 if (append) {
7845 mask &= ~APPEND;
7846 }
7847
7848 /* ignore only set when authorizing flags changes */
7849 if (ignore) {
7850 if (securelevel <= 0) {
7851 /* in insecure state, flags do not inhibit changes */
7852 mask = 0;
7853 } else {
7854 /* in secure state, user flags don't inhibit */
7855 mask &= ~(UF_IMMUTABLE | UF_APPEND);
7856 }
7857 }
7858 KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
7859 if ((vap->va_flags & mask) != 0) {
7860 return EPERM;
7861 }
7862 return 0;
7863 }
7864
7865 static int
vauth_node_owner(struct vnode_attr * vap,kauth_cred_t cred)7866 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
7867 {
7868 int result;
7869
7870 /* default assumption is not-owner */
7871 result = 0;
7872
7873 /*
7874 * If the filesystem has given us a UID, we treat this as authoritative.
7875 */
7876 if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
7877 result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
7878 }
7879 /* we could test the owner UUID here if we had a policy for it */
7880
7881 return result;
7882 }
7883
7884 /*
7885 * vauth_node_group
7886 *
7887 * Description: Ask if a cred is a member of the group owning the vnode object
7888 *
7889 * Parameters: vap vnode attribute
7890 * vap->va_gid group owner of vnode object
7891 * cred credential to check
7892 * ismember pointer to where to put the answer
7893 * idontknow Return this if we can't get an answer
7894 *
7895 * Returns: 0 Success
7896 * idontknow Can't get information
 * kauth_cred_ismember_gid:?	Error from kauth subsystem
7899 */
static int
vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
{
	int error;
	int result;

	error = 0;
	result = 0;

	/*
	 * The caller is expected to have asked the filesystem for a group
	 * at some point prior to calling this function. The answer may
	 * have been that there is no group ownership supported for the
	 * vnode object, in which case we return (with *ismember = 0).
	 */
	if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
		error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
		/*
		 * Credentials which are opted into external group membership
		 * resolution which are not known to the external resolver
		 * will result in an ENOENT error. We translate this into
		 * the appropriate 'idontknow' response for our caller.
		 *
		 * XXX We do not make a distinction here between an ENOENT
		 * XXX arising from a response from the external resolver,
		 * XXX and an ENOENT which is internally generated. This is
		 * XXX a deficiency of the published kauth_cred_ismember_gid()
		 * XXX KPI which can not be overcome without new KPI. For
		 * XXX all currently known cases, however, this will result
		 * XXX in correct behaviour.
		 */
		if (error == ENOENT) {
			error = idontknow;
		}
	}
	/*
	 * XXX We could test the group UUID here if we had a policy for it,
	 * XXX but this is problematic from the perspective of synchronizing
	 * XXX group UUID and POSIX GID ownership of a file and keeping the
	 * XXX values coherent over time. The problem is that the local
	 * XXX system will vend transient group UUIDs for unknown POSIX GID
	 * XXX values, and these are not persistent, whereas storage of values
	 * XXX is persistent. One potential solution to this is a local
	 * XXX (persistent) replica of remote directory entries and vended
	 * XXX local ids in a local directory server (think in terms of a
	 * XXX caching DNS server).
	 */

	/* only publish an answer when we actually obtained one */
	if (!error) {
		*ismember = result;
	}
	return error;
}
7953
7954 static int
vauth_file_owner(vauth_ctx vcp)7955 vauth_file_owner(vauth_ctx vcp)
7956 {
7957 int result;
7958
7959 if (vcp->flags_valid & _VAC_IS_OWNER) {
7960 result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
7961 } else {
7962 result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
7963
7964 /* cache our result */
7965 vcp->flags_valid |= _VAC_IS_OWNER;
7966 if (result) {
7967 vcp->flags |= _VAC_IS_OWNER;
7968 } else {
7969 vcp->flags &= ~_VAC_IS_OWNER;
7970 }
7971 }
7972 return result;
7973 }
7974
7975
7976 /*
7977 * vauth_file_ingroup
7978 *
 * Description:	Ask if a user is a member of the group owning the file
 *
 * Parameters:		vcp		The vnode authorization context that
 *					contains the user and file info
 *			vcp->flags_valid	Valid flags
 *			vcp->flags		Flags values
 *			vcp->vap		File vnode attributes
7986 * vcp->ctx VFS Context (for user)
7987 * ismember pointer to where to put the answer
7988 * idontknow Return this if we can't get an answer
7989 *
7990 * Returns: 0 Success
7991 * vauth_node_group:? Error from vauth_node_group()
7992 *
7993 * Implicit returns: *ismember 0 The user is not a group member
7994 * 1 The user is a group member
7995 */
7996 static int
vauth_file_ingroup(vauth_ctx vcp,int * ismember,int idontknow)7997 vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
7998 {
7999 int error;
8000
8001 /* Check for a cached answer first, to avoid the check if possible */
8002 if (vcp->flags_valid & _VAC_IN_GROUP) {
8003 *ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
8004 error = 0;
8005 } else {
8006 /* Otherwise, go look for it */
8007 error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);
8008
8009 if (!error) {
8010 /* cache our result */
8011 vcp->flags_valid |= _VAC_IN_GROUP;
8012 if (*ismember) {
8013 vcp->flags |= _VAC_IN_GROUP;
8014 } else {
8015 vcp->flags &= ~_VAC_IN_GROUP;
8016 }
8017 }
8018 }
8019 return error;
8020 }
8021
8022 static int
vauth_dir_owner(vauth_ctx vcp)8023 vauth_dir_owner(vauth_ctx vcp)
8024 {
8025 int result;
8026
8027 if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
8028 result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
8029 } else {
8030 result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
8031
8032 /* cache our result */
8033 vcp->flags_valid |= _VAC_IS_DIR_OWNER;
8034 if (result) {
8035 vcp->flags |= _VAC_IS_DIR_OWNER;
8036 } else {
8037 vcp->flags &= ~_VAC_IS_DIR_OWNER;
8038 }
8039 }
8040 return result;
8041 }
8042
8043 /*
8044 * vauth_dir_ingroup
8045 *
8046 * Description: Ask if a user is a member of the group owning the directory
8047 *
8048 * Parameters: vcp The vnode authorization context that
8049 * contains the user and directory info
8050 * vcp->flags_valid Valid flags
8051 * vcp->flags Flags values
8052 * vcp->dvap Dir vnode attributes
8053 * vcp->ctx VFS Context (for user)
8054 * ismember pointer to where to put the answer
8055 * idontknow Return this if we can't get an answer
8056 *
8057 * Returns: 0 Success
8058 * vauth_node_group:? Error from vauth_node_group()
8059 *
8060 * Implicit returns: *ismember 0 The user is not a group member
8061 * 1 The user is a group member
8062 */
8063 static int
vauth_dir_ingroup(vauth_ctx vcp,int * ismember,int idontknow)8064 vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
8065 {
8066 int error;
8067
8068 /* Check for a cached answer first, to avoid the check if possible */
8069 if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
8070 *ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
8071 error = 0;
8072 } else {
8073 /* Otherwise, go look for it */
8074 error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);
8075
8076 if (!error) {
8077 /* cache our result */
8078 vcp->flags_valid |= _VAC_IN_DIR_GROUP;
8079 if (*ismember) {
8080 vcp->flags |= _VAC_IN_DIR_GROUP;
8081 } else {
8082 vcp->flags &= ~_VAC_IN_DIR_GROUP;
8083 }
8084 }
8085 }
8086 return error;
8087 }
8088
8089 /*
8090 * Test the posix permissions in (vap) to determine whether (credential)
8091 * may perform (action)
8092 */
static int
vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
{
	struct vnode_attr *vap;
	int needed, error, owner_ok, group_ok, world_ok, ismember;
#ifdef KAUTH_DEBUG_ENABLE
	const char *where = "uninitialized";
# define _SETWHERE(c) where = c;
#else
# define _SETWHERE(c)
#endif

	/* checking file or directory? */
	if (on_dir) {
		vap = vcp->dvap;
	} else {
		vap = vcp->vap;
	}

	error = 0;

	/*
	 * We want to do as little work here as possible. So first we check
	 * which sets of permissions grant us the access we need, and avoid checking
	 * whether specific permissions grant access when more generic ones would.
	 */

	/* owner permissions: mode bits needed if acting as owner */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IRUSR;
	}
	if (action & VWRITE) {
		needed |= S_IWUSR;
	}
	if (action & VEXEC) {
		needed |= S_IXUSR;
	}
	owner_ok = (needed & vap->va_mode) == needed;

	/*
	 * Processes with the appropriate entitlement can marked themselves as
	 * ignoring file/directory permissions if they own it.
	 */
	if (!owner_ok && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
		owner_ok = 1;
	}

	/* group permissions: mode bits needed if acting as a group member */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IRGRP;
	}
	if (action & VWRITE) {
		needed |= S_IWGRP;
	}
	if (action & VEXEC) {
		needed |= S_IXGRP;
	}
	group_ok = (needed & vap->va_mode) == needed;

	/* world permissions: mode bits needed for everyone else */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IROTH;
	}
	if (action & VWRITE) {
		needed |= S_IWOTH;
	}
	if (action & VEXEC) {
		needed |= S_IXOTH;
	}
	world_ok = (needed & vap->va_mode) == needed;

	/* If granted/denied by all three, we're done */
	if (owner_ok && group_ok && world_ok) {
		_SETWHERE("all");
		goto out;
	}

	if (!owner_ok && !group_ok && !world_ok) {
		_SETWHERE("all");
		error = EACCES;
		goto out;
	}

	/* Check ownership (relatively cheap) */
	if ((on_dir && vauth_dir_owner(vcp)) ||
	    (!on_dir && vauth_file_owner(vcp))) {
		_SETWHERE("user");
		if (!owner_ok) {
			error = EACCES;
		}
		goto out;
	}

	/* Not owner; if group and world both grant it we're done */
	if (group_ok && world_ok) {
		_SETWHERE("group/world");
		goto out;
	}
	if (!group_ok && !world_ok) {
		_SETWHERE("group/world");
		error = EACCES;
		goto out;
	}

	/* Check group membership (most expensive) */
	ismember = 0; /* Default to allow, if the target has no group owner */

	/*
	 * In the case we can't get an answer about the user from the call to
	 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
	 * the side of caution, rather than simply granting access, or we will
	 * fail to correctly implement exclusion groups, so we set the third
	 * parameter on the basis of the state of 'group_ok'.
	 */
	if (on_dir) {
		error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
	} else {
		error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
	}
	if (error) {
		/*
		 * Membership unknown: treat the caller as a member exactly
		 * when that is the conservative (denying) interpretation.
		 */
		if (!group_ok) {
			ismember = 1;
		}
		error = 0;
	}
	if (ismember) {
		_SETWHERE("group");
		if (!group_ok) {
			error = EACCES;
		}
		goto out;
	}

	/* Not owner, not in group, use world result */
	_SETWHERE("world");
	if (!world_ok) {
		error = EACCES;
	}

	/* FALLTHROUGH */

out:
	KAUTH_DEBUG("%p    %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
	    vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
	    (action & VREAD) ? "r" : "-",
	    (action & VWRITE) ? "w" : "-",
	    (action & VEXEC) ? "x" : "-",
	    needed,
	    (vap->va_mode & S_IRUSR) ? "r" : "-",
	    (vap->va_mode & S_IWUSR) ? "w" : "-",
	    (vap->va_mode & S_IXUSR) ? "x" : "-",
	    (vap->va_mode & S_IRGRP) ? "r" : "-",
	    (vap->va_mode & S_IWGRP) ? "w" : "-",
	    (vap->va_mode & S_IXGRP) ? "x" : "-",
	    (vap->va_mode & S_IROTH) ? "r" : "-",
	    (vap->va_mode & S_IWOTH) ? "w" : "-",
	    (vap->va_mode & S_IXOTH) ? "x" : "-",
	    kauth_cred_getuid(vcp->ctx->vc_ucred),
	    on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
	    on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
	return error;
}
8258
8259 /*
8260 * Authorize the deletion of the node vp from the directory dvp.
8261 *
8262 * We assume that:
8263 * - Neither the node nor the directory are immutable.
8264 * - The user is not the superuser.
8265 *
8266 * The precedence of factors for authorizing or denying delete for a credential
8267 *
8268 * 1) Explicit ACE on the node. (allow or deny DELETE)
8269 * 2) Explicit ACE on the directory (allow or deny DELETE_CHILD).
8270 *
8271 * If there are conflicting ACEs on the node and the directory, the node
8272 * ACE wins.
8273 *
8274 * 3) Sticky bit on the directory.
8275 * Deletion is not permitted if the directory is sticky and the caller is
8276 * not owner of the node or directory. The sticky bit rules are like a deny
8277 * delete ACE except lower in priority than ACL's either allowing or denying
8278 * delete.
8279 *
8280 * 4) POSIX permisions on the directory.
8281 *
8282 * As an optimization, we cache whether or not delete child is permitted
8283 * on directories. This enables us to skip directory ACL and POSIX checks
8284 * as we already have the result from those checks. However, we always check the
8285 * node ACL and, if the directory has the sticky bit set, we always check its
8286 * ACL (even for a directory with an authorized delete child). Furthermore,
8287 * caching the delete child authorization is independent of the sticky bit
8288 * being set as it is only applicable in determining whether the node can be
8289 * deleted or not.
8290 */
static int
vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
{
	struct vnode_attr *vap = vcp->vap;
	struct vnode_attr *dvap = vcp->dvap;
	kauth_cred_t cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval eval;
	int error, ismember;

	/* Check the ACL on the node first: a node ACE outranks everything below */
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer. Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		/* expansions used when an ACE carries generic rights bits */
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}

		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* entitled owners may override an ACL deny */
			if (vauth_file_owner(vcp) && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
				KAUTH_DEBUG("%p    Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p    DENIED - denied by ACL", vcp->vp);
			return EACCES;
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p    ALLOWED - granted by ACL", vcp->vp);
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* Defer to directory */
			KAUTH_DEBUG("%p    DEFERRED - by file ACL", vcp->vp);
			break;
		}
	}

	/*
	 * Without a sticky bit, a previously authorized delete child is
	 * sufficient to authorize this delete.
	 *
	 * If the sticky bit is set, a directory ACL which allows delete child
	 * overrides a (potential) sticky bit deny. The authorized delete child
	 * cannot tell us if it was authorized because of an explicit delete
	 * child allow ACE or because of POSIX permisions so we have to check
	 * the directory ACL everytime if the directory has a sticky bit.
	 */
	if (!(dvap->va_mode & S_ISTXT) && cached_delete_child) {
		KAUTH_DEBUG("%p    ALLOWED - granted by directory ACL or POSIX permissions and no sticky bit on directory", vcp->vp);
		return 0;
	}

	/* check the ACL on the directory */
	if (VATTR_IS_NOT(dvap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
		eval.ae_acl = &dvap->va_acl->acl_ace[0];
		eval.ae_count = dvap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_dir_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer. Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		/*
		 * If there is no entry, we are going to defer to other
		 * authorization mechanisms.
		 */
		error = kauth_acl_evaluate(cred, &eval);

		if (error != 0) {
			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}
		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* entitled directory owners may override an ACL deny */
			if (vauth_dir_owner(vcp) && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
				KAUTH_DEBUG("%p    Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p    DENIED - denied by directory ACL", vcp->vp);
			return EACCES;
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p    ALLOWED - granted by directory ACL", vcp->vp);
			/* cache delete-child so later deletes in dvp skip these checks */
			if (!cached_delete_child && vcp->dvp) {
				vnode_cache_authorized_action(vcp->dvp,
				    vcp->ctx, KAUTH_VNODE_DELETE_CHILD);
			}
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* Deferred by directory ACL */
			KAUTH_DEBUG("%p    DEFERRED - directory ACL", vcp->vp);
			break;
		}
	}

	/*
	 * From this point, we can't explicitly allow and if we reach the end
	 * of the function without a denial, then the delete is authorized.
	 */
	if (!cached_delete_child) {
		/* POSIX: deletion requires write permission on the parent */
		if (vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */) != 0) {
			KAUTH_DEBUG("%p    DENIED - denied by posix permisssions", vcp->vp);
			return EACCES;
		}
		/*
		 * Cache the authorized action on the vnode if allowed by the
		 * directory ACL or POSIX permissions. It is correct to cache
		 * this action even if sticky bit would deny deleting the node.
		 */
		if (vcp->dvp) {
			vnode_cache_authorized_action(vcp->dvp, vcp->ctx,
			    KAUTH_VNODE_DELETE_CHILD);
		}
	}

	/* enforce sticky bit behaviour: only node/dir owner may delete */
	if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
		KAUTH_DEBUG("%p    DENIED - sticky bit rules (user %d  file %d  dir %d)",
		    vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid);
		return EACCES;
	}

	/* not denied, must be OK */
	return 0;
}
8458
8459
8460 /*
8461 * Authorize an operation based on the node's attributes.
8462 */
static int
vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
{
	struct vnode_attr *vap = vcp->vap;
	kauth_cred_t cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval eval;
	int error, ismember;
	mode_t posix_action;

	/*
	 * If we are the file owner, we automatically have some rights.
	 *
	 * Do we need to expand this to support group ownership?
	 */
	if (vauth_file_owner(vcp)) {
		acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
	}

	/*
	 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
	 * mask the latter. If TAKE_OWNERSHIP is requested the caller is about to
	 * change ownership to themselves, and WRITE_SECURITY is implicitly
	 * granted to the owner. We need to do this because at this point
	 * WRITE_SECURITY may not be granted as the caller is not currently
	 * the owner.
	 */
	if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
	    (acl_rights & KAUTH_VNODE_WRITE_SECURITY)) {
		acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
	}

	/* nothing left to check? */
	if (acl_rights == 0) {
		KAUTH_DEBUG("%p    ALLOWED - implicit or no rights required", vcp->vp);
		return 0;
	}

	/* if we have an ACL, evaluate it */
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = acl_rights;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer. Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		/* expansions used when an ACE carries generic rights bits */
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}

		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* entitled owners may override an ACL deny */
			if (vauth_file_owner(vcp) && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
				KAUTH_DEBUG("%p    Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p    DENIED - by ACL", vcp->vp);
			return EACCES;                  /* deny, deny, counter-allege */
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p    ALLOWED - all rights granted by ACL", vcp->vp);
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* Effectively the same as !delete_child_denied */
			KAUTH_DEBUG("%p    DEFERRED - directory ACL", vcp->vp);
			break;
		}

		/*
		 * NOTE(review): *found_deny is updated only on the DEFER path;
		 * the DENY/ALLOW cases return before reaching this assignment.
		 * Presumably callers only consult it after a deferral — TODO
		 * confirm against callers.
		 */
		*found_deny = eval.ae_found_deny;

		/* fall through and evaluate residual rights */
	} else {
		/* no ACL, everything is residual */
		eval.ae_residual = acl_rights;
	}

	/*
	 * Grant residual rights that have been pre-authorized.
	 */
	eval.ae_residual &= ~preauth_rights;

	/*
	 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
	 */
	if (vauth_file_owner(vcp)) {
		eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
	}

	if (eval.ae_residual == 0) {
		KAUTH_DEBUG("%p    ALLOWED - rights already authorized", vcp->vp);
		return 0;
	}

	/*
	 * Bail if we have residual rights that can't be granted by posix permissions,
	 * or aren't presumed granted at this point.
	 *
	 * XXX these can be collapsed for performance
	 */
	if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
		KAUTH_DEBUG("%p    DENIED - CHANGE_OWNER not permitted", vcp->vp);
		return EACCES;
	}
	if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
		KAUTH_DEBUG("%p    DENIED - WRITE_SECURITY not permitted", vcp->vp);
		return EACCES;
	}

#if DIAGNOSTIC
	if (eval.ae_residual & KAUTH_VNODE_DELETE) {
		panic("vnode_authorize: can't be checking delete permission here");
	}
#endif

	/*
	 * Compute the fallback posix permissions that will satisfy the remaining
	 * rights.
	 */
	posix_action = 0;
	if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
	    KAUTH_VNODE_LIST_DIRECTORY |
	    KAUTH_VNODE_READ_EXTATTRIBUTES)) {
		posix_action |= VREAD;
	}
	if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
	    KAUTH_VNODE_ADD_FILE |
	    KAUTH_VNODE_ADD_SUBDIRECTORY |
	    KAUTH_VNODE_DELETE_CHILD |
	    KAUTH_VNODE_WRITE_ATTRIBUTES |
	    KAUTH_VNODE_WRITE_EXTATTRIBUTES)) {
		posix_action |= VWRITE;
	}
	if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
	    KAUTH_VNODE_SEARCH)) {
		posix_action |= VEXEC;
	}

	if (posix_action != 0) {
		return vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */);
	} else {
		KAUTH_DEBUG("%p    ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
		    vcp->vp,
		    (eval.ae_residual & KAUTH_VNODE_READ_DATA)
		    ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
		    ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_EXECUTE)
		    ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
		    (eval.ae_residual & KAUTH_VNODE_DELETE)
		    ? " DELETE" : "",
		    (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
		    ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
		    ? " DELETE_CHILD" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
		    ? " READ_ATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
		    ? " WRITE_ATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
		    ? " READ_EXTATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
		    ? " WRITE_EXTATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
		    ? " READ_SECURITY" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
		    ? " WRITE_SECURITY" : "",
		    (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
		    ? " CHECKIMMUTABLE" : "",
		    (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
		    ? " CHANGE_OWNER" : "");
	}

	/*
	 * Lack of required Posix permissions implies no reason to deny access.
	 */
	return 0;
}
8659
8660 /*
8661 * Check for file immutability.
8662 */
8663 static int
vnode_authorize_checkimmutable(mount_t mp,vauth_ctx vcp,struct vnode_attr * vap,int rights,int ignore)8664 vnode_authorize_checkimmutable(mount_t mp, vauth_ctx vcp,
8665 struct vnode_attr *vap, int rights, int ignore)
8666 {
8667 int error;
8668 int append;
8669
8670 /*
8671 * Perform immutability checks for operations that change data.
8672 *
8673 * Sockets, fifos and devices require special handling.
8674 */
8675 switch (vap->va_type) {
8676 case VSOCK:
8677 case VFIFO:
8678 case VBLK:
8679 case VCHR:
8680 /*
8681 * Writing to these nodes does not change the filesystem data,
8682 * so forget that it's being tried.
8683 */
8684 rights &= ~KAUTH_VNODE_WRITE_DATA;
8685 break;
8686 default:
8687 break;
8688 }
8689
8690 error = 0;
8691 if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
8692 /* check per-filesystem options if possible */
8693 if (mp != NULL) {
8694 /* check for no-EA filesystems */
8695 if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
8696 (vfs_flags(mp) & MNT_NOUSERXATTR)) {
8697 KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vap);
8698 error = EACCES; /* User attributes disabled */
8699 goto out;
8700 }
8701 }
8702
8703 /*
8704 * check for file immutability. first, check if the requested rights are
8705 * allowable for a UF_APPEND file.
8706 */
8707 append = 0;
8708 if (vap->va_type == VDIR) {
8709 if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights) {
8710 append = 1;
8711 }
8712 } else {
8713 if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights) {
8714 append = 1;
8715 }
8716 }
8717 if ((error = vnode_immutable(vap, append, ignore)) != 0) {
8718 if (error && !ignore) {
8719 /*
8720 * In case of a rename, we want to check ownership for dvp as well.
8721 */
8722 int owner = 0;
8723 if (rights & KAUTH_VNODE_DELETE_CHILD && vcp->dvp != NULL) {
8724 owner = vauth_file_owner(vcp) && vauth_dir_owner(vcp);
8725 } else {
8726 owner = vauth_file_owner(vcp);
8727 }
8728 if (owner && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
8729 error = vnode_immutable(vap, append, 1);
8730 }
8731 }
8732 }
8733 if (error) {
8734 KAUTH_DEBUG("%p DENIED - file is immutable", vap);
8735 goto out;
8736 }
8737 }
8738 out:
8739 return error;
8740 }
8741
8742 /*
8743 * Handle authorization actions for filesystems that advertise that the
8744 * server will be enforcing.
8745 *
8746 * Returns: 0 Authorization should be handled locally
8747 * 1 Authorization was handled by the FS
8748 *
8749 * Note: Imputed returns will only occur if the authorization request
8750 * was handled by the FS.
8751 *
8752 * Imputed: *resultp, modified Return code from FS when the request is
8753 * handled by the FS.
8754 * VNOP_ACCESS:???
8755 * VNOP_OPEN:???
8756 */
8757 static int
vnode_authorize_opaque(vnode_t vp,int * resultp,kauth_action_t action,vfs_context_t ctx)8758 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
8759 {
8760 int error;
8761
8762 /*
8763 * If the vp is a device node, socket or FIFO it actually represents a local
8764 * endpoint, so we need to handle it locally.
8765 */
8766 switch (vp->v_type) {
8767 case VBLK:
8768 case VCHR:
8769 case VSOCK:
8770 case VFIFO:
8771 return 0;
8772 default:
8773 break;
8774 }
8775
8776 /*
8777 * In the advisory request case, if the filesystem doesn't think it's reliable
8778 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
8779 */
8780 if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount)) {
8781 return 0;
8782 }
8783
8784 /*
8785 * Let the filesystem have a say in the matter. It's OK for it to not implemnent
8786 * VNOP_ACCESS, as most will authorise inline with the actual request.
8787 */
8788 if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
8789 *resultp = error;
8790 KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
8791 return 1;
8792 }
8793
8794 /*
8795 * Typically opaque filesystems do authorisation in-line, but exec is a special case. In
8796 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
8797 */
8798 if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
8799 /* try a VNOP_OPEN for readonly access */
8800 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
8801 *resultp = error;
8802 KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
8803 return 1;
8804 }
8805 VNOP_CLOSE(vp, FREAD, ctx);
8806 }
8807
8808 /*
8809 * We don't have any reason to believe that the request has to be denied at this point,
8810 * so go ahead and allow it.
8811 */
8812 *resultp = 0;
8813 KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
8814 return 1;
8815 }
8816
8817
8818
8819
/*
 * kauth listener callback for the vnode scope: entry point for all vnode
 * authorization requests.
 *
 * arg0: vfs_context_t for the request
 * arg1: vnode_t the action applies to
 * arg2: parent directory vnode_t (delete/rename cases), or NULLVP
 * arg3: pointer to an int that receives the errno on deny
 *
 * Returns: KAUTH_RESULT_ALLOW
 * KAUTH_RESULT_DENY
 *
 * Imputed: *arg3, modified Error code in the deny case
 * EROFS Read-only file system
 * EACCES Permission denied
 * EPERM Operation not permitted [no execute]
 * vnode_getattr:ENOMEM Not enough space [only if has filesec]
 * vnode_getattr:???
 * vnode_authorize_opaque:*arg2 ???
 * vnode_authorize_checkimmutable:???
 * vnode_authorize_delete:???
 * vnode_authorize_simple:???
 */


static int
vnode_authorize_callback(__unused kauth_cred_t cred, __unused void *idata,
    kauth_action_t action, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2,
    uintptr_t arg3)
{
	vfs_context_t ctx;
	vnode_t cvp = NULLVP;           /* vnode whose rights cache is consulted/updated */
	vnode_t vp, dvp;
	int result = KAUTH_RESULT_DENY;
	int parent_iocount = 0;         /* nonzero if we hold an iocount on the parent */
	int parent_action; /* In case we need to use namedstream's data fork for cached rights*/

	ctx = (vfs_context_t)arg0;
	vp = (vnode_t)arg1;
	dvp = (vnode_t)arg2;

	/*
	 * if there are 2 vnodes passed in, we don't know at
	 * this point which rights to look at based on the
	 * combined action being passed in... defer until later...
	 * otherwise check the kauth 'rights' cache hung
	 * off of the vnode we're interested in... if we've already
	 * been granted the right we're currently interested in,
	 * we can just return success... otherwise we'll go through
	 * the process of authorizing the requested right(s)... if that
	 * succeeds, we'll add the right(s) to the cache.
	 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
	 */
	if (dvp && vp) {
		goto defer;
	}
	if (dvp) {
		cvp = dvp;
	} else {
		/*
		 * For named streams on local-authorization volumes, rights are cached on the parent;
		 * authorization is determined by looking at the parent's properties anyway, so storing
		 * on the parent means that we don't recompute for the named stream and that if
		 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
		 * stream to flush its cache separately. If we miss in the cache, then we authorize
		 * as if there were no cached rights (passing the named stream vnode and desired rights to
		 * vnode_authorize_callback_int()).
		 *
		 * On an opaquely authorized volume, we don't know the relationship between the
		 * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
		 * on such a volume are authorized directly (rather than using the parent) and have their
		 * own caches. When a named stream vnode is created, we mark the parent as having a named
		 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
		 * find the stream and flush its cache.
		 */
		if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
			/* vnode_getparent() takes an iocount we must drop at 'out' */
			cvp = vnode_getparent(vp);
			if (cvp != NULLVP) {
				parent_iocount = 1;
			} else {
				cvp = NULL;
				goto defer; /* If we can't use the parent, take the slow path */
			}

			/* Have to translate some actions */
			parent_action = action;
			if (parent_action & KAUTH_VNODE_READ_DATA) {
				parent_action &= ~KAUTH_VNODE_READ_DATA;
				parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
			}
			if (parent_action & KAUTH_VNODE_WRITE_DATA) {
				parent_action &= ~KAUTH_VNODE_WRITE_DATA;
				parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
			}
		} else {
			cvp = vp;
		}
	}

	/* fast path: previously-authorized rights satisfy this request */
	if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
		result = KAUTH_RESULT_ALLOW;
		goto out;
	}
defer:
	/* slow path: full authorization; on success, populate the cache */
	result = vnode_authorize_callback_int(action, ctx, vp, dvp, (int *)arg3);

	if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP) {
		KAUTH_DEBUG("%p - caching action = %x", cvp, action);
		vnode_cache_authorized_action(cvp, ctx, action);
	}

out:
	if (parent_iocount) {
		vnode_put(cvp);
	}

	return result;
}
8930
/*
 * Common authorization core shared by vnode_authorize_callback_int() and
 * vnode_attr_authorize(): performs immutability checks (including, for
 * deletes, the parent directory's immutability), then evaluates the
 * remaining rights against the node's attributes — or grants them to the
 * superuser, modulo the execute-bit check.
 *
 * 'mp' may be NULL when no mount-specific checks apply (attr-only path).
 * '*found_deny' is set TRUE when a DENY was (or must be assumed to have
 * been) encountered; callers use it to decide whether SEARCHBYANYONE may
 * be cached.
 *
 * Returns 0 if authorized, or an errno on denial.
 */
static int
vnode_attr_authorize_internal(vauth_ctx vcp, mount_t mp,
    kauth_ace_rights_t rights, int is_suser, boolean_t *found_deny,
    int noimmutable, int parent_authorized_for_delete_child)
{
	int result;

	/*
	 * Check for immutability.
	 *
	 * In the deletion case, parent directory immutability vetoes specific
	 * file rights.
	 */
	if ((result = vnode_authorize_checkimmutable(mp, vcp, vcp->vap, rights,
	    noimmutable)) != 0) {
		goto out;
	}

	if ((rights & KAUTH_VNODE_DELETE) &&
	    !parent_authorized_for_delete_child) {
		/* immutability of the parent is never overridable here (ignore == 0) */
		result = vnode_authorize_checkimmutable(mp, vcp, vcp->dvap,
		    KAUTH_VNODE_DELETE_CHILD, 0);
		if (result) {
			goto out;
		}
	}

	/*
	 * Clear rights that have been authorized by reaching this point, bail if nothing left to
	 * check.
	 */
	rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
	if (rights == 0) {
		goto out;
	}

	/*
	 * If we're not the superuser, authorize based on file properties;
	 * note that even if parent_authorized_for_delete_child is TRUE, we
	 * need to check on the node itself.
	 */
	if (!is_suser) {
		/* process delete rights */
		if ((rights & KAUTH_VNODE_DELETE) &&
		    ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0)) {
			goto out;
		}

		/* process remaining rights */
		if ((rights & ~KAUTH_VNODE_DELETE) &&
		    (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, found_deny)) != 0) {
			goto out;
		}
	} else {
		/*
		 * Execute is only granted to root if one of the x bits is set. This check only
		 * makes sense if the posix mode bits are actually supported.
		 */
		if ((rights & KAUTH_VNODE_EXECUTE) &&
		    (vcp->vap->va_type == VREG) &&
		    VATTR_IS_SUPPORTED(vcp->vap, va_mode) &&
		    !(vcp->vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
			result = EPERM;
			KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vcp, vcp->vap->va_mode);
			goto out;
		}

		/* Assume that there were DENYs so we don't wrongly cache KAUTH_VNODE_SEARCHBYANYONE */
		*found_deny = TRUE;

		KAUTH_DEBUG("%p ALLOWED - caller is superuser", vcp);
	}
out:
	return result;
}
9006
/*
 * Slow-path vnode authorization: evaluates 'action' against vp (and, for
 * deletes, the parent dvp) by fetching the needed attributes and handing
 * off to vnode_attr_authorize_internal().  Also handles read-only/noexec
 * mounts, opaquely-authorizing filesystems, named-stream right
 * translation, and caching of the SEARCHBYANYONE synthesized right.
 *
 * On deny, *errorp receives the errno and KAUTH_RESULT_DENY is returned;
 * otherwise KAUTH_RESULT_ALLOW.
 */
static int
vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
    vnode_t vp, vnode_t dvp, int *errorp)
{
	struct _vnode_authorize_context auth_context;
	vauth_ctx vcp;
	kauth_cred_t cred;
	kauth_ace_rights_t rights;
	struct vnode_attr va, dva;
	int result;
	int noimmutable;
	boolean_t parent_authorized_for_delete_child = FALSE;
	boolean_t found_deny = FALSE;
	boolean_t parent_ref = FALSE;   /* TRUE when we took an iocount on vp's parent (named stream) */
	boolean_t is_suser = FALSE;

	vcp = &auth_context;
	vcp->ctx = ctx;
	vcp->vp = vp;
	vcp->dvp = dvp;
	/*
	 * Note that we authorize against the context, not the passed cred
	 * (the same thing anyway)
	 */
	cred = ctx->vc_ucred;

	VATTR_INIT(&va);
	vcp->vap = &va;
	VATTR_INIT(&dva);
	vcp->dvap = &dva;

	vcp->flags = vcp->flags_valid = 0;

#if DIAGNOSTIC
	if ((ctx == NULL) || (vp == NULL) || (cred == NULL)) {
		panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx, vp, cred);
	}
#endif

	KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
	    vp, vfs_context_proc(ctx)->p_comm,
	    (action & KAUTH_VNODE_ACCESS) ? "access" : "auth",
	    (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
	    (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
	    (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
	    (action & KAUTH_VNODE_DELETE) ? " DELETE" : "",
	    (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
	    (action & KAUTH_VNODE_DELETE_CHILD) ? " DELETE_CHILD" : "",
	    (action & KAUTH_VNODE_READ_ATTRIBUTES) ? " READ_ATTRIBUTES" : "",
	    (action & KAUTH_VNODE_WRITE_ATTRIBUTES) ? " WRITE_ATTRIBUTES" : "",
	    (action & KAUTH_VNODE_READ_EXTATTRIBUTES) ? " READ_EXTATTRIBUTES" : "",
	    (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES) ? " WRITE_EXTATTRIBUTES" : "",
	    (action & KAUTH_VNODE_READ_SECURITY) ? " READ_SECURITY" : "",
	    (action & KAUTH_VNODE_WRITE_SECURITY) ? " WRITE_SECURITY" : "",
	    (action & KAUTH_VNODE_CHANGE_OWNER) ? " CHANGE_OWNER" : "",
	    (action & KAUTH_VNODE_NOIMMUTABLE) ? " (noimmutable)" : "",
	    vnode_isdir(vp) ? "directory" : "file",
	    vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);

	/*
	 * Extract the control bits from the action, everything else is
	 * requested rights.
	 */
	noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
	rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);

	if (rights & KAUTH_VNODE_DELETE) {
#if DIAGNOSTIC
		if (dvp == NULL) {
			panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
		}
#endif
		/*
		 * check to see if we've already authorized the parent
		 * directory for deletion of its children... if so, we
		 * can skip a whole bunch of work... we will still have to
		 * authorize that this specific child can be removed
		 */
		if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE) {
			parent_authorized_for_delete_child = TRUE;
		}
	} else {
		/* not a delete: the parent plays no role in the evaluation */
		vcp->dvp = NULLVP;
		vcp->dvap = NULL;
	}

	/*
	 * Check for read-only filesystems.
	 */
	if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
	    (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
	    (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
		result = EROFS;
		goto out;
	}

	/*
	 * Check for noexec filesystems.
	 */
	if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
		result = EACCES;
		goto out;
	}

	/*
	 * Handle cases related to filesystems with non-local enforcement.
	 * This call can return 0, in which case we will fall through to perform a
	 * check based on VNOP_GETATTR data. Otherwise it returns 1 and sets
	 * an appropriate result, at which point we can return immediately.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx)) {
		goto out;
	}

	/*
	 * If the vnode is a namedstream (extended attribute) data vnode (eg.
	 * a resource fork), *_DATA becomes *_EXTATTRIBUTES.
	 */
	if (vnode_isnamedstream(vp)) {
		if (rights & KAUTH_VNODE_READ_DATA) {
			rights &= ~KAUTH_VNODE_READ_DATA;
			rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
		}
		if (rights & KAUTH_VNODE_WRITE_DATA) {
			rights &= ~KAUTH_VNODE_WRITE_DATA;
			rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
		}

		/*
		 * Point 'vp' to the namedstream's parent for ACL checking
		 */
		if ((vp->v_parent != NULL) &&
		    (vget_internal(vp->v_parent, 0, VNODE_NODEAD | VNODE_DRAINO) == 0)) {
			parent_ref = TRUE;      /* must vnode_put() on the way out */
			vcp->vp = vp = vp->v_parent;
		}
	}

	if (vfs_context_issuser(ctx)) {
		/*
		 * if we're not asking for execute permissions or modifications,
		 * then we're done, this action is authorized.
		 */
		if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
			goto success;
		}

		is_suser = TRUE;
	}

	/*
	 * Get vnode attributes and extended security information for the vnode
	 * and directory if required.
	 *
	 * If we're root we only want mode bits and flags for checking
	 * execute and immutability.
	 */
	VATTR_WANTED(&va, va_mode);
	VATTR_WANTED(&va, va_flags);
	if (!is_suser) {
		VATTR_WANTED(&va, va_uid);
		VATTR_WANTED(&va, va_gid);
		VATTR_WANTED(&va, va_acl);
	}
	if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
		KAUTH_DEBUG("%p ERROR - failed to get vnode attributes - %d", vp, result);
		goto out;
	}
	VATTR_WANTED(&va, va_type);
	VATTR_RETURN(&va, va_type, vnode_vtype(vp));

	if (vcp->dvp) {
		VATTR_WANTED(&dva, va_mode);
		VATTR_WANTED(&dva, va_flags);
		if (!is_suser) {
			VATTR_WANTED(&dva, va_uid);
			VATTR_WANTED(&dva, va_gid);
			VATTR_WANTED(&dva, va_acl);
		}
		if ((result = vnode_getattr(vcp->dvp, &dva, ctx)) != 0) {
			KAUTH_DEBUG("%p ERROR - failed to get directory vnode attributes - %d", vp, result);
			goto out;
		}
		VATTR_WANTED(&dva, va_type);
		VATTR_RETURN(&dva, va_type, vnode_vtype(vcp->dvp));
	}

	result = vnode_attr_authorize_internal(vcp, vp->v_mount, rights, is_suser,
	    &found_deny, noimmutable, parent_authorized_for_delete_child);
out:
	/* release any ACLs fetched by vnode_getattr() above */
	if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
		kauth_acl_free(va.va_acl);
	}
	if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL)) {
		kauth_acl_free(dva.va_acl);
	}

	if (result) {
		if (parent_ref) {
			vnode_put(vp);
		}
		*errorp = result;
		KAUTH_DEBUG("%p DENIED - auth denied", vp);
		return KAUTH_RESULT_DENY;
	}
	if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
		/*
		 * if we were successfully granted the right to search this directory
		 * and there were NO ACL DENYs for search and the posix permissions also don't
		 * deny execute, we can synthesize a global right that allows anyone to
		 * traverse this directory during a pathname lookup without having to
		 * match the credential associated with this cache of rights.
		 *
		 * Note that we can correctly cache KAUTH_VNODE_SEARCHBYANYONE
		 * only if we actually check ACLs which we don't for root. As
		 * a workaround, the lookup fast path checks for root.
		 */
		if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
		    ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
		    (S_IXUSR | S_IXGRP | S_IXOTH))) {
			vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
		}
	}
success:
	if (parent_ref) {
		vnode_put(vp);
	}

	/*
	 * Note that this implies that we will allow requests for no rights, as well as
	 * for rights that we do not recognise. There should be none of these.
	 */
	KAUTH_DEBUG("%p ALLOWED - auth granted", vp);
	return KAUTH_RESULT_ALLOW;
}
9244
9245 int
vnode_attr_authorize_init(struct vnode_attr * vap,struct vnode_attr * dvap,kauth_action_t action,vfs_context_t ctx)9246 vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap,
9247 kauth_action_t action, vfs_context_t ctx)
9248 {
9249 VATTR_INIT(vap);
9250 VATTR_WANTED(vap, va_type);
9251 VATTR_WANTED(vap, va_mode);
9252 VATTR_WANTED(vap, va_flags);
9253 if (dvap) {
9254 VATTR_INIT(dvap);
9255 if (action & KAUTH_VNODE_DELETE) {
9256 VATTR_WANTED(dvap, va_type);
9257 VATTR_WANTED(dvap, va_mode);
9258 VATTR_WANTED(dvap, va_flags);
9259 }
9260 } else if (action & KAUTH_VNODE_DELETE) {
9261 return EINVAL;
9262 }
9263
9264 if (!vfs_context_issuser(ctx)) {
9265 VATTR_WANTED(vap, va_uid);
9266 VATTR_WANTED(vap, va_gid);
9267 VATTR_WANTED(vap, va_acl);
9268 if (dvap && (action & KAUTH_VNODE_DELETE)) {
9269 VATTR_WANTED(dvap, va_uid);
9270 VATTR_WANTED(dvap, va_gid);
9271 VATTR_WANTED(dvap, va_acl);
9272 }
9273 }
9274
9275 return 0;
9276 }
9277
/*
 * Authorize 'action' against a set of pre-fetched attributes rather than a
 * live vnode (used when the caller already holds the attribute data, e.g.
 * bulk operations).  'vap'/'dvap' must have been prepared with
 * vnode_attr_authorize_init() and populated by the caller; 'mp' may be
 * NULL when mount-level checks do not apply.
 *
 * Returns 0 if authorized, or an errno on denial (EPERM is mapped to
 * EACCES for consistency with the access(2)-style callers of this path).
 */
int
vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp,
    kauth_action_t action, vfs_context_t ctx)
{
	struct _vnode_authorize_context auth_context;
	vauth_ctx vcp;
	kauth_ace_rights_t rights;
	int noimmutable;
	boolean_t found_deny;
	boolean_t is_suser = FALSE;
	int result = 0;

	/* attribute-only evaluation: no vnodes in the context */
	vcp = &auth_context;
	vcp->ctx = ctx;
	vcp->vp = NULLVP;
	vcp->vap = vap;
	vcp->dvp = NULLVP;
	vcp->dvap = dvap;
	vcp->flags = vcp->flags_valid = 0;

	/* separate the control bits from the requested rights */
	noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
	rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);

	/*
	 * Check for read-only filesystems.
	 */
	if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
	    mp && (mp->mnt_flag & MNT_RDONLY) &&
	    ((vap->va_type == VREG) || (vap->va_type == VDIR) ||
	    (vap->va_type == VLNK) || (rights & KAUTH_VNODE_DELETE) ||
	    (rights & KAUTH_VNODE_DELETE_CHILD))) {
		result = EROFS;
		goto out;
	}

	/*
	 * Check for noexec filesystems.
	 */
	if ((rights & KAUTH_VNODE_EXECUTE) &&
	    (vap->va_type == VREG) && mp && (mp->mnt_flag & MNT_NOEXEC)) {
		result = EACCES;
		goto out;
	}

	if (vfs_context_issuser(ctx)) {
		/*
		 * if we're not asking for execute permissions or modifications,
		 * then we're done, this action is authorized.
		 */
		if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
			goto out;
		}
		is_suser = TRUE;
	} else {
		/* the caller must have populated everything a non-root check needs */
		if (!VATTR_IS_SUPPORTED(vap, va_uid) ||
		    !VATTR_IS_SUPPORTED(vap, va_gid) ||
		    (mp && vfs_extendedsecurity(mp) && !VATTR_IS_SUPPORTED(vap, va_acl))) {
			panic("vnode attrs not complete for vnode_attr_authorize");
		}
	}

	if (mp) {
		vnode_attr_handle_mnt_ignore_ownership(vap, mp, ctx);
	}

	result = vnode_attr_authorize_internal(vcp, mp, rights, is_suser,
	    &found_deny, noimmutable, FALSE);

	if (result == EPERM) {
		result = EACCES;
	}
out:
	return result;
}
9352
9353
9354 int
vnode_authattr_new(vnode_t dvp,struct vnode_attr * vap,int noauth,vfs_context_t ctx)9355 vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
9356 {
9357 return vnode_authattr_new_internal(dvp, vap, noauth, NULL, ctx);
9358 }
9359
/*
 * Check that the attribute information in vattr can be legally applied to
 * a new file by the context.
 *
 * Defaults any unset fields (owner, group, mode, flags, create time),
 * applies IGNORE_OWNERSHIP mount overrides, inherits restriction flags
 * from the parent directory, and — unless 'noauth' is set or the caller
 * is the superuser — validates that the caller may legally create an
 * object with the resulting identity (setuid/setgid bits, owner, group,
 * owner/group UUIDs).
 *
 * If 'defaulted_fieldsp' is non-NULL, it receives VATTR_PREPARE_DEFAULTED_*
 * bits identifying which fields this routine filled in.
 *
 * Returns 0 on success, or an errno (commonly EINVAL/EPERM) on failure.
 */
static int
vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
{
	int error;
	int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
	uint32_t inherit_flags;
	kauth_cred_t cred;
	guid_t changer;
	mount_t dmp;
	struct vnode_attr dva;

	error = 0;

	if (defaulted_fieldsp) {
		*defaulted_fieldsp = 0;
	}

	defaulted_owner = defaulted_group = defaulted_mode = 0;

	inherit_flags = 0;

	/*
	 * Require that the filesystem support extended security to apply any.
	 */
	if (!vfs_extendedsecurity(dvp->v_mount) &&
	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Default some fields.
	 */
	dmp = dvp->v_mount;

	/*
	 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
	 * owner takes ownership of all new files.
	 */
	if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
		VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
		defaulted_owner = 1;
	} else {
		if (!VATTR_IS_ACTIVE(vap, va_uid)) {
			/* default owner is current user */
			VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
			defaulted_owner = 1;
		}
	}

	/*
	 * We need the dvp's va_flags and *may* need the gid of the directory,
	 * we ask for both here.
	 */
	VATTR_INIT(&dva);
	VATTR_WANTED(&dva, va_gid);
	VATTR_WANTED(&dva, va_flags);
	if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) {
		goto out;
	}

	/*
	 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
	 * group takes ownership of all new files.
	 */
	if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
		VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
		defaulted_group = 1;
	} else {
		if (!VATTR_IS_ACTIVE(vap, va_gid)) {
			/* default group comes from parent object, fallback to current user */
			if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
				VATTR_SET(vap, va_gid, dva.va_gid);
			} else {
				VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
			}
			defaulted_group = 1;
		}
	}

	if (!VATTR_IS_ACTIVE(vap, va_flags)) {
		VATTR_SET(vap, va_flags, 0);
	}

	/* Determine if SF_RESTRICTED should be inherited from the parent
	 * directory. */
	if (VATTR_IS_SUPPORTED(&dva, va_flags)) {
		inherit_flags = dva.va_flags & (UF_DATAVAULT | SF_RESTRICTED);
	}

	/* default mode is everything, masked with current umask */
	if (!VATTR_IS_ACTIVE(vap, va_mode)) {
		VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd.fd_cmask);
		KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o",
		    vap->va_mode, vfs_context_proc(ctx)->p_fd.fd_cmask);
		defaulted_mode = 1;
	}
	/* set timestamps to now */
	if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
		nanotime(&vap->va_create_time);
		VATTR_SET_ACTIVE(vap, va_create_time);
	}

	/*
	 * Check for attempts to set nonsensical fields.
	 */
	if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
		error = EINVAL;
		KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
		    vap->va_active & ~VNODE_ATTR_NEWOBJ);
		goto out;
	}

	/*
	 * Quickly check for the applicability of any enforcement here.
	 * Tests below maintain the integrity of the local security model.
	 */
	if (vfs_authopaque(dvp->v_mount)) {
		goto out;
	}

	/*
	 * We need to know if the caller is the superuser, or if the work is
	 * otherwise already authorised.
	 */
	cred = vfs_context_ucred(ctx);
	if (noauth) {
		/* doing work for the kernel */
		has_priv_suser = 1;
	} else {
		has_priv_suser = vfs_context_issuser(ctx);
	}


	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		/* SF_SYNTHETIC is never directly settable */
		vap->va_flags &= ~SF_SYNTHETIC;
		if (has_priv_suser) {
			if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
				error = EPERM;
				KAUTH_DEBUG("  DENIED - superuser attempt to set illegal flag(s)");
				goto out;
			}
		} else {
			if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
				error = EPERM;
				KAUTH_DEBUG("  DENIED - user attempt to set illegal flag(s)");
				goto out;
			}
		}
	}

	/* if not superuser, validate legality of new-item attributes */
	if (!has_priv_suser) {
		if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
			/* setgid? */
			if (vap->va_mode & S_ISGID) {
				if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
					KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
					goto out;
				}
				if (!ismember) {
					KAUTH_DEBUG("  DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
					error = EPERM;
					goto out;
				}
			}

			/* setuid? */
			if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
				KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
				error = EPERM;
				goto out;
			}
		}
		if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
			KAUTH_DEBUG("  DENIED - cannot create new item owned by %d", vap->va_uid);
			error = EPERM;
			goto out;
		}
		if (!defaulted_group) {
			if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
				KAUTH_DEBUG("  ERROR - got %d checking for membership in %d", error, vap->va_gid);
				goto out;
			}
			if (!ismember) {
				KAUTH_DEBUG("  DENIED - cannot create new item with group %d - not a member", vap->va_gid);
				error = EPERM;
				goto out;
			}
		}

		/* initialising owner/group UUID */
		if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
			if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
				KAUTH_DEBUG("  ERROR - got %d trying to get caller UUID", error);
				/* XXX ENOENT here - no GUID - should perhaps become EPERM */
				goto out;
			}
			if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
				KAUTH_DEBUG("  ERROR - cannot create item with supplied owner UUID - not us");
				error = EPERM;
				goto out;
			}
		}
		if (VATTR_IS_ACTIVE(vap, va_guuid)) {
			if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
				KAUTH_DEBUG("  ERROR - got %d trying to check group membership", error);
				goto out;
			}
			if (!ismember) {
				KAUTH_DEBUG("  ERROR - cannot create item with supplied group UUID - not a member");
				error = EPERM;
				goto out;
			}
		}
	}
out:
	if (inherit_flags) {
		/* Apply SF_RESTRICTED to the file if its parent directory was
		 * restricted. This is done at the end so that root is not
		 * required if this flag is only set due to inheritance. */
		VATTR_SET(vap, va_flags, (vap->va_flags | inherit_flags));
	}
	if (defaulted_fieldsp) {
		if (defaulted_mode) {
			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_MODE;
		}
		if (defaulted_group) {
			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_GID;
		}
		if (defaulted_owner) {
			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_UID;
		}
	}
	return error;
}
9600
9601 /*
9602 * Check that the attribute information in vap can be legally written by the
9603 * context.
9604 *
9605 * Call this when you're not sure about the vnode_attr; either its contents
9606 * have come from an unknown source, or when they are variable.
9607 *
9608 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
9609 * must be authorized to be permitted to write the vattr.
9610 */
9611 int
vnode_authattr(vnode_t vp,struct vnode_attr * vap,kauth_action_t * actionp,vfs_context_t ctx)9612 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
9613 {
9614 struct vnode_attr ova;
9615 kauth_action_t required_action;
9616 int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
9617 guid_t changer;
9618 gid_t group;
9619 uid_t owner;
9620 mode_t newmode;
9621 kauth_cred_t cred;
9622 uint32_t fdelta;
9623
9624 VATTR_INIT(&ova);
9625 required_action = 0;
9626 error = 0;
9627
9628 /*
9629 * Quickly check for enforcement applicability.
9630 */
9631 if (vfs_authopaque(vp->v_mount)) {
9632 goto out;
9633 }
9634
9635 /*
9636 * Check for attempts to set nonsensical fields.
9637 */
9638 if (vap->va_active & VNODE_ATTR_RDONLY) {
9639 KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
9640 error = EINVAL;
9641 goto out;
9642 }
9643
9644 /*
9645 * We need to know if the caller is the superuser.
9646 */
9647 cred = vfs_context_ucred(ctx);
9648 has_priv_suser = kauth_cred_issuser(cred);
9649
9650 /*
9651 * If any of the following are changing, we need information from the old file:
9652 * va_uid
9653 * va_gid
9654 * va_mode
9655 * va_uuuid
9656 * va_guuid
9657 */
9658 if (VATTR_IS_ACTIVE(vap, va_uid) ||
9659 VATTR_IS_ACTIVE(vap, va_gid) ||
9660 VATTR_IS_ACTIVE(vap, va_mode) ||
9661 VATTR_IS_ACTIVE(vap, va_uuuid) ||
9662 VATTR_IS_ACTIVE(vap, va_guuid)) {
9663 VATTR_WANTED(&ova, va_mode);
9664 VATTR_WANTED(&ova, va_uid);
9665 VATTR_WANTED(&ova, va_gid);
9666 VATTR_WANTED(&ova, va_uuuid);
9667 VATTR_WANTED(&ova, va_guuid);
9668 KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
9669 }
9670
9671 /*
9672 * If timestamps are being changed, we need to know who the file is owned
9673 * by.
9674 */
9675 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
9676 VATTR_IS_ACTIVE(vap, va_change_time) ||
9677 VATTR_IS_ACTIVE(vap, va_modify_time) ||
9678 VATTR_IS_ACTIVE(vap, va_access_time) ||
9679 VATTR_IS_ACTIVE(vap, va_backup_time) ||
9680 VATTR_IS_ACTIVE(vap, va_addedtime)) {
9681 VATTR_WANTED(&ova, va_uid);
9682 #if 0 /* enable this when we support UUIDs as official owners */
9683 VATTR_WANTED(&ova, va_uuuid);
9684 #endif
9685 KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
9686 }
9687
9688 /*
9689 * If flags are being changed, we need the old flags.
9690 */
9691 if (VATTR_IS_ACTIVE(vap, va_flags)) {
9692 KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
9693 VATTR_WANTED(&ova, va_flags);
9694 }
9695
9696 /*
9697 * If ACLs are being changed, we need the old ACLs.
9698 */
9699 if (VATTR_IS_ACTIVE(vap, va_acl)) {
9700 KAUTH_DEBUG("ATTR - acl changing, fetching old flags");
9701 VATTR_WANTED(&ova, va_acl);
9702 }
9703
9704 /*
9705 * If the size is being set, make sure it's not a directory.
9706 */
9707 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
9708 /* size is only meaningful on regular files, don't permit otherwise */
9709 if (!vnode_isreg(vp)) {
9710 KAUTH_DEBUG("ATTR - ERROR: size change requested on non-file");
9711 error = vnode_isdir(vp) ? EISDIR : EINVAL;
9712 goto out;
9713 }
9714 }
9715
9716 /*
9717 * Get old data.
9718 */
9719 KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
9720 if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
9721 KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error);
9722 goto out;
9723 }
9724
9725 /*
9726 * Size changes require write access to the file data.
9727 */
9728 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
9729 /* if we can't get the size, or it's different, we need write access */
9730 KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
9731 required_action |= KAUTH_VNODE_WRITE_DATA;
9732 }
9733
9734 /*
9735 * Changing timestamps?
9736 *
9737 * Note that we are only called to authorize user-requested time changes;
9738 * side-effect time changes are not authorized. Authorisation is only
9739 * required for existing files.
9740 *
9741 * Non-owners are not permitted to change the time on an existing
9742 * file to anything other than the current time.
9743 */
9744 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
9745 VATTR_IS_ACTIVE(vap, va_change_time) ||
9746 VATTR_IS_ACTIVE(vap, va_modify_time) ||
9747 VATTR_IS_ACTIVE(vap, va_access_time) ||
9748 VATTR_IS_ACTIVE(vap, va_backup_time) ||
9749 VATTR_IS_ACTIVE(vap, va_addedtime)) {
9750 /*
9751 * The owner and root may set any timestamps they like,
9752 * provided that the file is not immutable. The owner still needs
9753 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
9754 */
9755 if (has_priv_suser || vauth_node_owner(&ova, cred)) {
9756 KAUTH_DEBUG("ATTR - root or owner changing timestamps");
9757 required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
9758 } else {
9759 /* just setting the current time? */
9760 if (vap->va_vaflags & VA_UTIMES_NULL) {
9761 KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
9762 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
9763 } else {
9764 KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
9765 error = EACCES;
9766 goto out;
9767 }
9768 }
9769 }
9770
9771 /*
9772 * Changing file mode?
9773 */
9774 if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
9775 KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
9776
9777 /*
9778 * Mode changes always have the same basic auth requirements.
9779 */
9780 if (has_priv_suser) {
9781 KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
9782 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
9783 } else {
9784 /* need WRITE_SECURITY */
9785 KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
9786 required_action |= KAUTH_VNODE_WRITE_SECURITY;
9787 }
9788
9789 /*
9790 * Can't set the setgid bit if you're not in the group and not root. Have to have
9791 * existing group information in the case we're not setting it right now.
9792 */
9793 if (vap->va_mode & S_ISGID) {
9794 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
9795 if (!has_priv_suser) {
9796 if (VATTR_IS_ACTIVE(vap, va_gid)) {
9797 group = vap->va_gid;
9798 } else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
9799 group = ova.va_gid;
9800 } else {
9801 KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
9802 error = EINVAL;
9803 goto out;
9804 }
9805 /*
9806 * This might be too restrictive; WRITE_SECURITY might be implied by
9807 * membership in this case, rather than being an additional requirement.
9808 */
9809 if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
9810 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
9811 goto out;
9812 }
9813 if (!ismember) {
9814 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group);
9815 error = EPERM;
9816 goto out;
9817 }
9818 }
9819 }
9820
9821 /*
9822 * Can't set the setuid bit unless you're root or the file's owner.
9823 */
9824 if (vap->va_mode & S_ISUID) {
9825 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
9826 if (!has_priv_suser) {
9827 if (VATTR_IS_ACTIVE(vap, va_uid)) {
9828 owner = vap->va_uid;
9829 } else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
9830 owner = ova.va_uid;
9831 } else {
9832 KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
9833 error = EINVAL;
9834 goto out;
9835 }
9836 if (owner != kauth_cred_getuid(cred)) {
9837 /*
9838 * We could allow this if WRITE_SECURITY is permitted, perhaps.
9839 */
9840 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
9841 error = EPERM;
9842 goto out;
9843 }
9844 }
9845 }
9846 }
9847
9848 /*
9849 * Validate/mask flags changes. This checks that only the flags in
9850 * the UF_SETTABLE mask are being set, and preserves the flags in
9851 * the SF_SETTABLE case.
9852 *
9853 * Since flags changes may be made in conjunction with other changes,
9854 * we will ask the auth code to ignore immutability in the case that
9855 * the SF_* flags are not set and we are only manipulating the file flags.
9856 *
9857 */
9858 if (VATTR_IS_ACTIVE(vap, va_flags)) {
9859 /* compute changing flags bits */
9860 vap->va_flags &= ~SF_SYNTHETIC;
9861 ova.va_flags &= ~SF_SYNTHETIC;
9862 if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
9863 fdelta = vap->va_flags ^ ova.va_flags;
9864 } else {
9865 fdelta = vap->va_flags;
9866 }
9867
9868 if (fdelta != 0) {
9869 KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
9870 required_action |= KAUTH_VNODE_WRITE_SECURITY;
9871
9872 /* check that changing bits are legal */
9873 if (has_priv_suser) {
9874 /*
9875 * The immutability check will prevent us from clearing the SF_*
9876 * flags unless the system securelevel permits it, so just check
9877 * for legal flags here.
9878 */
9879 if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
9880 error = EPERM;
9881 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
9882 goto out;
9883 }
9884 } else {
9885 if (fdelta & ~UF_SETTABLE) {
9886 error = EPERM;
9887 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
9888 goto out;
9889 }
9890 }
9891 /*
9892 * If the caller has the ability to manipulate file flags,
9893 * security is not reduced by ignoring them for this operation.
9894 *
9895 * A more complete test here would consider the 'after' states of the flags
9896 * to determine whether it would permit the operation, but this becomes
9897 * very complex.
9898 *
9899 * Ignoring immutability is conditional on securelevel; this does not bypass
9900 * the SF_* flags if securelevel > 0.
9901 */
9902 required_action |= KAUTH_VNODE_NOIMMUTABLE;
9903 }
9904 }
9905
9906 /*
9907 * Validate ownership information.
9908 */
9909 chowner = 0;
9910 chgroup = 0;
9911 clear_suid = 0;
9912 clear_sgid = 0;
9913
9914 /*
9915 * uid changing
9916 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
9917 * support them in general, and will ignore it if/when we try to set it.
9918 * We might want to clear the uid out of vap completely here.
9919 */
9920 if (VATTR_IS_ACTIVE(vap, va_uid)) {
9921 if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
9922 if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
9923 KAUTH_DEBUG(" DENIED - non-superuser cannot change ownershipt to a third party");
9924 error = EPERM;
9925 goto out;
9926 }
9927 chowner = 1;
9928 }
9929 clear_suid = 1;
9930 }
9931
9932 /*
9933 * gid changing
9934 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
9935 * support them in general, and will ignore it if/when we try to set it.
9936 * We might want to clear the gid out of vap completely here.
9937 */
9938 if (VATTR_IS_ACTIVE(vap, va_gid)) {
9939 if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
9940 if (!has_priv_suser) {
9941 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
9942 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
9943 goto out;
9944 }
9945 if (!ismember) {
9946 KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
9947 ova.va_gid, vap->va_gid);
9948 error = EPERM;
9949 goto out;
9950 }
9951 }
9952 chgroup = 1;
9953 }
9954 clear_sgid = 1;
9955 }
9956
9957 /*
9958 * Owner UUID being set or changed.
9959 */
9960 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
9961 /* if the owner UUID is not actually changing ... */
9962 if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
9963 if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid)) {
9964 goto no_uuuid_change;
9965 }
9966
9967 /*
9968 * If the current owner UUID is a null GUID, check
9969 * it against the UUID corresponding to the owner UID.
9970 */
9971 if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
9972 VATTR_IS_SUPPORTED(&ova, va_uid)) {
9973 guid_t uid_guid;
9974
9975 if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
9976 kauth_guid_equal(&vap->va_uuuid, &uid_guid)) {
9977 goto no_uuuid_change;
9978 }
9979 }
9980 }
9981
9982 /*
9983 * The owner UUID cannot be set by a non-superuser to anything other than
9984 * their own or a null GUID (to "unset" the owner UUID).
9985 * Note that file systems must be prepared to handle the
9986 * null UUID case in a manner appropriate for that file
9987 * system.
9988 */
9989 if (!has_priv_suser) {
9990 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
9991 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
9992 /* XXX ENOENT here - no UUID - should perhaps become EPERM */
9993 goto out;
9994 }
9995 if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
9996 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
9997 KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
9998 error = EPERM;
9999 goto out;
10000 }
10001 }
10002 chowner = 1;
10003 clear_suid = 1;
10004 }
10005 no_uuuid_change:
10006 /*
10007 * Group UUID being set or changed.
10008 */
10009 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
10010 /* if the group UUID is not actually changing ... */
10011 if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
10012 if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid)) {
10013 goto no_guuid_change;
10014 }
10015
10016 /*
10017 * If the current group UUID is a null UUID, check
10018 * it against the UUID corresponding to the group GID.
10019 */
10020 if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
10021 VATTR_IS_SUPPORTED(&ova, va_gid)) {
10022 guid_t gid_guid;
10023
10024 if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
10025 kauth_guid_equal(&vap->va_guuid, &gid_guid)) {
10026 goto no_guuid_change;
10027 }
10028 }
10029 }
10030
10031 /*
10032 * The group UUID cannot be set by a non-superuser to anything other than
10033 * one of which they are a member or a null GUID (to "unset"
10034 * the group UUID).
10035 * Note that file systems must be prepared to handle the
10036 * null UUID case in a manner appropriate for that file
10037 * system.
10038 */
10039 if (!has_priv_suser) {
10040 if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
10041 ismember = 1;
10042 } else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
10043 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
10044 goto out;
10045 }
10046 if (!ismember) {
10047 KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
10048 error = EPERM;
10049 goto out;
10050 }
10051 }
10052 chgroup = 1;
10053 }
10054 no_guuid_change:
10055
10056 /*
10057 * Compute authorisation for group/ownership changes.
10058 */
10059 if (chowner || chgroup || clear_suid || clear_sgid) {
10060 if (has_priv_suser) {
10061 KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
10062 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
10063 } else {
10064 if (chowner) {
10065 KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
10066 required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
10067 }
10068 if (chgroup && !chowner) {
10069 KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
10070 required_action |= KAUTH_VNODE_WRITE_SECURITY;
10071 }
10072 }
10073
10074 /*
10075 * clear set-uid and set-gid bits. POSIX only requires this for
10076 * non-privileged processes but we do it even for root.
10077 */
10078 if (VATTR_IS_ACTIVE(vap, va_mode)) {
10079 newmode = vap->va_mode;
10080 } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
10081 newmode = ova.va_mode;
10082 } else {
10083 KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
10084 newmode = 0;
10085 }
10086
10087 /* chown always clears setuid/gid bits. An exception is made for
10088 * setattrlist which can set both at the same time: <uid, gid, mode> on a file:
10089 * setattrlist is allowed to set the new mode on the file and change (chown)
10090 * uid/gid.
10091 */
10092 if (newmode & (S_ISUID | S_ISGID)) {
10093 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
10094 KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o",
10095 newmode, newmode & ~(S_ISUID | S_ISGID));
10096 newmode &= ~(S_ISUID | S_ISGID);
10097 }
10098 VATTR_SET(vap, va_mode, newmode);
10099 }
10100 }
10101
10102 /*
10103 * Authorise changes in the ACL.
10104 */
10105 if (VATTR_IS_ACTIVE(vap, va_acl)) {
10106 /* no existing ACL */
10107 if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
10108 /* adding an ACL */
10109 if (vap->va_acl != NULL) {
10110 required_action |= KAUTH_VNODE_WRITE_SECURITY;
10111 KAUTH_DEBUG("CHMOD - adding ACL");
10112 }
10113
10114 /* removing an existing ACL */
10115 } else if (vap->va_acl == NULL) {
10116 required_action |= KAUTH_VNODE_WRITE_SECURITY;
10117 KAUTH_DEBUG("CHMOD - removing ACL");
10118
10119 /* updating an existing ACL */
10120 } else {
10121 if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
10122 /* entry count changed, must be different */
10123 required_action |= KAUTH_VNODE_WRITE_SECURITY;
10124 KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
10125 } else if (vap->va_acl->acl_entrycount > 0) {
10126 /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
10127 if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
10128 sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) {
10129 required_action |= KAUTH_VNODE_WRITE_SECURITY;
10130 KAUTH_DEBUG("CHMOD - changing ACL entries");
10131 }
10132 }
10133 }
10134 }
10135
10136 /*
10137 * Other attributes that require authorisation.
10138 */
10139 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
10140 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
10141 }
10142
10143 out:
10144 if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL)) {
10145 kauth_acl_free(ova.va_acl);
10146 }
10147 if (error == 0) {
10148 *actionp = required_action;
10149 }
10150 return error;
10151 }
10152
/*
 * vnode_iterate() callback used by vfs_setlocklocal(): mark a single
 * vnode so that advisory locking for it is handled locally by the VFS.
 * The spin-lock variant suffices because we only flip a flag bit.
 */
static int
setlocklocal_callback(struct vnode *vp, __unused void *cargs)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VLOCKLOCAL;
	vnode_unlock(vp);

	/* continue the iteration over the mount's vnode list */
	return VNODE_RETURNED;
}
10162
/*
 * Mark a mount as using VFS-local advisory file locking, then propagate
 * the VLOCKLOCAL flag to every vnode currently attached to the mount so
 * that existing vnodes pick up the new locking policy as well.
 */
void
vfs_setlocklocal(mount_t mp)
{
	mount_lock_spin(mp);
	mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
	mount_unlock(mp);

	/*
	 * The number of active vnodes is expected to be
	 * very small when vfs_setlocklocal is invoked.
	 */
	vnode_iterate(mp, 0, setlocklocal_callback, NULL);
}
10176
/*
 * Advertise that this mount's filesystem implements the compound
 * open VNOP (lookup+open in one operation).
 */
void
vfs_setcompoundopen(mount_t mp)
{
	mount_lock_spin(mp);
	mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN;
	mount_unlock(mp);
}
10184
/*
 * Flag the mount containing vp as hosting swap files.
 */
void
vnode_setswapmount(vnode_t vp)
{
	mount_lock(vp->v_mount);
	vp->v_mount->mnt_kern_flag |= MNTK_SWAP_MOUNT;
	mount_unlock(vp->v_mount);
}
10192
10193
10194 int64_t
vnode_getswappin_avail(vnode_t vp)10195 vnode_getswappin_avail(vnode_t vp)
10196 {
10197 int64_t max_swappin_avail = 0;
10198
10199 mount_lock(vp->v_mount);
10200 if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_SWAPPIN_SUPPORTED) {
10201 max_swappin_avail = vp->v_mount->mnt_max_swappin_available;
10202 }
10203 mount_unlock(vp->v_mount);
10204
10205 return max_swappin_avail;
10206 }
10207
10208
/*
 * Mark vp as busy with a union-mount operation; waiters block in
 * vn_checkunionwait() until vn_clearunionwait() is called.
 */
void
vn_setunionwait(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VISUNION;
	vnode_unlock(vp);
}
10216
10217
/*
 * Block until any in-progress union-mount operation on vp completes.
 * Sleeps on the address of v_flag; vn_clearunionwait() issues the
 * matching wakeup after clearing VISUNION.
 */
void
vn_checkunionwait(vnode_t vp)
{
	vnode_lock_spin(vp);
	/* re-check after every wakeup in case of spurious wakeups */
	while ((vp->v_flag & VISUNION) == VISUNION) {
		msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
	}
	vnode_unlock(vp);
}
10227
/*
 * Clear the VISUNION busy marker on vp and wake all threads sleeping
 * in vn_checkunionwait().  'locked' indicates the caller already holds
 * the vnode lock, in which case it is neither taken nor dropped here.
 */
void
vn_clearunionwait(vnode_t vp, int locked)
{
	if (!locked) {
		vnode_lock_spin(vp);
	}
	if ((vp->v_flag & VISUNION) == VISUNION) {
		vp->v_flag &= ~VISUNION;
		/* wake everyone blocked on the v_flag sleep channel */
		wakeup((caddr_t)&vp->v_flag);
	}
	if (!locked) {
		vnode_unlock(vp);
	}
}
10242
10243 int
vnode_materialize_dataless_file(vnode_t vp,uint64_t op_type)10244 vnode_materialize_dataless_file(vnode_t vp, uint64_t op_type)
10245 {
10246 int error;
10247
10248 /* Swap files are special; ignore them */
10249 if (vnode_isswap(vp)) {
10250 return 0;
10251 }
10252
10253 error = resolve_nspace_item(vp,
10254 op_type | NAMESPACE_HANDLER_NSPACE_EVENT);
10255
10256 /*
10257 * The file resolver owns the logic about what error to return
10258 * to the caller. We only need to handle a couple of special
10259 * cases here:
10260 */
10261 if (error == EJUSTRETURN) {
10262 /*
10263 * The requesting process is allowed to interact with
10264 * dataless objects. Make a couple of sanity-checks
10265 * here to ensure the action makes sense.
10266 */
10267 switch (op_type) {
10268 case NAMESPACE_HANDLER_WRITE_OP:
10269 case NAMESPACE_HANDLER_TRUNCATE_OP:
10270 case NAMESPACE_HANDLER_RENAME_OP:
10271 /*
10272 * This handles the case of the resolver itself
10273 * writing data to the file (or throwing it
10274 * away).
10275 */
10276 error = 0;
10277 break;
10278 case NAMESPACE_HANDLER_READ_OP:
10279 /*
10280 * This handles the case of the resolver needing
10281 * to look up inside of a dataless directory while
10282 * it's in the process of materializing it (for
10283 * example, creating files or directories).
10284 */
10285 error = (vnode_vtype(vp) == VDIR) ? 0 : EBADF;
10286 break;
10287 default:
10288 error = EBADF;
10289 break;
10290 }
10291 }
10292
10293 return error;
10294 }
10295
10296 /*
10297 * Removes orphaned apple double files during a rmdir
10298 * Works by:
10299 * 1. vnode_suspend().
10300 * 2. Call VNOP_READDIR() till the end of directory is reached.
10301 * 3. Check if the directory entries returned are regular files with name starting with "._". If not, return ENOTEMPTY.
10302 * 4. Continue (2) and (3) till end of directory is reached.
10303 * 5. If all the entries in the directory were files with "._" name, delete all the files.
10304 * 6. vnode_resume()
10305 * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
10306 */
10307
errno_t
rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int * restart_flag)
{
#define UIO_BUFF_SIZE 2048
	uio_t auio = NULL;
	int eofflag, siz = UIO_BUFF_SIZE, alloc_size = 0, nentries = 0;
	int open_flag = 0, full_erase_flag = 0;
	uio_stackbuf_t uio_buf[UIO_SIZEOF(1)];
	char *rbuf = NULL;
	void *dir_pos;
	void *dir_end;
	struct dirent *dp;
	errno_t error;

	/* suspend the vnode so no new operations start while we scan/delete */
	error = vnode_suspend(vp);

	/*
	 * restart_flag is set so that the calling rmdir sleeps and resets
	 */
	if (error == EBUSY) {
		*restart_flag = 1;
	}
	if (error != 0) {
		return error;
	}

	/*
	 * Prevent dataless fault materialization while we have
	 * a suspended vnode.
	 */
	uthread_t ut = current_uthread();
	bool saved_nodatalessfaults =
	    (ut->uu_flag & UT_NSPACE_NODATALESSFAULTS) ? true : false;
	ut->uu_flag |= UT_NSPACE_NODATALESSFAULTS;

	/*
	 * set up UIO
	 */
	rbuf = kalloc_data(siz, Z_WAITOK);
	alloc_size = siz;
	if (rbuf) {
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
		    &uio_buf[0], sizeof(uio_buf));
	}
	if (!rbuf || !auio) {
		error = ENOMEM;
		goto outsc;
	}

	uio_setoffset(auio, 0);

	eofflag = 0;

	if ((error = VNOP_OPEN(vp, FREAD, ctx))) {
		goto outsc;
	} else {
		open_flag = 1;
	}

	/*
	 * First pass checks if all files are appleDouble files.
	 */

	do {
		siz = UIO_BUFF_SIZE;
		uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
		uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);

		if ((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx))) {
			goto outsc;
		}

		/* siz becomes the number of bytes actually read into rbuf */
		if (uio_resid(auio) != 0) {
			siz -= uio_resid(auio);
		}

		/*
		 * Iterate through directory
		 */
		dir_pos = (void*) rbuf;
		dir_end = (void*) (rbuf + siz);
		dp = (struct dirent*) (dir_pos);

		if (dir_pos == dir_end) {
			eofflag = 1;
		}

		while (dir_pos < dir_end) {
			/*
			 * Check for . and .. as well as directories
			 */
			if (dp->d_ino != 0 &&
			    !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
				/*
				 * Check for irregular files and ._ files
				 * If there is a ._._ file abort the op
				 */
				if (dp->d_namlen < 2 ||
				    strncmp(dp->d_name, "._", 2) ||
				    (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._", 2))) {
					error = ENOTEMPTY;
					goto outsc;
				}
			}
			/* advance to the next (variable-length) dirent record */
			dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
			dp = (struct dirent*)dir_pos;
		}

		/*
		 * workaround for HFS/NFS setting eofflag before end of file
		 */
		if (vp->v_tag == VT_HFS && nentries > 2) {
			eofflag = 0;
		}

		if (vp->v_tag == VT_NFS) {
			if (eofflag && !full_erase_flag) {
				/* force one more full pass from offset 0 */
				full_erase_flag = 1;
				eofflag = 0;
				uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
			} else if (!eofflag && full_erase_flag) {
				full_erase_flag = 0;
			}
		}
	} while (!eofflag);
	/*
	 * If we've made it here all the files in the dir are ._ files.
	 * We can delete the files even though the node is suspended
	 * because we are the owner of the file.
	 */

	/* Second pass: rewind and actually unlink the ._ files. */
	uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
	eofflag = 0;
	full_erase_flag = 0;

	do {
		siz = UIO_BUFF_SIZE;
		uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
		uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);

		error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);

		if (error != 0) {
			goto outsc;
		}

		if (uio_resid(auio) != 0) {
			siz -= uio_resid(auio);
		}

		/*
		 * Iterate through directory
		 */
		dir_pos = (void*) rbuf;
		dir_end = (void*) (rbuf + siz);
		dp = (struct dirent*) dir_pos;

		if (dir_pos == dir_end) {
			eofflag = 1;
		}

		while (dir_pos < dir_end) {
			/*
			 * Check for . and .. as well as directories
			 */
			if (dp->d_ino != 0 &&
			    !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
			    ) {
				error = unlink1(ctx, vp,
				    CAST_USER_ADDR_T(dp->d_name), UIO_SYSSPACE,
				    VNODE_REMOVE_SKIP_NAMESPACE_EVENT |
				    VNODE_REMOVE_NO_AUDIT_PATH);

				/* a concurrent removal (ENOENT) is not fatal */
				if (error && error != ENOENT) {
					goto outsc;
				}
			}
			dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
			dp = (struct dirent*)dir_pos;
		}

		/*
		 * workaround for HFS/NFS setting eofflag before end of file
		 */
		if (vp->v_tag == VT_HFS && nentries > 2) {
			eofflag = 0;
		}

		if (vp->v_tag == VT_NFS) {
			if (eofflag && !full_erase_flag) {
				full_erase_flag = 1;
				eofflag = 0;
				uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
			} else if (!eofflag && full_erase_flag) {
				full_erase_flag = 0;
			}
		}
	} while (!eofflag);


	error = 0;

outsc:
	/* common cleanup: close, free buffers, restore thread flag, resume */
	if (open_flag) {
		VNOP_CLOSE(vp, FREAD, ctx);
	}

	if (auio) {
		uio_free(auio);
	}
	kfree_data(rbuf, alloc_size);

	/* only clear the flag if we were the ones who set it */
	if (saved_nodatalessfaults == false) {
		ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS;
	}

	vnode_resume(vp);

	return error;
}
10530
10531
10532 void
lock_vnode_and_post(vnode_t vp,int kevent_num)10533 lock_vnode_and_post(vnode_t vp, int kevent_num)
10534 {
10535 /* Only take the lock if there's something there! */
10536 if (vp->v_knotes.slh_first != NULL) {
10537 vnode_lock(vp);
10538 KNOTE(&vp->v_knotes, kevent_num);
10539 vnode_unlock(vp);
10540 }
10541 }
10542
10543 void panic_print_vnodes(void);
10544
10545 /* define PANIC_PRINTS_VNODES only if investigation is required. */
10546 #ifdef PANIC_PRINTS_VNODES
10547
10548 static const char *
__vtype(uint16_t vtype)10549 __vtype(uint16_t vtype)
10550 {
10551 switch (vtype) {
10552 case VREG:
10553 return "R";
10554 case VDIR:
10555 return "D";
10556 case VBLK:
10557 return "B";
10558 case VCHR:
10559 return "C";
10560 case VLNK:
10561 return "L";
10562 case VSOCK:
10563 return "S";
10564 case VFIFO:
10565 return "F";
10566 case VBAD:
10567 return "x";
10568 case VSTR:
10569 return "T";
10570 case VCPLX:
10571 return "X";
10572 default:
10573 return "?";
10574 }
10575 }
10576
10577 /*
10578 * build a path from the bottom up
10579 * NOTE: called from the panic path - no alloc'ing of memory and no locks!
10580 */
/*
 * build a path from the bottom up
 * NOTE: called from the panic path - no alloc'ing of memory and no locks!
 *
 * The path is assembled right-to-left into str[0..len): each recursion
 * copies one component in front of what is already there and returns a
 * pointer to the first character of the partial path.  'len' is the
 * remaining space to the left of the text built so far.
 */
static char *
__vpath(vnode_t vp, char *str, int len, int depth)
{
	int vnm_len;
	const char *src;
	char *dst;

	/* out of space: return the buffer start (path is truncated) */
	if (len <= 0) {
		return str;
	}
	/* str + len is the start of the string we created */
	if (!vp->v_name) {
		return str + len;
	}

	/* follow mount vnodes to get the full path */
	if ((vp->v_flag & VROOT)) {
		if (vp->v_mount != NULL && vp->v_mount->mnt_vnodecovered) {
			/* cross the mount point to the covered vnode */
			return __vpath(vp->v_mount->mnt_vnodecovered,
			           str, len, depth + 1);
		}
		return str + len;
	}

	src = vp->v_name;
	vnm_len = strlen(src);
	if (vnm_len > len) {
		/* truncate the name to fit in the string */
		src += (vnm_len - len);
		vnm_len = len;
	}

	/* start from the back and copy just characters (no NULLs) */

	/* this will chop off leaf path (file) names */
	if (depth > 0) {
		dst = str + len - vnm_len;
		memcpy(dst, src, vnm_len);
		len -= vnm_len;
	} else {
		/* depth 0: skip the leaf name itself, only emit ancestors */
		dst = str + len;
	}

	if (vp->v_parent && len > 1) {
		/* follow parents up the chain */
		len--;
		*(dst - 1) = '/';
		return __vpath(vp->v_parent, str, len, depth + 1);
	}

	return dst;
}
10633
#define SANE_VNODE_PRINT_LIMIT 5000
/*
 * Dump type, usecount, iocount and a best-effort path for every vnode on
 * every mount to the panic log.  Runs on the panic path: pointers are
 * validated with ml_validate_nofault() before dereferencing, no locks are
 * taken, and output is capped at SANE_VNODE_PRINT_LIMIT entries.
 */
void
panic_print_vnodes(void)
{
	mount_t mnt;
	vnode_t vp;
	int nvnodes = 0;
	const char *type;
	char *nm;
	char vname[257];

	paniclog_append_noflush("\n***** VNODES *****\n"
	    "TYPE UREF ICNT PATH\n");

	/* NULL-terminate the path name */
	vname[sizeof(vname) - 1] = '\0';

	/*
	 * iterate all vnodelist items in all mounts (mntlist) -> mnt_vnodelist
	 */
	TAILQ_FOREACH(mnt, &mountlist, mnt_list) {
		if (!ml_validate_nofault((vm_offset_t)mnt, sizeof(mount_t))) {
			paniclog_append_noflush("Unable to iterate the mount list %p - encountered an invalid mount pointer %p \n",
			    &mountlist, mnt);
			break;
		}

		TAILQ_FOREACH(vp, &mnt->mnt_vnodelist, v_mntvnodes) {
			if (!ml_validate_nofault((vm_offset_t)vp, sizeof(vnode_t))) {
				paniclog_append_noflush("Unable to iterate the vnode list %p - encountered an invalid vnode pointer %p \n",
				    &mnt->mnt_vnodelist, vp);
				break;
			}

			/* cap the dump so a huge vnode table can't stall the panic */
			if (++nvnodes > SANE_VNODE_PRINT_LIMIT) {
				return;
			}
			type = __vtype(vp->v_type);
			/* __vpath builds right-to-left; nm points into vname */
			nm = __vpath(vp, vname, sizeof(vname) - 1, 0);
			paniclog_append_noflush("%s %0d %0d %s\n",
			    type, vp->v_usecount, vp->v_iocount, nm);
		}
	}
}
10678
10679 #else /* !PANIC_PRINTS_VNODES */
/*
 * Stub used when PANIC_PRINTS_VNODES is not defined: the panic-time
 * vnode dump is compiled out and this call does nothing.
 */
void
panic_print_vnodes(void)
{
	/* intentionally empty */
}
10685 #endif
10686
10687
10688 #ifdef CONFIG_IOCOUNT_TRACE
10689 static void
record_iocount_trace_vnode(vnode_t vp,int type)10690 record_iocount_trace_vnode(vnode_t vp, int type)
10691 {
10692 void *stacks[IOCOUNT_TRACE_MAX_FRAMES] = {0};
10693 int idx = vp->v_iocount_trace[type].idx;
10694
10695 if (idx >= IOCOUNT_TRACE_MAX_IDX) {
10696 return;
10697 }
10698
10699 OSBacktrace((void **)&stacks[0], IOCOUNT_TRACE_MAX_FRAMES);
10700
10701 /*
10702 * To save index space, only store the unique backtraces. If dup is found,
10703 * just bump the count and return.
10704 */
10705 for (int i = 0; i < idx; i++) {
10706 if (memcmp(&stacks[0], &vp->v_iocount_trace[type].stacks[i][0],
10707 sizeof(stacks)) == 0) {
10708 vp->v_iocount_trace[type].counts[i]++;
10709 return;
10710 }
10711 }
10712
10713 memcpy(&vp->v_iocount_trace[type].stacks[idx][0], &stacks[0],
10714 sizeof(stacks));
10715 vp->v_iocount_trace[type].counts[idx] = 1;
10716 vp->v_iocount_trace[type].idx++;
10717 }
10718
10719 static void
record_iocount_trace_uthread(vnode_t vp,int count)10720 record_iocount_trace_uthread(vnode_t vp, int count)
10721 {
10722 struct uthread *ut;
10723
10724 ut = current_uthread();
10725 ut->uu_iocount += count;
10726
10727 if (count == 1) {
10728 if (ut->uu_vpindex < 32) {
10729 OSBacktrace((void **)&ut->uu_pcs[ut->uu_vpindex][0], 10);
10730
10731 ut->uu_vps[ut->uu_vpindex] = vp;
10732 ut->uu_vpindex++;
10733 }
10734 }
10735 }
10736
10737 static void
record_vp(vnode_t vp,int count)10738 record_vp(vnode_t vp, int count)
10739 {
10740 if (__probable(bootarg_vnode_iocount_trace == 0 &&
10741 bootarg_uthread_iocount_trace == 0)) {
10742 return;
10743 }
10744
10745 #if CONFIG_TRIGGERS
10746 if (vp->v_resolve) {
10747 return;
10748 }
10749 #endif
10750 if ((vp->v_flag & VSYSTEM)) {
10751 return;
10752 }
10753
10754 if (bootarg_vnode_iocount_trace) {
10755 record_iocount_trace_vnode(vp,
10756 (count > 0) ? IOCOUNT_TRACE_VGET : IOCOUNT_TRACE_VPUT);
10757 }
10758 if (bootarg_uthread_iocount_trace) {
10759 record_iocount_trace_uthread(vp, count);
10760 }
10761 }
10762 #endif /* CONFIG_IOCOUNT_TRACE */
10763
/*
 * vfs_resolver_result()'s parameters are only consumed when vnode triggers
 * are configured; mark them unused otherwise to keep the build warning-free.
 */
#if CONFIG_TRIGGERS
#define __triggers_unused
#else
#define __triggers_unused __unused
#endif
10769
10770 resolver_result_t
vfs_resolver_result(__triggers_unused uint32_t seq,__triggers_unused enum resolver_status stat,__triggers_unused int aux)10771 vfs_resolver_result(__triggers_unused uint32_t seq, __triggers_unused enum resolver_status stat, __triggers_unused int aux)
10772 {
10773 #if CONFIG_TRIGGERS
10774 /*
10775 * |<--- 32 --->|<--- 28 --->|<- 4 ->|
10776 * sequence auxiliary status
10777 */
10778 return (((uint64_t)seq) << 32) |
10779 (((uint64_t)(aux & 0x0fffffff)) << 4) |
10780 (uint64_t)(stat & 0x0000000F);
10781 #else
10782 return (0x0ULL) | (((uint64_t)ENOTSUP) << 4) | (((uint64_t)RESOLVER_ERROR) & 0xF);
10783 #endif
10784 }
10785
10786 #if CONFIG_TRIGGERS
10787
/* Compile-time switch for verbose trigger debugging output. */
#define TRIG_DEBUG 0

#if TRIG_DEBUG
#define TRIG_LOG(...) do { printf("%s: ", __FUNCTION__); printf(__VA_ARGS__); } while (0)
#else
#define TRIG_LOG(...)
#endif
10795
10796 /*
10797 * Resolver result functions
10798 */
10799
10800
10801 enum resolver_status
vfs_resolver_status(resolver_result_t result)10802 vfs_resolver_status(resolver_result_t result)
10803 {
10804 /* lower 4 bits is status */
10805 return result & 0x0000000F;
10806 }
10807
10808 uint32_t
vfs_resolver_sequence(resolver_result_t result)10809 vfs_resolver_sequence(resolver_result_t result)
10810 {
10811 /* upper 32 bits is sequence */
10812 return (uint32_t)(result >> 32);
10813 }
10814
10815 int
vfs_resolver_auxiliary(resolver_result_t result)10816 vfs_resolver_auxiliary(resolver_result_t result)
10817 {
10818 /* 28 bits of auxiliary */
10819 return (int)(((uint32_t)(result & 0xFFFFFFF0)) >> 4);
10820 }
10821
10822 /*
10823 * SPI
10824 * Call in for resolvers to update vnode trigger state
10825 */
10826 int
vnode_trigger_update(vnode_t vp,resolver_result_t result)10827 vnode_trigger_update(vnode_t vp, resolver_result_t result)
10828 {
10829 vnode_resolve_t rp;
10830 uint32_t seq;
10831 enum resolver_status stat;
10832
10833 if (vp->v_resolve == NULL) {
10834 return EINVAL;
10835 }
10836
10837 stat = vfs_resolver_status(result);
10838 seq = vfs_resolver_sequence(result);
10839
10840 if ((stat != RESOLVER_RESOLVED) && (stat != RESOLVER_UNRESOLVED)) {
10841 return EINVAL;
10842 }
10843
10844 rp = vp->v_resolve;
10845 lck_mtx_lock(&rp->vr_lock);
10846
10847 if (seq > rp->vr_lastseq) {
10848 if (stat == RESOLVER_RESOLVED) {
10849 rp->vr_flags |= VNT_RESOLVED;
10850 } else {
10851 rp->vr_flags &= ~VNT_RESOLVED;
10852 }
10853
10854 rp->vr_lastseq = seq;
10855 }
10856
10857 lck_mtx_unlock(&rp->vr_lock);
10858
10859 return 0;
10860 }
10861
10862 static int
vnode_resolver_attach(vnode_t vp,vnode_resolve_t rp,boolean_t ref)10863 vnode_resolver_attach(vnode_t vp, vnode_resolve_t rp, boolean_t ref)
10864 {
10865 int error;
10866
10867 vnode_lock_spin(vp);
10868 if (vp->v_resolve != NULL) {
10869 vnode_unlock(vp);
10870 return EINVAL;
10871 } else {
10872 vp->v_resolve = rp;
10873 }
10874 vnode_unlock(vp);
10875
10876 if (ref) {
10877 error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE);
10878 if (error != 0) {
10879 panic("VNODE_REF_FORCE didn't help...");
10880 }
10881 }
10882
10883 return 0;
10884 }
10885
10886 /*
10887 * VFS internal interfaces for vnode triggers
10888 *
10889 * vnode must already have an io count on entry
10890 * v_resolve is stable when io count is non-zero
10891 */
/*
 * Allocate a resolver from tinfo's callbacks/flags and attach it to vp.
 * On success the mount's trigger count is bumped; on attach failure the
 * resolver is freed and the error returned.  'external' marks resolvers
 * created on behalf of user space (vfs_addtrigger()), which hold a forced
 * usecount reference released at detach time.
 */
static int
vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, boolean_t external)
{
	vnode_resolve_t rp;
	int result;
	char byte;

#if 1
	/* minimum pointer test (debugging) */
	if (tinfo->vnt_data) {
		byte = *((char *)tinfo->vnt_data);
	}
#endif
	rp = kalloc_type(struct vnode_resolve, Z_WAITOK | Z_NOFAIL);

	lck_mtx_init(&rp->vr_lock, &trigger_vnode_lck_grp, &trigger_vnode_lck_attr);

	rp->vr_resolve_func = tinfo->vnt_resolve_func;
	rp->vr_unresolve_func = tinfo->vnt_unresolve_func;
	rp->vr_rearm_func = tinfo->vnt_rearm_func;
	rp->vr_reclaim_func = tinfo->vnt_reclaim_func;
	rp->vr_data = tinfo->vnt_data;
	rp->vr_lastseq = 0;
	/* only caller-visible flag bits are accepted */
	rp->vr_flags = tinfo->vnt_flags & VNT_VALID_MASK;
	if (external) {
		/* remember to drop the forced usecount ref at detach time */
		rp->vr_flags |= VNT_EXTERNAL;
	}

	result = vnode_resolver_attach(vp, rp, external);
	if (result != 0) {
		goto out;
	}

	/* Keep count of active trigger vnodes per mount */
	if (mp) {
		OSAddAtomic(1, &mp->mnt_numtriggers);
	}

	return result;

out:
	kfree_type(struct vnode_resolve, rp);
	return result;
}
10935
10936 static void
vnode_resolver_release(vnode_resolve_t rp)10937 vnode_resolver_release(vnode_resolve_t rp)
10938 {
10939 /*
10940 * Give them a chance to free any private data
10941 */
10942 if (rp->vr_data && rp->vr_reclaim_func) {
10943 rp->vr_reclaim_func(NULLVP, rp->vr_data);
10944 }
10945
10946 lck_mtx_destroy(&rp->vr_lock, &trigger_vnode_lck_grp);
10947 kfree_type(struct vnode_resolve, rp);
10948 }
10949
10950 /* Called after the vnode has been drained */
10951 static void
vnode_resolver_detach(vnode_t vp)10952 vnode_resolver_detach(vnode_t vp)
10953 {
10954 vnode_resolve_t rp;
10955 mount_t mp;
10956
10957 mp = vnode_mount(vp);
10958
10959 vnode_lock(vp);
10960 rp = vp->v_resolve;
10961 vp->v_resolve = NULL;
10962 vnode_unlock(vp);
10963
10964 if ((rp->vr_flags & VNT_EXTERNAL) != 0) {
10965 vnode_rele_ext(vp, O_EVTONLY, 1);
10966 }
10967
10968 vnode_resolver_release(rp);
10969
10970 /* Keep count of active trigger vnodes per mount */
10971 OSAddAtomic(-1, &mp->mnt_numtriggers);
10972 }
10973
__private_extern__
void
vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx)
{
	/*
	 * Give an auto-rearm trigger vnode the chance to return to the
	 * unresolved (armed) state via its rearm callback.  No-op unless the
	 * vnode has a resolver with a rearm function and VNT_AUTO_REARM set.
	 */
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_rearm_func == NULL) ||
	    (vp->v_resolve->vr_flags & VNT_AUTO_REARM) == 0) {
		return;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/*
	 * Check if VFS initiated this unmount. If so, we'll catch it after the unresolve completes.
	 */
	if (rp->vr_flags & VNT_VFS_UNMOUNTED) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	/* Check if this vnode is already armed */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	lck_mtx_unlock(&rp->vr_lock);

	/* call out to the rearm callback WITHOUT holding vr_lock */
	result = rp->vr_rearm_func(vp, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* apply the result only if its sequence is newer than the last seen */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED) {
			rp->vr_flags &= ~VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);
}
11021
__private_extern__
int
vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx)
{
	/*
	 * Fire the trigger's resolve callback for a lookup that crossed vp
	 * (typically causing something to be mounted there).  Returns 0 when
	 * there is nothing to do or the resolve succeeded; otherwise the
	 * MACF error or the resolver's auxiliary error is propagated.
	 */
	vnode_resolve_t rp;
	enum path_operation op;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	/*
	 * N.B. we cannot call vfs_context_can_resolve_triggers()
	 * here because we really only want to suppress that in
	 * the event the trigger will be resolved by something in
	 * user-space. Any triggers that are resolved by the kernel
	 * do not pose a threat of deadlock.
	 */

	/* Only trigger on topmost vnodes */
	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_resolve_func == NULL) ||
	    (vp->v_mountedhere != NULL)) {
		return 0;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if (rp->vr_flags & VNT_RESOLVED) {
		lck_mtx_unlock(&rp->vr_lock);
		return 0;
	}

	lck_mtx_unlock(&rp->vr_lock);

#if CONFIG_MACF
	if ((rp->vr_flags & VNT_KERN_RESOLVE) == 0) {
		/*
		 * VNT_KERN_RESOLVE indicates this trigger has no parameters
		 * at the discretion of the accessing process other than
		 * the act of access. All other triggers must be checked
		 */
		int rv = mac_vnode_check_trigger_resolve(ctx, vp, &ndp->ni_cnd);
		if (rv != 0) {
			return rv;
		}
	}
#endif

	/*
	 * XXX
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * is there anyway to know this???
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 *
	 * XXX - should we use ISLASTCN to pick the op value??? Perhaps only leafs should
	 * get the richer set and non-leafs should get generic OP_LOOKUP? TBD
	 */
	op = (ndp->ni_op < OP_MAXOP) ? ndp->ni_op: OP_LOOKUP;

	/* call out to the resolver WITHOUT holding vr_lock */
	result = rp->vr_resolve_func(vp, &ndp->ni_cnd, op, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* apply the result only if its sequence is newer than the last seen */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_RESOLVED) {
			rp->vr_flags |= VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
}
11101
static int
vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx)
{
	/*
	 * Fire the trigger's unresolve callback (typically unmounting what
	 * was mounted on vp).  VNT_VFS_UNMOUNTED is held across the callout
	 * so vnode_trigger_rearm() can tell this unmount is VFS-initiated.
	 * Returns 0 on success or no-op; resolver errors are propagated.
	 */
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_unresolve_func == NULL)) {
		return 0;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		printf("vnode_trigger_unresolve: not currently resolved\n");
		lck_mtx_unlock(&rp->vr_lock);
		return 0;
	}

	/* mark that VFS itself is driving this unmount (see vnode_trigger_rearm()) */
	rp->vr_flags |= VNT_VFS_UNMOUNTED;

	lck_mtx_unlock(&rp->vr_lock);

	/*
	 * XXX
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 */

	/* call out to the unresolve callback WITHOUT holding vr_lock */
	result = rp->vr_unresolve_func(vp, flags, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* apply the result only if its sequence is newer than the last seen */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED) {
			rp->vr_flags &= ~VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	rp->vr_flags &= ~VNT_VFS_UNMOUNTED;
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
}
11153
11154 static int
triggerisdescendant(mount_t mp,mount_t rmp)11155 triggerisdescendant(mount_t mp, mount_t rmp)
11156 {
11157 int match = FALSE;
11158
11159 /*
11160 * walk up vnode covered chain looking for a match
11161 */
11162 name_cache_lock_shared();
11163
11164 while (1) {
11165 vnode_t vp;
11166
11167 /* did we encounter "/" ? */
11168 if (mp->mnt_flag & MNT_ROOTFS) {
11169 break;
11170 }
11171
11172 vp = mp->mnt_vnodecovered;
11173 if (vp == NULLVP) {
11174 break;
11175 }
11176
11177 mp = vp->v_mount;
11178 if (mp == rmp) {
11179 match = TRUE;
11180 break;
11181 }
11182 }
11183
11184 name_cache_unlock();
11185
11186 return match;
11187 }
11188
/*
 * Iteration state carried through vfs_iterate() by
 * vfs_nested_trigger_unmounts() / trigger_unmount_callback().
 */
struct trigger_unmount_info {
	vfs_context_t ctx;		/* context passed to unresolve callbacks */
	mount_t top_mp;			/* mount whose nested triggers are being unmounted */
	vnode_t trigger_vp;		/* covered vnode saved for deferred unresolve */
	mount_t trigger_mp;		/* mount expected to sit on trigger_vp */
	uint32_t trigger_vid;		/* vid used to revalidate trigger_vp later */
	int flags;			/* unmount flags passed through */
};
11197
static int
trigger_unmount_callback(mount_t mp, void * arg)
{
	/*
	 * vfs_iterate() callback used by vfs_nested_trigger_unmounts().
	 * For each mounted trigger below infop->top_mp: first unresolve the
	 * covered vnode saved on a previous pass (we cannot unresolve mp
	 * directly while the iterator holds a ref on it), then save this
	 * mount's covered vnode for the next pass.
	 */
	struct trigger_unmount_info * infop = (struct trigger_unmount_info *)arg;
	boolean_t mountedtrigger = FALSE;

	/*
	 * When we encounter the top level mount we're done
	 */
	if (mp == infop->top_mp) {
		return VFS_RETURNED_DONE;
	}

	if ((mp->mnt_vnodecovered == NULL) ||
	    (vnode_getwithref(mp->mnt_vnodecovered) != 0)) {
		return VFS_RETURNED;
	}

	/* is mp sitting on a resolved trigger vnode? */
	if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
	    (mp->mnt_vnodecovered->v_resolve != NULL) &&
	    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_RESOLVED)) {
		mountedtrigger = TRUE;
	}
	vnode_put(mp->mnt_vnodecovered);

	/*
	 * When we encounter a mounted trigger, check if its under the top level mount
	 */
	if (!mountedtrigger || !triggerisdescendant(mp, infop->top_mp)) {
		return VFS_RETURNED;
	}

	/*
	 * Process any pending nested mount (now that its not referenced)
	 */
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithvid(infop->trigger_vp, infop->trigger_vid) == 0)) {
		vnode_t vp = infop->trigger_vp;
		int error;

		infop->trigger_vp = NULLVP;

		/* sanity: the saved vnode must not be covered by the current mount */
		if (mp == vp->v_mountedhere) {
			vnode_put(vp);
			printf("trigger_unmount_callback: unexpected match '%s'\n",
			    mp->mnt_vfsstat.f_mntonname);
			return VFS_RETURNED;
		}
		/* the mount above the saved vnode changed since it was saved */
		if (infop->trigger_mp != vp->v_mountedhere) {
			vnode_put(vp);
			printf("trigger_unmount_callback: trigger mnt changed! (%p != %p)\n",
			    infop->trigger_mp, vp->v_mountedhere);
			goto savenext;
		}

		error = vnode_trigger_unresolve(vp, infop->flags, infop->ctx);
		vnode_put(vp);
		if (error) {
			printf("unresolving: '%s', err %d\n",
			    vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname :
			    "???", error);
			return VFS_RETURNED_DONE; /* stop iteration on errors */
		}
	}
savenext:
	/*
	 * We can't call resolver here since we hold a mount iter
	 * ref on mp so save its covered vp for later processing
	 */
	infop->trigger_vp = mp->mnt_vnodecovered;
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithref(infop->trigger_vp) == 0)) {
		if (infop->trigger_vp->v_mountedhere == mp) {
			/* record the vid so the deferred pass can detect recycling */
			infop->trigger_vid = infop->trigger_vp->v_id;
			infop->trigger_mp = mp;
		}
		vnode_put(infop->trigger_vp);
	}

	return VFS_RETURNED;
}
11279
11280 /*
11281 * Attempt to unmount any trigger mounts nested underneath a mount.
11282 * This is a best effort attempt and no retries are performed here.
11283 *
11284 * Note: mp->mnt_rwlock is held exclusively on entry (so be carefull)
11285 */
__private_extern__
void
vfs_nested_trigger_unmounts(mount_t mp, int flags, vfs_context_t ctx)
{
	struct trigger_unmount_info info;

	/* Must have trigger vnodes */
	if (mp->mnt_numtriggers == 0) {
		return;
	}
	/* Avoid recursive requests (by checking covered vnode) */
	if ((mp->mnt_vnodecovered != NULL) &&
	    (vnode_getwithref(mp->mnt_vnodecovered) == 0)) {
		boolean_t recursive = FALSE;

		/*
		 * If mp itself sits on a trigger whose VFS-initiated unmount
		 * is already in progress (VNT_VFS_UNMOUNTED), this call is
		 * re-entrant; bail out.
		 */
		if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
		    (mp->mnt_vnodecovered->v_resolve != NULL) &&
		    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_VFS_UNMOUNTED)) {
			recursive = TRUE;
		}
		vnode_put(mp->mnt_vnodecovered);
		if (recursive) {
			return;
		}
	}

	/*
	 * Attempt to unmount any nested trigger mounts (best effort)
	 */
	info.ctx = ctx;
	info.top_mp = mp;
	info.trigger_vp = NULLVP;
	info.trigger_vid = 0;
	info.trigger_mp = NULL;
	info.flags = flags;

	/* deepest mounts first, so nested triggers unwind bottom-up */
	(void) vfs_iterate(VFS_ITERATE_TAIL_FIRST, trigger_unmount_callback, &info);

	/*
	 * Process remaining nested mount (now that its not referenced)
	 */
	if ((info.trigger_vp != NULLVP) &&
	    (vnode_getwithvid(info.trigger_vp, info.trigger_vid) == 0)) {
		vnode_t vp = info.trigger_vp;

		if (info.trigger_mp == vp->v_mountedhere) {
			(void) vnode_trigger_unresolve(vp, flags, ctx);
		}
		vnode_put(vp);
	}
}
11337
11338 int
vfs_addtrigger(mount_t mp,const char * relpath,struct vnode_trigger_info * vtip,vfs_context_t ctx)11339 vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx)
11340 {
11341 struct nameidata *ndp;
11342 int res;
11343 vnode_t rvp, vp;
11344 struct vnode_trigger_param vtp;
11345
11346 /*
11347 * Must be called for trigger callback, wherein rwlock is held
11348 */
11349 lck_rw_assert(&mp->mnt_rwlock, LCK_RW_ASSERT_HELD);
11350
11351 TRIG_LOG("Adding trigger at %s\n", relpath);
11352 TRIG_LOG("Trying VFS_ROOT\n");
11353
11354 ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);
11355
11356 /*
11357 * We do a lookup starting at the root of the mountpoint, unwilling
11358 * to cross into other mountpoints.
11359 */
11360 res = VFS_ROOT(mp, &rvp, ctx);
11361 if (res != 0) {
11362 goto out;
11363 }
11364
11365 TRIG_LOG("Trying namei\n");
11366
11367 NDINIT(ndp, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE,
11368 CAST_USER_ADDR_T(relpath), ctx);
11369 ndp->ni_dvp = rvp;
11370 res = namei(ndp);
11371 if (res != 0) {
11372 vnode_put(rvp);
11373 goto out;
11374 }
11375
11376 vp = ndp->ni_vp;
11377 nameidone(ndp);
11378 vnode_put(rvp);
11379
11380 TRIG_LOG("Trying vnode_resolver_create()\n");
11381
11382 /*
11383 * Set up blob. vnode_create() takes a larger structure
11384 * with creation info, and we needed something different
11385 * for this case. One needs to win, or we need to munge both;
11386 * vnode_create() wins.
11387 */
11388 bzero(&vtp, sizeof(vtp));
11389 vtp.vnt_resolve_func = vtip->vti_resolve_func;
11390 vtp.vnt_unresolve_func = vtip->vti_unresolve_func;
11391 vtp.vnt_rearm_func = vtip->vti_rearm_func;
11392 vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
11393 vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
11394 vtp.vnt_data = vtip->vti_data;
11395 vtp.vnt_flags = vtip->vti_flags;
11396
11397 res = vnode_resolver_create(mp, vp, &vtp, TRUE);
11398 vnode_put(vp);
11399 out:
11400 kfree_type(struct nameidata, ndp);
11401 TRIG_LOG("Returning %d\n", res);
11402 return res;
11403 }
11404
11405 #endif /* CONFIG_TRIGGERS */
11406
11407 vm_offset_t
kdebug_vnode(vnode_t vp)11408 kdebug_vnode(vnode_t vp)
11409 {
11410 return VM_KERNEL_ADDRPERM(vp);
11411 }
11412
/* kern.flush_cache_on_write: force a drive cache flush after uncached writes. */
static int flush_cache_on_write = 0;
SYSCTL_INT(_kern, OID_AUTO, flush_cache_on_write,
    CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0,
    "always flush the drive cache on writes to uncached files");
11417
11418 int
vnode_should_flush_after_write(vnode_t vp,int ioflag)11419 vnode_should_flush_after_write(vnode_t vp, int ioflag)
11420 {
11421 return flush_cache_on_write
11422 && (ISSET(ioflag, IO_NOCACHE) || vnode_isnocache(vp));
11423 }
11424
11425 /*
11426 * sysctl for use by disk I/O tracing tools to get the list of existing
11427 * vnodes' paths
11428 */
11429
#define NPATH_WORDS (MAXPATHLEN / sizeof(unsigned long))
/* Per-iteration state for the vfs.generic.trace_paths sysctl handler. */
struct vnode_trace_paths_context {
	uint64_t count;	/* vnodes traced since the last preemption yield */
	/*
	 * Must be a multiple of 4, then -1, for tracing!
	 */
	unsigned long path[NPATH_WORDS + (4 - (NPATH_WORDS % 4)) - 1];
};
11438
11439 static int
vnode_trace_path_callback(struct vnode * vp,void * vctx)11440 vnode_trace_path_callback(struct vnode *vp, void *vctx)
11441 {
11442 struct vnode_trace_paths_context *ctx = vctx;
11443 size_t path_len = sizeof(ctx->path);
11444
11445 int getpath_len = (int)path_len;
11446 if (vn_getpath(vp, (char *)ctx->path, &getpath_len) == 0) {
11447 /* vn_getpath() NUL-terminates, and len includes the NUL. */
11448 assert(getpath_len >= 0);
11449 path_len = (size_t)getpath_len;
11450
11451 assert(path_len <= sizeof(ctx->path));
11452 kdebug_vfs_lookup(ctx->path, (int)path_len, vp,
11453 KDBG_VFS_LOOKUP_FLAG_LOOKUP | KDBG_VFS_LOOKUP_FLAG_NOPROCFILT);
11454
11455 if (++(ctx->count) == 1000) {
11456 thread_yield_to_preemption();
11457 ctx->count = 0;
11458 }
11459 }
11460
11461 return VNODE_RETURNED;
11462 }
11463
11464 static int
vfs_trace_paths_callback(mount_t mp,void * arg)11465 vfs_trace_paths_callback(mount_t mp, void *arg)
11466 {
11467 if (mp->mnt_flag & MNT_LOCAL) {
11468 vnode_iterate(mp, VNODE_ITERATE_ALL, vnode_trace_path_callback, arg);
11469 }
11470
11471 return VFS_RETURNED;
11472 }
11473
11474 static int sysctl_vfs_trace_paths SYSCTL_HANDLER_ARGS {
11475 struct vnode_trace_paths_context ctx;
11476
11477 (void)oidp;
11478 (void)arg1;
11479 (void)arg2;
11480 (void)req;
11481
11482 if (!kauth_cred_issuser(kauth_cred_get())) {
11483 return EPERM;
11484 }
11485
11486 if (!kdebug_enable || !kdebug_debugid_enabled(VFS_LOOKUP)) {
11487 return EINVAL;
11488 }
11489
11490 bzero(&ctx, sizeof(struct vnode_trace_paths_context));
11491
11492 vfs_iterate(0, vfs_trace_paths_callback, &ctx);
11493
11494 return 0;
11495 }
11496
/* vfs.generic.trace_paths: dump all local vnode paths as kdebug lookup events (root only). */
SYSCTL_PROC(_vfs_generic, OID_AUTO, trace_paths, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, NULL, 0, &sysctl_vfs_trace_paths, "-", "trace_paths");
11498