1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/proc_internal.h>
82 #include <sys/kauth.h>
83 #include <sys/mount_internal.h>
84 #include <sys/time.h>
85 #include <sys/lock.h>
86 #include <sys/vnode.h>
87 #include <sys/vnode_internal.h>
88 #include <sys/stat.h>
89 #include <sys/namei.h>
90 #include <sys/ucred.h>
91 #include <sys/buf_internal.h>
92 #include <sys/errno.h>
93 #include <kern/kalloc.h>
94 #include <sys/uio_internal.h>
95 #include <sys/uio.h>
96 #include <sys/domain.h>
97 #include <sys/mbuf.h>
98 #include <sys/syslog.h>
99 #include <sys/ubc_internal.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/filedesc.h>
103 #include <sys/event.h>
104 #include <sys/kdebug.h>
105 #include <sys/kauth.h>
106 #include <sys/user.h>
107 #include <sys/systm.h>
108 #include <sys/kern_memorystatus.h>
109 #include <sys/lockf.h>
110 #include <sys/reboot.h>
111 #include <miscfs/fifofs/fifo.h>
112
113 #include <nfs/nfs_conf.h>
114
115 #include <string.h>
116 #include <machine/machine_routines.h>
117
118 #include <kern/assert.h>
119 #include <mach/kern_return.h>
120 #include <kern/thread.h>
121 #include <kern/sched_prim.h>
122
123 #include <miscfs/specfs/specdev.h>
124
125 #include <mach/mach_types.h>
126 #include <mach/memory_object_types.h>
127 #include <mach/memory_object_control.h>
128
129 #include <kern/kalloc.h> /* kalloc()/kfree() */
130 #include <kern/clock.h> /* delay_for_interval() */
131 #include <libkern/OSAtomic.h> /* OSAddAtomic() */
132 #include <os/atomic_private.h>
133 #if defined(XNU_TARGET_OS_OSX)
134 #include <console/video_console.h>
135 #endif
136
137 #ifdef CONFIG_IOCOUNT_TRACE
138 #include <libkern/OSDebug.h>
139 #endif
140
141 #include <vm/vm_protos.h> /* vnode_pager_vrele() */
142
143 #if CONFIG_MACF
144 #include <security/mac_framework.h>
145 #endif
146
147 #include <vfs/vfs_disk_conditioner.h>
148 #include <libkern/section_keywords.h>
149
150 static LCK_GRP_DECLARE(vnode_lck_grp, "vnode");
151 static LCK_ATTR_DECLARE(vnode_lck_attr, 0, 0);
152
153 #if CONFIG_TRIGGERS
154 static LCK_GRP_DECLARE(trigger_vnode_lck_grp, "trigger_vnode");
155 static LCK_ATTR_DECLARE(trigger_vnode_lck_attr, 0, 0);
156 #endif
157
158 extern lck_mtx_t mnt_list_mtx_lock;
159
160 ZONE_DEFINE(specinfo_zone, "specinfo",
161 sizeof(struct specinfo), ZC_ZFREE_CLEARMEM);
162
163 ZONE_DEFINE(vnode_zone, "vnodes",
164 sizeof(struct vnode), ZC_NOGC | ZC_ZFREE_CLEARMEM);
165
166 enum vtype iftovt_tab[16] = {
167 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
168 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
169 };
170 int vttoif_tab[9] = {
171 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
172 S_IFSOCK, S_IFIFO, S_IFMT,
173 };
174
175 /* XXX These should be in a BSD accessible Mach header, but aren't. */
176 extern void memory_object_mark_used(
177 memory_object_control_t control);
178
179 extern void memory_object_mark_unused(
180 memory_object_control_t control,
181 boolean_t rage);
182
183 extern void memory_object_mark_io_tracking(
184 memory_object_control_t control);
185
/* XXX next prototype should be from <nfs/nfs.h> */
187 extern int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int);
188
189 extern int paniclog_append_noflush(const char *format, ...);
190
/* XXX next prototype should be from <libsa/stdlib.h> but conflicts libkern */
192 __private_extern__ void qsort(
193 void * array,
194 size_t nmembers,
195 size_t member_size,
196 int (*)(const void *, const void *));
197
198 __private_extern__ void vntblinit(void);
199 __private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t,
200 enum uio_seg, int);
201
202 static void vnode_list_add(vnode_t);
203 static void vnode_async_list_add(vnode_t);
204 static void vnode_list_remove(vnode_t);
205 static void vnode_list_remove_locked(vnode_t);
206
207 static void vnode_abort_advlocks(vnode_t);
208 static errno_t vnode_drain(vnode_t);
209 static void vgone(vnode_t, int flags);
210 static void vclean(vnode_t vp, int flag);
211 static void vnode_reclaim_internal(vnode_t, int, int, int);
212
213 static void vnode_dropiocount(vnode_t);
214
215 static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
216 static int vnode_reload(vnode_t);
217
218 static int unmount_callback(mount_t, __unused void *);
219
220 static void insmntque(vnode_t vp, mount_t mp);
221 static int mount_getvfscnt(void);
222 static int mount_fillfsids(fsid_t *, int );
223 static void vnode_iterate_setup(mount_t);
224 int vnode_umount_preflight(mount_t, vnode_t, int);
225 static int vnode_iterate_prepare(mount_t);
226 static int vnode_iterate_reloadq(mount_t);
227 static void vnode_iterate_clear(mount_t);
228 static mount_t vfs_getvfs_locked(fsid_t *);
229 static int vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp,
230 struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx);
231 static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx);
232
233 errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);
234
235 #ifdef CONFIG_IOCOUNT_TRACE
236 static void record_vp(vnode_t vp, int count);
237 static TUNABLE(int, bootarg_vnode_iocount_trace, "vnode_iocount_trace", 0);
238 static TUNABLE(int, bootarg_uthread_iocount_trace, "uthread_iocount_trace", 0);
239 #endif /* CONFIG_IOCOUNT_TRACE */
240
241 #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
242 static TUNABLE(bool, bootarg_no_vnode_jetsam, "-no_vnode_jetsam", false);
243 #endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */
244
245 static TUNABLE(bool, bootarg_no_vnode_drain, "-no_vnode_drain", false);
246
247 boolean_t root_is_CF_drive = FALSE;
248
249 #if CONFIG_TRIGGERS
250 static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, boolean_t external);
251 static void vnode_resolver_detach(vnode_t);
252 #endif
253
254 TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
255 TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */
256 TAILQ_HEAD(async_work_lst, vnode) vnode_async_work_list;
257
258
259 TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */
260 struct timeval rage_tv;
261 int rage_limit = 0;
262 int ragevnodes = 0;
263
264 int deadvnodes_low = 0;
265 int deadvnodes_high = 0;
266
267 uint64_t newvnode = 0;
268 uint64_t newvnode_nodead = 0;
269
270 static int vfs_unmountall_started = 0;
271
272 #define RAGE_LIMIT_MIN 100
273 #define RAGE_TIME_LIMIT 5
274
275 /*
276 * ROSV definitions
277 * NOTE: These are shadowed from PlatformSupport definitions, but XNU
278 * builds standalone.
279 */
280 #define PLATFORM_DATA_VOLUME_MOUNT_POINT "/System/Volumes/Data"
281
282 /*
283 * These could be in PlatformSupport but aren't yet
284 */
285 #define PLATFORM_PREBOOT_VOLUME_MOUNT_POINT "/System/Volumes/Preboot"
286 #define PLATFORM_RECOVERY_VOLUME_MOUNT_POINT "/System/Volumes/Recovery"
287
288 #if CONFIG_MOUNT_VM
289 #define PLATFORM_VM_VOLUME_MOUNT_POINT "/System/Volumes/VM"
290 #endif
291
292 struct mntlist mountlist; /* mounted filesystem list */
293 static int nummounts = 0;
294
295 static int print_busy_vnodes = 0; /* print out busy vnodes */
296
297 #if DIAGNOSTIC
298 #define VLISTCHECK(fun, vp, list) \
299 if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
300 panic("%s: %s vnode not on %slist", (fun), (list), (list));
301 #else
302 #define VLISTCHECK(fun, vp, list)
303 #endif /* DIAGNOSTIC */
304
/*
 * Mark a vnode as being on none of the free/dead/rage/async lists by
 * poisoning its freelist back-pointer with the 0xdeadb sentinel
 * (the same sentinel VONLIST and VLISTCHECK test for).
 */
#define VLISTNONE(vp) \
	do { \
	        (vp)->v_freelist.tqe_next = (struct vnode *)0; \
	        (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \
	} while(0)

/* true if the vnode is currently linked onto one of the vnode lists */
#define VONLIST(vp) \
	((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
313
314 /* remove a vnode from free vnode list */
315 #define VREMFREE(fun, vp) \
316 do { \
317 VLISTCHECK((fun), (vp), "free"); \
318 TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \
319 VLISTNONE((vp)); \
320 freevnodes--; \
321 } while(0)
322
323
324 /* remove a vnode from dead vnode list */
325 #define VREMDEAD(fun, vp) \
326 do { \
327 VLISTCHECK((fun), (vp), "dead"); \
328 TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \
329 VLISTNONE((vp)); \
330 vp->v_listflag &= ~VLIST_DEAD; \
331 deadvnodes--; \
332 } while(0)
333
334
335 /* remove a vnode from async work vnode list */
336 #define VREMASYNC_WORK(fun, vp) \
337 do { \
338 VLISTCHECK((fun), (vp), "async_work"); \
339 TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist); \
340 VLISTNONE((vp)); \
341 vp->v_listflag &= ~VLIST_ASYNC_WORK; \
342 async_work_vnodes--; \
343 } while(0)
344
345
346 /* remove a vnode from rage vnode list */
347 #define VREMRAGE(fun, vp) \
348 do { \
349 if ( !(vp->v_listflag & VLIST_RAGE)) \
350 panic("VREMRAGE: vp not on rage list"); \
351 VLISTCHECK((fun), (vp), "rage"); \
352 TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \
353 VLISTNONE((vp)); \
354 vp->v_listflag &= ~VLIST_RAGE; \
355 ragevnodes--; \
356 } while(0)
357
358 static void async_work_continue(void);
359 static void vn_laundry_continue(void);
360
361 /*
362 * Initialize the vnode management data structures.
363 */
__private_extern__ void
vntblinit(void)
{
	thread_t thread = THREAD_NULL;

	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_rage_list);
	TAILQ_INIT(&vnode_dead_list);
	TAILQ_INIT(&vnode_async_work_list);
	TAILQ_INIT(&mountlist);

	microuptime(&rage_tv);
	/* rapid-age list is sized to ~1% of desiredvnodes, floored at RAGE_LIMIT_MIN */
	rage_limit = desiredvnodes / 100;

	if (rage_limit < RAGE_LIMIT_MIN) {
		rage_limit = RAGE_LIMIT_MIN;
	}

	/* dead-vnode low water: 1% of desiredvnodes capped at 300; high water is double */
	deadvnodes_low = (desiredvnodes) / 100;
	if (deadvnodes_low > 300) {
		deadvnodes_low = 300;
	}
	deadvnodes_high = deadvnodes_low * 2;

	/*
	 * create worker threads
	 * (async_work_continue drains the async work list; vn_laundry_continue
	 * is the vnode laundry thread -- both run for the life of the system,
	 * so the thread references are dropped immediately after start)
	 */
	kernel_thread_start((thread_continue_t)async_work_continue, NULL, &thread);
	thread_deallocate(thread);
	kernel_thread_start((thread_continue_t)vn_laundry_continue, NULL, &thread);
	thread_deallocate(thread);
}
396
397 /* the timeout is in 10 msecs */
398 int
vnode_waitforwrites(vnode_t vp,int output_target,int slpflag,int slptimeout,const char * msg)399 vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg)
400 {
401 int error = 0;
402 struct timespec ts;
403
404 if (output_target < 0) {
405 return EINVAL;
406 }
407
408 KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);
409
410 if (vp->v_numoutput > output_target) {
411 slpflag |= PDROP;
412
413 vnode_lock_spin(vp);
414
415 while ((vp->v_numoutput > output_target) && error == 0) {
416 if (output_target) {
417 vp->v_flag |= VTHROTTLED;
418 } else {
419 vp->v_flag |= VBWAIT;
420 }
421
422 ts.tv_sec = (slptimeout / 100);
423 ts.tv_nsec = (slptimeout % 1000) * 10 * NSEC_PER_USEC * 1000;
424 error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);
425
426 vnode_lock_spin(vp);
427 }
428 vnode_unlock(vp);
429 }
430 KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);
431
432 return error;
433 }
434
435
/*
 * Note the start of one write I/O against the vnode: atomically bump
 * v_numoutput.  Paired with vnode_writedone(), which decrements the
 * count and wakes any vnode_waitforwrites() waiters.
 */
void
vnode_startwrite(vnode_t vp)
{
	OSAddAtomic(1, &vp->v_numoutput);
}
441
442
443 void
vnode_writedone(vnode_t vp)444 vnode_writedone(vnode_t vp)
445 {
446 if (vp) {
447 int need_wakeup = 0;
448
449 OSAddAtomic(-1, &vp->v_numoutput);
450
451 vnode_lock_spin(vp);
452
453 if (vp->v_numoutput < 0) {
454 panic("vnode_writedone: numoutput < 0");
455 }
456
457 if ((vp->v_flag & VTHROTTLED)) {
458 vp->v_flag &= ~VTHROTTLED;
459 need_wakeup = 1;
460 }
461 if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
462 vp->v_flag &= ~VBWAIT;
463 need_wakeup = 1;
464 }
465 vnode_unlock(vp);
466
467 if (need_wakeup) {
468 wakeup((caddr_t)&vp->v_numoutput);
469 }
470 }
471 }
472
473
474
475 int
vnode_hasdirtyblks(vnode_t vp)476 vnode_hasdirtyblks(vnode_t vp)
477 {
478 struct cl_writebehind *wbp;
479
480 /*
481 * Not taking the buf_mtx as there is little
482 * point doing it. Even if the lock is taken the
483 * state can change right after that. If their
484 * needs to be a synchronization, it must be driven
485 * by the caller
486 */
487 if (vp->v_dirtyblkhd.lh_first) {
488 return 1;
489 }
490
491 if (!UBCINFOEXISTS(vp)) {
492 return 0;
493 }
494
495 wbp = vp->v_ubcinfo->cl_wbehind;
496
497 if (wbp && (wbp->cl_number || wbp->cl_scmap)) {
498 return 1;
499 }
500
501 return 0;
502 }
503
504 int
vnode_hascleanblks(vnode_t vp)505 vnode_hascleanblks(vnode_t vp)
506 {
507 /*
508 * Not taking the buf_mtx as there is little
509 * point doing it. Even if the lock is taken the
510 * state can change right after that. If their
511 * needs to be a synchronization, it must be driven
512 * by the caller
513 */
514 if (vp->v_cleanblkhd.lh_first) {
515 return 1;
516 }
517 return 0;
518 }
519
/*
 * Flag the mount as being iterated (MNT_LITER); undone by
 * vnode_iterate_clear().  Called with the mount lock held.
 */
void
vnode_iterate_setup(mount_t mp)
{
	mp->mnt_lflag |= MNT_LITER;
}
525
/*
 * Scan the mount's vnode list for vnodes that would prevent an unmount.
 * Returns 1 as soon as a busy vnode is found, except when
 * print_busy_vnodes is set and FORCECLOSE is clear, in which case it
 * keeps scanning so every busy vnode gets logged (still returning 1).
 * Returns 0 if nothing busy was found.
 */
int
vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
{
	vnode_t vp;
	int ret = 0;

	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		/* directories never count as busy for unmount purposes */
		if (vp->v_type == VDIR) {
			continue;
		}
		/* the caller may designate one vnode to exclude from the check */
		if (vp == skipvp) {
			continue;
		}
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) {
			continue;
		}
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			continue;
		}
		/* WRITECLOSE only cares about regular files open for writing */
		if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG)) {
			continue;
		}

		/* Look for busy vnode (use count beyond kernel-internal holds) */
		if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) {
			ret = 1;
			if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
				vprint("vnode_umount_preflight - busy vnode", vp);
			} else {
				return ret;
			}
		} else if (vp->v_iocount > 0) {
			/* Busy if iocount is > 0 for more than 3 seconds */
			tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz);
			if (vp->v_iocount > 0) {
				ret = 1;
				if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
					vprint("vnode_umount_preflight - busy vnode", vp);
				} else {
					return ret;
				}
			}
			continue;
		}
	}

	return ret;
}
574
575 /*
576 * This routine prepares iteration by moving all the vnodes to worker queue
577 * called with mount lock held
578 */
int
vnode_iterate_prepare(mount_t mp)
{
	vnode_t vp;

	if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
		/* nothing to do */
		return 0;
	}

	/*
	 * Splice the whole mnt_vnodelist onto mnt_workerqueue by hand:
	 * re-point the first vnode's back-link at the worker queue head,
	 * copy the head/tail pointers across, then reset the source list.
	 * (Equivalent to a TAILQ_CONCAT onto an empty worker queue.)
	 */
	vp = TAILQ_FIRST(&mp->mnt_vnodelist);
	vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
	mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
	mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;

	TAILQ_INIT(&mp->mnt_vnodelist);
	/* vnodes created during an iteration accumulate on mnt_newvnodes */
	if (mp->mnt_newvnodes.tqh_first != NULL) {
		panic("vnode_iterate_prepare: newvnode when entering vnode");
	}
	TAILQ_INIT(&mp->mnt_newvnodes);

	return 1;
}
602
603
604 /* called with mount lock held */
/*
 * Merge any leftover worker-queue entries and any vnodes created during
 * the iteration (mnt_newvnodes) back into mnt_vnodelist.  Returns 1 if
 * new vnodes were moved, 0 otherwise.  Called with mount lock held.
 */
int
vnode_iterate_reloadq(mount_t mp)
{
	int moved = 0;

	/* add the remaining entries in workerq to the end of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		struct vnode * mvp;
		mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);

		/* Joining the workerque entities to mount vnode list */
		if (mvp) {
			/* non-empty list: chain worker head after current tail */
			mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
		} else {
			/* empty list: worker head becomes the list head */
			mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
		}
		mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
		mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
		TAILQ_INIT(&mp->mnt_workerqueue);
	}

	/* add the newvnodes to the head of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
		struct vnode * nlvp;
		nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);

		mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
		nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
		if (mp->mnt_vnodelist.tqh_first) {
			mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
		} else {
			/* list was empty: the new-vnode tail is now the list tail */
			mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
		}
		mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
		TAILQ_INIT(&mp->mnt_newvnodes);
		moved = 1;
	}

	return moved;
}
645
646
/*
 * Clear the MNT_LITER iteration flag set by vnode_iterate_setup().
 * Called with the mount lock held.
 */
void
vnode_iterate_clear(mount_t mp)
{
	mp->mnt_lflag &= ~MNT_LITER;
}
652
653 #if defined(__x86_64__)
654
655 #include <i386/panic_hooks.h>
656
/*
 * Panic-time context for vnode_iterate(): records the mount and the
 * vnode being visited so the panic log can identify where iteration
 * stood when the system went down.
 */
struct vnode_iterate_panic_hook {
	panic_hook_t hook;
	mount_t mp;
	struct vnode *vp;
};

/*
 * Panic hook callback: appends the current mount/vnode pointers and
 * their physical-range info to the panic log, then dumps the memory
 * around the mount structure.
 */
static void
vnode_iterate_panic_hook(panic_hook_t *hook_)
{
	struct vnode_iterate_panic_hook *hook = (struct vnode_iterate_panic_hook *)hook_;
	panic_phys_range_t range;
	uint64_t phys;

	/*
	 * NOTE(review): phys (uint64_t) and the range fields are printed
	 * with %p; this relies on LP64 varargs making them interchangeable --
	 * consider %llx/PRIx64 if the field types allow.  Verify against
	 * panic_phys_range_t's declaration.
	 */
	if (panic_phys_range_before(hook->mp, &phys, &range)) {
		paniclog_append_noflush("mp = %p, phys = %p, prev (%p: %p-%p)\n",
		    hook->mp, phys, range.type, range.phys_start,
		    range.phys_start + range.len);
	} else {
		paniclog_append_noflush("mp = %p, phys = %p, prev (!)\n", hook->mp, phys);
	}

	if (panic_phys_range_before(hook->vp, &phys, &range)) {
		paniclog_append_noflush("vp = %p, phys = %p, prev (%p: %p-%p)\n",
		    hook->vp, phys, range.type, range.phys_start,
		    range.phys_start + range.len);
	} else {
		paniclog_append_noflush("vp = %p, phys = %p, prev (!)\n", hook->vp, phys);
	}
	/* dump the 4K page preceding the mount struct plus the next two pages */
	panic_dump_mem((void *)(((vm_offset_t)hook->mp - 4096) & ~4095), 12288);
}
687 #endif /* defined(__x86_64__) */
688
/*
 * Iterate over all vnodes of a mount, invoking callout(vp, arg) on each
 * one that can be safely referenced.  The callout's return value steers
 * the iteration: VNODE_RETURNED / VNODE_CLAIMED continue, the *_DONE
 * variants stop early.  An iocount is taken (vget_internal) before the
 * callout; for VNODE_RETURNED* it is dropped here, for VNODE_CLAIMED*
 * the callout has taken ownership of it.
 */
int
vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
    void *arg)
{
	struct vnode *vp;
	int vid, retval;
	int ret = 0;

	/*
	 * The mount iterate mutex is held for the duration of the iteration.
	 * This can be done by a state flag on the mount structure but we can
	 * run into priority inversion issues sometimes.
	 * Using a mutex allows us to benefit from the priority donation
	 * mechanisms in the kernel for locks. This mutex should never be
	 * acquired in spin mode and it should be acquired before attempting to
	 * acquire the mount lock.
	 */
	mount_iterate_lock(mp);

	mount_lock(mp);

	vnode_iterate_setup(mp);

	/* If it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		mount_iterate_unlock(mp);
		return ret;
	}

#if defined(__x86_64__)
	/* register panic context so a panic mid-iteration logs mp/vp */
	struct vnode_iterate_panic_hook hook;
	hook.mp = mp;
	hook.vp = NULL;
	panic_hook(&hook.hook, vnode_iterate_panic_hook);
#endif
	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
#if defined(__x86_64__)
		hook.vp = vp;
#endif
		/* move the vnode back onto the mount list before we might drop the lock */
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
		vid = vp->v_id;
		/* skip vnodes with no fs state, dead vnodes, or ones re-homed to another mount */
		if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
			continue;
		}
		mount_unlock(mp);

		/* vid (captured above) detects recycling between the unlock and the iocount grab */
		if (vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) {
			mount_lock(mp);
			continue;
		}
		if (flags & VNODE_RELOAD) {
			/*
			 * we're reloading the filesystem
			 * cast out any inactive vnodes...
			 */
			if (vnode_reload(vp)) {
				/* vnode will be recycled on the refcount drop */
				vnode_put(vp);
				mount_lock(mp);
				continue;
			}
		}

		retval = callout(vp, arg);

		switch (retval) {
		case VNODE_RETURNED:
		case VNODE_RETURNED_DONE:
			/* callout returned the reference; drop our iocount */
			vnode_put(vp);
			if (retval == VNODE_RETURNED_DONE) {
				mount_lock(mp);
				ret = 0;
				goto out;
			}
			break;

		case VNODE_CLAIMED_DONE:
			/* callout kept the iocount and wants iteration stopped */
			mount_lock(mp);
			ret = 0;
			goto out;
		case VNODE_CLAIMED:
		default:
			break;
		}
		mount_lock(mp);
	}

out:
#if defined(__x86_64__)
	panic_unhook(&hook.hook);
#endif
	/* put leftover worker-queue / newly-created vnodes back on the mount list */
	(void)vnode_iterate_reloadq(mp);
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	mount_iterate_unlock(mp);
	return ret;
}
793
/* serialize renames within this mount (mnt_renamelock) */
void
mount_lock_renames(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_renamelock);
}

void
mount_unlock_renames(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_renamelock);
}

/* serialize vnode_iterate() passes over this mount (see vnode_iterate()) */
void
mount_iterate_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_iter_lock);
}

void
mount_iterate_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_iter_lock);
}

/* general-purpose mount mutex; the spin variant is for short critical sections */
void
mount_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_mlock);
}

void
mount_lock_spin(mount_t mp)
{
	lck_mtx_lock_spin(&mp->mnt_mlock);
}

/* releases either acquisition flavor of mnt_mlock */
void
mount_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_mlock);
}
835
836
837 void
mount_ref(mount_t mp,int locked)838 mount_ref(mount_t mp, int locked)
839 {
840 if (!locked) {
841 mount_lock_spin(mp);
842 }
843
844 mp->mnt_count++;
845
846 if (!locked) {
847 mount_unlock(mp);
848 }
849 }
850
851
852 void
mount_drop(mount_t mp,int locked)853 mount_drop(mount_t mp, int locked)
854 {
855 if (!locked) {
856 mount_lock_spin(mp);
857 }
858
859 mp->mnt_count--;
860
861 if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN)) {
862 wakeup(&mp->mnt_lflag);
863 }
864
865 if (!locked) {
866 mount_unlock(mp);
867 }
868 }
869
870
871 int
mount_iterref(mount_t mp,int locked)872 mount_iterref(mount_t mp, int locked)
873 {
874 int retval = 0;
875
876 if (!locked) {
877 mount_list_lock();
878 }
879 if (mp->mnt_iterref < 0) {
880 retval = 1;
881 } else {
882 mp->mnt_iterref++;
883 }
884 if (!locked) {
885 mount_list_unlock();
886 }
887 return retval;
888 }
889
890 int
mount_isdrained(mount_t mp,int locked)891 mount_isdrained(mount_t mp, int locked)
892 {
893 int retval;
894
895 if (!locked) {
896 mount_list_lock();
897 }
898 if (mp->mnt_iterref < 0) {
899 retval = 1;
900 } else {
901 retval = 0;
902 }
903 if (!locked) {
904 mount_list_unlock();
905 }
906 return retval;
907 }
908
/*
 * Drop an iteration reference taken by mount_iterref() and wake any
 * thread blocked in mount_iterdrain() waiting for iterators to finish.
 */
void
mount_iterdrop(mount_t mp)
{
	mount_list_lock();
	mp->mnt_iterref--;
	wakeup(&mp->mnt_iterref);
	mount_list_unlock();
}
917
/*
 * Wait for all outstanding mount_iterref() holders to drop, then park
 * mnt_iterref at -1 to mark the mount drained (rejects further
 * mount_iterref() calls until mount_iterreset()).
 */
void
mount_iterdrain(mount_t mp)
{
	mount_list_lock();
	while (mp->mnt_iterref) {
		msleep((caddr_t)&mp->mnt_iterref, &mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
	}
	/* mount iterations drained */
	mp->mnt_iterref = -1;
	mount_list_unlock();
}
/*
 * Undo a mount_iterdrain(): restore mnt_iterref to 0 so new iteration
 * references may be taken.  Only acts on the drained (-1) state; a live
 * refcount is left untouched.
 */
void
mount_iterreset(mount_t mp)
{
	mount_list_lock();
	if (mp->mnt_iterref == -1) {
		mp->mnt_iterref = 0;
	}
	mount_list_unlock();
}
938
939 /* always called with mount lock held */
int
mount_refdrain(mount_t mp)
{
	/* nested drains indicate a caller bug */
	if (mp->mnt_lflag & MNT_LDRAIN) {
		panic("already in drain");
	}
	mp->mnt_lflag |= MNT_LDRAIN;

	/* mount_drop() issues the wakeup on &mnt_lflag when the count hits zero */
	while (mp->mnt_count) {
		msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);
	}

	/* by the time the refs are gone, no vnodes may remain on the mount */
	if (mp->mnt_vnodelist.tqh_first != NULL) {
		panic("mount_refdrain: dangling vnode");
	}

	mp->mnt_lflag &= ~MNT_LDRAIN;

	return 0;
}
960
/* Tags the mount point as not supporting extended readdir for NFS exports */
void
mount_set_noreaddirext(mount_t mp)
{
	/* take the mount lock so the flag update doesn't race other mnt_kern_flag writers */
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_DENY_READDIREXT;
	mount_unlock(mp);
}
969
970 /*
971 * Mark a mount point as busy. Used to synchronize access and to delay
972 * unmounting.
973 */
int
vfs_busy(mount_t mp, int flags)
{
restart:
	/* a dead mount can never be busied */
	if (mp->mnt_lflag & MNT_LDEAD) {
		return ENOENT;
	}

	mount_lock(mp);

	if (mp->mnt_lflag & MNT_LUNMOUNT) {
		if (flags & LK_NOWAIT || mp->mnt_lflag & MNT_LDEAD) {
			mount_unlock(mp);
			return ENOENT;
		}

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		mp->mnt_lflag |= MNT_LWAIT;
		/* PDROP: msleep releases the mount lock before returning */
		msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
		return ENOENT;
	}

	mount_unlock(mp);

	/* the shared hold is released by vfs_unbusy() */
	lck_rw_lock_shared(&mp->mnt_rwlock);

	/*
	 * Until we are granted the rwlock, it's possible for the mount point to
	 * change state, so re-evaluate before granting the vfs_busy.
	 */
	if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
		lck_rw_done(&mp->mnt_rwlock);
		goto restart;
	}
	return 0;
}
1015
1016 /*
1017 * Free a busy filesystem.
1018 */
void
vfs_unbusy(mount_t mp)
{
	/* drop the shared hold on mnt_rwlock taken by vfs_busy() */
	lck_rw_done(&mp->mnt_rwlock);
}
1024
1025
1026
/*
 * Tear down a root mount structure created by
 * vfs_rootmountalloc_internal() whose mount attempt failed: undo the
 * vfstable refcount, release the busy hold, destroy the locks and
 * MAC label, and free the mount.
 */
static void
vfs_rootmountfailed(mount_t mp)
{
	mount_list_lock();
	/* undo the vfc_refcount++ done at allocation time */
	mp->mnt_vtable->vfc_refcount--;
	mount_list_unlock();

	/* release the vfs_busy() hold taken in vfs_rootmountalloc_internal() */
	vfs_unbusy(mp);

	mount_lock_destroy(mp);

#if CONFIG_MACF
	mac_mount_label_destroy(mp);
#endif

	zfree(mount_zone, mp);
}
1044
1045 /*
1046 * Lookup a filesystem type, and if found allocate and initialize
1047 * a mount structure for it.
1048 *
1049 * Devname is usually updated by mount(8) after booting.
1050 */
static mount_t
vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
{
	mount_t mp;

	mp = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO);
	/* Initialize the default IO constraints */
	mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
	mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
	mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
	mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
	mp->mnt_devblocksize = DEV_BSIZE;
	mp->mnt_alignmentmask = PAGE_MASK;
	mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
	mp->mnt_ioscale = 1;
	mp->mnt_ioflags = 0;
	mp->mnt_realrootvp = NULLVP;
	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
	mp->mnt_throttle_mask = LOWPRI_MAX_NUM_DEV - 1;
	mp->mnt_devbsdunit = 0;

	mount_lock_init(mp);
	/* take the busy hold; released by vfs_unbusy()/vfs_rootmountfailed() */
	(void)vfs_busy(mp, LK_NOWAIT);

	TAILQ_INIT(&mp->mnt_vnodelist);
	TAILQ_INIT(&mp->mnt_workerqueue);
	TAILQ_INIT(&mp->mnt_newvnodes);

	/* wire the mount to its filesystem type and ops vector */
	mp->mnt_vtable = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	/* root starts read-only; remounted read-write later in boot */
	mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
	mp->mnt_vnodecovered = NULLVP;
	//mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;

	/* refcount is undone by vfs_rootmountfailed() on mount failure */
	mount_list_lock();
	vfsp->vfc_refcount++;
	mount_list_unlock();

	strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
	mp->mnt_vfsstat.f_mntonname[0] = '/';
	/* XXX const poisoning layering violation */
	(void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);

#if CONFIG_MACF
	mac_mount_label_init(mp);
	mac_mount_label_associate(vfs_context_kernel(), mp);
#endif
	return mp;
}
1101
1102 errno_t
vfs_rootmountalloc(const char * fstypename,const char * devname,mount_t * mpp)1103 vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
1104 {
1105 struct vfstable *vfsp;
1106
1107 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
1108 if (!strncmp(vfsp->vfc_name, fstypename,
1109 sizeof(vfsp->vfc_name))) {
1110 break;
1111 }
1112 }
1113 if (vfsp == NULL) {
1114 return ENODEV;
1115 }
1116
1117 *mpp = vfs_rootmountalloc_internal(vfsp, devname);
1118
1119 if (*mpp) {
1120 return 0;
1121 }
1122
1123 return ENOMEM;
1124 }
1125
1126 #define DBG_MOUNTROOT (FSDBG_CODE(DBG_MOUNT, 0))
1127
1128 /*
1129 * Find an appropriate filesystem to use for the root. If a filesystem
1130 * has not been preselected, walk through the list of known filesystems
1131 * trying those that have mountroot routines, and try them until one
1132 * works or we have tried them all.
1133 */
1134 extern int (*mountroot)(void);
1135
int
vfs_mountroot(void)
{
#if CONFIG_MACF
	struct vnode *vp;
#endif
	struct vfstable *vfsp;
	vfs_context_t ctx = vfs_context_kernel();
	struct vfs_attr vfsattr;
	int error;
	mount_t mp;
	vnode_t bdevvp_rootvp;

	KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_START);
	if (mountroot != NULL) {
		/*
		 * used for netboot which follows a different set of rules
		 */
		error = (*mountroot)();

		KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 0);
		return error;
	}
	/* Create the block-device vnode for the root device (rootdev). */
	if ((error = bdevvp(rootdev, &rootvp))) {
		printf("vfs_mountroot: can't setup bdevvp\n");

		KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 1);
		return error;
	}
	/*
	 * 4951998 - code we call in vfc_mountroot may replace rootvp
	 * so keep a local copy for some house keeping.
	 */
	bdevvp_rootvp = rootvp;

	/*
	 * Try each registered filesystem that advertises root-mount
	 * capability (either a vfc_mountroot hook or VFC_VFSCANMOUNTROOT),
	 * until one succeeds.
	 */
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL
		    && !ISSET(vfsp->vfc_vfsflags, VFC_VFSCANMOUNTROOT)) {
			continue;
		}

		mp = vfs_rootmountalloc_internal(vfsp, "root_device");
		mp->mnt_devvp = rootvp;

		if (vfsp->vfc_mountroot) {
			/* Legacy entry point: filesystem-supplied root-mount routine. */
			error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx);
		} else {
			error = VFS_MOUNT(mp, rootvp, 0, ctx);
		}

		if (!error) {
			if (bdevvp_rootvp != rootvp) {
				/*
				 * rootvp changed...
				 * bump the iocount and fix up mnt_devvp for the
				 * new rootvp (it will already have a usecount taken)...
				 * drop the iocount and the usecount on the original
				 * since we are no longer going to use it...
				 */
				vnode_getwithref(rootvp);
				mp->mnt_devvp = rootvp;

				vnode_rele(bdevvp_rootvp);
				vnode_put(bdevvp_rootvp);
			}
			mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;

			vfs_unbusy(mp);

			mount_list_add(mp);

			/*
			 * cache the IO attributes for the underlying physical media...
			 * an error return indicates the underlying driver doesn't
			 * support all the queries necessary... however, reasonable
			 * defaults will have been set, so no reason to bail or care
			 */
			vfs_init_io_attributes(rootvp, mp);

			if (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) {
				root_is_CF_drive = TRUE;
			}

			/*
			 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
			 */
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
				mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
			}
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
				mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
			}

#if defined(XNU_TARGET_OS_OSX)
			/* Tune the boot-progress animation to the media class. */
			uint32_t speed;

			if (MNTK_VIRTUALDEV & mp->mnt_kern_flag) {
				speed = 128;
			} else if (disk_conditioner_mount_is_ssd(mp)) {
				speed = 7 * 256;
			} else {
				speed = 256;
			}
			vc_progress_setdiskspeed(speed);
#endif /* XNU_TARGET_OS_OSX */
			/*
			 * Probe root file system for additional features.
			 */
			(void)VFS_START(mp, 0, ctx);

			VFSATTR_INIT(&vfsattr);
			VFSATTR_WANTED(&vfsattr, f_capabilities);
			if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
			    VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
				/*
				 * A capability counts only when both the capability
				 * bit and its corresponding "valid" bit are set.
				 */
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
					mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
				}
#if NAMEDSTREAMS
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
					mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
				}
#endif
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
					mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
				}

				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) {
					mp->mnt_kern_flag |= MNTK_DIR_HARDLINKS;
				}
			}

			/*
			 * get rid of iocount reference returned
			 * by bdevvp (or picked up by us on the substituted
			 * rootvp)... it (or we) will have also taken
			 * a usecount reference which we want to keep
			 */
			vnode_put(rootvp);

#if CONFIG_MACF
			if ((vfs_flags(mp) & MNT_MULTILABEL) == 0) {
				KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 2);
				return 0;
			}

			error = VFS_ROOT(mp, &vp, ctx);
			if (error) {
				printf("%s() VFS_ROOT() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
			error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
			/*
			 * get rid of reference provided by VFS_ROOT
			 */
			vnode_put(vp);

			if (error) {
				printf("%s() vnode_label() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
#endif
			KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 3);
			return 0;
		}
		/* This filesystem could not mount the root; release and try the next. */
		vfs_rootmountfailed(mp);
#if CONFIG_MACF
fail:
#endif
		if (error != EINVAL) {
			printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
		}
	}
	KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error ? error : ENODEV, 4);
	return ENODEV;
}
1319
/*
 * vfs_iterate() callback: flush all name-cache entries belonging to
 * this mount.  Always returns VFS_RETURNED so iteration continues
 * over the remaining mounts.
 */
static int
cache_purge_callback(mount_t mp, __unused void * arg)
{
	cache_purgevfs(mp);
	return VFS_RETURNED;
}
1326
1327 extern lck_rw_t rootvnode_rw_lock;
1328 extern void set_rootvnode(vnode_t);
1329
1330
/*
 * vfs_iterate() callback used after a root pivot: rebuild each mount's
 * f_mntonname by asking the name layer for the current path of the
 * covered vnode.  Mounts at exactly "/" or "/dev" are left untouched,
 * since those paths are preserved across the pivot.
 */
static int
mntonname_fixup_callback(mount_t mp, __unused void *arg)
{
	int error = 0;

	/*
	 * sizeof("...") includes the terminating NUL, so these strncmp
	 * calls are exact-string matches, not prefix matches.
	 */
	if ((strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/", sizeof("/")) == 0) ||
	    (strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/dev", sizeof("/dev")) == 0)) {
		return 0;
	}

	/*
	 * NOTE(review): on vfs_busy failure this returns -1 rather than one
	 * of the VFS_RETURNED* codes the iterator normally receives —
	 * confirm against vfs_iterate()'s handling of callback returns.
	 */
	if ((error = vfs_busy(mp, LK_NOWAIT))) {
		printf("vfs_busy failed with %d for %s\n", error, mp->mnt_vfsstat.f_mntonname);
		return -1;
	}

	/* Rewrite f_mntonname in place from the covered vnode's path. */
	int pathlen = MAXPATHLEN;
	if ((error = vn_getpath_ext(mp->mnt_vnodecovered, NULL, mp->mnt_vfsstat.f_mntonname, &pathlen, VN_GETPATH_FSENTER))) {
		printf("vn_getpath_ext failed with %d for mnt_vnodecovered of %s\n", error, mp->mnt_vfsstat.f_mntonname);
	}

	vfs_unbusy(mp);

	return error;
}
1355
/*
 * vfs_iterate() callback: clear MNTK_BACKS_ROOT on every mount.  After
 * a root pivot the caller is responsible for re-marking whichever
 * filesystem (if any) actually backs the new root (see vfs_switch_root
 * block comment).  Taken with mnt_rwlock held exclusive, matching other
 * mnt_kern_flag writers in this file.
 */
static int
clear_mntk_backs_root_callback(mount_t mp, __unused void *arg)
{
	lck_rw_lock_exclusive(&mp->mnt_rwlock);
	mp->mnt_kern_flag &= ~MNTK_BACKS_ROOT;
	lck_rw_done(&mp->mnt_rwlock);
	return VFS_RETURNED;
}
1364
1365 static int
verify_incoming_rootfs(vnode_t * incoming_rootvnodep,vfs_context_t ctx,vfs_switch_root_flags_t flags)1366 verify_incoming_rootfs(vnode_t *incoming_rootvnodep, vfs_context_t ctx,
1367 vfs_switch_root_flags_t flags)
1368 {
1369 mount_t mp;
1370 vnode_t tdp;
1371 vnode_t incoming_rootvnode_with_iocount = *incoming_rootvnodep;
1372 vnode_t incoming_rootvnode_with_usecount = NULLVP;
1373 int error = 0;
1374
1375 if (vnode_vtype(incoming_rootvnode_with_iocount) != VDIR) {
1376 printf("Incoming rootfs path not a directory\n");
1377 error = ENOTDIR;
1378 goto done;
1379 }
1380
1381 /*
1382 * Before we call VFS_ROOT, we have to let go of the iocount already
1383 * acquired, but before doing that get a usecount.
1384 */
1385 vnode_ref_ext(incoming_rootvnode_with_iocount, 0, VNODE_REF_FORCE);
1386 incoming_rootvnode_with_usecount = incoming_rootvnode_with_iocount;
1387 vnode_lock_spin(incoming_rootvnode_with_usecount);
1388 if ((mp = incoming_rootvnode_with_usecount->v_mount)) {
1389 mp->mnt_crossref++;
1390 vnode_unlock(incoming_rootvnode_with_usecount);
1391 } else {
1392 vnode_unlock(incoming_rootvnode_with_usecount);
1393 printf("Incoming rootfs root vnode does not have associated mount\n");
1394 error = ENOTDIR;
1395 goto done;
1396 }
1397
1398 if (vfs_busy(mp, LK_NOWAIT)) {
1399 printf("Incoming rootfs root vnode mount is busy\n");
1400 error = ENOENT;
1401 goto out;
1402 }
1403
1404 vnode_put(incoming_rootvnode_with_iocount);
1405 incoming_rootvnode_with_iocount = NULLVP;
1406
1407 error = VFS_ROOT(mp, &tdp, ctx);
1408
1409 if (error) {
1410 printf("Could not get rootvnode of incoming rootfs\n");
1411 } else if (tdp != incoming_rootvnode_with_usecount) {
1412 vnode_put(tdp);
1413 tdp = NULLVP;
1414 printf("Incoming rootfs root vnode mount is is not a mountpoint\n");
1415 error = EINVAL;
1416 goto out_busy;
1417 } else {
1418 incoming_rootvnode_with_iocount = tdp;
1419 tdp = NULLVP;
1420 }
1421
1422 if ((flags & VFSSR_VIRTUALDEV_PROHIBITED) != 0) {
1423 if (mp->mnt_flag & MNTK_VIRTUALDEV) {
1424 error = ENODEV;
1425 }
1426 if (error) {
1427 printf("Incoming rootfs is backed by a virtual device; cannot switch to it");
1428 goto out_busy;
1429 }
1430 }
1431
1432 out_busy:
1433 vfs_unbusy(mp);
1434
1435 out:
1436 vnode_lock(incoming_rootvnode_with_usecount);
1437 mp->mnt_crossref--;
1438 if (mp->mnt_crossref < 0) {
1439 panic("mount cross refs -ve");
1440 }
1441 vnode_unlock(incoming_rootvnode_with_usecount);
1442
1443 done:
1444 if (incoming_rootvnode_with_usecount) {
1445 vnode_rele(incoming_rootvnode_with_usecount);
1446 incoming_rootvnode_with_usecount = NULLVP;
1447 }
1448
1449 if (error && incoming_rootvnode_with_iocount) {
1450 vnode_put(incoming_rootvnode_with_iocount);
1451 incoming_rootvnode_with_iocount = NULLVP;
1452 }
1453
1454 *incoming_rootvnodep = incoming_rootvnode_with_iocount;
1455 return error;
1456 }
1457
1458 /*
1459 * vfs_switch_root()
1460 *
1461 * Move the current root volume, and put a different volume at the root.
1462 *
1463 * incoming_vol_old_path: This is the path where the incoming root volume
1464 * is mounted when this function begins.
1465 * outgoing_vol_new_path: This is the path where the outgoing root volume
1466 * will be mounted when this function (successfully) ends.
1467 * Note: Do not use a leading slash.
1468 *
1469 * Volumes mounted at several fixed points (including /dev) will be preserved
1470 * at the same absolute path. That means they will move within the folder
1471 * hierarchy during the pivot operation. For example, /dev before the pivot
1472 * will be at /dev after the pivot.
1473 *
1474 * If any filesystem has MNTK_BACKS_ROOT set, it will be cleared. If the
1475 * incoming root volume is actually a disk image backed by some other
1476 * filesystem, it is the caller's responsibility to re-set MNTK_BACKS_ROOT
1477 * as appropriate.
1478 */
int
vfs_switch_root(const char *incoming_vol_old_path,
    const char *outgoing_vol_new_path,
    vfs_switch_root_flags_t flags)
{
	// grumble grumble
#define countof(x) (sizeof(x) / sizeof(x[0]))

	/* Bookkeeping for one mount that must survive the pivot in place. */
	struct preserved_mount {
		vnode_t pm_rootvnode;       /* root vnode of the preserved mount (iocount held) */
		mount_t pm_mount;           /* the preserved mount itself */
		vnode_t pm_new_covered_vp;  /* its mountpoint on the incoming root volume */
		vnode_t pm_old_covered_vp;  /* its mountpoint on the outgoing root volume */
		const char *pm_path;        /* path relative to the root, no leading slash */
	};

	vfs_context_t ctx = vfs_context_kernel();
	vnode_t incoming_rootvnode = NULLVP;
	vnode_t outgoing_vol_new_covered_vp = NULLVP;
	vnode_t incoming_vol_old_covered_vp = NULLVP;
	mount_t outgoing = NULL;
	mount_t incoming = NULL;

	struct preserved_mount devfs = { NULLVP, NULL, NULLVP, NULLVP, "dev" };
	struct preserved_mount preboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Preboot" };
	struct preserved_mount recovery = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Recovery" };
	struct preserved_mount vm = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/VM" };
	struct preserved_mount update = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Update" };
	struct preserved_mount iscPreboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/iSCPreboot" };
	struct preserved_mount hardware = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Hardware" };
	struct preserved_mount xarts = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/xarts" };
	struct preserved_mount factorylogs = { NULLVP, NULL, NULLVP, NULLVP, "FactoryLogs" };
	struct preserved_mount idiags = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Diags" };

	struct preserved_mount *preserved[10];
	preserved[0] = &devfs;
	preserved[1] = &preboot;
	preserved[2] = &recovery;
	preserved[3] = &vm;
	preserved[4] = &update;
	preserved[5] = &iscPreboot;
	preserved[6] = &hardware;
	preserved[7] = &xarts;
	preserved[8] = &factorylogs;
	preserved[9] = &idiags;

	int error;

	printf("%s : shuffling mount points : %s <-> / <-> %s\n", __FUNCTION__, incoming_vol_old_path, outgoing_vol_new_path);

	if (outgoing_vol_new_path[0] == '/') {
		// I should have written this to be more helpful and just advance the pointer forward past the slash
		printf("Do not use a leading slash in outgoing_vol_new_path\n");
		return EINVAL;
	}

	// Set incoming_rootvnode.
	// Find the vnode representing the mountpoint of the new root
	// filesystem. That will be the new root directory.
	error = vnode_lookup(incoming_vol_old_path, 0, &incoming_rootvnode, ctx);
	if (error) {
		printf("Incoming rootfs root vnode not found\n");
		error = ENOENT;
		goto done;
	}

	/*
	 * This function drops the iocount and sets the vnode to NULL on error.
	 */
	error = verify_incoming_rootfs(&incoming_rootvnode, ctx, flags);
	if (error) {
		goto done;
	}

	/*
	 * Set outgoing_vol_new_covered_vp.
	 * Find the vnode representing the future mountpoint of the old
	 * root filesystem, inside the directory incoming_rootvnode.
	 * Right now it's at "/incoming_vol_old_path/outgoing_vol_new_path".
	 * soon it will become "/oldrootfs_path_after", which will be covered.
	 */
	error = vnode_lookupat(outgoing_vol_new_path, 0, &outgoing_vol_new_covered_vp, ctx, incoming_rootvnode);
	if (error) {
		printf("Outgoing rootfs path not found, abandoning / switch, error = %d\n", error);
		error = ENOENT;
		goto done;
	}
	if (vnode_vtype(outgoing_vol_new_covered_vp) != VDIR) {
		printf("Outgoing rootfs path is not a directory, abandoning / switch\n");
		error = ENOTDIR;
		goto done;
	}

	/*
	 * Find the preserved mounts - see if they are mounted. Get their root
	 * vnode if they are. If they aren't, leave rootvnode NULL which will
	 * be the signal to ignore this mount later on.
	 *
	 * Also get preserved mounts' new_covered_vp.
	 * Find the node representing the folder "dev" inside the directory newrootvnode.
	 * Right now it's at "/incoming_vol_old_path/dev".
	 * Soon it will become /dev, which will be covered by the devfs mountpoint.
	 */
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];

		error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_rootvnode, ctx, rootvnode);
		if (error) {
			printf("skipping preserved mountpoint because not found or error: %d: %s\n", error, pmi->pm_path);
			// not fatal. try the next one in the list.
			continue;
		}
		/* VROOT set means this vnode is the root of a mounted filesystem. */
		bool is_mountpoint = false;
		vnode_lock_spin(pmi->pm_rootvnode);
		if ((pmi->pm_rootvnode->v_flag & VROOT) != 0) {
			is_mountpoint = true;
		}
		vnode_unlock(pmi->pm_rootvnode);
		if (!is_mountpoint) {
			printf("skipping preserved mountpoint because not a mountpoint: %s\n", pmi->pm_path);
			vnode_put(pmi->pm_rootvnode);
			pmi->pm_rootvnode = NULLVP;
			// not fatal. try the next one in the list.
			continue;
		}

		error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_new_covered_vp, ctx, incoming_rootvnode);
		if (error) {
			printf("preserved new mount directory not found or error: %d: %s\n", error, pmi->pm_path);
			error = ENOENT;
			goto done;
		}
		if (vnode_vtype(pmi->pm_new_covered_vp) != VDIR) {
			printf("preserved new mount directory not directory: %s\n", pmi->pm_path);
			error = ENOTDIR;
			goto done;
		}

		printf("will preserve mountpoint across pivot: /%s\n", pmi->pm_path);
	}

	/*
	 * --
	 * At this point, everything has been prepared and all error conditions
	 * have been checked. We check everything we can before this point;
	 * from now on we start making destructive changes, and we can't stop
	 * until we reach the end.
	 * ----
	 */

	/* this usecount is transferred to the mnt_vnodecovered */
	vnode_ref_ext(outgoing_vol_new_covered_vp, 0, VNODE_REF_FORCE);
	/* this usecount is transferred to set_rootvnode */
	vnode_ref_ext(incoming_rootvnode, 0, VNODE_REF_FORCE);


	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		/* this usecount is transferred to the mnt_vnodecovered */
		vnode_ref_ext(pmi->pm_new_covered_vp, 0, VNODE_REF_FORCE);

		/* The new_covered_vp is a mountpoint from now on. */
		vnode_lock_spin(pmi->pm_new_covered_vp);
		pmi->pm_new_covered_vp->v_flag |= VMOUNT;
		vnode_unlock(pmi->pm_new_covered_vp);
	}

	/* The outgoing_vol_new_covered_vp is a mountpoint from now on. */
	vnode_lock_spin(outgoing_vol_new_covered_vp);
	outgoing_vol_new_covered_vp->v_flag |= VMOUNT;
	vnode_unlock(outgoing_vol_new_covered_vp);


	/*
	 * Identify the mount_ts of the mounted filesystems that are being
	 * manipulated: outgoing rootfs, incoming rootfs, and the preserved
	 * mounts.
	 */
	outgoing = rootvnode->v_mount;
	incoming = incoming_rootvnode->v_mount;
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		pmi->pm_mount = pmi->pm_rootvnode->v_mount;
	}

	/* Block concurrent vnode_lookups for the remainder of the pivot. */
	lck_rw_lock_exclusive(&rootvnode_rw_lock);

	/* Setup incoming as the new rootfs */
	lck_rw_lock_exclusive(&incoming->mnt_rwlock);
	incoming_vol_old_covered_vp = incoming->mnt_vnodecovered;
	incoming->mnt_vnodecovered = NULLVP;
	strlcpy(incoming->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
	incoming->mnt_flag |= MNT_ROOTFS;
	lck_rw_done(&incoming->mnt_rwlock);

	/*
	 * The preserved mountpoints will now be moved to
	 * incoming_rootnode/pm_path, and then by the end of the function,
	 * since incoming_rootnode is going to /, the preserved mounts
	 * will be end up back at /pm_path
	 */
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		lck_rw_lock_exclusive(&pmi->pm_mount->mnt_rwlock);
		pmi->pm_old_covered_vp = pmi->pm_mount->mnt_vnodecovered;
		pmi->pm_mount->mnt_vnodecovered = pmi->pm_new_covered_vp;
		vnode_lock_spin(pmi->pm_new_covered_vp);
		pmi->pm_new_covered_vp->v_mountedhere = pmi->pm_mount;
		vnode_unlock(pmi->pm_new_covered_vp);
		lck_rw_done(&pmi->pm_mount->mnt_rwlock);
	}

	/*
	 * The old root volume now covers outgoing_vol_new_covered_vp
	 * on the new root volume. Remove the ROOTFS marker.
	 * Now it is to be found at outgoing_vol_new_path
	 */
	lck_rw_lock_exclusive(&outgoing->mnt_rwlock);
	outgoing->mnt_vnodecovered = outgoing_vol_new_covered_vp;
	strlcpy(outgoing->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
	strlcat(outgoing->mnt_vfsstat.f_mntonname, outgoing_vol_new_path, MAXPATHLEN);
	outgoing->mnt_flag &= ~MNT_ROOTFS;
	vnode_lock_spin(outgoing_vol_new_covered_vp);
	outgoing_vol_new_covered_vp->v_mountedhere = outgoing;
	vnode_unlock(outgoing_vol_new_covered_vp);
	lck_rw_done(&outgoing->mnt_rwlock);

	if (!(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV) &&
	    (TAILQ_FIRST(&mountlist) == outgoing)) {
		vfs_setmntsystem(outgoing);
	}

	/*
	 * Finally, remove the mount_t linkage from the previously covered
	 * vnodes on the old root volume. These were incoming_vol_old_path,
	 * and each preserved mounts's "/pm_path". The filesystems previously
	 * mounted there have already been moved away.
	 */
	vnode_lock_spin(incoming_vol_old_covered_vp);
	incoming_vol_old_covered_vp->v_flag &= ~VMOUNT;
	incoming_vol_old_covered_vp->v_mountedhere = NULL;
	vnode_unlock(incoming_vol_old_covered_vp);

	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		vnode_lock_spin(pmi->pm_old_covered_vp);
		pmi->pm_old_covered_vp->v_flag &= ~VMOUNT;
		pmi->pm_old_covered_vp->v_mountedhere = NULL;
		vnode_unlock(pmi->pm_old_covered_vp);
	}

	/*
	 * Clear the name cache since many cached names are now invalid.
	 */
	vfs_iterate(0 /* flags */, cache_purge_callback, NULL);

	/*
	 * Actually change the rootvnode! And finally drop the lock that
	 * prevents concurrent vnode_lookups.
	 */
	set_rootvnode(incoming_rootvnode);
	lck_rw_unlock_exclusive(&rootvnode_rw_lock);

	if (!(incoming->mnt_kern_flag & MNTK_VIRTUALDEV) &&
	    !(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV)) {
		/*
		 * Switch the order of mount structures in the mountlist, new root
		 * mount moves to the head of the list followed by /dev and the other
		 * preserved mounts then all the preexisting mounts (old rootfs + any
		 * others)
		 */
		mount_list_lock();
		for (size_t i = 0; i < countof(preserved); i++) {
			struct preserved_mount *pmi = preserved[i];
			if (pmi->pm_rootvnode == NULLVP) {
				continue;
			}

			TAILQ_REMOVE(&mountlist, pmi->pm_mount, mnt_list);
			TAILQ_INSERT_HEAD(&mountlist, pmi->pm_mount, mnt_list);
		}
		TAILQ_REMOVE(&mountlist, incoming, mnt_list);
		TAILQ_INSERT_HEAD(&mountlist, incoming, mnt_list);
		mount_list_unlock();
	}

	/*
	 * Fixups across all volumes
	 */
	vfs_iterate(0 /* flags */, mntonname_fixup_callback, NULL);
	vfs_iterate(0 /* flags */, clear_mntk_backs_root_callback, NULL);

	error = 0;

done:
	/* Release lookup references taken during the preparation phase. */
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];

		if (pmi->pm_rootvnode) {
			vnode_put(pmi->pm_rootvnode);
		}
		if (pmi->pm_new_covered_vp) {
			vnode_put(pmi->pm_new_covered_vp);
		}
		if (pmi->pm_old_covered_vp) {
			vnode_rele(pmi->pm_old_covered_vp);
		}
	}

	if (outgoing_vol_new_covered_vp) {
		vnode_put(outgoing_vol_new_covered_vp);
	}

	if (incoming_vol_old_covered_vp) {
		vnode_rele(incoming_vol_old_covered_vp);
	}

	if (incoming_rootvnode) {
		vnode_put(incoming_rootvnode);
	}

	printf("%s : done shuffling mount points with error: %d\n", __FUNCTION__, error);
	return error;
}
1819
1820 /*
1821 * Mount the Recovery volume of a container
1822 */
int
vfs_mount_recovery(void)
{
#if CONFIG_MOUNT_PREBOOTRECOVERY
	int error = 0;

	/* Hold an iocount on the root vnode across the mount attempt. */
	error = vnode_get(rootvnode);
	if (error) {
		/* root must be mounted first */
		printf("vnode_get(rootvnode) failed with error %d\n", error);
		return error;
	}

	char recoverypath[] = PLATFORM_RECOVERY_VOLUME_MOUNT_POINT; /* !const because of internal casting */

	/* Mount the recovery volume */
	/* Recovery uses the same filesystem type as the root volume. */
	printf("attempting kernel mount for recovery volume... \n");
	error = kernel_mount(rootvnode->v_mount->mnt_vfsstat.f_fstypename, NULLVP, NULLVP,
	    recoverypath, (rootvnode->v_mount), 0, 0, (KERNEL_MOUNT_RECOVERYVOL), vfs_context_kernel());

	if (error) {
		printf("Failed to mount recovery volume (%d)\n", error);
	} else {
		printf("mounted recovery volume\n");
	}

	vnode_put(rootvnode);
	return error;
#else
	/* Recovery-volume mounting not configured on this platform: succeed quietly. */
	return 0;
#endif
}
1855
1856 /*
1857 * Lookup a mount point by filesystem identifier.
1858 */
1859
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	/*
	 * (0, 0): presumably "mount list lock not already held" and "no
	 * iteration ref" — compare vfs_getvfs_locked(), which passes (1, 0);
	 * confirm against mount_list_lookupby_fsid().
	 */
	return mount_list_lookupby_fsid(fsid, 0, 0);
}
1865
/*
 * As vfs_getvfs(), but for callers that already hold the mount list
 * lock (second argument is 1 rather than 0).
 */
static struct mount *
vfs_getvfs_locked(fsid_t *fsid)
{
	return mount_list_lookupby_fsid(fsid, 1, 0);
}
1871
1872 struct mount *
vfs_getvfs_by_mntonname(char * path)1873 vfs_getvfs_by_mntonname(char *path)
1874 {
1875 mount_t retmp = (mount_t)0;
1876 mount_t mp;
1877
1878 mount_list_lock();
1879 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1880 if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
1881 sizeof(mp->mnt_vfsstat.f_mntonname))) {
1882 retmp = mp;
1883 if (mount_iterref(retmp, 1)) {
1884 retmp = NULL;
1885 }
1886 goto out;
1887 }
1888 }
1889 out:
1890 mount_list_unlock();
1891 return retmp;
1892 }
1893
1894 /* generation number for creation of new fsids */
1895 u_short mntid_gen = 0;
1896 /*
1897 * Get a new unique fsid
1898 */
1899 void
vfs_getnewfsid(struct mount * mp)1900 vfs_getnewfsid(struct mount *mp)
1901 {
1902 fsid_t tfsid;
1903 int mtype;
1904
1905 mount_list_lock();
1906
1907 /* generate a new fsid */
1908 mtype = mp->mnt_vtable->vfc_typenum;
1909 if (++mntid_gen == 0) {
1910 mntid_gen++;
1911 }
1912 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1913 tfsid.val[1] = mtype;
1914
1915 while (vfs_getvfs_locked(&tfsid)) {
1916 if (++mntid_gen == 0) {
1917 mntid_gen++;
1918 }
1919 tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1920 }
1921
1922 mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
1923 mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
1924 mount_list_unlock();
1925 }
1926
1927 /*
1928 * Routines having to do with the management of the vnode table.
1929 */
1930 extern int(**dead_vnodeop_p)(void *);
1931 long numvnodes, freevnodes, deadvnodes, async_work_vnodes;
1932
1933
1934 int async_work_timed_out = 0;
1935 int async_work_handled = 0;
1936 int dead_vnode_wanted = 0;
1937 int dead_vnode_waited = 0;
1938
1939 /*
1940 * Move a vnode from one mount queue to another.
1941 */
static void
insmntque(vnode_t vp, mount_t mp)
{
	mount_t lmp;
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
		if ((vp->v_lflag & VNAMED_MOUNT) == 0) {
			panic("insmntque: vp not in mount vnode list");
		}
		vp->v_lflag &= ~VNAMED_MOUNT;

		mount_lock_spin(lmp);

		mount_drop(lmp, 1);

		if (vp->v_mntvnodes.tqe_next == NULL) {
			/*
			 * vp is the tail of one of the three per-mount lists;
			 * probe each to find which, then use the TAILQ macro.
			 */
			if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
			} else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
			} else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
			}
		} else {
			/*
			 * Not the tail: unlink by hand through the tqe
			 * pointers, which works without knowing which of the
			 * three lists vp is currently on.
			 */
			vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
			*vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
		}
		/* Mark vp as on no list. */
		vp->v_mntvnodes.tqe_next = NULL;
		vp->v_mntvnodes.tqe_prev = NULL;
		mount_unlock(lmp);
		return;
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL) {
		mount_lock_spin(mp);
		if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0)) {
			panic("vp already in mount list");
		}
		if (mp->mnt_lflag & MNT_LITER) {
			/* An iteration is in progress; park new vnodes separately. */
			TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
		}
		if (vp->v_lflag & VNAMED_MOUNT) {
			panic("insmntque: vp already in mount vnode list");
		}
		vp->v_lflag |= VNAMED_MOUNT;
		mount_ref(mp, 1);
		mount_unlock(mp);
	}
}
1998
1999
2000 /*
2001 * Create a vnode for a block device.
2002 * Used for root filesystem, argdev, and swap areas.
2003 * Also used for memory file system special devices.
2004 */
int
bdevvp(dev_t dev, vnode_t *vpp)
{
	vnode_t nvp;
	int error;
	struct vnode_fsparam vfsp;
	struct vfs_context context;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return ENODEV;
	}

	/* Kernel-credential context for the setup VNOPs below. */
	context.vc_thread = current_thread();
	context.vc_ucred = FSCRED;

	/* Describe a VBLK vnode with no owning mount, served by the spec ops. */
	vfsp.vnfs_mp = (struct mount *)0;
	vfsp.vnfs_vtype = VBLK;
	vfsp.vnfs_str = "bdevvp";
	vfsp.vnfs_dvp = NULL;
	vfsp.vnfs_fsnode = NULL;
	vfsp.vnfs_cnp = NULL;
	vfsp.vnfs_vops = spec_vnodeop_p;
	vfsp.vnfs_rdev = dev;
	vfsp.vnfs_filesize = 0;

	vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;

	vfsp.vnfs_marksystem = 0;
	vfsp.vnfs_markroot = 0;

	if ((error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp))) {
		*vpp = NULLVP;
		return error;
	}
	vnode_lock_spin(nvp);
	nvp->v_flag |= VBDEVVP;
	nvp->v_tag = VT_NON;    /* set this to VT_NON so during aliasing it can be replaced */
	vnode_unlock(nvp);
	/*
	 * Any failure past this point is fatal to boot-critical setup, hence
	 * panic; the `return error` lines after each panic are unreachable
	 * in practice but keep the compiler satisfied.
	 */
	if ((error = vnode_ref(nvp))) {
		panic("bdevvp failed: vnode_ref");
		return error;
	}
	if ((error = VNOP_FSYNC(nvp, MNT_WAIT, &context))) {
		panic("bdevvp failed: fsync");
		return error;
	}
	/* Discard any stale buffers associated with this device. */
	if ((error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0))) {
		panic("bdevvp failed: invalidateblks");
		return error;
	}

#if CONFIG_MACF
	/*
	 * XXXMAC: We can't put a MAC check here, the system will
	 * panic without this vnode.
	 */
#endif /* MAC */

	/* Open the device read-only; the returned vnode is ready for use. */
	if ((error = VNOP_OPEN(nvp, FREAD, &context))) {
		panic("bdevvp failed: open");
		return error;
	}
	*vpp = nvp;

	return 0;
}
2072
2073 /*
2074 * Check to see if the new vnode represents a special device
2075 * for which we already have a vnode (either because of
2076 * bdevvp() or because of a different vnode representing
2077 * the same block device). If such an alias exists, deallocate
2078 * the existing contents and return the aliased vnode. The
2079 * caller is responsible for filling it with its new contents.
2080 */
static vnode_t
checkalias(struct vnode *nvp, dev_t nvp_rdev)
{
	struct vnode *vp;
	struct vnode **vpp;
	struct specinfo *sin = NULL;
	int vid = 0;

	/* Hash chain of special vnodes for this device number. */
	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	SPECHASH_LOCK();

	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
			/* Capture v_id so recycling can be detected after unlock. */
			vid = vp->v_id;
			break;
		}
	}
	SPECHASH_UNLOCK();

	if (vp) {
found_alias:
		if (vnode_getwithvid(vp, vid)) {
			/* Candidate was recycled while unlocked; rescan the chain. */
			goto loop;
		}
		/*
		 * Termination state is checked in vnode_getwithvid
		 */
		vnode_lock(vp);

		/*
		 * Alias, but not in use, so flush it out.
		 */
		if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_put_locked(vp);
			vnode_unlock(vp);
			goto loop;
		}
	}
	/* No in-use alias (or the alias is not VT_NON): set up nvp as a spec vnode. */
	if (vp == NULL || vp->v_tag != VT_NON) {
		if (sin == NULL) {
			sin = zalloc_flags(specinfo_zone, Z_WAITOK | Z_ZERO);
		} else {
			/* Reusing a specinfo kept from a previous pass; re-zero it. */
			bzero(sin, sizeof(struct specinfo));
		}

		nvp->v_specinfo = sin;
		nvp->v_rdev = nvp_rdev;
		nvp->v_specflags = 0;
		nvp->v_speclastr = -1;
		nvp->v_specinfo->si_opencount = 0;
		nvp->v_specinfo->si_initted = 0;
		nvp->v_specinfo->si_throttleable = 0;

		SPECHASH_LOCK();

		/* We dropped the lock, someone could have added */
		if (vp == NULLVP) {
			for (vp = *vpp; vp; vp = vp->v_specnext) {
				if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
					vid = vp->v_id;
					SPECHASH_UNLOCK();
					goto found_alias;
				}
			}
		}

		/* Link nvp onto the front of the hash chain. */
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		*vpp = nvp;

		if (vp != NULLVP) {
			/* Both vnodes now refer to the same device: mark them aliased. */
			nvp->v_specflags |= SI_ALIASED;
			vp->v_specflags |= SI_ALIASED;
			SPECHASH_UNLOCK();
			vnode_put_locked(vp);
			vnode_unlock(vp);
		} else {
			SPECHASH_UNLOCK();
		}

		return NULLVP;
	}

	if (sin) {
		/* Spare specinfo allocated on an earlier pass went unused. */
		zfree(specinfo_zone, sin);
	}

	if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0) {
		return vp;
	}

	panic("checkalias with VT_NON vp that shouldn't: %p", vp);

	return vp;
}
2178
2179
2180 /*
2181 * Get a reference on a particular vnode and lock it if requested.
2182 * If the vnode was on the inactive list, remove it from the list.
2183 * If the vnode was on the free list, remove it from the list and
2184 * move it to inactive list as needed.
2185 * The vnode lock bit is set if the vnode is being eliminated in
2186 * vgone. The process is awakened when the transition is completed,
2187 * and an error returned to indicate that the vnode is no longer
2188 * usable (possibly having been changed to a new file system type).
2189 */
2190 int
vget_internal(vnode_t vp,int vid,int vflags)2191 vget_internal(vnode_t vp, int vid, int vflags)
2192 {
2193 int error = 0;
2194
2195 vnode_lock_spin(vp);
2196
2197 if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0)) {
2198 /*
2199 * vnode to be returned only if it has writers opened
2200 */
2201 error = EINVAL;
2202 } else {
2203 error = vnode_getiocount(vp, vid, vflags);
2204 }
2205
2206 vnode_unlock(vp);
2207
2208 return error;
2209 }
2210
/*
 * Take a usecount (long-term) reference on a vnode.
 * Thin wrapper around vnode_ref_ext() with no open mode and no flags.
 *
 * Returns:	0			Success
 *		ENOENT			No such file or directory [terminating]
 */
int
vnode_ref(vnode_t vp)
{
	return vnode_ref_ext(vp, 0, 0);
}
2220
/*
 * Take a usecount reference on a vnode; fmode may additionally request
 * a writecount (FWRITE) and/or kusecount (O_EVTONLY) reference.
 * VNODE_REF_FORCE in flags allows acquisition even while the vnode is
 * draining/terminating (when not owned by the current thread).
 *
 * Returns:	0			Success
 *		ENOENT			No such file or directory [terminating]
 */
int
vnode_ref_ext(vnode_t vp, int fmode, int flags)
{
	int error = 0;

	vnode_lock_spin(vp);

	/*
	 * once all the current call sites have been fixed to insure they have
	 * taken an iocount, we can toughen this assert up and insist that the
	 * iocount is non-zero... a non-zero usecount doesn't insure correctness
	 */
	if (vp->v_iocount <= 0 && vp->v_usecount <= 0) {
		panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
	}

	/*
	 * if you are the owner of drain/termination, can acquire usecount
	 */
	if ((flags & VNODE_REF_FORCE) == 0) {
		if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
			if (vp->v_owner != current_thread()) {
				error = ENOENT;
				goto out;
			}
		}
	}

	/* Enable atomic ops on v_usecount without the vnode lock */
	os_atomic_inc(&vp->v_usecount, relaxed);

	if (fmode & FWRITE) {
		if (++vp->v_writecount <= 0) {
			panic("vnode_ref_ext: v_writecount");
		}
	}
	if (fmode & O_EVTONLY) {
		if (++vp->v_kusecount <= 0) {
			panic("vnode_ref_ext: v_kusecount");
		}
	}
	if (vp->v_flag & VRAGE) {
		struct uthread *ut;

		ut = current_uthread();

		if (!(current_proc()->p_lflag & P_LRAGE_VNODES) &&
		    !(ut->uu_flag & UT_RAGE_VNODES)) {
			/*
			 * a 'normal' process accessed this vnode
			 * so make sure its no longer marked
			 * for rapid aging... also, make sure
			 * it gets removed from the rage list...
			 * when v_usecount drops back to 0, it
			 * will be put back on the real free list
			 */
			vp->v_flag &= ~VRAGE;
			vp->v_references = 0;
			vnode_list_remove(vp);
		}
	}
	if (vp->v_usecount == 1 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
		/* first usecount on a regular file: mark its memory object in use */
		if (vp->v_ubcinfo) {
			vnode_lock_convert(vp);
			memory_object_mark_used(vp->v_ubcinfo->ui_control);
		}
	}
out:
	vnode_unlock(vp);

	return error;
}
2297
2298
2299 boolean_t
vnode_on_reliable_media(vnode_t vp)2300 vnode_on_reliable_media(vnode_t vp)
2301 {
2302 mount_t mp = vp->v_mount;
2303
2304 /*
2305 * A NULL mountpoint would imply it's not attached to a any filesystem.
2306 * This can only happen with a vnode created by bdevvp(). We'll consider
2307 * those as not unreliable as the primary use of this function is determine
2308 * which vnodes are to be handed off to the async cleaner thread for
2309 * reclaim.
2310 */
2311 if (!mp || (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV) && (mp->mnt_flag & MNT_LOCAL))) {
2312 return TRUE;
2313 }
2314
2315 return FALSE;
2316 }
2317
2318 static void
vnode_async_list_add_locked(vnode_t vp)2319 vnode_async_list_add_locked(vnode_t vp)
2320 {
2321 if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
2322 panic("vnode_async_list_add: %p is in wrong state", vp);
2323 }
2324
2325 TAILQ_INSERT_HEAD(&vnode_async_work_list, vp, v_freelist);
2326 vp->v_listflag |= VLIST_ASYNC_WORK;
2327
2328 async_work_vnodes++;
2329 }
2330
2331 static void
vnode_async_list_add(vnode_t vp)2332 vnode_async_list_add(vnode_t vp)
2333 {
2334 vnode_list_lock();
2335
2336 vnode_async_list_add_locked(vp);
2337
2338 vnode_list_unlock();
2339
2340 wakeup(&vnode_async_work_list);
2341 }
2342
2343
/*
 * put the vnode on appropriate free list.
 * called with vnode LOCKED
 * No-op if the vnode is still referenced, already listed, or terminating.
 */
static void
vnode_list_add(vnode_t vp)
{
	boolean_t need_dead_wakeup = FALSE;

#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif

again:

	/*
	 * if it is already on a list or non zero references return
	 */
	if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE)) {
		return;
	}

	/*
	 * In vclean, we might have deferred ditching locked buffers
	 * because something was still referencing them (indicated by
	 * usecount).  We can ditch them now.
	 */
	if (ISSET(vp->v_lflag, VL_DEAD)
	    && (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))) {
		++vp->v_iocount;        // Probably not necessary, but harmless
#ifdef CONFIG_IOCOUNT_TRACE
		record_vp(vp, 1);
#endif
		/* must drop the vnode lock to flush buffers; recheck state after */
		vnode_unlock(vp);
		buf_invalidateblks(vp, BUF_INVALIDATE_LOCKED, 0, 0);
		vnode_lock(vp);
		vnode_dropiocount(vp);
		goto again;
	}

	vnode_list_lock();

	if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
		/*
		 * add the new guy to the appropriate end of the RAGE list
		 */
		if ((vp->v_flag & VAGE)) {
			TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
		} else {
			TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
		}

		vp->v_listflag |= VLIST_RAGE;
		ragevnodes++;

		/*
		 * reset the timestamp for the last inserted vp on the RAGE
		 * queue to let new_vnode know that its not ok to start stealing
		 * from this list... as long as we're actively adding to this list
		 * we'll push out the vnodes we want to donate to the real free list
		 * once we stop pushing, we'll let some time elapse before we start
		 * stealing them in the new_vnode routine
		 */
		microuptime(&rage_tv);
	} else {
		/*
		 * if VL_DEAD, insert it at head of the dead list
		 * else insert at tail of LRU list or at head if VAGE is set
		 */
		if ((vp->v_lflag & VL_DEAD)) {
			TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
			vp->v_listflag |= VLIST_DEAD;
			deadvnodes++;

			/* a waiter in new_vnode may be blocked for a dead vnode */
			if (dead_vnode_wanted) {
				dead_vnode_wanted--;
				need_dead_wakeup = TRUE;
			}
		} else if ((vp->v_flag & VAGE)) {
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
			vp->v_flag &= ~VAGE;
			freevnodes++;
		} else {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			freevnodes++;
		}
	}
	vnode_list_unlock();

	/* wake outside the list lock to avoid holding it across the wakeup */
	if (need_dead_wakeup == TRUE) {
		wakeup_one((caddr_t)&dead_vnode_wanted);
	}
}
2437
2438
2439 /*
2440 * remove the vnode from appropriate free list.
2441 * called with vnode LOCKED and
2442 * the list lock held
2443 */
2444 static void
vnode_list_remove_locked(vnode_t vp)2445 vnode_list_remove_locked(vnode_t vp)
2446 {
2447 if (VONLIST(vp)) {
2448 /*
2449 * the v_listflag field is
2450 * protected by the vnode_list_lock
2451 */
2452 if (vp->v_listflag & VLIST_RAGE) {
2453 VREMRAGE("vnode_list_remove", vp);
2454 } else if (vp->v_listflag & VLIST_DEAD) {
2455 VREMDEAD("vnode_list_remove", vp);
2456 } else if (vp->v_listflag & VLIST_ASYNC_WORK) {
2457 VREMASYNC_WORK("vnode_list_remove", vp);
2458 } else {
2459 VREMFREE("vnode_list_remove", vp);
2460 }
2461 }
2462 }
2463
2464
/*
 * remove the vnode from appropriate free list.
 * called with vnode LOCKED
 */
static void
vnode_list_remove(vnode_t vp)
{
#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	/*
	 * we want to avoid taking the list lock
	 * in the case where we're not on the free
	 * list... this will be true for most
	 * directories and any currently in use files
	 *
	 * we're guaranteed that we can't go from
	 * the not-on-list state to the on-list
	 * state since we hold the vnode lock...
	 * all calls to vnode_list_add are done
	 * under the vnode lock... so we can
	 * check for that condition (the prevalent one)
	 * without taking the list lock
	 */
	if (VONLIST(vp)) {
		vnode_list_lock();
		/*
		 * however, we're not guaranteed that
		 * we won't go from the on-list state
		 * to the not-on-list state until we
		 * hold the vnode_list_lock... this
		 * is due to "new_vnode" removing vnodes
		 * from the free list under the list_lock
		 * w/o the vnode lock... so we need to
		 * check again whether we're currently
		 * on the free list
		 */
		vnode_list_remove_locked(vp);

		vnode_list_unlock();
	}
}
2507
2508
/*
 * Release a usecount reference; thin wrapper around vnode_rele_internal()
 * with no open mode, reentry allowed, and the vnode unlocked.
 */
void
vnode_rele(vnode_t vp)
{
	vnode_rele_internal(vp, 0, 0, 0);
}
2514
2515
/*
 * Release a usecount reference taken with fmode (FWRITE / O_EVTONLY);
 * dont_reenter prevents calling back into the filesystem on this release.
 */
void
vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
{
	vnode_rele_internal(vp, fmode, dont_reenter, 0);
}
2521
2522
/*
 * Drop a usecount reference (and writecount/kusecount per fmode).
 * When the last usecount and iocount are gone, drive the vnode through
 * VNOP_INACTIVE and, if marked for termination, reclaim it.
 * 'locked' indicates the caller already holds the vnode lock.
 */
void
vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
{
	int32_t old_usecount;

	if (!locked) {
		vnode_lock_spin(vp);
	}
#if DIAGNOSTIC
	else {
		lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
	}
#endif
	/* Enable atomic ops on v_usecount without the vnode lock */
	old_usecount = os_atomic_dec_orig(&vp->v_usecount, relaxed);
	if (old_usecount < 1) {
		/*
		 * Because we allow atomic ops on usecount (in lookup only, under
		 * specific conditions of already having a usecount) it is
		 * possible that when the vnode is examined, its usecount is
		 * different than what will be printed in this panic message.
		 */
		panic("vnode_rele_ext: vp %p usecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.",
		    vp, old_usecount - 1, vp->v_tag, vp->v_type, vp->v_flag);
	}

	if (fmode & FWRITE) {
		if (--vp->v_writecount < 0) {
			panic("vnode_rele_ext: vp %p writecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
		}
	}
	if (fmode & O_EVTONLY) {
		if (--vp->v_kusecount < 0) {
			panic("vnode_rele_ext: vp %p kusecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
		}
	}
	/* kusecount is a subset of usecount; it can never exceed it */
	if (vp->v_kusecount > vp->v_usecount) {
		panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d).  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
	}

	if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
		/*
		 * vnode is still busy... if we're the last
		 * usecount, mark for a future call to VNOP_INACTIVE
		 * when the iocount finally drops to 0
		 */
		if (vp->v_usecount == 0) {
			vp->v_lflag |= VL_NEEDINACTIVE;
			vp->v_flag  &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
		}
		goto done;
	}
	vp->v_flag  &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);

	if (ISSET(vp->v_lflag, VL_TERMINATE | VL_DEAD) || dont_reenter) {
		/*
		 * vnode is being cleaned, or
		 * we've requested that we don't reenter
		 * the filesystem on this release...in
		 * the latter case, we'll mark the vnode aged
		 */
		if (dont_reenter) {
			if (!(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM))) {
				vp->v_lflag |= VL_NEEDINACTIVE;

				/* dirty or unreliable-media vnodes go to the async cleaner */
				if (vnode_on_reliable_media(vp) == FALSE || vp->v_flag & VISDIRTY) {
					vnode_async_list_add(vp);
					goto done;
				}
			}
			vp->v_flag |= VAGE;
		}
		vnode_list_add(vp);

		goto done;
	}
	/*
	 * at this point both the iocount and usecount
	 * are zero
	 * pick up an iocount so that we can call
	 * VNOP_INACTIVE with the vnode lock unheld
	 */
	vp->v_iocount++;
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	vp->v_lflag &= ~VL_NEEDINACTIVE;
	vnode_unlock(vp);

	VNOP_INACTIVE(vp, vfs_context_current());

	vnode_lock_spin(vp);
	/*
	 * because we dropped the vnode lock to call VNOP_INACTIVE
	 * the state of the vnode may have changed... we may have
	 * picked up an iocount, usecount or the MARKTERM may have
	 * been set... we need to reevaluate the reference counts
	 * to determine if we can call vnode_reclaim_internal at
	 * this point... if the reference counts are up, we'll pick
	 * up the MARKTERM state when they get subsequently dropped
	 */
	if ((vp->v_iocount == 1) && (vp->v_usecount == 0) &&
	    ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
		struct uthread *ut;

		ut = current_uthread();

		/* a caller may batch reclaims; defer to its list if requested */
		if (ut->uu_defer_reclaims) {
			vp->v_defer_reclaimlist = ut->uu_vreclaims;
			ut->uu_vreclaims = vp;
			goto done;
		}
		vnode_lock_convert(vp);
		vnode_reclaim_internal(vp, 1, 1, 0);
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);
done:
	if (vp->v_usecount == 0 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
		/* last usecount on a regular file: mark its memory object unused */
		if (vp->v_ubcinfo) {
			vnode_lock_convert(vp);
			memory_object_mark_unused(vp->v_ubcinfo->ui_control, (vp->v_flag & VRAGE) == VRAGE);
		}
	}
	if (!locked) {
		vnode_unlock(vp);
	}
	return;
}
2652
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 *
 * 'flags' additionally select vnodes to skip (SKIPSYSTEM, SKIPSWAP,
 * SKIPROOT) or restrict flushing to writable regular files (WRITECLOSE).
 * Returns 0 on success, EBUSY if busy vnodes remain on an unforced flush.
 */

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vnode *vp;
	int busy = 0;
	int reclaimed = 0;
	int retval;
	unsigned int vid;
	bool first_try = true;

	/*
	 * See comments in vnode_iterate() for the rationale for this lock
	 */
	mount_iterate_lock(mp);

	mount_lock(mp);
	vnode_iterate_setup(mp);
	/*
	 * On regular unmounts(not forced) do a
	 * quick check for vnodes to be in use. This
	 * preserves the caching of vnodes. automounter
	 * tries unmounting every so often to see whether
	 * it is still busy or not.
	 */
	if (((flags & FORCECLOSE) == 0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
		if (vnode_umount_preflight(mp, skipvp, flags)) {
			vnode_iterate_clear(mp);
			mount_unlock(mp);
			mount_iterate_unlock(mp);
			return EBUSY;
		}
	}
loop:
	/* If it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		mount_iterate_unlock(mp);
		return retval;
	}

	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);

		if ((vp->v_mount != mp) || (vp == skipvp)) {
			continue;
		}
		/* capture vid before dropping the mount lock so we can detect recycle */
		vid = vp->v_id;
		mount_unlock(mp);

		vnode_lock_spin(vp);

		// If vnode is already terminating, wait for it...
		while (vp->v_id == vid && ISSET(vp->v_lflag, VL_TERMINATE)) {
			vp->v_lflag |= VL_TERMWANT;
			msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vflush", NULL);
		}

		if ((vp->v_id != vid) || ISSET(vp->v_lflag, VL_DEAD)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}

		/*
		 * If requested, skip over vnodes marked VSYSTEM.
		 * Skip over all vnodes marked VNOFLUSH.
		 */
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
		    (vp->v_flag & VNOFLUSH))) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VSWAP.
		 */
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VROOT.
		 */
		if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If the real usecount is 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (((vp->v_usecount == 0) ||
		    ((vp->v_usecount - vp->v_kusecount) == 0))) {
			vnode_lock_convert(vp);
			vp->v_iocount++;        /* so that drain waits for * other iocounts */
#ifdef CONFIG_IOCOUNT_TRACE
			record_vp(vp, 1);
#endif
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_dropiocount(vp);
			vnode_list_add(vp);
			vnode_unlock(vp);

			reclaimed++;
			mount_lock(mp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			vnode_lock_convert(vp);

			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vp->v_iocount++;        /* so that drain waits * for other iocounts */
#ifdef CONFIG_IOCOUNT_TRACE
				record_vp(vp, 1);
#endif
				vnode_abort_advlocks(vp);
				vnode_reclaim_internal(vp, 1, 1, 0);
				vnode_dropiocount(vp);
				vnode_list_add(vp);
				vnode_unlock(vp);
			} else {
				/* spec vnodes are kept alive as anonymous devices */
				vclean(vp, 0);
				vp->v_lflag &= ~VL_DEAD;
				vp->v_op = spec_vnodeop_p;
				vp->v_flag |= VDEVFLUSH;
				vnode_unlock(vp);
			}
			mount_lock(mp);
			continue;
		}

		/* log vnodes blocking unforced unmounts */
		if (print_busy_vnodes && first_try && ((flags & FORCECLOSE) == 0)) {
			vprint("vflush - busy vnode", vp);
		}

		vnode_unlock(vp);
		mount_lock(mp);
		busy++;
	}

	/* At this point the worker queue is completed */
	if (busy && ((flags & FORCECLOSE) == 0) && reclaimed) {
		busy = 0;
		reclaimed = 0;
		(void)vnode_iterate_reloadq(mp);
		first_try = false;
		/* returned with mount lock held */
		goto loop;
	}

	/* if new vnodes were created in between retry the reclaim */
	if (vnode_iterate_reloadq(mp) != 0) {
		if (!(busy && ((flags & FORCECLOSE) == 0))) {
			first_try = false;
			goto loop;
		}
	}
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	mount_iterate_unlock(mp);

	if (busy && ((flags & FORCECLOSE) == 0)) {
		return EBUSY;
	}
	return 0;
}
2852
2853 long num_recycledvnodes = 0;
/*
 * Disassociate the underlying file system from a vnode.
 * The vnode lock is held on entry (it is dropped and re-acquired
 * internally; the vnode is marked VL_TERMINATE for the duration).
 * flags: DOCLOSE to close and invalidate buffers/pages, REVOKEALL
 * to propagate revocation semantics to VNOP_CLOSE.
 */
static void
vclean(vnode_t vp, int flags)
{
	vfs_context_t ctx = vfs_context_current();
	int active;
	int need_inactive;
	int already_terminating;
	int clflags = 0;
#if NAMEDSTREAMS
	int is_namedstream;
#endif

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	active = vp->v_usecount;

	/*
	 * just in case we missed sending a needed
	 * VNOP_INACTIVE, we'll do it now
	 */
	need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);

	vp->v_lflag &= ~VL_NEEDINACTIVE;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	already_terminating = (vp->v_lflag & VL_TERMINATE);

	vp->v_lflag |= VL_TERMINATE;

#if NAMEDSTREAMS
	is_namedstream = vnode_isnamedstream(vp);
#endif

	vnode_unlock(vp);

	OSAddAtomicLong(1, &num_recycledvnodes);

	if (flags & DOCLOSE) {
		clflags |= IO_NDELAY;
	}
	if (flags & REVOKEALL) {
		clflags |= IO_REVOKE;
	}

#if CONFIG_MACF
	if (vp->v_mount) {
		/*
		 * It is possible for bdevvp vnodes to not have a mount
		 * pointer. It's fine to let it get reclaimed without
		 * notifying.
		 */
		mac_vnode_notify_reclaim(vp);
	}
#endif

	if (active && (flags & DOCLOSE)) {
		VNOP_CLOSE(vp, clflags, ctx);
	}

	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE) {
#if CONFIG_NFS_CLIENT
		if (vp->v_tag == VT_NFS) {
			nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
		} else
#endif /* CONFIG_NFS_CLIENT */
		{
			VNOP_FSYNC(vp, MNT_WAIT, ctx);

			/*
			 * If the vnode is still in use (by the journal for
			 * example) we don't want to invalidate locked buffers
			 * here.  In that case, either the journal will tidy them
			 * up, or we will deal with it when the usecount is
			 * finally released in vnode_rele_internal.
			 */
			buf_invalidateblks(vp, BUF_WRITE_DATA | (active ? 0 : BUF_INVALIDATE_LOCKED), 0, 0);
		}
		if (UBCINFOEXISTS(vp)) {
			/*
			 * Clean the pages in VM.
			 */
			(void)ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
		}
	}
	if (active || need_inactive) {
		VNOP_INACTIVE(vp, ctx);
	}

#if NAMEDSTREAMS
	if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
		vnode_t pvp = vp->v_parent;

		/* Delete the shadow stream file before we reclaim its vnode */
		if (vnode_isshadow(vp)) {
			vnode_relenamedstream(pvp, vp);
		}

		/*
		 * No more streams associated with the parent.  We
		 * have a ref on it, so its identity is stable.
		 * If the parent is on an opaque volume, then we need to know
		 * whether it has associated named streams.
		 */
		if (vfs_authopaque(pvp->v_mount)) {
			vnode_lock_spin(pvp);
			pvp->v_lflag &= ~VL_HASSTREAMS;
			vnode_unlock(pvp);
		}
	}
#endif

	/*
	 * Destroy ubc named reference
	 * cluster_release is done on this path
	 * along with dropping the reference on the ucred
	 * (and in the case of forced unmount of an mmap-ed file,
	 * the ubc reference on the vnode is dropped here too).
	 */
	ubc_destroy_named(vp);

#if CONFIG_TRIGGERS
	/*
	 * cleanup trigger info from vnode (if any)
	 */
	if (vp->v_resolve) {
		vnode_resolver_detach(vp);
	}
#endif

#if CONFIG_IO_COMPRESSION_STATS
	if ((vp->io_compression_stats)) {
		vnode_iocs_record_and_free(vp);
	}
#endif /* CONFIG_IO_COMPRESSION_STATS */

	/*
	 * Reclaim the vnode.
	 */
	if (VNOP_RECLAIM(vp, ctx)) {
		panic("vclean: cannot reclaim");
	}

	// make sure the name & parent ptrs get cleaned out!
	vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE | VNODE_UPDATE_PURGEFIRMLINK);

	vnode_lock(vp);

	/*
	 * Remove the vnode from any mount list it might be on.  It is not
	 * safe to do this any earlier because unmount needs to wait for
	 * any vnodes to terminate and it cannot do that if it cannot find
	 * them.
	 */
	insmntque(vp, (struct mount *)0);

	/* point the vnode at the dead filesystem ops */
	vp->v_mount = dead_mountp;
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_data = NULL;

	vp->v_lflag |= VL_DEAD;
	vp->v_flag &= ~VISDIRTY;

	if (already_terminating == 0) {
		vp->v_lflag &= ~VL_TERMINATE;
		/*
		 * Done with purge, notify sleepers of the grim news.
		 */
		if (vp->v_lflag & VL_TERMWANT) {
			vp->v_lflag &= ~VL_TERMWANT;
			wakeup(&vp->v_lflag);
		}
	}
}
3042
/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 * Returns 0 on success, ENOENT if a vgone/vclean is already in progress.
 */
int
#if DIAGNOSTIC
vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
#else
vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
#endif
{
	struct vnode *vq;
	int vid;

#if DIAGNOSTIC
	if ((flags & REVOKEALL) == 0) {
		panic("vnop_revoke");
	}
#endif

	if (vnode_isaliased(vp)) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * return an immediate error
		 */
		if (vp->v_lflag & VL_TERMINATE) {
			return ENOENT;
		}

		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		SPECHASH_LOCK();
		while ((vp->v_specflags & SI_ALIASED)) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq) {
					continue;
				}
				vid = vq->v_id;
				SPECHASH_UNLOCK();
				/* alias may be recycled while unlocked; restart the scan */
				if (vnode_getwithvid(vq, vid)) {
					SPECHASH_LOCK();
					break;
				}
				vnode_lock(vq);
				if (!(vq->v_lflag & VL_TERMINATE)) {
					vnode_reclaim_internal(vq, 1, 1, 0);
				}
				vnode_put_locked(vq);
				vnode_unlock(vq);
				SPECHASH_LOCK();
				break;
			}
		}
		SPECHASH_UNLOCK();
	}
	vnode_lock(vp);
	if (vp->v_lflag & VL_TERMINATE) {
		vnode_unlock(vp);
		return ENOENT;
	}
	vnode_reclaim_internal(vp, 1, 0, REVOKEALL);
	vnode_unlock(vp);

	return 0;
}
3111
3112 /*
3113 * Recycle an unused vnode to the front of the free list.
3114 * Release the passed interlock if the vnode will be recycled.
3115 */
3116 int
vnode_recycle(struct vnode * vp)3117 vnode_recycle(struct vnode *vp)
3118 {
3119 vnode_lock_spin(vp);
3120
3121 if (vp->v_iocount || vp->v_usecount) {
3122 vp->v_lflag |= VL_MARKTERM;
3123 vnode_unlock(vp);
3124 return 0;
3125 }
3126 vnode_lock_convert(vp);
3127 vnode_reclaim_internal(vp, 1, 0, 0);
3128
3129 vnode_unlock(vp);
3130
3131 return 1;
3132 }
3133
3134 static int
vnode_reload(vnode_t vp)3135 vnode_reload(vnode_t vp)
3136 {
3137 vnode_lock_spin(vp);
3138
3139 if ((vp->v_iocount > 1) || vp->v_usecount) {
3140 vnode_unlock(vp);
3141 return 0;
3142 }
3143 if (vp->v_iocount <= 0) {
3144 panic("vnode_reload with no iocount %d", vp->v_iocount);
3145 }
3146
3147 /* mark for release when iocount is dopped */
3148 vp->v_lflag |= VL_MARKTERM;
3149 vnode_unlock(vp);
3150
3151 return 1;
3152 }
3153
3154
/*
 * Fully disassociate a vnode from its filesystem and, for special
 * devices, unlink it from the spec alias hash chain and release its
 * specinfo. Called with the vnode locked (vclean manages the lock).
 */
static void
vgone(vnode_t vp, int flags)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * Clean out the filesystem specific data.
	 * vclean also takes care of removing the
	 * vnode from any mount list it might be on
	 */
	vclean(vp, flags | DOCLOSE);

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		SPECHASH_LOCK();
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			/* find the predecessor on the chain and unlink vp */
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp) {
					continue;
				}
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL) {
				panic("missing bdev");
			}
		}
		if (vp->v_specflags & SI_ALIASED) {
			/*
			 * If vp was the second-to-last alias, clear SI_ALIASED
			 * on the single remaining alias (vx).
			 */
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type) {
					continue;
				}
				if (vx) {
					break;
				}
				vx = vq;
			}
			if (vx == NULL) {
				panic("missing alias");
			}
			if (vq == NULL) {
				vx->v_specflags &= ~SI_ALIASED;
			}
			vp->v_specflags &= ~SI_ALIASED;
		}
		SPECHASH_UNLOCK();
		{
			struct specinfo *tmp = vp->v_specinfo;
			vp->v_specinfo = NULL;
			zfree(specinfo_zone, tmp);
		}
	}
}
3216
/*
 * Lookup a vnode by device number.
 * Returns 1 (and sets *errorp via vfs_mountedon) when a busy spec vnode
 * for (dev, type) exists and is mounted on; 0 otherwise.
 */
int
check_mountedon(dev_t dev, enum vtype type, int *errorp)
{
	vnode_t vp;
	int rc = 0;
	int vid;

loop:
	SPECHASH_LOCK();
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type) {
			continue;
		}
		/* capture vid before dropping the hash lock to detect recycle */
		vid = vp->v_id;
		SPECHASH_UNLOCK();
		if (vnode_getwithvid(vp, vid)) {
			goto loop;
		}
		vnode_lock_spin(vp);
		/* busy means: a usecount, or an iocount beyond the one we took */
		if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
			vnode_unlock(vp);
			if ((*errorp = vfs_mountedon(vp)) != 0) {
				rc = 1;
			}
		} else {
			vnode_unlock(vp);
		}
		vnode_put(vp);
		return rc;
	}
	SPECHASH_UNLOCK();
	return 0;
}
3253
/*
 * Calculate the total number of references to a special device.
 * For non-spec vnodes this is usecount minus kusecount; for spec vnodes
 * it sums si_opencount over all aliases of the device.
 */
int
vcount(vnode_t vp)
{
	vnode_t vq, vnext;
	int count;
	int vid;

	if (!vnode_isspec(vp)) {
		return vp->v_usecount - vp->v_kusecount;
	}

loop:
	/* unaliased spec vnode: its own opencount is the answer */
	if (!vnode_isaliased(vp)) {
		return vp->v_specinfo->si_opencount;
	}
	count = 0;

	SPECHASH_LOCK();
	/*
	 * Grab first vnode and its vid.
	 */
	vq = *vp->v_hashchain;
	vid = vq ? vq->v_id : 0;

	SPECHASH_UNLOCK();

	while (vq) {
		/*
		 * Attempt to get the vnode outside the SPECHASH lock.
		 */
		if (vnode_getwithvid(vq, vid)) {
			goto loop;
		}
		vnode_lock(vq);

		if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
			if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
				/*
				 * Alias, but not in use, so flush it out.
				 */
				vnode_reclaim_internal(vq, 1, 1, 0);
				vnode_put_locked(vq);
				vnode_unlock(vq);
				goto loop;
			}
			count += vq->v_specinfo->si_opencount;
		}
		vnode_unlock(vq);

		SPECHASH_LOCK();
		/*
		 * must do this with the reference still held on 'vq'
		 * so that it can't be destroyed while we're poking
		 * through v_specnext
		 */
		vnext = vq->v_specnext;
		vid = vnext ? vnext->v_id : 0;

		SPECHASH_UNLOCK();

		vnode_put(vq);

		vq = vnext;
	}

	return count;
}
3324
int prtactive = 0;              /* 1 => print out reclaim of active vnodes */

/*
 * Print out a description of a vnode.
 */
/* human-readable names for enum vtype values, indexed by v_type */
static const char *typename[] =
{ "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
3332
3333 void
vprint(const char * label,struct vnode * vp)3334 vprint(const char *label, struct vnode *vp)
3335 {
3336 char sbuf[64];
3337
3338 if (label != NULL) {
3339 printf("%s: ", label);
3340 }
3341 printf("name %s type %s, usecount %d, writecount %d\n",
3342 vp->v_name, typename[vp->v_type],
3343 vp->v_usecount, vp->v_writecount);
3344 sbuf[0] = '\0';
3345 if (vp->v_flag & VROOT) {
3346 strlcat(sbuf, "|VROOT", sizeof(sbuf));
3347 }
3348 if (vp->v_flag & VTEXT) {
3349 strlcat(sbuf, "|VTEXT", sizeof(sbuf));
3350 }
3351 if (vp->v_flag & VSYSTEM) {
3352 strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
3353 }
3354 if (vp->v_flag & VNOFLUSH) {
3355 strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
3356 }
3357 if (vp->v_flag & VBWAIT) {
3358 strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
3359 }
3360 if (vnode_isaliased(vp)) {
3361 strlcat(sbuf, "|VALIASED", sizeof(sbuf));
3362 }
3363 if (sbuf[0] != '\0') {
3364 printf("vnode flags (%s\n", &sbuf[1]);
3365 }
3366 }
3367
3368
/*
 * vn_getpath:
 *   Build the path to 'vp' without re-entering the file system
 *   (BUILDPATH_NO_FS_ENTER).  On input *len is the size of 'pathbuf';
 *   on output build_path stores the length it used.
 */
int
vn_getpath(struct vnode *vp, char *pathbuf, int *len)
{
	return build_path(vp, pathbuf, *len, len, BUILDPATH_NO_FS_ENTER, vfs_context_current());
}
3374
/*
 * vn_getpath_fsenter:
 *   Like vn_getpath(), but permits build_path to re-enter the file
 *   system (no BUILDPATH_NO_FS_ENTER).  See the deadlock discussion in
 *   the block comment preceding vn_getpath_fsenter_with_parent for
 *   when this is unsafe.
 */
int
vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
{
	return build_path(vp, pathbuf, *len, len, 0, vfs_context_current());
}
3380
3381 /*
3382 * vn_getpath_fsenter_with_parent will reenter the file system to fine the path of the
3383 * vnode. It requires that there are IO counts on both the vnode and the directory vnode.
3384 *
3385 * vn_getpath_fsenter is called by MAC hooks to authorize operations for every thing, but
3386 * unlink, rmdir and rename. For these operation the MAC hook calls vn_getpath. This presents
3387 * problems where if the path can not be found from the name cache, those operations can
3388 * erroneously fail with EPERM even though the call should succeed. When removing or moving
3389 * file system objects with operations such as unlink or rename, those operations need to
3390 * take IO counts on the target and containing directory. Calling vn_getpath_fsenter from a
3391 * MAC hook from these operations during forced unmount operations can lead to dead
3392 * lock. This happens when the operation starts, IO counts are taken on the containing
3393 * directories and targets. Before the MAC hook is called a forced unmount from another
3394 * thread takes place and blocks on the on going operation's directory vnode in vdrain.
3395 * After which, the MAC hook gets called and calls vn_getpath_fsenter. vn_getpath_fsenter
3396 * is called with the understanding that there is an IO count on the target. If in
3397 * build_path the directory vnode is no longer in the cache, then the parent object id via
3398 * vnode_getattr from the target is obtain and used to call VFS_VGET to get the parent
3399 * vnode. The file system's VFS_VGET then looks up by inode in its hash and tries to get
3400 * an IO count. But VFS_VGET "sees" the directory vnode is in vdrain and can block
3401 * depending on which version and how it calls the vnode_get family of interfaces.
3402 *
3403 * N.B. A reasonable interface to use is vnode_getwithvid. This interface was modified to
3404 * call vnode_getiocount with VNODE_DRAINO, so it will happily get an IO count and not
3405 * cause issues, but there is no guarantee that all or any file systems are doing that.
3406 *
3407 * vn_getpath_fsenter_with_parent can enter the file system safely since there is a known
3408 * IO count on the directory vnode by calling build_path_with_parent.
3409 */
3410
/*
 * vn_getpath_fsenter_with_parent:
 *   Build the path to 'vp' allowing file-system re-entry; 'dvp' is the
 *   containing directory, on which the caller must already hold an IO
 *   count (see the block comment above for why that makes re-entry
 *   safe during forced unmount).
 */
int
vn_getpath_fsenter_with_parent(struct vnode *dvp, struct vnode *vp, char *pathbuf, int *len)
{
	return build_path_with_parent(vp, dvp, pathbuf, *len, len, NULL, 0, vfs_context_current());
}
3416
3417 int
vn_getpath_ext(struct vnode * vp,struct vnode * dvp,char * pathbuf,int * len,int flags)3418 vn_getpath_ext(struct vnode *vp, struct vnode *dvp, char *pathbuf, int *len, int flags)
3419 {
3420 int bpflags = (flags & VN_GETPATH_FSENTER) ? 0 : BUILDPATH_NO_FS_ENTER;
3421
3422 if (flags && (flags != VN_GETPATH_FSENTER)) {
3423 if (flags & VN_GETPATH_NO_FIRMLINK) {
3424 bpflags |= BUILDPATH_NO_FIRMLINK;
3425 }
3426 if (flags & VN_GETPATH_VOLUME_RELATIVE) {
3427 bpflags |= (BUILDPATH_VOLUME_RELATIVE | BUILDPATH_NO_FIRMLINK);
3428 }
3429 if (flags & VN_GETPATH_NO_PROCROOT) {
3430 bpflags |= BUILDPATH_NO_PROCROOT;
3431 }
3432 }
3433
3434 return build_path_with_parent(vp, dvp, pathbuf, *len, len, NULL, bpflags, vfs_context_current());
3435 }
3436
/*
 * vn_getpath_no_firmlink:
 *   Convenience wrapper: vn_getpath_ext() with no parent hint and with
 *   firmlink translation suppressed.
 */
int
vn_getpath_no_firmlink(struct vnode *vp, char *pathbuf, int *len)
{
	return vn_getpath_ext(vp, NULLVP, pathbuf, len, VN_GETPATH_NO_FIRMLINK);
}
3442
3443 int
vn_getpath_ext_with_mntlen(struct vnode * vp,struct vnode * dvp,char * pathbuf,size_t * len,size_t * mntlen,int flags)3444 vn_getpath_ext_with_mntlen(struct vnode *vp, struct vnode *dvp, char *pathbuf, size_t *len, size_t *mntlen, int flags)
3445 {
3446 int bpflags = (flags & VN_GETPATH_FSENTER) ? 0 : BUILDPATH_NO_FS_ENTER;
3447 int local_len;
3448 int error;
3449
3450 if (*len > INT_MAX) {
3451 return EINVAL;
3452 }
3453
3454 local_len = *len;
3455
3456 if (flags && (flags != VN_GETPATH_FSENTER)) {
3457 if (flags & VN_GETPATH_NO_FIRMLINK) {
3458 bpflags |= BUILDPATH_NO_FIRMLINK;
3459 }
3460 if (flags & VN_GETPATH_VOLUME_RELATIVE) {
3461 bpflags |= (BUILDPATH_VOLUME_RELATIVE | BUILDPATH_NO_FIRMLINK);
3462 }
3463 if (flags & VN_GETPATH_NO_PROCROOT) {
3464 bpflags |= BUILDPATH_NO_PROCROOT;
3465 }
3466 }
3467
3468 error = build_path_with_parent(vp, dvp, pathbuf, local_len, &local_len, mntlen, bpflags, vfs_context_current());
3469
3470 if (local_len >= 0 && local_len <= (int)*len) {
3471 *len = (size_t)local_len;
3472 }
3473
3474 return error;
3475 }
3476
/*
 * vn_getcdhash:
 *   Fetch the code-signing cdhash covering 'offset' within 'vp' via
 *   the UBC layer; see ubc_cs_getcdhash for the buffer requirements
 *   on 'cdhash' and the error returns.
 */
int
vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
{
	return ubc_cs_getcdhash(vp, offset, cdhash);
}
3482
3483
/*
 * Package-extension table installed by set_package_extensions_table()
 * and scanned by is_package_name(); all three are protected by
 * pkg_extensions_lck.
 */
static char *extension_table = NULL;    /* nexts entries, each max_ext_width bytes wide */
static int nexts;                       /* number of entries in extension_table */
static int max_ext_width;               /* per-entry stride in bytes */
3487
/*
 * qsort() comparator for the package-extension table: orders entries by
 * string length, shortest first (negative when a is shorter than b).
 */
static int
extension_cmp(const void *a, const void *b)
{
	size_t len_a = strlen((const char *)a);
	size_t len_b = strlen((const char *)b);

	return (int)(len_a - len_b);
}
3493
3494
3495 //
3496 // This is the api LaunchServices uses to inform the kernel
3497 // the list of package extensions to ignore.
3498 //
3499 // Internally we keep the list sorted by the length of the
3500 // the extension (from longest to shortest). We sort the
3501 // list of extensions so that we can speed up our searches
3502 // when comparing file names -- we only compare extensions
3503 // that could possibly fit into the file name, not all of
3504 // them (i.e. a short 8 character name can't have an 8
3505 // character extension).
3506 //
3507 extern lck_mtx_t pkg_extensions_lck;
3508
/*
 * set_package_extensions_table:
 *   Install a new package-extension table copied from user space;
 *   'data' holds nentries fixed-width entries of maxwidth bytes each.
 *   The new table is sorted with qsort()/extension_cmp, swapped in
 *   under pkg_extensions_lck, and the previous table freed afterwards.
 *
 *   NOTE(review): extension_cmp orders entries shortest-first, while
 *   the comment block above says the list is kept longest-to-shortest
 *   -- verify which ordering is intended.
 *
 *   Returns EINVAL for out-of-range nentries/maxwidth, ENOMEM if the
 *   buffer cannot be allocated, or the copyin() error.
 */
__private_extern__ int
set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
{
	char *new_exts, *old_exts;
	int old_nentries = 0, old_maxwidth = 0;
	int error;

	/* bound the table size (also keeps nentries * maxwidth from overflowing) */
	if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
		return EINVAL;
	}


	// allocate one byte extra so we can guarantee null termination
	new_exts = kalloc_data((nentries * maxwidth) + 1, Z_WAITOK);
	if (new_exts == NULL) {
		return ENOMEM;
	}

	error = copyin(data, new_exts, nentries * maxwidth);
	if (error) {
		kfree_data(new_exts, (nentries * maxwidth) + 1);
		return error;
	}

	new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block

	qsort(new_exts, nentries, maxwidth, extension_cmp);

	/* swap the new table in under the lock; free the old one outside it */
	lck_mtx_lock(&pkg_extensions_lck);

	old_exts = extension_table;
	old_nentries = nexts;
	old_maxwidth = max_ext_width;
	extension_table = new_exts;
	nexts = nentries;
	max_ext_width = maxwidth;

	lck_mtx_unlock(&pkg_extensions_lck);

	/* old_exts is NULL on the first call; kfree_data tolerates that */
	kfree_data(old_exts, (old_nentries * old_maxwidth) + 1);

	return 0;
}
3552
3553
3554 int
is_package_name(const char * name,int len)3555 is_package_name(const char *name, int len)
3556 {
3557 int i;
3558 size_t extlen;
3559 const char *ptr, *name_ext;
3560
3561 // if the name is less than 3 bytes it can't be of the
3562 // form A.B and if it begins with a "." then it is also
3563 // not a package.
3564 if (len <= 3 || name[0] == '.') {
3565 return 0;
3566 }
3567
3568 name_ext = NULL;
3569 for (ptr = name; *ptr != '\0'; ptr++) {
3570 if (*ptr == '.') {
3571 name_ext = ptr;
3572 }
3573 }
3574
3575 // if there is no "." extension, it can't match
3576 if (name_ext == NULL) {
3577 return 0;
3578 }
3579
3580 // advance over the "."
3581 name_ext++;
3582
3583 lck_mtx_lock(&pkg_extensions_lck);
3584
3585 // now iterate over all the extensions to see if any match
3586 ptr = &extension_table[0];
3587 for (i = 0; i < nexts; i++, ptr += max_ext_width) {
3588 extlen = strlen(ptr);
3589 if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
3590 // aha, a match!
3591 lck_mtx_unlock(&pkg_extensions_lck);
3592 return 1;
3593 }
3594 }
3595
3596 lck_mtx_unlock(&pkg_extensions_lck);
3597
3598 // if we get here, no extension matched
3599 return 0;
3600 }
3601
/*
 * vn_path_package_check:
 *   Walk each '/'-separated component of 'path' (which must begin with
 *   '/') and set *component to the zero-based index of the first
 *   component that is_package_name() recognizes; *component stays -1
 *   when none match.
 *
 *   NOTE: each component is NUL-terminated in place and the separators
 *   are never restored, so 'path' is destroyed -- callers must pass a
 *   scratch copy.
 *
 *   Returns 0 on success (even with no match), EINVAL for a negative
 *   pathlen, a path not starting with '/', or a string that is not
 *   NUL-terminated within pathlen.
 */
int
vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
{
	char *ptr, *end;
	int comp = 0;

	if (pathlen < 0) {
		return EINVAL;
	}

	*component = -1;
	if (*path != '/') {
		return EINVAL;
	}

	end = path + 1;
	while (end < path + pathlen && *end != '\0') {
		/* skip over any run of '/' separators */
		while (end < path + pathlen && *end == '/' && *end != '\0') {
			end++;
		}

		ptr = end;

		/* advance to the end of this component */
		while (end < path + pathlen && *end != '/' && *end != '\0') {
			end++;
		}

		if (end > path + pathlen) {
			// hmm, string wasn't null terminated
			return EINVAL;
		}

		/*
		 * NOTE(review): if the final component runs to exactly
		 * path + pathlen, this writes the NUL at path[pathlen],
		 * i.e. it assumes the buffer is at least pathlen + 1
		 * bytes -- verify against callers.
		 */
		*end = '\0';
		if (is_package_name(ptr, (int)(end - ptr))) {
			*component = comp;
			break;
		}

		end++;
		comp++;
	}

	return 0;
}
3646
3647 /*
3648 * Determine if a name is inappropriate for a searchfs query.
3649 * This list consists of /System currently.
3650 */
3651
/*
 * vn_searchfs_inappropriate_name:
 *   Determine if a name is inappropriate for a searchfs query; the
 *   list currently consists of "System" only.  Comparison is exact
 *   (length and case sensitive).
 *
 *   Returns 1 on a match, 0 otherwise, EINVAL for a negative length.
 */
int
vn_searchfs_inappropriate_name(const char *name, int len)
{
	static const char *disallowed[] = { "System" };
	static const int disallowed_len[] = { 6 };
	int i;

	if (len < 0) {
		return EINVAL;
	}

	for (i = 0; i < (int)(sizeof(disallowed) / sizeof(disallowed[0])); i++) {
		/* strncmp over strlen+1 bytes includes the NUL, so this is an exact match */
		if (len == disallowed_len[i] &&
		    strncmp(name, disallowed[i], strlen(disallowed[i]) + 1) == 0) {
			return 1;
		}
	}

	/* no name matched */
	return 0;
}
3672
3673 /*
3674 * Top level filesystem related information gathering.
3675 */
3676 extern unsigned int vfs_nummntops;
3677
3678 /*
3679 * The VFS_NUMMNTOPS shouldn't be at name[1] since
3680 * is a VFS generic variable. Since we no longer support
3681 * VT_UFS, we reserve its value to support this sysctl node.
3682 *
3683 * It should have been:
3684 * name[0]: VFS_GENERIC
3685 * name[1]: VFS_NUMMNTOPS
3686 */
3687 SYSCTL_INT(_vfs, VFS_NUMMNTOPS, nummntops,
3688 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
3689 &vfs_nummntops, 0, "");
3690
3691 int
3692 vfs_sysctl(int *name __unused, u_int namelen __unused,
3693 user_addr_t oldp __unused, size_t *oldlenp __unused,
3694 user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused);
3695
/*
 * vfs_sysctl:
 *   Legacy VFS sysctl entry point; every argument is ignored and
 *   EINVAL is returned unconditionally (see vfs_sysctl_node for the
 *   live per-filesystem path).
 */
int
vfs_sysctl(int *name __unused, u_int namelen __unused,
    user_addr_t oldp __unused, size_t *oldlenp __unused,
    user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused)
{
	return EINVAL;
}
3703
3704
3705 //
3706 // The following code disallows specific sysctl's that came through
3707 // the direct sysctl interface (vfs_sysctl_node) instead of the newer
3708 // sysctl_vfs_ctlbyfsid() interface. We can not allow these selectors
3709 // through vfs_sysctl_node() because it passes the user's oldp pointer
3710 // directly to the file system which (for these selectors) casts it
3711 // back to a struct sysctl_req and then proceed to use SYSCTL_IN()
3712 // which jumps through an arbitrary function pointer. When called
3713 // through the sysctl_vfs_ctlbyfsid() interface this does not happen
3714 // and so it's safe.
3715 //
3716 // Unfortunately we have to pull in definitions from AFP and SMB and
3717 // perform explicit name checks on the file system to determine if
3718 // these selectors are being used.
3719 //
3720
3721 #define AFPFS_VFS_CTL_GETID 0x00020001
3722 #define AFPFS_VFS_CTL_NETCHANGE 0x00020002
3723 #define AFPFS_VFS_CTL_VOLCHANGE 0x00020003
3724
3725 #define SMBFS_SYSCTL_REMOUNT 1
3726 #define SMBFS_SYSCTL_REMOUNT_INFO 2
3727 #define SMBFS_SYSCTL_GET_SERVER_SHARE 3
3728
3729
3730 static int
is_bad_sysctl_name(struct vfstable * vfsp,int selector_name)3731 is_bad_sysctl_name(struct vfstable *vfsp, int selector_name)
3732 {
3733 switch (selector_name) {
3734 case VFS_CTL_QUERY:
3735 case VFS_CTL_TIMEO:
3736 case VFS_CTL_NOLOCKS:
3737 case VFS_CTL_NSTATUS:
3738 case VFS_CTL_SADDR:
3739 case VFS_CTL_DISC:
3740 case VFS_CTL_SERVERINFO:
3741 return 1;
3742
3743 default:
3744 break;
3745 }
3746
3747 // the more complicated check for some of SMB's special values
3748 if (strcmp(vfsp->vfc_name, "smbfs") == 0) {
3749 switch (selector_name) {
3750 case SMBFS_SYSCTL_REMOUNT:
3751 case SMBFS_SYSCTL_REMOUNT_INFO:
3752 case SMBFS_SYSCTL_GET_SERVER_SHARE:
3753 return 1;
3754 }
3755 } else if (strcmp(vfsp->vfc_name, "afpfs") == 0) {
3756 switch (selector_name) {
3757 case AFPFS_VFS_CTL_GETID:
3758 case AFPFS_VFS_CTL_NETCHANGE:
3759 case AFPFS_VFS_CTL_VOLCHANGE:
3760 return 1;
3761 }
3762 }
3763
3764 //
3765 // If we get here we passed all the checks so the selector is ok
3766 //
3767 return 0;
3768 }
3769
3770
/*
 * vfs_sysctl_node:
 *   sysctl handler for the per-filesystem nodes.  Resolves the
 *   vfstable entry whose type number matches the oid number, rejects
 *   selectors that are unsafe through this legacy path (see
 *   is_bad_sysctl_name above), and otherwise forwards the request to
 *   the file system's own vfs_sysctl handler.  vfc_refcount is held
 *   across the call.
 */
int vfs_sysctl_node SYSCTL_HANDLER_ARGS
{
	int *name, namelen;
	struct vfstable *vfsp;
	int error;
	int fstypenum;

	fstypenum = oidp->oid_number;
	name = arg1;
	namelen = arg2;

	/* all sysctl names at this level should have at least one name slot for the FS */
	if (namelen < 1) {
		return EISDIR; /* overloaded */
	}
	mount_list_lock();
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_typenum == fstypenum) {
			/* bump the refcount under the list lock before using the entry */
			vfsp->vfc_refcount++;
			break;
		}
	}
	mount_list_unlock();

	if (vfsp == NULL) {
		return ENOTSUP;
	}

	if (is_bad_sysctl_name(vfsp, name[0])) {
		printf("vfs: bad selector 0x%.8x for old-style sysctl().  use the sysctl-by-fsid interface instead\n", name[0]);
		error = EPERM;
	} else {
		error = (vfsp->vfc_vfsops->vfs_sysctl)(name, namelen,
		    req->oldptr, &req->oldlen, req->newptr, req->newlen,
		    vfs_context_current());
	}

	mount_list_lock();
	vfsp->vfc_refcount--;
	mount_list_unlock();

	return error;
}
3814
3815 /*
3816 * Check to see if a filesystem is mounted on a block device.
3817 */
/*
 * vfs_mountedon:
 *   Check whether a file system is mounted on the block device backing
 *   'vp'.  If the device is aliased, every alias on the spec hash
 *   chain is checked as well.
 *
 *   Returns EBUSY when mounted on, 0 otherwise.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

	SPECHASH_LOCK();
	if (vp->v_specflags & SI_MOUNTEDON) {
		error = EBUSY;
		goto out;
	}
	/* also reject if any alias of the same (rdev, type) is mounted on */
	if (vp->v_specflags & SI_ALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type) {
				continue;
			}
			if (vq->v_specflags & SI_MOUNTEDON) {
				error = EBUSY;
				break;
			}
		}
	}
out:
	SPECHASH_UNLOCK();
	return error;
}
3845
3846 struct unmount_info {
3847 int u_errs; // Total failed unmounts
3848 int u_busy; // EBUSY failed unmounts
3849 int u_count; // Total volumes iterated
3850 int u_only_non_system;
3851 };
3852
/*
 * unmount_callback:
 *   vfs_iterate() callback used by vfs_unmountall().  Force-unmounts
 *   'mp' unless u_only_non_system is set and the mount is the root fs
 *   or a system mount; failures (and EBUSY failures separately) are
 *   tallied in the unmount_info so the caller can decide to retry.
 *
 *   Always returns VFS_RETURNED; the iteration reference is dropped
 *   here on every path (VFS_ITERATE_CB_DROPREF).
 */
static int
unmount_callback(mount_t mp, void *arg)
{
	int error;
	char *mntname;
	struct unmount_info *uip = arg;

	uip->u_count++;

	/* snapshot the mount point name up front for logging */
	mntname = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);
	strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);

	if (uip->u_only_non_system
	    && ((mp->mnt_flag & MNT_ROOTFS) || (mp->mnt_kern_flag & MNTK_SYSTEM))) { //MNTK_BACKS_ROOT
		printf("unmount(%d) %s skipped\n", uip->u_only_non_system, mntname);
		mount_iterdrop(mp);     // VFS_ITERATE_CB_DROPREF
	} else {
		printf("unmount(%d) %s\n", uip->u_only_non_system, mntname);

		/* swap the iteration ref for a regular mount ref before dounmount */
		mount_ref(mp, 0);
		mount_iterdrop(mp);     // VFS_ITERATE_CB_DROPREF
		error = dounmount(mp, MNT_FORCE, 1, vfs_context_current());
		if (error) {
			uip->u_errs++;
			printf("Unmount of %s failed (%d)\n", mntname ? mntname:"?", error);
			if (error == EBUSY) {
				uip->u_busy++;
			}
		}
	}
	zfree(ZV_NAMEI, mntname);

	return VFS_RETURNED;
}
3887
3888 /*
3889 * Unmount all filesystems. The list is traversed in reverse order
3890 * of mounting to avoid dependencies.
3891 * Busy mounts are retried.
3892 */
/*
 * vfs_unmountall:
 *   Force-unmount every mounted file system (or, with only_non_system
 *   set, every non-root/non-system one), tail-first.  Busy mounts are
 *   retried with exponential backoff (1s, 2s, ... up to 32s) before
 *   giving up.
 *
 *   NOTE(review): the early return on mounts == 0 skips the closing
 *   "end" log line -- confirm that is intentional.
 */
__private_extern__ void
vfs_unmountall(int only_non_system)
{
	int mounts, sec = 1;
	struct unmount_info ui;

	vfs_unmountall_started = 1;
	printf("vfs_unmountall(%ssystem) start\n", only_non_system ? "non" : "");

retry:
	ui.u_errs = ui.u_busy = ui.u_count = 0;
	ui.u_only_non_system = only_non_system;
	// avoid vfs_iterate deadlock in dounmount(), use VFS_ITERATE_CB_DROPREF
	vfs_iterate(VFS_ITERATE_CB_DROPREF | VFS_ITERATE_TAIL_FIRST, unmount_callback, &ui);
	mounts = mount_getvfscnt();
	if (mounts == 0) {
		return;
	}
	if (ui.u_busy > 0) {            // Busy mounts - wait & retry
		tsleep(&nummounts, PVFS, "busy mount", sec * hz);
		sec *= 2;
		if (sec <= 32) {
			goto retry;
		}
		printf("Unmounting timed out\n");
	} else if (ui.u_count < mounts) {
		// If the vfs_iterate missed mounts in progress - wait a bit
		tsleep(&nummounts, PVFS, "missed mount", 2 * hz);
	}

	printf("vfs_unmountall(%ssystem) end\n", only_non_system ? "non" : "");
}
3925
3926 /*
3927 * This routine is called from vnode_pager_deallocate out of the VM
3928 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
3929 * on a vnode that has a UBCINFO
3930 */
/*
 * vnode_pager_vrele:
 *   Called (via vnode_pager_deallocate from the VM layer) when the
 *   pager for a vnode that has a ubc_info is torn down: clears the
 *   VNAMED_UBC naming, undoes any leftover ubc mapping reference, and
 *   frees the ubc_info.
 */
__private_extern__ void
vnode_pager_vrele(vnode_t vp)
{
	struct ubc_info *uip;

	vnode_lock_spin(vp);

	vp->v_lflag &= ~VNAMED_UBC;
	if (vp->v_usecount != 0) {
		/*
		 * At the eleventh hour, just before the ubcinfo is
		 * destroyed, ensure the ubc-specific v_usecount
		 * reference has gone.  We use v_usecount != 0 as a hint;
		 * ubc_unmap() does nothing if there's no mapping.
		 *
		 * This case is caused by coming here via forced unmount,
		 * versus the usual vm_object_deallocate() path.
		 * In the forced unmount case, ubc_destroy_named()
		 * releases the pager before memory_object_last_unmap()
		 * can be called.
		 */
		vnode_unlock(vp);
		ubc_unmap(vp);
		vnode_lock_spin(vp);
	}

	/* detach the ubc_info before freeing it so no one else can see it */
	uip = vp->v_ubcinfo;
	vp->v_ubcinfo = UBC_INFO_NULL;

	vnode_unlock(vp);

	ubc_info_deallocate(uip);
}
3964
3965
3966 #include <sys/disk.h>
3967
3968 u_int32_t rootunit = (u_int32_t)-1;
3969
3970 #if CONFIG_IOSCHED
3971 extern int lowpri_throttle_enabled;
3972 extern int iosched_enabled;
3973 #endif
3974
/*
 * vfs_init_io_attributes:
 *   Probe the block device 'devvp' backing mount 'mp' (via DKIOC*
 *   ioctls) and populate the mount's I/O characteristics: throttle
 *   unit, device block size, maximum read/write transfer sizes and
 *   segment limits, alignment mask, queue depth, and feature flags
 *   (FUA, unmap, fusion drive, I/O scheduling, peripheral).
 *
 *   Returns 0 on success or the first failing mandatory ioctl's error.
 *   Some ioctls (virtual, SSD, saturation, CoreStorage, APFS flavour,
 *   location) are optional and their failure is ignored.
 */
errno_t
vfs_init_io_attributes(vnode_t devvp, mount_t mp)
{
	int error;
	off_t readblockcnt = 0;
	off_t writeblockcnt = 0;
	off_t readmaxcnt = 0;
	off_t writemaxcnt = 0;
	off_t readsegcnt = 0;
	off_t writesegcnt = 0;
	off_t readsegsize = 0;
	off_t writesegsize = 0;
	off_t alignment = 0;
	u_int32_t minsaturationbytecount = 0;
	u_int32_t ioqueue_depth = 0;
	u_int32_t blksize;
	u_int64_t temp;
	u_int32_t features;
	u_int64_t location = 0;
	vfs_context_t ctx = vfs_context_current();
	dk_corestorage_info_t cs_info;
	boolean_t cs_present = FALSE;
	int isssd = 0;
	int isvirtual = 0;


	VNOP_IOCTL(devvp, DKIOCGETTHROTTLEMASK, (caddr_t)&mp->mnt_throttle_mask, 0, NULL);
	/*
	 * as a reasonable approximation, only use the lowest bit of the mask
	 * to generate a disk unit number
	 */
	mp->mnt_devbsdunit = num_trailing_0(mp->mnt_throttle_mask);

	if (devvp == rootvp) {
		rootunit = mp->mnt_devbsdunit;
	}

	if (mp->mnt_devbsdunit == rootunit) {
		/*
		 * this mount point exists on the same device as the root
		 * partition, so it comes under the hard throttle control...
		 * this is true even for the root mount point itself
		 */
		mp->mnt_kern_flag |= MNTK_ROOTDEV;
	}
	/*
	 * force the spec device to re-cache
	 * the underlying block size in case
	 * the filesystem overrode the initial value
	 */
	set_fsblocksize(devvp);


	if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
	    (caddr_t)&blksize, 0, ctx))) {
		return error;
	}

	mp->mnt_devblocksize = blksize;

	/*
	 * set the maximum possible I/O size
	 * this may get clipped to a smaller value
	 * based on which constraints are being advertised
	 * and if those advertised constraints result in a smaller
	 * limit for a given I/O
	 */
	mp->mnt_maxreadcnt = MAX_UPL_SIZE_BYTES;
	mp->mnt_maxwritecnt = MAX_UPL_SIZE_BYTES;

	/* optional probes -- ignore failures */
	if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
		if (isvirtual) {
			mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
			mp->mnt_flag |= MNT_REMOVABLE;
		}
	}
	if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
		if (isssd) {
			mp->mnt_kern_flag |= MNTK_SSD;
		}
	}

	/* mandatory probes -- any failure aborts with the ioctl's error */
	if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
	    (caddr_t)&features, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
	    (caddr_t)&readblockcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
	    (caddr_t)&writeblockcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
	    (caddr_t)&readmaxcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
	    (caddr_t)&writemaxcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
	    (caddr_t)&readsegcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
	    (caddr_t)&writesegcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
	    (caddr_t)&readsegsize, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
	    (caddr_t)&writesegsize, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
	    (caddr_t)&alignment, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
	    (caddr_t)&ioqueue_depth, 0, ctx))) {
		return error;
	}

	/*
	 * clip the max transfer counts to the smaller of the byte-count
	 * and block-count limits advertised by the device (when present),
	 * saturating at UINT32_MAX
	 */
	if (readmaxcnt) {
		mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX :(uint32_t) readmaxcnt;
	}

	if (readblockcnt) {
		temp = readblockcnt * blksize;
		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;

		if (temp < mp->mnt_maxreadcnt) {
			mp->mnt_maxreadcnt = (u_int32_t)temp;
		}
	}

	if (writemaxcnt) {
		mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : (uint32_t)writemaxcnt;
	}

	if (writeblockcnt) {
		temp = writeblockcnt * blksize;
		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;

		if (temp < mp->mnt_maxwritecnt) {
			mp->mnt_maxwritecnt = (u_int32_t)temp;
		}
	}

	/* segment counts default to one page per segment when unadvertised */
	if (readsegcnt) {
		temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
	} else {
		temp = mp->mnt_maxreadcnt / PAGE_SIZE;

		if (temp > UINT16_MAX) {
			temp = UINT16_MAX;
		}
	}
	mp->mnt_segreadcnt = (u_int16_t)temp;

	if (writesegcnt) {
		temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
	} else {
		temp = mp->mnt_maxwritecnt / PAGE_SIZE;

		if (temp > UINT16_MAX) {
			temp = UINT16_MAX;
		}
	}
	mp->mnt_segwritecnt = (u_int16_t)temp;

	/* segment sizes default to the max transfer size when unadvertised */
	if (readsegsize) {
		temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
	} else {
		temp = mp->mnt_maxreadcnt;
	}
	mp->mnt_maxsegreadsize = (u_int32_t)temp;

	if (writesegsize) {
		temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
	} else {
		temp = mp->mnt_maxwritecnt;
	}
	mp->mnt_maxsegwritesize = (u_int32_t)temp;

	/* convert the alignment byte count into a mask, capped at a page */
	if (alignment) {
		temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
	} else {
		temp = 0;
	}
	mp->mnt_alignmentmask = (uint32_t)temp;


	if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH) {
		temp = ioqueue_depth;
	} else {
		temp = MNT_DEFAULT_IOQUEUE_DEPTH;
	}

	mp->mnt_ioqueue_depth = (uint32_t)temp;
	mp->mnt_ioscale = MNT_IOSCALE(mp->mnt_ioqueue_depth);

	if (mp->mnt_ioscale > 1) {
		printf("ioqueue_depth = %d,   ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
	}

	if (features & DK_FEATURE_FORCE_UNIT_ACCESS) {
		mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
	}

	if (VNOP_IOCTL(devvp, DKIOCGETIOMINSATURATIONBYTECOUNT, (caddr_t)&minsaturationbytecount, 0, ctx) == 0) {
		mp->mnt_minsaturationbytecount = minsaturationbytecount;
	} else {
		mp->mnt_minsaturationbytecount = 0;
	}

	if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, ctx) == 0) {
		cs_present = TRUE;
	}

	if (features & DK_FEATURE_UNMAP) {
		mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED;

		if (cs_present == TRUE) {
			mp->mnt_ioflags |= MNT_IOFLAGS_CSUNMAP_SUPPORTED;
		}
	}
	if (cs_present == TRUE) {
		/*
		 * for now we'll use the following test as a proxy for
		 * the underlying drive being FUSION in nature
		 */
		if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) {
			mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
		}
	} else {
		/* Check for APFS Fusion */
		dk_apfs_flavour_t flavour;
		if ((VNOP_IOCTL(devvp, DKIOCGETAPFSFLAVOUR, (caddr_t)&flavour, 0, ctx) == 0) &&
		    (flavour == DK_APFS_FUSION)) {
			mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
		}
	}

	if (VNOP_IOCTL(devvp, DKIOCGETLOCATION, (caddr_t)&location, 0, ctx) == 0) {
		if (location & DK_LOCATION_EXTERNAL) {
			mp->mnt_ioflags |= MNT_IOFLAGS_PERIPHERAL_DRIVE;
			mp->mnt_flag |= MNT_REMOVABLE;
		}
	}

#if CONFIG_IOSCHED
	if (iosched_enabled && (features & DK_FEATURE_PRIORITY)) {
		mp->mnt_ioflags |= MNT_IOFLAGS_IOSCHED_SUPPORTED;
		throttle_info_disable_throttle(mp->mnt_devbsdunit, (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) != 0);
	}
#endif /* CONFIG_IOSCHED */
	/* error is 0 here: every mandatory ioctl above succeeded */
	return error;
}
4247
4248 static struct klist fs_klist;
4249 static LCK_GRP_DECLARE(fs_klist_lck_grp, "fs_klist");
4250 static LCK_MTX_DECLARE(fs_klist_lock, &fs_klist_lck_grp);
4251
/*
 * vfs_event_init:
 *   One-time initialization of the kqueue klist that
 *   vfs_event_signal() posts VFS events to.
 */
void
vfs_event_init(void)
{
	klist_init(&fs_klist);
}
4257
/*
 * vfs_event_signal:
 *   Post VFS event 'event' to all kqueue listeners on fs_klist.  For
 *   VQ_DEAD / VQ_NOTRESP the mount identified by 'fsid' first has its
 *   MNT_LNOTRESP kernel flag updated: non-zero 'data' marks the mount
 *   responding again, zero marks it not responding.
 */
void
vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data)
{
	if (event == VQ_DEAD || event == VQ_NOTRESP) {
		struct mount *mp = vfs_getvfs(fsid);
		if (mp) {
			mount_lock_spin(mp);
			if (data) {
				mp->mnt_kern_flag &= ~MNT_LNOTRESP;     // Now responding
			} else {
				mp->mnt_kern_flag |= MNT_LNOTRESP;      // Not responding
			}
			mount_unlock(mp);
		}
	}

	lck_mtx_lock(&fs_klist_lock);
	KNOTE(&fs_klist, event);
	lck_mtx_unlock(&fs_klist_lock);
}
4278
4279 /*
4280 * return the number of mounted filesystems.
4281 */
/*
 * sysctl_vfs_getvfscnt:
 *   sysctl-facing wrapper; return the number of mounted filesystems
 *   via mount_getvfscnt().
 */
static int
sysctl_vfs_getvfscnt(void)
{
	return mount_getvfscnt();
}
4287
4288
/*
 * mount_getvfscnt:
 *   Return the current mount count (nummounts), sampled under the
 *   mount list lock.
 */
static int
mount_getvfscnt(void)
{
	int ret;

	mount_list_lock();
	ret = nummounts;
	mount_list_unlock();
	return ret;
}
4299
4300
4301
4302 static int
mount_fillfsids(fsid_t * fsidlst,int count)4303 mount_fillfsids(fsid_t *fsidlst, int count)
4304 {
4305 struct mount *mp;
4306 int actual = 0;
4307
4308 actual = 0;
4309 mount_list_lock();
4310 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4311 if (actual < count) {
4312 fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
4313 actual++;
4314 }
4315 }
4316 mount_list_unlock();
4317 return actual;
4318 }
4319
4320 /*
4321 * fill in the array of fsid_t's up to a max of 'count', the actual
4322 * number filled in will be set in '*actual'. If there are more fsid_t's
4323 * than room in fsidlst then ENOMEM will be returned and '*actual' will
4324 * have the actual count.
4325 * having *actual filled out even in the error case is depended upon.
4326 */
static int
sysctl_vfs_getvfslist(fsid_t *fsidlst, unsigned long count, unsigned long *actual)
{
	struct mount *mp;

	*actual = 0;
	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		/* keep counting past 'count' so *actual reports the true
		 * total even on overflow -- callers depend on this */
		(*actual)++;
		if (*actual <= count) {
			fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
		}
	}
	mount_list_unlock();
	return *actual <= count ? 0 : ENOMEM;
}
4343
/*
 * sysctl_vfs_vfslist:
 *   Read-only sysctl handler returning the fsid of every mounted file
 *   system.  With a NULL oldp it just reports the space required; with
 *   a buffer it copies out the list, retrying if mounts were added
 *   between sizing and filling (sysctl_vfs_getvfslist returns ENOMEM
 *   in that case).
 */
static int
sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	unsigned long actual;
	int error;
	size_t space;
	fsid_t *fsidlst;

	/* This is a readonly node. */
	if (req->newptr != USER_ADDR_NULL) {
		return EPERM;
	}

	/* they are querying us so just return the space required. */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
		return 0;
	}
again:
	/*
	 * Retrieve an accurate count of the amount of space required to copy
	 * out all the fsids in the system.
	 */
	space = req->oldlen;
	req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);

	/* they didn't give us enough space. */
	if (space < req->oldlen) {
		return ENOMEM;
	}

	fsidlst = kalloc_data(req->oldlen, Z_WAITOK | Z_ZERO);
	if (fsidlst == NULL) {
		return ENOMEM;
	}

	error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
	    &actual);
	/*
	 * If we get back ENOMEM, then another mount has been added while we
	 * slept in malloc above.  If this is the case then try again.
	 */
	if (error == ENOMEM) {
		kfree_data(fsidlst, req->oldlen);
		/* restore the caller's buffer size before re-sizing */
		req->oldlen = space;
		goto again;
	}
	if (error == 0) {
		error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
	}
	kfree_data(fsidlst, req->oldlen);
	return error;
}
4398
4399 /*
4400 * Do a sysctl by fsid.
4401 */
/*
 * Handler for vfs.generic.ctlbyfsid: locate a mount by fsid, give the
 * filesystem's own vfs_sysctl a first crack at the request, then fall
 * back to the generic operations (VFS_CTL_UMOUNT, VFS_CTL_OSTATFS,
 * VFS_CTL_STATFS64) implemented here.  'gotref' tracks the mount iter
 * reference taken by mount_list_lookupby_fsid; it is dropped at 'out'.
 */
static int
sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
	union union_vfsidctl vc;
	struct mount *mp;
	struct vfsstatfs *sp;
	int *name, namelen;
	int flags = 0;
	int error = 0, gotref = 0;
	vfs_context_t ctx = vfs_context_current();
	proc_t p = req->p; /* XXX req->p != current_proc()? */
	boolean_t is_64_bit;
	/* scratch buffer large enough for any of the three statfs ABIs */
	union {
		struct statfs64 sfs64;
		struct user64_statfs osfs64;
		struct user32_statfs osfs32;
	} *sfsbuf;

	if (req->newptr == USER_ADDR_NULL) {
		error = EINVAL;
		goto out;
	}

	name = arg1;
	namelen = arg2;
	is_64_bit = proc_is64bit(p);

	/* copy in the vfsidctl, sized for the caller's ABI */
	error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
	if (error) {
		goto out;
	}
	if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
		error = EINVAL;
		goto out;
	}
	mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
	if (mp == NULL) {
		error = ENOENT;
		goto out;
	}
	gotref = 1;
	/* reset so that the fs specific code can fetch it. */
	req->newidx = 0;
	/*
	 * Note if this is a VFS_CTL then we pass the actual sysctl req
	 * in for "oldp" so that the lower layer can DTRT and use the
	 * SYSCTL_IN/OUT routines.
	 */
	if (mp->mnt_op->vfs_sysctl != NULL) {
		if (is_64_bit) {
			if (vfs_64bitready(mp)) {
				error = mp->mnt_op->vfs_sysctl(name, namelen,
				    CAST_USER_ADDR_T(req),
				    NULL, USER_ADDR_NULL, 0,
				    ctx);
			} else {
				error = ENOTSUP;
			}
		} else {
			error = mp->mnt_op->vfs_sysctl(name, namelen,
			    CAST_USER_ADDR_T(req),
			    NULL, USER_ADDR_NULL, 0,
			    ctx);
		}
		/* ENOTSUP from the filesystem falls through to generic handling */
		if (error != ENOTSUP) {
			goto out;
		}
	}
	switch (name[0]) {
	case VFS_CTL_UMOUNT:
#if CONFIG_MACF
		error = mac_mount_check_umount(ctx, mp);
		if (error != 0) {
			goto out;
		}
#endif
		req->newidx = 0;
		/* point the sysctl req at the caller-supplied argument buffer */
		if (is_64_bit) {
			req->newptr = vc.vc64.vc_ptr;
			req->newlen = (size_t)vc.vc64.vc_len;
		} else {
			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
			req->newlen = vc.vc32.vc_len;
		}
		error = SYSCTL_IN(req, &flags, sizeof(flags));
		if (error) {
			break;
		}

		/* exchange the iter reference for a real mount ref */
		mount_ref(mp, 0);
		mount_iterdrop(mp);
		gotref = 0;
		/* safedounmount consumes a ref */
		error = safedounmount(mp, flags, ctx);
		break;
	case VFS_CTL_OSTATFS:
	case VFS_CTL_STATFS64:
#if CONFIG_MACF
		error = mac_mount_check_stat(ctx, mp);
		if (error != 0) {
			break;
		}
#endif
		req->newidx = 0;
		if (is_64_bit) {
			req->newptr = vc.vc64.vc_ptr;
			req->newlen = (size_t)vc.vc64.vc_len;
		} else {
			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
			req->newlen = vc.vc32.vc_len;
		}
		error = SYSCTL_IN(req, &flags, sizeof(flags));
		if (error) {
			break;
		}
		sp = &mp->mnt_vfsstat;
		/* refresh the cached stats unless the caller asked for NOWAIT only */
		if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
		    (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT))) {
			goto out;
		}

		sfsbuf = kalloc_type(typeof(*sfsbuf), Z_WAITOK);

		if (name[0] == VFS_CTL_STATFS64) {
			struct statfs64 *sfs = &sfsbuf->sfs64;

			vfs_get_statfs64(mp, sfs);
			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		} else if (is_64_bit) {
			struct user64_statfs *sfs = &sfsbuf->osfs64;

			bzero(sfs, sizeof(*sfs));
			sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;
			sfs->f_bsize = (user64_long_t)sp->f_bsize;
			sfs->f_iosize = (user64_long_t)sp->f_iosize;
			sfs->f_blocks = (user64_long_t)sp->f_blocks;
			sfs->f_bfree = (user64_long_t)sp->f_bfree;
			sfs->f_bavail = (user64_long_t)sp->f_bavail;
			sfs->f_files = (user64_long_t)sp->f_files;
			sfs->f_ffree = (user64_long_t)sp->f_ffree;
			sfs->f_fsid = sp->f_fsid;
			sfs->f_owner = sp->f_owner;
#ifdef CONFIG_NFS_CLIENT
			if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
				strlcpy(&sfs->f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN);
			} else
#endif /* CONFIG_NFS_CLIENT */
			{
				strlcpy(sfs->f_fstypename, sp->f_fstypename, MFSNAMELEN);
			}
			strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
			strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);

			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		} else {
			struct user32_statfs *sfs = &sfsbuf->osfs32;
			long temp;

			bzero(sfs, sizeof(*sfs));
			sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;

			/*
			 * It's possible for there to be more than 2^^31 blocks in the filesystem, so we
			 * have to fudge the numbers here in that case.   We inflate the blocksize in order
			 * to reflect the filesystem size as best we can.
			 */
			if (sp->f_blocks > INT_MAX) {
				int shift;

				/*
				 * Work out how far we have to shift the block count down to make it fit.
				 * Note that it's possible to have to shift so far that the resulting
				 * blocksize would be unreportably large.  At that point, we will clip
				 * any values that don't fit.
				 *
				 * For safety's sake, we also ensure that f_iosize is never reported as
				 * being smaller than f_bsize.
				 */
				for (shift = 0; shift < 32; shift++) {
					if ((sp->f_blocks >> shift) <= INT_MAX) {
						break;
					}
					if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX) {
						break;
					}
				}
#define __SHIFT_OR_CLIP(x, s)   ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
				sfs->f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
				sfs->f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
				sfs->f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
#undef __SHIFT_OR_CLIP
				sfs->f_bsize = (user32_long_t)(sp->f_bsize << shift);
				temp = lmax(sp->f_iosize, sp->f_bsize);
				if (temp > INT32_MAX) {
					error = EINVAL;
					kfree_type(typeof(*sfsbuf), sfsbuf);
					goto out;
				}
				sfs->f_iosize = (user32_long_t)temp;
			} else {
				sfs->f_bsize = (user32_long_t)sp->f_bsize;
				sfs->f_iosize = (user32_long_t)sp->f_iosize;
				sfs->f_blocks = (user32_long_t)sp->f_blocks;
				sfs->f_bfree = (user32_long_t)sp->f_bfree;
				sfs->f_bavail = (user32_long_t)sp->f_bavail;
			}
			sfs->f_files = (user32_long_t)sp->f_files;
			sfs->f_ffree = (user32_long_t)sp->f_ffree;
			sfs->f_fsid = sp->f_fsid;
			sfs->f_owner = sp->f_owner;

#ifdef CONFIG_NFS_CLIENT
			if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
				strlcpy(&sfs->f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN);
			} else
#endif /* CONFIG_NFS_CLIENT */
			{
				strlcpy(sfs->f_fstypename, sp->f_fstypename, MFSNAMELEN);
			}
			strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
			strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);

			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		}
		kfree_type(typeof(*sfsbuf), sfsbuf);
		break;
	default:
		error = ENOTSUP;
		goto out;
	}
out:
	if (gotref != 0) {
		mount_iterdrop(mp);
	}
	return error;
}
4641
/*
 * knote filter for filesystem events: knotes attach to the global
 * fs_klist and are fired via KNOTE(&fs_klist, ...) — see the ops below.
 */
static int filt_fsattach(struct knote *kn, struct kevent_qos_s *kev);
static void filt_fsdetach(struct knote *kn);
static int filt_fsevent(struct knote *kn, long hint);
static int filt_fstouch(struct knote *kn, struct kevent_qos_s *kev);
static int filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev);
SECURITY_READ_ONLY_EARLY(struct filterops) fs_filtops = {
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent,
	.f_touch = filt_fstouch,
	.f_process = filt_fsprocess,
};
4654
/*
 * Attach a knote to the global fs klist.  EV_CLEAR is forced so the
 * accumulated event flags reset after each delivery.
 */
static int
filt_fsattach(struct knote *kn, __unused struct kevent_qos_s *kev)
{
	kn->kn_flags |= EV_CLEAR; /* automatic */
	kn->kn_sdata = 0;         /* incoming data is ignored */

	lck_mtx_lock(&fs_klist_lock);
	KNOTE_ATTACH(&fs_klist, kn);
	lck_mtx_unlock(&fs_klist_lock);

	/*
	 * filter only sees future events,
	 * so it can't be fired already.
	 */
	return 0;
}
4671
/* Detach a knote from the global fs klist (under the klist lock). */
static void
filt_fsdetach(struct knote *kn)
{
	lck_mtx_lock(&fs_klist_lock);
	KNOTE_DETACH(&fs_klist, kn);
	lck_mtx_unlock(&fs_klist_lock);
}
4679
4680 static int
filt_fsevent(struct knote * kn,long hint)4681 filt_fsevent(struct knote *kn, long hint)
4682 {
4683 /*
4684 * Backwards compatibility:
4685 * Other filters would do nothing if kn->kn_sfflags == 0
4686 */
4687
4688 if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
4689 kn->kn_fflags |= hint;
4690 }
4691
4692 return kn->kn_fflags != 0;
4693 }
4694
/*
 * Update the knote's interest mask from a new kevent registration and
 * report whether the knote is currently active.
 */
static int
filt_fstouch(struct knote *kn, struct kevent_qos_s *kev)
{
	int res;

	lck_mtx_lock(&fs_klist_lock);

	/* adopt the (possibly changed) interest mask from userspace */
	kn->kn_sfflags = kev->fflags;

	/*
	 * the above filter function sets bits even if nobody is looking for them.
	 * Just preserve those bits even if the new mask is more selective
	 * than before.
	 *
	 * For compatibility with previous implementations, we leave kn_fflags
	 * as they were before.
	 */
	//if (kn->kn_sfflags)
	//	kn->kn_fflags &= kn->kn_sfflags;
	res = (kn->kn_fflags != 0);

	lck_mtx_unlock(&fs_klist_lock);

	return res;
}
4720
4721 static int
filt_fsprocess(struct knote * kn,struct kevent_qos_s * kev)4722 filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev)
4723 {
4724 int res = 0;
4725
4726 lck_mtx_lock(&fs_klist_lock);
4727 if (kn->kn_fflags) {
4728 knote_fill_kevent(kn, kev, 0);
4729 res = 1;
4730 }
4731 lck_mtx_unlock(&fs_klist_lock);
4732 return res;
4733 }
4734
4735 static int
sysctl_vfs_noremotehang(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)4736 sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
4737 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
4738 {
4739 int out, error;
4740 pid_t pid;
4741 proc_t p;
4742
4743 /* We need a pid. */
4744 if (req->newptr == USER_ADDR_NULL) {
4745 return EINVAL;
4746 }
4747
4748 error = SYSCTL_IN(req, &pid, sizeof(pid));
4749 if (error) {
4750 return error;
4751 }
4752
4753 p = proc_find(pid < 0 ? -pid : pid);
4754 if (p == NULL) {
4755 return ESRCH;
4756 }
4757
4758 /*
4759 * Fetching the value is ok, but we only fetch if the old
4760 * pointer is given.
4761 */
4762 if (req->oldptr != USER_ADDR_NULL) {
4763 out = !((p->p_flag & P_NOREMOTEHANG) == 0);
4764 proc_rele(p);
4765 error = SYSCTL_OUT(req, &out, sizeof(out));
4766 return error;
4767 }
4768
4769 /* cansignal offers us enough security. */
4770 if (p != req->p && proc_suser(req->p) != 0) {
4771 proc_rele(p);
4772 return EPERM;
4773 }
4774
4775 if (pid < 0) {
4776 OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
4777 } else {
4778 OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
4779 }
4780 proc_rele(p);
4781
4782 return 0;
4783 }
4784
4785 static int
4786 sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS
4787 {
4788 int *name, namelen;
4789 struct vfstable *vfsp;
4790 struct vfsconf vfsc = {};
4791
4792 (void)oidp;
4793 name = arg1;
4794 namelen = arg2;
4795
4796 if (namelen < 1) {
4797 return EISDIR;
4798 } else if (namelen > 1) {
4799 return ENOTDIR;
4800 }
4801
4802 mount_list_lock();
4803 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
4804 if (vfsp->vfc_typenum == name[0]) {
4805 break;
4806 }
4807 }
4808
4809 if (vfsp == NULL) {
4810 mount_list_unlock();
4811 return ENOTSUP;
4812 }
4813
4814 vfsc.vfc_reserved1 = 0;
4815 bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
4816 vfsc.vfc_typenum = vfsp->vfc_typenum;
4817 vfsc.vfc_refcount = vfsp->vfc_refcount;
4818 vfsc.vfc_flags = vfsp->vfc_flags;
4819 vfsc.vfc_reserved2 = 0;
4820 vfsc.vfc_reserved3 = 0;
4821
4822 mount_list_unlock();
4823 return SYSCTL_OUT(req, &vfsc, sizeof(struct vfsconf));
4824 }
4825
4826 /* the vfs.generic. branch. */
4827 SYSCTL_EXTENSIBLE_NODE(_vfs, VFS_GENERIC, generic,
4828 CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs generic hinge");
/* retrieve a list of mounted filesystem fsid_t */
4830 SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist,
4831 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
4832 NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
4833 /* perform operations on filesystem via fsid_t */
4834 SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED,
4835 sysctl_vfs_ctlbyfsid, "ctlbyfsid");
4836 SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY,
4837 NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
4838 SYSCTL_INT(_vfs_generic, VFS_MAXTYPENUM, maxtypenum,
4839 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
4840 &maxvfstypenum, 0, "");
4841 SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout_seconds, 0, "");
4842 SYSCTL_NODE(_vfs_generic, VFS_CONF, conf,
4843 CTLFLAG_RD | CTLFLAG_LOCKED,
4844 sysctl_vfs_generic_conf, "");
4845 #if DEVELOPMENT || DEBUG
4846 SYSCTL_INT(_vfs_generic, OID_AUTO, print_busy_vnodes,
4847 CTLTYPE_INT | CTLFLAG_RW,
4848 &print_busy_vnodes, 0,
4849 "VFS log busy vnodes blocking unmount");
4850 #endif
4851
4852 /* Indicate that the root file system unmounted cleanly */
4853 static int vfs_root_unmounted_cleanly = 0;
4854 SYSCTL_INT(_vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &vfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly");
4855
/*
 * Record that the root filesystem was unmounted cleanly; the value is
 * exported via the vfs.generic.root_unmounted_cleanly sysctl above.
 */
void
vfs_set_root_unmounted_cleanly(void)
{
	vfs_root_unmounted_cleanly = 1;
}
4861
4862 /*
4863 * Print vnode state.
4864 */
/*
 * Print vnode state for debugging: a caller-supplied printf-style
 * header followed by the vnode's counts, flags, v_data and owning
 * filesystem name.  Pointers are scrambled with VM_KERNEL_ADDRPERM
 * before printing so raw kernel addresses are not disclosed.
 */
void
vn_print_state(struct vnode *vp, const char *fmt, ...)
{
	va_list ap;
	char perm_str[] = "(VM_KERNEL_ADDRPERM pointer)";
	char fs_name[MFSNAMELEN];

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("vp 0x%0llx %s: ", (uint64_t)VM_KERNEL_ADDRPERM(vp), perm_str);
	printf("tag %d, type %d\n", vp->v_tag, vp->v_type);
	/* Counts .. */
	printf("    iocount %d, usecount %d, kusecount %d references %d\n",
	    vp->v_iocount, vp->v_usecount, vp->v_kusecount, vp->v_references);
	printf("    writecount %d, numoutput %d\n", vp->v_writecount,
	    vp->v_numoutput);
	/* Flags */
	printf("    flag 0x%x, lflag 0x%x, listflag 0x%x\n", vp->v_flag,
	    vp->v_lflag, vp->v_listflag);

	/* a NULL or dead mount means the vnode belongs to no live filesystem */
	if (vp->v_mount == NULL || vp->v_mount == dead_mountp) {
		strlcpy(fs_name, "deadfs", MFSNAMELEN);
	} else {
		vfs_name(vp->v_mount, fs_name);
	}

	printf("    v_data 0x%0llx %s\n",
	    (vp->v_data ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_data) : 0),
	    perm_str);
	printf("    v_mount 0x%0llx %s vfs_name %s\n",
	    (vp->v_mount ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_mount) : 0),
	    perm_str, fs_name);
}
4899
4900 long num_reusedvnodes = 0;
4901
4902
/*
 * Take 'vp' off the list it is on and try to make it reusable.
 * NOTE(review): entered with the vnode list lock held (it calls
 * vnode_list_remove_locked() then vnode_list_unlock()); the lock is
 * dropped before returning.
 *
 * Returns the reclaimed vnode (locked) when want_vp is set and the
 * reclaim succeeded, otherwise NULLVP.  *deferred is set when the
 * vnode was instead punted to the async work list.
 */
static vnode_t
process_vp(vnode_t vp, int want_vp, bool can_defer, int *deferred)
{
	unsigned int vpid;

	*deferred = 0;

	/* capture the vid so a racing reclaim can be detected below */
	vpid = vp->v_id;

	vnode_list_remove_locked(vp);

	vnode_list_unlock();

	vnode_lock_spin(vp);

	/*
	 * We could wait for the vnode_lock after removing the vp from the freelist
	 * and the vid is bumped only at the very end of reclaim. So it is possible
	 * that we are looking at a vnode that is being terminated. If so skip it.
	 */
	if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
	    VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
		/*
		 * we lost the race between dropping the list lock
		 * and picking up the vnode_lock... someone else
		 * used this vnode and it is now in a new state
		 */
		vnode_unlock(vp);

		return NULLVP;
	}
	if ((vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE) {
		/*
		 * we did a vnode_rele_ext that asked for
		 * us not to reenter the filesystem during
		 * the release even though VL_NEEDINACTIVE was
		 * set... we'll do it here by doing a
		 * vnode_get/vnode_put
		 *
		 * pick up an iocount so that we can call
		 * vnode_put and drive the VNOP_INACTIVE...
		 * vnode_put will either leave us off
		 * the freelist if a new ref comes in,
		 * or put us back on the end of the freelist
		 * or recycle us if we were marked for termination...
		 * so we'll just go grab a new candidate
		 */
		vp->v_iocount++;
#ifdef CONFIG_IOCOUNT_TRACE
		record_vp(vp, 1);
#endif
		vnode_put_locked(vp);
		vnode_unlock(vp);

		return NULLVP;
	}
	/*
	 * Checks for anyone racing us for recycle
	 */
	if (vp->v_type != VBAD) {
		/* unreliable-media or dirty vnodes are punted to the async worker */
		if ((want_vp || can_defer) && (vnode_on_reliable_media(vp) == FALSE || (vp->v_flag & VISDIRTY))) {
			vnode_async_list_add(vp);
			vnode_unlock(vp);

			*deferred = 1;

			return NULLVP;
		}
		if (vp->v_lflag & VL_DEAD) {
			panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
		}

		vnode_lock_convert(vp);
		(void)vnode_reclaim_internal(vp, 1, want_vp, 0);

		if (want_vp) {
			/* sanity checks: a vnode handed to the caller must be fully detached */
			if ((VONLIST(vp))) {
				panic("new_vnode(%p): vp on list", vp);
			}
			if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
			    (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH))) {
				panic("new_vnode(%p): free vnode still referenced", vp);
			}
			if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0)) {
				panic("new_vnode(%p): vnode seems to be on mount list", vp);
			}
			if (!LIST_EMPTY(&vp->v_nclinks) || !TAILQ_EMPTY(&vp->v_ncchildren)) {
				panic("new_vnode(%p): vnode still hooked into the name cache", vp);
			}
		} else {
			vnode_unlock(vp);
			vp = NULLVP;
		}
	}
	return vp;
}
4999
/*
 * Dedicated worker loop: drain vnodes that process_vp() deferred onto
 * vnode_async_work_list, reclaiming each via process_vp(want_vp=0).
 * Blocks on the (empty) queue and restarts itself as a continuation.
 */
__attribute__((noreturn))
static void
async_work_continue(void)
{
	struct async_work_lst *q;
	int deferred;
	vnode_t vp;

	q = &vnode_async_work_list;

	for (;;) {
		vnode_list_lock();

		if (TAILQ_EMPTY(q)) {
			/* nothing queued: sleep until vnode_async_list_add wakes us */
			assert_wait(q, (THREAD_UNINT));

			vnode_list_unlock();

			thread_block((thread_continue_t)async_work_continue);

			continue;
		}
		async_work_handled++;

		vp = TAILQ_FIRST(q);

		/* process_vp drops the list lock; want_vp=0 so it never returns a vnode */
		vp = process_vp(vp, 0, false, &deferred);

		if (vp != NULLVP) {
			panic("found VBAD vp (%p) on async queue", vp);
		}
	}
}
5033
5034 __attribute__((noreturn))
5035 static void
vn_laundry_continue(void)5036 vn_laundry_continue(void)
5037 {
5038 struct freelst *free_q;
5039 struct ragelst *rage_q;
5040 int deferred;
5041 vnode_t vp;
5042 bool rage_q_empty;
5043 bool free_q_empty;
5044
5045
5046 free_q = &vnode_free_list;
5047 rage_q = &vnode_rage_list;
5048
5049 for (;;) {
5050 vnode_list_lock();
5051
5052 free_q_empty = TAILQ_EMPTY(free_q);
5053 rage_q_empty = TAILQ_EMPTY(rage_q);
5054
5055 if (!rage_q_empty && !free_q_empty) {
5056 struct timeval current_tv;
5057
5058 microuptime(¤t_tv);
5059 if (ragevnodes < rage_limit &&
5060 ((current_tv.tv_sec - rage_tv.tv_sec) < RAGE_TIME_LIMIT)) {
5061 rage_q_empty = true;
5062 }
5063 }
5064
5065 if (deadvnodes >= deadvnodes_high ||
5066 (rage_q_empty && free_q_empty) ||
5067 numvnodes < desiredvnodes) {
5068 assert_wait(free_q, (THREAD_UNINT));
5069
5070 vnode_list_unlock();
5071
5072 thread_block((thread_continue_t)vn_laundry_continue);
5073
5074 continue;
5075 }
5076
5077 if (!rage_q_empty) {
5078 vp = TAILQ_FIRST(rage_q);
5079 } else {
5080 vp = TAILQ_FIRST(free_q);
5081 }
5082
5083 vp = process_vp(vp, 0, true, &deferred);
5084 }
5085 }
5086
5087 static inline void
wakeup_laundry_thread()5088 wakeup_laundry_thread()
5089 {
5090 if ((deadvnodes < deadvnodes_low) &&
5091 /* Minimum number of free vnodes the thread should act on */
5092 ((freevnodes + ragevnodes) > 10)) {
5093 wakeup(&vnode_free_list);
5094 }
5095 }
5096
5097 static int
new_vnode(vnode_t * vpp)5098 new_vnode(vnode_t *vpp)
5099 {
5100 vnode_t vp;
5101 uint32_t retries = 0, max_retries = 100; /* retry incase of tablefull */
5102 uint32_t bdevvp_vnodes = 0;
5103 int force_alloc = 0, walk_count = 0;
5104 boolean_t need_reliable_vp = FALSE;
5105 int deferred;
5106 struct timeval initial_tv;
5107 struct timeval current_tv;
5108 proc_t curproc = current_proc();
5109
5110 initial_tv.tv_sec = 0;
5111 retry:
5112 vp = NULLVP;
5113
5114 vnode_list_lock();
5115 newvnode++;
5116
5117 if (need_reliable_vp == TRUE) {
5118 async_work_timed_out++;
5119 }
5120
5121 if ((numvnodes - deadvnodes) < desiredvnodes || force_alloc) {
5122 struct timespec ts;
5123
5124 if (!TAILQ_EMPTY(&vnode_dead_list)) {
5125 /*
5126 * Can always reuse a dead one
5127 */
5128 vp = TAILQ_FIRST(&vnode_dead_list);
5129 if (numvnodes >= desiredvnodes) {
5130 wakeup_laundry_thread();
5131 }
5132 goto steal_this_vp;
5133 }
5134 /*
5135 * no dead vnodes available... if we're under
5136 * the limit, we'll create a new vnode
5137 */
5138 numvnodes++;
5139 if (numvnodes >= desiredvnodes) {
5140 wakeup_laundry_thread();
5141 }
5142 vnode_list_unlock();
5143
5144 vp = zalloc_flags(vnode_zone, Z_WAITOK | Z_ZERO);
5145 VLISTNONE(vp); /* avoid double queue removal */
5146 lck_mtx_init(&vp->v_lock, &vnode_lck_grp, &vnode_lck_attr);
5147
5148 TAILQ_INIT(&vp->v_ncchildren);
5149
5150 klist_init(&vp->v_knotes);
5151 nanouptime(&ts);
5152 vp->v_id = (uint32_t)ts.tv_nsec;
5153 vp->v_flag = VSTANDARD;
5154
5155 #if CONFIG_MACF
5156 if (mac_vnode_label_init_needed(vp)) {
5157 mac_vnode_label_init(vp);
5158 }
5159 #endif /* MAC */
5160
5161 #if CONFIG_IOCOUNT_TRACE
5162 if (__improbable(bootarg_vnode_iocount_trace)) {
5163 vp->v_iocount_trace = (vnode_iocount_trace_t)kalloc_data(
5164 IOCOUNT_TRACE_MAX_TYPES * sizeof(struct vnode_iocount_trace),
5165 Z_WAITOK | Z_ZERO);
5166 }
5167 #endif /* CONFIG_IOCOUNT_TRACE */
5168
5169 vp->v_iocount = 1;
5170 goto done;
5171 }
5172
5173 wakeup_laundry_thread();
5174
5175 microuptime(¤t_tv);
5176
5177 #define MAX_WALK_COUNT 1000
5178
5179 if (!TAILQ_EMPTY(&vnode_rage_list) &&
5180 (ragevnodes >= rage_limit ||
5181 (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
5182 TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
5183 if (!(vp->v_listflag & VLIST_RAGE)) {
5184 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
5185 }
5186
5187 // if we're a dependency-capable process, skip vnodes that can
5188 // cause recycling deadlocks. (i.e. this process is diskimages
5189 // helper and the vnode is in a disk image). Querying the
5190 // mnt_kern_flag for the mount's virtual device status
5191 // is safer than checking the mnt_dependent_process, which
5192 // may not be updated if there are multiple devnode layers
5193 // in between the disk image and the final consumer.
5194
5195 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
5196 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
5197 /*
5198 * if need_reliable_vp == TRUE, then we've already sent one or more
5199 * non-reliable vnodes to the async thread for processing and timed
5200 * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
5201 * mechanism to first scan for a reliable vnode before forcing
5202 * a new vnode to be created
5203 */
5204 if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
5205 break;
5206 }
5207 }
5208
5209 // don't iterate more than MAX_WALK_COUNT vnodes to
5210 // avoid keeping the vnode list lock held for too long.
5211
5212 if (walk_count++ > MAX_WALK_COUNT) {
5213 vp = NULL;
5214 break;
5215 }
5216 }
5217 }
5218
5219 if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
5220 /*
5221 * Pick the first vp for possible reuse
5222 */
5223 walk_count = 0;
5224 TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
5225 // if we're a dependency-capable process, skip vnodes that can
5226 // cause recycling deadlocks. (i.e. this process is diskimages
5227 // helper and the vnode is in a disk image). Querying the
5228 // mnt_kern_flag for the mount's virtual device status
5229 // is safer than checking the mnt_dependent_process, which
5230 // may not be updated if there are multiple devnode layers
5231 // in between the disk image and the final consumer.
5232
5233 if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
5234 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) {
5235 /*
5236 * if need_reliable_vp == TRUE, then we've already sent one or more
5237 * non-reliable vnodes to the async thread for processing and timed
5238 * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
5239 * mechanism to first scan for a reliable vnode before forcing
5240 * a new vnode to be created
5241 */
5242 if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
5243 break;
5244 }
5245 }
5246
5247 // don't iterate more than MAX_WALK_COUNT vnodes to
5248 // avoid keeping the vnode list lock held for too long.
5249
5250 if (walk_count++ > MAX_WALK_COUNT) {
5251 vp = NULL;
5252 break;
5253 }
5254 }
5255 }
5256
5257 //
5258 // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
5259 // then we're trying to create a vnode on behalf of a
5260 // process like diskimages-helper that has file systems
5261 // mounted on top of itself (and thus we can't reclaim
5262 // vnodes in the file systems on top of us). if we can't
5263 // find a vnode to reclaim then we'll just have to force
5264 // the allocation.
5265 //
5266 if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
5267 force_alloc = 1;
5268 vnode_list_unlock();
5269 goto retry;
5270 }
5271
5272 if (vp == NULL) {
5273 /*
5274 * we've reached the system imposed maximum number of vnodes
5275 * but there isn't a single one available
5276 * wait a bit and then retry... if we can't get a vnode
5277 * after our target number of retries, than log a complaint
5278 */
5279 if (++retries <= max_retries) {
5280 vnode_list_unlock();
5281 delay_for_interval(1, 1000 * 1000);
5282 goto retry;
5283 }
5284
5285 vnode_list_unlock();
5286 tablefull("vnode");
5287 log(LOG_EMERG, "%d desired, %ld numvnodes, "
5288 "%ld free, %ld dead, %ld async, %d rage %d bdevvp\n",
5289 desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes, bdevvp_vnodes);
5290 #if CONFIG_JETSAM
5291
5292 #if DEVELOPMENT || DEBUG
5293 if (bootarg_no_vnode_jetsam) {
5294 panic("vnode table is full");
5295 }
5296 #endif /* DEVELOPMENT || DEBUG */
5297
5298 /*
5299 * Running out of vnodes tends to make a system unusable. Start killing
5300 * processes that jetsam knows are killable.
5301 */
5302 if (memorystatus_kill_on_vnode_limit() == FALSE) {
5303 /*
5304 * If jetsam can't find any more processes to kill and there
5305 * still aren't any free vnodes, panic. Hopefully we'll get a
5306 * panic log to tell us why we ran out.
5307 */
5308 panic("vnode table is full");
5309 }
5310
5311 /*
5312 * Now that we've killed someone, wait a bit and continue looking
5313 * (with fewer retries before trying another kill).
5314 */
5315 delay_for_interval(3, 1000 * 1000);
5316 retries = 0;
5317 max_retries = 10;
5318 goto retry;
5319 #endif
5320
5321 *vpp = NULL;
5322 return ENFILE;
5323 }
5324 newvnode_nodead++;
5325 steal_this_vp:
5326 if ((vp = process_vp(vp, 1, true, &deferred)) == NULLVP) {
5327 if (deferred) {
5328 int elapsed_msecs;
5329 struct timeval elapsed_tv;
5330
5331 if (initial_tv.tv_sec == 0) {
5332 microuptime(&initial_tv);
5333 }
5334
5335 vnode_list_lock();
5336
5337 dead_vnode_waited++;
5338 dead_vnode_wanted++;
5339
5340 /*
5341 * note that we're only going to explicitly wait 10ms
5342 * for a dead vnode to become available, since even if one
5343 * isn't available, a reliable vnode might now be available
5344 * at the head of the VRAGE or free lists... if so, we
5345 * can satisfy the new_vnode request with less latency then waiting
5346 * for the full 100ms duration we're ultimately willing to tolerate
5347 */
5348 assert_wait_timeout((caddr_t)&dead_vnode_wanted, (THREAD_INTERRUPTIBLE), 10000, NSEC_PER_USEC);
5349
5350 vnode_list_unlock();
5351
5352 thread_block(THREAD_CONTINUE_NULL);
5353
5354 microuptime(&elapsed_tv);
5355
5356 timevalsub(&elapsed_tv, &initial_tv);
5357 elapsed_msecs = (int)(elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000);
5358
5359 if (elapsed_msecs >= 100) {
5360 /*
5361 * we've waited long enough... 100ms is
5362 * somewhat arbitrary for this case, but the
5363 * normal worst case latency used for UI
5364 * interaction is 100ms, so I've chosen to
5365 * go with that.
5366 *
5367 * setting need_reliable_vp to TRUE
5368 * forces us to find a reliable vnode
5369 * that we can process synchronously, or
5370 * to create a new one if the scan for
5371 * a reliable one hits the scan limit
5372 */
5373 need_reliable_vp = TRUE;
5374 }
5375 }
5376 goto retry;
5377 }
5378 OSAddAtomicLong(1, &num_reusedvnodes);
5379
5380
5381 #if CONFIG_MACF
5382 /*
5383 * We should never see VL_LABELWAIT or VL_LABEL here.
5384 * as those operations hold a reference.
5385 */
5386 assert((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
5387 assert((vp->v_lflag & VL_LABEL) != VL_LABEL);
5388 if (vp->v_lflag & VL_LABELED || mac_vnode_label(vp) != NULL) {
5389 vnode_lock_convert(vp);
5390 mac_vnode_label_recycle(vp);
5391 } else if (mac_vnode_label_init_needed(vp)) {
5392 vnode_lock_convert(vp);
5393 mac_vnode_label_init(vp);
5394 }
5395
5396 #endif /* MAC */
5397
5398 vp->v_iocount = 1;
5399 vp->v_lflag = 0;
5400 vp->v_writecount = 0;
5401 vp->v_references = 0;
5402 vp->v_iterblkflags = 0;
5403 vp->v_flag = VSTANDARD;
5404 /* vbad vnodes can point to dead_mountp */
5405 vp->v_mount = NULL;
5406 vp->v_defer_reclaimlist = (vnode_t)0;
5407
5408 vnode_unlock(vp);
5409
5410 done:
5411 *vpp = vp;
5412
5413 return 0;
5414 }
5415
/* Acquire the vnode's mutex (full acquisition; may block). */
void
vnode_lock(vnode_t vp)
{
	lck_mtx_lock(&vp->v_lock);
}
5421
/* Acquire the vnode's mutex in spin mode (caller expects a short hold). */
void
vnode_lock_spin(vnode_t vp)
{
	lck_mtx_lock_spin(&vp->v_lock);
}
5427
/*
 * Release the per-vnode mutex (works for both spin and full acquires).
 */
void
vnode_unlock(vnode_t vp)
{
	lck_mtx_unlock(&vp->v_lock);
}
5433
5434
5435
/*
 * Take an iocount (I/O reference) on a vnode.
 *
 * Returns 0 on success, or ENOENT if the vnode is dead/terminating and
 * has no other iocount holders keeping it alive.
 */
int
vnode_get(struct vnode *vp)
{
	int error;

	vnode_lock_spin(vp);
	error = vnode_get_locked(vp);
	vnode_unlock(vp);

	return error;
}
5447
/*
 * Take an iocount on a vnode whose v_lock the caller already holds.
 *
 * Returns ENOENT when the vnode is terminating or dead AND nobody else
 * holds an iocount; an existing iocount holder is still allowed to take
 * additional references on a dying vnode.
 */
int
vnode_get_locked(struct vnode *vp)
{
#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
		return ENOENT;
	}

	/* refuse to wrap the reference count rather than corrupt it */
	if (os_add_overflow(vp->v_iocount, 1, &vp->v_iocount)) {
		panic("v_iocount overflow");
	}

#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	return 0;
}
5467
5468 /*
5469 * vnode_getwithvid() cuts in line in front of a vnode drain (that is,
5470 * while the vnode is draining, but at no point after that) to prevent
5471 * deadlocks when getting vnodes from filesystem hashes while holding
5472 * resources that may prevent other iocounts from being released.
5473 */
/*
 * Take an iocount, validating the caller's cached vnode id (vid) so that
 * a recycled vnode is detected; fails dead vnodes and cuts in front of a
 * drain (VNODE_DRAINO) — see the block comment above.
 */
int
vnode_getwithvid(vnode_t vp, uint32_t vid)
{
	return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID | VNODE_DRAINO));
}
5479
5480 /*
5481 * vnode_getwithvid_drainok() is like vnode_getwithvid(), but *does* block behind a vnode
5482 * drain; it exists for use in the VFS name cache, where we really do want to block behind
5483 * vnode drain to prevent holding off an unmount.
5484 */
/*
 * Same as vnode_getwithvid() but without VNODE_DRAINO, so the caller
 * blocks behind an in-progress vnode drain (used by the name cache so
 * it does not hold off an unmount).
 */
int
vnode_getwithvid_drainok(vnode_t vp, uint32_t vid)
{
	return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID));
}
5490
/*
 * Take an iocount on a vnode the caller already holds a reference to;
 * no vid validation, no special dead/drain handling.
 */
int
vnode_getwithref(vnode_t vp)
{
	return vget_internal(vp, 0, 0);
}
5496
5497
/*
 * Take an iocount unconditionally, bypassing the suspend/drain/terminate
 * wait in vget_internal (VNODE_ALWAYS).
 */
__private_extern__ int
vnode_getalways(vnode_t vp)
{
	return vget_internal(vp, 0, VNODE_ALWAYS);
}
5503
/*
 * Pager-path variant of vnode_getalways(); VNODE_PAGER additionally
 * suppresses the reference-aging (UN-AGE) accounting in vnode_getiocount.
 */
__private_extern__ int
vnode_getalways_from_pager(vnode_t vp)
{
	return vget_internal(vp, 0, VNODE_ALWAYS | VNODE_PAGER);
}
5509
/*
 * Mark a vnode dead: detach it from its mount and filesystem private
 * data and route all further VNOPs through the dead-vnode vector so
 * they fail gracefully.  Caller must have exclusive use of the vnode.
 */
static inline void
vn_set_dead(vnode_t vp)
{
	vp->v_mount = NULL;
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_data = NULL;
	vp->v_type = VBAD;
	vp->v_lflag |= VL_DEAD;
}
5520
/*
 * Drop an iocount with the vnode lock held (spin acquire is sufficient).
 *
 * If this is the last iocount and there are no usecounts, may run
 * VNOP_INACTIVE (dropping and retaking the lock around the call) and,
 * for a vnode marked VL_MARKTERM, initiate reclaim.  When called from
 * the pager (from_pager == true), reclaim is handed off to the async
 * cleaner thread instead, since the pager would deadlock against itself.
 * Always returns 0.
 */
static int
vnode_put_internal_locked(vnode_t vp, bool from_pager)
{
	vfs_context_t ctx = vfs_context_current();      /* hoist outside loop */

#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
retry:
	if (vp->v_iocount < 1) {
		panic("vnode_put(%p): iocount < 1", vp);
	}

	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
		/* other references remain; just drop ours and return */
		vnode_dropiocount(vp);
		return 0;
	}

	if (((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE)) {
		vp->v_lflag &= ~VL_NEEDINACTIVE;
		vnode_unlock(vp);

		VNOP_INACTIVE(vp, ctx);

		vnode_lock_spin(vp);
		/*
		 * because we had to drop the vnode lock before calling
		 * VNOP_INACTIVE, the state of this vnode may have changed...
		 * we may pick up both VL_MARKTERM and either
		 * an iocount or a usecount while in the VNOP_INACTIVE call
		 * we don't want to call vnode_reclaim_internal on a vnode
		 * that has active references on it... so loop back around
		 * and reevaluate the state
		 */
		goto retry;
	}
	vp->v_lflag &= ~VL_NEEDINACTIVE;

	if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
		if (from_pager) {
			/*
			 * We can't initiate reclaim when called from the pager
			 * because it will deadlock with itself so we hand it
			 * off to the async cleaner thread.
			 */
			if (VONLIST(vp)) {
				if (!(vp->v_listflag & VLIST_ASYNC_WORK)) {
					vnode_list_lock();
					vnode_list_remove_locked(vp);
					vnode_async_list_add_locked(vp);
					vnode_list_unlock();
				}
				wakeup(&vnode_async_work_list);
			} else {
				vnode_async_list_add(vp);
			}
		} else {
			vnode_lock_convert(vp);
			vnode_reclaim_internal(vp, 1, 1, 0);
		}
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);

	return 0;
}
5587
/*
 * Drop an iocount; caller already holds the vnode lock.
 */
int
vnode_put_locked(vnode_t vp)
{
	return vnode_put_internal_locked(vp, false);
}
5593
5594 int
vnode_put(vnode_t vp)5595 vnode_put(vnode_t vp)
5596 {
5597 int retval;
5598
5599 vnode_lock_spin(vp);
5600 retval = vnode_put_internal_locked(vp, false);
5601 vnode_unlock(vp);
5602
5603 return retval;
5604 }
5605
5606 int
vnode_put_from_pager(vnode_t vp)5607 vnode_put_from_pager(vnode_t vp)
5608 {
5609 int retval;
5610
5611 vnode_lock_spin(vp);
5612 /* Cannot initiate reclaim while paging */
5613 retval = vnode_put_internal_locked(vp, true);
5614 vnode_unlock(vp);
5615
5616 return retval;
5617 }
5618
/*
 * Return the current writer count (unlocked snapshot).
 */
int
vnode_writecount(vnode_t vp)
{
	return vp->v_writecount;
}
5624
5625 /* is vnode_t in use by others? */
/* is vnode_t in use by others? (takes the vnode lock internally) */
int
vnode_isinuse(vnode_t vp, int refcnt)
{
	return vnode_isinuse_locked(vp, refcnt, 0);
}
5631
/*
 * Return the current usecount (unlocked snapshot).
 */
int
vnode_usecount(vnode_t vp)
{
	return vp->v_usecount;
}
5637
/*
 * Return the current iocount (unlocked snapshot).
 */
int
vnode_iocount(vnode_t vp)
{
	return vp->v_iocount;
}
5643
5644 int
vnode_isinuse_locked(vnode_t vp,int refcnt,int locked)5645 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
5646 {
5647 int retval = 0;
5648
5649 if (!locked) {
5650 vnode_lock_spin(vp);
5651 }
5652 if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
5653 retval = 1;
5654 goto out;
5655 }
5656 if (vp->v_type == VREG) {
5657 retval = ubc_isinuse_locked(vp, refcnt, 1);
5658 }
5659
5660 out:
5661 if (!locked) {
5662 vnode_unlock(vp);
5663 }
5664 return retval;
5665 }
5666
5667 kauth_cred_t
vnode_cred(vnode_t vp)5668 vnode_cred(vnode_t vp)
5669 {
5670 if (vp->v_cred) {
5671 return kauth_cred_require(vp->v_cred);
5672 }
5673
5674 return NULL;
5675 }
5676
5677
5678 /* resume vnode_t */
/* resume vnode_t
 * Clears VL_SUSPENDED (set by vnode_suspend) if the calling thread is
 * the owner, and wakes any threads blocked in vnode_getiocount.
 *
 * NOTE(review): v_lflag/v_owner are examined before taking the vnode
 * lock; presumably safe because only the owning thread can resume —
 * confirm against vnode_suspend's ownership protocol.
 */
errno_t
vnode_resume(vnode_t vp)
{
	if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
		vnode_lock_spin(vp);
		vp->v_lflag &= ~VL_SUSPENDED;
		vp->v_owner = NULL;
		vnode_unlock(vp);

		/* unblock threads sleeping in vnode_getiocount */
		wakeup(&vp->v_iocount);
	}
	return 0;
}
5692
5693 /* suspend vnode_t
5694 * Please do not use on more than one vnode at a time as it may
5695 * cause deadlocks.
 * xxx should we explicitly prevent this from happening?
5697 */
5698
/*
 * Suspend a vnode: new iocount requests will block in vnode_getiocount
 * until vnode_resume() is called by the owning thread.  Returns EBUSY if
 * the vnode is already suspended; silently does nothing if another
 * thread owns a drain/suspend (v_owner already set).
 */
errno_t
vnode_suspend(vnode_t vp)
{
	/* unlocked fast-path check; rechecked via v_owner under the lock */
	if (vp->v_lflag & VL_SUSPENDED) {
		return EBUSY;
	}

	vnode_lock_spin(vp);

	/*
	 * xxx is this sufficient to check if a vnode_drain is in
	 * progress?
	 */

	if (vp->v_owner == NULL) {
		vp->v_lflag |= VL_SUSPENDED;
		vp->v_owner = current_thread();
	}
	vnode_unlock(vp);

	return 0;
}
5721
5722 /*
5723 * Release any blocked locking requests on the vnode.
5724 * Used for forced-unmounts.
5725 *
5726 * XXX What about network filesystems?
5727 */
/*
 * Abort any blocked advisory-lock waiters on the vnode (only meaningful
 * when the FS uses local locking, i.e. VLOCKLOCAL is set).  Called on
 * the forced-unmount path — see the block comment above.
 */
static void
vnode_abort_advlocks(vnode_t vp)
{
	if (vp->v_flag & VLOCKLOCAL) {
		lf_abort_advlocks(vp);
	}
}
5735
5736
/*
 * Wait for all iocounts other than the caller's own to be released.
 * Caller holds the vnode lock (msleep drops/retakes it) and an iocount.
 * Sets VL_DRAIN and claims ownership (v_owner) so the draining thread
 * may still acquire iocounts via vnode_getiocount.
 */
static errno_t
vnode_drain(vnode_t vp)
{
	if (vp->v_lflag & VL_DRAIN) {
		panic("vnode_drain: recursive drain");
		return ENOENT;  /* not reached; panic does not return */
	}
	vp->v_lflag |= VL_DRAIN;
	vp->v_owner = current_thread();

	while (vp->v_iocount > 1) {
		if (bootarg_no_vnode_drain) {
			struct timespec ts = {.tv_sec = 10, .tv_nsec = 0};
			int error;

			/* shorten the wait when the system is unmounting everything */
			if (vfs_unmountall_started) {
				ts.tv_sec = 1;
			}

			error = msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain_with_timeout", &ts);

			/* Try to deal with leaked iocounts under bootarg and shutting down */
			if (vp->v_iocount > 1 && error == EWOULDBLOCK &&
			    ts.tv_sec == 1 && vp->v_numoutput == 0) {
				vp->v_iocount = 1;
				break;
			}
		} else {
			msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
		}
	}

	vp->v_lflag &= ~VL_DRAIN;

	return 0;
}
5773
5774
5775 /*
5776 * if the number of recent references via vnode_getwithvid or vnode_getwithref
 * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
5778 * the LRU list if it's currently on it... once the iocount and usecount both drop
5779 * to 0, it will get put back on the end of the list, effectively making it younger
5780 * this allows us to keep actively referenced vnodes in the list without having
5781 * to constantly remove and add to the list each time a vnode w/o a usecount is
5782 * referenced which costs us taking and dropping a global lock twice.
5783 * However, if the vnode is marked DIRTY, we want to pull it out much earlier
5784 */
5785 #define UNAGE_THRESHHOLD 25
5786 #define UNAGE_DIRTYTHRESHHOLD 6
5787
/*
 * Core iocount-acquisition routine (called with the vnode lock held,
 * spin acquire acceptable).  Waits, fails, or proceeds according to the
 * VNODE_* flags in vflags, validates the caller's vid if VNODE_WITHID,
 * applies the UN-AGE heuristic (see comment above), then takes the
 * iocount.  Returns 0, ENOENT (dead/suspended/stale vid), ENODEV
 * (unmount in progress), or an msleep error for interruptible tty waits.
 */
errno_t
vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
{
	int nodead = vflags & VNODE_NODEAD;
	int nosusp = vflags & VNODE_NOSUSPEND;
	int always = vflags & VNODE_ALWAYS;
	int beatdrain = vflags & VNODE_DRAINO;
	int withvid = vflags & VNODE_WITHID;
	int forpager = vflags & VNODE_PAGER;

	for (;;) {
		int sleepflg = 0;

		/*
		 * if it is a dead vnode with deadfs
		 */
		if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
			return ENOENT;
		}
		/*
		 * will return VL_DEAD ones
		 */
		if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0) {
			break;
		}
		/*
		 * if suspended vnodes are to be failed
		 */
		if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
			return ENOENT;
		}
		/*
		 * if you are the owner of drain/suspend/termination , can acquire iocount
		 * check for VL_TERMINATE; it does not set owner
		 */
		if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
		    (vp->v_owner == current_thread())) {
			break;
		}

		if (always != 0) {
			break;
		}

		/*
		 * If this vnode is getting drained, there are some cases where
		 * we can't block or, in case of tty vnodes, want to be
		 * interruptible.
		 */
		if (vp->v_lflag & VL_DRAIN) {
			/*
			 * In some situations, we want to get an iocount
			 * even if the vnode is draining to prevent deadlock,
			 * e.g. if we're in the filesystem, potentially holding
			 * resources that could prevent other iocounts from
			 * being released.
			 */
			if (beatdrain) {
				break;
			}
			/*
			 * Don't block if the vnode's mount point is unmounting as
			 * we may be the thread the unmount is itself waiting on.
			 * Only callers who pass in vids (at this point, we've already
			 * handled nosusp and nodead) are expecting error returns
			 * from this function, so we can only return errors for
			 * those. ENODEV is intended to inform callers that the call
			 * failed because an unmount is in progress.
			 */
			if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount)) {
				return ENODEV;
			}

			/* tty reads may never complete; allow signals to break the wait */
			if (vnode_istty(vp)) {
				sleepflg = PCATCH;
			}
		}

		vnode_lock_convert(vp);

		if (vp->v_lflag & VL_TERMINATE) {
			int error;

			vp->v_lflag |= VL_TERMWANT;

			error = msleep(&vp->v_lflag, &vp->v_lock,
			    (PVFS | sleepflg), "vnode getiocount", NULL);
			if (error) {
				return error;
			}
		} else {
			msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
		}
	}
	/* fail stale references to a recycled vnode */
	if (withvid && vid != vp->v_id) {
		return ENOENT;
	}
	/* UN-AGE frequently-referenced vnodes; skipped on the pager path */
	if (!forpager && (++vp->v_references >= UNAGE_THRESHHOLD ||
	    (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD))) {
		vp->v_references = 0;
		vnode_list_remove(vp);
	}
	vp->v_iocount++;
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	return 0;
}
5896
/*
 * Decrement the iocount (vnode lock held by caller) and wake a waiting
 * drainer/suspender when this was the second-to-last reference.
 */
static void
vnode_dropiocount(vnode_t vp)
{
	if (vp->v_iocount < 1) {
		panic("vnode_dropiocount(%p): v_iocount < 1", vp);
	}

	vp->v_iocount--;
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, -1);
#endif
	/* <= 1 because the draining thread itself holds one iocount */
	if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1)) {
		wakeup(&vp->v_iocount);
	}
}
5912
5913
/*
 * Public entry point for vnode reclaim: unlocked, no reuse, no flags.
 */
void
vnode_reclaim(struct vnode * vp)
{
	vnode_reclaim_internal(vp, 0, 0, 0);
}
5919
/*
 * Tear down a vnode and return it to a reusable (VBAD) state.
 *
 * locked  - caller already holds the vnode lock
 * reuse   - caller will reuse the vnode itself; skip the vnode_list_add
 * flags   - passed through to vgone(); REVOKEALL additionally forces
 *           blocked tty readers to give up their iocounts
 *
 * Sets VL_TERMINATE for the duration, drains all other iocounts, cleans
 * the vnode via vgone(), then bumps v_id so stale vnode_getwithvid
 * callers fail.  See the inline comments for the list-lock/v_id dance.
 */
__private_extern__
void
vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
{
	int isfifo = 0;
	bool clear_tty_revoke = false;

	if (!locked) {
		vnode_lock(vp);
	}

	if (vp->v_lflag & VL_TERMINATE) {
		panic("vnode reclaim in progress");
	}
	vp->v_lflag |= VL_TERMINATE;

	vn_clearunionwait(vp, 1);

	/*
	 * We have to force any terminals in reads to return and give up
	 * their iocounts. It's important to do this after VL_TERMINATE
	 * has been set to ensure new reads are blocked while the
	 * revoke is in progress.
	 */
	if (vnode_istty(vp) && (flags & REVOKEALL) && (vp->v_iocount > 1)) {
		vnode_unlock(vp);
		VNOP_IOCTL(vp, TIOCREVOKE, (caddr_t)NULL, 0, vfs_context_kernel());
		clear_tty_revoke = true;
		vnode_lock(vp);
	}

	vnode_drain(vp);

	if (clear_tty_revoke) {
		vnode_unlock(vp);
		VNOP_IOCTL(vp, TIOCREVOKECLEAR, (caddr_t)NULL, 0, vfs_context_kernel());
		vnode_lock(vp);
	}

	/* remember before vgone() changes v_type to VBAD */
	isfifo = (vp->v_type == VFIFO);

	if (vp->v_type != VBAD) {
		vgone(vp, flags);               /* clean and reclaim the vnode */
	}
	/*
	 * give the vnode a new identity so that vnode_getwithvid will fail
	 * on any stale cache accesses...
	 * grab the list_lock so that if we're in "new_vnode"
	 * behind the list_lock trying to steal this vnode, the v_id is stable...
	 * once new_vnode drops the list_lock, it will block trying to take
	 * the vnode lock until we release it... at that point it will evaluate
	 * whether the v_vid has changed
	 * also need to make sure that the vnode isn't on a list where "new_vnode"
	 * can find it after the v_id has been bumped until we are completely done
	 * with the vnode (i.e. putting it back on a list has to be the very last
	 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
	 * are holding an io_count on the vnode... they need to drop the io_count
	 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
	 * they are completely done with the vnode
	 */
	vnode_list_lock();

	vnode_list_remove_locked(vp);
	vp->v_id++;

	vnode_list_unlock();

	if (isfifo) {
		struct fifoinfo * fip;

		fip = vp->v_fifoinfo;
		vp->v_fifoinfo = NULL;
		kfree_type(struct fifoinfo, fip);
	}
	vp->v_type = VBAD;

	/* vgone() must have fully detached the vnode from the filesystem */
	if (vp->v_data) {
		panic("vnode_reclaim_internal: cleaned vnode isn't");
	}
	if (vp->v_numoutput) {
		panic("vnode_reclaim_internal: clean vnode has pending I/O's");
	}
	if (UBCINFOEXISTS(vp)) {
		panic("vnode_reclaim_internal: ubcinfo not cleaned");
	}
	if (vp->v_parent) {
		panic("vnode_reclaim_internal: vparent not removed");
	}
	if (vp->v_name) {
		panic("vnode_reclaim_internal: vname not removed");
	}

	vp->v_socket = NULL;

	vp->v_lflag &= ~VL_TERMINATE;
	vp->v_owner = NULL;

#if CONFIG_IOCOUNT_TRACE
	if (__improbable(bootarg_vnode_iocount_trace)) {
		bzero(vp->v_iocount_trace,
		    IOCOUNT_TRACE_MAX_TYPES * sizeof(struct vnode_iocount_trace));
	}
#endif /* CONFIG_IOCOUNT_TRACE */

	KNOTE(&vp->v_knotes, NOTE_REVOKE);

	/* Make sure that when we reuse the vnode, no knotes left over */
	klist_init(&vp->v_knotes);

	if (vp->v_lflag & VL_TERMWANT) {
		vp->v_lflag &= ~VL_TERMWANT;
		wakeup(&vp->v_lflag);
	}
	if (!reuse) {
		/*
		 * make sure we get on the
		 * dead list if appropriate
		 */
		vnode_list_add(vp);
	}
	if (!locked) {
		vnode_unlock(vp);
	}
}
6044
/*
 * Common worker for vnode_create / vnode_create_empty / vnode_initialize.
 *
 * If *vpp is non-NULL it is a pre-created empty vnode being initialized;
 * otherwise a fresh vnode is obtained from new_vnode().  When init_vnode
 * is zero, the vnode is simply marked dead and returned (the
 * vnode_create_empty path).  Otherwise the vnode is populated from the
 * vnode_fsparam in 'data' (trigger flavor validated and folded into the
 * normal flavor under CONFIG_TRIGGERS), device aliases are resolved via
 * checkalias(), and the vnode is entered into the mount list and name
 * cache as requested.  Returns 0 with *vpp holding an iocount, or an
 * errno with *vpp untouched (NULLVP on the !existing path).
 */
static int
vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp,
    int init_vnode)
{
	int error;
	int insert = 1;         /* cleared if checkalias supplies the vnode */
	int existing_vnode;
	vnode_t vp;
	vnode_t nvp;
	vnode_t dvp;
	struct  uthread *ut;
	struct componentname *cnp;
	struct vnode_fsparam *param = (struct vnode_fsparam *)data;
#if CONFIG_TRIGGERS
	struct vnode_trigger_param *tinfo = NULL;
#endif
	if (*vpp) {
		vp = *vpp;
		*vpp = NULLVP;
		existing_vnode = 1;
	} else {
		existing_vnode = 0;
	}

	if (init_vnode) {
		/* Do quick sanity check on the parameters. */
		if ((param == NULL) || (param->vnfs_vtype == VBAD)) {
			error = EINVAL;
			goto error_out;
		}

#if CONFIG_TRIGGERS
		if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) {
			tinfo = (struct vnode_trigger_param *)data;

			/* Validate trigger vnode input */
			if ((param->vnfs_vtype != VDIR) ||
			    (tinfo->vnt_resolve_func == NULL) ||
			    (tinfo->vnt_flags & ~VNT_VALID_MASK)) {
				error = EINVAL;
				goto error_out;
			}
			/* Fall through a normal create (params will be the same) */
			flavor = VNCREATE_FLAVOR;
			size = VCREATESIZE;
		}
#endif
		if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE)) {
			error = EINVAL;
			goto error_out;
		}
	}

	if (!existing_vnode) {
		if ((error = new_vnode(&vp))) {
			return error;
		}
		if (!init_vnode) {
			/* Make it so that it can be released by a vnode_put) */
			vn_set_dead(vp);
			*vpp = vp;
			return 0;
		}
	} else {
		/*
		 * A vnode obtained by vnode_create_empty has been passed to
		 * vnode_initialize - Unset VL_DEAD set by vn_set_dead. After
		 * this point, it is set back on any error.
		 *
		 * N.B. vnode locking - We make the same assumptions as the
		 * "unsplit" vnode_create did - i.e. it is safe to update the
		 * vnode's fields without the vnode lock. This vnode has been
		 * out and about with the filesystem and hopefully nothing
		 * was done to the vnode between the vnode_create_empty and
		 * now when it has come in through vnode_initialize.
		 */
		vp->v_lflag &= ~VL_DEAD;
	}

	dvp = param->vnfs_dvp;
	cnp = param->vnfs_cnp;

	vp->v_op = param->vnfs_vops;
	vp->v_type = (uint16_t)param->vnfs_vtype;
	vp->v_data = param->vnfs_fsnode;

	if (param->vnfs_markroot) {
		vp->v_flag |= VROOT;
	}
	if (param->vnfs_marksystem) {
		vp->v_flag |= VSYSTEM;
	}
	if (vp->v_type == VREG) {
		error = ubc_info_init_withsize(vp, param->vnfs_filesize);
		if (error) {
#ifdef CONFIG_IOCOUNT_TRACE
			record_vp(vp, 1);
#endif
			vn_set_dead(vp);

			vnode_put(vp);
			return error;
		}
		/*
		 * NOTE(review): assumes vnfs_mp is non-NULL for VREG vnodes
		 * (later code guards param->vnfs_mp, this path does not) —
		 * confirm against callers.
		 */
		if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED) {
			memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control);
		}
	}
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif

#if CONFIG_FIRMLINKS
	vp->v_fmlink = NULLVP;
#endif
	vp->v_flag &= ~VFMLINKTARGET;

#if CONFIG_TRIGGERS
	/*
	 * For trigger vnodes, attach trigger info to vnode
	 */
	if ((vp->v_type == VDIR) && (tinfo != NULL)) {
		/*
		 * Note: has a side effect of incrementing trigger count on the
		 * mount if successful, which we would need to undo on a
		 * subsequent failure.
		 */
#ifdef CONFIG_IOCOUNT_TRACE
		record_vp(vp, -1);
#endif
		error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE);
		if (error) {
			printf("vnode_create: vnode_resolver_create() err %d\n", error);
			vn_set_dead(vp);
#ifdef CONFIG_IOCOUNT_TRACE
			record_vp(vp, 1);
#endif
			vnode_put(vp);
			return error;
		}
	}
#endif
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		vp->v_tag = VT_DEVFS;           /* callers will reset if needed (bdevvp) */

		if ((nvp = checkalias(vp, param->vnfs_rdev))) {
			/*
			 * if checkalias returns a vnode, it will be locked
			 *
			 * first get rid of the unneeded vnode we acquired
			 */
			vp->v_data = NULL;
			vp->v_op = spec_vnodeop_p;
			vp->v_type = VBAD;
			vp->v_lflag = VL_DEAD;
			vp->v_data = NULL;
			vp->v_tag = VT_NON;
			vnode_put(vp);

			/*
			 * switch to aliased vnode and finish
			 * preparing it
			 */
			vp = nvp;

			vclean(vp, 0);
			vp->v_op = param->vnfs_vops;
			vp->v_type = (uint16_t)param->vnfs_vtype;
			vp->v_data = param->vnfs_fsnode;
			vp->v_lflag = 0;
			vp->v_mount = NULL;
			insmntque(vp, param->vnfs_mp);
			insert = 0;
			vnode_unlock(vp);
		}

		if (VCHR == vp->v_type) {
			u_int maj = major(vp->v_rdev);

			/* flag character devices backed by tty drivers */
			if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY) {
				vp->v_flag |= VISTTY;
			}
		}
	}

	if (vp->v_type == VFIFO) {
		struct fifoinfo *fip;

		fip = kalloc_type(struct fifoinfo, Z_WAITOK | Z_ZERO);
		vp->v_fifoinfo = fip;
	}
	/* The file systems must pass the address of the location where
	 * they store the vnode pointer. When we add the vnode into the mount
	 * list and name cache they become discoverable. So the file system node
	 * must have the connection to vnode setup by then
	 */
	*vpp = vp;

	/* Add fs named reference. */
	if (param->vnfs_flags & VNFS_ADDFSREF) {
		vp->v_lflag |= VNAMED_FSHASH;
	}
	if (param->vnfs_mp) {
		if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL) {
			vp->v_flag |= VLOCKLOCAL;
		}
		if (insert) {
			if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
				panic("insmntque: vp on the free list");
			}

			/*
			 * enter in mount vnode list
			 */
			insmntque(vp, param->vnfs_mp);
		}
	}
	if (dvp && vnode_ref(dvp) == 0) {
		vp->v_parent = dvp;
	}
	if (cnp) {
		if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
			/*
			 * enter into name cache
			 * we've got the info to enter it into the name cache now
			 * cache_enter_create will pick up an extra reference on
			 * the name entered into the string cache
			 */
			vp->v_name = cache_enter_create(dvp, vp, cnp);
		} else {
			vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
		}

		if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED) {
			vp->v_flag |= VISUNION;
		}
	}
	if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
		/*
		 * this vnode is being created as cacheable in the name cache
		 * this allows us to re-enter it in the cache
		 */
		vp->v_flag |= VNCACHEABLE;
	}
	ut = current_uthread();

	if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
	    (ut->uu_flag & (UT_RAGE_VNODES | UT_KERN_RAGE_VNODES))) {
		/*
		 * process has indicated that it wants any
		 * vnodes created on its behalf to be rapidly
		 * aged to reduce the impact on the cached set
		 * of vnodes
		 *
		 * if UT_KERN_RAGE_VNODES is set, then the
		 * kernel internally wants vnodes to be rapidly
		 * aged, even if the process hasn't requested
		 * this
		 */
		vp->v_flag |= VRAGE;
	}

#if CONFIG_SECLUDED_MEMORY
	switch (secluded_for_filecache) {
	case 0:
		/*
		 * secluded_for_filecache == 0:
		 * + no file contents in secluded pool
		 */
		break;
	case 1:
		/*
		 * secluded_for_filecache == 1:
		 * + no files from /
		 * + files from /Applications/ are OK
		 * + files from /Applications/Camera are not OK
		 * + no files that are open for write
		 */
		if (vnode_vtype(vp) == VREG &&
		    vnode_mount(vp) != NULL &&
		    (!(vfs_flags(vnode_mount(vp)) & MNT_ROOTFS))) {
			/* not from root filesystem: eligible for secluded pages */
			memory_object_mark_eligible_for_secluded(
				ubc_getobject(vp, UBC_FLAGS_NONE),
				TRUE);
		}
		break;
	case 2:
		/*
		 * secluded_for_filecache == 2:
		 * + all read-only files OK, except:
		 * + dyld_shared_cache_arm64*
		 * + Camera
		 * + mediaserverd
		 */
		if (vnode_vtype(vp) == VREG) {
			memory_object_mark_eligible_for_secluded(
				ubc_getobject(vp, UBC_FLAGS_NONE),
				TRUE);
		}
		break;
	default:
		break;
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	return 0;

error_out:
	if (existing_vnode) {
		vnode_put(vp);
	}
	return error;
}
6358
6359 /* USAGE:
6360 * The following api creates a vnode and associates all the parameter specified in vnode_fsparam
6361 * structure and returns a vnode handle with a reference. device aliasing is handled here so checkalias
6362 * is obsoleted by this.
6363 */
/* USAGE: (see the block comment above)
 * Create and fully initialize a vnode from a vnode_fsparam; returns
 * with an iocount held in *vpp on success.
 */
int
vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
{
	*vpp = NULLVP;
	return vnode_create_internal(flavor, size, data, vpp, 1);
}
6370
/*
 * Allocate an uninitialized (dead) vnode that the filesystem will later
 * populate via vnode_initialize(); releasable with vnode_put().
 */
int
vnode_create_empty(vnode_t *vpp)
{
	*vpp = NULLVP;
	return vnode_create_internal(VNCREATE_FLAVOR, VCREATESIZE, NULL,
	           vpp, 0);
}
6378
/*
 * Initialize a vnode previously obtained from vnode_create_empty().
 * DEVELOPMENT/DEBUG builds first verify that the vnode is still in the
 * pristine dead state vnode_create_empty left it in.
 */
int
vnode_initialize(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
{
	if (*vpp == NULLVP) {
		panic("NULL vnode passed to vnode_initialize");
	}
#if DEVELOPMENT || DEBUG
	/*
	 * We lock to check that vnode is fit for unlocked use in
	 * vnode_create_internal.
	 */
	vnode_lock_spin(*vpp);
	VNASSERT(((*vpp)->v_iocount == 1), *vpp,
	    ("vnode_initialize : iocount not 1, is %d", (*vpp)->v_iocount));
	VNASSERT(((*vpp)->v_usecount == 0), *vpp,
	    ("vnode_initialize : usecount not 0, is %d", (*vpp)->v_usecount));
	VNASSERT(((*vpp)->v_lflag & VL_DEAD), *vpp,
	    ("vnode_initialize : v_lflag does not have VL_DEAD, is 0x%x",
	    (*vpp)->v_lflag));
	VNASSERT(((*vpp)->v_data == NULL), *vpp,
	    ("vnode_initialize : v_data not NULL"));
	vnode_unlock(*vpp);
#endif
	return vnode_create_internal(flavor, size, data, vpp, 1);
}
6404
/*
 * Add the filesystem "named" reference (VNAMED_FSHASH), used by FSes
 * that keep the vnode in a private hash.  Panics on double-add or if
 * the vnode is sitting on the free list.  Always returns 0.
 */
int
vnode_addfsref(vnode_t vp)
{
	vnode_lock_spin(vp);
	if (vp->v_lflag & VNAMED_FSHASH) {
		panic("add_fsref: vp already has named reference");
	}
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
		panic("addfsref: vp on the free list");
	}
	vp->v_lflag |= VNAMED_FSHASH;
	vnode_unlock(vp);
	return 0;
}
/*
 * Drop the filesystem "named" reference; panics if it was never taken.
 * Always returns 0.
 */
int
vnode_removefsref(vnode_t vp)
{
	vnode_lock_spin(vp);
	if ((vp->v_lflag & VNAMED_FSHASH) == 0) {
		panic("remove_fsref: no named reference");
	}
	vp->v_lflag &= ~VNAMED_FSHASH;
	vnode_unlock(vp);
	return 0;
}
6430
6431
/*
 * Invoke 'callout' on every mounted filesystem, iterating by fsid
 * snapshot so mounts may come and go during the walk.  Each mount is
 * visited holding an iteration reference; VFS_ITERATE_CB_DROPREF means
 * the callback is responsible for dropping it.  The walk stops early on
 * VFS_RETURNED_DONE / VFS_CLAIMED_DONE.  Returns 0 (per-mount callout
 * errors are not propagated).
 */
int
vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg)
{
	mount_t mp;
	int ret = 0;
	fsid_t * fsid_list;
	int count, actualcount, i;
	void * allocmem;
	int indx_start, indx_stop, indx_incr;
	int cb_dropref = (flags & VFS_ITERATE_CB_DROPREF);
	int noskip_unmount = (flags & VFS_ITERATE_NOSKIP_UNMOUNT);

	/* headroom for mounts created between the count and the fill */
	count = mount_getvfscnt();
	count += 10;

	/* NOTE(review): allocation result is not checked — presumably
	 * Z_WAITOK cannot fail at this size; confirm. */
	fsid_list = kalloc_data(count * sizeof(fsid_t), Z_WAITOK);
	allocmem = (void *)fsid_list;

	actualcount = mount_fillfsids(fsid_list, count);

	/*
	 * Establish the iteration direction
	 * VFS_ITERATE_TAIL_FIRST overrides default head first order (oldest first)
	 */
	if (flags & VFS_ITERATE_TAIL_FIRST) {
		indx_start = actualcount - 1;
		indx_stop = -1;
		indx_incr = -1;
	} else { /* Head first by default */
		indx_start = 0;
		indx_stop = actualcount;
		indx_incr = 1;
	}

	for (i = indx_start; i != indx_stop; i += indx_incr) {
		/* obtain the mount point with iteration reference */
		mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);

		if (mp == (struct mount *)0) {
			/* mount disappeared since the snapshot */
			continue;
		}
		mount_lock(mp);
		if ((mp->mnt_lflag & MNT_LDEAD) ||
		    (!noskip_unmount && (mp->mnt_lflag & MNT_LUNMOUNT))) {
			mount_unlock(mp);
			mount_iterdrop(mp);
			continue;
		}
		mount_unlock(mp);

		/* iterate over all the vnodes */
		ret = callout(mp, arg);

		/*
		 * Drop the iterref here if the callback didn't do it.
		 * Note: If cb_dropref is set the mp may no longer exist.
		 */
		if (!cb_dropref) {
			mount_iterdrop(mp);
		}

		switch (ret) {
		case VFS_RETURNED:
		case VFS_RETURNED_DONE:
			if (ret == VFS_RETURNED_DONE) {
				ret = 0;
				goto out;
			}
			break;

		case VFS_CLAIMED_DONE:
			ret = 0;
			goto out;
		case VFS_CLAIMED:
		default:
			break;
		}
		ret = 0;
	}

out:
	kfree_data(allocmem, count * sizeof(fsid_t));
	return ret;
}
6516
6517 /*
6518 * Update the vfsstatfs structure in the mountpoint.
6519 * MAC: Parameter eventtype added, indicating whether the event that
6520 * triggered this update came from user space, via a system call
6521 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
6522 */
6523 int
vfs_update_vfsstat(mount_t mp,vfs_context_t ctx,__unused int eventtype)6524 vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
6525 {
6526 struct vfs_attr va;
6527 int error;
6528
6529 /*
6530 * Request the attributes we want to propagate into
6531 * the per-mount vfsstat structure.
6532 */
6533 VFSATTR_INIT(&va);
6534 VFSATTR_WANTED(&va, f_iosize);
6535 VFSATTR_WANTED(&va, f_blocks);
6536 VFSATTR_WANTED(&va, f_bfree);
6537 VFSATTR_WANTED(&va, f_bavail);
6538 VFSATTR_WANTED(&va, f_bused);
6539 VFSATTR_WANTED(&va, f_files);
6540 VFSATTR_WANTED(&va, f_ffree);
6541 VFSATTR_WANTED(&va, f_bsize);
6542 VFSATTR_WANTED(&va, f_fssubtype);
6543
6544 if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
6545 KAUTH_DEBUG("STAT - filesystem returned error %d", error);
6546 return error;
6547 }
6548 #if CONFIG_MACF
6549 if (eventtype == VFS_USER_EVENT) {
6550 error = mac_mount_check_getattr(ctx, mp, &va);
6551 if (error != 0) {
6552 return error;
6553 }
6554 }
6555 #endif
6556 /*
6557 * Unpack into the per-mount structure.
6558 *
6559 * We only overwrite these fields, which are likely to change:
6560 * f_blocks
6561 * f_bfree
6562 * f_bavail
6563 * f_bused
6564 * f_files
6565 * f_ffree
6566 *
6567 * And these which are not, but which the FS has no other way
6568 * of providing to us:
6569 * f_bsize
6570 * f_iosize
6571 * f_fssubtype
6572 *
6573 */
6574 if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
6575 /* 4822056 - protect against malformed server mount */
6576 mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
6577 } else {
6578 mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */
6579 }
6580 if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
6581 mp->mnt_vfsstat.f_iosize = va.f_iosize;
6582 } else {
6583 mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */
6584 }
6585 if (VFSATTR_IS_SUPPORTED(&va, f_blocks)) {
6586 mp->mnt_vfsstat.f_blocks = va.f_blocks;
6587 }
6588 if (VFSATTR_IS_SUPPORTED(&va, f_bfree)) {
6589 mp->mnt_vfsstat.f_bfree = va.f_bfree;
6590 }
6591 if (VFSATTR_IS_SUPPORTED(&va, f_bavail)) {
6592 mp->mnt_vfsstat.f_bavail = va.f_bavail;
6593 }
6594 if (VFSATTR_IS_SUPPORTED(&va, f_bused)) {
6595 mp->mnt_vfsstat.f_bused = va.f_bused;
6596 }
6597 if (VFSATTR_IS_SUPPORTED(&va, f_files)) {
6598 mp->mnt_vfsstat.f_files = va.f_files;
6599 }
6600 if (VFSATTR_IS_SUPPORTED(&va, f_ffree)) {
6601 mp->mnt_vfsstat.f_ffree = va.f_ffree;
6602 }
6603
6604 /* this is unlikely to change, but has to be queried for */
6605 if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype)) {
6606 mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
6607 }
6608
6609 return 0;
6610 }
6611
6612 int
mount_list_add(mount_t mp)6613 mount_list_add(mount_t mp)
6614 {
6615 int res;
6616
6617 mount_list_lock();
6618 if (get_system_inshutdown() != 0) {
6619 res = -1;
6620 } else {
6621 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
6622 nummounts++;
6623 res = 0;
6624 }
6625 mount_list_unlock();
6626
6627 return res;
6628 }
6629
6630 void
mount_list_remove(mount_t mp)6631 mount_list_remove(mount_t mp)
6632 {
6633 mount_list_lock();
6634 TAILQ_REMOVE(&mountlist, mp, mnt_list);
6635 nummounts--;
6636 mp->mnt_list.tqe_next = NULL;
6637 mp->mnt_list.tqe_prev = NULL;
6638 mount_list_unlock();
6639 }
6640
/*
 * Look up a mount by the volfs id stored in f_fsid.val[0].
 *
 * Only mounts that advertise MNTK_PATH_FROM_ID and are not in the
 * middle of an unmount are considered.  If 'withref' is set, an
 * iteration reference is taken on the match and the mount is then
 * vfs_busy'd with LK_NOWAIT; if either step fails, NULL is returned.
 * Returns the matching mount (busied, when 'withref') or NULL.
 */
mount_t
mount_lookupby_volfsid(int volfs_id, int withref)
{
	mount_t cur_mount = (mount_t)0;
	mount_t mp;

	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
		    (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
		    (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
			cur_mount = mp;
			if (withref) {
				/* pin the mount before dropping the list lock */
				if (mount_iterref(cur_mount, 1)) {
					cur_mount = (mount_t)0;
					mount_list_unlock();
					goto out;
				}
			}
			break;
		}
	}
	mount_list_unlock();
	if (withref && (cur_mount != (mount_t)0)) {
		mp = cur_mount;
		/* busy the mount for the caller; on failure return NULL */
		if (vfs_busy(mp, LK_NOWAIT) != 0) {
			cur_mount = (mount_t)0;
		}
		/* iteration reference no longer needed either way */
		mount_iterdrop(mp);
	}
out:
	return cur_mount;
}
6674
6675 mount_t
mount_list_lookupby_fsid(fsid_t * fsid,int locked,int withref)6676 mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
6677 {
6678 mount_t retmp = (mount_t)0;
6679 mount_t mp;
6680
6681 if (!locked) {
6682 mount_list_lock();
6683 }
6684 TAILQ_FOREACH(mp, &mountlist, mnt_list)
6685 if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
6686 mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
6687 retmp = mp;
6688 if (withref) {
6689 if (mount_iterref(retmp, 1)) {
6690 retmp = (mount_t)0;
6691 }
6692 }
6693 goto out;
6694 }
6695 out:
6696 if (!locked) {
6697 mount_list_unlock();
6698 }
6699 return retmp;
6700 }
6701
/*
 * Look up 'path' and return the resulting vnode in *vpp.
 *
 * If 'start_dvp' is supplied and the path is relative, the lookup
 * starts at that directory (via USEDVP); otherwise resolution begins
 * at the context's root/current directory.  'flags' is a mask of
 * VNODE_LOOKUP_* bits controlling symlink following and mount-point
 * crossing.  Returns 0 with *vpp set on success, EINVAL if no context
 * was supplied, or the errno from namei().  The reference held on the
 * returned vnode must be dropped by the caller (vnode_put).
 */
errno_t
vnode_lookupat(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx,
    vnode_t start_dvp)
{
	struct nameidata *ndp;
	int error = 0;
	u_int32_t ndflags = 0;

	if (ctx == NULL) {
		return EINVAL;
	}

	/* Z_NOFAIL: allocation cannot fail, no NULL check needed */
	ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);

	if (flags & VNODE_LOOKUP_NOFOLLOW) {
		ndflags = NOFOLLOW;
	} else {
		ndflags = FOLLOW;
	}

	if (flags & VNODE_LOOKUP_NOCROSSMOUNT) {
		ndflags |= NOCROSSMOUNT;
	}

	if (flags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
		ndflags |= CN_NBMOUNTLOOK;
	}

	/* XXX AUDITVNPATH1 needed ? */
	NDINIT(ndp, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(path), ctx);

	/* relative path with an explicit starting directory */
	if (start_dvp && (path[0] != '/')) {
		ndp->ni_dvp = start_dvp;
		ndp->ni_cnd.cn_flags |= USEDVP;
	}

	if ((error = namei(ndp))) {
		goto out_free;
	}

	/* clear USEDVP so it does not leak out of this lookup */
	ndp->ni_cnd.cn_flags &= ~USEDVP;

	*vpp = ndp->ni_vp;
	nameidone(ndp);

out_free:
	kfree_type(struct nameidata, ndp);
	return error;
}
6752
6753 errno_t
vnode_lookup(const char * path,int flags,vnode_t * vpp,vfs_context_t ctx)6754 vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
6755 {
6756 return vnode_lookupat(path, flags, vpp, ctx, NULLVP);
6757 }
6758
6759 errno_t
vnode_open(const char * path,int fmode,int cmode,int flags,vnode_t * vpp,vfs_context_t ctx)6760 vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
6761 {
6762 struct nameidata *ndp = NULL;
6763 int error;
6764 u_int32_t ndflags = 0;
6765 int lflags = flags;
6766
6767 if (ctx == NULL) { /* XXX technically an error */
6768 ctx = vfs_context_current();
6769 }
6770
6771 ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);
6772
6773 if (fmode & O_NOFOLLOW) {
6774 lflags |= VNODE_LOOKUP_NOFOLLOW;
6775 }
6776
6777 if (lflags & VNODE_LOOKUP_NOFOLLOW) {
6778 ndflags = NOFOLLOW;
6779 } else {
6780 ndflags = FOLLOW;
6781 }
6782
6783 if (lflags & VNODE_LOOKUP_NOCROSSMOUNT) {
6784 ndflags |= NOCROSSMOUNT;
6785 }
6786
6787 if (lflags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
6788 ndflags |= CN_NBMOUNTLOOK;
6789 }
6790
6791 /* XXX AUDITVNPATH1 needed ? */
6792 NDINIT(ndp, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE,
6793 CAST_USER_ADDR_T(path), ctx);
6794
6795 if ((error = vn_open(ndp, fmode, cmode))) {
6796 *vpp = NULL;
6797 } else {
6798 *vpp = ndp->ni_vp;
6799 }
6800
6801 kfree_type(struct nameidata, ndp);
6802 return error;
6803 }
6804
6805 errno_t
vnode_close(vnode_t vp,int flags,vfs_context_t ctx)6806 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
6807 {
6808 int error;
6809
6810 if (ctx == NULL) {
6811 ctx = vfs_context_current();
6812 }
6813
6814 error = vn_close(vp, flags, ctx);
6815 vnode_put(vp);
6816 return error;
6817 }
6818
6819 errno_t
vnode_mtime(vnode_t vp,struct timespec * mtime,vfs_context_t ctx)6820 vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx)
6821 {
6822 struct vnode_attr va;
6823 int error;
6824
6825 VATTR_INIT(&va);
6826 VATTR_WANTED(&va, va_modify_time);
6827 error = vnode_getattr(vp, &va, ctx);
6828 if (!error) {
6829 *mtime = va.va_modify_time;
6830 }
6831 return error;
6832 }
6833
6834 errno_t
vnode_flags(vnode_t vp,uint32_t * flags,vfs_context_t ctx)6835 vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx)
6836 {
6837 struct vnode_attr va;
6838 int error;
6839
6840 VATTR_INIT(&va);
6841 VATTR_WANTED(&va, va_flags);
6842 error = vnode_getattr(vp, &va, ctx);
6843 if (!error) {
6844 *flags = va.va_flags;
6845 }
6846 return error;
6847 }
6848
6849 /*
6850 * Returns: 0 Success
6851 * vnode_getattr:???
6852 */
6853 errno_t
vnode_size(vnode_t vp,off_t * sizep,vfs_context_t ctx)6854 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
6855 {
6856 struct vnode_attr va;
6857 int error;
6858
6859 VATTR_INIT(&va);
6860 VATTR_WANTED(&va, va_data_size);
6861 error = vnode_getattr(vp, &va, ctx);
6862 if (!error) {
6863 *sizep = va.va_data_size;
6864 }
6865 return error;
6866 }
6867
6868 errno_t
vnode_setsize(vnode_t vp,off_t size,int ioflag,vfs_context_t ctx)6869 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
6870 {
6871 struct vnode_attr va;
6872
6873 VATTR_INIT(&va);
6874 VATTR_SET(&va, va_data_size, size);
6875 va.va_vaflags = ioflag & 0xffff;
6876 return vnode_setattr(vp, &va, ctx);
6877 }
6878
6879 int
vnode_setdirty(vnode_t vp)6880 vnode_setdirty(vnode_t vp)
6881 {
6882 vnode_lock_spin(vp);
6883 vp->v_flag |= VISDIRTY;
6884 vnode_unlock(vp);
6885 return 0;
6886 }
6887
6888 int
vnode_cleardirty(vnode_t vp)6889 vnode_cleardirty(vnode_t vp)
6890 {
6891 vnode_lock_spin(vp);
6892 vp->v_flag &= ~VISDIRTY;
6893 vnode_unlock(vp);
6894 return 0;
6895 }
6896
6897 int
vnode_isdirty(vnode_t vp)6898 vnode_isdirty(vnode_t vp)
6899 {
6900 int dirty;
6901
6902 vnode_lock_spin(vp);
6903 dirty = (vp->v_flag & VISDIRTY) ? 1 : 0;
6904 vnode_unlock(vp);
6905
6906 return dirty;
6907 }
6908
6909 static int
vn_create_reg(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,struct vnode_attr * vap,uint32_t flags,int fmode,uint32_t * statusp,vfs_context_t ctx)6910 vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
6911 {
6912 /* Only use compound VNOP for compound operation */
6913 if (vnode_compound_open_available(dvp) && ((flags & VN_CREATE_DOOPEN) != 0)) {
6914 *vpp = NULLVP;
6915 return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, O_CREAT, fmode, statusp, vap, ctx);
6916 } else {
6917 return VNOP_CREATE(dvp, vpp, &ndp->ni_cnd, vap, ctx);
6918 }
6919 }
6920
6921 /*
6922 * Create a filesystem object of arbitrary type with arbitrary attributes in
6923 * the spevied directory with the specified name.
6924 *
6925 * Parameters: dvp Pointer to the vnode of the directory
6926 * in which to create the object.
6927 * vpp Pointer to the area into which to
6928 * return the vnode of the created object.
6929 * cnp Component name pointer from the namei
6930 * data structure, containing the name to
6931 * use for the create object.
6932 * vap Pointer to the vnode_attr structure
6933 * describing the object to be created,
6934 * including the type of object.
6935 * flags VN_* flags controlling ACL inheritance
6936 * and whether or not authorization is to
6937 * be required for the operation.
6938 *
6939 * Returns: 0 Success
6940 * !0 errno value
6941 *
6942 * Implicit: *vpp Contains the vnode of the object that
6943 * was created, if successful.
6944 * *cnp May be modified by the underlying VFS.
6945 * *vap May be modified by the underlying VFS.
6946 * modified by either ACL inheritance or
6947 *
6948 *
6949 * be modified, even if the operation is
6950 *
6951 *
6952 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
6953 *
6954 * Modification of '*cnp' and '*vap' by the underlying VFS is
6955 * strongly discouraged.
6956 *
6957 * XXX: This function is a 'vn_*' function; it belongs in vfs_vnops.c
6958 *
6959 * XXX: We should enummerate the possible errno values here, and where
6960 * in the code they originated.
6961 */
6962 errno_t
vn_create(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,struct vnode_attr * vap,uint32_t flags,int fmode,uint32_t * statusp,vfs_context_t ctx)6963 vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
6964 {
6965 errno_t error, old_error;
6966 vnode_t vp = (vnode_t)0;
6967 boolean_t batched;
6968 struct componentname *cnp;
6969 uint32_t defaulted;
6970
6971 cnp = &ndp->ni_cnd;
6972 error = 0;
6973 batched = namei_compound_available(dvp, ndp) ? TRUE : FALSE;
6974
6975 KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr);
6976
6977 if (flags & VN_CREATE_NOINHERIT) {
6978 vap->va_vaflags |= VA_NOINHERIT;
6979 }
6980 if (flags & VN_CREATE_NOAUTH) {
6981 vap->va_vaflags |= VA_NOAUTH;
6982 }
6983 /*
6984 * Handle ACL inheritance, initialize vap.
6985 */
6986 error = vn_attribute_prepare(dvp, vap, &defaulted, ctx);
6987 if (error) {
6988 return error;
6989 }
6990
6991 if (vap->va_type != VREG && (fmode != 0 || (flags & VN_CREATE_DOOPEN) || statusp)) {
6992 panic("Open parameters, but not a regular file.");
6993 }
6994 if ((fmode != 0) && ((flags & VN_CREATE_DOOPEN) == 0)) {
6995 panic("Mode for open, but not trying to open...");
6996 }
6997
6998
6999 /*
7000 * Create the requested node.
7001 */
7002 switch (vap->va_type) {
7003 case VREG:
7004 error = vn_create_reg(dvp, vpp, ndp, vap, flags, fmode, statusp, ctx);
7005 break;
7006 case VDIR:
7007 error = vn_mkdir(dvp, vpp, ndp, vap, ctx);
7008 break;
7009 case VSOCK:
7010 case VFIFO:
7011 case VBLK:
7012 case VCHR:
7013 error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
7014 break;
7015 default:
7016 panic("vnode_create: unknown vtype %d", vap->va_type);
7017 }
7018 if (error != 0) {
7019 KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error);
7020 goto out;
7021 }
7022
7023 vp = *vpp;
7024 old_error = error;
7025
7026 /*
7027 * If some of the requested attributes weren't handled by the VNOP,
7028 * use our fallback code.
7029 */
7030 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap) && *vpp) {
7031 KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl);
7032 error = vnode_setattr_fallback(*vpp, vap, ctx);
7033 }
7034
7035 #if CONFIG_MACF
7036 if ((error == 0) && !(flags & VN_CREATE_NOLABEL)) {
7037 error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
7038 }
7039 #endif
7040
7041 if ((error != 0) && (vp != (vnode_t)0)) {
7042 /* If we've done a compound open, close */
7043 if (batched && (old_error == 0) && (vap->va_type == VREG)) {
7044 VNOP_CLOSE(vp, fmode, ctx);
7045 }
7046
7047 /* Need to provide notifications if a create succeeded */
7048 if (!batched) {
7049 *vpp = (vnode_t) 0;
7050 vnode_put(vp);
7051 vp = NULLVP;
7052 }
7053 }
7054
7055 /*
7056 * For creation VNOPs, this is the equivalent of
7057 * lookup_handle_found_vnode.
7058 */
7059 if (kdebug_enable && *vpp) {
7060 kdebug_lookup(*vpp, cnp);
7061 }
7062
7063 out:
7064 vn_attribute_cleanup(vap, defaulted);
7065
7066 return error;
7067 }
7068
/* kauth scope handle for KAUTH_SCOPE_VNODE, set by vnode_authorize_init() */
static kauth_scope_t vnode_scope;
static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
static int vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
    vnode_t vp, vnode_t dvp, int *errorp);

/*
 * Context carried through a single vnode authorization evaluation.
 * 'flags' caches the results of ownership/membership tests; the bits in
 * 'flags_valid' record which of those cached results have been computed
 * (names suggest owner/group checks for vp and dvp — the evaluator
 * using this struct is outside this view).
 */
typedef struct _vnode_authorize_context {
	vnode_t vp;
	struct vnode_attr *vap;
	vnode_t dvp;
	struct vnode_attr *dvap;
	vfs_context_t ctx;
	int flags;
	int flags_valid;
#define _VAC_IS_OWNER (1<<0)  /* cached: credential owns vp */
#define _VAC_IN_GROUP (1<<1)  /* cached: credential in vp's group */
#define _VAC_IS_DIR_OWNER (1<<2)  /* cached: credential owns dvp */
#define _VAC_IN_DIR_GROUP (1<<3)  /* cached: credential in dvp's group */
#define _VAC_NO_VNODE_POINTERS (1<<4)  /* vp/dvp pointers must not be used */
} *vauth_ctx;
7089
/*
 * Register the vnode authorization scope with kauth;
 * vnode_authorize_callback becomes the listener for KAUTH_SCOPE_VNODE.
 */
void
vnode_authorize_init(void)
{
	vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
}
7095
/*
 * Bits recorded in *defaulted_fieldsp by vn_attribute_prepare() (via
 * vnode_authattr_new_internal()) and consumed by vn_attribute_cleanup()
 * to clear attributes that were defaulted rather than caller-supplied.
 */
#define VATTR_PREPARE_DEFAULTED_UID 0x1
#define VATTR_PREPARE_DEFAULTED_GID 0x2
#define VATTR_PREPARE_DEFAULTED_MODE 0x4
7099
/*
 * Prepare a vnode_attr for object creation in directory 'dvp'.
 *
 * When the mount supports extended security and VA_NOINHERIT is not
 * set, the caller's ACL (if any) is run through kauth_acl_inherit();
 * the original ACL is stashed in vap->va_base_acl so that
 * vn_attribute_cleanup() can restore it afterwards.  Defaulted
 * attribute fields are then filled in by vnode_authattr_new_internal(),
 * which records VATTR_PREPARE_DEFAULTED_* bits in *defaulted_fieldsp.
 *
 * Returns 0 on success or an errno; on failure any state set up here
 * has already been undone via vn_attribute_cleanup().
 */
int
vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
{
	kauth_acl_t nacl = NULL, oacl = NULL;
	int error;

	/*
	 * Handle ACL inheritance.
	 */
	if (!(vap->va_vaflags & VA_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
		/* save the original filesec */
		if (VATTR_IS_ACTIVE(vap, va_acl)) {
			oacl = vap->va_acl;
		}

		vap->va_acl = NULL;
		if ((error = kauth_acl_inherit(dvp,
		    oacl,
		    &nacl,
		    vap->va_type == VDIR,
		    ctx)) != 0) {
			KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error);
			return error;
		}

		/*
		 * If the generated ACL is NULL, then we can save ourselves some effort
		 * by clearing the active bit.
		 */
		if (nacl == NULL) {
			VATTR_CLEAR_ACTIVE(vap, va_acl);
		} else {
			/* stash the original so cleanup can restore it */
			vap->va_base_acl = oacl;
			VATTR_SET(vap, va_acl, nacl);
		}
	}

	/* fill in defaulted uid/gid/mode; VA_NOAUTH skips authorization */
	error = vnode_authattr_new_internal(dvp, vap, (vap->va_vaflags & VA_NOAUTH), defaulted_fieldsp, ctx);
	if (error) {
		vn_attribute_cleanup(vap, *defaulted_fieldsp);
	}

	return error;
}
7144
/*
 * Undo the effects of vn_attribute_prepare() after a create attempt.
 *
 * Restores the caller's original ACL (stashed in va_base_acl), frees
 * the inherited ACL product where appropriate, and clears the active
 * bits of any attributes that were defaulted rather than caller
 * supplied (per the VATTR_PREPARE_DEFAULTED_* bits in
 * 'defaulted_fields').
 */
void
vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields)
{
	/*
	 * If the caller supplied a filesec in vap, it has been replaced
	 * now by the post-inheritance copy.  We need to put the original back
	 * and free the inherited product.
	 */
	kauth_acl_t nacl, oacl;

	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		nacl = vap->va_acl;
		oacl = vap->va_base_acl;

		if (oacl) {
			/* restore the caller's original ACL */
			VATTR_SET(vap, va_acl, oacl);
			vap->va_base_acl = NULL;
		} else {
			VATTR_CLEAR_ACTIVE(vap, va_acl);
		}

		if (nacl != NULL) {
			/*
			 * Only free the ACL buffer if 'VA_FILESEC_ACL' is not set as it
			 * should be freed by the caller or it is a post-inheritance copy.
			 */
			if (!(vap->va_vaflags & VA_FILESEC_ACL) ||
			    (oacl != NULL && nacl != oacl)) {
				kauth_acl_free(nacl);
			}
		}
	}

	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_MODE) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_mode);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_GID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_UID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
	}

	return;
}
7190
7191 int
vn_authorize_unlink(vnode_t dvp,vnode_t vp,struct componentname * cnp,vfs_context_t ctx,__unused void * reserved)7192 vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved)
7193 {
7194 #if !CONFIG_MACF
7195 #pragma unused(cnp)
7196 #endif
7197 int error = 0;
7198
7199 /*
7200 * Normally, unlinking of directories is not supported.
7201 * However, some file systems may have limited support.
7202 */
7203 if ((vp->v_type == VDIR) &&
7204 !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) {
7205 return EPERM; /* POSIX */
7206 }
7207
7208 /* authorize the delete operation */
7209 #if CONFIG_MACF
7210 if (!error) {
7211 error = mac_vnode_check_unlink(ctx, dvp, vp, cnp);
7212 }
7213 #endif /* MAC */
7214 if (!error) {
7215 error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
7216 }
7217
7218 return error;
7219 }
7220
/*
 * Authorize an open of the existing vnode 'vp' with open mode 'fmode'.
 *
 * Rejects combinations that can never succeed (O_DIRECTORY on a
 * non-directory, opening a socket, O_NOFOLLOW hitting a symlink,
 * write/truncate on a directory, trailing slash on a non-directory),
 * runs the MACF open check, then authorizes the computed kauth
 * read/write/append action.  Returns 0 if the open is authorized,
 * or an errno.
 */
int
vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved)
{
	/* Open of existing case */
	kauth_action_t action;
	int error = 0;
	if (cnp->cn_ndp == NULL) {
		panic("NULL ndp");
	}
	if (reserved != NULL) {
		panic("reserved not NULL.");
	}

#if CONFIG_MACF
	/* XXX may do duplicate work here, but ignore that for now (idempotent) */
	if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) {
		error = vnode_label(vnode_mount(vp), NULL, vp, NULL, 0, ctx);
		if (error) {
			return error;
		}
	}
#endif

	if ((fmode & O_DIRECTORY) && vp->v_type != VDIR) {
		return ENOTDIR;
	}

	if (vp->v_type == VSOCK && vp->v_tag != VT_FDESC) {
		return EOPNOTSUPP;     /* Operation not supported on socket */
	}

	if (vp->v_type == VLNK && (fmode & O_NOFOLLOW) != 0) {
		return ELOOP;        /* O_NOFOLLOW was specified and the target is a symbolic link */
	}

	/* disallow write operations on directories */
	if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) {
		return EISDIR;
	}

	/* a trailing slash in the path requires a directory */
	if ((cnp->cn_ndp->ni_flag & NAMEI_TRAILINGSLASH)) {
		if (vp->v_type != VDIR) {
			return ENOTDIR;
		}
	}

#if CONFIG_MACF
	/* If a file being opened is a shadow file containing
	 * namedstream data, ignore the macf checks because it
	 * is a kernel internal file and access should always
	 * be allowed.
	 */
	if (!(vnode_isshadow(vp) && vnode_isnamedstream(vp))) {
		error = mac_vnode_check_open(ctx, vp, fmode);
		if (error) {
			return error;
		}
	}
#endif

	/* compute action to be authorized */
	action = 0;
	if (fmode & FREAD) {
		action |= KAUTH_VNODE_READ_DATA;
	}
	if (fmode & (FWRITE | O_TRUNC)) {
		/*
		 * If we are writing, appending, and not truncating,
		 * indicate that we are appending so that if the
		 * UF_APPEND or SF_APPEND bits are set, we do not deny
		 * the open.
		 */
		if ((fmode & O_APPEND) && !(fmode & O_TRUNC)) {
			action |= KAUTH_VNODE_APPEND_DATA;
		} else {
			action |= KAUTH_VNODE_WRITE_DATA;
		}
	}
	error = vnode_authorize(vp, NULL, action, ctx);
#if NAMEDSTREAMS
	if (error == EACCES) {
		/*
		 * Shadow files may exist on-disk with a different UID/GID
		 * than that of the current context.  Verify that this file
		 * is really a shadow file.  If it was created successfully
		 * then it should be authorized.
		 */
		if (vnode_isshadow(vp) && vnode_isnamedstream(vp)) {
			error = vnode_verifynamedstream(vp);
		}
	}
#endif

	return error;
}
7316
7317 int
vn_authorize_create(vnode_t dvp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx,void * reserved)7318 vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
7319 {
7320 #if !CONFIG_MACF
7321 #pragma unused(vap)
7322 #endif
7323 /* Creation case */
7324 int error;
7325
7326 if (cnp->cn_ndp == NULL) {
7327 panic("NULL cn_ndp");
7328 }
7329 if (reserved != NULL) {
7330 panic("reserved not NULL.");
7331 }
7332
7333 /* Only validate path for creation if we didn't do a complete lookup */
7334 if (cnp->cn_ndp->ni_flag & NAMEI_UNFINISHED) {
7335 error = lookup_validate_creation_path(cnp->cn_ndp);
7336 if (error) {
7337 return error;
7338 }
7339 }
7340
7341 #if CONFIG_MACF
7342 error = mac_vnode_check_create(ctx, dvp, cnp, vap);
7343 if (error) {
7344 return error;
7345 }
7346 #endif /* CONFIG_MACF */
7347
7348 return vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx);
7349 }
7350
7351 int
vn_authorize_rename(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_context_t ctx,void * reserved)7352 vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
7353 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
7354 vfs_context_t ctx, void *reserved)
7355 {
7356 return vn_authorize_renamex(fdvp, fvp, fcnp, tdvp, tvp, tcnp, ctx, 0, reserved);
7357 }
7358
7359 int
vn_authorize_renamex(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_context_t ctx,vfs_rename_flags_t flags,void * reserved)7360 vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
7361 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
7362 vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
7363 {
7364 return vn_authorize_renamex_with_paths(fdvp, fvp, fcnp, NULL, tdvp, tvp, tcnp, NULL, ctx, flags, reserved);
7365 }
7366
/*
 * Authorize a rename (or, with VFS_RENAME_SWAP, an exchange) of
 * fdvp/fvp to tdvp/tvp.
 *
 * Order of checks: sanity checks ("."/".." rejection, creation-path
 * validation), MACF rename checks (both directions for swap),
 * directory-vs-file mismatch and cycle prevention, then the kauth
 * authorization: DELETE rights on the node(s) being displaced plus
 * ADD_FILE/ADD_SUBDIRECTORY rights in the receiving directory.
 * 'from_path'/'to_path', when supplied, are used to post
 * KAUTH_FILEOP_WILL_RENAME notifications ahead of the DELETE checks.
 * Returns 0 if the operation is authorized, or an errno.
 */
int
vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path,
    struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path,
    vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
{
	int error = 0;
	int moving = 0;
	bool swap = flags & VFS_RENAME_SWAP;

	if (reserved != NULL) {
		panic("Passed something other than NULL as reserved field!");
	}

	/*
	 * Avoid renaming "." and "..".
	 *
	 * XXX No need to check for this in the FS.  We should always have the leaves
	 * in VFS in this case.
	 */
	if (fvp->v_type == VDIR &&
	    ((fdvp == fvp) ||
	    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
	    ((fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT))) {
		error = EINVAL;
		goto out;
	}

	if (tvp == NULLVP && vnode_compound_rename_available(tdvp)) {
		error = lookup_validate_creation_path(tcnp->cn_ndp);
		if (error) {
			goto out;
		}
	}

	/***** <MACF> *****/
#if CONFIG_MACF
	error = mac_vnode_check_rename(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
	if (error) {
		goto out;
	}
	if (swap) {
		/* a swap is symmetric, so check the reverse direction too */
		error = mac_vnode_check_rename(ctx, tdvp, tvp, tcnp, fdvp, fvp, fcnp);
		if (error) {
			goto out;
		}
	}
#endif
	/***** </MACF> *****/

	/***** <MiscChecks> *****/
	if (tvp != NULL) {
		if (!swap) {
			/* cannot replace a directory with a file or vice versa */
			if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
				error = ENOTDIR;
				goto out;
			} else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
				error = EISDIR;
				goto out;
			}
		}
	} else if (swap) {
		/*
		 * Caller should have already checked this and returned
		 * ENOENT.  If we send back ENOENT here, caller will retry
		 * which isn't what we want so we send back EINVAL here
		 * instead.
		 */
		error = EINVAL;
		goto out;
	}

	if (fvp == tdvp) {
		error = EINVAL;
		goto out;
	}

	/*
	 * The following edge case is caught here:
	 * (to cannot be a descendent of from)
	 *
	 *       o fdvp
	 *      /
	 *     /
	 *    o fvp
	 *     \
	 *      \
	 *       o tdvp
	 *      /
	 *     /
	 *    o tvp
	 */
	if (tdvp->v_parent == fvp) {
		error = EINVAL;
		goto out;
	}

	if (swap && fdvp->v_parent == tvp) {
		error = EINVAL;
		goto out;
	}
	/***** </MiscChecks> *****/

	/***** <Kauth> *****/

	/*
	 * As part of the Kauth step, we call out to allow 3rd-party
	 * fileop notification of "about to rename".  This is needed
	 * in the event that 3rd-parties need to know that the DELETE
	 * authorization is actually part of a rename.  It's important
	 * that we guarantee that the DELETE call-out will always be
	 * made if the WILL_RENAME call-out is made.  Another fileop
	 * call-out will be performed once the operation is completed.
	 * We can ignore the result of kauth_authorize_fileop().
	 *
	 * N.B. We are passing the vnode and *both* paths to each
	 * call; kauth_authorize_fileop() extracts the "from" path
	 * when posting a KAUTH_FILEOP_WILL_RENAME notification.
	 * As such, we only post these notifications if all of the
	 * information we need is provided.
	 */

	if (swap) {
		kauth_action_t f = 0, t = 0;

		/*
		 * Directories changing parents need ...ADD_SUBDIR...  to
		 * permit changing ".."
		 */
		if (fdvp != tdvp) {
			if (vnode_isdir(fvp)) {
				f = KAUTH_VNODE_ADD_SUBDIRECTORY;
			}
			if (vnode_isdir(tvp)) {
				t = KAUTH_VNODE_ADD_SUBDIRECTORY;
			}
		}
		if (to_path != NULL) {
			kauth_authorize_fileop(vfs_context_ucred(ctx),
			    KAUTH_FILEOP_WILL_RENAME,
			    (uintptr_t)fvp,
			    (uintptr_t)to_path);
		}
		error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | f, ctx);
		if (error) {
			goto out;
		}
		if (from_path != NULL) {
			kauth_authorize_fileop(vfs_context_ucred(ctx),
			    KAUTH_FILEOP_WILL_RENAME,
			    (uintptr_t)tvp,
			    (uintptr_t)from_path);
		}
		error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE | t, ctx);
		if (error) {
			goto out;
		}
		/*
		 * note the cross: fvp lands in tdvp (action f) and tvp
		 * lands in fdvp (action t)
		 */
		f = vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
		t = vnode_isdir(tvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
		if (fdvp == tdvp) {
			error = vnode_authorize(fdvp, NULL, f | t, ctx);
		} else {
			error = vnode_authorize(fdvp, NULL, t, ctx);
			if (error) {
				goto out;
			}
			error = vnode_authorize(tdvp, NULL, f, ctx);
		}
		if (error) {
			goto out;
		}
	} else {
		error = 0;
		if ((tvp != NULL) && vnode_isdir(tvp)) {
			if (tvp != fdvp) {
				moving = 1;
			}
		} else if (tdvp != fdvp) {
			moving = 1;
		}

		/*
		 * must have delete rights to remove the old name even in
		 * the simple case of fdvp == tdvp.
		 *
		 * If fvp is a directory, and we are changing it's parent,
		 * then we also need rights to rewrite its ".." entry as well.
		 */
		if (to_path != NULL) {
			kauth_authorize_fileop(vfs_context_ucred(ctx),
			    KAUTH_FILEOP_WILL_RENAME,
			    (uintptr_t)fvp,
			    (uintptr_t)to_path);
		}
		if (vnode_isdir(fvp)) {
			if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
				goto out;
			}
		} else {
			if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0) {
				goto out;
			}
		}
		if (moving) {
			/* moving into tdvp or tvp, must have rights to add */
			if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp,
			    NULL,
			    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE,
			    ctx)) != 0) {
				goto out;
			}
		} else {
			/* node staying in same directory, must be allowed to add new name */
			if ((error = vnode_authorize(fdvp, NULL,
			    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0) {
				goto out;
			}
		}
		/* overwriting tvp */
		if ((tvp != NULL) && !vnode_isdir(tvp) &&
		    ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) {
			goto out;
		}
	}

	/***** </Kauth> *****/

	/* XXX more checks? */
out:
	return error;
}
7597
7598 int
vn_authorize_mkdir(vnode_t dvp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx,void * reserved)7599 vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
7600 {
7601 #if !CONFIG_MACF
7602 #pragma unused(vap)
7603 #endif
7604 int error;
7605
7606 if (reserved != NULL) {
7607 panic("reserved not NULL in vn_authorize_mkdir()");
7608 }
7609
7610 /* XXX A hack for now, to make shadow files work */
7611 if (cnp->cn_ndp == NULL) {
7612 return 0;
7613 }
7614
7615 if (vnode_compound_mkdir_available(dvp)) {
7616 error = lookup_validate_creation_path(cnp->cn_ndp);
7617 if (error) {
7618 goto out;
7619 }
7620 }
7621
7622 #if CONFIG_MACF
7623 error = mac_vnode_check_create(ctx,
7624 dvp, cnp, vap);
7625 if (error) {
7626 goto out;
7627 }
7628 #endif
7629
7630 /* authorize addition of a directory to the parent */
7631 if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
7632 goto out;
7633 }
7634
7635 out:
7636 return error;
7637 }
7638
/*
 * Authorize removal of directory 'vp' (named by 'cnp') from its parent
 * 'dvp'.
 *
 * Rejects non-directories (ENOTDIR) and removal of "." (EINVAL), runs
 * the MACF unlink check, then authorizes KAUTH_VNODE_DELETE.  Returns
 * 0 if the rmdir is permitted, else an errno.
 */
int
vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved)
{
#if CONFIG_MACF
	int error;
#else
#pragma unused(cnp)
#endif
	if (reserved != NULL) {
		panic("Non-NULL reserved argument to vn_authorize_rmdir()");
	}

	if (vp->v_type != VDIR) {
		/*
		 * rmdir only deals with directories
		 */
		return ENOTDIR;
	}

	if (dvp == vp) {
		/*
		 * No rmdir "." please.
		 */
		return EINVAL;
	}

#if CONFIG_MACF
	error = mac_vnode_check_unlink(ctx, dvp,
	    vp, cnp);
	if (error) {
		return error;
	}
#endif

	return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
}
7675
7676 /*
7677 * Authorizer for directory cloning. This does not use vnodes but instead
7678 * uses prefilled vnode attributes from the filesystem.
7679 *
7680 * The same function is called to set up the attributes required, perform the
7681 * authorization and cleanup (if required)
7682 */
7683 int
vnode_attr_authorize_dir_clone(struct vnode_attr * vap,kauth_action_t action,struct vnode_attr * dvap,__unused vnode_t sdvp,mount_t mp,dir_clone_authorizer_op_t vattr_op,uint32_t flags,vfs_context_t ctx,__unused void * reserved)7684 vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action,
7685 struct vnode_attr *dvap, __unused vnode_t sdvp, mount_t mp,
7686 dir_clone_authorizer_op_t vattr_op, uint32_t flags, vfs_context_t ctx,
7687 __unused void *reserved)
7688 {
7689 int error;
7690 int is_suser = vfs_context_issuser(ctx);
7691
7692 if (vattr_op == OP_VATTR_SETUP) {
7693 VATTR_INIT(vap);
7694
7695 /*
7696 * When ACL inheritence is implemented, both vap->va_acl and
7697 * dvap->va_acl will be required (even as superuser).
7698 */
7699 VATTR_WANTED(vap, va_type);
7700 VATTR_WANTED(vap, va_mode);
7701 VATTR_WANTED(vap, va_flags);
7702 VATTR_WANTED(vap, va_uid);
7703 VATTR_WANTED(vap, va_gid);
7704 if (dvap) {
7705 VATTR_INIT(dvap);
7706 VATTR_WANTED(dvap, va_flags);
7707 }
7708
7709 if (!is_suser) {
7710 /*
7711 * If not superuser, we have to evaluate ACLs and
7712 * need the target directory gid to set the initial
7713 * gid of the new object.
7714 */
7715 VATTR_WANTED(vap, va_acl);
7716 if (dvap) {
7717 VATTR_WANTED(dvap, va_gid);
7718 }
7719 } else if (dvap && (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
7720 VATTR_WANTED(dvap, va_gid);
7721 }
7722 return 0;
7723 } else if (vattr_op == OP_VATTR_CLEANUP) {
7724 return 0; /* Nothing to do for now */
7725 }
7726
7727 /* dvap isn't used for authorization */
7728 error = vnode_attr_authorize(vap, NULL, mp, action, ctx);
7729
7730 if (error) {
7731 return error;
7732 }
7733
7734 /*
7735 * vn_attribute_prepare should be able to accept attributes as well as
7736 * vnodes but for now we do this inline.
7737 */
7738 if (!is_suser || (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
7739 /*
7740 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit
7741 * owner is set, that owner takes ownership of all new files.
7742 */
7743 if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
7744 (mp->mnt_fsowner != KAUTH_UID_NONE)) {
7745 VATTR_SET(vap, va_uid, mp->mnt_fsowner);
7746 } else {
7747 /* default owner is current user */
7748 VATTR_SET(vap, va_uid,
7749 kauth_cred_getuid(vfs_context_ucred(ctx)));
7750 }
7751
7752 if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
7753 (mp->mnt_fsgroup != KAUTH_GID_NONE)) {
7754 VATTR_SET(vap, va_gid, mp->mnt_fsgroup);
7755 } else {
7756 /*
7757 * default group comes from parent object,
7758 * fallback to current user
7759 */
7760 if (VATTR_IS_SUPPORTED(dvap, va_gid)) {
7761 VATTR_SET(vap, va_gid, dvap->va_gid);
7762 } else {
7763 VATTR_SET(vap, va_gid,
7764 kauth_cred_getgid(vfs_context_ucred(ctx)));
7765 }
7766 }
7767 }
7768
7769 /* Inherit SF_RESTRICTED bit from destination directory only */
7770 if (VATTR_IS_ACTIVE(vap, va_flags)) {
7771 VATTR_SET(vap, va_flags,
7772 ((vap->va_flags & ~(UF_DATAVAULT | SF_RESTRICTED)))); /* Turn off from source */
7773 if (VATTR_IS_ACTIVE(dvap, va_flags)) {
7774 VATTR_SET(vap, va_flags,
7775 vap->va_flags | (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
7776 }
7777 } else if (VATTR_IS_ACTIVE(dvap, va_flags)) {
7778 VATTR_SET(vap, va_flags, (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
7779 }
7780
7781 return 0;
7782 }
7783
7784
7785 /*
7786 * Authorize an operation on a vnode.
7787 *
7788 * This is KPI, but here because it needs vnode_scope.
7789 *
7790 * Returns: 0 Success
7791 * kauth_authorize_action:EPERM ...
7792 * xlate => EACCES Permission denied
7793 * kauth_authorize_action:0 Success
7794 * kauth_authorize_action: Depends on callback return; this is
7795 * usually only vnode_authorize_callback(),
 *		but may include other listeners, if any
7797 * exist.
7798 * EROFS
7799 * EACCES
7800 * EPERM
7801 * ???
7802 */
7803 int
vnode_authorize(vnode_t vp,vnode_t dvp,kauth_action_t action,vfs_context_t ctx)7804 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
7805 {
7806 int error, result;
7807
7808 /*
7809 * We can't authorize against a dead vnode; allow all operations through so that
7810 * the correct error can be returned.
7811 */
7812 if (vp->v_type == VBAD) {
7813 return 0;
7814 }
7815
7816 error = 0;
7817 result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
7818 (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
7819 if (result == EPERM) { /* traditional behaviour */
7820 result = EACCES;
7821 }
7822 /* did the lower layers give a better error return? */
7823 if ((result != 0) && (error != 0)) {
7824 return error;
7825 }
7826 return result;
7827 }
7828
7829 /*
7830 * Test for vnode immutability.
7831 *
7832 * The 'append' flag is set when the authorization request is constrained
7833 * to operations which only request the right to append to a file.
7834 *
7835 * The 'ignore' flag is set when an operation modifying the immutability flags
7836 * is being authorized. We check the system securelevel to determine which
7837 * immutability flags we can ignore.
7838 */
7839 static int
vnode_immutable(struct vnode_attr * vap,int append,int ignore)7840 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
7841 {
7842 int mask;
7843
7844 /* start with all bits precluding the operation */
7845 mask = IMMUTABLE | APPEND;
7846
7847 /* if appending only, remove the append-only bits */
7848 if (append) {
7849 mask &= ~APPEND;
7850 }
7851
7852 /* ignore only set when authorizing flags changes */
7853 if (ignore) {
7854 if (securelevel <= 0) {
7855 /* in insecure state, flags do not inhibit changes */
7856 mask = 0;
7857 } else {
7858 /* in secure state, user flags don't inhibit */
7859 mask &= ~(UF_IMMUTABLE | UF_APPEND);
7860 }
7861 }
7862 KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
7863 if ((vap->va_flags & mask) != 0) {
7864 return EPERM;
7865 }
7866 return 0;
7867 }
7868
7869 static int
vauth_node_owner(struct vnode_attr * vap,kauth_cred_t cred)7870 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
7871 {
7872 int result;
7873
7874 /* default assumption is not-owner */
7875 result = 0;
7876
7877 /*
7878 * If the filesystem has given us a UID, we treat this as authoritative.
7879 */
7880 if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
7881 result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
7882 }
7883 /* we could test the owner UUID here if we had a policy for it */
7884
7885 return result;
7886 }
7887
7888 /*
7889 * vauth_node_group
7890 *
7891 * Description: Ask if a cred is a member of the group owning the vnode object
7892 *
7893 * Parameters: vap vnode attribute
7894 * vap->va_gid group owner of vnode object
7895 * cred credential to check
7896 * ismember pointer to where to put the answer
7897 * idontknow Return this if we can't get an answer
7898 *
7899 * Returns: 0 Success
7900 * idontknow Can't get information
 *		kauth_cred_ismember_gid:???	Error from kauth subsystem
7903 */
7904 static int
vauth_node_group(struct vnode_attr * vap,kauth_cred_t cred,int * ismember,int idontknow)7905 vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
7906 {
7907 int error;
7908 int result;
7909
7910 error = 0;
7911 result = 0;
7912
7913 /*
7914 * The caller is expected to have asked the filesystem for a group
7915 * at some point prior to calling this function. The answer may
7916 * have been that there is no group ownership supported for the
7917 * vnode object, in which case we return
7918 */
7919 if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
7920 error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
7921 /*
7922 * Credentials which are opted into external group membership
7923 * resolution which are not known to the external resolver
7924 * will result in an ENOENT error. We translate this into
7925 * the appropriate 'idontknow' response for our caller.
7926 *
7927 * XXX We do not make a distinction here between an ENOENT
7928 * XXX arising from a response from the external resolver,
7929 * XXX and an ENOENT which is internally generated. This is
7930 * XXX a deficiency of the published kauth_cred_ismember_gid()
7931 * XXX KPI which can not be overcome without new KPI. For
7932 * XXX all currently known cases, however, this wil result
7933 * XXX in correct behaviour.
7934 */
7935 if (error == ENOENT) {
7936 error = idontknow;
7937 }
7938 }
7939 /*
7940 * XXX We could test the group UUID here if we had a policy for it,
7941 * XXX but this is problematic from the perspective of synchronizing
7942 * XXX group UUID and POSIX GID ownership of a file and keeping the
7943 * XXX values coherent over time. The problem is that the local
7944 * XXX system will vend transient group UUIDs for unknown POSIX GID
7945 * XXX values, and these are not persistent, whereas storage of values
7946 * XXX is persistent. One potential solution to this is a local
7947 * XXX (persistent) replica of remote directory entries and vended
7948 * XXX local ids in a local directory server (think in terms of a
7949 * XXX caching DNS server).
7950 */
7951
7952 if (!error) {
7953 *ismember = result;
7954 }
7955 return error;
7956 }
7957
7958 static int
vauth_file_owner(vauth_ctx vcp)7959 vauth_file_owner(vauth_ctx vcp)
7960 {
7961 int result;
7962
7963 if (vcp->flags_valid & _VAC_IS_OWNER) {
7964 result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
7965 } else {
7966 result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
7967
7968 /* cache our result */
7969 vcp->flags_valid |= _VAC_IS_OWNER;
7970 if (result) {
7971 vcp->flags |= _VAC_IS_OWNER;
7972 } else {
7973 vcp->flags &= ~_VAC_IS_OWNER;
7974 }
7975 }
7976 return result;
7977 }
7978
7979
7980 /*
7981 * vauth_file_ingroup
7982 *
 * Description:	Ask if a user is a member of the group owning the file
7984 *
7985 * Parameters: vcp The vnode authorization context that
7986 * contains the user and directory info
7987 * vcp->flags_valid Valid flags
7988 * vcp->flags Flags values
7989 * vcp->vap File vnode attributes
7990 * vcp->ctx VFS Context (for user)
7991 * ismember pointer to where to put the answer
7992 * idontknow Return this if we can't get an answer
7993 *
7994 * Returns: 0 Success
7995 * vauth_node_group:? Error from vauth_node_group()
7996 *
7997 * Implicit returns: *ismember 0 The user is not a group member
7998 * 1 The user is a group member
7999 */
8000 static int
vauth_file_ingroup(vauth_ctx vcp,int * ismember,int idontknow)8001 vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
8002 {
8003 int error;
8004
8005 /* Check for a cached answer first, to avoid the check if possible */
8006 if (vcp->flags_valid & _VAC_IN_GROUP) {
8007 *ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
8008 error = 0;
8009 } else {
8010 /* Otherwise, go look for it */
8011 error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);
8012
8013 if (!error) {
8014 /* cache our result */
8015 vcp->flags_valid |= _VAC_IN_GROUP;
8016 if (*ismember) {
8017 vcp->flags |= _VAC_IN_GROUP;
8018 } else {
8019 vcp->flags &= ~_VAC_IN_GROUP;
8020 }
8021 }
8022 }
8023 return error;
8024 }
8025
8026 static int
vauth_dir_owner(vauth_ctx vcp)8027 vauth_dir_owner(vauth_ctx vcp)
8028 {
8029 int result;
8030
8031 if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
8032 result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
8033 } else {
8034 result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
8035
8036 /* cache our result */
8037 vcp->flags_valid |= _VAC_IS_DIR_OWNER;
8038 if (result) {
8039 vcp->flags |= _VAC_IS_DIR_OWNER;
8040 } else {
8041 vcp->flags &= ~_VAC_IS_DIR_OWNER;
8042 }
8043 }
8044 return result;
8045 }
8046
8047 /*
8048 * vauth_dir_ingroup
8049 *
8050 * Description: Ask if a user is a member of the group owning the directory
8051 *
8052 * Parameters: vcp The vnode authorization context that
8053 * contains the user and directory info
8054 * vcp->flags_valid Valid flags
8055 * vcp->flags Flags values
8056 * vcp->dvap Dir vnode attributes
8057 * vcp->ctx VFS Context (for user)
8058 * ismember pointer to where to put the answer
8059 * idontknow Return this if we can't get an answer
8060 *
8061 * Returns: 0 Success
8062 * vauth_node_group:? Error from vauth_node_group()
8063 *
8064 * Implicit returns: *ismember 0 The user is not a group member
8065 * 1 The user is a group member
8066 */
8067 static int
vauth_dir_ingroup(vauth_ctx vcp,int * ismember,int idontknow)8068 vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
8069 {
8070 int error;
8071
8072 /* Check for a cached answer first, to avoid the check if possible */
8073 if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
8074 *ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
8075 error = 0;
8076 } else {
8077 /* Otherwise, go look for it */
8078 error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);
8079
8080 if (!error) {
8081 /* cache our result */
8082 vcp->flags_valid |= _VAC_IN_DIR_GROUP;
8083 if (*ismember) {
8084 vcp->flags |= _VAC_IN_DIR_GROUP;
8085 } else {
8086 vcp->flags &= ~_VAC_IN_DIR_GROUP;
8087 }
8088 }
8089 }
8090 return error;
8091 }
8092
8093 /*
8094 * Test the posix permissions in (vap) to determine whether (credential)
8095 * may perform (action)
8096 */
static int
vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
{
	struct vnode_attr *vap;
	int needed, error, owner_ok, group_ok, world_ok, ismember;
#ifdef KAUTH_DEBUG_ENABLE
	const char *where = "uninitialized";
# define _SETWHERE(c) where = c;
#else
# define _SETWHERE(c)
#endif

	/* checking file or directory? */
	if (on_dir) {
		vap = vcp->dvap;
	} else {
		vap = vcp->vap;
	}

	error = 0;

	/*
	 * We want to do as little work here as possible.  So first we check
	 * which sets of permissions grant us the access we need, and avoid checking
	 * whether specific permissions grant access when more generic ones would.
	 */

	/* owner permissions */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IRUSR;
	}
	if (action & VWRITE) {
		needed |= S_IWUSR;
	}
	if (action & VEXEC) {
		needed |= S_IXUSR;
	}
	owner_ok = (needed & vap->va_mode) == needed;

	/*
	 * Processes with the appropriate entitlement can mark themselves as
	 * ignoring file/directory permissions if they own it.
	 */
	if (!owner_ok && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
		owner_ok = 1;
	}

	/* group permissions */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IRGRP;
	}
	if (action & VWRITE) {
		needed |= S_IWGRP;
	}
	if (action & VEXEC) {
		needed |= S_IXGRP;
	}
	group_ok = (needed & vap->va_mode) == needed;

	/* world permissions */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IROTH;
	}
	if (action & VWRITE) {
		needed |= S_IWOTH;
	}
	if (action & VEXEC) {
		needed |= S_IXOTH;
	}
	world_ok = (needed & vap->va_mode) == needed;

	/* If granted/denied by all three, we're done */
	if (owner_ok && group_ok && world_ok) {
		_SETWHERE("all");
		goto out;
	}

	/* denied by all three classes: no identity check can change the answer */
	if (!owner_ok && !group_ok && !world_ok) {
		_SETWHERE("all");
		error = EACCES;
		goto out;
	}

	/* Check ownership (relatively cheap) */
	if ((on_dir && vauth_dir_owner(vcp)) ||
	    (!on_dir && vauth_file_owner(vcp))) {
		_SETWHERE("user");
		if (!owner_ok) {
			error = EACCES;
		}
		goto out;
	}

	/* Not owner; if group and world both grant it we're done */
	if (group_ok && world_ok) {
		_SETWHERE("group/world");
		goto out;
	}
	if (!group_ok && !world_ok) {
		_SETWHERE("group/world");
		error = EACCES;
		goto out;
	}

	/* Check group membership (most expensive) */
	ismember = 0; /* Default to allow, if the target has no group owner */

	/*
	 * In the case we can't get an answer about the user from the call to
	 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
	 * the side of caution, rather than simply granting access, or we will
	 * fail to correctly implement exclusion groups, so we set the third
	 * parameter on the basis of the state of 'group_ok'.
	 */
	if (on_dir) {
		error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
	} else {
		error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
	}
	if (error) {
		/* membership unresolved: treat as a member exactly when that denies */
		if (!group_ok) {
			ismember = 1;
		}
		error = 0;
	}
	if (ismember) {
		_SETWHERE("group");
		if (!group_ok) {
			error = EACCES;
		}
		goto out;
	}

	/* Not owner, not in group, use world result */
	_SETWHERE("world");
	if (!world_ok) {
		error = EACCES;
	}

	/* FALLTHROUGH */

out:
	KAUTH_DEBUG("%p %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
	    vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
	    (action & VREAD) ? "r" : "-",
	    (action & VWRITE) ? "w" : "-",
	    (action & VEXEC) ? "x" : "-",
	    needed,
	    (vap->va_mode & S_IRUSR) ? "r" : "-",
	    (vap->va_mode & S_IWUSR) ? "w" : "-",
	    (vap->va_mode & S_IXUSR) ? "x" : "-",
	    (vap->va_mode & S_IRGRP) ? "r" : "-",
	    (vap->va_mode & S_IWGRP) ? "w" : "-",
	    (vap->va_mode & S_IXGRP) ? "x" : "-",
	    (vap->va_mode & S_IROTH) ? "r" : "-",
	    (vap->va_mode & S_IWOTH) ? "w" : "-",
	    (vap->va_mode & S_IXOTH) ? "x" : "-",
	    kauth_cred_getuid(vcp->ctx->vc_ucred),
	    on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
	    on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
	return error;
}
8262
8263 /*
8264 * Authorize the deletion of the node vp from the directory dvp.
8265 *
8266 * We assume that:
8267 * - Neither the node nor the directory are immutable.
8268 * - The user is not the superuser.
8269 *
8270 * The precedence of factors for authorizing or denying delete for a credential
8271 *
8272 * 1) Explicit ACE on the node. (allow or deny DELETE)
8273 * 2) Explicit ACE on the directory (allow or deny DELETE_CHILD).
8274 *
8275 * If there are conflicting ACEs on the node and the directory, the node
8276 * ACE wins.
8277 *
8278 * 3) Sticky bit on the directory.
8279 * Deletion is not permitted if the directory is sticky and the caller is
8280 * not owner of the node or directory. The sticky bit rules are like a deny
8281 * delete ACE except lower in priority than ACL's either allowing or denying
8282 * delete.
8283 *
 * 4) POSIX permissions on the directory.
8285 *
8286 * As an optimization, we cache whether or not delete child is permitted
8287 * on directories. This enables us to skip directory ACL and POSIX checks
8288 * as we already have the result from those checks. However, we always check the
8289 * node ACL and, if the directory has the sticky bit set, we always check its
8290 * ACL (even for a directory with an authorized delete child). Furthermore,
8291 * caching the delete child authorization is independent of the sticky bit
8292 * being set as it is only applicable in determining whether the node can be
8293 * deleted or not.
8294 */
static int
vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
{
	struct vnode_attr *vap = vcp->vap;
	struct vnode_attr *dvap = vcp->dvap;
	kauth_cred_t cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval eval;
	int error, ismember;

	/* Check the ACL on the node first; a node ACE outranks a directory ACE */
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}

		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* entitled owners may override an ACL deny */
			if (vauth_file_owner(vcp) && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
				KAUTH_DEBUG("%p Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp);
			return EACCES;
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p ALLOWED - granted by ACL", vcp->vp);
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* Defer to directory */
			KAUTH_DEBUG("%p DEFERRED - by file ACL", vcp->vp);
			break;
		}
	}

	/*
	 * Without a sticky bit, a previously authorized delete child is
	 * sufficient to authorize this delete.
	 *
	 * If the sticky bit is set, a directory ACL which allows delete child
	 * overrides a (potential) sticky bit deny. The authorized delete child
	 * cannot tell us if it was authorized because of an explicit delete
	 * child allow ACE or because of POSIX permissions so we have to check
	 * the directory ACL every time if the directory has a sticky bit.
	 */
	if (!(dvap->va_mode & S_ISTXT) && cached_delete_child) {
		KAUTH_DEBUG("%p ALLOWED - granted by directory ACL or POSIX permissions and no sticky bit on directory", vcp->vp);
		return 0;
	}

	/* check the ACL on the directory */
	if (VATTR_IS_NOT(dvap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
		eval.ae_acl = &dvap->va_acl->acl_ace[0];
		eval.ae_count = dvap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_dir_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		/*
		 * If there is no entry, we are going to defer to other
		 * authorization mechanisms.
		 */
		error = kauth_acl_evaluate(cred, &eval);

		if (error != 0) {
			KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}
		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* entitled directory owners may override an ACL deny */
			if (vauth_dir_owner(vcp) && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
				KAUTH_DEBUG("%p Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p DENIED - denied by directory ACL", vcp->vp);
			return EACCES;
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
			/* remember the grant so later deletes in this dir can skip the check */
			if (!cached_delete_child && vcp->dvp) {
				vnode_cache_authorized_action(vcp->dvp,
				    vcp->ctx, KAUTH_VNODE_DELETE_CHILD);
			}
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* Deferred by directory ACL */
			KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
			break;
		}
	}

	/*
	 * From this point, we can't explicitly allow and if we reach the end
	 * of the function without a denial, then the delete is authorized.
	 */
	if (!cached_delete_child) {
		if (vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */) != 0) {
			KAUTH_DEBUG("%p DENIED - denied by posix permissions", vcp->vp);
			return EACCES;
		}
		/*
		 * Cache the authorized action on the vnode if allowed by the
		 * directory ACL or POSIX permissions. It is correct to cache
		 * this action even if sticky bit would deny deleting the node.
		 */
		if (vcp->dvp) {
			vnode_cache_authorized_action(vcp->dvp, vcp->ctx,
			    KAUTH_VNODE_DELETE_CHILD);
		}
	}

	/* enforce sticky bit behaviour: only the file or dir owner may delete */
	if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
		KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)",
		    vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid);
		return EACCES;
	}

	/* not denied, must be OK */
	return 0;
}
8462
8463
8464 /*
8465 * Authorize an operation based on the node's attributes.
8466 */
static int
vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
{
	struct vnode_attr *vap = vcp->vap;
	kauth_cred_t cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval eval;
	int error, ismember;
	mode_t posix_action;

	/*
	 * If we are the file owner, we automatically have some rights.
	 *
	 * Do we need to expand this to support group ownership?
	 */
	if (vauth_file_owner(vcp)) {
		acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
	}

	/*
	 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
	 * mask the latter. If TAKE_OWNERSHIP is requested the caller is about to
	 * change ownership to themselves, and WRITE_SECURITY is implicitly
	 * granted to the owner. We need to do this because at this point
	 * WRITE_SECURITY may not be granted as the caller is not currently
	 * the owner.
	 */
	if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
	    (acl_rights & KAUTH_VNODE_WRITE_SECURITY)) {
		acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
	}

	if (acl_rights == 0) {
		KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp);
		return 0;
	}

	/* if we have an ACL, evaluate it */
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = acl_rights;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}

		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* entitled owners may override an ACL deny */
			if (vauth_file_owner(vcp) && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
				KAUTH_DEBUG("%p Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
			return EACCES; /* deny, deny, counter-allege */
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* Effectively the same as !delete_child_denied */
			KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
			break;
		}

		/*
		 * Only reached when the ACL deferred; report whether any deny
		 * ACE matched so the caller can decide about caching.
		 */
		*found_deny = eval.ae_found_deny;

		/* fall through and evaluate residual rights */
	} else {
		/* no ACL, everything is residual */
		eval.ae_residual = acl_rights;
	}

	/*
	 * Grant residual rights that have been pre-authorized.
	 */
	eval.ae_residual &= ~preauth_rights;

	/*
	 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
	 */
	if (vauth_file_owner(vcp)) {
		eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
	}

	if (eval.ae_residual == 0) {
		KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp);
		return 0;
	}

	/*
	 * Bail if we have residual rights that can't be granted by posix permissions,
	 * or aren't presumed granted at this point.
	 *
	 * XXX these can be collapsed for performance
	 */
	if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
		KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp);
		return EACCES;
	}
	if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
		KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp);
		return EACCES;
	}

#if DIAGNOSTIC
	if (eval.ae_residual & KAUTH_VNODE_DELETE) {
		panic("vnode_authorize: can't be checking delete permission here");
	}
#endif

	/*
	 * Compute the fallback posix permissions that will satisfy the remaining
	 * rights.
	 */
	posix_action = 0;
	if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
	    KAUTH_VNODE_LIST_DIRECTORY |
	    KAUTH_VNODE_READ_EXTATTRIBUTES)) {
		posix_action |= VREAD;
	}
	if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
	    KAUTH_VNODE_ADD_FILE |
	    KAUTH_VNODE_ADD_SUBDIRECTORY |
	    KAUTH_VNODE_DELETE_CHILD |
	    KAUTH_VNODE_WRITE_ATTRIBUTES |
	    KAUTH_VNODE_WRITE_EXTATTRIBUTES)) {
		posix_action |= VWRITE;
	}
	if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
	    KAUTH_VNODE_SEARCH)) {
		posix_action |= VEXEC;
	}

	if (posix_action != 0) {
		return vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */);
	} else {
		KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
		    vcp->vp,
		    (eval.ae_residual & KAUTH_VNODE_READ_DATA)
		    ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
		    ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_EXECUTE)
		    ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
		    (eval.ae_residual & KAUTH_VNODE_DELETE)
		    ? " DELETE" : "",
		    (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
		    ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
		    ? " DELETE_CHILD" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
		    ? " READ_ATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
		    ? " WRITE_ATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
		    ? " READ_EXTATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
		    ? " WRITE_EXTATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
		    ? " READ_SECURITY" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
		    ? " WRITE_SECURITY" : "",
		    (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
		    ? " CHECKIMMUTABLE" : "",
		    (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
		    ? " CHANGE_OWNER" : "");
	}

	/*
	 * Lack of required Posix permissions implies no reason to deny access.
	 */
	return 0;
}
8663
8664 /*
8665 * Check for file immutability.
8666 */
/*
 * Check whether immutability flags on a node veto the requested rights.
 *
 * Parameters:	mp		Mount the node lives on, for per-filesystem
 *				option checks; may be NULL (options skipped)
 *		vcp		Authorization context; consulted for ownership
 *				when deciding whether an entitled process may
 *				override immutability
 *		vap		Attributes (va_type, va_flags) of the node
 *		rights		Requested kauth rights
 *		ignore		Nonzero to ignore immutability
 *				(KAUTH_VNODE_NOIMMUTABLE was set)
 *
 * Returns:	0		Success
 *		EACCES		User extended attributes disabled on the
 *				filesystem (MNT_NOUSERXATTR)
 *		vnode_immutable:???
 */
static int
vnode_authorize_checkimmutable(mount_t mp, vauth_ctx vcp,
    struct vnode_attr *vap, int rights, int ignore)
{
	int error;
	int append;

	/*
	 * Perform immutability checks for operations that change data.
	 *
	 * Sockets, fifos and devices require special handling.
	 */
	switch (vap->va_type) {
	case VSOCK:
	case VFIFO:
	case VBLK:
	case VCHR:
		/*
		 * Writing to these nodes does not change the filesystem data,
		 * so forget that it's being tried.
		 */
		rights &= ~KAUTH_VNODE_WRITE_DATA;
		break;
	default:
		break;
	}

	error = 0;
	if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
		/* check per-filesystem options if possible */
		if (mp != NULL) {
			/* check for no-EA filesystems */
			if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
			    (vfs_flags(mp) & MNT_NOUSERXATTR)) {
				KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vap);
				error = EACCES;  /* User attributes disabled */
				goto out;
			}
		}

		/*
		 * check for file immutability. first, check if the requested rights are
		 * allowable for a UF_APPEND file.
		 */
		append = 0;
		if (vap->va_type == VDIR) {
			/* for a directory, "append" means only adding entries or writing EAs */
			if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights) {
				append = 1;
			}
		} else {
			if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights) {
				append = 1;
			}
		}
		if ((error = vnode_immutable(vap, append, ignore)) != 0) {
			if (error && !ignore) {
				/*
				 * In case of a rename, we want to check ownership for dvp as well.
				 */
				int owner = 0;
				if (rights & KAUTH_VNODE_DELETE_CHILD && vcp->dvp != NULL) {
					owner = vauth_file_owner(vcp) && vauth_dir_owner(vcp);
				} else {
					owner = vauth_file_owner(vcp);
				}
				/*
				 * An owner whose process is entitled to ignore node
				 * permissions gets a second check with immutability
				 * flags ignored.
				 */
				if (owner && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
					error = vnode_immutable(vap, append, 1);
				}
			}
		}
		if (error) {
			KAUTH_DEBUG("%p DENIED - file is immutable", vap);
			goto out;
		}
	}
out:
	return error;
}
8745
8746 /*
8747 * Handle authorization actions for filesystems that advertise that the
8748 * server will be enforcing.
8749 *
8750 * Returns: 0 Authorization should be handled locally
8751 * 1 Authorization was handled by the FS
8752 *
8753 * Note: Imputed returns will only occur if the authorization request
8754 * was handled by the FS.
8755 *
8756 * Imputed: *resultp, modified Return code from FS when the request is
8757 * handled by the FS.
8758 * VNOP_ACCESS:???
8759 * VNOP_OPEN:???
8760 */
8761 static int
vnode_authorize_opaque(vnode_t vp,int * resultp,kauth_action_t action,vfs_context_t ctx)8762 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
8763 {
8764 int error;
8765
8766 /*
8767 * If the vp is a device node, socket or FIFO it actually represents a local
8768 * endpoint, so we need to handle it locally.
8769 */
8770 switch (vp->v_type) {
8771 case VBLK:
8772 case VCHR:
8773 case VSOCK:
8774 case VFIFO:
8775 return 0;
8776 default:
8777 break;
8778 }
8779
8780 /*
8781 * In the advisory request case, if the filesystem doesn't think it's reliable
8782 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
8783 */
8784 if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount)) {
8785 return 0;
8786 }
8787
8788 /*
8789 * Let the filesystem have a say in the matter. It's OK for it to not implemnent
8790 * VNOP_ACCESS, as most will authorise inline with the actual request.
8791 */
8792 if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
8793 *resultp = error;
8794 KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
8795 return 1;
8796 }
8797
8798 /*
8799 * Typically opaque filesystems do authorisation in-line, but exec is a special case. In
8800 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
8801 */
8802 if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
8803 /* try a VNOP_OPEN for readonly access */
8804 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
8805 *resultp = error;
8806 KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
8807 return 1;
8808 }
8809 VNOP_CLOSE(vp, FREAD, ctx);
8810 }
8811
8812 /*
8813 * We don't have any reason to believe that the request has to be denied at this point,
8814 * so go ahead and allow it.
8815 */
8816 *resultp = 0;
8817 KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
8818 return 1;
8819 }
8820
8821
8822
8823
8824 /*
8825 * Returns: KAUTH_RESULT_ALLOW
8826 * KAUTH_RESULT_DENY
8827 *
8828 * Imputed: *arg3, modified Error code in the deny case
8829 * EROFS Read-only file system
8830 * EACCES Permission denied
8831 * EPERM Operation not permitted [no execute]
8832 * vnode_getattr:ENOMEM Not enough space [only if has filesec]
8833 * vnode_getattr:???
8834 * vnode_authorize_opaque:*arg2 ???
8835 * vnode_authorize_checkimmutable:???
8836 * vnode_authorize_delete:???
8837 * vnode_authorize_simple:???
8838 */
8839
8840
/*
 * kauth listener for the vnode scope.
 *
 * arg0 is the vfs_context_t, arg1 the vnode, arg2 the parent directory
 * vnode (may be NULL), arg3 an int * receiving the errno on deny.
 *
 * Consults the per-vnode cache of previously authorized rights before
 * falling through to the full check in vnode_authorize_callback_int(),
 * and populates the cache when that check allows the action.
 */
static int
vnode_authorize_callback(__unused kauth_cred_t cred, __unused void *idata,
    kauth_action_t action, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2,
    uintptr_t arg3)
{
	vfs_context_t ctx;
	vnode_t cvp = NULLVP;	/* vnode whose rights cache is consulted/updated */
	vnode_t vp, dvp;
	int result = KAUTH_RESULT_DENY;
	int parent_iocount = 0;	/* nonzero if we took an iocount on vp's parent */
	int parent_action; /* In case we need to use namedstream's data fork for cached rights*/

	ctx = (vfs_context_t)arg0;
	vp = (vnode_t)arg1;
	dvp = (vnode_t)arg2;

	/*
	 * if there are 2 vnodes passed in, we don't know at
	 * this point which rights to look at based on the
	 * combined action being passed in... defer until later...
	 * otherwise check the kauth 'rights' cache hung
	 * off of the vnode we're interested in... if we've already
	 * been granted the right we're currently interested in,
	 * we can just return success... otherwise we'll go through
	 * the process of authorizing the requested right(s)... if that
	 * succeeds, we'll add the right(s) to the cache.
	 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
	 */
	if (dvp && vp) {
		goto defer;
	}
	if (dvp) {
		cvp = dvp;
	} else {
		/*
		 * For named streams on local-authorization volumes, rights are cached on the parent;
		 * authorization is determined by looking at the parent's properties anyway, so storing
		 * on the parent means that we don't recompute for the named stream and that if
		 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
		 * stream to flush its cache separately. If we miss in the cache, then we authorize
		 * as if there were no cached rights (passing the named stream vnode and desired rights to
		 * vnode_authorize_callback_int()).
		 *
		 * On an opaquely authorized volume, we don't know the relationship between the
		 * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
		 * on such a volume are authorized directly (rather than using the parent) and have their
		 * own caches. When a named stream vnode is created, we mark the parent as having a named
		 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
		 * find the stream and flush its cache.
		 */
		if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
			cvp = vnode_getparent(vp);
			if (cvp != NULLVP) {
				parent_iocount = 1;
			} else {
				cvp = NULL;
				goto defer; /* If we can't use the parent, take the slow path */
			}

			/* Have to translate some actions */
			parent_action = action;
			if (parent_action & KAUTH_VNODE_READ_DATA) {
				parent_action &= ~KAUTH_VNODE_READ_DATA;
				parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
			}
			if (parent_action & KAUTH_VNODE_WRITE_DATA) {
				parent_action &= ~KAUTH_VNODE_WRITE_DATA;
				parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
			}
		} else {
			cvp = vp;
		}
	}

	/* cache hit: the right was already granted to this context */
	if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
		result = KAUTH_RESULT_ALLOW;
		goto out;
	}
defer:
	result = vnode_authorize_callback_int(action, ctx, vp, dvp, (int *)arg3);

	/* cache the grant for next time (single-vnode case only; cvp is NULL after 2-vnode defer) */
	if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP) {
		KAUTH_DEBUG("%p - caching action = %x", cvp, action);
		vnode_cache_authorized_action(cvp, ctx, action);
	}

out:
	/* drop the iocount taken by vnode_getparent() */
	if (parent_iocount) {
		vnode_put(cvp);
	}

	return result;
}
8934
/*
 * Common core shared by vnode_authorize_callback_int() and
 * vnode_attr_authorize(): immutability checks, then either posix/ACL
 * evaluation (non-superuser) or the reduced superuser checks.
 *
 * Returns 0 on success or an errno. *found_deny is set TRUE when a deny
 * was (or, for the superuser path, must conservatively be assumed to have
 * been) encountered; callers use it to decide whether it is safe to cache
 * KAUTH_VNODE_SEARCHBYANYONE.
 */
static int
vnode_attr_authorize_internal(vauth_ctx vcp, mount_t mp,
    kauth_ace_rights_t rights, int is_suser, boolean_t *found_deny,
    int noimmutable, int parent_authorized_for_delete_child)
{
	int result;

	/*
	 * Check for immutability.
	 *
	 * In the deletion case, parent directory immutability vetoes specific
	 * file rights.
	 */
	if ((result = vnode_authorize_checkimmutable(mp, vcp, vcp->vap, rights,
	    noimmutable)) != 0) {
		goto out;
	}

	/* for delete, also check the parent directory's immutability */
	if ((rights & KAUTH_VNODE_DELETE) &&
	    !parent_authorized_for_delete_child) {
		result = vnode_authorize_checkimmutable(mp, vcp, vcp->dvap,
		    KAUTH_VNODE_DELETE_CHILD, 0);
		if (result) {
			goto out;
		}
	}

	/*
	 * Clear rights that have been authorized by reaching this point, bail if nothing left to
	 * check.
	 */
	rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
	if (rights == 0) {
		goto out;
	}

	/*
	 * If we're not the superuser, authorize based on file properties;
	 * note that even if parent_authorized_for_delete_child is TRUE, we
	 * need to check on the node itself.
	 */
	if (!is_suser) {
		/* process delete rights */
		if ((rights & KAUTH_VNODE_DELETE) &&
		    ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0)) {
			goto out;
		}

		/* process remaining rights */
		if ((rights & ~KAUTH_VNODE_DELETE) &&
		    (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, found_deny)) != 0) {
			goto out;
		}
	} else {
		/*
		 * Execute is only granted to root if one of the x bits is set. This check only
		 * makes sense if the posix mode bits are actually supported.
		 */
		if ((rights & KAUTH_VNODE_EXECUTE) &&
		    (vcp->vap->va_type == VREG) &&
		    VATTR_IS_SUPPORTED(vcp->vap, va_mode) &&
		    !(vcp->vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
			result = EPERM;
			KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vcp, vcp->vap->va_mode);
			goto out;
		}

		/* Assume that there were DENYs so we don't wrongly cache KAUTH_VNODE_SEARCHBYANYONE */
		*found_deny = TRUE;

		KAUTH_DEBUG("%p ALLOWED - caller is superuser", vcp);
	}
out:
	return result;
}
9010
/*
 * Full (cache-miss) path of vnode authorization: performs fast-path
 * filesystem checks (read-only, noexec, opaque authorization), fetches
 * the vnode (and, for delete, parent directory) attributes, and delegates
 * to vnode_attr_authorize_internal().
 *
 * Returns KAUTH_RESULT_ALLOW or KAUTH_RESULT_DENY; on deny the errno is
 * stored through errorp.
 */
static int
vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
    vnode_t vp, vnode_t dvp, int *errorp)
{
	struct _vnode_authorize_context auth_context;
	vauth_ctx vcp;
	kauth_cred_t cred;
	kauth_ace_rights_t rights;
	struct vnode_attr va, dva;
	int result;
	int noimmutable;
	boolean_t parent_authorized_for_delete_child = FALSE;
	boolean_t found_deny = FALSE;
	boolean_t parent_ref = FALSE;	/* TRUE if we hold an extra iocount on vp (namedstream parent) */
	boolean_t is_suser = FALSE;

	vcp = &auth_context;
	vcp->ctx = ctx;
	vcp->vp = vp;
	vcp->dvp = dvp;
	/*
	 * Note that we authorize against the context, not the passed cred
	 * (the same thing anyway)
	 */
	cred = ctx->vc_ucred;

	VATTR_INIT(&va);
	vcp->vap = &va;
	VATTR_INIT(&dva);
	vcp->dvap = &dva;

	vcp->flags = vcp->flags_valid = 0;

#if DIAGNOSTIC
	if ((ctx == NULL) || (vp == NULL) || (cred == NULL)) {
		panic("vnode_authorize: bad arguments (context %p  vp %p  cred %p)", ctx, vp, cred);
	}
#endif

	KAUTH_DEBUG("%p  AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
	    vp, vfs_context_proc(ctx)->p_comm,
	    (action & KAUTH_VNODE_ACCESS)                  ? "access" : "auth",
	    (action & KAUTH_VNODE_READ_DATA)               ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
	    (action & KAUTH_VNODE_WRITE_DATA)              ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
	    (action & KAUTH_VNODE_EXECUTE)                 ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
	    (action & KAUTH_VNODE_DELETE)                  ? " DELETE" : "",
	    (action & KAUTH_VNODE_APPEND_DATA)             ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
	    (action & KAUTH_VNODE_DELETE_CHILD)            ? " DELETE_CHILD" : "",
	    (action & KAUTH_VNODE_READ_ATTRIBUTES)         ? " READ_ATTRIBUTES" : "",
	    (action & KAUTH_VNODE_WRITE_ATTRIBUTES)        ? " WRITE_ATTRIBUTES" : "",
	    (action & KAUTH_VNODE_READ_EXTATTRIBUTES)      ? " READ_EXTATTRIBUTES" : "",
	    (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES)     ? " WRITE_EXTATTRIBUTES" : "",
	    (action & KAUTH_VNODE_READ_SECURITY)           ? " READ_SECURITY" : "",
	    (action & KAUTH_VNODE_WRITE_SECURITY)          ? " WRITE_SECURITY" : "",
	    (action & KAUTH_VNODE_CHANGE_OWNER)            ? " CHANGE_OWNER" : "",
	    (action & KAUTH_VNODE_NOIMMUTABLE)             ? " (noimmutable)" : "",
	    vnode_isdir(vp) ? "directory" : "file",
	    vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);

	/*
	 * Extract the control bits from the action, everything else is
	 * requested rights.
	 */
	noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
	rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);

	if (rights & KAUTH_VNODE_DELETE) {
#if DIAGNOSTIC
		if (dvp == NULL) {
			panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
		}
#endif
		/*
		 * check to see if we've already authorized the parent
		 * directory for deletion of its children... if so, we
		 * can skip a whole bunch of work... we will still have to
		 * authorize that this specific child can be removed
		 */
		if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE) {
			parent_authorized_for_delete_child = TRUE;
		}
	} else {
		/* not deleting: the parent directory is irrelevant */
		vcp->dvp = NULLVP;
		vcp->dvap = NULL;
	}

	/*
	 * Check for read-only filesystems.
	 */
	if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
	    (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
	    (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
		result = EROFS;
		goto out;
	}

	/*
	 * Check for noexec filesystems.
	 */
	if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
		result = EACCES;
		goto out;
	}

	/*
	 * Handle cases related to filesystems with non-local enforcement.
	 * This call can return 0, in which case we will fall through to perform a
	 * check based on VNOP_GETATTR data.  Otherwise it returns 1 and sets
	 * an appropriate result, at which point we can return immediately.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx)) {
		goto out;
	}

	/*
	 * If the vnode is a namedstream (extended attribute) data vnode (eg.
	 * a resource fork), *_DATA becomes *_EXTATTRIBUTES.
	 */
	if (vnode_isnamedstream(vp)) {
		if (rights & KAUTH_VNODE_READ_DATA) {
			rights &= ~KAUTH_VNODE_READ_DATA;
			rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
		}
		if (rights & KAUTH_VNODE_WRITE_DATA) {
			rights &= ~KAUTH_VNODE_WRITE_DATA;
			rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
		}

		/*
		 * Point 'vp' to the namedstream's parent for ACL checking
		 */
		if ((vp->v_parent != NULL) &&
		    (vget_internal(vp->v_parent, 0, VNODE_NODEAD | VNODE_DRAINO) == 0)) {
			parent_ref = TRUE;
			vcp->vp = vp = vp->v_parent;
		}
	}

	if (vfs_context_issuser(ctx)) {
		/*
		 * if we're not asking for execute permissions or modifications,
		 * then we're done, this action is authorized.
		 */
		if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
			goto success;
		}

		is_suser = TRUE;
	}

	/*
	 * Get vnode attributes and extended security information for the vnode
	 * and directory if required.
	 *
	 * If we're root we only want mode bits and flags for checking
	 * execute and immutability.
	 */
	VATTR_WANTED(&va, va_mode);
	VATTR_WANTED(&va, va_flags);
	if (!is_suser) {
		VATTR_WANTED(&va, va_uid);
		VATTR_WANTED(&va, va_gid);
		VATTR_WANTED(&va, va_acl);
	}
	if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
		KAUTH_DEBUG("%p    ERROR - failed to get vnode attributes - %d", vp, result);
		goto out;
	}
	VATTR_WANTED(&va, va_type);
	VATTR_RETURN(&va, va_type, vnode_vtype(vp));

	/* same for the parent directory, when delete is being checked */
	if (vcp->dvp) {
		VATTR_WANTED(&dva, va_mode);
		VATTR_WANTED(&dva, va_flags);
		if (!is_suser) {
			VATTR_WANTED(&dva, va_uid);
			VATTR_WANTED(&dva, va_gid);
			VATTR_WANTED(&dva, va_acl);
		}
		if ((result = vnode_getattr(vcp->dvp, &dva, ctx)) != 0) {
			KAUTH_DEBUG("%p    ERROR - failed to get directory vnode attributes - %d", vp, result);
			goto out;
		}
		VATTR_WANTED(&dva, va_type);
		VATTR_RETURN(&dva, va_type, vnode_vtype(vcp->dvp));
	}

	result = vnode_attr_authorize_internal(vcp, vp->v_mount, rights, is_suser,
	    &found_deny, noimmutable, parent_authorized_for_delete_child);
out:
	/* release ACLs returned by vnode_getattr() */
	if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
		kauth_acl_free(va.va_acl);
	}
	if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL)) {
		kauth_acl_free(dva.va_acl);
	}

	if (result) {
		if (parent_ref) {
			vnode_put(vp);
		}
		*errorp = result;
		KAUTH_DEBUG("%p    DENIED - auth denied", vp);
		return KAUTH_RESULT_DENY;
	}
	if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
		/*
		 * if we were successfully granted the right to search this directory
		 * and there were NO ACL DENYs for search and the posix permissions also don't
		 * deny execute, we can synthesize a global right that allows anyone to
		 * traverse this directory during a pathname lookup without having to
		 * match the credential associated with this cache of rights.
		 *
		 * Note that we can correctly cache KAUTH_VNODE_SEARCHBYANYONE
		 * only if we actually check ACLs which we don't for root. As
		 * a workaround, the lookup fast path checks for root.
		 */
		if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
		    ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
		    (S_IXUSR | S_IXGRP | S_IXOTH))) {
			vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
		}
	}
success:
	if (parent_ref) {
		vnode_put(vp);
	}

	/*
	 * Note that this implies that we will allow requests for no rights, as well as
	 * for rights that we do not recognise.  There should be none of these.
	 */
	KAUTH_DEBUG("%p    ALLOWED - auth granted", vp);
	return KAUTH_RESULT_ALLOW;
}
9248
9249 int
vnode_attr_authorize_init(struct vnode_attr * vap,struct vnode_attr * dvap,kauth_action_t action,vfs_context_t ctx)9250 vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap,
9251 kauth_action_t action, vfs_context_t ctx)
9252 {
9253 VATTR_INIT(vap);
9254 VATTR_WANTED(vap, va_type);
9255 VATTR_WANTED(vap, va_mode);
9256 VATTR_WANTED(vap, va_flags);
9257 if (dvap) {
9258 VATTR_INIT(dvap);
9259 if (action & KAUTH_VNODE_DELETE) {
9260 VATTR_WANTED(dvap, va_type);
9261 VATTR_WANTED(dvap, va_mode);
9262 VATTR_WANTED(dvap, va_flags);
9263 }
9264 } else if (action & KAUTH_VNODE_DELETE) {
9265 return EINVAL;
9266 }
9267
9268 if (!vfs_context_issuser(ctx)) {
9269 VATTR_WANTED(vap, va_uid);
9270 VATTR_WANTED(vap, va_gid);
9271 VATTR_WANTED(vap, va_acl);
9272 if (dvap && (action & KAUTH_VNODE_DELETE)) {
9273 VATTR_WANTED(dvap, va_uid);
9274 VATTR_WANTED(dvap, va_gid);
9275 VATTR_WANTED(dvap, va_acl);
9276 }
9277 }
9278
9279 return 0;
9280 }
9281
/*
 * Authorize an action against attributes alone (no vnode), e.g. attributes
 * returned by a readdir-with-attributes bulk operation.  vap/dvap must have
 * been populated per vnode_attr_authorize_init().  mp may be NULL, in which
 * case mount-level checks (read-only, noexec, ignore-ownership) are skipped.
 *
 * Returns 0 on success or an errno (EPERM from the internal check is
 * mapped to EACCES).
 */
int
vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp,
    kauth_action_t action, vfs_context_t ctx)
{
	struct _vnode_authorize_context auth_context;
	vauth_ctx vcp;
	kauth_ace_rights_t rights;
	int noimmutable;
	boolean_t found_deny;	/* written by vnode_attr_authorize_internal(); not consulted here */
	boolean_t is_suser = FALSE;
	int result = 0;

	/* attribute-only context: no vnodes are available */
	vcp = &auth_context;
	vcp->ctx = ctx;
	vcp->vp = NULLVP;
	vcp->vap = vap;
	vcp->dvp = NULLVP;
	vcp->dvap = dvap;
	vcp->flags = vcp->flags_valid = 0;

	/* separate the control bits from the requested rights */
	noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
	rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);

	/*
	 * Check for read-only filesystems.
	 */
	if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
	    mp && (mp->mnt_flag & MNT_RDONLY) &&
	    ((vap->va_type == VREG) || (vap->va_type == VDIR) ||
	    (vap->va_type == VLNK) || (rights & KAUTH_VNODE_DELETE) ||
	    (rights & KAUTH_VNODE_DELETE_CHILD))) {
		result = EROFS;
		goto out;
	}

	/*
	 * Check for noexec filesystems.
	 */
	if ((rights & KAUTH_VNODE_EXECUTE) &&
	    (vap->va_type == VREG) && mp && (mp->mnt_flag & MNT_NOEXEC)) {
		result = EACCES;
		goto out;
	}

	if (vfs_context_issuser(ctx)) {
		/*
		 * if we're not asking for execute permissions or modifications,
		 * then we're done, this action is authorized.
		 */
		if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
			goto out;
		}
		is_suser = TRUE;
	} else {
		/* caller must have gathered the attributes this check relies on */
		if (!VATTR_IS_SUPPORTED(vap, va_uid) ||
		    !VATTR_IS_SUPPORTED(vap, va_gid) ||
		    (mp && vfs_extendedsecurity(mp) && !VATTR_IS_SUPPORTED(vap, va_acl))) {
			panic("vnode attrs not complete for vnode_attr_authorize");
		}
	}

	if (mp) {
		vnode_attr_handle_mnt_ignore_ownership(vap, mp, ctx);
	}

	result = vnode_attr_authorize_internal(vcp, mp, rights, is_suser,
	    &found_deny, noimmutable, FALSE);

	/* attribute-only authorization reports permission failures as EACCES */
	if (result == EPERM) {
		result = EACCES;
	}
out:
	return result;
}
9356
9357
/*
 * Check that the attributes in vap may legally be applied to a new file
 * created in directory dvp by this context.  Thin wrapper around
 * vnode_authattr_new_internal() that discards the defaulted-fields output.
 */
int
vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
{
	return vnode_authattr_new_internal(dvp, vap, noauth, NULL, ctx);
}
9363
9364 /*
9365 * Check that the attribute information in vattr can be legally applied to
9366 * a new file by the context.
9367 */
/*
 * Default and validate the attributes for a new filesystem object.
 *
 * Parameters:	dvp			Directory the object will be created in
 *		vap			Proposed attributes; defaults are
 *					filled in for fields left inactive
 *		noauth			Nonzero when acting for the kernel:
 *					skip privilege checks
 *		defaulted_fieldsp	Optional out: VATTR_PREPARE_DEFAULTED_*
 *					bits for fields this routine defaulted
 *		ctx			Caller's context
 *
 * Returns:	0		Success
 *		EINVAL		Extended security attributes requested on a
 *				filesystem without extended security, or
 *				non-new-object attributes were set
 *		EPERM		Illegal flags/ownership/mode for this caller
 *		vnode_getattr:???
 *		kauth_cred_ismember_gid:???
 *		kauth_cred_getguid:???
 *		kauth_cred_ismember_guid:???
 */
static int
vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
{
	int error;
	int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
	uint32_t inherit_flags;
	kauth_cred_t cred;
	guid_t changer;
	mount_t dmp;
	struct vnode_attr dva;

	error = 0;

	if (defaulted_fieldsp) {
		*defaulted_fieldsp = 0;
	}

	defaulted_owner = defaulted_group = defaulted_mode = 0;

	inherit_flags = 0;

	/*
	 * Require that the filesystem support extended security to apply any.
	 */
	if (!vfs_extendedsecurity(dvp->v_mount) &&
	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Default some fields.
	 */
	dmp = dvp->v_mount;

	/*
	 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
	 * owner takes ownership of all new files.
	 */
	if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
		VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
		defaulted_owner = 1;
	} else {
		if (!VATTR_IS_ACTIVE(vap, va_uid)) {
			/* default owner is current user */
			VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
			defaulted_owner = 1;
		}
	}

	/*
	 * We need the dvp's va_flags and *may* need the gid of the directory,
	 * we ask for both here.
	 */
	VATTR_INIT(&dva);
	VATTR_WANTED(&dva, va_gid);
	VATTR_WANTED(&dva, va_flags);
	if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) {
		goto out;
	}

	/*
	 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
	 * group takes ownership of all new files.
	 */
	if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
		VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
		defaulted_group = 1;
	} else {
		if (!VATTR_IS_ACTIVE(vap, va_gid)) {
			/* default group comes from parent object, fallback to current user */
			if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
				VATTR_SET(vap, va_gid, dva.va_gid);
			} else {
				VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
			}
			defaulted_group = 1;
		}
	}

	if (!VATTR_IS_ACTIVE(vap, va_flags)) {
		VATTR_SET(vap, va_flags, 0);
	}

	/* Determine if SF_RESTRICTED should be inherited from the parent
	 * directory. */
	if (VATTR_IS_SUPPORTED(&dva, va_flags)) {
		inherit_flags = dva.va_flags & (UF_DATAVAULT | SF_RESTRICTED);
	}

	/* default mode is everything, masked with current umask */
	if (!VATTR_IS_ACTIVE(vap, va_mode)) {
		VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd.fd_cmask);
		KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o",
		    vap->va_mode, vfs_context_proc(ctx)->p_fd.fd_cmask);
		defaulted_mode = 1;
	}
	/* set timestamps to now */
	if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
		nanotime(&vap->va_create_time);
		VATTR_SET_ACTIVE(vap, va_create_time);
	}

	/*
	 * Check for attempts to set nonsensical fields.
	 */
	if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
		error = EINVAL;
		KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
		    vap->va_active & ~VNODE_ATTR_NEWOBJ);
		goto out;
	}

	/*
	 * Quickly check for the applicability of any enforcement here.
	 * Tests below maintain the integrity of the local security model.
	 */
	if (vfs_authopaque(dvp->v_mount)) {
		goto out;
	}

	/*
	 * We need to know if the caller is the superuser, or if the work is
	 * otherwise already authorised.
	 */
	cred = vfs_context_ucred(ctx);
	if (noauth) {
		/* doing work for the kernel */
		has_priv_suser = 1;
	} else {
		has_priv_suser = vfs_context_issuser(ctx);
	}


	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		/* SF_SYNTHETIC flags are never stored; strip before validation */
		vap->va_flags &= ~SF_SYNTHETIC;
		if (has_priv_suser) {
			if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
				error = EPERM;
				KAUTH_DEBUG("  DENIED - superuser attempt to set illegal flag(s)");
				goto out;
			}
		} else {
			if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
				error = EPERM;
				KAUTH_DEBUG("  DENIED - user attempt to set illegal flag(s)");
				goto out;
			}
		}
	}

	/* if not superuser, validate legality of new-item attributes */
	if (!has_priv_suser) {
		if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
			/* setgid? */
			if (vap->va_mode & S_ISGID) {
				if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
					KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
					goto out;
				}
				if (!ismember) {
					KAUTH_DEBUG("  DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
					error = EPERM;
					goto out;
				}
			}

			/* setuid? */
			if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
				KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
				error = EPERM;
				goto out;
			}
		}
		if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
			KAUTH_DEBUG("  DENIED - cannot create new item owned by %d", vap->va_uid);
			error = EPERM;
			goto out;
		}
		if (!defaulted_group) {
			if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
				KAUTH_DEBUG("  ERROR - got %d checking for membership in %d", error, vap->va_gid);
				goto out;
			}
			if (!ismember) {
				KAUTH_DEBUG("  DENIED - cannot create new item with group %d - not a member", vap->va_gid);
				error = EPERM;
				goto out;
			}
		}

		/* initialising owner/group UUID */
		if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
			if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
				KAUTH_DEBUG("  ERROR - got %d trying to get caller UUID", error);
				/* XXX ENOENT here - no GUID - should perhaps become EPERM */
				goto out;
			}
			if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
				KAUTH_DEBUG("  ERROR - cannot create item with supplied owner UUID - not us");
				error = EPERM;
				goto out;
			}
		}
		if (VATTR_IS_ACTIVE(vap, va_guuid)) {
			if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
				KAUTH_DEBUG("  ERROR - got %d trying to check group membership", error);
				goto out;
			}
			if (!ismember) {
				KAUTH_DEBUG("  ERROR - cannot create item with supplied group UUID - not a member");
				error = EPERM;
				goto out;
			}
		}
	}
out:
	if (inherit_flags) {
		/* Apply SF_RESTRICTED to the file if its parent directory was
		 * restricted.  This is done at the end so that root is not
		 * required if this flag is only set due to inheritance. */
		VATTR_SET(vap, va_flags, (vap->va_flags | inherit_flags));
	}
	if (defaulted_fieldsp) {
		if (defaulted_mode) {
			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_MODE;
		}
		if (defaulted_group) {
			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_GID;
		}
		if (defaulted_owner) {
			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_UID;
		}
	}
	return error;
}
9604
9605 /*
9606 * Check that the attribute information in vap can be legally written by the
9607 * context.
9608 *
9609 * Call this when you're not sure about the vnode_attr; either its contents
9610 * have come from an unknown source, or when they are variable.
9611 *
9612 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
9613 * must be authorized to be permitted to write the vattr.
9614 */
9615 int
vnode_authattr(vnode_t vp,struct vnode_attr * vap,kauth_action_t * actionp,vfs_context_t ctx)9616 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
9617 {
9618 struct vnode_attr ova;
9619 kauth_action_t required_action;
9620 int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
9621 guid_t changer;
9622 gid_t group;
9623 uid_t owner;
9624 mode_t newmode;
9625 kauth_cred_t cred;
9626 uint32_t fdelta;
9627
9628 VATTR_INIT(&ova);
9629 required_action = 0;
9630 error = 0;
9631
9632 /*
9633 * Quickly check for enforcement applicability.
9634 */
9635 if (vfs_authopaque(vp->v_mount)) {
9636 goto out;
9637 }
9638
9639 /*
9640 * Check for attempts to set nonsensical fields.
9641 */
9642 if (vap->va_active & VNODE_ATTR_RDONLY) {
9643 KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
9644 error = EINVAL;
9645 goto out;
9646 }
9647
9648 /*
9649 * We need to know if the caller is the superuser.
9650 */
9651 cred = vfs_context_ucred(ctx);
9652 has_priv_suser = kauth_cred_issuser(cred);
9653
9654 /*
9655 * If any of the following are changing, we need information from the old file:
9656 * va_uid
9657 * va_gid
9658 * va_mode
9659 * va_uuuid
9660 * va_guuid
9661 */
9662 if (VATTR_IS_ACTIVE(vap, va_uid) ||
9663 VATTR_IS_ACTIVE(vap, va_gid) ||
9664 VATTR_IS_ACTIVE(vap, va_mode) ||
9665 VATTR_IS_ACTIVE(vap, va_uuuid) ||
9666 VATTR_IS_ACTIVE(vap, va_guuid)) {
9667 VATTR_WANTED(&ova, va_mode);
9668 VATTR_WANTED(&ova, va_uid);
9669 VATTR_WANTED(&ova, va_gid);
9670 VATTR_WANTED(&ova, va_uuuid);
9671 VATTR_WANTED(&ova, va_guuid);
9672 KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
9673 }
9674
9675 /*
9676 * If timestamps are being changed, we need to know who the file is owned
9677 * by.
9678 */
9679 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
9680 VATTR_IS_ACTIVE(vap, va_change_time) ||
9681 VATTR_IS_ACTIVE(vap, va_modify_time) ||
9682 VATTR_IS_ACTIVE(vap, va_access_time) ||
9683 VATTR_IS_ACTIVE(vap, va_backup_time) ||
9684 VATTR_IS_ACTIVE(vap, va_addedtime)) {
9685 VATTR_WANTED(&ova, va_uid);
9686 #if 0 /* enable this when we support UUIDs as official owners */
9687 VATTR_WANTED(&ova, va_uuuid);
9688 #endif
9689 KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
9690 }
9691
9692 /*
9693 * If flags are being changed, we need the old flags.
9694 */
9695 if (VATTR_IS_ACTIVE(vap, va_flags)) {
9696 KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
9697 VATTR_WANTED(&ova, va_flags);
9698 }
9699
9700 /*
9701 * If ACLs are being changed, we need the old ACLs.
9702 */
9703 if (VATTR_IS_ACTIVE(vap, va_acl)) {
9704 KAUTH_DEBUG("ATTR - acl changing, fetching old flags");
9705 VATTR_WANTED(&ova, va_acl);
9706 }
9707
9708 /*
9709 * If the size is being set, make sure it's not a directory.
9710 */
9711 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
9712 /* size is only meaningful on regular files, don't permit otherwise */
9713 if (!vnode_isreg(vp)) {
9714 KAUTH_DEBUG("ATTR - ERROR: size change requested on non-file");
9715 error = vnode_isdir(vp) ? EISDIR : EINVAL;
9716 goto out;
9717 }
9718 }
9719
9720 /*
9721 * Get old data.
9722 */
9723 KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
9724 if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
9725 KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error);
9726 goto out;
9727 }
9728
9729 /*
9730 * Size changes require write access to the file data.
9731 */
9732 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
9733 /* if we can't get the size, or it's different, we need write access */
9734 KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
9735 required_action |= KAUTH_VNODE_WRITE_DATA;
9736 }
9737
9738 /*
9739 * Changing timestamps?
9740 *
9741 * Note that we are only called to authorize user-requested time changes;
9742 * side-effect time changes are not authorized. Authorisation is only
9743 * required for existing files.
9744 *
9745 * Non-owners are not permitted to change the time on an existing
9746 * file to anything other than the current time.
9747 */
9748 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
9749 VATTR_IS_ACTIVE(vap, va_change_time) ||
9750 VATTR_IS_ACTIVE(vap, va_modify_time) ||
9751 VATTR_IS_ACTIVE(vap, va_access_time) ||
9752 VATTR_IS_ACTIVE(vap, va_backup_time) ||
9753 VATTR_IS_ACTIVE(vap, va_addedtime)) {
9754 /*
9755 * The owner and root may set any timestamps they like,
9756 * provided that the file is not immutable. The owner still needs
9757 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
9758 */
9759 if (has_priv_suser || vauth_node_owner(&ova, cred)) {
9760 KAUTH_DEBUG("ATTR - root or owner changing timestamps");
9761 required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
9762 } else {
9763 /* just setting the current time? */
9764 if (vap->va_vaflags & VA_UTIMES_NULL) {
9765 KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
9766 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
9767 } else {
9768 KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
9769 error = EACCES;
9770 goto out;
9771 }
9772 }
9773 }
9774
9775 /*
9776 * Changing file mode?
9777 */
9778 if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
9779 KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
9780
9781 /*
9782 * Mode changes always have the same basic auth requirements.
9783 */
9784 if (has_priv_suser) {
9785 KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
9786 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
9787 } else {
9788 /* need WRITE_SECURITY */
9789 KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
9790 required_action |= KAUTH_VNODE_WRITE_SECURITY;
9791 }
9792
9793 /*
9794 * Can't set the setgid bit if you're not in the group and not root. Have to have
9795 * existing group information in the case we're not setting it right now.
9796 */
9797 if (vap->va_mode & S_ISGID) {
9798 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
9799 if (!has_priv_suser) {
9800 if (VATTR_IS_ACTIVE(vap, va_gid)) {
9801 group = vap->va_gid;
9802 } else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
9803 group = ova.va_gid;
9804 } else {
9805 KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
9806 error = EINVAL;
9807 goto out;
9808 }
9809 /*
9810 * This might be too restrictive; WRITE_SECURITY might be implied by
9811 * membership in this case, rather than being an additional requirement.
9812 */
9813 if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
9814 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
9815 goto out;
9816 }
9817 if (!ismember) {
9818 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group);
9819 error = EPERM;
9820 goto out;
9821 }
9822 }
9823 }
9824
9825 /*
9826 * Can't set the setuid bit unless you're root or the file's owner.
9827 */
9828 if (vap->va_mode & S_ISUID) {
9829 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
9830 if (!has_priv_suser) {
9831 if (VATTR_IS_ACTIVE(vap, va_uid)) {
9832 owner = vap->va_uid;
9833 } else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
9834 owner = ova.va_uid;
9835 } else {
9836 KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
9837 error = EINVAL;
9838 goto out;
9839 }
9840 if (owner != kauth_cred_getuid(cred)) {
9841 /*
9842 * We could allow this if WRITE_SECURITY is permitted, perhaps.
9843 */
9844 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
9845 error = EPERM;
9846 goto out;
9847 }
9848 }
9849 }
9850 }
9851
9852 /*
9853 * Validate/mask flags changes. This checks that only the flags in
9854 * the UF_SETTABLE mask are being set, and preserves the flags in
9855 * the SF_SETTABLE case.
9856 *
9857 * Since flags changes may be made in conjunction with other changes,
9858 * we will ask the auth code to ignore immutability in the case that
9859 * the SF_* flags are not set and we are only manipulating the file flags.
9860 *
9861 */
9862 if (VATTR_IS_ACTIVE(vap, va_flags)) {
9863 /* compute changing flags bits */
9864 vap->va_flags &= ~SF_SYNTHETIC;
9865 ova.va_flags &= ~SF_SYNTHETIC;
9866 if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
9867 fdelta = vap->va_flags ^ ova.va_flags;
9868 } else {
9869 fdelta = vap->va_flags;
9870 }
9871
9872 if (fdelta != 0) {
9873 KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
9874 required_action |= KAUTH_VNODE_WRITE_SECURITY;
9875
9876 /* check that changing bits are legal */
9877 if (has_priv_suser) {
9878 /*
9879 * The immutability check will prevent us from clearing the SF_*
9880 * flags unless the system securelevel permits it, so just check
9881 * for legal flags here.
9882 */
9883 if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
9884 error = EPERM;
9885 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
9886 goto out;
9887 }
9888 } else {
9889 if (fdelta & ~UF_SETTABLE) {
9890 error = EPERM;
9891 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
9892 goto out;
9893 }
9894 }
9895 /*
9896 * If the caller has the ability to manipulate file flags,
9897 * security is not reduced by ignoring them for this operation.
9898 *
9899 * A more complete test here would consider the 'after' states of the flags
9900 * to determine whether it would permit the operation, but this becomes
9901 * very complex.
9902 *
9903 * Ignoring immutability is conditional on securelevel; this does not bypass
9904 * the SF_* flags if securelevel > 0.
9905 */
9906 required_action |= KAUTH_VNODE_NOIMMUTABLE;
9907 }
9908 }
9909
9910 /*
9911 * Validate ownership information.
9912 */
9913 chowner = 0;
9914 chgroup = 0;
9915 clear_suid = 0;
9916 clear_sgid = 0;
9917
9918 /*
9919 * uid changing
9920 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
9921 * support them in general, and will ignore it if/when we try to set it.
9922 * We might want to clear the uid out of vap completely here.
9923 */
9924 if (VATTR_IS_ACTIVE(vap, va_uid)) {
9925 if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
9926 if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
9927 KAUTH_DEBUG(" DENIED - non-superuser cannot change ownershipt to a third party");
9928 error = EPERM;
9929 goto out;
9930 }
9931 chowner = 1;
9932 }
9933 clear_suid = 1;
9934 }
9935
9936 /*
9937 * gid changing
9938 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
9939 * support them in general, and will ignore it if/when we try to set it.
9940 * We might want to clear the gid out of vap completely here.
9941 */
9942 if (VATTR_IS_ACTIVE(vap, va_gid)) {
9943 if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
9944 if (!has_priv_suser) {
9945 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
9946 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
9947 goto out;
9948 }
9949 if (!ismember) {
9950 KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
9951 ova.va_gid, vap->va_gid);
9952 error = EPERM;
9953 goto out;
9954 }
9955 }
9956 chgroup = 1;
9957 }
9958 clear_sgid = 1;
9959 }
9960
9961 /*
9962 * Owner UUID being set or changed.
9963 */
9964 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
9965 /* if the owner UUID is not actually changing ... */
9966 if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
9967 if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid)) {
9968 goto no_uuuid_change;
9969 }
9970
9971 /*
9972 * If the current owner UUID is a null GUID, check
9973 * it against the UUID corresponding to the owner UID.
9974 */
9975 if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
9976 VATTR_IS_SUPPORTED(&ova, va_uid)) {
9977 guid_t uid_guid;
9978
9979 if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
9980 kauth_guid_equal(&vap->va_uuuid, &uid_guid)) {
9981 goto no_uuuid_change;
9982 }
9983 }
9984 }
9985
9986 /*
9987 * The owner UUID cannot be set by a non-superuser to anything other than
9988 * their own or a null GUID (to "unset" the owner UUID).
9989 * Note that file systems must be prepared to handle the
9990 * null UUID case in a manner appropriate for that file
9991 * system.
9992 */
9993 if (!has_priv_suser) {
9994 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
9995 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
9996 /* XXX ENOENT here - no UUID - should perhaps become EPERM */
9997 goto out;
9998 }
9999 if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
10000 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
10001 KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
10002 error = EPERM;
10003 goto out;
10004 }
10005 }
10006 chowner = 1;
10007 clear_suid = 1;
10008 }
10009 no_uuuid_change:
10010 /*
10011 * Group UUID being set or changed.
10012 */
10013 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
10014 /* if the group UUID is not actually changing ... */
10015 if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
10016 if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid)) {
10017 goto no_guuid_change;
10018 }
10019
10020 /*
10021 * If the current group UUID is a null UUID, check
10022 * it against the UUID corresponding to the group GID.
10023 */
10024 if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
10025 VATTR_IS_SUPPORTED(&ova, va_gid)) {
10026 guid_t gid_guid;
10027
10028 if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
10029 kauth_guid_equal(&vap->va_guuid, &gid_guid)) {
10030 goto no_guuid_change;
10031 }
10032 }
10033 }
10034
10035 /*
10036 * The group UUID cannot be set by a non-superuser to anything other than
10037 * one of which they are a member or a null GUID (to "unset"
10038 * the group UUID).
10039 * Note that file systems must be prepared to handle the
10040 * null UUID case in a manner appropriate for that file
10041 * system.
10042 */
10043 if (!has_priv_suser) {
10044 if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
10045 ismember = 1;
10046 } else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
10047 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
10048 goto out;
10049 }
10050 if (!ismember) {
10051 KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
10052 error = EPERM;
10053 goto out;
10054 }
10055 }
10056 chgroup = 1;
10057 }
10058 no_guuid_change:
10059
10060 /*
10061 * Compute authorisation for group/ownership changes.
10062 */
10063 if (chowner || chgroup || clear_suid || clear_sgid) {
10064 if (has_priv_suser) {
10065 KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
10066 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
10067 } else {
10068 if (chowner) {
10069 KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
10070 required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
10071 }
10072 if (chgroup && !chowner) {
10073 KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
10074 required_action |= KAUTH_VNODE_WRITE_SECURITY;
10075 }
10076 }
10077
10078 /*
10079 * clear set-uid and set-gid bits. POSIX only requires this for
10080 * non-privileged processes but we do it even for root.
10081 */
10082 if (VATTR_IS_ACTIVE(vap, va_mode)) {
10083 newmode = vap->va_mode;
10084 } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
10085 newmode = ova.va_mode;
10086 } else {
10087 KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
10088 newmode = 0;
10089 }
10090
10091 /* chown always clears setuid/gid bits. An exception is made for
10092 * setattrlist which can set both at the same time: <uid, gid, mode> on a file:
10093 * setattrlist is allowed to set the new mode on the file and change (chown)
10094 * uid/gid.
10095 */
10096 if (newmode & (S_ISUID | S_ISGID)) {
10097 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
10098 KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o",
10099 newmode, newmode & ~(S_ISUID | S_ISGID));
10100 newmode &= ~(S_ISUID | S_ISGID);
10101 }
10102 VATTR_SET(vap, va_mode, newmode);
10103 }
10104 }
10105
10106 /*
10107 * Authorise changes in the ACL.
10108 */
10109 if (VATTR_IS_ACTIVE(vap, va_acl)) {
10110 /* no existing ACL */
10111 if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
10112 /* adding an ACL */
10113 if (vap->va_acl != NULL) {
10114 required_action |= KAUTH_VNODE_WRITE_SECURITY;
10115 KAUTH_DEBUG("CHMOD - adding ACL");
10116 }
10117
10118 /* removing an existing ACL */
10119 } else if (vap->va_acl == NULL) {
10120 required_action |= KAUTH_VNODE_WRITE_SECURITY;
10121 KAUTH_DEBUG("CHMOD - removing ACL");
10122
10123 /* updating an existing ACL */
10124 } else {
10125 if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
10126 /* entry count changed, must be different */
10127 required_action |= KAUTH_VNODE_WRITE_SECURITY;
10128 KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
10129 } else if (vap->va_acl->acl_entrycount > 0) {
10130 /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
10131 if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
10132 sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) {
10133 required_action |= KAUTH_VNODE_WRITE_SECURITY;
10134 KAUTH_DEBUG("CHMOD - changing ACL entries");
10135 }
10136 }
10137 }
10138 }
10139
10140 /*
10141 * Other attributes that require authorisation.
10142 */
10143 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
10144 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
10145 }
10146
10147 out:
10148 if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL)) {
10149 kauth_acl_free(ova.va_acl);
10150 }
10151 if (error == 0) {
10152 *actionp = required_action;
10153 }
10154 return error;
10155 }
10156
/*
 * vnode_iterate() callback used by vfs_setlocklocal(): mark a single
 * vnode with VLOCKLOCAL.  Only a flag bit is flipped under the lock,
 * so the spin variant of the vnode lock is sufficient.  Returning
 * VNODE_RETURNED lets the iteration continue over the mount's vnodes.
 */
static int
setlocklocal_callback(struct vnode *vp, __unused void *cargs)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VLOCKLOCAL;
	vnode_unlock(vp);

	return VNODE_RETURNED;
}
10166
/*
 * Mark a mount (and all of its currently-instantiated vnodes) as using
 * local advisory locking.  New vnodes presumably inherit the behavior
 * from MNTK_LOCK_LOCAL on the mount — TODO confirm against vnode
 * creation path.
 */
void
vfs_setlocklocal(mount_t mp)
{
	mount_lock_spin(mp);
	mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
	mount_unlock(mp);

	/*
	 * The number of active vnodes is expected to be
	 * very small when vfs_setlocklocal is invoked.
	 */
	vnode_iterate(mp, 0, setlocklocal_callback, NULL);
}
10180
/*
 * Advertise that this mount's filesystem implements the compound
 * open VNOP (COMPOUND_VNOP_OPEN bit in mnt_compound_ops).
 */
void
vfs_setcompoundopen(mount_t mp)
{
	mount_lock_spin(mp);
	mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN;
	mount_unlock(mp);
}
10188
/*
 * Flag the mount containing vp as hosting swap (MNTK_SWAP_MOUNT),
 * under the full (blocking) mount lock.
 */
void
vnode_setswapmount(vnode_t vp)
{
	mount_lock(vp->v_mount);
	vp->v_mount->mnt_kern_flag |= MNTK_SWAP_MOUNT;
	mount_unlock(vp->v_mount);
}
10196
10197
10198 int64_t
vnode_getswappin_avail(vnode_t vp)10199 vnode_getswappin_avail(vnode_t vp)
10200 {
10201 int64_t max_swappin_avail = 0;
10202
10203 mount_lock(vp->v_mount);
10204 if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_SWAPPIN_SUPPORTED) {
10205 max_swappin_avail = vp->v_mount->mnt_max_swappin_available;
10206 }
10207 mount_unlock(vp->v_mount);
10208
10209 return max_swappin_avail;
10210 }
10211
10212
/*
 * Mark vp as busy with a union-mount operation (VISUNION).  Other
 * threads block in vn_checkunionwait() until vn_clearunionwait()
 * clears the flag and wakes them.
 */
void
vn_setunionwait(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VISUNION;
	vnode_unlock(vp);
}
10220
10221
/*
 * Block until any in-progress union-mount operation on vp completes.
 * Sleeps on &vp->v_flag; the wakeup comes from vn_clearunionwait().
 * The re-check loop guards against spurious wakeups.
 */
void
vn_checkunionwait(vnode_t vp)
{
	vnode_lock_spin(vp);
	while ((vp->v_flag & VISUNION) == VISUNION) {
		msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
	}
	vnode_unlock(vp);
}
10231
/*
 * Clear the VISUNION busy flag on vp and wake any threads sleeping in
 * vn_checkunionwait().  'locked' indicates the caller already holds
 * the vnode lock; otherwise it is taken (spin variant) here.
 */
void
vn_clearunionwait(vnode_t vp, int locked)
{
	if (!locked) {
		vnode_lock_spin(vp);
	}
	/* only wake waiters if the flag was actually set */
	if ((vp->v_flag & VISUNION) == VISUNION) {
		vp->v_flag &= ~VISUNION;
		wakeup((caddr_t)&vp->v_flag);
	}
	if (!locked) {
		vnode_unlock(vp);
	}
}
10246
10247 int
vnode_materialize_dataless_file(vnode_t vp,uint64_t op_type)10248 vnode_materialize_dataless_file(vnode_t vp, uint64_t op_type)
10249 {
10250 int error;
10251
10252 /* Swap files are special; ignore them */
10253 if (vnode_isswap(vp)) {
10254 return 0;
10255 }
10256
10257 error = resolve_nspace_item(vp,
10258 op_type | NAMESPACE_HANDLER_NSPACE_EVENT);
10259
10260 /*
10261 * The file resolver owns the logic about what error to return
10262 * to the caller. We only need to handle a couple of special
10263 * cases here:
10264 */
10265 if (error == EJUSTRETURN) {
10266 /*
10267 * The requesting process is allowed to interact with
10268 * dataless objects. Make a couple of sanity-checks
10269 * here to ensure the action makes sense.
10270 */
10271 switch (op_type) {
10272 case NAMESPACE_HANDLER_WRITE_OP:
10273 case NAMESPACE_HANDLER_TRUNCATE_OP:
10274 case NAMESPACE_HANDLER_RENAME_OP:
10275 /*
10276 * This handles the case of the resolver itself
10277 * writing data to the file (or throwing it
10278 * away).
10279 */
10280 error = 0;
10281 break;
10282 case NAMESPACE_HANDLER_READ_OP:
10283 /*
10284 * This handles the case of the resolver needing
10285 * to look up inside of a dataless directory while
10286 * it's in the process of materializing it (for
10287 * example, creating files or directories).
10288 */
10289 error = (vnode_vtype(vp) == VDIR) ? 0 : EBADF;
10290 break;
10291 default:
10292 error = EBADF;
10293 break;
10294 }
10295 }
10296
10297 return error;
10298 }
10299
10300 /*
10301 * Removes orphaned apple double files during a rmdir
10302 * Works by:
10303 * 1. vnode_suspend().
10304 * 2. Call VNOP_READDIR() till the end of directory is reached.
10305 * 3. Check if the directory entries returned are regular files with name starting with "._". If not, return ENOTEMPTY.
10306 * 4. Continue (2) and (3) till end of directory is reached.
10307 * 5. If all the entries in the directory were files with "._" name, delete all the files.
10308 * 6. vnode_resume()
10309 * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
10310 */
10311
/*
 * See the block comment above: suspend the directory vnode, make one
 * read-only pass to verify every entry is an orphaned "._" AppleDouble
 * file, then a second pass unlinking them.  The caller retries the
 * rmdir afterwards.  Returns ENOTEMPTY as soon as any non-"._" entry
 * (or a "._._" entry) is seen; sets *restart_flag when the vnode was
 * busy so the calling rmdir can sleep and retry.
 */
errno_t
rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int * restart_flag)
{
#define UIO_BUFF_SIZE 2048
	uio_t auio = NULL;
	int eofflag, siz = UIO_BUFF_SIZE, alloc_size = 0, nentries = 0;
	int open_flag = 0, full_erase_flag = 0;
	uio_stackbuf_t uio_buf[UIO_SIZEOF(1)];
	char *rbuf = NULL;
	void *dir_pos;
	void *dir_end;
	struct dirent *dp;
	errno_t error;

	error = vnode_suspend(vp);

	/*
	 * restart_flag is set so that the calling rmdir sleeps and resets
	 */
	if (error == EBUSY) {
		*restart_flag = 1;
	}
	if (error != 0) {
		return error;
	}

	/*
	 * Prevent dataless fault materialization while we have
	 * a suspended vnode.
	 */
	uthread_t ut = current_uthread();
	bool saved_nodatalessfaults =
	    (ut->uu_flag & UT_NSPACE_NODATALESSFAULTS) ? true : false;
	ut->uu_flag |= UT_NSPACE_NODATALESSFAULTS;

	/*
	 * set up UIO: a single-iov uio backed by the on-stack uio_buf,
	 * reading directory entries into rbuf.
	 */
	rbuf = kalloc_data(siz, Z_WAITOK);
	alloc_size = siz;
	if (rbuf) {
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
		    &uio_buf[0], sizeof(uio_buf));
	}
	if (!rbuf || !auio) {
		error = ENOMEM;
		goto outsc;
	}

	uio_setoffset(auio, 0);

	eofflag = 0;

	/* the directory must be opened before VNOP_READDIR may be called */
	if ((error = VNOP_OPEN(vp, FREAD, ctx))) {
		goto outsc;
	} else {
		open_flag = 1;
	}

	/*
	 * First pass checks if all files are appleDouble files.
	 */

	do {
		siz = UIO_BUFF_SIZE;
		uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
		uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);

		if ((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx))) {
			goto outsc;
		}

		/* siz becomes the number of bytes actually read */
		if (uio_resid(auio) != 0) {
			siz -= uio_resid(auio);
		}

		/*
		 * Iterate through directory
		 */
		dir_pos = (void*) rbuf;
		dir_end = (void*) (rbuf + siz);
		dp = (struct dirent*) (dir_pos);

		/* an empty read means we are done */
		if (dir_pos == dir_end) {
			eofflag = 1;
		}

		while (dir_pos < dir_end) {
			/*
			 * Check for . and .. as well as directories
			 */
			if (dp->d_ino != 0 &&
			    !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
				/*
				 * Check for irregular files and ._ files
				 * If there is a ._._ file abort the op
				 */
				if (dp->d_namlen < 2 ||
				    strncmp(dp->d_name, "._", 2) ||
				    (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._", 2))) {
					error = ENOTEMPTY;
					goto outsc;
				}
			}
			/* advance by the record length stored in each dirent */
			dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
			dp = (struct dirent*)dir_pos;
		}

		/*
		 * workaround for HFS/NFS setting eofflag before end of file
		 */
		if (vp->v_tag == VT_HFS && nentries > 2) {
			eofflag = 0;
		}

		if (vp->v_tag == VT_NFS) {
			if (eofflag && !full_erase_flag) {
				/* force one extra full pass from offset 0 for NFS */
				full_erase_flag = 1;
				eofflag = 0;
				uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
			} else if (!eofflag && full_erase_flag) {
				full_erase_flag = 0;
			}
		}
	} while (!eofflag);
	/*
	 * If we've made it here all the files in the dir are ._ files.
	 * We can delete the files even though the node is suspended
	 * because we are the owner of the file.
	 */

	/* rewind for the deletion pass */
	uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
	eofflag = 0;
	full_erase_flag = 0;

	do {
		siz = UIO_BUFF_SIZE;
		uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
		uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);

		error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);

		if (error != 0) {
			goto outsc;
		}

		if (uio_resid(auio) != 0) {
			siz -= uio_resid(auio);
		}

		/*
		 * Iterate through directory
		 */
		dir_pos = (void*) rbuf;
		dir_end = (void*) (rbuf + siz);
		dp = (struct dirent*) dir_pos;

		if (dir_pos == dir_end) {
			eofflag = 1;
		}

		while (dir_pos < dir_end) {
			/*
			 * Check for . and .. as well as directories
			 */
			if (dp->d_ino != 0 &&
			    !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
			    ) {
				/* unlink each "._" entry; skip namespace events and path audit */
				error = unlink1(ctx, vp,
				    CAST_USER_ADDR_T(dp->d_name), UIO_SYSSPACE,
				    VNODE_REMOVE_SKIP_NAMESPACE_EVENT |
				    VNODE_REMOVE_NO_AUDIT_PATH);

				/* a concurrent removal (ENOENT) is not fatal */
				if (error && error != ENOENT) {
					goto outsc;
				}
			}
			dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
			dp = (struct dirent*)dir_pos;
		}

		/*
		 * workaround for HFS/NFS setting eofflag before end of file
		 */
		if (vp->v_tag == VT_HFS && nentries > 2) {
			eofflag = 0;
		}

		if (vp->v_tag == VT_NFS) {
			if (eofflag && !full_erase_flag) {
				full_erase_flag = 1;
				eofflag = 0;
				uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
			} else if (!eofflag && full_erase_flag) {
				full_erase_flag = 0;
			}
		}
	} while (!eofflag);


	error = 0;

outsc:
	/* common cleanup: close, free buffers, restore flags, resume vnode */
	if (open_flag) {
		VNOP_CLOSE(vp, FREAD, ctx);
	}

	if (auio) {
		uio_free(auio);
	}
	kfree_data(rbuf, alloc_size);

	/* only clear the no-dataless-faults flag if we were the ones to set it */
	if (saved_nodatalessfaults == false) {
		ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS;
	}

	vnode_resume(vp);

	return error;
}
10534
10535
/*
 * Deliver a kevent to any knotes attached to vp.  The emptiness check
 * is performed without the vnode lock as an optimization; the lock is
 * taken only when there is at least one knote to post to.
 */
void
lock_vnode_and_post(vnode_t vp, int kevent_num)
{
	/* Only take the lock if there's something there! */
	if (vp->v_knotes.slh_first != NULL) {
		vnode_lock(vp);
		KNOTE(&vp->v_knotes, kevent_num);
		vnode_unlock(vp);
	}
}
10546
10547 void panic_print_vnodes(void);
10548
10549 /* define PANIC_PRINTS_VNODES only if investigation is required. */
10550 #ifdef PANIC_PRINTS_VNODES
10551
10552 static const char *
__vtype(uint16_t vtype)10553 __vtype(uint16_t vtype)
10554 {
10555 switch (vtype) {
10556 case VREG:
10557 return "R";
10558 case VDIR:
10559 return "D";
10560 case VBLK:
10561 return "B";
10562 case VCHR:
10563 return "C";
10564 case VLNK:
10565 return "L";
10566 case VSOCK:
10567 return "S";
10568 case VFIFO:
10569 return "F";
10570 case VBAD:
10571 return "x";
10572 case VSTR:
10573 return "T";
10574 case VCPLX:
10575 return "X";
10576 default:
10577 return "?";
10578 }
10579 }
10580
10581 /*
10582 * build a path from the bottom up
10583 * NOTE: called from the panic path - no alloc'ing of memory and no locks!
10584 */
/*
 * build a path from the bottom up
 * NOTE: called from the panic path - no alloc'ing of memory and no locks!
 *
 * The path is assembled right-to-left into str[0..len): each recursion
 * level copies its component just before the previous one and prefixes
 * a '/'.  Returns a pointer to the first character of the assembled
 * path (somewhere inside str); str + len is the end of the buffer.
 */
static char *
__vpath(vnode_t vp, char *str, int len, int depth)
{
	int  vnm_len;
	const char *src;
	char *dst;

	if (len <= 0) {
		return str;
	}
	/* str + len is the start of the string we created */
	if (!vp->v_name) {
		return str + len;
	}

	/* follow mount vnodes to get the full path */
	if ((vp->v_flag & VROOT)) {
		if (vp->v_mount != NULL && vp->v_mount->mnt_vnodecovered) {
			return __vpath(vp->v_mount->mnt_vnodecovered,
			           str, len, depth + 1);
		}
		return str + len;
	}

	src = vp->v_name;
	vnm_len = strlen(src);
	if (vnm_len > len) {
		/* truncate the name to fit in the string */
		src += (vnm_len - len);
		vnm_len = len;
	}

	/* start from the back and copy just characters (no NULLs) */

	/* this will chop off leaf path (file) names */
	if (depth > 0) {
		dst = str + len - vnm_len;
		memcpy(dst, src, vnm_len);
		len -= vnm_len;
	} else {
		dst = str + len;
	}

	if (vp->v_parent && len > 1) {
		/* follow parents up the chain */
		len--;
		*(dst - 1) = '/';
		return __vpath(vp->v_parent, str, len, depth + 1);
	}

	return dst;
}
10637
10638 #define SANE_VNODE_PRINT_LIMIT 5000
/*
 * Append a dump of every vnode on every mount to the panic log, up to
 * SANE_VNODE_PRINT_LIMIT entries.  Runs in panic context: mount and
 * vnode pointers are validated with ml_validate_nofault() before being
 * dereferenced, since the lists may be corrupt mid-panic and no locks
 * can be taken.
 */
void
panic_print_vnodes(void)
{
	mount_t mnt;
	vnode_t vp;
	int nvnodes = 0;
	const char *type;
	char *nm;
	char vname[257];

	paniclog_append_noflush("\n***** VNODES *****\n"
	    "TYPE UREF ICNT PATH\n");

	/* NULL-terminate the path name */
	vname[sizeof(vname) - 1] = '\0';

	/*
	 * iterate all vnodelist items in all mounts (mntlist) -> mnt_vnodelist
	 */
	TAILQ_FOREACH(mnt, &mountlist, mnt_list) {
		if (!ml_validate_nofault((vm_offset_t)mnt, sizeof(mount_t))) {
			paniclog_append_noflush("Unable to iterate the mount list %p - encountered an invalid mount pointer %p \n",
			    &mountlist, mnt);
			break;
		}

		TAILQ_FOREACH(vp, &mnt->mnt_vnodelist, v_mntvnodes) {
			if (!ml_validate_nofault((vm_offset_t)vp, sizeof(vnode_t))) {
				paniclog_append_noflush("Unable to iterate the vnode list %p - encountered an invalid vnode pointer %p \n",
				    &mnt->mnt_vnodelist, vp);
				break;
			}

			/* cap the output so a huge vnode table can't flood the panic log */
			if (++nvnodes > SANE_VNODE_PRINT_LIMIT) {
				return;
			}
			type = __vtype(vp->v_type);
			/* __vpath fills vname from the back; nm points at the start of the path */
			nm = __vpath(vp, vname, sizeof(vname) - 1, 0);
			paniclog_append_noflush("%s %0d %0d %s\n",
			    type, vp->v_usecount, vp->v_iocount, nm);
		}
	}
}
10682
10683 #else /* !PANIC_PRINTS_VNODES */
/* Stub: vnode dumping at panic time is compiled out. */
void
panic_print_vnodes(void)
{
}
10689 #endif
10690
10691
10692 #ifdef CONFIG_IOCOUNT_TRACE
10693 static void
record_iocount_trace_vnode(vnode_t vp,int type)10694 record_iocount_trace_vnode(vnode_t vp, int type)
10695 {
10696 void *stacks[IOCOUNT_TRACE_MAX_FRAMES] = {0};
10697 int idx = vp->v_iocount_trace[type].idx;
10698
10699 if (idx >= IOCOUNT_TRACE_MAX_IDX) {
10700 return;
10701 }
10702
10703 OSBacktrace((void **)&stacks[0], IOCOUNT_TRACE_MAX_FRAMES);
10704
10705 /*
10706 * To save index space, only store the unique backtraces. If dup is found,
10707 * just bump the count and return.
10708 */
10709 for (int i = 0; i < idx; i++) {
10710 if (memcmp(&stacks[0], &vp->v_iocount_trace[type].stacks[i][0],
10711 sizeof(stacks)) == 0) {
10712 vp->v_iocount_trace[type].counts[i]++;
10713 return;
10714 }
10715 }
10716
10717 memcpy(&vp->v_iocount_trace[type].stacks[idx][0], &stacks[0],
10718 sizeof(stacks));
10719 vp->v_iocount_trace[type].counts[idx] = 1;
10720 vp->v_iocount_trace[type].idx++;
10721 }
10722
10723 static void
record_iocount_trace_uthread(vnode_t vp,int count)10724 record_iocount_trace_uthread(vnode_t vp, int count)
10725 {
10726 struct uthread *ut;
10727
10728 ut = current_uthread();
10729 ut->uu_iocount += count;
10730
10731 if (count == 1) {
10732 if (ut->uu_vpindex < 32) {
10733 OSBacktrace((void **)&ut->uu_pcs[ut->uu_vpindex][0], 10);
10734
10735 ut->uu_vps[ut->uu_vpindex] = vp;
10736 ut->uu_vpindex++;
10737 }
10738 }
10739 }
10740
10741 static void
record_vp(vnode_t vp,int count)10742 record_vp(vnode_t vp, int count)
10743 {
10744 if (__probable(bootarg_vnode_iocount_trace == 0 &&
10745 bootarg_uthread_iocount_trace == 0)) {
10746 return;
10747 }
10748
10749 #if CONFIG_TRIGGERS
10750 if (vp->v_resolve) {
10751 return;
10752 }
10753 #endif
10754 if ((vp->v_flag & VSYSTEM)) {
10755 return;
10756 }
10757
10758 if (bootarg_vnode_iocount_trace) {
10759 record_iocount_trace_vnode(vp,
10760 (count > 0) ? IOCOUNT_TRACE_VGET : IOCOUNT_TRACE_VPUT);
10761 }
10762 if (bootarg_uthread_iocount_trace) {
10763 record_iocount_trace_uthread(vp, count);
10764 }
10765 }
10766 #endif /* CONFIG_IOCOUNT_TRACE */
10767
10768 #if CONFIG_TRIGGERS
10769 #define __triggers_unused
10770 #else
10771 #define __triggers_unused __unused
10772 #endif
10773
10774 resolver_result_t
vfs_resolver_result(__triggers_unused uint32_t seq,__triggers_unused enum resolver_status stat,__triggers_unused int aux)10775 vfs_resolver_result(__triggers_unused uint32_t seq, __triggers_unused enum resolver_status stat, __triggers_unused int aux)
10776 {
10777 #if CONFIG_TRIGGERS
10778 /*
10779 * |<--- 32 --->|<--- 28 --->|<- 4 ->|
10780 * sequence auxiliary status
10781 */
10782 return (((uint64_t)seq) << 32) |
10783 (((uint64_t)(aux & 0x0fffffff)) << 4) |
10784 (uint64_t)(stat & 0x0000000F);
10785 #else
10786 return (0x0ULL) | (((uint64_t)ENOTSUP) << 4) | (((uint64_t)RESOLVER_ERROR) & 0xF);
10787 #endif
10788 }
10789
10790 #if CONFIG_TRIGGERS
10791
10792 #define TRIG_DEBUG 0
10793
10794 #if TRIG_DEBUG
10795 #define TRIG_LOG(...) do { printf("%s: ", __FUNCTION__); printf(__VA_ARGS__); } while (0)
10796 #else
10797 #define TRIG_LOG(...)
10798 #endif
10799
10800 /*
10801 * Resolver result functions
10802 */
10803
10804
10805 enum resolver_status
vfs_resolver_status(resolver_result_t result)10806 vfs_resolver_status(resolver_result_t result)
10807 {
10808 /* lower 4 bits is status */
10809 return result & 0x0000000F;
10810 }
10811
10812 uint32_t
vfs_resolver_sequence(resolver_result_t result)10813 vfs_resolver_sequence(resolver_result_t result)
10814 {
10815 /* upper 32 bits is sequence */
10816 return (uint32_t)(result >> 32);
10817 }
10818
10819 int
vfs_resolver_auxiliary(resolver_result_t result)10820 vfs_resolver_auxiliary(resolver_result_t result)
10821 {
10822 /* 28 bits of auxiliary */
10823 return (int)(((uint32_t)(result & 0xFFFFFFF0)) >> 4);
10824 }
10825
10826 /*
10827 * SPI
10828 * Call in for resolvers to update vnode trigger state
10829 */
10830 int
vnode_trigger_update(vnode_t vp,resolver_result_t result)10831 vnode_trigger_update(vnode_t vp, resolver_result_t result)
10832 {
10833 vnode_resolve_t rp;
10834 uint32_t seq;
10835 enum resolver_status stat;
10836
10837 if (vp->v_resolve == NULL) {
10838 return EINVAL;
10839 }
10840
10841 stat = vfs_resolver_status(result);
10842 seq = vfs_resolver_sequence(result);
10843
10844 if ((stat != RESOLVER_RESOLVED) && (stat != RESOLVER_UNRESOLVED)) {
10845 return EINVAL;
10846 }
10847
10848 rp = vp->v_resolve;
10849 lck_mtx_lock(&rp->vr_lock);
10850
10851 if (seq > rp->vr_lastseq) {
10852 if (stat == RESOLVER_RESOLVED) {
10853 rp->vr_flags |= VNT_RESOLVED;
10854 } else {
10855 rp->vr_flags &= ~VNT_RESOLVED;
10856 }
10857
10858 rp->vr_lastseq = seq;
10859 }
10860
10861 lck_mtx_unlock(&rp->vr_lock);
10862
10863 return 0;
10864 }
10865
10866 static int
vnode_resolver_attach(vnode_t vp,vnode_resolve_t rp,boolean_t ref)10867 vnode_resolver_attach(vnode_t vp, vnode_resolve_t rp, boolean_t ref)
10868 {
10869 int error;
10870
10871 vnode_lock_spin(vp);
10872 if (vp->v_resolve != NULL) {
10873 vnode_unlock(vp);
10874 return EINVAL;
10875 } else {
10876 vp->v_resolve = rp;
10877 }
10878 vnode_unlock(vp);
10879
10880 if (ref) {
10881 error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE);
10882 if (error != 0) {
10883 panic("VNODE_REF_FORCE didn't help...");
10884 }
10885 }
10886
10887 return 0;
10888 }
10889
10890 /*
10891 * VFS internal interfaces for vnode triggers
10892 *
10893 * vnode must already have an io count on entry
10894 * v_resolve is stable when io count is non-zero
10895 */
10896 static int
vnode_resolver_create(mount_t mp,vnode_t vp,struct vnode_trigger_param * tinfo,boolean_t external)10897 vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, boolean_t external)
10898 {
10899 vnode_resolve_t rp;
10900 int result;
10901 char byte;
10902
10903 #if 1
10904 /* minimum pointer test (debugging) */
10905 if (tinfo->vnt_data) {
10906 byte = *((char *)tinfo->vnt_data);
10907 }
10908 #endif
10909 rp = kalloc_type(struct vnode_resolve, Z_WAITOK | Z_NOFAIL);
10910
10911 lck_mtx_init(&rp->vr_lock, &trigger_vnode_lck_grp, &trigger_vnode_lck_attr);
10912
10913 rp->vr_resolve_func = tinfo->vnt_resolve_func;
10914 rp->vr_unresolve_func = tinfo->vnt_unresolve_func;
10915 rp->vr_rearm_func = tinfo->vnt_rearm_func;
10916 rp->vr_reclaim_func = tinfo->vnt_reclaim_func;
10917 rp->vr_data = tinfo->vnt_data;
10918 rp->vr_lastseq = 0;
10919 rp->vr_flags = tinfo->vnt_flags & VNT_VALID_MASK;
10920 if (external) {
10921 rp->vr_flags |= VNT_EXTERNAL;
10922 }
10923
10924 result = vnode_resolver_attach(vp, rp, external);
10925 if (result != 0) {
10926 goto out;
10927 }
10928
10929 if (mp) {
10930 OSAddAtomic(1, &mp->mnt_numtriggers);
10931 }
10932
10933 return result;
10934
10935 out:
10936 kfree_type(struct vnode_resolve, rp);
10937 return result;
10938 }
10939
10940 static void
vnode_resolver_release(vnode_resolve_t rp)10941 vnode_resolver_release(vnode_resolve_t rp)
10942 {
10943 /*
10944 * Give them a chance to free any private data
10945 */
10946 if (rp->vr_data && rp->vr_reclaim_func) {
10947 rp->vr_reclaim_func(NULLVP, rp->vr_data);
10948 }
10949
10950 lck_mtx_destroy(&rp->vr_lock, &trigger_vnode_lck_grp);
10951 kfree_type(struct vnode_resolve, rp);
10952 }
10953
10954 /* Called after the vnode has been drained */
10955 static void
vnode_resolver_detach(vnode_t vp)10956 vnode_resolver_detach(vnode_t vp)
10957 {
10958 vnode_resolve_t rp;
10959 mount_t mp;
10960
10961 mp = vnode_mount(vp);
10962
10963 vnode_lock(vp);
10964 rp = vp->v_resolve;
10965 vp->v_resolve = NULL;
10966 vnode_unlock(vp);
10967
10968 if ((rp->vr_flags & VNT_EXTERNAL) != 0) {
10969 vnode_rele_ext(vp, O_EVTONLY, 1);
10970 }
10971
10972 vnode_resolver_release(rp);
10973
10974 /* Keep count of active trigger vnodes per mount */
10975 OSAddAtomic(-1, &mp->mnt_numtriggers);
10976 }
10977
/*
 * Give an auto-rearm trigger the chance to re-arm after use: calls the
 * resolver's rearm callback and, if it reports UNRESOLVED with a newer
 * sequence, clears VNT_RESOLVED.  No-op unless the vnode has a resolver
 * with a rearm callback and VNT_AUTO_REARM set.
 */
__private_extern__
void
vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_rearm_func == NULL) ||
	    (vp->v_resolve->vr_flags & VNT_AUTO_REARM) == 0) {
		return;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/*
	 * Check if VFS initiated this unmount. If so, we'll catch it after the unresolve completes.
	 */
	if (rp->vr_flags & VNT_VFS_UNMOUNTED) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	/* Check if this vnode is already armed */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	lck_mtx_unlock(&rp->vr_lock);

	/* vr_lock is dropped across the callout, which may block */
	result = rp->vr_rearm_func(vp, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* apply the result only if it is newer than the last one recorded */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED) {
			rp->vr_flags &= ~VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);
}
11025
/*
 * Fire the resolver for a trigger vnode encountered during lookup.
 * Returns 0 (including when there is nothing to do), an errno from the
 * MACF check, or the resolver's auxiliary value when it reports
 * RESOLVER_ERROR.
 */
__private_extern__
int
vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	enum path_operation op;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	/*
	 * N.B. we cannot call vfs_context_can_resolve_triggers()
	 * here because we really only want to suppress that in
	 * the event the trigger will be resolved by something in
	 * user-space. Any triggers that are resolved by the kernel
	 * do not pose a threat of deadlock.
	 */

	/* Only trigger on topmost vnodes */
	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_resolve_func == NULL) ||
	    (vp->v_mountedhere != NULL)) {
		return 0;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if (rp->vr_flags & VNT_RESOLVED) {
		lck_mtx_unlock(&rp->vr_lock);
		return 0;
	}

	lck_mtx_unlock(&rp->vr_lock);

#if CONFIG_MACF
	/*
	 * NOTE(review): vr_flags is read without vr_lock here; VNT_KERN_RESOLVE
	 * comes from vnt_flags at resolver creation and appears immutable
	 * afterwards — confirm.
	 */
	if ((rp->vr_flags & VNT_KERN_RESOLVE) == 0) {
		/*
		 * VNT_KERN_RESOLVE indicates this trigger has no parameters
		 * at the discression of the accessing process other than
		 * the act of access. All other triggers must be checked
		 */
		int rv = mac_vnode_check_trigger_resolve(ctx, vp, &ndp->ni_cnd);
		if (rv != 0) {
			return rv;
		}
	}
#endif

	/*
	 * XXX
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * is there anyway to know this???
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 *
	 * XXX - should we use ISLASTCN to pick the op value??? Perhaps only leafs should
	 * get the richer set and non-leafs should get generic OP_LOOKUP? TBD
	 */
	op = (ndp->ni_op < OP_MAXOP) ? ndp->ni_op: OP_LOOKUP;

	/* vr_lock is dropped across the callout, which may block */
	result = rp->vr_resolve_func(vp, &ndp->ni_cnd, op, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* apply the result only if it is newer than the last one recorded */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_RESOLVED) {
			rp->vr_flags |= VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
}
11105
/*
 * Ask a trigger vnode's resolver to unresolve (unmount what it mounted).
 * VNT_VFS_UNMOUNTED is set for the duration of the callout so that
 * vnode_trigger_rearm() will not re-arm while this VFS-initiated unmount
 * is in flight.  Returns 0, or the resolver's auxiliary value on
 * RESOLVER_ERROR.
 */
static int
vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_unresolve_func == NULL)) {
		return 0;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		printf("vnode_trigger_unresolve: not currently resolved\n");
		lck_mtx_unlock(&rp->vr_lock);
		return 0;
	}

	/* mark the unmount as VFS-initiated; cleared again below */
	rp->vr_flags |= VNT_VFS_UNMOUNTED;

	lck_mtx_unlock(&rp->vr_lock);

	/*
	 * XXX
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 */

	result = rp->vr_unresolve_func(vp, flags, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* apply the result only if it is newer than the last one recorded */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED) {
			rp->vr_flags &= ~VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	rp->vr_flags &= ~VNT_VFS_UNMOUNTED;
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
}
11157
11158 static int
triggerisdescendant(mount_t mp,mount_t rmp)11159 triggerisdescendant(mount_t mp, mount_t rmp)
11160 {
11161 int match = FALSE;
11162
11163 /*
11164 * walk up vnode covered chain looking for a match
11165 */
11166 name_cache_lock_shared();
11167
11168 while (1) {
11169 vnode_t vp;
11170
11171 /* did we encounter "/" ? */
11172 if (mp->mnt_flag & MNT_ROOTFS) {
11173 break;
11174 }
11175
11176 vp = mp->mnt_vnodecovered;
11177 if (vp == NULLVP) {
11178 break;
11179 }
11180
11181 mp = vp->v_mount;
11182 if (mp == rmp) {
11183 match = TRUE;
11184 break;
11185 }
11186 }
11187
11188 name_cache_unlock();
11189
11190 return match;
11191 }
11192
/* State carried across trigger_unmount_callback() invocations */
struct trigger_unmount_info {
	vfs_context_t ctx;      /* context passed to vnode_trigger_unresolve() */
	mount_t top_mp;         /* top-level mount; iteration stops here */
	vnode_t trigger_vp;     /* covered vnode deferred for later unresolve */
	mount_t trigger_mp;     /* mount expected to still sit on trigger_vp */
	uint32_t trigger_vid;   /* v_id snapshot to revalidate trigger_vp */
	int flags;              /* unmount flags for the unresolve calls */
};
11201
/*
 * vfs_iterate() callback used by vfs_nested_trigger_unmounts().
 *
 * Because the iterator holds a reference on the current mount, a trigger
 * cannot be unresolved while we are positioned on it; instead its covered
 * vnode is saved in infop->trigger_vp and the unresolve is performed on
 * the NEXT callback invocation (or by the caller after iteration ends).
 * Returns VFS_RETURNED to continue, VFS_RETURNED_DONE to stop.
 */
static int
trigger_unmount_callback(mount_t mp, void * arg)
{
	struct trigger_unmount_info * infop = (struct trigger_unmount_info *)arg;
	boolean_t mountedtrigger = FALSE;

	/*
	 * When we encounter the top level mount we're done
	 */
	if (mp == infop->top_mp) {
		return VFS_RETURNED_DONE;
	}

	/* take an iocount on the covered vnode while we inspect it */
	if ((mp->mnt_vnodecovered == NULL) ||
	    (vnode_getwithref(mp->mnt_vnodecovered) != 0)) {
		return VFS_RETURNED;
	}

	if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
	    (mp->mnt_vnodecovered->v_resolve != NULL) &&
	    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_RESOLVED)) {
		mountedtrigger = TRUE;
	}
	vnode_put(mp->mnt_vnodecovered);

	/*
	 * When we encounter a mounted trigger, check if its under the top level mount
	 */
	if (!mountedtrigger || !triggerisdescendant(mp, infop->top_mp)) {
		return VFS_RETURNED;
	}

	/*
	 * Process any pending nested mount (now that its not referenced)
	 */
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithvid(infop->trigger_vp, infop->trigger_vid) == 0)) {
		vnode_t vp = infop->trigger_vp;
		int error;

		infop->trigger_vp = NULLVP;

		/* the saved vnode should never cover the mount we are on now */
		if (mp == vp->v_mountedhere) {
			vnode_put(vp);
			printf("trigger_unmount_callback: unexpected match '%s'\n",
			    mp->mnt_vfsstat.f_mntonname);
			return VFS_RETURNED;
		}
		/* the mount above the saved vnode changed since we saved it */
		if (infop->trigger_mp != vp->v_mountedhere) {
			vnode_put(vp);
			printf("trigger_unmount_callback: trigger mnt changed! (%p != %p)\n",
			    infop->trigger_mp, vp->v_mountedhere);
			goto savenext;
		}

		error = vnode_trigger_unresolve(vp, infop->flags, infop->ctx);
		vnode_put(vp);
		if (error) {
			printf("unresolving: '%s', err %d\n",
			    vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname :
			    "???", error);
			return VFS_RETURNED_DONE; /* stop iteration on errors */
		}
	}
savenext:
	/*
	 * We can't call resolver here since we hold a mount iter
	 * ref on mp so save its covered vp for later processing
	 */
	infop->trigger_vp = mp->mnt_vnodecovered;
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithref(infop->trigger_vp) == 0)) {
		if (infop->trigger_vp->v_mountedhere == mp) {
			infop->trigger_vid = infop->trigger_vp->v_id;
			infop->trigger_mp = mp;
		}
		vnode_put(infop->trigger_vp);
	}

	return VFS_RETURNED;
}
11283
11284 /*
11285 * Attempt to unmount any trigger mounts nested underneath a mount.
11286 * This is a best effort attempt and no retries are performed here.
11287 *
11288 * Note: mp->mnt_rwlock is held exclusively on entry (so be carefull)
11289 */
__private_extern__
void
vfs_nested_trigger_unmounts(mount_t mp, int flags, vfs_context_t ctx)
{
	struct trigger_unmount_info info;

	/* Must have trigger vnodes */
	if (mp->mnt_numtriggers == 0) {
		return;
	}
	/* Avoid recursive requests (by checking covered vnode) */
	if ((mp->mnt_vnodecovered != NULL) &&
	    (vnode_getwithref(mp->mnt_vnodecovered) == 0)) {
		boolean_t recursive = FALSE;

		/*
		 * If our own covered vnode is mid-unresolve (VNT_VFS_UNMOUNTED),
		 * this call came from within a trigger unmount already — bail.
		 */
		if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
		    (mp->mnt_vnodecovered->v_resolve != NULL) &&
		    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_VFS_UNMOUNTED)) {
			recursive = TRUE;
		}
		vnode_put(mp->mnt_vnodecovered);
		if (recursive) {
			return;
		}
	}

	/*
	 * Attempt to unmount any nested trigger mounts (best effort)
	 */
	info.ctx = ctx;
	info.top_mp = mp;
	info.trigger_vp = NULLVP;
	info.trigger_vid = 0;
	info.trigger_mp = NULL;
	info.flags = flags;

	/* tail-first so nested mounts are visited before what they cover */
	(void) vfs_iterate(VFS_ITERATE_TAIL_FIRST, trigger_unmount_callback, &info);

	/*
	 * Process remaining nested mount (now that its not referenced)
	 */
	if ((info.trigger_vp != NULLVP) &&
	    (vnode_getwithvid(info.trigger_vp, info.trigger_vid) == 0)) {
		vnode_t vp = info.trigger_vp;

		if (info.trigger_mp == vp->v_mountedhere) {
			(void) vnode_trigger_unresolve(vp, flags, ctx);
		}
		vnode_put(vp);
	}
}
11341
11342 int
vfs_addtrigger(mount_t mp,const char * relpath,struct vnode_trigger_info * vtip,vfs_context_t ctx)11343 vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx)
11344 {
11345 struct nameidata *ndp;
11346 int res;
11347 vnode_t rvp, vp;
11348 struct vnode_trigger_param vtp;
11349
11350 /*
11351 * Must be called for trigger callback, wherein rwlock is held
11352 */
11353 lck_rw_assert(&mp->mnt_rwlock, LCK_RW_ASSERT_HELD);
11354
11355 TRIG_LOG("Adding trigger at %s\n", relpath);
11356 TRIG_LOG("Trying VFS_ROOT\n");
11357
11358 ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);
11359
11360 /*
11361 * We do a lookup starting at the root of the mountpoint, unwilling
11362 * to cross into other mountpoints.
11363 */
11364 res = VFS_ROOT(mp, &rvp, ctx);
11365 if (res != 0) {
11366 goto out;
11367 }
11368
11369 TRIG_LOG("Trying namei\n");
11370
11371 NDINIT(ndp, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE,
11372 CAST_USER_ADDR_T(relpath), ctx);
11373 ndp->ni_dvp = rvp;
11374 res = namei(ndp);
11375 if (res != 0) {
11376 vnode_put(rvp);
11377 goto out;
11378 }
11379
11380 vp = ndp->ni_vp;
11381 nameidone(ndp);
11382 vnode_put(rvp);
11383
11384 TRIG_LOG("Trying vnode_resolver_create()\n");
11385
11386 /*
11387 * Set up blob. vnode_create() takes a larger structure
11388 * with creation info, and we needed something different
11389 * for this case. One needs to win, or we need to munge both;
11390 * vnode_create() wins.
11391 */
11392 bzero(&vtp, sizeof(vtp));
11393 vtp.vnt_resolve_func = vtip->vti_resolve_func;
11394 vtp.vnt_unresolve_func = vtip->vti_unresolve_func;
11395 vtp.vnt_rearm_func = vtip->vti_rearm_func;
11396 vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
11397 vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
11398 vtp.vnt_data = vtip->vti_data;
11399 vtp.vnt_flags = vtip->vti_flags;
11400
11401 res = vnode_resolver_create(mp, vp, &vtp, TRUE);
11402 vnode_put(vp);
11403 out:
11404 kfree_type(struct nameidata, ndp);
11405 TRIG_LOG("Returning %d\n", res);
11406 return res;
11407 }
11408
11409 #endif /* CONFIG_TRIGGERS */
11410
11411 vm_offset_t
kdebug_vnode(vnode_t vp)11412 kdebug_vnode(vnode_t vp)
11413 {
11414 return VM_KERNEL_ADDRPERM(vp);
11415 }
11416
/* Tunable (kern.flush_cache_on_write): when non-zero, writes to uncached
 * files are followed by a drive cache flush (see
 * vnode_should_flush_after_write()). */
static int flush_cache_on_write = 0;
SYSCTL_INT(_kern, OID_AUTO, flush_cache_on_write,
    CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0,
    "always flush the drive cache on writes to uncached files");
11421
11422 int
vnode_should_flush_after_write(vnode_t vp,int ioflag)11423 vnode_should_flush_after_write(vnode_t vp, int ioflag)
11424 {
11425 return flush_cache_on_write
11426 && (ISSET(ioflag, IO_NOCACHE) || vnode_isnocache(vp));
11427 }
11428
11429 /*
11430 * sysctl for use by disk I/O tracing tools to get the list of existing
11431 * vnodes' paths
11432 */
11433
/* Path buffer measured in words so kdebug can emit it in 4-word chunks */
#define NPATH_WORDS (MAXPATHLEN / sizeof(unsigned long))
struct vnode_trace_paths_context {
	uint64_t count; /* vnodes traced since the last preemption yield */
	/*
	 * Must be a multiple of 4, then -1, for tracing!
	 */
	unsigned long path[NPATH_WORDS + (4 - (NPATH_WORDS % 4)) - 1];
};
11442
11443 static int
vnode_trace_path_callback(struct vnode * vp,void * vctx)11444 vnode_trace_path_callback(struct vnode *vp, void *vctx)
11445 {
11446 struct vnode_trace_paths_context *ctx = vctx;
11447 size_t path_len = sizeof(ctx->path);
11448
11449 int getpath_len = (int)path_len;
11450 if (vn_getpath(vp, (char *)ctx->path, &getpath_len) == 0) {
11451 /* vn_getpath() NUL-terminates, and len includes the NUL. */
11452 assert(getpath_len >= 0);
11453 path_len = (size_t)getpath_len;
11454
11455 assert(path_len <= sizeof(ctx->path));
11456 kdebug_vfs_lookup(ctx->path, (int)path_len, vp,
11457 KDBG_VFS_LOOKUP_FLAG_LOOKUP | KDBG_VFS_LOOKUP_FLAG_NOPROCFILT);
11458
11459 if (++(ctx->count) == 1000) {
11460 thread_yield_to_preemption();
11461 ctx->count = 0;
11462 }
11463 }
11464
11465 return VNODE_RETURNED;
11466 }
11467
11468 static int
vfs_trace_paths_callback(mount_t mp,void * arg)11469 vfs_trace_paths_callback(mount_t mp, void *arg)
11470 {
11471 if (mp->mnt_flag & MNT_LOCAL) {
11472 vnode_iterate(mp, VNODE_ITERATE_ALL, vnode_trace_path_callback, arg);
11473 }
11474
11475 return VFS_RETURNED;
11476 }
11477
11478 static int sysctl_vfs_trace_paths SYSCTL_HANDLER_ARGS {
11479 struct vnode_trace_paths_context ctx;
11480
11481 (void)oidp;
11482 (void)arg1;
11483 (void)arg2;
11484 (void)req;
11485
11486 if (!kauth_cred_issuser(kauth_cred_get())) {
11487 return EPERM;
11488 }
11489
11490 if (!kdebug_enable || !kdebug_debugid_enabled(VFS_LOOKUP)) {
11491 return EINVAL;
11492 }
11493
11494 bzero(&ctx, sizeof(struct vnode_trace_paths_context));
11495
11496 vfs_iterate(0, vfs_trace_paths_callback, &ctx);
11497
11498 return 0;
11499 }
11500
11501 SYSCTL_PROC(_vfs_generic, OID_AUTO, trace_paths, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, NULL, 0, &sysctl_vfs_trace_paths, "-", "trace_paths");
11502