xref: /xnu-12377.41.6/bsd/vfs/vfs_subr.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  *
3  * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
4  *
5  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6  *
7  * This file contains Original Code and/or Modifications of Original Code
8  * as defined in and that are subject to the Apple Public Source License
9  * Version 2.0 (the 'License'). You may not use this file except in
10  * compliance with the License. The rights granted to you under the License
11  * may not be used to create, or enable the creation or redistribution of,
12  * unlawful or unlicensed copies of an Apple operating system, or to
13  * circumvent, violate, or enable the circumvention or violation of, any
14  * terms of an Apple operating system software license agreement.
15  *
16  * Please obtain a copy of the License at
17  * http://www.opensource.apple.com/apsl/ and read it before using this file.
18  *
19  * The Original Code and all software distributed under the License are
20  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24  * Please see the License for the specific language governing rights and
25  * limitations under the License.
26  *
27  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28  */
29 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 /*
31  * Copyright (c) 1989, 1993
32  *	The Regents of the University of California.  All rights reserved.
33  * (c) UNIX System Laboratories, Inc.
34  * All or some portions of this file are derived from material licensed
35  * to the University of California by American Telephone and Telegraph
36  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
37  * the permission of UNIX System Laboratories, Inc.
38  *
39  * Redistribution and use in source and binary forms, with or without
40  * modification, are permitted provided that the following conditions
41  * are met:
42  * 1. Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  * 2. Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in the
46  *    documentation and/or other materials provided with the distribution.
47  * 3. All advertising materials mentioning features or use of this software
48  *    must display the following acknowledgement:
49  *	This product includes software developed by the University of
50  *	California, Berkeley and its contributors.
51  * 4. Neither the name of the University nor the names of its contributors
52  *    may be used to endorse or promote products derived from this software
53  *    without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65  * SUCH DAMAGE.
66  *
67  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
68  */
69 /*
70  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
71  * support for mandatory and extensible security protections.  This notice
72  * is included in support of clause 2.2 (b) of the Apple Public License,
73  * Version 2.0.
74  */
75 
76 /*
77  * External virtual filesystem routines
78  */
79 
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount_internal.h>
85 #include <sys/time.h>
86 #include <sys/lock.h>
87 #include <sys/vnode.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf_internal.h>
93 #include <sys/errno.h>
94 #include <kern/kalloc.h>
95 #include <sys/uio_internal.h>
96 #include <sys/uio.h>
97 #include <sys/domain.h>
98 #include <sys/mbuf.h>
99 #include <sys/syslog.h>
100 #include <sys/ubc_internal.h>
101 #include <sys/vm.h>
102 #include <sys/xattr.h>
103 #include <sys/sysctl.h>
104 #include <sys/filedesc.h>
105 #include <sys/fcntl.h>
106 #include <sys/event.h>
107 #include <sys/kdebug.h>
108 #include <sys/kauth.h>
109 #include <sys/user.h>
110 #include <sys/systm.h>
111 #include <sys/kern_memorystatus_xnu.h>
112 #include <sys/lockf.h>
113 #include <sys/reboot.h>
114 #include <miscfs/fifofs/fifo.h>
115 
116 #include <nfs/nfs.h>
117 
118 #include <string.h>
119 #include <machine/machine_routines.h>
120 
121 #include <kern/assert.h>
122 #include <mach/kern_return.h>
123 #include <kern/thread.h>
124 #include <kern/sched_prim.h>
125 #include <kern/smr.h>
126 
127 #include <miscfs/specfs/specdev.h>
128 
129 #include <mach/mach_types.h>
130 #include <mach/memory_object_types.h>
131 #include <mach/memory_object_control.h>
132 
133 #include <kern/kalloc.h>        /* kalloc()/kfree() */
134 #include <kern/clock.h>         /* delay_for_interval() */
135 #include <libkern/coreanalytics/coreanalytics.h>
136 #include <libkern/OSAtomic.h>   /* OSAddAtomic() */
137 #include <os/atomic_private.h>
138 #if defined(XNU_TARGET_OS_OSX)
139 #include <console/video_console.h>
140 #endif
141 
142 #ifdef CONFIG_IOCOUNT_TRACE
143 #include <libkern/OSDebug.h>
144 #endif
145 
146 #include <vm/vm_protos.h>       /* vnode_pager_vrele() */
147 #include <vm/vm_ubc.h>
148 #include <vm/memory_object_xnu.h>
149 
150 #if CONFIG_MACF
151 #include <security/mac_framework.h>
152 #endif
153 
154 #include <vfs/vfs_disk_conditioner.h>
155 #include <libkern/section_keywords.h>
156 
/* Lock group and attributes used for the per-vnode lock (v_lock). */
static LCK_GRP_DECLARE(vnode_lck_grp, "vnode");
static LCK_ATTR_DECLARE(vnode_lck_attr, 0, 0);

#if CONFIG_TRIGGERS
/* Separate lock group for trigger (nested-mount resolver) vnodes. */
static LCK_GRP_DECLARE(trigger_vnode_lck_grp, "trigger_vnode");
static LCK_ATTR_DECLARE(trigger_vnode_lck_attr, 0, 0);
#endif

/* Mutex guarding the global mount list; defined in the mount code. */
extern lck_mtx_t mnt_list_mtx_lock;

/* Typed zone backing struct specinfo (special-device vnode state). */
static KALLOC_TYPE_DEFINE(specinfo_zone, struct specinfo, KT_DEFAULT);

/* Zone backing all vnode allocations; memory is zeroed on free. */
ZONE_DEFINE(vnode_zone, "vnodes",
    sizeof(struct vnode), ZC_NOGC | ZC_ZFREE_CLEARMEM);

/*
 * Translation tables between stat(2) file-mode type bits (S_IFMT >> 12)
 * and vnode types, and back (used by the IFTOVT()/VTTOIF() macros).
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int     vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

extern int paniclog_append_noflush(const char *format, ...);

/* XXX next prototype should come from <libsa/stdlib.h> but conflicts with libkern */
__private_extern__ void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));

__private_extern__ void vntblinit(void);
__private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t,
    enum uio_seg, int);

/* Free/dead/async-work list management (defined later in this file). */
static void vnode_list_add(vnode_t);
static void vnode_async_list_add(vnode_t);
static void vnode_list_remove(vnode_t);
static void vnode_list_remove_locked(vnode_t);

static void vnode_abort_advlocks(vnode_t);
static errno_t vnode_drain(vnode_t);
static void vgone(vnode_t, int flags);
static void vclean(vnode_t vp, int flag);
static void vnode_reclaim_internal(vnode_t, int, int, int);

static void vnode_dropiocount(vnode_t);

static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
static int  vnode_reload(vnode_t);

static int unmount_callback(mount_t, __unused void *);

static void insmntque(vnode_t vp, mount_t mp);
static int mount_getvfscnt(void);
static int mount_fillfsids(fsid_t *, int );
static void vnode_iterate_setup(mount_t);
int vnode_umount_preflight(mount_t, vnode_t, int);
static int vnode_iterate_prepare(mount_t);
static int vnode_iterate_reloadq(mount_t);
static void vnode_iterate_clear(mount_t);
static mount_t vfs_getvfs_locked(fsid_t *);
static int vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp,
    struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx);
static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx);

errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);

#ifdef CONFIG_IOCOUNT_TRACE
/* Debug-only vnode/uthread iocount tracing, enabled via boot-args. */
static void record_vp(vnode_t vp, int count);
static TUNABLE(int, bootarg_vnode_iocount_trace, "vnode_iocount_trace", 0);
static TUNABLE(int, bootarg_uthread_iocount_trace, "uthread_iocount_trace", 0);
#endif /* CONFIG_IOCOUNT_TRACE */

#if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
static TUNABLE(bool, bootarg_no_vnode_jetsam, "-no_vnode_jetsam", false);
#endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */

static TUNABLE(bool, bootarg_no_vnode_drain, "-no_vnode_drain", false);

/*
 * Policy for returning vnode memory to its zone:
 *   DEALLOC_VNODE_NONE          - never free vnode memory back to the zone
 *   DEALLOC_VNODE_ONLY_OVERFLOW - free only vnodes beyond desiredvnodes
 *   DEALLOC_VNODE_ALL           - any reclaimable vnode may be freed
 */
__options_decl(freeable_vnode_level_t, uint32_t, {
	DEALLOC_VNODE_NONE = 0,
	DEALLOC_VNODE_ONLY_OVERFLOW = 1,
	DEALLOC_VNODE_ALL = 2
});

#if XNU_TARGET_OS_OSX
static TUNABLE(freeable_vnode_level_t, bootarg_vn_dealloc_level, "vn_dealloc_level", DEALLOC_VNODE_NONE);
#else
static TUNABLE(freeable_vnode_level_t, bootarg_vn_dealloc_level, "vn_dealloc_level", DEALLOC_VNODE_ONLY_OVERFLOW);
#endif /* XNU_TARGET_OS_OSX */

/* Effective dealloc level; copied from the boot-arg in vntblinit(). */
static freeable_vnode_level_t vn_dealloc_level = DEALLOC_VNODE_NONE;

/* Set when the root volume lives on a "CF" drive — presumably
 * CoreStorage/Fusion; confirm against the code that sets it. */
boolean_t root_is_CF_drive = FALSE;

#if CONFIG_TRIGGERS
static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, boolean_t external);
static void vnode_resolver_detach(vnode_t);
#endif
259 
/*
 * Global vnode reuse lists.  Membership is tracked through the vnode's
 * v_freelist linkage; a vnode on no list has tqe_prev set to the
 * 0xdeadb sentinel (see VLISTNONE/VONLIST below).
 */
TAILQ_HEAD(freelst, vnode) vnode_free_list;     /* vnode free list */
TAILQ_HEAD(deadlst, vnode) vnode_dead_list;     /* vnode dead list */
TAILQ_HEAD(async_work_lst, vnode) vnode_async_work_list;


TAILQ_HEAD(ragelst, vnode) vnode_rage_list;     /* vnode rapid age list */
struct timeval rage_tv;                         /* start of current rapid-age window */
int     rage_limit = 0;                         /* cap on rage list population, set in vntblinit() */
int     ragevnodes = 0;                         /* current rage list population */

/* Reuse/dead accounting; thresholds are computed in vntblinit(). */
long  reusablevnodes_max = LONG_MAX;
long  reusablevnodes = 0;
int   deadvnodes_low = 0;
int   deadvnodes_high = 0;
int   numvnodes_min = 0;
int   numvnodes_max = 0;

uint64_t newvnode = 0;                  /* cumulative vnode allocations */
unsigned long newvnode_nodead = 0;      /* allocations that found no dead vnode to reuse */

/* Shutdown-time unmount-all progress markers. */
static  int vfs_unmountall_started = 0;
static  int vfs_unmountall_finished = 0;
static  uint64_t vfs_shutdown_last_completion_time;

#define RAGE_LIMIT_MIN  100
#define RAGE_TIME_LIMIT 5               /* seconds (used against rage_tv) */

VFS_SMR_DECLARE;
extern uint32_t nc_smr_enabled;

/*
 * ROSV definitions
 * NOTE: These are shadowed from PlatformSupport definitions, but XNU
 * builds standalone.
 */
#define PLATFORM_DATA_VOLUME_MOUNT_POINT "/System/Volumes/Data"

/*
 * These could be in PlatformSupport but aren't yet
 */
#define PLATFORM_PREBOOT_VOLUME_MOUNT_POINT "/System/Volumes/Preboot"
#define PLATFORM_RECOVERY_VOLUME_MOUNT_POINT "/System/Volumes/Recovery"

#if CONFIG_MOUNT_VM
#define PLATFORM_VM_VOLUME_MOUNT_POINT "/System/Volumes/VM"
#endif

struct mntlist mountlist;                       /* mounted filesystem list */
static int nummounts = 0;

static int print_busy_vnodes = 0;                               /* print out busy vnodes */

/* Sanity check (DIAGNOSTIC only): panic if vp is not on any list. */
#if DIAGNOSTIC
#define VLISTCHECK(fun, vp, list)       \
	if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
	        panic("%s: %s vnode not on %slist", (fun), (list), (list));
#else
#define VLISTCHECK(fun, vp, list)
#endif /* DIAGNOSTIC */

/* Mark vp as belonging to no list (0xdeadb sentinel in tqe_prev). */
#define VLISTNONE(vp)   \
	do {    \
	        (vp)->v_freelist.tqe_next = (struct vnode *)0;  \
	        (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb;   \
	} while(0)

/* True when vp is currently on one of the reuse lists. */
#define VONLIST(vp)     \
	((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)

/* remove a vnode from free vnode list */
#define VREMFREE(fun, vp)       \
	do {    \
	        VLISTCHECK((fun), (vp), "free");        \
	        TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist);       \
	        VLISTNONE((vp));        \
	        freevnodes--;   \
	        reusablevnodes--;    \
	} while(0)


/* remove a vnode from dead vnode list */
#define VREMDEAD(fun, vp)       \
	do {    \
	        VLISTCHECK((fun), (vp), "dead");        \
	        TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist);       \
	        VLISTNONE((vp));        \
	        vp->v_listflag &= ~VLIST_DEAD;  \
	        deadvnodes--;   \
	        if (vp->v_listflag & VLIST_NO_REUSE) {        \
	                deadvnodes_noreuse--;        \
	        }        \
	} while(0)


/* remove a vnode from async work vnode list */
#define VREMASYNC_WORK(fun, vp) \
	do {    \
	        VLISTCHECK((fun), (vp), "async_work");  \
	        TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist); \
	        VLISTNONE((vp));        \
	        vp->v_listflag &= ~VLIST_ASYNC_WORK;    \
	        async_work_vnodes--;    \
	        if (!(vp->v_listflag & VLIST_NO_REUSE)) {        \
	                reusablevnodes--;    \
	        }        \
	} while(0)


/* remove a vnode from rage vnode list */
#define VREMRAGE(fun, vp)       \
	do {    \
	        if ( !(vp->v_listflag & VLIST_RAGE))                    \
	                panic("VREMRAGE: vp not on rage list");         \
	        VLISTCHECK((fun), (vp), "rage");                        \
	        TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist);       \
	        VLISTNONE((vp));                \
	        vp->v_listflag &= ~VLIST_RAGE;  \
	        ragevnodes--;                   \
	        reusablevnodes--;    \
	} while(0)

/* Worker-thread entry points and SMR free callback (defined later). */
static void async_work_continue(void);
static void vn_laundry_continue(void);
static void wakeup_laundry_thread(void);
static void vnode_smr_free(void *, size_t);

/* CoreAnalytics telemetry describing freeable-vnode watermarks. */
CA_EVENT(freeable_vnodes,
    CA_INT, numvnodes_min,
    CA_INT, numvnodes_max,
    CA_INT, desiredvnodes,
    CA_INT, numvnodes,
    CA_INT, freevnodes,
    CA_INT, deadvnodes,
    CA_INT, freeablevnodes,
    CA_INT, busyvnodes,
    CA_BOOL, threshold_crossed);
static CA_EVENT_TYPE(freeable_vnodes) freeable_vnodes_telemetry;

/* Latched when the freeable-vnode threshold is first crossed. */
static bool freeablevnodes_threshold_crossed = false;
399 
/*
 * Initialize the vnode management data structures.
 *
 * Called once at boot: sets up the global reuse lists and mount list,
 * derives the rage/dead/reusable watermarks from desiredvnodes and the
 * vn_dealloc_level boot-arg, seeds the telemetry record, optionally
 * enables SMR on the vnode zone, and starts the two background worker
 * threads (async work + laundry).
 */
__private_extern__ void
vntblinit(void)
{
	thread_t        thread = THREAD_NULL;
	int desiredvnodes_one_percent = desiredvnodes / 100;

	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_rage_list);
	TAILQ_INIT(&vnode_dead_list);
	TAILQ_INIT(&vnode_async_work_list);
	TAILQ_INIT(&mountlist);

	/* rage limit: 1% of desiredvnodes, but never below the floor */
	microuptime(&rage_tv);
	rage_limit = desiredvnodes_one_percent;
	if (rage_limit < RAGE_LIMIT_MIN) {
		rage_limit = RAGE_LIMIT_MIN;
	}

	/* dead-vnode watermarks: 1% capped at 300, high = 2x low */
	deadvnodes_low = desiredvnodes_one_percent;
	if (deadvnodes_low > 300) {
		deadvnodes_low = 300;
	}
	deadvnodes_high = deadvnodes_low * 2;

	/*
	 * Size limits depend on the dealloc policy: with OVERFLOW or ALL the
	 * table may grow to 2x desiredvnodes; with ALL it may also shrink to
	 * 40% and the reusable pool is capped at roughly 20%.
	 */
	numvnodes_min = numvnodes_max = desiredvnodes;
	if (bootarg_vn_dealloc_level == DEALLOC_VNODE_ONLY_OVERFLOW) {
		numvnodes_max = desiredvnodes * 2;
		vn_dealloc_level = bootarg_vn_dealloc_level;
	} else if (bootarg_vn_dealloc_level == DEALLOC_VNODE_ALL) {
		numvnodes_min = desiredvnodes_one_percent * 40;
		numvnodes_max = desiredvnodes * 2;
		reusablevnodes_max = (desiredvnodes_one_percent * 20) - deadvnodes_low;
		vn_dealloc_level = bootarg_vn_dealloc_level;
	}

	/* Seed the telemetry record with the boot-time watermarks. */
	bzero(&freeable_vnodes_telemetry, sizeof(CA_EVENT_TYPE(freeable_vnodes)));
	freeable_vnodes_telemetry.numvnodes_min = numvnodes_min;
	freeable_vnodes_telemetry.numvnodes_max = numvnodes_max;
	freeable_vnodes_telemetry.desiredvnodes = desiredvnodes;

	if (nc_smr_enabled) {
		zone_enable_smr(vnode_zone, VFS_SMR(), &vnode_smr_free);
	}

	/*
	 * create worker threads
	 */
	kernel_thread_start((thread_continue_t)async_work_continue, NULL, &thread);
	thread_deallocate(thread);
	kernel_thread_start((thread_continue_t)vn_laundry_continue, NULL, &thread);
	thread_deallocate(thread);
}
455 
456 /* the timeout is in 10 msecs */
457 int
vnode_waitforwrites(vnode_t vp,int output_target,int slpflag,int slptimeout,const char * msg)458 vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg)
459 {
460 	int error = 0;
461 	struct timespec ts;
462 
463 	if (output_target < 0) {
464 		return EINVAL;
465 	}
466 
467 	KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);
468 
469 	if (vp->v_numoutput > output_target) {
470 		slpflag |= PDROP;
471 
472 		vnode_lock_spin(vp);
473 
474 		while ((vp->v_numoutput > output_target) && error == 0) {
475 			if (output_target) {
476 				vp->v_flag |= VTHROTTLED;
477 			} else {
478 				vp->v_flag |= VBWAIT;
479 			}
480 
481 			ts.tv_sec = (slptimeout / 100);
482 			ts.tv_nsec = (slptimeout % 1000)  * 10 * NSEC_PER_USEC * 1000;
483 			error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);
484 
485 			vnode_lock_spin(vp);
486 		}
487 		vnode_unlock(vp);
488 	}
489 	KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);
490 
491 	return error;
492 }
493 
494 
/* Account the start of a write I/O against vp (paired with vnode_writedone()). */
void
vnode_startwrite(vnode_t vp)
{
	OSAddAtomic(1, &vp->v_numoutput);
}
500 
501 
502 void
vnode_writedone(vnode_t vp)503 vnode_writedone(vnode_t vp)
504 {
505 	if (vp) {
506 		int need_wakeup = 0;
507 
508 		OSAddAtomic(-1, &vp->v_numoutput);
509 
510 		vnode_lock_spin(vp);
511 
512 		if (vp->v_numoutput < 0) {
513 			panic("vnode_writedone: numoutput < 0");
514 		}
515 
516 		if ((vp->v_flag & VTHROTTLED)) {
517 			vp->v_flag &= ~VTHROTTLED;
518 			need_wakeup = 1;
519 		}
520 		if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
521 			vp->v_flag &= ~VBWAIT;
522 			need_wakeup = 1;
523 		}
524 		vnode_unlock(vp);
525 
526 		if (need_wakeup) {
527 			wakeup((caddr_t)&vp->v_numoutput);
528 		}
529 	}
530 }
531 
532 
533 
534 int
vnode_hasdirtyblks(vnode_t vp)535 vnode_hasdirtyblks(vnode_t vp)
536 {
537 	struct cl_writebehind *wbp;
538 
539 	/*
540 	 * Not taking the buf_mtx as there is little
541 	 * point doing it. Even if the lock is taken the
542 	 * state can change right after that. If their
543 	 * needs to be a synchronization, it must be driven
544 	 * by the caller
545 	 */
546 	if (vp->v_dirtyblkhd.lh_first) {
547 		return 1;
548 	}
549 
550 	if (!UBCINFOEXISTS(vp)) {
551 		return 0;
552 	}
553 
554 	wbp = vp->v_ubcinfo->cl_wbehind;
555 
556 	if (wbp && (wbp->cl_number || wbp->cl_scmap)) {
557 		return 1;
558 	}
559 
560 	return 0;
561 }
562 
563 int
vnode_hascleanblks(vnode_t vp)564 vnode_hascleanblks(vnode_t vp)
565 {
566 	/*
567 	 * Not taking the buf_mtx as there is little
568 	 * point doing it. Even if the lock is taken the
569 	 * state can change right after that. If their
570 	 * needs to be a synchronization, it must be driven
571 	 * by the caller
572 	 */
573 	if (vp->v_cleanblkhd.lh_first) {
574 		return 1;
575 	}
576 	return 0;
577 }
578 
/* Flag mp as being iterated; cleared by vnode_iterate_clear().
 * Caller holds the mount lock. */
void
vnode_iterate_setup(mount_t mp)
{
	mp->mnt_lflag |= MNT_LITER;
}
584 
/*
 * Scan mp's vnode list looking for vnodes that would prevent an unmount.
 *
 * Returns 1 if a busy vnode is found, 0 otherwise.  With
 * print_busy_vnodes set (and FORCECLOSE clear) the scan continues past
 * busy vnodes so all of them get logged; otherwise it returns at the
 * first one.  Directories are always skipped, as are vnodes excluded by
 * the SKIPSYSTEM/SKIPSWAP/WRITECLOSE flags.
 *
 * NOTE(review): a vnode with only an iocount reference is given up to
 * 3 seconds to drop it (one tsleep per such vnode) before being counted
 * as busy — a preflight over many such vnodes can therefore be slow.
 */
int
vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
{
	vnode_t vp;
	int ret = 0;

	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		/* Directories never block the unmount. */
		if (vp->v_type == VDIR) {
			continue;
		}
		if (vp == skipvp) {
			continue;
		}
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) {
			continue;
		}
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			continue;
		}
		/* WRITECLOSE only cares about writable regular files. */
		if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG)) {
			continue;
		}

		/* Look for busy vnode */
		if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) {
			ret = 1;
			if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
				vprint_path("vnode_umount_preflight - busy vnode", vp);
			} else {
				return ret;
			}
		} else if (vp->v_iocount > 0) {
			/* Busy if iocount is > 0 for more than 3 seconds */
			tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz);
			if (vp->v_iocount > 0) {
				ret = 1;
				if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
					vprint_path("vnode_umount_preflight - busy vnode", vp);
				} else {
					return ret;
				}
			}
			continue;
		}
	}

	return ret;
}
633 
/*
 * This routine prepares iteration by moving all the vnodes to worker queue
 * called with mount lock held
 *
 * Returns 0 when the mount has no vnodes (nothing to iterate), 1 after
 * the entire mnt_vnodelist has been transplanted onto mnt_workerqueue.
 * The splice is done by hand (rather than TAILQ_CONCAT) so the head
 * pointers move wholesale; vnode_iterate_reloadq() undoes it.
 */
int
vnode_iterate_prepare(mount_t mp)
{
	vnode_t vp;

	if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
		/* nothing to do */
		return 0;
	}

	/* Re-point the first vnode's back-link at the worker queue head,
	 * then move both head and tail pointers over in one shot. */
	vp = TAILQ_FIRST(&mp->mnt_vnodelist);
	vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
	mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
	mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;

	/* mnt_vnodelist is now empty; mnt_newvnodes collects arrivals
	 * that show up while the iteration is in progress. */
	TAILQ_INIT(&mp->mnt_vnodelist);
	if (mp->mnt_newvnodes.tqh_first != NULL) {
		panic("vnode_iterate_prepare: newvnode when entering vnode");
	}
	TAILQ_INIT(&mp->mnt_newvnodes);

	return 1;
}
661 
662 
/*
 * Undo vnode_iterate_prepare(): splice any vnodes still on the worker
 * queue back onto the end of mnt_vnodelist, and any vnodes that arrived
 * during the iteration (mnt_newvnodes) onto its head.
 *
 * called with mount lock held.  Returns 1 if new vnodes were moved,
 * 0 otherwise.
 */
int
vnode_iterate_reloadq(mount_t mp)
{
	int moved = 0;

	/* add the remaining entries in workerq to the end of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		struct vnode * mvp;
		mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);

		/* Joining the workerque entities to mount vnode list */
		if (mvp) {
			mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
		} else {
			mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
		}
		mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
		mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
		TAILQ_INIT(&mp->mnt_workerqueue);
	}

	/* add the newvnodes to the head of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
		struct vnode * nlvp;
		nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);

		mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
		nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
		if (mp->mnt_vnodelist.tqh_first) {
			mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
		} else {
			mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
		}
		mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
		TAILQ_INIT(&mp->mnt_newvnodes);
		moved = 1;
	}

	return moved;
}
704 
705 
/* Clear the iteration-in-progress flag set by vnode_iterate_setup().
 * Caller holds the mount lock. */
void
vnode_iterate_clear(mount_t mp)
{
	mp->mnt_lflag &= ~MNT_LITER;
}
711 
#if defined(__x86_64__)

#include <i386/panic_hooks.h>

/* Context recorded so a panic during vnode_iterate() can dump the
 * mount and current vnode being visited. */
struct vnode_iterate_panic_hook {
	panic_hook_t hook;
	mount_t mp;
	struct vnode *vp;
};

/*
 * Panic callback: append the mount/vnode pointers (with their physical
 * ranges when resolvable) to the panic log, then dump the memory
 * surrounding the mount structure.
 *
 * NOTE(review): phys and the range fields are integers but are printed
 * with %p; this relies on LP64 varargs width matching — confirm before
 * changing the format strings.
 */
static void
vnode_iterate_panic_hook(panic_hook_t *hook_)
{
	struct vnode_iterate_panic_hook *hook = (struct vnode_iterate_panic_hook *)hook_;
	panic_phys_range_t range;
	uint64_t phys;

	if (panic_phys_range_before(hook->mp, &phys, &range)) {
		paniclog_append_noflush("mp = %p, phys = %p, prev (%p: %p-%p)\n",
		    hook->mp, phys, range.type, range.phys_start,
		    range.phys_start + range.len);
	} else {
		paniclog_append_noflush("mp = %p, phys = %p, prev (!)\n", hook->mp, phys);
	}

	if (panic_phys_range_before(hook->vp, &phys, &range)) {
		paniclog_append_noflush("vp = %p, phys = %p, prev (%p: %p-%p)\n",
		    hook->vp, phys, range.type, range.phys_start,
		    range.phys_start + range.len);
	} else {
		paniclog_append_noflush("vp = %p, phys = %p, prev (!)\n", hook->vp, phys);
	}
	/* Dump one page before the mount structure through two pages after. */
	panic_dump_mem((void *)(((vm_offset_t)hook->mp - 4096) & ~4095), 12288);
}
#endif /* defined(__x86_64__) */
747 
/*
 * Visit every vnode on mount mp, invoking "callout(vp, arg)" with an
 * iocount held on vp.
 *
 * flags are passed through to vget_internal() (plus VNODE_NODEAD |
 * VNODE_WITHID | VNODE_NOSUSPEND); VNODE_RELOAD additionally casts out
 * inactive vnodes via vnode_reload().  The callout's return value
 * steers the loop: VNODE_RETURNED[_DONE] means the iocount should be
 * released here, *_DONE variants stop the iteration, VNODE_CLAIMED
 * means the callout kept the reference.
 *
 * Returns 0.  The mount lock is dropped around the callout; vnodes are
 * moved from the worker queue back to mnt_vnodelist as they are visited.
 */
int
vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
    void *arg)
{
	struct vnode *vp;
	int vid, retval;
	int ret = 0;

	/*
	 * The mount iterate mutex is held for the duration of the iteration.
	 * This can be done by a state flag on the mount structure but we can
	 * run into priority inversion issues sometimes.
	 * Using a mutex allows us to benefit from the priority donation
	 * mechanisms in the kernel for locks. This mutex should never be
	 * acquired in spin mode and it should be acquired before attempting to
	 * acquire the mount lock.
	 */
	mount_iterate_lock(mp);

	mount_lock(mp);

	vnode_iterate_setup(mp);

	/* If it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		mount_iterate_unlock(mp);
		return ret;
	}

#if defined(__x86_64__)
	struct vnode_iterate_panic_hook hook;
	hook.mp = mp;
	hook.vp = NULL;
	panic_hook(&hook.hook, vnode_iterate_panic_hook);
#endif
	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
#if defined(__x86_64__)
		hook.vp = vp;
#endif
		/* Move vp back onto the regular list before visiting it. */
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
		vid = vp->v_id;
		/* Skip vnodes with no fs state, dead vnodes, or ones that
		 * migrated off this mount since being queued. */
		if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
			continue;
		}
		/* Holdcount keeps vp's memory valid while the mount lock is
		 * dropped; the captured vid detects recycling in vget. */
		vnode_hold(vp);
		mount_unlock(mp);

		if (vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) {
			mount_lock(mp);
			vnode_drop(vp);
			continue;
		}
		vnode_drop(vp);
		if (flags & VNODE_RELOAD) {
			/*
			 * we're reloading the filesystem
			 * cast out any inactive vnodes...
			 */
			if (vnode_reload(vp)) {
				/* vnode will be recycled on the refcount drop */
				vnode_put(vp);
				mount_lock(mp);
				continue;
			}
		}

		retval = callout(vp, arg);

		switch (retval) {
		case VNODE_RETURNED:
		case VNODE_RETURNED_DONE:
			vnode_put(vp);
			if (retval == VNODE_RETURNED_DONE) {
				mount_lock(mp);
				ret = 0;
				goto out;
			}
			break;

		case VNODE_CLAIMED_DONE:
			mount_lock(mp);
			ret = 0;
			goto out;
		case VNODE_CLAIMED:
		default:
			break;
		}
		mount_lock(mp);
	}

out:
#if defined(__x86_64__)
	panic_unhook(&hook.hook);
#endif
	/* Restore any unvisited vnodes and new arrivals to mnt_vnodelist. */
	(void)vnode_iterate_reloadq(mp);
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	mount_iterate_unlock(mp);
	return ret;
}
855 
/* Serialize rename operations on mp (mnt_renamelock). */
void
mount_lock_renames(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_renamelock);
}

void
mount_unlock_renames(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_renamelock);
}

/* Serialize vnode_iterate() callers on mp (mnt_iter_lock); see the
 * priority-donation rationale in vnode_iterate(). */
void
mount_iterate_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_iter_lock);
}

void
mount_iterate_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_iter_lock);
}

/* General mount-structure mutex (mnt_mlock); spin variant for short
 * critical sections. */
void
mount_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_mlock);
}

void
mount_lock_spin(mount_t mp)
{
	lck_mtx_lock_spin(&mp->mnt_mlock);
}

void
mount_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_mlock);
}
897 
898 
899 void
mount_ref(mount_t mp,int locked)900 mount_ref(mount_t mp, int locked)
901 {
902 	if (!locked) {
903 		mount_lock_spin(mp);
904 	}
905 
906 	mp->mnt_count++;
907 
908 	if (!locked) {
909 		mount_unlock(mp);
910 	}
911 }
912 
913 
914 void
mount_drop(mount_t mp,int locked)915 mount_drop(mount_t mp, int locked)
916 {
917 	if (!locked) {
918 		mount_lock_spin(mp);
919 	}
920 
921 	mp->mnt_count--;
922 
923 	if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN)) {
924 		wakeup(&mp->mnt_lflag);
925 	}
926 
927 	if (!locked) {
928 		mount_unlock(mp);
929 	}
930 }
931 
932 
933 int
mount_iterref(mount_t mp,int locked)934 mount_iterref(mount_t mp, int locked)
935 {
936 	int retval = 0;
937 
938 	if (!locked) {
939 		mount_list_lock();
940 	}
941 	if (mp->mnt_iterref < 0) {
942 		retval = 1;
943 	} else {
944 		mp->mnt_iterref++;
945 	}
946 	if (!locked) {
947 		mount_list_unlock();
948 	}
949 	return retval;
950 }
951 
952 int
mount_isdrained(mount_t mp,int locked)953 mount_isdrained(mount_t mp, int locked)
954 {
955 	int retval;
956 
957 	if (!locked) {
958 		mount_list_lock();
959 	}
960 	if (mp->mnt_iterref < 0) {
961 		retval = 1;
962 	} else {
963 		retval = 0;
964 	}
965 	if (!locked) {
966 		mount_list_unlock();
967 	}
968 	return retval;
969 }
970 
/* Drop an iteration reference and wake a thread blocked in
 * mount_iterdrain() waiting for the count to reach zero. */
void
mount_iterdrop(mount_t mp)
{
	mount_list_lock();
	mp->mnt_iterref--;
	wakeup(&mp->mnt_iterref);
	mount_list_unlock();
}
979 
/* Wait for all iteration references on mp to drain, then park
 * mnt_iterref at -1 so mount_iterref() fails until mount_iterreset(). */
void
mount_iterdrain(mount_t mp)
{
	mount_list_lock();
	while (mp->mnt_iterref) {
		msleep((caddr_t)&mp->mnt_iterref, &mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
	}
	/* mount iterations drained */
	mp->mnt_iterref = -1;
	mount_list_unlock();
}
/* Undo mount_iterdrain(): allow new iteration references again. */
void
mount_iterreset(mount_t mp)
{
	mount_list_lock();
	if (mp->mnt_iterref == -1) {
		mp->mnt_iterref = 0;
	}
	mount_list_unlock();
}
1000 
/*
 * Wait for all mnt_count references on mp to be dropped (woken by
 * mount_drop()), then verify no vnodes remain attached.
 *
 * always called with mount lock held; not re-entrant (panics if a
 * drain is already in progress).  Returns 0.
 */
int
mount_refdrain(mount_t mp)
{
	if (mp->mnt_lflag & MNT_LDRAIN) {
		panic("already in drain");
	}
	mp->mnt_lflag |= MNT_LDRAIN;

	while (mp->mnt_count) {
		msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);
	}

	if (mp->mnt_vnodelist.tqh_first != NULL) {
		panic("mount_refdrain: dangling vnode");
	}

	mp->mnt_lflag &= ~MNT_LDRAIN;

	return 0;
}
1022 
/* Tags the mount point as not supporting extended readdir for NFS exports */
void
mount_set_noreaddirext(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_DENY_READDIREXT;
	mount_unlock(mp);
}
1031 
1032 /*
1033  * Mark a mount point as busy. Used to synchronize access and to delay
1034  * unmounting.
1035  */
1036 int
vfs_busy(mount_t mp,int flags)1037 vfs_busy(mount_t mp, int flags)
1038 {
1039 restart:
1040 	if (mp->mnt_lflag & MNT_LDEAD) {
1041 		return ENOENT;
1042 	}
1043 
1044 	mount_lock(mp);
1045 
1046 	if (mp->mnt_lflag & MNT_LUNMOUNT) {
1047 		if (flags & LK_NOWAIT || mp->mnt_lflag & MNT_LDEAD) {
1048 			mount_unlock(mp);
1049 			return ENOENT;
1050 		}
1051 
1052 		/*
1053 		 * Since all busy locks are shared except the exclusive
1054 		 * lock granted when unmounting, the only place that a
1055 		 * wakeup needs to be done is at the release of the
1056 		 * exclusive lock at the end of dounmount.
1057 		 */
1058 		mp->mnt_lflag |= MNT_LWAIT;
1059 		msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
1060 		return ENOENT;
1061 	}
1062 
1063 	mount_unlock(mp);
1064 
1065 	lck_rw_lock_shared(&mp->mnt_rwlock);
1066 
1067 	/*
1068 	 * Until we are granted the rwlock, it's possible for the mount point to
1069 	 * change state, so re-evaluate before granting the vfs_busy.
1070 	 */
1071 	if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
1072 		lck_rw_done(&mp->mnt_rwlock);
1073 		goto restart;
1074 	}
1075 	return 0;
1076 }
1077 
1078 /*
1079  * Free a busy filesystem.
1080  */
1081 void
vfs_unbusy(mount_t mp)1082 vfs_unbusy(mount_t mp)
1083 {
1084 	lck_rw_done(&mp->mnt_rwlock);
1085 }
1086 
1087 
1088 
/*
 * Tear down a mount allocated by vfs_rootmountalloc_internal() after a
 * failed attempt to mount the root filesystem: drop the vfstable
 * reference, release the vfs_busy hold, and free the mount structure.
 */
static void
vfs_rootmountfailed(mount_t mp)
{
	mount_list_lock();
	mp->mnt_vtable->vfc_refcount--;
	mount_list_unlock();

	vfs_unbusy(mp);

	/* let concurrent SMR readers drain before the mount is freed */
	if (nc_smr_enabled) {
		vfs_smr_synchronize();
	}

	mount_lock_destroy(mp);

#if CONFIG_MACF
	mac_mount_label_destroy(mp);
#endif

	zfree(mount_zone, mp);
}
1110 
1111 /*
1112  * Lookup a filesystem type, and if found allocate and initialize
1113  * a mount structure for it.
1114  *
1115  * Devname is usually updated by mount(8) after booting.
1116  */
1117 static mount_t
vfs_rootmountalloc_internal(struct vfstable * vfsp,const char * devname)1118 vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
1119 {
1120 	mount_t mp;
1121 
1122 	mp = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO);
1123 	/* Initialize the default IO constraints */
1124 	mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
1125 	mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
1126 	mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
1127 	mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
1128 	mp->mnt_devblocksize = DEV_BSIZE;
1129 	mp->mnt_alignmentmask = PAGE_MASK;
1130 	mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
1131 	mp->mnt_ioscale = 1;
1132 	mp->mnt_ioflags = 0;
1133 	mp->mnt_realrootvp = NULLVP;
1134 	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
1135 	mp->mnt_throttle_mask = LOWPRI_MAX_NUM_DEV - 1;
1136 	mp->mnt_devbsdunit = 0;
1137 
1138 	mount_lock_init(mp);
1139 	(void)vfs_busy(mp, LK_NOWAIT);
1140 
1141 	TAILQ_INIT(&mp->mnt_vnodelist);
1142 	TAILQ_INIT(&mp->mnt_workerqueue);
1143 	TAILQ_INIT(&mp->mnt_newvnodes);
1144 
1145 	mp->mnt_vtable = vfsp;
1146 	mp->mnt_op = vfsp->vfc_vfsops;
1147 	mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
1148 	mp->mnt_vnodecovered = NULLVP;
1149 	//mp->mnt_stat.f_type = vfsp->vfc_typenum;
1150 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
1151 
1152 	mount_list_lock();
1153 	vfsp->vfc_refcount++;
1154 	mount_list_unlock();
1155 
1156 	strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
1157 	mp->mnt_vfsstat.f_mntonname[0] = '/';
1158 	/* XXX const poisoning layering violation */
1159 	(void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);
1160 
1161 #if CONFIG_MACF
1162 	mac_mount_label_init(mp);
1163 	mac_mount_label_associate(vfs_context_kernel(), mp);
1164 #endif
1165 	return mp;
1166 }
1167 
1168 errno_t
vfs_rootmountalloc(const char * fstypename,const char * devname,mount_t * mpp)1169 vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
1170 {
1171 	struct vfstable *vfsp;
1172 
1173 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
1174 		if (!strncmp(vfsp->vfc_name, fstypename,
1175 		    sizeof(vfsp->vfc_name))) {
1176 			break;
1177 		}
1178 	}
1179 	if (vfsp == NULL) {
1180 		return ENODEV;
1181 	}
1182 
1183 	*mpp = vfs_rootmountalloc_internal(vfsp, devname);
1184 
1185 	if (*mpp) {
1186 		return 0;
1187 	}
1188 
1189 	return ENOMEM;
1190 }
1191 
1192 #define DBG_MOUNTROOT (FSDBG_CODE(DBG_MOUNT, 0))
1193 
1194 /*
1195  * Find an appropriate filesystem to use for the root. If a filesystem
1196  * has not been preselected, walk through the list of known filesystems
1197  * trying those that have mountroot routines, and try them until one
1198  * works or we have tried them all.
1199  */
1200 extern int (*mountroot)(void);
1201 
int
vfs_mountroot(void)
{
#if CONFIG_MACF
	struct vnode *vp;
#endif
	struct vfstable *vfsp;
	vfs_context_t ctx = vfs_context_kernel();
	struct vfs_attr vfsattr;
	int     error;
	mount_t mp;
	vnode_t bdevvp_rootvp;

	/*
	 * Reset any prior "unmounting everything" state.  This handles the
	 * situation where mount root and then unmountall and re-mountroot
	 * a new image (see bsd/kern/imageboot.c).
	 */
	vfs_unmountall_started = vfs_unmountall_finished = 0;
	OSMemoryBarrier();

	KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_START);
	if (mountroot != NULL) {
		/*
		 * used for netboot which follows a different set of rules
		 */
		error = (*mountroot)();

		KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 0);
		return error;
	}
	if ((error = bdevvp(rootdev, &rootvp))) {
		printf("vfs_mountroot: can't setup bdevvp\n");

		KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 1);
		return error;
	}
	/*
	 * 4951998 - code we call in vfc_mountroot may replace rootvp
	 * so keep a local copy for some house keeping.
	 */
	bdevvp_rootvp = rootvp;

	/* Try each filesystem type that claims it can mount a root volume. */
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL
		    && !ISSET(vfsp->vfc_vfsflags, VFC_VFSCANMOUNTROOT)) {
			continue;
		}

		mp = vfs_rootmountalloc_internal(vfsp, "root_device");
		mp->mnt_devvp = rootvp;

		if (vfsp->vfc_mountroot) {
			error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx);
		} else {
			error = VFS_MOUNT(mp, rootvp, 0, ctx);
		}

		if (!error) {
			if (bdevvp_rootvp != rootvp) {
				/*
				 * rootvp changed...
				 *   bump the iocount and fix up mnt_devvp for the
				 *   new rootvp (it will already have a usecount taken)...
				 *   drop the iocount and the usecount on the original
				 *   since we are no longer going to use it...
				 */
				vnode_getwithref(rootvp);
				mp->mnt_devvp = rootvp;

				vnode_rele(bdevvp_rootvp);
				vnode_put(bdevvp_rootvp);
			}
			mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;

			vfs_unbusy(mp);

			mount_list_add(mp);

			/*
			 *   cache the IO attributes for the underlying physical media...
			 *   an error return indicates the underlying driver doesn't
			 *   support all the queries necessary... however, reasonable
			 *   defaults will have been set, so no reason to bail or care
			 */
			vfs_init_io_attributes(rootvp, mp);

			if (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) {
				root_is_CF_drive = TRUE;
			}

			/*
			 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
			 */
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
				mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
			}
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
				mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
			}

#if defined(XNU_TARGET_OS_OSX)
			uint32_t speed;

			/* pick a boot-progress animation speed based on media type */
			if (MNTK_VIRTUALDEV & mp->mnt_kern_flag) {
				speed = 128;
			} else if (disk_conditioner_mount_is_ssd(mp)) {
				speed = 7 * 256;
			} else {
				speed = 256;
			}
			vc_progress_setdiskspeed(speed);
#endif /* XNU_TARGET_OS_OSX */
			/*
			 * Probe root file system for additional features.
			 */
			(void)VFS_START(mp, 0, ctx);

			VFSATTR_INIT(&vfsattr);
			VFSATTR_WANTED(&vfsattr, f_capabilities);
			if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
			    VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
					mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
				}
#if NAMEDSTREAMS
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
					mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
				}
#endif
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
					mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
				}

				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) {
					mp->mnt_kern_flag |= MNTK_DIR_HARDLINKS;
				}
			}

			/*
			 * get rid of iocount reference returned
			 * by bdevvp (or picked up by us on the substituted
			 * rootvp)... it (or we) will have also taken
			 * a usecount reference which we want to keep
			 */
			vnode_put(rootvp);

#if CONFIG_MACF
			if ((vfs_flags(mp) & MNT_MULTILABEL) == 0) {
				KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 2);
				return 0;
			}

			error = VFS_ROOT(mp, &vp, ctx);
			if (error) {
				printf("%s() VFS_ROOT() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
			error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
			/*
			 * get rid of reference provided by VFS_ROOT
			 */
			vnode_put(vp);

			if (error) {
				printf("%s() vnode_label() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
#endif
			KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 3);
			return 0;
		}
		vfs_rootmountfailed(mp);
#if CONFIG_MACF
fail:
#endif
		if (error != EINVAL) {
			printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
		}
	}
	KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error ? error : ENODEV, 4);
	return ENODEV;
}
1393 
/*
 * vfs_iterate() callback: purge the name cache entries for the mount.
 */
static int
cache_purge_callback(mount_t mp, __unused void * arg)
{
	cache_purgevfs(mp);
	return VFS_RETURNED;
}
1400 
1401 extern lck_rw_t rootvnode_rw_lock;
1402 extern void set_rootvnode(vnode_t);
1403 
1404 
/*
 * vfs_iterate() callback used after the root pivot in vfs_switch_root():
 * recompute f_mntonname for every mount other than "/" and "/dev",
 * whose mount-on paths may have moved with the old root volume.
 * Returns 0 on success (or for exempt mounts), -1 if the mount could
 * not be busied, or the vn_getpath_ext() error.
 */
static int
mntonname_fixup_callback(mount_t mp, __unused void *arg)
{
	int error = 0;

	/* "/" and "/dev" stay at fixed absolute paths; nothing to fix */
	if ((strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/", sizeof("/")) == 0) ||
	    (strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/dev", sizeof("/dev")) == 0)) {
		return 0;
	}

	if ((error = vfs_busy(mp, LK_NOWAIT))) {
		printf("vfs_busy failed with %d for %s\n", error, mp->mnt_vfsstat.f_mntonname);
		return -1;
	}

	size_t pathlen = MAXPATHLEN;
	/* rebuild the mount-on name from the covered vnode's current path */
	if ((error = vn_getpath_ext(mp->mnt_vnodecovered, NULL, mp->mnt_vfsstat.f_mntonname, &pathlen, VN_GETPATH_FSENTER))) {
		printf("vn_getpath_ext failed with %d for mnt_vnodecovered of %s\n", error, mp->mnt_vfsstat.f_mntonname);
	}

	vfs_unbusy(mp);

	return error;
}
1429 
/*
 * vfs_iterate() callback: clear MNTK_BACKS_ROOT on the mount.  Used by
 * vfs_switch_root(); the caller re-establishes the flag if appropriate.
 */
static int
clear_mntk_backs_root_callback(mount_t mp, __unused void *arg)
{
	lck_rw_lock_exclusive(&mp->mnt_rwlock);
	mp->mnt_kern_flag &= ~MNTK_BACKS_ROOT;
	lck_rw_done(&mp->mnt_rwlock);
	return VFS_RETURNED;
}
1438 
1439 static int
verify_incoming_rootfs(vnode_t * incoming_rootvnodep,vfs_context_t ctx,vfs_switch_root_flags_t flags)1440 verify_incoming_rootfs(vnode_t *incoming_rootvnodep, vfs_context_t ctx,
1441     vfs_switch_root_flags_t flags)
1442 {
1443 	mount_t mp;
1444 	vnode_t tdp;
1445 	vnode_t incoming_rootvnode_with_iocount = *incoming_rootvnodep;
1446 	vnode_t incoming_rootvnode_with_usecount = NULLVP;
1447 	int error = 0;
1448 
1449 	if (vnode_vtype(incoming_rootvnode_with_iocount) != VDIR) {
1450 		printf("Incoming rootfs path not a directory\n");
1451 		error = ENOTDIR;
1452 		goto done;
1453 	}
1454 
1455 	/*
1456 	 * Before we call VFS_ROOT, we have to let go of the iocount already
1457 	 * acquired, but before doing that get a usecount.
1458 	 */
1459 	vnode_ref_ext(incoming_rootvnode_with_iocount, 0, VNODE_REF_FORCE);
1460 	incoming_rootvnode_with_usecount = incoming_rootvnode_with_iocount;
1461 	vnode_lock_spin(incoming_rootvnode_with_usecount);
1462 	if ((mp = incoming_rootvnode_with_usecount->v_mount)) {
1463 		mp->mnt_crossref++;
1464 		vnode_unlock(incoming_rootvnode_with_usecount);
1465 	} else {
1466 		vnode_unlock(incoming_rootvnode_with_usecount);
1467 		printf("Incoming rootfs root vnode does not have associated mount\n");
1468 		error = ENOTDIR;
1469 		goto done;
1470 	}
1471 
1472 	if (vfs_busy(mp, LK_NOWAIT)) {
1473 		printf("Incoming rootfs root vnode mount is busy\n");
1474 		error = ENOENT;
1475 		goto out;
1476 	}
1477 
1478 	vnode_put(incoming_rootvnode_with_iocount);
1479 	incoming_rootvnode_with_iocount = NULLVP;
1480 
1481 	error = VFS_ROOT(mp, &tdp, ctx);
1482 
1483 	if (error) {
1484 		printf("Could not get rootvnode of incoming rootfs\n");
1485 	} else if (tdp != incoming_rootvnode_with_usecount) {
1486 		vnode_put(tdp);
1487 		tdp = NULLVP;
1488 		printf("Incoming rootfs root vnode mount is is not a mountpoint\n");
1489 		error = EINVAL;
1490 		goto out_busy;
1491 	} else {
1492 		incoming_rootvnode_with_iocount = tdp;
1493 		tdp = NULLVP;
1494 	}
1495 
1496 	if ((flags & VFSSR_VIRTUALDEV_PROHIBITED) != 0) {
1497 		if (mp->mnt_kern_flag & MNTK_VIRTUALDEV) {
1498 			error = ENODEV;
1499 		}
1500 		if (error) {
1501 			printf("Incoming rootfs is backed by a virtual device; cannot switch to it");
1502 			goto out_busy;
1503 		}
1504 	}
1505 
1506 out_busy:
1507 	vfs_unbusy(mp);
1508 
1509 out:
1510 	vnode_lock(incoming_rootvnode_with_usecount);
1511 	mp->mnt_crossref--;
1512 	if (mp->mnt_crossref < 0) {
1513 		panic("mount cross refs -ve");
1514 	}
1515 	vnode_unlock(incoming_rootvnode_with_usecount);
1516 
1517 done:
1518 	if (incoming_rootvnode_with_usecount) {
1519 		vnode_rele(incoming_rootvnode_with_usecount);
1520 		incoming_rootvnode_with_usecount = NULLVP;
1521 	}
1522 
1523 	if (error && incoming_rootvnode_with_iocount) {
1524 		vnode_put(incoming_rootvnode_with_iocount);
1525 		incoming_rootvnode_with_iocount = NULLVP;
1526 	}
1527 
1528 	*incoming_rootvnodep = incoming_rootvnode_with_iocount;
1529 	return error;
1530 }
1531 
1532 /*
1533  * vfs_switch_root()
1534  *
1535  * Move the current root volume, and put a different volume at the root.
1536  *
1537  * incoming_vol_old_path: This is the path where the incoming root volume
1538  *	is mounted when this function begins.
1539  * outgoing_vol_new_path: This is the path where the outgoing root volume
1540  *	will be mounted when this function (successfully) ends.
1541  *	Note: Do not use a leading slash.
1542  *
1543  * Volumes mounted at several fixed points (including /dev) will be preserved
1544  * at the same absolute path. That means they will move within the folder
1545  * hierarchy during the pivot operation. For example, /dev before the pivot
1546  * will be at /dev after the pivot.
1547  *
1548  * If any filesystem has MNTK_BACKS_ROOT set, it will be cleared. If the
1549  * incoming root volume is actually a disk image backed by some other
1550  * filesystem, it is the caller's responsibility to re-set MNTK_BACKS_ROOT
1551  * as appropriate.
1552  */
1553 int
vfs_switch_root(const char * incoming_vol_old_path,const char * outgoing_vol_new_path,vfs_switch_root_flags_t flags)1554 vfs_switch_root(const char *incoming_vol_old_path,
1555     const char *outgoing_vol_new_path,
1556     vfs_switch_root_flags_t flags)
1557 {
1558 	// grumble grumble
1559 #define countof(x) (sizeof(x) / sizeof(x[0]))
1560 
1561 	struct preserved_mount {
1562 		vnode_t pm_rootvnode;
1563 		mount_t pm_mount;
1564 		vnode_t pm_new_covered_vp;
1565 		vnode_t pm_old_covered_vp;
1566 		const char *pm_path;
1567 	};
1568 
1569 	vfs_context_t ctx = vfs_context_kernel();
1570 	vnode_t incoming_rootvnode = NULLVP;
1571 	vnode_t outgoing_vol_new_covered_vp = NULLVP;
1572 	vnode_t incoming_vol_old_covered_vp = NULLVP;
1573 	mount_t outgoing = NULL;
1574 	mount_t incoming = NULL;
1575 
1576 	struct preserved_mount devfs = { NULLVP, NULL, NULLVP, NULLVP, "dev" };
1577 	struct preserved_mount preboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Preboot" };
1578 	struct preserved_mount recovery = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Recovery" };
1579 	struct preserved_mount vm = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/VM" };
1580 	struct preserved_mount update = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Update" };
1581 	struct preserved_mount iscPreboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/iSCPreboot" };
1582 	struct preserved_mount hardware = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Hardware" };
1583 	struct preserved_mount xarts = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/xarts" };
1584 	struct preserved_mount factorylogs = { NULLVP, NULL, NULLVP, NULLVP, "FactoryLogs" };
1585 	struct preserved_mount idiags = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Diags" };
1586 
1587 	struct preserved_mount *preserved[10];
1588 	preserved[0] = &devfs;
1589 	preserved[1] = &preboot;
1590 	preserved[2] = &recovery;
1591 	preserved[3] = &vm;
1592 	preserved[4] = &update;
1593 	preserved[5] = &iscPreboot;
1594 	preserved[6] = &hardware;
1595 	preserved[7] = &xarts;
1596 	preserved[8] = &factorylogs;
1597 	preserved[9] = &idiags;
1598 
1599 	int error;
1600 
1601 	printf("%s : shuffling mount points : %s <-> / <-> %s\n", __FUNCTION__, incoming_vol_old_path, outgoing_vol_new_path);
1602 
1603 	if (outgoing_vol_new_path[0] == '/') {
1604 		// I should have written this to be more helpful and just advance the pointer forward past the slash
1605 		printf("Do not use a leading slash in outgoing_vol_new_path\n");
1606 		return EINVAL;
1607 	}
1608 
1609 	// Set incoming_rootvnode.
1610 	// Find the vnode representing the mountpoint of the new root
1611 	// filesystem. That will be the new root directory.
1612 	error = vnode_lookup(incoming_vol_old_path, 0, &incoming_rootvnode, ctx);
1613 	if (error) {
1614 		printf("Incoming rootfs root vnode not found\n");
1615 		error = ENOENT;
1616 		goto done;
1617 	}
1618 
1619 	/*
1620 	 * This function drops the icoount and sets the vnode to NULL on error.
1621 	 */
1622 	error = verify_incoming_rootfs(&incoming_rootvnode, ctx, flags);
1623 	if (error) {
1624 		goto done;
1625 	}
1626 
1627 	/*
1628 	 * Set outgoing_vol_new_covered_vp.
1629 	 * Find the vnode representing the future mountpoint of the old
1630 	 * root filesystem, inside the directory incoming_rootvnode.
1631 	 * Right now it's at "/incoming_vol_old_path/outgoing_vol_new_path".
1632 	 * soon it will become "/oldrootfs_path_after", which will be covered.
1633 	 */
1634 	error = vnode_lookupat(outgoing_vol_new_path, 0, &outgoing_vol_new_covered_vp, ctx, incoming_rootvnode);
1635 	if (error) {
1636 		printf("Outgoing rootfs path not found, abandoning / switch, error = %d\n", error);
1637 		error = ENOENT;
1638 		goto done;
1639 	}
1640 	if (vnode_vtype(outgoing_vol_new_covered_vp) != VDIR) {
1641 		printf("Outgoing rootfs path is not a directory, abandoning / switch\n");
1642 		error = ENOTDIR;
1643 		goto done;
1644 	}
1645 
1646 	/*
1647 	 * Find the preserved mounts - see if they are mounted. Get their root
1648 	 * vnode if they are. If they aren't, leave rootvnode NULL which will
1649 	 * be the signal to ignore this mount later on.
1650 	 *
1651 	 * Also get preserved mounts' new_covered_vp.
1652 	 * Find the node representing the folder "dev" inside the directory newrootvnode.
1653 	 * Right now it's at "/incoming_vol_old_path/dev".
1654 	 * Soon it will become /dev, which will be covered by the devfs mountpoint.
1655 	 */
1656 	for (size_t i = 0; i < countof(preserved); i++) {
1657 		struct preserved_mount *pmi = preserved[i];
1658 
1659 		error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_rootvnode, ctx, rootvnode);
1660 		if (error) {
1661 			printf("skipping preserved mountpoint because not found or error: %d: %s\n", error, pmi->pm_path);
1662 			// not fatal. try the next one in the list.
1663 			continue;
1664 		}
1665 		bool is_mountpoint = false;
1666 		vnode_lock_spin(pmi->pm_rootvnode);
1667 		if ((pmi->pm_rootvnode->v_flag & VROOT) != 0) {
1668 			is_mountpoint = true;
1669 		}
1670 		vnode_unlock(pmi->pm_rootvnode);
1671 		if (!is_mountpoint) {
1672 			printf("skipping preserved mountpoint because not a mountpoint: %s\n", pmi->pm_path);
1673 			vnode_put(pmi->pm_rootvnode);
1674 			pmi->pm_rootvnode = NULLVP;
1675 			// not fatal. try the next one in the list.
1676 			continue;
1677 		}
1678 
1679 		error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_new_covered_vp, ctx, incoming_rootvnode);
1680 		if (error) {
1681 			printf("preserved new mount directory not found or error: %d: %s\n", error, pmi->pm_path);
1682 			error = ENOENT;
1683 			goto done;
1684 		}
1685 		if (vnode_vtype(pmi->pm_new_covered_vp) != VDIR) {
1686 			printf("preserved new mount directory not directory: %s\n", pmi->pm_path);
1687 			error = ENOTDIR;
1688 			goto done;
1689 		}
1690 
1691 		printf("will preserve mountpoint across pivot: /%s\n", pmi->pm_path);
1692 	}
1693 
1694 	/*
1695 	 * --
1696 	 * At this point, everything has been prepared and all error conditions
1697 	 * have been checked. We check everything we can before this point;
1698 	 * from now on we start making destructive changes, and we can't stop
1699 	 * until we reach the end.
1700 	 * ----
1701 	 */
1702 
1703 	/* this usecount is transferred to the mnt_vnodecovered */
1704 	vnode_ref_ext(outgoing_vol_new_covered_vp, 0, VNODE_REF_FORCE);
1705 	/* this usecount is transferred to set_rootvnode */
1706 	vnode_ref_ext(incoming_rootvnode, 0, VNODE_REF_FORCE);
1707 
1708 
1709 	for (size_t i = 0; i < countof(preserved); i++) {
1710 		struct preserved_mount *pmi = preserved[i];
1711 		if (pmi->pm_rootvnode == NULLVP) {
1712 			continue;
1713 		}
1714 
1715 		/* this usecount is transferred to the mnt_vnodecovered */
1716 		vnode_ref_ext(pmi->pm_new_covered_vp, 0, VNODE_REF_FORCE);
1717 
1718 		/* The new_covered_vp is a mountpoint from now on. */
1719 		vnode_lock_spin(pmi->pm_new_covered_vp);
1720 		pmi->pm_new_covered_vp->v_flag |= VMOUNTEDHERE;
1721 		vnode_unlock(pmi->pm_new_covered_vp);
1722 	}
1723 
1724 	/* The outgoing_vol_new_covered_vp is a mountpoint from now on. */
1725 	vnode_lock_spin(outgoing_vol_new_covered_vp);
1726 	outgoing_vol_new_covered_vp->v_flag |= VMOUNTEDHERE;
1727 	vnode_unlock(outgoing_vol_new_covered_vp);
1728 
1729 
1730 	/*
1731 	 * Identify the mount_ts of the mounted filesystems that are being
1732 	 * manipulated: outgoing rootfs, incoming rootfs, and the preserved
1733 	 * mounts.
1734 	 */
1735 	outgoing = rootvnode->v_mount;
1736 	incoming = incoming_rootvnode->v_mount;
1737 	for (size_t i = 0; i < countof(preserved); i++) {
1738 		struct preserved_mount *pmi = preserved[i];
1739 		if (pmi->pm_rootvnode == NULLVP) {
1740 			continue;
1741 		}
1742 
1743 		pmi->pm_mount = pmi->pm_rootvnode->v_mount;
1744 	}
1745 
1746 	lck_rw_lock_exclusive(&rootvnode_rw_lock);
1747 
1748 	/* Setup incoming as the new rootfs */
1749 	lck_rw_lock_exclusive(&incoming->mnt_rwlock);
1750 	incoming_vol_old_covered_vp = incoming->mnt_vnodecovered;
1751 	incoming->mnt_vnodecovered = NULLVP;
1752 	strlcpy(incoming->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
1753 	incoming->mnt_flag |= MNT_ROOTFS;
1754 	lck_rw_done(&incoming->mnt_rwlock);
1755 
1756 	/*
1757 	 * The preserved mountpoints will now be moved to
1758 	 * incoming_rootnode/pm_path, and then by the end of the function,
1759 	 * since incoming_rootnode is going to /, the preserved mounts
1760 	 * will be end up back at /pm_path
1761 	 */
1762 	for (size_t i = 0; i < countof(preserved); i++) {
1763 		struct preserved_mount *pmi = preserved[i];
1764 		if (pmi->pm_rootvnode == NULLVP) {
1765 			continue;
1766 		}
1767 
1768 		lck_rw_lock_exclusive(&pmi->pm_mount->mnt_rwlock);
1769 		pmi->pm_old_covered_vp = pmi->pm_mount->mnt_vnodecovered;
1770 		pmi->pm_mount->mnt_vnodecovered = pmi->pm_new_covered_vp;
1771 		vnode_lock_spin(pmi->pm_new_covered_vp);
1772 		pmi->pm_new_covered_vp->v_mountedhere = pmi->pm_mount;
1773 		SET(pmi->pm_new_covered_vp->v_flag, VMOUNTEDHERE);
1774 		vnode_unlock(pmi->pm_new_covered_vp);
1775 		lck_rw_done(&pmi->pm_mount->mnt_rwlock);
1776 	}
1777 
1778 	/*
1779 	 * The old root volume now covers outgoing_vol_new_covered_vp
1780 	 * on the new root volume. Remove the ROOTFS marker.
1781 	 * Now it is to be found at outgoing_vol_new_path
1782 	 */
1783 	lck_rw_lock_exclusive(&outgoing->mnt_rwlock);
1784 	outgoing->mnt_vnodecovered = outgoing_vol_new_covered_vp;
1785 	strlcpy(outgoing->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
1786 	strlcat(outgoing->mnt_vfsstat.f_mntonname, outgoing_vol_new_path, MAXPATHLEN);
1787 	outgoing->mnt_flag &= ~MNT_ROOTFS;
1788 	vnode_lock_spin(outgoing_vol_new_covered_vp);
1789 	outgoing_vol_new_covered_vp->v_mountedhere = outgoing;
1790 	vnode_unlock(outgoing_vol_new_covered_vp);
1791 	lck_rw_done(&outgoing->mnt_rwlock);
1792 
1793 	if (!(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV) &&
1794 	    (TAILQ_FIRST(&mountlist) == outgoing)) {
1795 		vfs_setmntsystem(outgoing);
1796 	}
1797 
1798 	/*
1799 	 * Finally, remove the mount_t linkage from the previously covered
1800 	 * vnodes on the old root volume. These were incoming_vol_old_path,
1801 	 * and each preserved mounts's "/pm_path". The filesystems previously
1802 	 * mounted there have already been moved away.
1803 	 */
1804 	vnode_lock_spin(incoming_vol_old_covered_vp);
1805 	incoming_vol_old_covered_vp->v_flag &= ~VMOUNT;
1806 	incoming_vol_old_covered_vp->v_mountedhere = NULL;
1807 	vnode_unlock(incoming_vol_old_covered_vp);
1808 
1809 	for (size_t i = 0; i < countof(preserved); i++) {
1810 		struct preserved_mount *pmi = preserved[i];
1811 		if (pmi->pm_rootvnode == NULLVP) {
1812 			continue;
1813 		}
1814 
1815 		vnode_lock_spin(pmi->pm_old_covered_vp);
1816 		CLR(pmi->pm_old_covered_vp->v_flag, VMOUNTEDHERE);
1817 		pmi->pm_old_covered_vp->v_mountedhere = NULL;
1818 		vnode_unlock(pmi->pm_old_covered_vp);
1819 	}
1820 
1821 	/*
1822 	 * Clear the name cache since many cached names are now invalid.
1823 	 */
1824 	vfs_iterate(0 /* flags */, cache_purge_callback, NULL);
1825 
1826 	/*
1827 	 * Actually change the rootvnode! And finally drop the lock that
1828 	 * prevents concurrent vnode_lookups.
1829 	 */
1830 	set_rootvnode(incoming_rootvnode);
1831 	lck_rw_unlock_exclusive(&rootvnode_rw_lock);
1832 
1833 	if (!(incoming->mnt_kern_flag & MNTK_VIRTUALDEV) &&
1834 	    !(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV)) {
1835 		/*
1836 		 * Switch the order of mount structures in the mountlist, new root
1837 		 * mount moves to the head of the list followed by /dev and the other
1838 		 * preserved mounts then all the preexisting mounts (old rootfs + any
1839 		 * others)
1840 		 */
1841 		mount_list_lock();
1842 		for (size_t i = 0; i < countof(preserved); i++) {
1843 			struct preserved_mount *pmi = preserved[i];
1844 			if (pmi->pm_rootvnode == NULLVP) {
1845 				continue;
1846 			}
1847 
1848 			TAILQ_REMOVE(&mountlist, pmi->pm_mount, mnt_list);
1849 			TAILQ_INSERT_HEAD(&mountlist, pmi->pm_mount, mnt_list);
1850 		}
1851 		TAILQ_REMOVE(&mountlist, incoming, mnt_list);
1852 		TAILQ_INSERT_HEAD(&mountlist, incoming, mnt_list);
1853 		mount_list_unlock();
1854 	}
1855 
1856 	/*
1857 	 * Fixups across all volumes
1858 	 */
1859 	vfs_iterate(0 /* flags */, mntonname_fixup_callback, NULL);
1860 	vfs_iterate(0 /* flags */, clear_mntk_backs_root_callback, NULL);
1861 
1862 	error = 0;
1863 
1864 done:
1865 	for (size_t i = 0; i < countof(preserved); i++) {
1866 		struct preserved_mount *pmi = preserved[i];
1867 
1868 		if (pmi->pm_rootvnode) {
1869 			vnode_put(pmi->pm_rootvnode);
1870 		}
1871 		if (pmi->pm_new_covered_vp) {
1872 			vnode_put(pmi->pm_new_covered_vp);
1873 		}
1874 		if (pmi->pm_old_covered_vp) {
1875 			vnode_rele(pmi->pm_old_covered_vp);
1876 		}
1877 	}
1878 
1879 	if (outgoing_vol_new_covered_vp) {
1880 		vnode_put(outgoing_vol_new_covered_vp);
1881 	}
1882 
1883 	if (incoming_vol_old_covered_vp) {
1884 		vnode_rele(incoming_vol_old_covered_vp);
1885 	}
1886 
1887 	if (incoming_rootvnode) {
1888 		vnode_put(incoming_rootvnode);
1889 	}
1890 
1891 	printf("%s : done shuffling mount points with error: %d\n", __FUNCTION__, error);
1892 	return error;
1893 }
1894 
1895 /*
1896  * Mount the Recovery volume of a container
1897  */
1898 int
vfs_mount_recovery(void)1899 vfs_mount_recovery(void)
1900 {
1901 #if CONFIG_MOUNT_PREBOOTRECOVERY
1902 	int error = 0;
1903 
1904 	error = vnode_get(rootvnode);
1905 	if (error) {
1906 		/* root must be mounted first */
1907 		printf("vnode_get(rootvnode) failed with error %d\n", error);
1908 		return error;
1909 	}
1910 
1911 	char recoverypath[] = PLATFORM_RECOVERY_VOLUME_MOUNT_POINT; /* !const because of internal casting */
1912 
1913 	/* Mount the recovery volume */
1914 	printf("attempting kernel mount for recovery volume... \n");
1915 	error = kernel_mount(rootvnode->v_mount->mnt_vfsstat.f_fstypename, NULLVP, NULLVP,
1916 	    recoverypath, (rootvnode->v_mount), 0, 0, (KERNEL_MOUNT_RECOVERYVOL), vfs_context_kernel());
1917 
1918 	if (error) {
1919 		printf("Failed to mount recovery volume (%d)\n", error);
1920 	} else {
1921 		printf("mounted recovery volume\n");
1922 	}
1923 
1924 	vnode_put(rootvnode);
1925 	return error;
1926 #else
1927 	return 0;
1928 #endif
1929 }
1930 
1931 /*
1932  * Lookup a mount point by filesystem identifier.
1933  */
1934 
1935 struct mount *
vfs_getvfs(fsid_t * fsid)1936 vfs_getvfs(fsid_t *fsid)
1937 {
1938 	return mount_list_lookupby_fsid(fsid, 0, 0);
1939 }
1940 
/*
 * Lookup a mount point by fsid with the mount list lock already held
 * by the caller.
 */
static struct mount *
vfs_getvfs_locked(fsid_t *fsid)
{
	return mount_list_lookupby_fsid(fsid, 1, 0);
}
1946 
1947 struct mount *
vfs_getvfs_with_vfsops(fsid_t * fsid,const struct vfsops * const ops)1948 vfs_getvfs_with_vfsops(fsid_t *fsid, const struct vfsops * const ops)
1949 {
1950 	mount_t mp = mount_list_lookupby_fsid(fsid, 0, 0);
1951 
1952 	if (mp != NULL && mp->mnt_op != ops) {
1953 		mp = NULL;
1954 	}
1955 	return mp;
1956 }
1957 
1958 struct mount *
vfs_getvfs_by_mntonname(char * path)1959 vfs_getvfs_by_mntonname(char *path)
1960 {
1961 	mount_t retmp = (mount_t)0;
1962 	mount_t mp;
1963 
1964 	mount_list_lock();
1965 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1966 		if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
1967 		    sizeof(mp->mnt_vfsstat.f_mntonname))) {
1968 			retmp = mp;
1969 			if (mount_iterref(retmp, 1)) {
1970 				retmp = NULL;
1971 			}
1972 			goto out;
1973 		}
1974 	}
1975 out:
1976 	mount_list_unlock();
1977 	return retmp;
1978 }
1979 
1980 /* generation number for creation of new fsids */
1981 u_short mntid_gen = 0;
1982 /*
1983  * Get a new unique fsid
1984  */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;

	/* mount list lock also serializes mntid_gen updates */
	mount_list_lock();

	/* generate a new fsid */
	mtype = mp->mnt_vtable->vfc_typenum;
	/* mntid_gen may wrap; 0 is reserved, skip over it */
	if (++mntid_gen == 0) {
		mntid_gen++;
	}
	tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
	tfsid.val[1] = mtype;

	/* bump the generation until the candidate fsid is not already in use */
	while (vfs_getvfs_locked(&tfsid)) {
		if (++mntid_gen == 0) {
			mntid_gen++;
		}
		tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
	}

	mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
	mount_list_unlock();
}
2012 
2013 /*
2014  * Routines having to do with the management of the vnode table.
2015  */
2016 extern int(**dead_vnodeop_p)(void *);
2017 long numvnodes, freevnodes, deadvnodes, async_work_vnodes;
2018 long busyvnodes = 0;
2019 long deadvnodes_noreuse = 0;
2020 int32_t freeablevnodes = 0;
2021 uint64_t allocedvnodes = 0;
2022 uint64_t deallocedvnodes = 0;
2023 
2024 
2025 int async_work_timed_out = 0;
2026 int async_work_handled = 0;
2027 int dead_vnode_wanted = 0;
2028 int dead_vnode_waited = 0;
2029 
2030 /*
2031  * Move a vnode from one mount queue to another.
2032  */
static void
insmntque(vnode_t vp, mount_t mp)
{
	mount_t lmp;
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
		if ((vp->v_lflag & VNAMED_MOUNT) == 0) {
			panic("insmntque: vp not in mount vnode list");
		}
		vp->v_lflag &= ~VNAMED_MOUNT;

		mount_lock_spin(lmp);

		mount_drop(lmp, 1);

		if (vp->v_mntvnodes.tqe_next == NULL) {
			/*
			 * vp is the last element of whichever per-mount queue
			 * it is on; determine which queue and remove it with
			 * the full TAILQ_REMOVE so the tail pointer is fixed.
			 */
			if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
			} else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
			} else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
			}
		} else {
			/*
			 * Not the tail: unlink in place without needing to
			 * know which of the three queues vp is on.
			 */
			vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
			*vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
		}
		vp->v_mntvnodes.tqe_next = NULL;
		vp->v_mntvnodes.tqe_prev = NULL;
		mount_unlock(lmp);
		/* drop the holdcount taken when vp was added to the old mount */
		vnode_drop(vp);
		return;
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL) {
		mount_lock_spin(mp);
		if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0)) {
			panic("vp already in mount list");
		}
		/*
		 * While an iteration is in progress new vnodes go on
		 * mnt_newvnodes so the iterator sees a stable worklist.
		 */
		if (mp->mnt_lflag & MNT_LITER) {
			TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
		}
		if (vp->v_lflag & VNAMED_MOUNT) {
			panic("insmntque: vp already in mount vnode list");
		}
		/* membership on a mount list holds both the vnode and the mount */
		vnode_hold(vp);
		vp->v_lflag |= VNAMED_MOUNT;
		mount_ref(mp, 1);
		mount_unlock(mp);
	}
}
2091 
2092 
2093 /*
2094  * Create a vnode for a block device.
2095  * Used for root filesystem, argdev, and swap areas.
2096  * Also used for memory file system special devices.
2097  */
int
bdevvp(dev_t dev, vnode_t *vpp)
{
	vnode_t nvp;
	int     error;
	struct vnode_fsparam vfsp;
	struct vfs_context context;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return ENODEV;
	}

	context.vc_thread = current_thread();
	context.vc_ucred = FSCRED;

	/* build a mount-less VBLK vnode served by the spec vnode ops */
	vfsp.vnfs_mp = (struct mount *)0;
	vfsp.vnfs_vtype = VBLK;
	vfsp.vnfs_str = "bdevvp";
	vfsp.vnfs_dvp = NULL;
	vfsp.vnfs_fsnode = NULL;
	vfsp.vnfs_cnp = NULL;
	vfsp.vnfs_vops = spec_vnodeop_p;
	vfsp.vnfs_rdev = dev;
	vfsp.vnfs_filesize = 0;

	vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;

	vfsp.vnfs_marksystem = 0;
	vfsp.vnfs_markroot = 0;

	if ((error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp))) {
		*vpp = NULLVP;
		return error;
	}
	vnode_lock_spin(nvp);
	nvp->v_flag |= VBDEVVP;
	nvp->v_tag = VT_NON;    /* set this to VT_NON so during aliasing it can be replaced */
	vnode_unlock(nvp);
	/*
	 * NOTE(review): failures below are fatal (panic) because this path
	 * is used while bringing up the root filesystem; the returns after
	 * panic() are unreachable in practice.
	 */
	if ((error = vnode_ref(nvp))) {
		panic("bdevvp failed: vnode_ref");
		return error;
	}
	if ((error = VNOP_FSYNC(nvp, MNT_WAIT, &context))) {
		panic("bdevvp failed: fsync");
		return error;
	}
	/* throw away any stale cached blocks for this device */
	if ((error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0))) {
		panic("bdevvp failed: invalidateblks");
		return error;
	}

#if CONFIG_MACF
	/*
	 * XXXMAC: We can't put a MAC check here, the system will
	 * panic without this vnode.
	 */
#endif /* MAC */

	if ((error = VNOP_OPEN(nvp, FREAD, &context))) {
		panic("bdevvp failed: open");
		return error;
	}
	*vpp = nvp;

	return 0;
}
2165 
2166 /*
2167  * Check to see if the new vnode represents a special device
2168  * for which we already have a vnode (either because of
2169  * bdevvp() or because of a different vnode representing
2170  * the same block device). If such an alias exists, deallocate
2171  * the existing contents and return the aliased vnode. The
2172  * caller is responsible for filling it with its new contents.
2173  */
static vnode_t
checkalias(struct vnode *nvp, dev_t nvp_rdev)
{
	struct vnode *vp;
	struct vnode **vpp;
	struct specinfo *sin = NULL;
	int vid = 0;

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	SPECHASH_LOCK();

	/* search the hash chain for an existing vnode for this dev/type */
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
			vid = vp->v_id;
			/* holdcount keeps vp's memory valid across the unlock */
			vnode_hold(vp);
			break;
		}
	}
	SPECHASH_UNLOCK();

	if (vp) {
found_alias:
		/* vid check detects recycling between unlock and here */
		if (vnode_getwithvid(vp, vid)) {
			vnode_drop(vp);
			goto loop;
		}
		vnode_drop(vp);
		/*
		 * Termination state is checked in vnode_getwithvid
		 */
		vnode_lock(vp);

		/*
		 * Alias, but not in use, so flush it out.
		 */
		if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
			vnode_hold(vp);
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_put_locked(vp);
			vnode_drop_and_unlock(vp);
			goto loop;
		}
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		/* no usable alias: set up specinfo on the new vnode */
		if (sin == NULL) {
			sin = zalloc_flags(specinfo_zone, Z_WAITOK | Z_ZERO);
		} else {
			/* reuse the allocation from a previous loop pass */
			bzero(sin, sizeof(struct specinfo));
		}

		nvp->v_specinfo = sin;
		nvp->v_rdev = nvp_rdev;
		nvp->v_specflags = 0;
		nvp->v_speclastr = -1;
		nvp->v_specinfo->si_opencount = 0;
		nvp->v_specinfo->si_initted = 0;
		nvp->v_specinfo->si_throttleable = 0;
		nvp->v_specinfo->si_devbsdunit = LOWPRI_MAX_NUM_DEV - 1;

		SPECHASH_LOCK();

		/* We dropped the lock, someone could have added */
		if (vp == NULLVP) {
			for (vp = *vpp; vp; vp = vp->v_specnext) {
				if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
					vid = vp->v_id;
					vnode_hold(vp);
					SPECHASH_UNLOCK();
					goto found_alias;
				}
			}
		}

		/* link the new vnode onto the hash chain */
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		*vpp = nvp;

		if (vp != NULLVP) {
			/* an in-use alias remains: mark both as aliased */
			nvp->v_specflags |= SI_ALIASED;
			vp->v_specflags |= SI_ALIASED;
			SPECHASH_UNLOCK();
			vnode_put_locked(vp);
			vnode_unlock(vp);
		} else {
			SPECHASH_UNLOCK();
		}

		return NULLVP;
	}

	/* existing VT_NON alias is being handed back; free the spare specinfo */
	if (sin) {
		zfree(specinfo_zone, sin);
	}

	if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0) {
		return vp;
	}

	panic("checkalias with VT_NON vp that shouldn't: %p", vp);

	return vp;
}
2277 
2278 
2279 /*
2280  * Get a reference on a particular vnode and lock it if requested.
2281  * If the vnode was on the inactive list, remove it from the list.
2282  * If the vnode was on the free list, remove it from the list and
2283  * move it to inactive list as needed.
2284  * The vnode lock bit is set if the vnode is being eliminated in
2285  * vgone. The process is awakened when the transition is completed,
2286  * and an error returned to indicate that the vnode is no longer
2287  * usable (possibly having been changed to a new file system type).
2288  */
2289 int
vget_internal(vnode_t vp,int vid,int vflags)2290 vget_internal(vnode_t vp, int vid, int vflags)
2291 {
2292 	int error = 0;
2293 
2294 	vnode_lock_spin(vp);
2295 
2296 	if ((vflags & VNODE_WITHREF) && (vp->v_usecount == 0) && (vp->v_iocount == 0)) {
2297 		panic("Expected to have usecount or iocount on vnode");
2298 	}
2299 
2300 	if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0)) {
2301 		/*
2302 		 * vnode to be returned only if it has writers opened
2303 		 */
2304 		error = EINVAL;
2305 	} else {
2306 		error = vnode_getiocount(vp, vid, vflags);
2307 	}
2308 
2309 	vnode_unlock(vp);
2310 
2311 	return error;
2312 }
2313 
2314 /*
2315  * Returns:	0			Success
2316  *		ENOENT			No such file or directory [terminating]
2317  */
int
vnode_ref(vnode_t vp)
{
	/* Take a plain usecount: no open mode bits, no flags. */
	return vnode_ref_ext(vp, 0, 0);
}
2323 
2324 /*
2325  * Returns:	0			Success
2326  *		ENOENT			No such file or directory [terminating]
2327  */
int
vnode_ref_ext(vnode_t vp, int fmode, int flags)
{
	int     error = 0;

	vnode_lock_spin(vp);

	/*
	 * once all the current call sites have been fixed to insure they have
	 * taken an iocount, we can toughen this assert up and insist that the
	 * iocount is non-zero... a non-zero usecount doesn't insure correctness
	 */
	if (vp->v_iocount <= 0 && vp->v_usecount <= 0) {
		panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
	}

	/*
	 * if you are the owner of drain/termination, can acquire usecount
	 */
	if (((flags & VNODE_REF_FORCE) == 0) &&
	    ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) &&
	    !(vp->v_lflag & VL_OPSCHANGE) &&
	    (vp->v_owner != current_thread())) {
		error = ENOENT;
		goto out;
	}

	/* Enable atomic ops on v_usecount without the vnode lock */
	os_atomic_inc(&vp->v_usecount, relaxed);

	/* account writers and event-only opens separately */
	if (fmode & FWRITE) {
		if (++vp->v_writecount <= 0) {
			panic("vnode_ref_ext: v_writecount");
		}
	}
	if (fmode & O_EVTONLY) {
		if (++vp->v_kusecount <= 0) {
			panic("vnode_ref_ext: v_kusecount");
		}
	}
	if (vp->v_flag & VRAGE) {
		struct  uthread *ut;

		ut = current_uthread();

		if (!(current_proc()->p_lflag & P_LRAGE_VNODES) &&
		    !(ut->uu_flag & UT_RAGE_VNODES)) {
			/*
			 * a 'normal' process accessed this vnode
			 * so make sure its no longer marked
			 * for rapid aging...  also, make sure
			 * it gets removed from the rage list...
			 * when v_usecount drops back to 0, it
			 * will be put back on the real free list
			 */
			vp->v_flag &= ~VRAGE;
			vp->v_references = 0;
			vnode_list_remove(vp);
		}
	}
	/* first usecount on a regular file: tell UBC its pager is in use */
	if (vp->v_usecount == 1 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
		if (vp->v_ubcinfo) {
			vnode_lock_convert(vp);
			memory_object_mark_used(vp->v_ubcinfo->ui_control);
		}
	}
out:
	vnode_unlock(vp);

	return error;
}
2399 
2400 
2401 boolean_t
vnode_on_reliable_media(vnode_t vp)2402 vnode_on_reliable_media(vnode_t vp)
2403 {
2404 	mount_t mp = vp->v_mount;
2405 
2406 	/*
2407 	 * A NULL mountpoint would imply it's not attached to a any filesystem.
2408 	 * This can only happen with a vnode created by bdevvp(). We'll consider
2409 	 * those as not unreliable as the primary use of this function is determine
2410 	 * which vnodes are to be handed off to the async cleaner thread for
2411 	 * reclaim.
2412 	 */
2413 	if (!mp || (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV) && (mp->mnt_flag & MNT_LOCAL))) {
2414 		return TRUE;
2415 	}
2416 
2417 	return FALSE;
2418 }
2419 
static void
vnode_async_list_add_locked(vnode_t vp)
{
	/* caller must hold the vnode list lock; vp must not be on any list */
	if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
		panic("vnode_async_list_add: %p is in wrong state", vp);
	}

	TAILQ_INSERT_HEAD(&vnode_async_work_list, vp, v_freelist);
	vp->v_listflag |= VLIST_ASYNC_WORK;

	/* keep the global accounting in sync with the list change */
	async_work_vnodes++;
	if (!(vp->v_listflag & VLIST_NO_REUSE)) {
		reusablevnodes++;
	}
	if (vp->v_flag & VCANDEALLOC) {
		os_atomic_dec(&busyvnodes, relaxed);
	}
}
2438 
2439 static void
vnode_async_list_add(vnode_t vp)2440 vnode_async_list_add(vnode_t vp)
2441 {
2442 	vnode_list_lock();
2443 
2444 	if (VONLIST(vp)) {
2445 		if (!(vp->v_listflag & VLIST_ASYNC_WORK)) {
2446 			vnode_list_remove_locked(vp);
2447 			vnode_async_list_add_locked(vp);
2448 		}
2449 	} else {
2450 		vnode_async_list_add_locked(vp);
2451 	}
2452 
2453 	vnode_list_unlock();
2454 
2455 	wakeup(&vnode_async_work_list);
2456 }
2457 
2458 
2459 /*
2460  * put the vnode on appropriate free list.
2461  * called with vnode LOCKED
2462  */
static void
vnode_list_add(vnode_t vp)
{
	boolean_t need_dead_wakeup = FALSE;
	bool no_busy_decrement = false;

#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif

again:

	/*
	 * if it is already on a list or non zero references return
	 */
	if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE)) {
		return;
	}

	/*
	 * In vclean, we might have deferred ditching locked buffers
	 * because something was still referencing them (indicated by
	 * usecount).  We can ditch them now.
	 */
	if (ISSET(vp->v_lflag, VL_DEAD)
	    && (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))) {
		++vp->v_iocount;        // Probably not necessary, but harmless
#ifdef CONFIG_IOCOUNT_TRACE
		record_vp(vp, 1);
#endif
		/* must drop the vnode lock to flush buffers, then re-check state */
		vnode_unlock(vp);
		buf_invalidateblks(vp, BUF_INVALIDATE_LOCKED, 0, 0);
		vnode_lock(vp);
		vnode_dropiocount(vp);
		goto again;
	}

	vnode_list_lock();

	if (!(vp->v_lflag & VL_DEAD) && (vp->v_listflag & VLIST_NO_REUSE)) {
		/* not reusable: hand it to the async cleaner instead of a free list */
		if (!(vp->v_listflag & VLIST_ASYNC_WORK)) {
			vnode_async_list_add_locked(vp);
		}
		no_busy_decrement = true;
	} else if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
		/*
		 * add the new guy to the appropriate end of the RAGE list
		 */
		if ((vp->v_flag & VAGE)) {
			TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
		} else {
			TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
		}

		vp->v_listflag |= VLIST_RAGE;
		ragevnodes++;
		reusablevnodes++;
		wakeup_laundry_thread();

		/*
		 * reset the timestamp for the last inserted vp on the RAGE
		 * queue to let new_vnode know that its not ok to start stealing
		 * from this list... as long as we're actively adding to this list
		 * we'll push out the vnodes we want to donate to the real free list
		 * once we stop pushing, we'll let some time elapse before we start
		 * stealing them in the new_vnode routine
		 */
		microuptime(&rage_tv);
	} else {
		/*
		 * if VL_DEAD, insert it at head of the dead list
		 * else insert at tail of LRU list or at head if VAGE is set
		 */
		if ((vp->v_lflag & VL_DEAD)) {
			if (vp->v_flag & VCANDEALLOC) {
				/* deallocatable dead vnodes queue at the tail */
				TAILQ_INSERT_TAIL(&vnode_dead_list, vp, v_freelist);
				if (vp->v_listflag & VLIST_NO_REUSE) {
					deadvnodes_noreuse++;
				}
			} else {
				TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
			}
			vp->v_listflag |= VLIST_DEAD;
			deadvnodes++;

			if (dead_vnode_wanted) {
				/* a waiter in new_vnode is blocked on a dead vnode */
				dead_vnode_wanted--;
				need_dead_wakeup = TRUE;
			}
		} else if ((vp->v_flag & VAGE)) {
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
			vp->v_flag &= ~VAGE;
			freevnodes++;
			reusablevnodes++;
			wakeup_laundry_thread();
		} else {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			freevnodes++;
			reusablevnodes++;
			wakeup_laundry_thread();
		}
	}
	if ((vp->v_flag & VCANDEALLOC) && !no_busy_decrement) {
		os_atomic_dec(&busyvnodes, relaxed);
	}
	vnode_list_unlock();

	/* wake the waiter outside the list lock */
	if (need_dead_wakeup == TRUE) {
		wakeup_one((caddr_t)&dead_vnode_wanted);
	}
}
2574 
2575 
2576 /*
2577  * remove the vnode from appropriate free list.
2578  * called with vnode LOCKED and
2579  * the list lock held
2580  */
static void
vnode_list_remove_locked(vnode_t vp)
{
	if (VONLIST(vp)) {
		/*
		 * the v_listflag field is
		 * protected by the vnode_list_lock
		 */
		if (vp->v_listflag & VLIST_RAGE) {
			VREMRAGE("vnode_list_remove", vp);
		} else if (vp->v_listflag & VLIST_DEAD) {
			VREMDEAD("vnode_list_remove", vp);
			/* dead-list occupancy changed; let the laundry thread re-evaluate */
			wakeup_laundry_thread();
		} else if (vp->v_listflag & VLIST_ASYNC_WORK) {
			VREMASYNC_WORK("vnode_list_remove", vp);
		} else {
			VREMFREE("vnode_list_remove", vp);
		}
		/* coming off a free list makes this vnode "busy" again */
		if (vp->v_flag & VCANDEALLOC) {
			os_atomic_inc(&busyvnodes, relaxed);
		}
	}
}
2604 
2605 
2606 /*
2607  * remove the vnode from appropriate free list.
2608  * called with vnode LOCKED
2609  */
static void
vnode_list_remove(vnode_t vp)
{
#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	/*
	 * we want to avoid taking the list lock
	 * in the case where we're not on the free
	 * list... this will be true for most
	 * directories and any currently in use files
	 *
	 * we're guaranteed that we can't go from
	 * the not-on-list state to the on-list
	 * state since we hold the vnode lock...
	 * all calls to vnode_list_add are done
	 * under the vnode lock... so we can
	 * check for that condition (the prevelant one)
	 * without taking the list lock
	 */
	if (VONLIST(vp)) {
		vnode_list_lock();
		/*
		 * however, we're not guaranteed that
		 * we won't go from the on-list state
		 * to the not-on-list state until we
		 * hold the vnode_list_lock... this
		 * is due to "new_vnode" removing vnodes
		 * from the free list uder the list_lock
		 * w/o the vnode lock... so we need to
		 * check again whether we're currently
		 * on the free list
		 */
		vnode_list_remove_locked(vp);

		vnode_list_unlock();
	}
}
2648 
2649 
void
vnode_rele(vnode_t vp)
{
	/* Release a plain usecount: no open-mode bits, reentry allowed, unlocked. */
	vnode_rele_internal(vp, 0, 0, 0);
}
2655 
2656 
void
vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
{
	/* Release with open-mode accounting; vnode lock not held by caller. */
	vnode_rele_internal(vp, fmode, dont_reenter, 0);
}
2662 
2663 
void
vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
{
	int32_t old_usecount;

	if (!locked) {
		/* holdcount keeps vp's memory valid until we unlock at 'done' */
		vnode_hold(vp);
		vnode_lock_spin(vp);
	}
#if DIAGNOSTIC
	else {
		lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
	}
#endif
	/* Enable atomic ops on v_usecount without the vnode lock */
	old_usecount = os_atomic_dec_orig(&vp->v_usecount, relaxed);
	if (old_usecount < 1) {
		/*
		 * Because we allow atomic ops on usecount (in lookup only, under
		 * specific conditions of already having a usecount) it is
		 * possible that when the vnode is examined, its usecount is
		 * different than what will be printed in this panic message.
		 */
		panic("vnode_rele_ext: vp %p usecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.",
		    vp, old_usecount - 1, vp->v_tag, vp->v_type, vp->v_flag);
	}

	/* balance the per-mode counters taken in vnode_ref_ext */
	if (fmode & FWRITE) {
		if (--vp->v_writecount < 0) {
			panic("vnode_rele_ext: vp %p writecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
		}
	}
	if (fmode & O_EVTONLY) {
		if (--vp->v_kusecount < 0) {
			panic("vnode_rele_ext: vp %p kusecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
		}
	}
	if (vp->v_kusecount > vp->v_usecount) {
		panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d).  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
	}

	if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
		/*
		 * vnode is still busy... if we're the last
		 * usecount, mark for a future call to VNOP_INACTIVE
		 * when the iocount finally drops to 0
		 */
		if (vp->v_usecount == 0) {
			vp->v_lflag |= VL_NEEDINACTIVE;
			vp->v_flag  &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
		}
		goto done;
	}
	vp->v_flag  &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);

	if (ISSET(vp->v_lflag, VL_TERMINATE | VL_DEAD) || dont_reenter) {
		/*
		 * vnode is being cleaned, or
		 * we've requested that we don't reenter
		 * the filesystem on this release...in
		 * the latter case, we'll mark the vnode aged
		 */
		if (dont_reenter) {
			if (!(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM))) {
				vp->v_lflag |= VL_NEEDINACTIVE;

				/* slow/dirty media get reclaimed by the async worker */
				if (vnode_on_reliable_media(vp) == FALSE || vp->v_flag & VISDIRTY) {
					vnode_async_list_add(vp);
					goto done;
				}
			}
			vp->v_flag |= VAGE;
		}
		vnode_list_add(vp);

		goto done;
	}
	/*
	 * at this point both the iocount and usecount
	 * are zero
	 * pick up an iocount so that we can call
	 * VNOP_INACTIVE with the vnode lock unheld
	 */
	vp->v_iocount++;
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	vp->v_lflag &= ~VL_NEEDINACTIVE;

	if (UBCINFOEXISTS(vp)) {
		/* drops the vnode lock after freeing code-signing state */
		ubc_cs_free_and_vnode_unlock(vp);
	} else {
		vnode_unlock(vp);
	}

	VNOP_INACTIVE(vp, vfs_context_current());

	vnode_lock_spin(vp);

	/*
	 * because we dropped the vnode lock to call VNOP_INACTIVE
	 * the state of the vnode may have changed... we may have
	 * picked up an iocount, usecount or the MARKTERM may have
	 * been set... we need to reevaluate the reference counts
	 * to determine if we can call vnode_reclaim_internal at
	 * this point... if the reference counts are up, we'll pick
	 * up the MARKTERM state when they get subsequently dropped
	 */
	if ((vp->v_iocount == 1) && (vp->v_usecount == 0) &&
	    ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
		struct  uthread *ut;

		ut = current_uthread();

		if (ut->uu_defer_reclaims) {
			/* queue on the per-thread deferred-reclaim list instead */
			vp->v_defer_reclaimlist = ut->uu_vreclaims;
			ut->uu_vreclaims = vp;
			goto done;
		}
		vnode_lock_convert(vp);
		vnode_reclaim_internal(vp, 1, 1, 0);
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);
done:
	/* last usecount on a regular file: tell UBC its pager is idle */
	if (vp->v_usecount == 0 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
		if (vp->v_ubcinfo) {
			vnode_lock_convert(vp);
			memory_object_mark_unused(vp->v_ubcinfo->ui_control, (vp->v_flag & VRAGE) == VRAGE);
		}
	}
	if (!locked) {
		vnode_drop_and_unlock(vp);
	}
	return;
}
2800 
2801 /*
2802  * Remove any vnodes in the vnode table belonging to mount point mp.
2803  *
2804  * If MNT_NOFORCE is specified, there should not be any active ones,
2805  * return error if any are found (nb: this is a user error, not a
2806  * system error). If MNT_FORCE is specified, detach any active vnodes
2807  * that are found.
2808  */
2809 
int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vnode *vp;
	int busy = 0;
	int reclaimed = 0;
	int retval;
	unsigned int vid;
	bool first_try = true;

	/*
	 * See comments in vnode_iterate() for the rationale for this lock
	 */
	mount_iterate_lock(mp);

	mount_lock(mp);
	vnode_iterate_setup(mp);
	/*
	 * On regular unmounts(not forced) do a
	 * quick check for vnodes to be in use. This
	 * preserves the caching of vnodes. automounter
	 * tries unmounting every so often to see whether
	 * it is still busy or not.
	 */
	if (((flags & FORCECLOSE) == 0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
		if (vnode_umount_preflight(mp, skipvp, flags)) {
			vnode_iterate_clear(mp);
			mount_unlock(mp);
			mount_iterate_unlock(mp);
			return EBUSY;
		}
	}
loop:
	/* If it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		mount_iterate_unlock(mp);
		return retval;
	}

	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		/* pop from the worker queue back onto the main list */
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);

		if ((vp->v_mount != mp) || (vp == skipvp)) {
			continue;
		}
		/* capture v_id before dropping the mount lock to detect recycling */
		vid = vp->v_id;
		mount_unlock(mp);

		vnode_lock_spin(vp);

		// If vnode is already terminating, wait for it...
		while (vp->v_id == vid && ISSET(vp->v_lflag, VL_TERMINATE)) {
			vp->v_lflag |= VL_TERMWANT;
			msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vflush", NULL);
		}

		if ((vp->v_id != vid) || ISSET(vp->v_lflag, VL_DEAD)) {
			/* recycled or already dead while we waited; move on */
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}

		/*
		 * If requested, skip over vnodes marked VSYSTEM.
		 * Skip over all vnodes marked VNOFLUSH.
		 */
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
		    (vp->v_flag & VNOFLUSH))) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VSWAP.
		 */
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VROOT.
		 */
		if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If the real usecount is 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (((vp->v_usecount == 0) ||
		    ((vp->v_usecount - vp->v_kusecount) == 0))) {
			vnode_lock_convert(vp);
			vnode_hold(vp);
			vp->v_iocount++;        /* so that drain waits for * other iocounts */
#ifdef CONFIG_IOCOUNT_TRACE
			record_vp(vp, 1);
#endif
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_dropiocount(vp);
			vnode_list_add(vp);
			vnode_drop_and_unlock(vp);

			reclaimed++;
			mount_lock(mp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			vnode_lock_convert(vp);

			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vp->v_iocount++;        /* so that drain waits * for other iocounts */
				vnode_hold(vp);
#ifdef CONFIG_IOCOUNT_TRACE
				record_vp(vp, 1);
#endif
				vnode_abort_advlocks(vp);
				vnode_reclaim_internal(vp, 1, 1, 0);
				vnode_dropiocount(vp);
				vnode_list_add(vp);
				vnode_drop_and_unlock(vp);
			} else {
				/* devices survive: detach from the fs, fall back to spec ops */
				vnode_hold(vp);
				vp->v_lflag |= VL_OPSCHANGE;
				vclean(vp, 0);
				vp->v_lflag &= ~VL_DEAD;
				vp->v_op = spec_vnodeop_p;
				vp->v_flag |= VDEVFLUSH;
				vnode_drop_and_unlock(vp);
				wakeup(&vp->v_lflag); /* chkvnlock is waitng for VL_DEAD to get unset */
			}
			mount_lock(mp);
			continue;
		}

		vnode_unlock(vp);
		/* log vnodes blocking unforced unmounts */
		if (print_busy_vnodes && first_try && ((flags & FORCECLOSE) == 0)) {
			vprint_path("vflush - busy vnode", vp);
		}

		mount_lock(mp);
		busy++;
	}

	/* At this point the worker queue is completed */
	if (busy && ((flags & FORCECLOSE) == 0) && reclaimed) {
		/* progress was made; retry, busy vnodes may now be idle */
		busy = 0;
		reclaimed = 0;
		(void)vnode_iterate_reloadq(mp);
		first_try = false;
		/* returned with mount lock held */
		goto loop;
	}

	/* if new vnodes were created in between retry the reclaim */
	if (vnode_iterate_reloadq(mp) != 0) {
		if (!(busy && ((flags & FORCECLOSE) == 0))) {
			first_try = false;
			goto loop;
		}
	}
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	mount_iterate_unlock(mp);

	if (busy && ((flags & FORCECLOSE) == 0)) {
		return EBUSY;
	}
	return 0;
}
3005 
3006 long num_recycledvnodes = 0;
3007 /*
3008  * Disassociate the underlying file system from a vnode.
3009  * The vnode lock is held on entry.
3010  */
/*
 * flags may include DOCLOSE (close the vnode and flush its buffers/pages)
 * and REVOKEALL (the vnode is being revoked, not merely recycled).
 * Returns with the vnode lock held and the vnode marked VL_DEAD,
 * pointed at dead_vnodeop_p.
 */
static void
vclean(vnode_t vp, int flags)
{
	vfs_context_t ctx = vfs_context_current();
	int active;
	int need_inactive;
	int already_terminating;
	int clflags = 0;
#if NAMEDSTREAMS
	int is_namedstream;
#endif

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	active = vp->v_usecount;

	/*
	 * just in case we missed sending a needed
	 * VNOP_INACTIVE, we'll do it now
	 */
	need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);

	vp->v_lflag &= ~VL_NEEDINACTIVE;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	already_terminating = (vp->v_lflag & VL_TERMINATE);

	vp->v_lflag |= VL_TERMINATE;

#if NAMEDSTREAMS
	is_namedstream = vnode_isnamedstream(vp);
#endif

	vnode_unlock(vp);

	OSAddAtomicLong(1, &num_recycledvnodes);

	/* translate vclean flags into VNOP_CLOSE io flags */
	if (flags & DOCLOSE) {
		clflags |= IO_NDELAY;
	}
	if (flags & REVOKEALL) {
		clflags |= IO_REVOKE;
	}

#if CONFIG_MACF
	if (vp->v_mount) {
		/*
		 * It is possible for bdevvp vnodes to not have a mount
		 * pointer. It's fine to let it get reclaimed without
		 * notifying.
		 */
		mac_vnode_notify_reclaim(vp);
	}
#endif

	if (active && (flags & DOCLOSE)) {
		VNOP_CLOSE(vp, clflags, ctx);
	}

	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE) {
		if (vp->v_tag == VT_NFS) {
			nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
		} else {
			VNOP_FSYNC(vp, MNT_WAIT, ctx);

			/*
			 * If the vnode is still in use (by the journal for
			 * example) we don't want to invalidate locked buffers
			 * here.  In that case, either the journal will tidy them
			 * up, or we will deal with it when the usecount is
			 * finally released in vnode_rele_internal.
			 */
			buf_invalidateblks(vp, BUF_WRITE_DATA | (active ? 0 : BUF_INVALIDATE_LOCKED), 0, 0);
		}
		if (UBCINFOEXISTS(vp)) {
			/*
			 * Clean the pages in VM.
			 */
			(void)ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
		}
	}
	/* deliver any still-owed VNOP_INACTIVE */
	if (active || need_inactive) {
		VNOP_INACTIVE(vp, ctx);
	}

#if NAMEDSTREAMS
	if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
		vnode_t pvp = vp->v_parent;

		/* Delete the shadow stream file before we reclaim its vnode */
		if (vnode_isshadow(vp)) {
			vnode_relenamedstream(pvp, vp);
		}

		/*
		 * No more streams associated with the parent.  We
		 * have a ref on it, so its identity is stable.
		 * If the parent is on an opaque volume, then we need to know
		 * whether it has associated named streams.
		 */
		if (vfs_authopaque(pvp->v_mount)) {
			vnode_lock_spin(pvp);
			pvp->v_lflag &= ~VL_HASSTREAMS;
			vnode_unlock(pvp);
		}
	}
#endif

	/* pick the most specific reason for destroying the vnode's VM object */
	vm_object_destroy_reason_t reason = VM_OBJECT_DESTROY_RECLAIM;
	bool forced_unmount = vnode_mount(vp) != NULL && (vnode_mount(vp)->mnt_lflag & MNT_LFORCE) != 0;
	bool ungraft_heuristic = flags & REVOKEALL;
	bool unmount = vnode_mount(vp) != NULL && (vnode_mount(vp)->mnt_lflag & MNT_LUNMOUNT) != 0;
	if (forced_unmount) {
		reason = VM_OBJECT_DESTROY_FORCED_UNMOUNT;
	} else if (ungraft_heuristic) {
		reason = VM_OBJECT_DESTROY_UNGRAFT;
	} else if (unmount) {
		reason = VM_OBJECT_DESTROY_UNMOUNT;
	}

	/*
	 * Destroy ubc named reference
	 * cluster_release is done on this path
	 * along with dropping the reference on the ucred
	 * (and in the case of forced unmount of an mmap-ed file,
	 * the ubc reference on the vnode is dropped here too).
	 */
	ubc_destroy_named(vp, reason);

#if CONFIG_TRIGGERS
	/*
	 * cleanup trigger info from vnode (if any)
	 */
	if (vp->v_resolve) {
		vnode_resolver_detach(vp);
	}
#endif

#if CONFIG_IO_COMPRESSION_STATS
	if ((vp->io_compression_stats)) {
		vnode_iocs_record_and_free(vp);
	}
#endif /* CONFIG_IO_COMPRESSION_STATS */

	/*
	 * Reclaim the vnode.
	 */
	if (VNOP_RECLAIM(vp, ctx)) {
		panic("vclean: cannot reclaim");
	}

	// make sure the name & parent ptrs get cleaned out!
	vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE | VNODE_UPDATE_PURGEFIRMLINK);

	/* reacquire the vnode lock to transition vp to its dead identity */
	vnode_lock(vp);

	/*
	 * Remove the vnode from any mount list it might be on.  It is not
	 * safe to do this any earlier because unmount needs to wait for
	 * any vnodes to terminate and it cannot do that if it cannot find
	 * them.
	 */
	insmntque(vp, (struct mount *)0);

	/* further VNOPs on vp are now routed to the dead filesystem */
	vp->v_lflag |= VL_DEAD;
	vp->v_mount = dead_mountp;
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_data = NULL;

	vp->v_flag &= ~VISDIRTY;

	if (already_terminating == 0) {
		vp->v_lflag &= ~VL_TERMINATE;
		/*
		 * Done with purge, notify sleepers of the grim news.
		 */
		if (vp->v_lflag & VL_TERMWANT) {
			vp->v_lflag &= ~VL_TERMWANT;
			wakeup(&vp->v_lflag);
		}
	}
}
3204 
3205 /*
3206  * Eliminate all activity associated with  the requested vnode
3207  * and with all vnodes aliased to the requested vnode.
3208  */
/*
 * With DIAGNOSTIC, flags must contain REVOKEALL; a_context is unused
 * (the current context is implied).  Returns ENOENT if a termination
 * is already in progress, 0 otherwise.
 */
int
#if DIAGNOSTIC
vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
#else
vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
#endif
{
	struct vnode *vq;
	int vid;

#if DIAGNOSTIC
	if ((flags & REVOKEALL) == 0) {
		panic("vnop_revoke");
	}
#endif

	if (vnode_isaliased(vp)) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * return an immediate error
		 */
		if (vp->v_lflag & VL_TERMINATE) {
			return ENOENT;
		}

		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		SPECHASH_LOCK();
		while ((vp->v_specflags & SI_ALIASED)) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq) {
					continue;
				}
				vid = vq->v_id;
				/* hold the alias so it can't be freed while we drop the hash lock */
				vnode_hold(vq);
				SPECHASH_UNLOCK();
				if (vnode_getwithvid(vq, vid)) {
					/* alias was recycled under us -- rescan the chain */
					vq = vnode_drop(vq);
					SPECHASH_LOCK();
					break;
				}
				vnode_lock(vq);
				if (!(vq->v_lflag & VL_TERMINATE)) {
					vnode_reclaim_internal(vq, 1, 1, 0);
				}
				vnode_put_locked(vq);
				vq = vnode_drop_and_unlock(vq);
				SPECHASH_LOCK();
				break;
			}
		}
		SPECHASH_UNLOCK();
	}
	vnode_lock(vp);
	if (vp->v_lflag & VL_TERMINATE) {
		/* somebody else is already terminating this vnode */
		vnode_unlock(vp);
		return ENOENT;
	}
	vnode_reclaim_internal(vp, 1, 0, REVOKEALL);
	vnode_unlock(vp);

	return 0;
}
3275 
3276 /*
3277  * Recycle an unused vnode to the front of the free list.
3278  * Release the passed interlock if the vnode will be recycled.
3279  */
int
vnode_recycle(struct vnode *vp)
{
	vnode_lock_spin(vp);

	if (vp->v_iocount || vp->v_usecount) {
		/*
		 * Still referenced: defer the reclaim by marking the
		 * vnode; report that it was not recycled now.
		 */
		vp->v_lflag |= VL_MARKTERM;
		vnode_unlock(vp);
		return 0;
	}
	/* unused: reclaim it right here (needs the full vnode lock) */
	vnode_lock_convert(vp);
	vnode_hold(vp);
	vnode_reclaim_internal(vp, 1, 0, 0);

	vnode_drop_and_unlock(vp);

	/* 1 => vnode was reclaimed */
	return 1;
}
3298 
/*
 * vnode_reload(vp):
 *   Mark `vp' for termination if nobody but the caller is using it.
 *   The caller must hold an iocount on the vnode (enforced by the
 *   panic below).  Returns 1 if marked, 0 if the vnode is busy.
 */
static int
vnode_reload(vnode_t vp)
{
	vnode_lock_spin(vp);

	if ((vp->v_iocount > 1) || vp->v_usecount) {
		/* in use by someone other than the caller -- leave it alone */
		vnode_unlock(vp);
		return 0;
	}
	if (vp->v_iocount <= 0) {
		panic("vnode_reload with no iocount %d", vp->v_iocount);
	}

	/* mark for release when iocount is dropped */
	vp->v_lflag |= VL_MARKTERM;
	vnode_unlock(vp);

	return 1;
}
3318 
3319 
/*
 * vgone(vp, flags):
 *   Fully disassociate `vp' from its file system (via vclean() with
 *   DOCLOSE added to `flags') and, for special (device) vnodes,
 *   remove it from the device alias hash chain and free its specinfo.
 */
static void
vgone(vnode_t vp, int flags)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * Clean out the filesystem specific data.
	 * vclean also takes care of removing the
	 * vnode from any mount list it might be on
	 */
	vclean(vp, flags | DOCLOSE);

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		SPECHASH_LOCK();
		/* unlink vp from its device hash chain */
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp) {
					continue;
				}
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL) {
				panic("missing bdev");
			}
		}
		if (vp->v_specflags & SI_ALIASED) {
			/*
			 * vp was one of several vnodes for the same device.
			 * If exactly one alias remains on the chain after
			 * removing vp, it is no longer "aliased", so clear
			 * SI_ALIASED on it as well.
			 */
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type) {
					continue;
				}
				if (vx) {
					break;
				}
				vx = vq;
			}
			if (vx == NULL) {
				panic("missing alias");
			}
			if (vq == NULL) {
				vx->v_specflags &= ~SI_ALIASED;
			}
			vp->v_specflags &= ~SI_ALIASED;
		}
		SPECHASH_UNLOCK();
		{
			/* specinfo is detached first, then freed outside the hash lock */
			struct specinfo *tmp = vp->v_specinfo;
			vp->v_specinfo = NULL;
			zfree(specinfo_zone, tmp);
		}
	}
}
3381 
3382 /*
3383  * internal helper function only!
3384  * vend an _iocounted_ vnode via output argument, or return an error if unable.
3385  */
static int
get_vp_from_dev(dev_t dev, enum vtype type, vnode_t *outvp)
{
	vnode_t vp;
	int vid;

loop:
	SPECHASH_LOCK();
	/* scan the device hash chain for a vnode matching dev and type */
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type) {
			continue;
		}
		/* remember the vid and hold the vnode before dropping the hash lock */
		vid = vp->v_id;
		vnode_hold(vp);
		SPECHASH_UNLOCK();

		/* acquire iocount */
		if (vnode_getwithvid(vp, vid)) {
			/* vnode was recycled while unlocked -- start over */
			vnode_drop(vp);
			goto loop;
		}
		vnode_drop(vp);

		/* Vend iocounted vnode */
		*outvp = vp;
		return 0;
	}

	/* vnode not found, error out */
	SPECHASH_UNLOCK();
	return ENOENT;
}
3418 
3419 
3420 
3421 /*
3422  * Lookup a vnode by device number.
3423  */
/*
 * check_mountedon(dev, type, errorp):
 *   Returns 1 (setting *errorp from vfs_mountedon()) if the special
 *   vnode for `dev'/`type' exists, is in use by someone else, and has
 *   a file system mounted on it; returns 0 otherwise.
 */
int
check_mountedon(dev_t dev, enum vtype type, int *errorp)
{
	vnode_t vp = NULLVP;
	int rc = 0;

	rc = get_vp_from_dev(dev, type, &vp);
	if (rc) {
		/* if no vnode found, it cannot be mounted on */
		return 0;
	}

	/* otherwise, examine it */
	vnode_lock_spin(vp);
	/* note: exclude the iocount we JUST got (e.g. >1, not >0) */
	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
		vnode_unlock(vp);
		if ((*errorp = vfs_mountedon(vp)) != 0) {
			rc = 1;
		}
	} else {
		vnode_unlock(vp);
	}
	/* release iocount! */
	vnode_put(vp);

	return rc;
}
3452 
3453 extern dev_t chrtoblk(dev_t d);
3454 
3455 /*
3456  * Examine the supplied vnode's dev_t and find its counterpart
3457  * (e.g.  VCHR => VDEV) to compare against.
3458  */
3459 static int
vnode_cmp_paired_dev(vnode_t vp,vnode_t bdev_vp,enum vtype in_type,enum vtype out_type)3460 vnode_cmp_paired_dev(vnode_t vp, vnode_t bdev_vp, enum vtype in_type,
3461     enum vtype out_type)
3462 {
3463 	if (!vp || !bdev_vp) {
3464 		return EINVAL;
3465 	}
3466 	/* Verify iocounts */
3467 	if (vnode_iocount(vp) <= 0 ||
3468 	    vnode_iocount(bdev_vp) <= 0) {
3469 		return EINVAL;
3470 	}
3471 
3472 	/* check for basic matches */
3473 	if (vnode_vtype(vp) != in_type) {
3474 		return EINVAL;
3475 	}
3476 	if (vnode_vtype(bdev_vp) != out_type) {
3477 		return EINVAL;
3478 	}
3479 
3480 	dev_t dev = vnode_specrdev(vp);
3481 	dev_t blk_devt = vnode_specrdev(bdev_vp);
3482 
3483 	if (in_type == VCHR) {
3484 		if (out_type != VBLK) {
3485 			return EINVAL;
3486 		}
3487 		dev_t bdev = chrtoblk(dev);
3488 		if (bdev == NODEV) {
3489 			return EINVAL;
3490 		} else if (bdev == blk_devt) {
3491 			return 0;
3492 		}
3493 		//fall through
3494 	}
3495 	/*
3496 	 * else case:
3497 	 *
3498 	 * in_type == VBLK? => VCHR?
3499 	 * not implemented...
3500 	 * exercise to the reader: this can be built by
3501 	 * taking the device's major, and iterating the `chrtoblktab`
3502 	 * array to look for a value that matches.
3503 	 */
3504 	return EINVAL;
3505 }
3506 /*
3507  * Vnode compare: does the supplied vnode's CHR device, match the dev_t
3508  * of the accompanying `blk_vp` ?
3509  * NOTE: vnodes MUST be iocounted BEFORE calling this!
3510  */
3511 
3512 int
vnode_cmp_chrtoblk(vnode_t vp,vnode_t blk_vp)3513 vnode_cmp_chrtoblk(vnode_t vp, vnode_t blk_vp)
3514 {
3515 	return vnode_cmp_paired_dev(vp, blk_vp, VCHR, VBLK);
3516 }
3517 
3518 
3519 
3520 /*
3521  * Calculate the total number of references to a special device.
3522  */
/*
 * The caller is expected to hold an iocount on `vp' (see the note in
 * the loop below).  Aliased special vnodes contribute the sum of the
 * opencounts of every alias for the same device.
 */
int
vcount(vnode_t vp)
{
	vnode_t vq, vnext;
	int count;
	int vid;

	if (!vnode_isspec(vp)) {
		/* non-device vnode: usecount minus kernel-internal uses */
		return vp->v_usecount - vp->v_kusecount;
	}

loop:
	if (!vnode_isaliased(vp)) {
		return vp->v_specinfo->si_opencount;
	}
	count = 0;

	SPECHASH_LOCK();
	/*
	 * Grab first vnode and its vid.
	 */
	vq = *vp->v_hashchain;
	if (vq) {
		vid = vq->v_id;
		vnode_hold(vq);
	} else {
		vid = 0;
	}
	SPECHASH_UNLOCK();

	while (vq) {
		/*
		 * Attempt to get the vnode outside the SPECHASH lock.
		 * Don't take iocount on 'vp' as iocount is already held by the caller.
		 */
		if ((vq != vp) && vnode_getwithvid(vq, vid)) {
			/* vq was recycled while unlocked -- restart the walk */
			vnode_drop(vq);
			goto loop;
		}
		vnode_drop(vq);
		vnode_lock(vq);

		if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
			if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
				/*
				 * Alias, but not in use, so flush it out.
				 */
				vnode_hold(vq);
				vnode_reclaim_internal(vq, 1, 1, 0);
				vnode_put_locked(vq);
				vnode_drop_and_unlock(vq);
				goto loop;
			}
			count += vq->v_specinfo->si_opencount;
		}
		vnode_unlock(vq);

		SPECHASH_LOCK();
		/*
		 * must do this with the reference still held on 'vq'
		 * so that it can't be destroyed while we're poking
		 * through v_specnext
		 */
		vnext = vq->v_specnext;
		if (vnext) {
			vid = vnext->v_id;
			vnode_hold(vnext);
		} else {
			vid = 0;
		}
		SPECHASH_UNLOCK();

		if (vq != vp) {
			vnode_put(vq);
		}

		vq = vnext;
	}

	return count;
}
3604 
int     prtactive = 0;          /* 1 => print out reclaim of active vnodes */

/*
 * Print out a description of a vnode.
 */
/* human-readable vnode type names, indexed by enum vtype */
static const char *typename[] =
{ "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
3612 
3613 static void
vprint_internal(const char * label,struct vnode * vp,bool with_path)3614 vprint_internal(const char *label, struct vnode *vp, bool with_path)
3615 {
3616 	char sbuf[64];
3617 
3618 	if (label != NULL) {
3619 		printf("%s: ", label);
3620 	}
3621 
3622 	if (with_path) {
3623 		char const *path = NULL;
3624 		char *vn_path = NULL;
3625 		vm_size_t vn_pathlen = MAXPATHLEN;
3626 
3627 		vn_path = zalloc(ZV_NAMEI);
3628 		if (vn_getpath(vp, vn_path, (int*)&vn_pathlen) == 0) {
3629 			path = vn_path;
3630 		} else {
3631 			path = "(get vnode path failed)";
3632 		}
3633 
3634 		printf("name %s, type %s, usecount %d, writecount %d, path %s\n",
3635 		    vp->v_name, typename[vp->v_type],
3636 		    vp->v_usecount, vp->v_writecount, path);
3637 
3638 		if (vn_path) {
3639 			zfree(ZV_NAMEI, vn_path);
3640 		}
3641 	} else {
3642 		printf("name %s, type %s, usecount %d, writecount %d\n",
3643 		    vp->v_name, typename[vp->v_type],
3644 		    vp->v_usecount, vp->v_writecount);
3645 	}
3646 	sbuf[0] = '\0';
3647 	if (vp->v_flag & VROOT) {
3648 		strlcat(sbuf, "|VROOT", sizeof(sbuf));
3649 	}
3650 	if (vp->v_flag & VTEXT) {
3651 		strlcat(sbuf, "|VTEXT", sizeof(sbuf));
3652 	}
3653 	if (vp->v_flag & VSYSTEM) {
3654 		strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
3655 	}
3656 	if (vp->v_flag & VNOFLUSH) {
3657 		strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
3658 	}
3659 	if (vp->v_flag & VBWAIT) {
3660 		strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
3661 	}
3662 	if (vnode_isaliased(vp)) {
3663 		strlcat(sbuf, "|VALIASED", sizeof(sbuf));
3664 	}
3665 	if (sbuf[0] != '\0') {
3666 		printf("vnode flags (%s)\n", &sbuf[1]);
3667 	}
3668 }
3669 
3670 void
vprint(const char * label,struct vnode * vp)3671 vprint(const char *label, struct vnode *vp)
3672 {
3673 	vprint_internal(label, vp, false);
3674 }
3675 
3676 void
vprint_path(const char * label,struct vnode * vp)3677 vprint_path(const char *label, struct vnode *vp)
3678 {
3679 	vprint_internal(label, vp, true);
3680 }
3681 
3682 static int
vn_getpath_flags_to_buildpath_flags(int flags)3683 vn_getpath_flags_to_buildpath_flags(int flags)
3684 {
3685 	int bpflags = (flags & VN_GETPATH_FSENTER) ? 0 : BUILDPATH_NO_FS_ENTER;
3686 
3687 	if (flags && (flags != VN_GETPATH_FSENTER)) {
3688 		if (flags & VN_GETPATH_NO_FIRMLINK) {
3689 			bpflags |= BUILDPATH_NO_FIRMLINK;
3690 		}
3691 		if (flags & VN_GETPATH_VOLUME_RELATIVE) {
3692 			bpflags |= (BUILDPATH_VOLUME_RELATIVE |
3693 			    BUILDPATH_NO_FIRMLINK);
3694 		}
3695 		if (flags & VN_GETPATH_NO_PROCROOT) {
3696 			bpflags |= BUILDPATH_NO_PROCROOT;
3697 		}
3698 		if (flags & VN_GETPATH_CHECK_MOVED) {
3699 			bpflags |= BUILDPATH_CHECK_MOVED;
3700 		}
3701 	}
3702 
3703 	return bpflags;
3704 }
3705 
3706 int
vn_getpath_ext_with_mntlen(struct vnode * vp,struct vnode * dvp,char * pathbuf,size_t * len,size_t * mntlen,int flags)3707 vn_getpath_ext_with_mntlen(struct vnode *vp, struct vnode *dvp, char *pathbuf,
3708     size_t *len, size_t *mntlen, int flags)
3709 {
3710 	int bpflags = vn_getpath_flags_to_buildpath_flags(flags);
3711 	int local_len;
3712 	int error;
3713 
3714 	if (*len > INT_MAX) {
3715 		return EINVAL;
3716 	}
3717 
3718 	local_len = *len;
3719 
3720 	error = build_path_with_parent(vp, dvp, pathbuf, local_len, &local_len,
3721 	    mntlen, bpflags, vfs_context_current());
3722 
3723 	if (local_len >= 0 && local_len <= (int)*len) {
3724 		*len = (size_t)local_len;
3725 	}
3726 
3727 	return error;
3728 }
3729 
3730 int
vn_getpath_ext(struct vnode * vp,struct vnode * dvp,char * pathbuf,size_t * len,int flags)3731 vn_getpath_ext(struct vnode *vp, struct vnode *dvp, char *pathbuf, size_t *len,
3732     int flags)
3733 {
3734 	return vn_getpath_ext_with_mntlen(vp, dvp, pathbuf, len, NULL, flags);
3735 }
3736 
3737 /*
3738  * Wrapper around vn_getpath_ext() that takes care of the int * <-> size_t *
3739  * conversion for the legacy KPIs.
3740  */
3741 static int
vn_getpath_ext_int(struct vnode * vp,struct vnode * dvp,char * pathbuf,int * len,int flags)3742 vn_getpath_ext_int(struct vnode *vp, struct vnode *dvp, char *pathbuf,
3743     int *len, int flags)
3744 {
3745 	size_t slen = *len;
3746 	int error;
3747 
3748 	if (*len < 0) {
3749 		return EINVAL;
3750 	}
3751 
3752 	error = vn_getpath_ext(vp, dvp, pathbuf, &slen, flags);
3753 
3754 	if (slen <= INT_MAX) {
3755 		*len = (int)slen;
3756 	}
3757 
3758 	return error;
3759 }
3760 
3761 int
vn_getpath(struct vnode * vp,char * pathbuf,int * len)3762 vn_getpath(struct vnode *vp, char *pathbuf, int *len)
3763 {
3764 	return vn_getpath_ext_int(vp, NULL, pathbuf, len, 0);
3765 }
3766 
3767 int
vn_getpath_fsenter(struct vnode * vp,char * pathbuf,int * len)3768 vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
3769 {
3770 	return vn_getpath_ext_int(vp, NULL, pathbuf, len, VN_GETPATH_FSENTER);
3771 }
3772 
3773 /*
 * vn_getpath_fsenter_with_parent will reenter the file system to find the path of the
 * vnode.  It requires that there are IO counts on both the vnode and the directory vnode.
 *
 * vn_getpath_fsenter is called by MAC hooks to authorize operations for everything but
 * unlink, rmdir and rename. For these operations the MAC hook calls vn_getpath. This presents
3779  * problems where if the path can not be found from the name cache, those operations can
3780  * erroneously fail with EPERM even though the call should succeed. When removing or moving
3781  * file system objects with operations such as unlink or rename, those operations need to
3782  * take IO counts on the target and containing directory. Calling vn_getpath_fsenter from a
3783  * MAC hook from these operations during forced unmount operations can lead to dead
3784  * lock. This happens when the operation starts, IO counts are taken on the containing
3785  * directories and targets. Before the MAC hook is called a forced unmount from another
3786  * thread takes place and blocks on the on going operation's directory vnode in vdrain.
3787  * After which, the MAC hook gets called and calls vn_getpath_fsenter.  vn_getpath_fsenter
3788  * is called with the understanding that there is an IO count on the target. If in
3789  * build_path the directory vnode is no longer in the cache, then the parent object id via
3790  * vnode_getattr from the target is obtain and used to call VFS_VGET to get the parent
3791  * vnode. The file system's VFS_VGET then looks up by inode in its hash and tries to get
3792  * an IO count. But VFS_VGET "sees" the directory vnode is in vdrain and can block
3793  * depending on which version and how it calls the vnode_get family of interfaces.
3794  *
3795  * N.B.  A reasonable interface to use is vnode_getwithvid. This interface was modified to
3796  * call vnode_getiocount with VNODE_DRAINO, so it will happily get an IO count and not
3797  * cause issues, but there is no guarantee that all or any file systems are doing that.
3798  *
3799  * vn_getpath_fsenter_with_parent can enter the file system safely since there is a known
3800  * IO count on the directory vnode by calling build_path_with_parent.
3801  */
3802 
3803 int
vn_getpath_fsenter_with_parent(struct vnode * dvp,struct vnode * vp,char * pathbuf,int * len)3804 vn_getpath_fsenter_with_parent(struct vnode *dvp, struct vnode *vp, char *pathbuf, int *len)
3805 {
3806 	return build_path_with_parent(vp, dvp, pathbuf, *len, len, NULL, 0, vfs_context_current());
3807 }
3808 
3809 int
vn_getpath_no_firmlink(struct vnode * vp,char * pathbuf,int * len)3810 vn_getpath_no_firmlink(struct vnode *vp, char *pathbuf, int *len)
3811 {
3812 	return vn_getpath_ext_int(vp, NULLVP, pathbuf, len,
3813 	           VN_GETPATH_NO_FIRMLINK);
3814 }
3815 
3816 int
vn_getcdhash(struct vnode * vp,off_t offset,unsigned char * cdhash,uint8_t * type)3817 vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash, uint8_t *type)
3818 {
3819 	return ubc_cs_getcdhash(vp, offset, cdhash, type);
3820 }
3821 
3822 
/*
 * Package-extension table installed via set_package_extensions_table();
 * all three variables are read and written under pkg_extensions_lck.
 */
static char *extension_table = NULL;
static int   nexts;           /* number of entries in extension_table */
static int   max_ext_width;   /* fixed slot width (bytes) of each entry */
3826 
/*
 * qsort() comparator ordering package extensions by string length.
 * The subtraction is done on size_t values and then truncated to int,
 * matching the historical behavior exactly.
 */
static int
extension_cmp(const void *a, const void *b)
{
	size_t la = strlen((const char *)a);
	size_t lb = strlen((const char *)b);

	return (int)(la - lb);
}
3832 
3833 
3834 //
3835 // This is the api LaunchServices uses to inform the kernel
3836 // the list of package extensions to ignore.
3837 //
3838 // Internally we keep the list sorted by the length of the
3839 // the extension (from longest to shortest).  We sort the
3840 // list of extensions so that we can speed up our searches
3841 // when comparing file names -- we only compare extensions
3842 // that could possibly fit into the file name, not all of
3843 // them (i.e. a short 8 character name can't have an 8
3844 // character extension).
3845 //
3846 extern lck_mtx_t pkg_extensions_lck;
3847 
/*
 * set_package_extensions_table(data, nentries, maxwidth):
 *   Replace the global package-extension table with `nentries'
 *   fixed-width (`maxwidth'-byte) strings copied from user space at
 *   `data'.  Returns 0, EINVAL on bad bounds, ENOMEM, or a copyin error.
 */
__private_extern__ int
set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
{
	char *new_exts, *old_exts;
	int old_nentries = 0, old_maxwidth = 0;
	int error;

	/* bound the allocation: at most 1024 entries of at most 255 bytes */
	if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
		return EINVAL;
	}


	// allocate one byte extra so we can guarantee null termination
	new_exts = kalloc_data((nentries * maxwidth) + 1, Z_WAITOK);
	if (new_exts == NULL) {
		return ENOMEM;
	}

	error = copyin(data, new_exts, nentries * maxwidth);
	if (error) {
		kfree_data(new_exts, (nentries * maxwidth) + 1);
		return error;
	}

	new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block

	/* keep the table sorted by extension length (see extension_cmp) */
	qsort(new_exts, nentries, maxwidth, extension_cmp);

	lck_mtx_lock(&pkg_extensions_lck);

	/* swap the new table in under the lock, remembering the old one */
	old_exts        = extension_table;
	old_nentries    = nexts;
	old_maxwidth    = max_ext_width;
	extension_table = new_exts;
	nexts           = nentries;
	max_ext_width   = maxwidth;

	lck_mtx_unlock(&pkg_extensions_lck);

	/* release the previous table outside the lock */
	kfree_data(old_exts, (old_nentries * old_maxwidth) + 1);

	return 0;
}
3891 
3892 
3893 int
is_package_name(const char * name,int len)3894 is_package_name(const char *name, int len)
3895 {
3896 	int i;
3897 	size_t extlen;
3898 	const char *ptr, *name_ext;
3899 
3900 	// if the name is less than 3 bytes it can't be of the
3901 	// form A.B and if it begins with a "." then it is also
3902 	// not a package.
3903 	if (len <= 3 || name[0] == '.') {
3904 		return 0;
3905 	}
3906 
3907 	name_ext = NULL;
3908 	for (ptr = name; *ptr != '\0'; ptr++) {
3909 		if (*ptr == '.') {
3910 			name_ext = ptr;
3911 		}
3912 	}
3913 
3914 	// if there is no "." extension, it can't match
3915 	if (name_ext == NULL) {
3916 		return 0;
3917 	}
3918 
3919 	// advance over the "."
3920 	name_ext++;
3921 
3922 	lck_mtx_lock(&pkg_extensions_lck);
3923 
3924 	// now iterate over all the extensions to see if any match
3925 	ptr = &extension_table[0];
3926 	for (i = 0; i < nexts; i++, ptr += max_ext_width) {
3927 		extlen = strlen(ptr);
3928 		if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
3929 			// aha, a match!
3930 			lck_mtx_unlock(&pkg_extensions_lck);
3931 			return 1;
3932 		}
3933 	}
3934 
3935 	lck_mtx_unlock(&pkg_extensions_lck);
3936 
3937 	// if we get here, no extension matched
3938 	return 0;
3939 }
3940 
/*
 * vn_path_package_check(vp, path, pathlen, component):
 *   Walk the absolute `path' one component at a time and set
 *   *component to the 0-based index of the first component that is a
 *   package name, or -1 if none is.  Returns 0 on completion, EINVAL
 *   for a negative length, a relative path, or an unterminated string.
 *
 *   NOTE: `path' is modified in place -- each component examined is
 *   NUL-terminated by overwriting the following separator, and the
 *   separators are not restored.
 */
int
vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
{
	char *ptr, *end;
	int comp = 0;

	if (pathlen < 0) {
		return EINVAL;
	}

	*component = -1;
	if (*path != '/') {
		return EINVAL;
	}

	end = path + 1;
	while (end < path + pathlen && *end != '\0') {
		/* skip over any run of '/' separators */
		while (end < path + pathlen && *end == '/' && *end != '\0') {
			end++;
		}

		ptr = end;

		/* advance to the end of this component */
		while (end < path + pathlen && *end != '/' && *end != '\0') {
			end++;
		}

		if (end > path + pathlen) {
			// hmm, string wasn't null terminated
			return EINVAL;
		}

		/* terminate the component so is_package_name can inspect it */
		*end = '\0';
		if (is_package_name(ptr, (int)(end - ptr))) {
			*component = comp;
			break;
		}

		end++;
		comp++;
	}

	return 0;
}
3985 
3986 /*
3987  * Determine if a name is inappropriate for a searchfs query.
3988  * This list consists of /System currently.
3989  */
3990 
/*
 * vn_searchfs_inappropriate_name(name, len):
 *   Returns 1 if `name' exactly matches a name disallowed in searchfs
 *   queries (currently only "System"), 0 otherwise, and EINVAL for a
 *   negative length.  The comparison is case-sensitive.
 */
int
vn_searchfs_inappropriate_name(const char *name, int len)
{
	static const char *const bad_names[] = { "System" };
	static const int bad_len[] = { 6 };
	int i;

	if (len < 0) {
		return EINVAL;
	}

	for (i = 0; i < (int)(sizeof(bad_names) / sizeof(bad_names[0])); i++) {
		if (len == bad_len[i] &&
		    strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
			return 1;
		}
	}

	/* no disallowed name matched */
	return 0;
}
4011 
4012 /*
4013  * Top level filesystem related information gathering.
4014  */
extern unsigned int vfs_nummntops;   /* exported read-only via the sysctl below */

/*
 * The VFS_NUMMNTOPS shouldn't be at name[1] since
 * is a VFS generic variable. Since we no longer support
 * VT_UFS, we reserve its value to support this sysctl node.
 *
 * It should have been:
 *    name[0]:  VFS_GENERIC
 *    name[1]:  VFS_NUMMNTOPS
 */
SYSCTL_INT(_vfs, VFS_NUMMNTOPS, nummntops,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &vfs_nummntops, 0, "");
4029 
int
vfs_sysctl(int *name __unused, u_int namelen __unused,
    user_addr_t oldp __unused, size_t *oldlenp __unused,
    user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused);

/*
 * Legacy top-level VFS sysctl entry point; it is now a stub that
 * always fails with EINVAL (selectors are handled through the sysctl
 * node machinery below instead).
 */
int
vfs_sysctl(int *name __unused, u_int namelen __unused,
    user_addr_t oldp __unused, size_t *oldlenp __unused,
    user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused)
{
	return EINVAL;
}
4042 
4043 
4044 //
4045 // The following code disallows specific sysctl's that came through
4046 // the direct sysctl interface (vfs_sysctl_node) instead of the newer
4047 // sysctl_vfs_ctlbyfsid() interface.  We can not allow these selectors
4048 // through vfs_sysctl_node() because it passes the user's oldp pointer
4049 // directly to the file system which (for these selectors) casts it
4050 // back to a struct sysctl_req and then proceed to use SYSCTL_IN()
4051 // which jumps through an arbitrary function pointer.  When called
4052 // through the sysctl_vfs_ctlbyfsid() interface this does not happen
4053 // and so it's safe.
4054 //
4055 // Unfortunately we have to pull in definitions from AFP and SMB and
4056 // perform explicit name checks on the file system to determine if
4057 // these selectors are being used.
4058 //
4059 
4060 #define AFPFS_VFS_CTL_GETID            0x00020001
4061 #define AFPFS_VFS_CTL_NETCHANGE        0x00020002
4062 #define AFPFS_VFS_CTL_VOLCHANGE        0x00020003
4063 
4064 #define SMBFS_SYSCTL_REMOUNT           1
4065 #define SMBFS_SYSCTL_REMOUNT_INFO      2
4066 #define SMBFS_SYSCTL_GET_SERVER_SHARE  3
4067 
4068 
4069 static int
is_bad_sysctl_name(struct vfstable * vfsp,int selector_name)4070 is_bad_sysctl_name(struct vfstable *vfsp, int selector_name)
4071 {
4072 	switch (selector_name) {
4073 	case VFS_CTL_QUERY:
4074 	case VFS_CTL_TIMEO:
4075 	case VFS_CTL_NOLOCKS:
4076 	case VFS_CTL_NSTATUS:
4077 	case VFS_CTL_SADDR:
4078 	case VFS_CTL_DISC:
4079 	case VFS_CTL_SERVERINFO:
4080 		return 1;
4081 
4082 	default:
4083 		break;
4084 	}
4085 
4086 	// the more complicated check for some of SMB's special values
4087 	if (strcmp(vfsp->vfc_name, "smbfs") == 0) {
4088 		switch (selector_name) {
4089 		case SMBFS_SYSCTL_REMOUNT:
4090 		case SMBFS_SYSCTL_REMOUNT_INFO:
4091 		case SMBFS_SYSCTL_GET_SERVER_SHARE:
4092 			return 1;
4093 		}
4094 	} else if (strcmp(vfsp->vfc_name, "afpfs") == 0) {
4095 		switch (selector_name) {
4096 		case AFPFS_VFS_CTL_GETID:
4097 		case AFPFS_VFS_CTL_NETCHANGE:
4098 		case AFPFS_VFS_CTL_VOLCHANGE:
4099 			return 1;
4100 		}
4101 	}
4102 
4103 	//
4104 	// If we get here we passed all the checks so the selector is ok
4105 	//
4106 	return 0;
4107 }
4108 
4109 
/*
 * Old-style per-filesystem sysctl node handler.  Looks up the vfstable
 * entry whose typenum matches the sysctl OID, screens out selectors that
 * are unsafe on this path (see is_bad_sysctl_name() above), and otherwise
 * forwards the request to the file system's vfs_sysctl entry point.
 */
int vfs_sysctl_node SYSCTL_HANDLER_ARGS
{
	int *name, namelen;
	struct vfstable *vfsp;
	int error;
	int fstypenum;

	fstypenum = oidp->oid_number;
	name = arg1;
	namelen = arg2;

	/* all sysctl names at this level should have at least one name slot for the FS */
	if (namelen < 1) {
		return EISDIR; /* overloaded */
	}
	/*
	 * Take a reference on the vfstable entry (under the mount list lock)
	 * so the file system cannot be unregistered while we call into it.
	 */
	mount_list_lock();
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_typenum == fstypenum) {
			vfsp->vfc_refcount++;
			break;
		}
	}
	mount_list_unlock();

	if (vfsp == NULL) {
		return ENOTSUP;
	}

	if (is_bad_sysctl_name(vfsp, name[0])) {
		printf("vfs: bad selector 0x%.8x for old-style sysctl().  use the sysctl-by-fsid interface instead\n", name[0]);
		error = EPERM;
	} else {
		/* Hand the raw user pointers straight through to the FS. */
		error = (vfsp->vfc_vfsops->vfs_sysctl)(name, namelen,
		    req->oldptr, &req->oldlen, req->newptr, req->newlen,
		    vfs_context_current());
	}

	/* Drop the reference taken above. */
	mount_list_lock();
	vfsp->vfc_refcount--;
	mount_list_unlock();

	return error;
}
4153 
/*
 * Check to see if a filesystem is mounted on a block device.
 *
 * Returns 0 if the device (and all aliases of it) is free, EBUSY if it is
 * already mounted on.  If another thread is in the middle of mounting
 * (SI_MOUNTING set by someone else), wait for it to finish and re-check.
 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

restart:
	SPECHASH_LOCK();
	/*
	 * Another thread is mounting this device: sleep until it is done.
	 * msleep() with PDROP releases SPECHASH_LOCK, so we must restart
	 * and re-take the lock before looking at the flags again.
	 */
	if (vp->v_specflags & SI_MOUNTING && (vp->v_specinfo->si_mountingowner != current_thread())) {
		msleep((caddr_t)&vp->v_specflags, SPECHASH_LOCK_ADDR(), PVFS | PDROP, "vnode_waitformounting", NULL);
		goto restart;
	}
	if (vp->v_specflags & SI_MOUNTEDON) {
		error = EBUSY;
		goto out;
	}
	if (vp->v_specflags & SI_ALIASED) {
		/*
		 * The device has aliases; walk the spec hash chain and apply
		 * the same checks to every other vnode for the same rdev/type.
		 */
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type || vq == vp) {
				continue;
			}
			if (vq->v_specflags & SI_MOUNTING) {
				msleep((caddr_t)&vq->v_specflags, SPECHASH_LOCK_ADDR(), PVFS | PDROP, "vnode_waitformounting", NULL);
				goto restart;
			}
			if (vq->v_specflags & SI_MOUNTEDON) {
				error = EBUSY;
				break;
			}
		}
	}
out:
	SPECHASH_UNLOCK();
	return error;
}
4193 
/*
 * Transition a block device vnode from "mount in progress" to "mounted on":
 * set SI_MOUNTEDON, clear SI_MOUNTING, and wake up any threads sleeping on
 * v_specflags in vfs_mountedon()/vfs_setmounting().
 */
void
vfs_setmountedon(vnode_t vp)
{
	vnode_lock(vp);
	SPECHASH_LOCK();
	vp->v_specflags |= SI_MOUNTEDON;
	vp->v_specflags &= ~SI_MOUNTING;
	vp->v_specinfo->si_mountingowner = NULL;
	SPECHASH_UNLOCK();
	vnode_unlock(vp);
	/* Wake waiters after dropping the locks. */
	wakeup(&vp->v_specflags);
}
4206 
/*
 * Abort a mount attempt on a block device vnode: clear SI_MOUNTING (set by
 * vfs_setmounting()) and wake any threads waiting for the mount to resolve.
 */
void
vfs_clearmounting(vnode_t vp)
{
	vnode_lock(vp);
	SPECHASH_LOCK();
	vp->v_specflags &= ~SI_MOUNTING;
	vp->v_specinfo->si_mountingowner = NULL;
	SPECHASH_UNLOCK();
	vnode_unlock(vp);
	/* Wake waiters after dropping the locks. */
	wakeup(&vp->v_specflags);
}
4218 
/*
 * Mark a block device vnode as having a mount in progress (SI_MOUNTING).
 * Waits for any concurrent mount attempt to finish first, then verifies
 * that neither this vnode nor any alias of the device is already mounted
 * on.  Returns 0 with SI_MOUNTING held on success; EBUSY otherwise (the
 * flag is cleared again on the alias-conflict failure path).
 */
int
vfs_setmounting(vnode_t vp)
{
	struct vnode *vq;
	int error = 0;

	vnode_lock(vp);
	/* Wait out any mount already in progress on this vnode. */
	while (vp->v_specflags & SI_MOUNTING) {
		msleep((caddr_t)&vp->v_specflags, &vp->v_lock, PVFS, "vnode_waitformounting", NULL);
	}
	if (vp->v_specflags & SI_MOUNTEDON) {
		vnode_unlock(vp);
		return EBUSY;
	}
	/* Claim the device for this thread before scanning aliases. */
	SPECHASH_LOCK();
	vp->v_specflags |= SI_MOUNTING;
	vp->v_specinfo->si_mountingowner = current_thread();
	vnode_unlock(vp);
restart:
	if (vp->v_specflags & SI_ALIASED) {
		/*
		 * Check every alias (same rdev/type) of the device.  A
		 * msleep() with PDROP releases SPECHASH_LOCK, so it must be
		 * re-taken before restarting the scan.
		 */
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type || vq == vp) {
				continue;
			}
			if (vq->v_specflags & SI_MOUNTING) {
				msleep((caddr_t)&vq->v_specflags, SPECHASH_LOCK_ADDR(), PVFS | PDROP, "vnode_waitformounting", NULL);
				SPECHASH_LOCK();
				goto restart;
			}
			if (vq->v_specflags & SI_MOUNTEDON) {
				error = EBUSY;
				break;
			}
		}
	}
	SPECHASH_UNLOCK();
	if (error) {
		/* Undo the SI_MOUNTING claim and wake anyone waiting on it. */
		vnode_lock(vp);
		SPECHASH_LOCK();
		vp->v_specflags &= ~SI_MOUNTING;
		SPECHASH_UNLOCK();
		vnode_unlock(vp);
		wakeup(&vp->v_specflags);
	}
	return error;
}
4269 
/* Accumulator shared between vfs_unmountall() and unmount_callback(). */
struct unmount_info {
	int     u_errs; // Total failed unmounts
	int     u_busy; // EBUSY failed unmounts
	int     u_count; // Total volumes iterated
	int     u_only_non_system; // If set, skip root/system volumes
};
4276 
/*
 * vfs_iterate() callback used by vfs_unmountall(): force-unmount one mount
 * point, or skip it when only non-system volumes were requested.  Failure
 * counts are accumulated in the struct unmount_info passed via 'arg'.
 */
static int
unmount_callback(mount_t mp, void *arg)
{
	int error;
	char *mntname;
	struct unmount_info *uip = arg;

	uip->u_count++;

	/* Copy the name out now; 'mp' may be gone after dounmount(). */
	mntname = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);
	strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);

	if (uip->u_only_non_system
	    && ((mp->mnt_flag & MNT_ROOTFS) || (mp->mnt_kern_flag & MNTK_SYSTEM))) { //MNTK_BACKS_ROOT
		printf("unmount(%d) %s skipped\n", uip->u_only_non_system, mntname);
		mount_iterdrop(mp);     // VFS_ITERATE_CB_DROPREF
	} else {
		printf("unmount(%d) %s\n", uip->u_only_non_system, mntname);

		/*
		 * Swap the iteration reference for a real mount ref before
		 * unmounting; dounmount() consumes the ref.
		 */
		mount_ref(mp, 0);
		mount_iterdrop(mp);     // VFS_ITERATE_CB_DROPREF
		error = dounmount(mp, MNT_FORCE, 1, vfs_context_current());
		if (error) {
			uip->u_errs++;
			printf("Unmount of %s failed (%d)\n", mntname ? mntname:"?", error);
			if (error == EBUSY) {
				uip->u_busy++;
			}
		}
	}
	zfree(ZV_NAMEI, mntname);

	return VFS_RETURNED;
}
4311 
/*
 * Unmount all filesystems. The list is traversed in reverse order
 * of mounting to avoid dependencies.
 * Busy mounts are retried.
 *
 * only_non_system: if non-zero, root/system volumes are skipped (first of
 * the two passes made by reboot_kernel()).
 */
__private_extern__ void
vfs_unmountall(int only_non_system)
{
	int mounts, sec = 1;
	struct unmount_info ui;

	/*
	 * Ensure last-completion-time is valid before anyone can see that
	 * VFS shutdown has started.
	 */
	vfs_shutdown_last_completion_time = mach_absolute_time();
	OSMemoryBarrier();
	vfs_unmountall_started = 1;
	printf("vfs_unmountall(%ssystem) start\n", only_non_system ? "non" : "");

retry:
	ui.u_errs = ui.u_busy = ui.u_count = 0;
	ui.u_only_non_system = only_non_system;
	// avoid vfs_iterate deadlock in dounmount(), use VFS_ITERATE_CB_DROPREF
	vfs_iterate(VFS_ITERATE_CB_DROPREF | VFS_ITERATE_TAIL_FIRST, unmount_callback, &ui);
	mounts = mount_getvfscnt();
	if (mounts == 0) {
		goto out;
	}
	if (ui.u_busy > 0) {            // Busy mounts - wait & retry
		/* Exponential backoff: 1s, 2s, ... up to a 32s total cap. */
		tsleep(&nummounts, PVFS, "busy mount", sec * hz);
		sec *= 2;
		if (sec <= 32) {
			goto retry;
		}
		printf("Unmounting timed out\n");
	} else if (ui.u_count < mounts) {
		// If the vfs_iterate missed mounts in progress - wait a bit
		tsleep(&nummounts, PVFS, "missed mount", 2 * hz);
	}

out:
	printf("vfs_unmountall(%ssystem) end\n", only_non_system ? "non" : "");

	/*
	 * reboot_kernel() calls us twice; once to deal with non-system
	 * mounts, and again to sweep up anything left after terminating
	 * DEXTs.  We're only finished once we've completed the second pass.
	 */
	if (!only_non_system) {
		vfs_unmountall_finished = 1;
	}
}
4365 
4366 /*
4367  * vfs_shutdown_in_progress --
4368  *
4369  * Returns whether or not the VFS is shutting down the file systems.
4370  */
4371 boolean_t
vfs_shutdown_in_progress(void)4372 vfs_shutdown_in_progress(void)
4373 {
4374 	return vfs_unmountall_started && !vfs_unmountall_finished;
4375 }
4376 
4377 /*
4378  * vfs_shutdown_finished --
4379  *
4380  * Returns whether or not the VFS shutdown has completed.
4381  */
4382 boolean_t
vfs_shutdown_finished(void)4383 vfs_shutdown_finished(void)
4384 {
4385 	return !!vfs_unmountall_finished;
4386 }
4387 
4388 /*
4389  * vfs_update_last_completion_time --
4390  *
4391  * Updates the "last I/O completion time" timestamp used by the watchdog
4392  * to monitor VFS shutdown progress.  Called by various I/O stack layers
4393  * as operations complete and progress moves forward.
4394  */
4395 void
vfs_update_last_completion_time(void)4396 vfs_update_last_completion_time(void)
4397 {
4398 	if (vfs_unmountall_started) {
4399 		vfs_shutdown_last_completion_time = mach_absolute_time();
4400 	}
4401 }
4402 
4403 /*
4404  * vfs_last_completion_time --
4405  *
4406  * Returns the "last I/O completion time" timestamp.  Return
4407  * value is a mach_absolute_time() value, and is not meaningful
4408  * unless vfs_is_shutting_down() also returns true.
4409  */
4410 uint64_t
vfs_last_completion_time(void)4411 vfs_last_completion_time(void)
4412 {
4413 	return vfs_unmountall_started ? vfs_shutdown_last_completion_time : 0;
4414 }
4415 
/*
 * This routine is called from vnode_pager_deallocate out of the VM
 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
 * on a vnode that has a UBCINFO
 *
 * Detaches and deallocates the vnode's ubc_info, first making sure any
 * lingering ubc mapping reference is released via ubc_unmap().
 */
__private_extern__ void
vnode_pager_vrele(vnode_t vp)
{
	struct ubc_info *uip;

	vnode_lock_spin(vp);

	vp->v_lflag &= ~VNAMED_UBC;
	if (vp->v_usecount != 0) {
		/*
		 * At the eleventh hour, just before the ubcinfo is
		 * destroyed, ensure the ubc-specific v_usecount
		 * reference has gone.  We use v_usecount != 0 as a hint;
		 * ubc_unmap() does nothing if there's no mapping.
		 *
		 * This case is caused by coming here via forced unmount,
		 * versus the usual vm_object_deallocate() path.
		 * In the forced unmount case, ubc_destroy_named()
		 * releases the pager before memory_object_last_unmap()
		 * can be called.
		 */
		/* ubc_unmap() may block; cannot hold the spin lock across it. */
		vnode_unlock(vp);
		ubc_unmap(vp);
		vnode_lock_spin(vp);
	}

	/* Detach the ubc_info under the lock, free it after dropping it. */
	uip = vp->v_ubcinfo;
	vp->v_ubcinfo = UBC_INFO_NULL;

	vnode_unlock(vp);

	ubc_info_deallocate(uip);
}
4454 
4455 
4456 #include <sys/disk.h>
4457 
4458 u_int32_t rootunit = (u_int32_t)-1;
4459 
4460 #if CONFIG_IOSCHED
4461 extern int lowpri_throttle_enabled;
4462 extern int iosched_enabled;
4463 #endif
4464 
/*
 * Interrogate the underlying block device via DKIOC* ioctls and record its
 * I/O limits and capabilities (max transfer sizes, segment counts/sizes,
 * alignment, queue depth, SSD/virtual/fusion characteristics) in 'mp'.
 *
 * Returns 0 on success, or the error from a failed required ioctl.
 * Optional ioctls (e.g. DKIOCISVIRTUAL, DKIOCGETLOCATION) are best-effort
 * and their failures are ignored.
 */
errno_t
vfs_init_io_attributes(vnode_t devvp, mount_t mp)
{
	int     error;
	off_t   readblockcnt = 0;
	off_t   writeblockcnt = 0;
	off_t   readmaxcnt = 0;
	off_t   writemaxcnt = 0;
	off_t   readsegcnt = 0;
	off_t   writesegcnt = 0;
	off_t   readsegsize = 0;
	off_t   writesegsize = 0;
	off_t   alignment = 0;
	u_int32_t minsaturationbytecount = 0;
	u_int32_t ioqueue_depth = 0;
	u_int32_t blksize;
	u_int64_t temp;
	u_int32_t features;
	u_int64_t location = 0;
	vfs_context_t ctx = vfs_context_current();
	dk_corestorage_info_t cs_info;
	boolean_t cs_present = FALSE;
	int isssd = 0;
	int isvirtual = 0;


	VNOP_IOCTL(devvp, DKIOCGETTHROTTLEMASK, (caddr_t)&mp->mnt_throttle_mask, 0, NULL);
	/*
	 * as a reasonable approximation, only use the lowest bit of the mask
	 * to generate a disk unit number
	 */
	mp->mnt_devbsdunit = mp->mnt_throttle_mask ?
	    num_trailing_0(mp->mnt_throttle_mask) : (LOWPRI_MAX_NUM_DEV - 1);

	if (devvp == rootvp) {
		rootunit = mp->mnt_devbsdunit;
	}

	if (mp->mnt_devbsdunit == rootunit) {
		/*
		 * this mount point exists on the same device as the root
		 * partition, so it comes under the hard throttle control...
		 * this is true even for the root mount point itself
		 */
		mp->mnt_kern_flag |= MNTK_ROOTDEV;
	}
	/*
	 * force the spec device to re-cache
	 * the underlying block size in case
	 * the filesystem overrode the initial value
	 */
	set_fsblocksize(devvp);


	if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
	    (caddr_t)&blksize, 0, ctx))) {
		return error;
	}

	mp->mnt_devblocksize = blksize;

	/*
	 * set the maximum possible I/O size
	 * this may get clipped to a smaller value
	 * based on which constraints are being advertised
	 * and if those advertised constraints result in a smaller
	 * limit for a given I/O
	 */
	mp->mnt_maxreadcnt = MAX_UPL_SIZE_BYTES;
	mp->mnt_maxwritecnt = MAX_UPL_SIZE_BYTES;

	/* Optional queries: failures leave the defaults in place. */
	if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
		if (isvirtual) {
			mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
			mp->mnt_flag |= MNT_REMOVABLE;
		}
	}
	if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
		if (isssd) {
			mp->mnt_kern_flag |= MNTK_SSD;
		}
	}
	/* Required queries: a failure aborts initialization. */
	if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
	    (caddr_t)&features, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
	    (caddr_t)&readblockcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
	    (caddr_t)&writeblockcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
	    (caddr_t)&readmaxcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
	    (caddr_t)&writemaxcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
	    (caddr_t)&readsegcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
	    (caddr_t)&writesegcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
	    (caddr_t)&readsegsize, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
	    (caddr_t)&writesegsize, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
	    (caddr_t)&alignment, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
	    (caddr_t)&ioqueue_depth, 0, ctx))) {
		return error;
	}

	/*
	 * Clip the advertised limits into the mount's 32-bit fields; a
	 * byte-count limit and a block-count limit may both apply, in
	 * which case the smaller wins.
	 */
	if (readmaxcnt) {
		mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX :(uint32_t) readmaxcnt;
	}

	if (readblockcnt) {
		temp = readblockcnt * blksize;
		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;

		if (temp < mp->mnt_maxreadcnt) {
			mp->mnt_maxreadcnt = (u_int32_t)temp;
		}
	}

	if (writemaxcnt) {
		mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : (uint32_t)writemaxcnt;
	}

	if (writeblockcnt) {
		temp = writeblockcnt * blksize;
		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;

		if (temp < mp->mnt_maxwritecnt) {
			mp->mnt_maxwritecnt = (u_int32_t)temp;
		}
	}

	/* Segment counts default to one segment per page of max transfer. */
	if (readsegcnt) {
		temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
	} else {
		temp = mp->mnt_maxreadcnt / PAGE_SIZE;

		if (temp > UINT16_MAX) {
			temp = UINT16_MAX;
		}
	}
	mp->mnt_segreadcnt = (u_int16_t)temp;

	if (writesegcnt) {
		temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
	} else {
		temp = mp->mnt_maxwritecnt / PAGE_SIZE;

		if (temp > UINT16_MAX) {
			temp = UINT16_MAX;
		}
	}
	mp->mnt_segwritecnt = (u_int16_t)temp;

	if (readsegsize) {
		temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
	} else {
		temp = mp->mnt_maxreadcnt;
	}
	mp->mnt_maxsegreadsize = (u_int32_t)temp;

	if (writesegsize) {
		temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
	} else {
		temp = mp->mnt_maxwritecnt;
	}
	mp->mnt_maxsegwritesize = (u_int32_t)temp;

	/* Alignment is stored as a mask; anything above a page clips to PAGE_MASK. */
	if (alignment) {
		temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
	} else {
		temp = 0;
	}
	mp->mnt_alignmentmask = (uint32_t)temp;


	if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH) {
		temp = ioqueue_depth;
	} else {
		temp = MNT_DEFAULT_IOQUEUE_DEPTH;
	}

	mp->mnt_ioqueue_depth = (uint32_t)temp;
	mp->mnt_ioscale = MNT_IOSCALE(mp->mnt_ioqueue_depth);

	if (mp->mnt_ioscale > 1) {
		printf("ioqueue_depth = %d,   ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
	}

	if (features & DK_FEATURE_FORCE_UNIT_ACCESS) {
		mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
	}

	if (VNOP_IOCTL(devvp, DKIOCGETIOMINSATURATIONBYTECOUNT, (caddr_t)&minsaturationbytecount, 0, ctx) == 0) {
		mp->mnt_minsaturationbytecount = minsaturationbytecount;
	} else {
		mp->mnt_minsaturationbytecount = 0;
	}

	if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, ctx) == 0) {
		cs_present = TRUE;
	}

	if (features & DK_FEATURE_UNMAP) {
		mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED;

		if (cs_present == TRUE) {
			mp->mnt_ioflags |= MNT_IOFLAGS_CSUNMAP_SUPPORTED;
		}
	}
	if (cs_present == TRUE) {
		/*
		 * for now we'll use the following test as a proxy for
		 * the underlying drive being FUSION in nature
		 */
		if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) {
			mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
		}
	} else {
		/* Check for APFS Fusion */
		dk_apfs_flavour_t flavour;
		if ((VNOP_IOCTL(devvp, DKIOCGETAPFSFLAVOUR, (caddr_t)&flavour, 0, ctx) == 0) &&
		    (flavour == DK_APFS_FUSION)) {
			mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
		}
	}

	if (VNOP_IOCTL(devvp, DKIOCGETLOCATION, (caddr_t)&location, 0, ctx) == 0) {
		if (location & DK_LOCATION_EXTERNAL) {
			mp->mnt_ioflags |= MNT_IOFLAGS_PERIPHERAL_DRIVE;
			mp->mnt_flag |= MNT_REMOVABLE;
		}
	}

#if CONFIG_IOSCHED
	if (iosched_enabled && (features & DK_FEATURE_PRIORITY)) {
		mp->mnt_ioflags |= MNT_IOFLAGS_IOSCHED_SUPPORTED;
		throttle_info_disable_throttle(mp->mnt_devbsdunit, (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) != 0);
	}
#endif /* CONFIG_IOSCHED */
	/* error is 0 here: the last required ioctl above succeeded. */
	return error;
}
4738 
4739 static struct klist fs_klist;
4740 static LCK_GRP_DECLARE(fs_klist_lck_grp, "fs_klist");
4741 static LCK_MTX_DECLARE(fs_klist_lock, &fs_klist_lck_grp);
4742 
/* Initialize the knote list used to deliver EVFILT_FS events. */
void
vfs_event_init(void)
{
	klist_init(&fs_klist);
}
4748 
/*
 * Post a VFS event to all EVFILT_FS listeners.  For VQ_DEAD/VQ_NOTRESP
 * events the mount's not-responding state is also updated ('data' non-zero
 * means the file system is responding again).
 */
void
vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data)
{
	if (event == VQ_DEAD || event == VQ_NOTRESP) {
		struct mount *mp = vfs_getvfs(fsid);
		if (mp) {
			mount_lock_spin(mp);
			if (data) {
				mp->mnt_lflag &= ~MNT_LNOTRESP;     // Now responding
			} else {
				mp->mnt_lflag |= MNT_LNOTRESP;      // Not responding
			}
			mount_unlock(mp);
		}
	}

	/* Broadcast the event to every attached knote. */
	lck_mtx_lock(&fs_klist_lock);
	KNOTE(&fs_klist, event);
	lck_mtx_unlock(&fs_klist_lock);
}
4769 
/*
 * return the number of mounted filesystems.
 * (thin wrapper around mount_getvfscnt() for the sysctl handlers below)
 */
static int
sysctl_vfs_getvfscnt(void)
{
	return mount_getvfscnt();
}
4778 
4779 
4780 static int
mount_getvfscnt(void)4781 mount_getvfscnt(void)
4782 {
4783 	int ret;
4784 
4785 	mount_list_lock();
4786 	ret = nummounts;
4787 	mount_list_unlock();
4788 	return ret;
4789 }
4790 
4791 
4792 
4793 static int
mount_fillfsids(fsid_t * fsidlst,int count)4794 mount_fillfsids(fsid_t *fsidlst, int count)
4795 {
4796 	struct mount *mp;
4797 	int actual = 0;
4798 
4799 	actual = 0;
4800 	mount_list_lock();
4801 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4802 		if (actual < count) {
4803 			fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
4804 			actual++;
4805 		}
4806 	}
4807 	mount_list_unlock();
4808 	return actual;
4809 }
4810 
/*
 * fill in the array of fsid_t's up to a max of 'count', the actual
 * number filled in will be set in '*actual'.  If there are more fsid_t's
 * than room in fsidlst then ENOMEM will be returned and '*actual' will
 * have the actual count.
 * having *actual filled out even in the error case is depended upon.
 */
static int
sysctl_vfs_getvfslist(fsid_t *fsidlst, unsigned long count, unsigned long *actual)
{
	struct mount *mp;

	*actual = 0;
	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		/* Keep counting past 'count' so the caller learns the true total. */
		(*actual)++;
		if (*actual <= count) {
			fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
		}
	}
	mount_list_unlock();
	return *actual <= count ? 0 : ENOMEM;
}
4834 
/*
 * Sysctl handler that copies out the list of mounted file system fsids.
 * Read-only; a NULL oldptr query returns just the space required.  If a
 * mount appears between sizing and filling the buffer, the operation is
 * retried with the caller's original buffer size.
 */
static int
sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	unsigned long actual;
	int error;
	size_t space;
	fsid_t *fsidlst;

	/* This is a readonly node. */
	if (req->newptr != USER_ADDR_NULL) {
		return EPERM;
	}

	/* they are querying us so just return the space required. */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
		return 0;
	}
again:
	/*
	 * Retrieve an accurate count of the amount of space required to copy
	 * out all the fsids in the system.
	 */
	space = req->oldlen;
	req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);

	/* they didn't give us enough space. */
	if (space < req->oldlen) {
		return ENOMEM;
	}

	fsidlst = kalloc_data(req->oldlen, Z_WAITOK | Z_ZERO);
	if (fsidlst == NULL) {
		return ENOMEM;
	}

	error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
	    &actual);
	/*
	 * If we get back ENOMEM, then another mount has been added while we
	 * slept in malloc above.  If this is the case then try again.
	 */
	if (error == ENOMEM) {
		kfree_data(fsidlst, req->oldlen);
		/* Restore the caller's buffer size before retrying. */
		req->oldlen = space;
		goto again;
	}
	if (error == 0) {
		error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
	}
	kfree_data(fsidlst, req->oldlen);
	return error;
}
4889 
/*
 * Do a sysctl by fsid.
 *
 * Reads a union union_vfsidctl (32- or 64-bit layout depending on the
 * calling process), looks up the mount by fsid, and first offers the
 * request to the file system's own vfs_sysctl.  If the FS declines
 * (ENOTSUP), the generic selectors VFS_CTL_UMOUNT, VFS_CTL_OSTATFS and
 * VFS_CTL_STATFS64 are handled here.
 */
static int
sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
	union union_vfsidctl vc;
	struct mount *mp = NULL;
	struct vfsstatfs *sp;
	int *name, namelen;
	int flags = 0;
	int error = 0, gotref = 0;
	vfs_context_t ctx = vfs_context_current();
	proc_t p = req->p;      /* XXX req->p != current_proc()? */
	boolean_t is_64_bit;
	union {
		struct statfs64 sfs64;
		struct user64_statfs osfs64;
		struct user32_statfs osfs32;
	} *sfsbuf;

	if (req->newptr == USER_ADDR_NULL) {
		error = EINVAL;
		goto out;
	}

	name = arg1;
	namelen = arg2;
	is_64_bit = proc_is64bit(p);

	error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
	if (error) {
		goto out;
	}
	if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
		error = EINVAL;
		goto out;
	}
	/* Takes an iteration reference on success; dropped at 'out'. */
	mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
	if (mp == NULL) {
		error = ENOENT;
		goto out;
	}
	gotref = 1;
	/* reset so that the fs specific code can fetch it. */
	req->newidx = 0;
	/*
	 * Note if this is a VFS_CTL then we pass the actual sysctl req
	 * in for "oldp" so that the lower layer can DTRT and use the
	 * SYSCTL_IN/OUT routines.
	 */
	if (mp->mnt_op->vfs_sysctl != NULL) {
		if (is_64_bit) {
			if (vfs_64bitready(mp)) {
				error = mp->mnt_op->vfs_sysctl(name, namelen,
				    CAST_USER_ADDR_T(req),
				    NULL, USER_ADDR_NULL, 0,
				    ctx);
			} else {
				error = ENOTSUP;
			}
		} else {
			error = mp->mnt_op->vfs_sysctl(name, namelen,
			    CAST_USER_ADDR_T(req),
			    NULL, USER_ADDR_NULL, 0,
			    ctx);
		}
		/* ENOTSUP means "not mine": fall through to the generic cases. */
		if (error != ENOTSUP) {
			goto out;
		}
	}
	switch (name[0]) {
	case VFS_CTL_UMOUNT:
#if CONFIG_MACF
		error = mac_mount_check_umount(ctx, mp);
		if (error != 0) {
			goto out;
		}
#endif
		/* Point newptr at the caller-supplied payload (unmount flags). */
		req->newidx = 0;
		if (is_64_bit) {
			req->newptr = vc.vc64.vc_ptr;
			req->newlen = (size_t)vc.vc64.vc_len;
		} else {
			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
			req->newlen = vc.vc32.vc_len;
		}
		error = SYSCTL_IN(req, &flags, sizeof(flags));
		if (error) {
			break;
		}

		/* Trade the iteration ref for a real mount ref. */
		mount_ref(mp, 0);
		mount_iterdrop(mp);
		gotref = 0;
		/* safedounmount consumes a ref */
		error = safedounmount(mp, flags, ctx);
		break;
	case VFS_CTL_OSTATFS:
	case VFS_CTL_STATFS64:
#if CONFIG_MACF
		error = mac_mount_check_stat(ctx, mp);
		if (error != 0) {
			break;
		}
#endif
		/* Point newptr at the caller-supplied payload (MNT_* wait flags). */
		req->newidx = 0;
		if (is_64_bit) {
			req->newptr = vc.vc64.vc_ptr;
			req->newlen = (size_t)vc.vc64.vc_len;
		} else {
			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
			req->newlen = vc.vc32.vc_len;
		}
		error = SYSCTL_IN(req, &flags, sizeof(flags));
		if (error) {
			break;
		}
		sp = &mp->mnt_vfsstat;
		/* Refresh the stats from the FS unless a pure NOWAIT was requested. */
		if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
		    (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT))) {
			goto out;
		}

		sfsbuf = kalloc_type(typeof(*sfsbuf), Z_WAITOK);

		if (name[0] == VFS_CTL_STATFS64) {
			struct statfs64 *sfs = &sfsbuf->sfs64;

			vfs_get_statfs64(mp, sfs);
			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		} else if (is_64_bit) {
			struct user64_statfs *sfs = &sfsbuf->osfs64;

			bzero(sfs, sizeof(*sfs));
			sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;
			sfs->f_bsize = (user64_long_t)sp->f_bsize;
			sfs->f_iosize = (user64_long_t)sp->f_iosize;
			sfs->f_blocks = (user64_long_t)sp->f_blocks;
			sfs->f_bfree = (user64_long_t)sp->f_bfree;
			sfs->f_bavail = (user64_long_t)sp->f_bavail;
			sfs->f_files = (user64_long_t)sp->f_files;
			sfs->f_ffree = (user64_long_t)sp->f_ffree;
			sfs->f_fsid = sp->f_fsid;
			sfs->f_owner = sp->f_owner;
			vfs_getfstypename(mp, sfs->f_fstypename, MFSNAMELEN);
			strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
			strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);

			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		} else {
			struct user32_statfs *sfs = &sfsbuf->osfs32;
			long temp;

			bzero(sfs, sizeof(*sfs));
			sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;

			/*
			 * It's possible for there to be more than 2^^31 blocks in the filesystem, so we
			 * have to fudge the numbers here in that case.   We inflate the blocksize in order
			 * to reflect the filesystem size as best we can.
			 */
			if (sp->f_blocks > INT_MAX) {
				int             shift;

				/*
				 * Work out how far we have to shift the block count down to make it fit.
				 * Note that it's possible to have to shift so far that the resulting
				 * blocksize would be unreportably large.  At that point, we will clip
				 * any values that don't fit.
				 *
				 * For safety's sake, we also ensure that f_iosize is never reported as
				 * being smaller than f_bsize.
				 */
				for (shift = 0; shift < 32; shift++) {
					if ((sp->f_blocks >> shift) <= INT_MAX) {
						break;
					}
					if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX) {
						break;
					}
				}
#define __SHIFT_OR_CLIP(x, s)   ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
				sfs->f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
				sfs->f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
				sfs->f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
#undef __SHIFT_OR_CLIP
				sfs->f_bsize = (user32_long_t)(sp->f_bsize << shift);
				temp = lmax(sp->f_iosize, sp->f_bsize);
				if (temp > INT32_MAX) {
					error = EINVAL;
					kfree_type(typeof(*sfsbuf), sfsbuf);
					goto out;
				}
				sfs->f_iosize = (user32_long_t)temp;
			} else {
				sfs->f_bsize = (user32_long_t)sp->f_bsize;
				sfs->f_iosize = (user32_long_t)sp->f_iosize;
				sfs->f_blocks = (user32_long_t)sp->f_blocks;
				sfs->f_bfree = (user32_long_t)sp->f_bfree;
				sfs->f_bavail = (user32_long_t)sp->f_bavail;
			}
			sfs->f_files = (user32_long_t)sp->f_files;
			sfs->f_ffree = (user32_long_t)sp->f_ffree;
			sfs->f_fsid = sp->f_fsid;
			sfs->f_owner = sp->f_owner;

			vfs_getfstypename(mp, sfs->f_fstypename, MFSNAMELEN);
			strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
			strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);

			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		}
		kfree_type(typeof(*sfsbuf), sfsbuf);
		break;
	default:
		error = ENOTSUP;
		goto out;
	}
out:
	if (gotref != 0) {
		mount_iterdrop(mp);
	}
	return error;
}
5118 
static int      filt_fsattach(struct knote *kn, struct kevent_qos_s *kev);
static void     filt_fsdetach(struct knote *kn);
static int      filt_fsevent(struct knote *kn, long hint);
static int      filt_fstouch(struct knote *kn, struct kevent_qos_s *kev);
static int      filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev);
/*
 * Filter operations for EVFILT_FS knotes.  Events are posted through
 * fs_klist by vfs_event_signal() above.
 */
SECURITY_READ_ONLY_EARLY(struct filterops) fs_filtops = {
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent,
	.f_touch = filt_fstouch,
	.f_process = filt_fsprocess,
};
5131 
/*
 * EVFILT_FS attach: force EV_CLEAR semantics (fflags are reset after each
 * delivery), ignore incoming data, and add the knote to the global fs_klist
 * under fs_klist_lock.  Always returns 0 (not fired) since the filter only
 * reports events that occur after attach.
 */
static int
filt_fsattach(struct knote *kn, __unused struct kevent_qos_s *kev)
{
	kn->kn_flags |= EV_CLEAR; /* automatic */
	kn->kn_sdata = 0;         /* incoming data is ignored */

	lck_mtx_lock(&fs_klist_lock);
	KNOTE_ATTACH(&fs_klist, kn);
	lck_mtx_unlock(&fs_klist_lock);

	/*
	 * filter only sees future events,
	 * so it can't be fired already.
	 */
	return 0;
}
5148 
/*
 * EVFILT_FS detach: remove the knote from the global fs_klist under
 * fs_klist_lock (mirror of filt_fsattach).
 */
static void
filt_fsdetach(struct knote *kn)
{
	lck_mtx_lock(&fs_klist_lock);
	KNOTE_DETACH(&fs_klist, kn);
	lck_mtx_unlock(&fs_klist_lock);
}
5156 
5157 static int
filt_fsevent(struct knote * kn,long hint)5158 filt_fsevent(struct knote *kn, long hint)
5159 {
5160 	/*
5161 	 * Backwards compatibility:
5162 	 * Other filters would do nothing if kn->kn_sfflags == 0
5163 	 */
5164 
5165 	if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
5166 		kn->kn_fflags |= hint;
5167 	}
5168 
5169 	return kn->kn_fflags != 0;
5170 }
5171 
/*
 * EVFILT_FS touch: update the knote's interest mask (kn_sfflags) from a
 * new registration and report whether an event is already pending.
 * Runs under fs_klist_lock to serialize against filt_fsevent.
 */
static int
filt_fstouch(struct knote *kn, struct kevent_qos_s *kev)
{
	int res;

	lck_mtx_lock(&fs_klist_lock);

	kn->kn_sfflags = kev->fflags;

	/*
	 * the above filter function sets bits even if nobody is looking for them.
	 * Just preserve those bits even if the new mask is more selective
	 * than before.
	 *
	 * For compatibility with previous implementations, we leave kn_fflags
	 * as they were before.
	 */
	//if (kn->kn_sfflags)
	//	kn->kn_fflags &= kn->kn_sfflags;
	res = (kn->kn_fflags != 0);

	lck_mtx_unlock(&fs_klist_lock);

	return res;
}
5197 
/*
 * EVFILT_FS process: deliver the pending event bits (if any) into *kev
 * under fs_klist_lock.  knote_fill_kevent() copies and, with EV_CLEAR set
 * at attach, resets the accumulated state.  Returns 1 if an event was
 * delivered, 0 otherwise.
 */
static int
filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev)
{
	int res = 0;

	lck_mtx_lock(&fs_klist_lock);
	if (kn->kn_fflags) {
		knote_fill_kevent(kn, kev, 0);
		res = 1;
	}
	lck_mtx_unlock(&fs_klist_lock);
	return res;
}
5211 
/*
 * sysctl handler for vfs.generic.noremotehang.
 *
 * The new value is a pid: a positive pid sets P_NOREMOTEHANG on that
 * process, a negative pid clears it.  If an old pointer is supplied, the
 * current state of the flag (0/1) for the given pid is returned instead
 * and no modification is made.  Modifying a process other than the caller
 * requires superuser privileges (proc_suser).
 */
static int
sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int out, error;
	pid_t pid;
	proc_t p;

	/* We need a pid. */
	if (req->newptr == USER_ADDR_NULL) {
		return EINVAL;
	}

	error = SYSCTL_IN(req, &pid, sizeof(pid));
	if (error) {
		return error;
	}

	/* The sign of the pid only encodes set-vs-clear; look up its magnitude. */
	p = proc_find(pid < 0 ? -pid : pid);
	if (p == NULL) {
		return ESRCH;
	}

	/*
	 * Fetching the value is ok, but we only fetch if the old
	 * pointer is given.
	 */
	if (req->oldptr != USER_ADDR_NULL) {
		out = !((p->p_flag & P_NOREMOTEHANG) == 0);
		proc_rele(p);
		error = SYSCTL_OUT(req, &out, sizeof(out));
		return error;
	}

	/* cansignal offers us enough security. */
	if (p != req->p && proc_suser(req->p) != 0) {
		proc_rele(p);
		return EPERM;
	}

	if (pid < 0) {
		OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
	} else {
		OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
	}
	proc_rele(p);

	return 0;
}
5261 
5262 static int
5263 sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS
5264 {
5265 	int *name, namelen;
5266 	struct vfstable *vfsp;
5267 	struct vfsconf vfsc = {};
5268 
5269 	(void)oidp;
5270 	name = arg1;
5271 	namelen = arg2;
5272 
5273 	if (namelen < 1) {
5274 		return EISDIR;
5275 	} else if (namelen > 1) {
5276 		return ENOTDIR;
5277 	}
5278 
5279 	mount_list_lock();
5280 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
5281 		if (vfsp->vfc_typenum == name[0]) {
5282 			break;
5283 		}
5284 	}
5285 
5286 	if (vfsp == NULL) {
5287 		mount_list_unlock();
5288 		return ENOTSUP;
5289 	}
5290 
5291 	vfsc.vfc_reserved1 = 0;
5292 	bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
5293 	vfsc.vfc_typenum = vfsp->vfc_typenum;
5294 	vfsc.vfc_refcount = vfsp->vfc_refcount;
5295 	vfsc.vfc_flags = vfsp->vfc_flags;
5296 	vfsc.vfc_reserved2 = 0;
5297 	vfsc.vfc_reserved3 = 0;
5298 
5299 	mount_list_unlock();
5300 	return SYSCTL_OUT(req, &vfsc, sizeof(struct vfsconf));
5301 }
5302 
/* the vfs.generic. branch. */
SYSCTL_EXTENSIBLE_NODE(_vfs, VFS_GENERIC, generic,
    CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs generic hinge");
/* retrieve a list of mounted filesystem fsid_t */
SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
/* perform operations on filesystem via fsid_t */
SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED,
    sysctl_vfs_ctlbyfsid, "ctlbyfsid");
/* set/clear/query P_NOREMOTEHANG by pid; see sysctl_vfs_noremotehang */
SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY,
    NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
SYSCTL_INT(_vfs_generic, VFS_MAXTYPENUM, maxtypenum,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &maxvfstypenum, 0, "");
SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout_seconds, 0, "");
/* per-fs-type configuration records; see sysctl_vfs_generic_conf */
SYSCTL_NODE(_vfs_generic, VFS_CONF, conf,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_vfs_generic_conf, "");
#if DEVELOPMENT || DEBUG
SYSCTL_INT(_vfs_generic, OID_AUTO, print_busy_vnodes,
    CTLTYPE_INT | CTLFLAG_RW,
    &print_busy_vnodes, 0,
    "VFS log busy vnodes blocking unmount");
#endif

/* Indicate that the root file system unmounted cleanly */
static int vfs_root_unmounted_cleanly = 0;
SYSCTL_INT(_vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &vfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly");
5332 
/*
 * Record that the root filesystem was unmounted cleanly; exported to
 * userspace via the vfs.generic.root_unmounted_cleanly sysctl above.
 */
void
vfs_set_root_unmounted_cleanly(void)
{
	vfs_root_unmounted_cleanly = 1;
}
5338 
/*
 * Print vnode state.
 *
 * Debug helper: prints the caller-supplied message (printf-style) followed
 * by the vnode's counts, flags, v_data and mount information.  Pointers
 * are obfuscated with VM_KERNEL_ADDRPERM before printing.
 */
void
vn_print_state(struct vnode *vp, const char *fmt, ...)
{
	va_list ap;
	char perm_str[] = "(VM_KERNEL_ADDRPERM pointer)";
	char fs_name[MFSNAMELEN];

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("vp 0x%0llx %s: ", (uint64_t)VM_KERNEL_ADDRPERM(vp), perm_str);
	printf("tag %d, type %d\n", vp->v_tag, vp->v_type);
	/* Counts .. */
	printf("    iocount %d, usecount %d, kusecount %d references %d\n",
	    vp->v_iocount, vp->v_usecount, vp->v_kusecount, vp->v_references);
	printf("    writecount %d, numoutput %d\n", vp->v_writecount,
	    vp->v_numoutput);
	/* Flags */
	printf("    flag 0x%x, lflag 0x%x, listflag 0x%x\n", vp->v_flag,
	    vp->v_lflag, vp->v_listflag);

	/* A NULL or dead mount is reported as "deadfs". */
	if (vp->v_mount == NULL || vp->v_mount == dead_mountp) {
		strlcpy(fs_name, "deadfs", MFSNAMELEN);
	} else {
		vfs_name(vp->v_mount, fs_name);
	}

	printf("    v_data 0x%0llx %s\n",
	    (vp->v_data ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_data) : 0),
	    perm_str);
	printf("    v_mount 0x%0llx %s vfs_name %s\n",
	    (vp->v_mount ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_mount) : 0),
	    perm_str, fs_name);
}
5376 
/* Incremented in new_vnode() each time an existing vnode is reclaimed for reuse. */
long num_reusedvnodes = 0;
5378 
5379 
5380 static vnode_t
process_vp(vnode_t vp,int want_vp,bool can_defer,int * deferred)5381 process_vp(vnode_t vp, int want_vp, bool can_defer, int *deferred)
5382 {
5383 	unsigned int  vpid;
5384 
5385 	*deferred = 0;
5386 
5387 	vpid = vp->v_id;
5388 
5389 	vnode_list_remove_locked(vp);
5390 
5391 	vnode_hold(vp);
5392 	vnode_list_unlock();
5393 
5394 	vnode_lock_spin(vp);
5395 
5396 	/*
5397 	 * We could wait for the vnode_lock after removing the vp from the freelist
5398 	 * and the vid is bumped only at the very end of reclaim. So it is  possible
5399 	 * that we are looking at a vnode that is being terminated. If so skip it.
5400 	 */
5401 	if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
5402 	    VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
5403 		/*
5404 		 * we lost the race between dropping the list lock
5405 		 * and picking up the vnode_lock... someone else
5406 		 * used this vnode and it is now in a new state
5407 		 */
5408 		vnode_drop_and_unlock(vp);
5409 
5410 		return NULLVP;
5411 	}
5412 	if ((vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE) {
5413 		/*
5414 		 * we did a vnode_rele_ext that asked for
5415 		 * us not to reenter the filesystem during
5416 		 * the release even though VL_NEEDINACTIVE was
5417 		 * set... we'll do it here by doing a
5418 		 * vnode_get/vnode_put
5419 		 *
5420 		 * pick up an iocount so that we can call
5421 		 * vnode_put and drive the VNOP_INACTIVE...
5422 		 * vnode_put will either leave us off
5423 		 * the freelist if a new ref comes in,
5424 		 * or put us back on the end of the freelist
5425 		 * or recycle us if we were marked for termination...
5426 		 * so we'll just go grab a new candidate
5427 		 */
5428 		vp->v_iocount++;
5429 #ifdef CONFIG_IOCOUNT_TRACE
5430 		record_vp(vp, 1);
5431 #endif
5432 		vnode_put_locked(vp);
5433 		vnode_drop_and_unlock(vp);
5434 
5435 		return NULLVP;
5436 	}
5437 	/*
5438 	 * Checks for anyone racing us for recycle
5439 	 */
5440 	if (vp->v_type != VBAD) {
5441 		if ((want_vp || can_defer) && (vnode_on_reliable_media(vp) == FALSE || (vp->v_flag & VISDIRTY))) {
5442 			vnode_async_list_add(vp);
5443 			vnode_drop_and_unlock(vp);
5444 
5445 			*deferred = 1;
5446 
5447 			return NULLVP;
5448 		}
5449 		if (vp->v_lflag & VL_DEAD) {
5450 			panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
5451 		}
5452 
5453 		vnode_lock_convert(vp);
5454 		(void)vnode_reclaim_internal(vp, 1, want_vp, 0);
5455 
5456 		if (want_vp) {
5457 			if ((VONLIST(vp))) {
5458 				panic("new_vnode(%p): vp on list", vp);
5459 			}
5460 			if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
5461 			    (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH))) {
5462 				panic("new_vnode(%p): free vnode still referenced", vp);
5463 			}
5464 			if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0)) {
5465 				panic("new_vnode(%p): vnode seems to be on mount list", vp);
5466 			}
5467 			if (!LIST_EMPTY(&vp->v_nclinks) || !TAILQ_EMPTY(&vp->v_ncchildren)) {
5468 				panic("new_vnode(%p): vnode still hooked into the name cache", vp);
5469 			}
5470 		} else {
5471 			vnode_drop_and_unlock(vp);
5472 			vp = NULLVP;
5473 		}
5474 	}
5475 	return vp;
5476 }
5477 
/*
 * Kernel-thread continuation that drains the vnode async work list:
 * vnodes deferred by process_vp() (unreliable media / dirty) are reclaimed
 * here without handing them back to a caller (want_vp == 0).  Blocks on
 * the list address when the queue is empty and re-enters itself as the
 * thread continuation.
 */
__attribute__((noreturn))
static void
async_work_continue(void)
{
	struct async_work_lst *q;
	int     deferred;
	vnode_t vp;

	q = &vnode_async_work_list;

	for (;;) {
		vnode_list_lock();

		if (TAILQ_EMPTY(q)) {
			assert_wait(q, (THREAD_UNINT));

			vnode_list_unlock();

			thread_block((thread_continue_t)async_work_continue);

			continue;
		}
		async_work_handled++;

		vp = TAILQ_FIRST(q);

		/* process_vp drops the list lock; with want_vp == 0 it must consume vp */
		vp = process_vp(vp, 0, false, &deferred);

		if (vp != NULLVP) {
			panic("found VBAD vp (%p) on async queue", vp);
		}
	}
}
5511 
#if CONFIG_JETSAM
/*
 * Set (under the vnode list lock) by new_vnode() when the vnode table is
 * nearly exhausted; consumed by vn_laundry_continue() to trigger
 * memorystatus_kill_on_vnode_exhaustion() outside the allocation path.
 */
bool do_async_jetsam = false;
#endif
5515 
/*
 * Kernel-thread continuation ("laundry thread") that replenishes the dead
 * vnode pool.  Priorities per iteration: (1) vnode jetsam if requested and
 * the dead pool is still low, (2) drain the async work list, (3) reclaim
 * from the rage list, then the free list.  Sleeps on &vnode_free_list when
 * there is nothing useful to do (see wakeup_laundry_thread).
 */
__attribute__((noreturn))
static void
vn_laundry_continue(void)
{
	struct freelst *free_q;
	struct ragelst *rage_q;
	vnode_t vp;
	int deferred;
	bool rage_q_empty;
	bool free_q_empty;


	free_q = &vnode_free_list;
	rage_q = &vnode_rage_list;

	for (;;) {
		vnode_list_lock();

#if CONFIG_JETSAM
		if (do_async_jetsam) {
			do_async_jetsam = false;
			/* only kill if the dead pool hasn't recovered since the request */
			if (deadvnodes <= deadvnodes_low) {
				vnode_list_unlock();

				log(LOG_EMERG, "Initiating vnode jetsam : %d desired, %ld numvnodes, "
				    "%ld free, %ld dead, %ld async, %d rage\n",
				    desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes);

				memorystatus_kill_on_vnode_exhaustion();

				continue;
			}
		}
#endif

		if (!TAILQ_EMPTY(&vnode_async_work_list)) {
			vp = TAILQ_FIRST(&vnode_async_work_list);
			async_work_handled++;

			vp = process_vp(vp, 0, false, &deferred);

			if (vp != NULLVP) {
				panic("found VBAD vp (%p) on async queue", vp);
			}
			continue;
		}

		free_q_empty = TAILQ_EMPTY(free_q);
		rage_q_empty = TAILQ_EMPTY(rage_q);

		if (!rage_q_empty && !free_q_empty) {
			struct timeval current_tv;

			/*
			 * Leave the rage list alone while it is under its limit and
			 * its entries are younger than RAGE_TIME_LIMIT.
			 */
			microuptime(&current_tv);
			if (ragevnodes < rage_limit &&
			    ((current_tv.tv_sec - rage_tv.tv_sec) < RAGE_TIME_LIMIT)) {
				rage_q_empty = true;
			}
		}

		/* nothing to reclaim, or the dead pool is already full: sleep */
		if (numvnodes < numvnodes_min || (rage_q_empty && free_q_empty) ||
		    (reusablevnodes <= reusablevnodes_max && deadvnodes >= deadvnodes_high)) {
			assert_wait(free_q, (THREAD_UNINT));

			vnode_list_unlock();

			thread_block((thread_continue_t)vn_laundry_continue);

			continue;
		}

		if (!rage_q_empty) {
			vp = TAILQ_FIRST(rage_q);
		} else {
			vp = TAILQ_FIRST(free_q);
		}

		vp = process_vp(vp, 0, false, &deferred);

		if (vp != NULLVP) {
			/* If process_vp returns a vnode, it is locked and has a holdcount */
			vnode_drop_and_unlock(vp);
			vp = NULLVP;
		}
	}
}
5602 
5603 static inline void
wakeup_laundry_thread()5604 wakeup_laundry_thread()
5605 {
5606 	if (deadvnodes_noreuse || (numvnodes >= numvnodes_min && deadvnodes < deadvnodes_low &&
5607 	    (reusablevnodes > reusablevnodes_max || numvnodes >= desiredvnodes))) {
5608 		wakeup(&vnode_free_list);
5609 	}
5610 }
5611 
/*
 * This must be called under vnode_list_lock() to prevent race when accessing
 * various vnode stats.
 *
 * Emits a CoreAnalytics "freeable_vnodes" event when the vnode population
 * crosses the freeable-vnodes threshold (in either direction), rate-limited
 * to 25% growth steps while above it.  Allocation of the event uses
 * Z_NOWAIT; on failure the event is simply dropped.
 */
static void
send_freeable_vnodes_telemetry(void)
{
	bool send_event = false;

	/*
	 * Log an event when the 'numvnodes' is above the freeable vnodes threshold
	 * or when it falls back within the threshold.
	 * When the 'numvnodes' is above the threshold, log an event when it has
	 * been incrementally growing by 25%.
	 */
	if ((numvnodes > desiredvnodes) && (freevnodes + deadvnodes) == 0) {
		long last_numvnodes = freeable_vnodes_telemetry.numvnodes;

		if (numvnodes > (last_numvnodes + ((last_numvnodes * 25) / 100)) ||
		    numvnodes >= numvnodes_max) {
			send_event = true;
		}
		freeablevnodes_threshold_crossed = true;
	} else if (freeablevnodes_threshold_crossed &&
	    (freevnodes + deadvnodes) > busyvnodes) {
		freeablevnodes_threshold_crossed = false;
		send_event = true;
	}

	if (__improbable(send_event)) {
		ca_event_t event = CA_EVENT_ALLOCATE_FLAGS(freeable_vnodes, Z_NOWAIT);

		if (event) {
			/*
			 * Update the stats except the 'numvnodes_max' and 'desiredvnodes'
			 * as they are immutable after init.
			 */
			freeable_vnodes_telemetry.numvnodes_min = numvnodes_min;
			freeable_vnodes_telemetry.numvnodes = numvnodes;
			freeable_vnodes_telemetry.freevnodes = freevnodes;
			freeable_vnodes_telemetry.deadvnodes = deadvnodes;
			freeable_vnodes_telemetry.freeablevnodes = freeablevnodes;
			freeable_vnodes_telemetry.busyvnodes = busyvnodes;
			freeable_vnodes_telemetry.threshold_crossed =
			    freeablevnodes_threshold_crossed;

			memcpy(event->data, &freeable_vnodes_telemetry,
			    sizeof(CA_EVENT_TYPE(freeable_vnodes)));

			/* reset the growth baseline once we drop back under the threshold */
			if (!freeablevnodes_threshold_crossed) {
				freeable_vnodes_telemetry.numvnodes = 0;
			}
			CA_EVENT_SEND(event);
		}
	}
}
5668 
/*
 * Allocate or reclaim a vnode for *vpp.
 *
 * Strategy, in order: reuse a dead vnode; allocate a fresh one while under
 * the desired limit (or when forced); otherwise steal a candidate from the
 * rage or free lists via process_vp().  If a steal is deferred to the async
 * thread, wait briefly (10ms) for a dead vnode and retry, escalating to
 * need_reliable_vp after ~100ms.  'can_free' marks the vnode as eligible
 * for deallocation (VCANDEALLOC) when the FS supports it.  Returns 0 with
 * an unlocked vnode holding an iocount of 1, or ENFILE if the table is
 * full and, after retries (and jetsam where configured), nothing freed up.
 */
static int
new_vnode(vnode_t *vpp, bool can_free)
{
	long force_alloc_min;
	vnode_t vp;
#if CONFIG_JETSAM
	uint32_t retries = 0, max_retries = 2;                  /* retry in case of tablefull */
#else
	uint32_t retries = 0, max_retries = 100;                /* retry in case of tablefull */
#endif
	int force_alloc = 0, walk_count = 0;
	boolean_t need_reliable_vp = FALSE;
	int deferred;
	struct timeval initial_tv;
	struct timeval current_tv;
	proc_t  curproc = current_proc();
	bool force_alloc_freeable = false;

	if (vn_dealloc_level == DEALLOC_VNODE_NONE) {
		can_free = false;
	}

	initial_tv.tv_sec = 0;
retry:
	vp = NULLVP;

	vnode_list_lock();
	newvnode++;

	if (need_reliable_vp == TRUE) {
		async_work_timed_out++;
	}

	/*
	 * The vnode list lock was dropped after force_alloc_freeable was set,
	 * reevaluate.
	 */
	force_alloc_min = MAX(desiredvnodes, numvnodes_min);
	if (force_alloc_freeable &&
	    (numvnodes < force_alloc_min || numvnodes >= numvnodes_max)) {
		force_alloc_freeable = false;
	}

#if CONFIG_JETSAM
	/* near the table limit: ask the laundry thread to initiate vnode jetsam */
	if ((numvnodes_max > desiredvnodes) && numvnodes > (numvnodes_max - 100)
#if (DEVELOPMENT || DEBUG)
	    && !bootarg_no_vnode_jetsam
#endif
	    ) {
		do_async_jetsam = true;
		wakeup(&vnode_free_list);
	}
#endif /* CONFIG_JETSAM */

	if (((numvnodes - deadvnodes + deadvnodes_noreuse) < desiredvnodes) ||
	    force_alloc || force_alloc_freeable) {
		struct timespec ts;
		uint32_t vflag = 0;

		/*
		 * Can always reuse a dead one except if it is in the process of
		 * being freed or the FS cannot handle freeable vnodes.
		 */
		if (!TAILQ_EMPTY(&vnode_dead_list)) {
			/* Select an appropriate deadvnode */
			if (numvnodes <= numvnodes_min || !can_free) {
				/* all vnodes up to numvnodes_min are not freeable */
				vp = TAILQ_FIRST(&vnode_dead_list);
				if (numvnodes > numvnodes_min &&
				    (vp->v_flag & VCANDEALLOC)) {
					/*
					 * Freeable vnodes are added to the
					 * back of the queue, so if the first
					 * from the front is freeable, then
					 * there are none on the dead list.
					 */
					vp = NULLVP;
				}
			} else {
				/*
				 * Filesystems which opt in to freeable vnodes
				 * can get either one.
				 */
				TAILQ_FOREACH_REVERSE(vp, &vnode_dead_list,
				    deadlst, v_freelist) {
					if (!(vp->v_listflag & VLIST_NO_REUSE)) {
						break;
					}
				}
			}

			if (vp) {
				force_alloc_freeable = false;
				goto steal_this_vp;
			}
		}

		/*
		 * no dead vnodes available... if we're under
		 * the limit, we'll create a new vnode
		 */
		numvnodes++;
		if (force_alloc) {
			numvnodes_min++;
		} else if (can_free && (numvnodes > numvnodes_min)) {
			allocedvnodes++;
			freeablevnodes++;
			vflag = VCANDEALLOC;

			send_freeable_vnodes_telemetry();
		}
		vnode_list_unlock();

		if (nc_smr_enabled) {
			vp = zalloc_smr(vnode_zone, Z_WAITOK_ZERO_NOFAIL);
		} else {
			vp = zalloc_flags(vnode_zone, Z_WAITOK_ZERO_NOFAIL);
		}

		VLISTNONE(vp);          /* avoid double queue removal */
		lck_mtx_init(&vp->v_lock, &vnode_lck_grp, &vnode_lck_attr);

		TAILQ_INIT(&vp->v_ncchildren);

		klist_init(&vp->v_knotes);
		nanouptime(&ts);
		vp->v_id = (uint32_t)ts.tv_nsec;
		vp->v_flag = VSTANDARD | vflag;
		if (force_alloc_freeable) {
			/* This vnode should be recycled and freed immediately */
			vp->v_lflag = VL_MARKTERM;
			vp->v_listflag = VLIST_NO_REUSE;
		}

		if (vflag & VCANDEALLOC) {
			os_atomic_inc(&busyvnodes, relaxed);
		}

#if CONFIG_MACF
		if (mac_vnode_label_init_needed(vp)) {
			mac_vnode_label_init(vp);
		}
#endif /* MAC */

#if CONFIG_IOCOUNT_TRACE
		if (__improbable(bootarg_vnode_iocount_trace)) {
			vp->v_iocount_trace = (vnode_iocount_trace_t)zalloc_permanent(
				IOCOUNT_TRACE_MAX_TYPES * sizeof(struct vnode_iocount_trace),
				ZALIGN(struct vnode_iocount_trace));
		}
#endif /* CONFIG_IOCOUNT_TRACE */

#if CONFIG_FILE_LEASES
		LIST_INIT(&vp->v_leases);
#endif

		vp->v_iocount = 1;

		goto done;
	}

	microuptime(&current_tv);

#define MAX_WALK_COUNT 1000

	if (!TAILQ_EMPTY(&vnode_rage_list) &&
	    (ragevnodes >= rage_limit ||
	    (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
		TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
			if (!(vp->v_listflag & VLIST_RAGE)) {
				panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
			}

			// if we're a dependency-capable process, skip vnodes that can
			// cause recycling deadlocks. (i.e. this process is diskimages
			// helper and the vnode is in a disk image).  Querying the
			// mnt_kern_flag for the mount's virtual device status
			// is safer than checking the mnt_dependent_process, which
			// may not be updated if there are multiple devnode layers
			// in between the disk image and the final consumer.

			if (((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
			    (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) &&
			    !(vp->v_listflag & VLIST_NO_REUSE) &&
			    (can_free || !(vp->v_flag & VCANDEALLOC))) {
				/*
				 * if need_reliable_vp == TRUE, then we've already sent one or more
				 * non-reliable vnodes to the async thread for processing and timed
				 * out waiting for a dead vnode to show up.  Use the MAX_WALK_COUNT
				 * mechanism to first scan for a reliable vnode before forcing
				 * a new vnode to be created
				 */
				if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
					break;
				}
			}

			// don't iterate more than MAX_WALK_COUNT vnodes to
			// avoid keeping the vnode list lock held for too long.

			if (walk_count++ > MAX_WALK_COUNT) {
				vp = NULL;
				break;
			}
		}
	}

	if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
		/*
		 * Pick the first vp for possible reuse
		 */
		walk_count = 0;
		TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
			// if we're a dependency-capable process, skip vnodes that can
			// cause recycling deadlocks. (i.e. this process is diskimages
			// helper and the vnode is in a disk image).  Querying the
			// mnt_kern_flag for the mount's virtual device status
			// is safer than checking the mnt_dependent_process, which
			// may not be updated if there are multiple devnode layers
			// in between the disk image and the final consumer.

			if (((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
			    (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) &&
			    !(vp->v_listflag & VLIST_NO_REUSE) &&
			    (can_free || !(vp->v_flag & VCANDEALLOC))) {
				/*
				 * if need_reliable_vp == TRUE, then we've already sent one or more
				 * non-reliable vnodes to the async thread for processing and timed
				 * out waiting for a dead vnode to show up.  Use the MAX_WALK_COUNT
				 * mechanism to first scan for a reliable vnode before forcing
				 * a new vnode to be created
				 */
				if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
					break;
				}
			}

			// don't iterate more than MAX_WALK_COUNT vnodes to
			// avoid keeping the vnode list lock held for too long.

			if (walk_count++ > MAX_WALK_COUNT) {
				vp = NULL;
				break;
			}
		}
	}

	//
	// if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
	// then we're trying to create a vnode on behalf of a
	// process like diskimages-helper that has file systems
	// mounted on top of itself (and thus we can't reclaim
	// vnodes in the file systems on top of us).  if we can't
	// find a vnode to reclaim then we'll just have to force
	// the allocation.
	//
	if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
		force_alloc = 1;
		vnode_list_unlock();
		goto retry;
	}

	if (vp == NULL) {
		if (can_free && (vn_dealloc_level > DEALLOC_VNODE_NONE) &&
		    (numvnodes >= force_alloc_min) && (numvnodes < numvnodes_max)) {
			force_alloc_freeable = true;
			vnode_list_unlock();
			goto retry;
		}
		vnode_list_unlock();

		/*
		 * we've reached the system imposed maximum number of vnodes
		 * but there isn't a single one available
		 * wait a bit and then retry... if we can't get a vnode
		 * after our target number of retries, than log a complaint
		 */
		if (++retries <= max_retries) {
			delay_for_interval(1, 1000 * 1000);
			goto retry;
		}

		tablefull("vnode");
		log(LOG_EMERG, "%d desired, %ld numvnodes, "
		    "%ld free, %ld dead, %ld async, %d rage\n",
		    desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes);

#if CONFIG_JETSAM
		/*
		 * Running out of vnodes tends to make a system unusable. Start killing
		 * processes that jetsam knows are killable.
		 */
		if (!memorystatus_kill_on_vnode_exhaustion()
#if DEVELOPMENT || DEBUG
		    || bootarg_no_vnode_jetsam
#endif
		    ) {
			/*
			 * If jetsam can't find any more processes to kill and there
			 * still aren't any free vnodes, panic. Hopefully we'll get a
			 * panic log to tell us why we ran out.
			 */
			panic("vnode table is full");
		}

		/*
		 * Now that we've killed someone, wait a bit and continue looking
		 */
		delay_for_interval(3, 1000 * 1000);
		retries = 0;
		goto retry;
#endif

		*vpp = NULL;
		return ENFILE;
	}
	newvnode_nodead++;
steal_this_vp:
	if ((vp = process_vp(vp, 1, true, &deferred)) == NULLVP) {
		if (deferred) {
			int     elapsed_msecs;
			struct timeval elapsed_tv;

			if (initial_tv.tv_sec == 0) {
				microuptime(&initial_tv);
			}

			vnode_list_lock();

			dead_vnode_waited++;
			dead_vnode_wanted++;

			/*
			 * note that we're only going to explicitly wait 10ms
			 * for a dead vnode to become available, since even if one
			 * isn't available, a reliable vnode might now be available
			 * at the head of the VRAGE or free lists... if so, we
			 * can satisfy the new_vnode request with less latency then waiting
			 * for the full 100ms duration we're ultimately willing to tolerate
			 */
			assert_wait_timeout((caddr_t)&dead_vnode_wanted, (THREAD_INTERRUPTIBLE), 10000, NSEC_PER_USEC);

			vnode_list_unlock();

			thread_block(THREAD_CONTINUE_NULL);

			microuptime(&elapsed_tv);

			timevalsub(&elapsed_tv, &initial_tv);
			elapsed_msecs = (int)(elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000);

			if (elapsed_msecs >= 100) {
				/*
				 * we've waited long enough... 100ms is
				 * somewhat arbitrary for this case, but the
				 * normal worst case latency used for UI
				 * interaction is 100ms, so I've chosen to
				 * go with that.
				 *
				 * setting need_reliable_vp to TRUE
				 * forces us to find a reliable vnode
				 * that we can process synchronously, or
				 * to create a new one if the scan for
				 * a reliable one hits the scan limit
				 */
				need_reliable_vp = TRUE;
			}
		}
		goto retry;
	}
	OSAddAtomicLong(1, &num_reusedvnodes);


#if CONFIG_MACF
	/*
	 * We should never see VL_LABELWAIT or VL_LABEL here.
	 * as those operations hold a reference.
	 */
	assert((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
	assert((vp->v_lflag & VL_LABEL) != VL_LABEL);
	if (vp->v_lflag & VL_LABELED || mac_vnode_label(vp) != NULL) {
		vnode_lock_convert(vp);
		mac_vnode_label_recycle(vp);
	} else if (mac_vnode_label_init_needed(vp)) {
		vnode_lock_convert(vp);
		mac_vnode_label_init(vp);
	}

#endif /* MAC */

	/* reinitialize the reclaimed vnode for its next life */
	vp->v_iocount = 1;
	vp->v_lflag = 0;
	vp->v_writecount = 0;
	vp->v_references = 0;
	vp->v_iterblkflags = 0;
	if (can_free && (vp->v_flag & VCANDEALLOC)) {
		vp->v_flag = VSTANDARD | VCANDEALLOC;
	} else {
		vp->v_flag = VSTANDARD;
	}

	/* vbad vnodes can point to dead_mountp */
	vp->v_mount = NULL;
	vp->v_defer_reclaimlist = (vnode_t)0;

	/* process_vp returns a locked vnode with a holdcount */
	vnode_drop_and_unlock(vp);

done:
	*vpp = vp;

	return 0;
}
6082 
/* Acquire the vnode's mutex (full lock). */
void
vnode_lock(vnode_t vp)
{
	lck_mtx_lock(&vp->v_lock);
}
6088 
/* Acquire the vnode's mutex in spin mode (caller may later lock_convert). */
void
vnode_lock_spin(vnode_t vp)
{
	lck_mtx_lock_spin(&vp->v_lock);
}
6094 
/* Release the vnode's mutex. */
void
vnode_unlock(vnode_t vp)
{
	lck_mtx_unlock(&vp->v_lock);
}
6100 
/*
 * Take a holdcount reference on the vnode (atomic increment); panics on
 * overflow.  A holdcount keeps the vnode structure itself from being
 * deallocated (see vnode_drop_internal), independent of io/use counts.
 */
void
vnode_hold(vnode_t vp)
{
	int32_t old_holdcount = os_atomic_inc_orig(&vp->v_holdcount, relaxed);

	if (old_holdcount == INT32_MAX) {
		/*
		 * Because we allow atomic ops on the holdcount it is
		 * possible that when the vnode is examined, its holdcount
		 * is different than what will be printed in this
		 * panic message.
		 */
		panic("%s: vp %p holdcount overflow from : %d v_tag = %d, v_type = %d, v_flag = %x.",
		    __FUNCTION__, vp, old_holdcount, vp->v_tag, vp->v_type, vp->v_flag);
	}
}
6117 
#define VNODE_HOLD_NO_SMR    (1<<29) /* Disable vnode_hold_smr */

/*
 * To be used when smr is the only protection (cache_lookup and cache_lookup_path)
 *
 * Attempt to take a holdcount while the vnode is only protected by SMR.
 * Returns false if the vnode has been marked VNODE_HOLD_NO_SMR (it is being
 * torn down and must not be revived through the SMR path), true otherwise.
 */
bool
vnode_hold_smr(vnode_t vp)
{
	int32_t holdcount;

	/*
	 * For "high traffic" vnodes like rootvnode, the atomic
	 * cmpexcg loop below can turn into a infinite loop, no need
	 * to do it for vnodes that won't be dealloc'ed
	 */
	if (!(os_atomic_load(&vp->v_flag, relaxed) & VCANDEALLOC)) {
		vnode_hold(vp);
		return true;
	}

	/* cmpxchg loop: increment only while VNODE_HOLD_NO_SMR is not set */
	for (;;) {
		holdcount = os_atomic_load(&vp->v_holdcount, relaxed);

		if (holdcount & VNODE_HOLD_NO_SMR) {
			return false;
		}

		if ((os_atomic_cmpxchg(&vp->v_holdcount, holdcount, holdcount + 1, relaxed) != 0)) {
			return true;
		}
	}
}
6150 
/*
 * free callback from smr enabled zones
 *
 * Clears the vnode before the memory is made reusable so that stale
 * SMR readers observe zeroed state rather than recycled contents.
 */
static void
vnode_smr_free(void *_vp, __unused size_t _size)
{
	vnode_t vp = _vp;

	bzero(vp, sizeof(*vp));
}
6161 
/*
 * Drop one holdcount reference on 'vp'.  If this was the last hold on
 * a dead (VL_DEAD), deallocatable (VCANDEALLOC) vnode and policy
 * allows, remove the vnode from the dead list and free it back to the
 * vnode zone.
 *
 *   locked - caller already holds the vnode lock; it is always
 *            released before returning.
 *
 * Returns 'vp' if the vnode still exists, or NULLVP if it was freed.
 */
static vnode_t
vnode_drop_internal(vnode_t vp, bool locked)
{
	int32_t old_holdcount = os_atomic_dec_orig(&vp->v_holdcount, relaxed);

	if (old_holdcount < 1) {
		if (locked) {
			vnode_unlock(vp);
		}

		/*
		 * Because we allow atomic ops on the holdcount it is possible
		 * that when the vnode is examined, its holdcount is different
		 * than what will be printed in this panic message.
		 */
		panic("%s : vp %p holdcount -ve: %d.  v_tag = %d, v_type = %d, v_flag = %x.",
		    __FUNCTION__, vp, old_holdcount - 1, vp->v_tag, vp->v_type, vp->v_flag);
	}

	/* Fast path: dealloc disabled, other holds remain, or vnode not dead */
	if (vn_dealloc_level == DEALLOC_VNODE_NONE || old_holdcount > 1 ||
	    !(vp->v_flag & VCANDEALLOC) || !(vp->v_lflag & VL_DEAD)) {
		if (locked) {
			vnode_unlock(vp);
		}
		return vp;
	}

	if (!locked) {
		vnode_lock(vp);
	}

	/* Re-validate under the vnode lock; another thread may have revived it */
	if ((os_atomic_load(&vp->v_holdcount, relaxed) != 0) || vp->v_iocount ||
	    vp->v_usecount || !(vp->v_flag & VCANDEALLOC) || !(vp->v_lflag & VL_DEAD)) {
		vnode_unlock(vp);
		return vp;
	}

	vnode_lock_convert(vp);
	vnode_list_lock();

	/*
	 * the v_listflag field is protected by the vnode_list_lock
	 *
	 * The cmpxchg(0 -> VNODE_HOLD_NO_SMR) both confirms the holdcount
	 * is still zero and atomically fences out vnode_hold_smr() callers
	 * for the remainder of the teardown.
	 */
	if (VONLIST(vp) && (vp->v_listflag & VLIST_DEAD) &&
	    (numvnodes > desiredvnodes || (vp->v_listflag & VLIST_NO_REUSE) ||
	    vn_dealloc_level != DEALLOC_VNODE_ALL || deadvnodes >= deadvnodes_high) &&
	    (os_atomic_cmpxchg(&vp->v_holdcount, 0, VNODE_HOLD_NO_SMR, relaxed) != 0)) {
		VREMDEAD("vnode_list_remove", vp);
		numvnodes--;
		freeablevnodes--;
		deallocedvnodes++;
		vp->v_listflag = 0;

		send_freeable_vnodes_telemetry();
		vnode_list_unlock();

#if CONFIG_MACF
		/* Detach the MAC label before unlock; free it after unlock */
		struct label *tmpl = mac_vnode_label(vp);
		os_atomic_store(&vp->v_label, NULL, release);
#endif /* CONFIG_MACF */

		vnode_unlock(vp);

#if CONFIG_MACF
		if (tmpl) {
			mac_vnode_label_free(tmpl);
		}
#endif /* CONFIG_MACF */

		/* SMR-protected lookups require the deferred (SMR) free */
		if (nc_smr_enabled) {
			zfree_smr(vnode_zone, vp);
		} else {
			zfree(vnode_zone, vp);
		}

		vp = NULLVP;
	} else {
		vnode_list_unlock();
		vnode_unlock(vp);
	}

	return vp;
}
6245 
/*
 * Drop a holdcount on a locked vnode; the vnode lock is released.
 * Returns NULLVP if the vnode was deallocated, else 'vp'.
 */
vnode_t
vnode_drop_and_unlock(vnode_t vp)
{
	return vnode_drop_internal(vp, true);
}
6251 
/*
 * Drop a holdcount on an unlocked vnode.
 * Returns NULLVP if the vnode was deallocated, else 'vp'.
 */
vnode_t
vnode_drop(vnode_t vp)
{
	return vnode_drop_internal(vp, false);
}
6257 
/*
 * vfs.vnstats.*: read-only sysctl counters exposing the state of the
 * global vnode pools (allocation, reuse, dead/free lists, rapid aging).
 */
SYSCTL_NODE(_vfs, OID_AUTO, vnstats, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "vfs vnode stats");

SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, vn_dealloc_level,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &vn_dealloc_level, 0, "");
SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, desired_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &desiredvnodes, 0, "");
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &numvnodes, "");
SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_vnodes_min,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &numvnodes_min, 0, "");
SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_vnodes_max,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &numvnodes_max, 0, "");
SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_deallocable_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &freeablevnodes, 0, "");
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_deallocable_busy_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &busyvnodes, "");
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_dead_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &deadvnodes, "");
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_dead_vnodes_to_dealloc,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &deadvnodes_noreuse, "");
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_async_work_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &async_work_vnodes, "");
SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_rapid_aging_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ragevnodes, 0, "");
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_free_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &freevnodes, "");
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_recycledvnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &num_recycledvnodes, "");
SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_allocedvnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &allocedvnodes, "");
SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_deallocedvnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &deallocedvnodes, "");
SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_newvnode_calls,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &newvnode, "");
SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_newvnode_calls_nodead,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &newvnode_nodead, "");
6311 
/*
 * Take an iocount on 'vp'.  Fails with ENOENT if the vnode is dead or
 * terminating and has no other iocount (see vnode_get_locked).
 */
int
vnode_get(struct vnode *vp)
{
	int retval;

	vnode_lock_spin(vp);
	retval = vnode_get_locked(vp);
	vnode_unlock(vp);

	return retval;
}
6323 
/*
 * Take an iocount with the vnode lock already held.
 * Returns ENOENT if the vnode is terminating/dead and has no
 * outstanding iocount; panics on iocount overflow.
 */
int
vnode_get_locked(struct vnode *vp)
{
#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
		return ENOENT;
	}

	if (os_add_overflow(vp->v_iocount, 1, &vp->v_iocount)) {
		panic("v_iocount overflow");
	}

#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	return 0;
}
6343 
/*
 * vnode_getwithvid() cuts in line in front of a vnode drain (that is,
 * while the vnode is draining, but at no point after that) to prevent
 * deadlocks when getting vnodes from filesystem hashes while holding
 * resources that may prevent other iocounts from being released.
 *
 * Fails with ENOENT if 'vid' no longer matches (the vnode was recycled).
 */
int
vnode_getwithvid(vnode_t vp, uint32_t vid)
{
	return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID | VNODE_DRAINO));
}
6355 
/*
 * vnode_getwithvid_drainok() is like vnode_getwithvid(), but *does* block behind a vnode
 * drain; it exists for use in the VFS name cache, where we really do want to block behind
 * vnode drain to prevent holding off an unmount.
 */
int
vnode_getwithvid_drainok(vnode_t vp, uint32_t vid)
{
	return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID));
}
6366 
/*
 * Take an iocount on a vnode the caller already holds a reference on;
 * no vid check, and dead vnodes are not rejected.
 */
int
vnode_getwithref(vnode_t vp)
{
	return vget_internal(vp, 0, 0);
}
6372 
/*
 * This is not a noblock variant of vnode_getwithref, this also returns an error
 * if the vnode is dead. It should only be called if the calling context already
 * has a usecount or iocount.
 */
int
vnode_getwithref_noblock(vnode_t vp)
{
	return vget_internal(vp, 0, (VNODE_NOBLOCK | VNODE_NODEAD | VNODE_WITHREF));
}
6383 
/*
 * Unconditionally take an iocount, even on draining/suspended/dead
 * vnodes (VNODE_ALWAYS); kernel-internal use only.
 */
__private_extern__ int
vnode_getalways(vnode_t vp)
{
	return vget_internal(vp, 0, VNODE_ALWAYS);
}
6389 
/*
 * Like vnode_getalways(), but marks the acquisition as coming from
 * the pager (VNODE_PAGER) so reference aging is skipped.
 */
__private_extern__ int
vnode_getalways_from_pager(vnode_t vp)
{
	return vget_internal(vp, 0, VNODE_ALWAYS | VNODE_PAGER);
}
6395 
/*
 * Mark 'vp' dead: detach it from its mount and fs-specific data and
 * point it at the dead vnodeops so subsequent VNOPs fail cleanly.
 * Caller is expected to hold the vnode lock.
 */
static inline void
vn_set_dead(vnode_t vp)
{
	vp->v_mount = NULL;
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_data = NULL;
	vp->v_type = VBAD;
	vp->v_lflag |= VL_DEAD;
}
6406 
/*
 * Release an iocount with the vnode lock held.  When this is the last
 * reference, runs VNOP_INACTIVE (dropping and retaking the lock) and,
 * if the vnode is marked for termination, initiates reclaim — or, when
 * called from the pager, defers reclaim to the async cleaner thread to
 * avoid self-deadlock.  Always returns 0.
 */
static int
vnode_put_internal_locked(vnode_t vp, bool from_pager)
{
	vfs_context_t ctx = vfs_context_current();      /* hoist outside loop */

#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
retry:
	if (vp->v_iocount < 1) {
		panic("vnode_put(%p): iocount < 1", vp);
	}

	/* other references remain; just drop ours and return */
	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
		vnode_dropiocount(vp);
		return 0;
	}

	if (((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE)) {
		vp->v_lflag &= ~VL_NEEDINACTIVE;

		if (UBCINFOEXISTS(vp)) {
			ubc_cs_free_and_vnode_unlock(vp);
		} else {
			vnode_unlock(vp);
		}

		VNOP_INACTIVE(vp, ctx);

		vnode_lock_spin(vp);
		/*
		 * because we had to drop the vnode lock before calling
		 * VNOP_INACTIVE, the state of this vnode may have changed...
		 * we may pick up both VL_MARKTERM and either
		 * an iocount or a usecount while in the VNOP_INACTIVE call
		 * we don't want to call vnode_reclaim_internal on a vnode
		 * that has active references on it... so loop back around
		 * and reevaluate the state
		 */
		goto retry;
	}
	vp->v_lflag &= ~VL_NEEDINACTIVE;

	vnode_lock_convert(vp);
	if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
		if (from_pager) {
			/*
			 * We can't initiate reclaim when called from the pager
			 * because it will deadlock with itself so we hand it
			 * off to the async cleaner thread.
			 */
			vnode_async_list_add(vp);
		} else {
			vnode_reclaim_internal(vp, 1, 1, 0);
		}
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);

	return 0;
}
6468 
/*
 * Release an iocount; caller holds the vnode lock.
 */
int
vnode_put_locked(vnode_t vp)
{
	return vnode_put_internal_locked(vp, false);
}
6474 
/*
 * Release an iocount on an unlocked vnode.  A transient holdcount is
 * taken so the vnode memory stays valid across the internal unlock.
 */
int
vnode_put(vnode_t vp)
{
	int retval;

	vnode_lock_spin(vp);
	vnode_hold(vp);
	retval = vnode_put_internal_locked(vp, false);
	vnode_drop_and_unlock(vp);

	return retval;
}
6487 
/*
 * Pager variant of vnode_put(): defers any reclaim to the async
 * cleaner thread since the pager cannot reclaim without deadlocking.
 */
int
vnode_put_from_pager(vnode_t vp)
{
	int retval;

	vnode_lock_spin(vp);
	vnode_hold(vp);
	/* Cannot initiate reclaim while paging */
	retval = vnode_put_internal_locked(vp, true);
	vnode_drop_and_unlock(vp);

	return retval;
}
6501 
/*
 * Return the current writecount (unlocked snapshot).
 */
int
vnode_writecount(vnode_t vp)
{
	return vp->v_writecount;
}
6507 
/* is vnode_t in use by others?  (i.e. more than 'refcnt' non-kernel usecounts) */
int
vnode_isinuse(vnode_t vp, int refcnt)
{
	return vnode_isinuse_locked(vp, refcnt, 0);
}
6514 
/*
 * Return the current usecount (unlocked snapshot).
 */
int
vnode_usecount(vnode_t vp)
{
	return vp->v_usecount;
}
6520 
6521 int
vnode_iocount(vnode_t vp)6522 vnode_iocount(vnode_t vp)
6523 {
6524 	if (!(os_atomic_load(&vp->v_ext_flag, relaxed) & VE_LINKCHANGE)) {
6525 		return vp->v_iocount;
6526 	} else {
6527 		int iocount = 0;
6528 		vnode_lock_spin(vp);
6529 		if (!(os_atomic_load(&vp->v_ext_flag, relaxed) & VE_LINKCHANGE)) {
6530 			iocount = vp->v_iocount;
6531 		} else {
6532 			/* the "link lock" takes its own iocount */
6533 			iocount = vp->v_iocount - 1;
6534 		}
6535 		vnode_unlock(vp);
6536 		return iocount;
6537 	}
6538 }
6539 
6540 int
vnode_isinuse_locked(vnode_t vp,int refcnt,int locked)6541 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
6542 {
6543 	int retval = 0;
6544 
6545 	if (!locked) {
6546 		vnode_lock_spin(vp);
6547 	}
6548 	if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
6549 		retval = 1;
6550 		goto out;
6551 	}
6552 	if (vp->v_type == VREG) {
6553 		retval = ubc_isinuse_locked(vp, refcnt, 1);
6554 	}
6555 
6556 out:
6557 	if (!locked) {
6558 		vnode_unlock(vp);
6559 	}
6560 	return retval;
6561 }
6562 
6563 kauth_cred_t
vnode_cred(vnode_t vp)6564 vnode_cred(vnode_t vp)
6565 {
6566 	if (vp->v_cred) {
6567 		return kauth_cred_require(vp->v_cred);
6568 	}
6569 
6570 	return NULL;
6571 }
6572 
6573 
/* resume vnode_t
 * Clears VL_SUSPENDED and wakes threads waiting for an iocount, but
 * only when called by the thread that owns the suspension.
 */
errno_t
vnode_resume(vnode_t vp)
{
	/* NOTE(review): the flag/owner pre-check is done unlocked */
	if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
		vnode_lock_spin(vp);
		vp->v_lflag &= ~VL_SUSPENDED;
		vp->v_owner = NULL;
		vnode_unlock(vp);

		/* wake threads blocked in vnode_getiocount()/vnode_drain() */
		wakeup(&vp->v_iocount);
	}
	return 0;
}
6588 
/* suspend vnode_t
 * Please do not use on more than one vnode at a time as it may
 * cause deadlocks.
 * xxx should we explicitly prevent this from happening?
 */

errno_t
vnode_suspend(vnode_t vp)
{
	/* unlocked pre-check; a racy EBUSY is acceptable here */
	if (vp->v_lflag & VL_SUSPENDED) {
		return EBUSY;
	}

	vnode_lock_spin(vp);

	/*
	 * xxx is this sufficient to check if a vnode_drain is in
	 * progress?
	 */

	if (vp->v_owner == NULL) {
		vp->v_lflag |= VL_SUSPENDED;
		vp->v_owner = current_thread();
	}
	vnode_unlock(vp);

	/*
	 * NOTE(review): returns 0 even if another owner prevented the
	 * suspend; callers cannot distinguish that from success.
	 */
	return 0;
}
6617 
/*
 * Release any blocked locking requests on the vnode.
 * Used for forced-unmounts.
 *
 * XXX	What about network filesystems?
 */
static void
vnode_abort_advlocks(vnode_t vp)
{
	/* only local advisory locking (VLOCKLOCAL) can be aborted here */
	if (vp->v_flag & VLOCKLOCAL) {
		lf_abort_advlocks(vp);
	}
}
6631 
6632 
/*
 * Wait until this thread holds the only outstanding iocount on 'vp'.
 * Sets VL_DRAIN for the duration and records the current thread as
 * v_owner so the draining thread can still take iocounts itself (see
 * vnode_getiocount).  Called and returns with the vnode lock held.
 */
static errno_t
vnode_drain(vnode_t vp)
{
	if (vp->v_lflag & VL_DRAIN) {
		panic("vnode_drain: recursive drain");
		return ENOENT;
	}
	vp->v_lflag |= VL_DRAIN;
	vp->v_owner = current_thread();

	while (vp->v_iocount > 1) {
		if (bootarg_no_vnode_drain) {
			/* bounded sleep so a leaked iocount can't hang shutdown */
			struct timespec ts = {.tv_sec = 10, .tv_nsec = 0};
			int error;

			if (vfs_unmountall_started) {
				ts.tv_sec = 1;
			}

			error = msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain_with_timeout", &ts);

			/* Try to deal with leaked iocounts under bootarg and shutting down */
			if (vp->v_iocount > 1 && error == EWOULDBLOCK &&
			    ts.tv_sec == 1 && vp->v_numoutput == 0) {
				vp->v_iocount = 1;
				break;
			}
		} else {
			/* vnode_dropiocount() wakes us via &vp->v_iocount */
			msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
		}
	}

	vp->v_lflag &= ~VL_DRAIN;

	return 0;
}
6669 
6670 
/*
 * if the number of recent references via vnode_getwithvid or vnode_getwithref
 * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
 * the LRU list if it's currently on it... once the iocount and usecount both drop
 * to 0, it will get put back on the end of the list, effectively making it younger
 * this allows us to keep actively referenced vnodes in the list without having
 * to constantly remove and add to the list each time a vnode w/o a usecount is
 * referenced which costs us taking and dropping a global lock twice.
 * However, if the vnode is marked DIRTY, we want to pull it out much earlier
 */
#define UNAGE_THRESHHOLD        25
#define UNAGE_DIRTYTHRESHHOLD    6

/*
 * Core iocount acquisition; called and returns with the vnode lock
 * held.  'vflags' (VNODE_NODEAD / VNODE_NOSUSPEND / VNODE_ALWAYS /
 * VNODE_DRAINO / VNODE_WITHID / VNODE_PAGER / VNODE_NOBLOCK) selects
 * how drain, suspension, termination and dead vnodes are treated.
 * Returns 0 on success, ENOENT/ENODEV on rejection, or the errno from
 * an interrupted sleep.
 */
errno_t
vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
{
	int nodead = vflags & VNODE_NODEAD;
	int nosusp = vflags & VNODE_NOSUSPEND;
	int always = vflags & VNODE_ALWAYS;
	int beatdrain = vflags & VNODE_DRAINO;
	int withvid = vflags & VNODE_WITHID;
	int forpager = vflags & VNODE_PAGER;
	int noblock = vflags & VNODE_NOBLOCK;

	for (;;) {
		int sleepflg = 0;

		/*
		 * if it is a dead vnode with deadfs
		 */
		if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
			return ENOENT;
		}
		/*
		 * will return VL_DEAD ones
		 */
		if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0) {
			break;
		}
		/*
		 * if suspended vnodes are to be failed
		 */
		if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
			return ENOENT;
		}
		/*
		 * if you are the owner of drain/suspend/termination , can acquire iocount
		 * check for VL_TERMINATE; it does not set owner
		 */
		if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
		    (vp->v_owner == current_thread())) {
			break;
		}

		if (always != 0) {
			break;
		}

		if (noblock && (vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE))) {
			return ENOENT;
		}

		/*
		 * If this vnode is getting drained, there are some cases where
		 * we can't block or, in case of tty vnodes, want to be
		 * interruptible.
		 */
		if (vp->v_lflag & VL_DRAIN) {
			/*
			 * In some situations, we want to get an iocount
			 * even if the vnode is draining to prevent deadlock,
			 * e.g. if we're in the filesystem, potentially holding
			 * resources that could prevent other iocounts from
			 * being released.
			 */
			if (beatdrain) {
				break;
			}
			/*
			 * Don't block if the vnode's mount point is unmounting as
			 * we may be the thread the unmount is itself waiting on.
			 * Only callers who pass in vids (at this point, we've already
			 * handled nosusp and nodead) are expecting error returns
			 * from this function, so we can only return errors for
			 * those. ENODEV is intended to inform callers that the call
			 * failed because an unmount is in progress.
			 */
			if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount)) {
				return ENODEV;
			}

			if (vnode_istty(vp)) {
				sleepflg = PCATCH;
			}
		}

		vnode_lock_convert(vp);

		if (vp->v_lflag & VL_TERMINATE) {
			int error;

			vp->v_lflag |= VL_TERMWANT;

			error = msleep(&vp->v_lflag, &vp->v_lock,
			    (PVFS | sleepflg), "vnode getiocount", NULL);
			if (error) {
				return error;
			}
		} else {
			msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
		}
	}
	/* recheck identity after any sleeps; recycle bumps v_id */
	if (withvid && vid != vp->v_id) {
		return ENOENT;
	}
	if (!forpager && (++vp->v_references >= UNAGE_THRESHHOLD ||
	    (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD))) {
		vp->v_references = 0;
		vnode_list_remove(vp);
	}
	vp->v_iocount++;
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	return 0;
}
6797 
/*
 * Drop one iocount; caller holds the vnode lock.  When the count
 * drains down with a drain/suspend pending, wake the waiting thread.
 */
static void
vnode_dropiocount(vnode_t vp)
{
	if (vp->v_iocount < 1) {
		panic("vnode_dropiocount(%p): v_iocount < 1", vp);
	}

	vp->v_iocount--;
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, -1);
#endif
	if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1)) {
		wakeup(&vp->v_iocount);
	}
}
6813 
6814 
/*
 * Public entry point for vnode reclaim: unlocked, non-reuse, no flags.
 */
void
vnode_reclaim(struct vnode * vp)
{
	vnode_reclaim_internal(vp, 0, 0, 0);
}
6820 
/*
 * Reclaim 'vp': set VL_TERMINATE, revoke tty readers when REVOKEALL is
 * passed, drain outstanding iocounts, vgone() the fs-specific state,
 * bump v_id so stale vnode_getwithvid() callers fail, and (unless
 * 'reuse') put the vnode back on the appropriate list.
 *
 *   locked - caller already holds the vnode lock (lock state is
 *            preserved on return)
 *   reuse  - caller will reuse the vnode; skip vnode_list_add()
 *   flags  - passed to vgone(); REVOKEALL also forces tty revocation
 */
__private_extern__
void
vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
{
	int isfifo = 0;
	bool clear_tty_revoke = false;

	if (!locked) {
		vnode_lock(vp);
	}

	if (vp->v_lflag & VL_TERMINATE) {
		panic("vnode reclaim in progress");
	}
	vp->v_lflag |= VL_TERMINATE;
	vp->v_lflag &= ~VL_OPSCHANGE;

	vn_clearunionwait(vp, 1);

	/*
	 * We have to force any terminals in reads to return and give up
	 * their iocounts. It's important to do this after VL_TERMINATE
	 * has been set to ensure new reads are blocked while the
	 * revoke is in progress.
	 */
	if (vnode_istty(vp) && (flags & REVOKEALL) && (vp->v_iocount > 1)) {
		vnode_unlock(vp);
		VNOP_IOCTL(vp, TIOCREVOKE, (caddr_t)NULL, 0, vfs_context_kernel());
		clear_tty_revoke = true;
		vnode_lock(vp);
	}

	vnode_drain(vp);

	if (clear_tty_revoke) {
		vnode_unlock(vp);
		VNOP_IOCTL(vp, TIOCREVOKECLEAR, (caddr_t)NULL, 0, vfs_context_kernel());
		vnode_lock(vp);
	}

#if CONFIG_FILE_LEASES
	/*
	 * Revoke all leases in place for this vnode as it is about to be reclaimed.
	 * In normal case, there shouldn't be any leases in place by the time we
	 * get here as there shouldn't be any opens on the vnode (usecount == 0).
	 * However, in the case of force unmount or unmount of a volume that
	 * contains file that was opened with O_EVTONLY then the vnode can be
	 * reclaimed while the file is still opened.
	 */
	vnode_revokelease(vp, true);
#endif

	isfifo = (vp->v_type == VFIFO);

	if (vp->v_type != VBAD) {
		vgone(vp, flags);               /* clean and reclaim the vnode */
	}
	/*
	 * give the vnode a new identity so that vnode_getwithvid will fail
	 * on any stale cache accesses...
	 * grab the list_lock so that if we're in "new_vnode"
	 * behind the list_lock trying to steal this vnode, the v_id is stable...
	 * once new_vnode drops the list_lock, it will block trying to take
	 * the vnode lock until we release it... at that point it will evaluate
	 * whether the v_vid has changed
	 * also need to make sure that the vnode isn't on a list where "new_vnode"
	 * can find it after the v_id has been bumped until we are completely done
	 * with the vnode (i.e. putting it back on a list has to be the very last
	 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
	 * are holding an io_count on the vnode... they need to drop the io_count
	 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
	 * they are completely done with the vnode
	 */
	vnode_list_lock();

	vnode_list_remove_locked(vp);
	vp->v_id++;

	vnode_list_unlock();

	if (isfifo) {
		struct fifoinfo * fip;

		fip = vp->v_fifoinfo;
		vp->v_fifoinfo = NULL;
		kfree_type(struct fifoinfo, fip);
	}
	vp->v_type = VBAD;

	/* sanity: vgone()/vclean() must have fully detached everything */
	if (vp->v_data) {
		panic("vnode_reclaim_internal: cleaned vnode isn't");
	}
	if (vp->v_numoutput) {
		panic("vnode_reclaim_internal: clean vnode has pending I/O's");
	}
	if (UBCINFOEXISTS(vp)) {
		panic("vnode_reclaim_internal: ubcinfo not cleaned");
	}
	if (vp->v_parent) {
		panic("vnode_reclaim_internal: vparent not removed");
	}
	if (vp->v_name) {
		panic("vnode_reclaim_internal: vname not removed");
	}

#if CONFIG_FILE_LEASES
	if (__improbable(!LIST_EMPTY(&vp->v_leases))) {
		panic("vnode_reclaim_internal: vleases NOT empty");
	}
#endif

	vp->v_socket = NULL;

	vp->v_lflag &= ~VL_TERMINATE;
	vp->v_owner = NULL;

#if CONFIG_IOCOUNT_TRACE
	if (__improbable(bootarg_vnode_iocount_trace)) {
		bzero(vp->v_iocount_trace,
		    IOCOUNT_TRACE_MAX_TYPES * sizeof(struct vnode_iocount_trace));
	}
#endif /* CONFIG_IOCOUNT_TRACE */

	KNOTE(&vp->v_knotes, NOTE_REVOKE);

	/* Make sure that when we reuse the vnode, no knotes left over */
	klist_init(&vp->v_knotes);

	if (vp->v_lflag & VL_TERMWANT) {
		vp->v_lflag &= ~VL_TERMWANT;
		wakeup(&vp->v_lflag);
	}
	if (!reuse) {
		/*
		 * make sure we get on the
		 * dead list if appropriate
		 */
		vnode_list_add(vp);
	}
	if (!locked) {
		vnode_unlock(vp);
	}
}
6964 
6965 static int
vnode_create_internal(uint32_t flavor,uint32_t size,void * data,vnode_t * vpp,vnode_create_options_t vc_options)6966 vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp,
6967     vnode_create_options_t vc_options)
6968 {
6969 	int error;
6970 	int insert = 1;
6971 	vnode_t vp = NULLVP;
6972 	vnode_t nvp;
6973 	vnode_t dvp;
6974 	struct  uthread *ut;
6975 	struct componentname *cnp;
6976 	struct vnode_fsparam *param = (struct vnode_fsparam *)data;
6977 #if CONFIG_TRIGGERS
6978 	struct vnode_trigger_param *tinfo = NULL;
6979 #endif
6980 	bool existing_vnode;
6981 	bool init_vnode = !(vc_options & VNODE_CREATE_EMPTY);
6982 	bool is_bdevvp = false;
6983 
6984 	if (*vpp) {
6985 		vp = *vpp;
6986 		*vpp = NULLVP;
6987 		existing_vnode = true;
6988 	} else {
6989 		existing_vnode = false;
6990 	}
6991 
6992 	if (init_vnode) {
6993 		/* Do quick sanity check on the parameters. */
6994 		if ((param == NULL) || (param->vnfs_vtype == VBAD)) {
6995 			error = EINVAL;
6996 			goto error_out;
6997 		}
6998 
6999 #if CONFIG_TRIGGERS
7000 		if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) {
7001 			tinfo = (struct vnode_trigger_param *)data;
7002 
7003 			/* Validate trigger vnode input */
7004 			if ((param->vnfs_vtype != VDIR) ||
7005 			    (tinfo->vnt_resolve_func == NULL) ||
7006 			    (tinfo->vnt_flags & ~VNT_VALID_MASK)) {
7007 				error = EINVAL;
7008 				goto error_out;
7009 			}
7010 			/* Fall through a normal create (params will be the same) */
7011 			flavor = VNCREATE_FLAVOR;
7012 			size = VCREATESIZE;
7013 		}
7014 #endif
7015 		if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE)) {
7016 			error = EINVAL;
7017 			goto error_out;
7018 		}
7019 	}
7020 
7021 	if (!existing_vnode) {
7022 		if ((error = new_vnode(&vp, !(vc_options & VNODE_CREATE_NODEALLOC)))) {
7023 			return error;
7024 		}
7025 		if (!init_vnode) {
7026 			/* Make it so that it can be released by a vnode_put) */
7027 			vnode_lock(vp);
7028 			vn_set_dead(vp);
7029 			vnode_unlock(vp);
7030 			*vpp = vp;
7031 			return 0;
7032 		}
7033 	} else {
7034 		/*
7035 		 * A vnode obtained by vnode_create_empty has been passed to
7036 		 * vnode_initialize - Unset VL_DEAD set by vn_set_dead. After
7037 		 * this point, it is set back on any error.
7038 		 */
7039 		vnode_lock(vp);
7040 		vp->v_lflag &= ~VL_DEAD;
7041 		vnode_unlock(vp);
7042 	}
7043 
7044 	dvp = param->vnfs_dvp;
7045 	cnp = param->vnfs_cnp;
7046 
7047 	vp->v_op = param->vnfs_vops;
7048 	vp->v_type = (uint8_t)param->vnfs_vtype;
7049 	vp->v_data = param->vnfs_fsnode;
7050 
7051 	if (param->vnfs_markroot) {
7052 		vp->v_flag |= VROOT;
7053 	}
7054 	if (param->vnfs_marksystem) {
7055 		vp->v_flag |= VSYSTEM;
7056 	}
7057 	if (vp->v_type == VREG) {
7058 		error = ubc_info_init_withsize(vp, param->vnfs_filesize);
7059 		if (error) {
7060 #ifdef CONFIG_IOCOUNT_TRACE
7061 			record_vp(vp, 1);
7062 #endif
7063 			vnode_hold(vp);
7064 			vnode_lock(vp);
7065 			vn_set_dead(vp);
7066 
7067 			vnode_put_locked(vp);
7068 			vnode_drop_and_unlock(vp);
7069 			return error;
7070 		}
7071 		if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED) {
7072 			memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control);
7073 		}
7074 	}
7075 #ifdef CONFIG_IOCOUNT_TRACE
7076 	record_vp(vp, 1);
7077 #endif
7078 
7079 #if CONFIG_FIRMLINKS
7080 	vp->v_fmlink = NULLVP;
7081 #endif
7082 	vp->v_flag &= ~VFMLINKTARGET;
7083 
7084 #if CONFIG_TRIGGERS
7085 	/*
7086 	 * For trigger vnodes, attach trigger info to vnode
7087 	 */
7088 	if ((vp->v_type == VDIR) && (tinfo != NULL)) {
7089 		/*
7090 		 * Note: has a side effect of incrementing trigger count on the
7091 		 * mount if successful, which we would need to undo on a
7092 		 * subsequent failure.
7093 		 */
7094 #ifdef CONFIG_IOCOUNT_TRACE
7095 		record_vp(vp, -1);
7096 #endif
7097 		error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE);
7098 		if (error) {
7099 			printf("vnode_create: vnode_resolver_create() err %d\n", error);
7100 			vnode_hold(vp);
7101 			vnode_lock(vp);
7102 			vn_set_dead(vp);
7103 #ifdef CONFIG_IOCOUNT_TRACE
7104 			record_vp(vp, 1);
7105 #endif
7106 			vnode_put_locked(vp);
7107 			vnode_drop_and_unlock(vp);
7108 			return error;
7109 		}
7110 	}
7111 #endif
7112 	if (vp->v_type == VCHR || vp->v_type == VBLK) {
7113 		vp->v_tag = VT_DEVFS;           /* callers will reset if needed (bdevvp) */
7114 
7115 		if ((nvp = checkalias(vp, param->vnfs_rdev))) {
7116 			/*
7117 			 * if checkalias returns a vnode, it will be locked
7118 			 *
7119 			 * first get rid of the unneeded vnode we acquired
7120 			 */
7121 			vp->v_data = NULL;
7122 			vp->v_op = spec_vnodeop_p;
7123 			vp->v_type = VBAD;
7124 			vp->v_lflag = VL_DEAD;
7125 			vp->v_data = NULL;
7126 			vp->v_tag = VT_NON;
7127 			vnode_put(vp);
7128 
7129 			/*
7130 			 * switch to aliased vnode and finish
7131 			 * preparing it
7132 			 */
7133 			vp = nvp;
7134 
7135 			is_bdevvp = (vp->v_flag & VBDEVVP);
7136 
7137 			if (is_bdevvp) {
7138 				printf("%s: alias vnode (vid = %u) is in state of change (start) v_flags = 0x%x v_numoutput = %d\n",
7139 				    __func__, vp->v_id, vp->v_flag, vp->v_numoutput);
7140 			}
7141 
7142 			vnode_hold(vp);
7143 			vp->v_lflag |= VL_OPSCHANGE;
7144 			vclean(vp, 0);
7145 			vp->v_op = param->vnfs_vops;
7146 			vp->v_type = (uint8_t)param->vnfs_vtype;
7147 			vp->v_data = param->vnfs_fsnode;
7148 			vp->v_lflag = VL_OPSCHANGE;
7149 			vp->v_mount = NULL;
7150 			insmntque(vp, param->vnfs_mp);
7151 			insert = 0;
7152 
7153 			if (is_bdevvp) {
7154 				printf("%s: alias vnode (vid = %u), is in state of change (end) v_flags = 0x%x v_numoutput = %d\n",
7155 				    __func__, vp->v_id, vp->v_flag, vp->v_numoutput);
7156 			}
7157 
7158 			vnode_drop_and_unlock(vp);
7159 			wakeup(&vp->v_lflag); /* chkvnlock is waitng for VL_DEAD to get unset */
7160 		}
7161 
7162 		if (VCHR == vp->v_type) {
7163 			u_int maj = major(vp->v_rdev);
7164 
7165 			if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY) {
7166 				vp->v_flag |= VISTTY;
7167 			}
7168 		}
7169 	}
7170 
7171 	if (vp->v_type == VFIFO) {
7172 		struct fifoinfo *fip;
7173 
7174 		fip = kalloc_type(struct fifoinfo, Z_WAITOK | Z_ZERO);
7175 		vp->v_fifoinfo = fip;
7176 	}
7177 	/* The file systems must pass the address of the location where
7178 	 * they store the vnode pointer. When we add the vnode into the mount
7179 	 * list and name cache they become discoverable. So the file system node
7180 	 * must have the connection to vnode setup by then
7181 	 */
7182 	*vpp = vp;
7183 
7184 	/* Add fs named reference. */
7185 	if (param->vnfs_flags & VNFS_ADDFSREF) {
7186 		vp->v_lflag |= VNAMED_FSHASH;
7187 	}
7188 	if (param->vnfs_mp) {
7189 		if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL) {
7190 			vp->v_flag |= VLOCKLOCAL;
7191 		}
7192 		if (insert) {
7193 			if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
7194 				panic("insmntque: vp on the free list");
7195 			}
7196 
7197 			/*
7198 			 * enter in mount vnode list
7199 			 */
7200 			insmntque(vp, param->vnfs_mp);
7201 		}
7202 	}
7203 	if (dvp && vnode_ref(dvp) == 0) {
7204 		vp->v_parent = dvp;
7205 	}
7206 	if (cnp) {
7207 		if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
7208 			/*
7209 			 * enter into name cache
7210 			 * we've got the info to enter it into the name cache now
7211 			 * cache_enter_create will pick up an extra reference on
7212 			 * the name entered into the string cache
7213 			 */
7214 			vp->v_name = cache_enter_create(dvp, vp, cnp);
7215 		} else {
7216 			vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
7217 		}
7218 
7219 #if NAMEDSTREAMS
7220 		if (cnp->cn_flags & MARKISSHADOW) {
7221 			vp->v_flag |= VISSHADOW;
7222 		}
7223 #endif
7224 	}
7225 	if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
7226 		/*
7227 		 * this vnode is being created as cacheable in the name cache
7228 		 * this allows us to re-enter it in the cache
7229 		 */
7230 		vp->v_flag |= VNCACHEABLE;
7231 	}
7232 	ut = current_uthread();
7233 
7234 	if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
7235 	    (ut->uu_flag & (UT_RAGE_VNODES | UT_KERN_RAGE_VNODES))) {
7236 		/*
7237 		 * process has indicated that it wants any
7238 		 * vnodes created on its behalf to be rapidly
7239 		 * aged to reduce the impact on the cached set
7240 		 * of vnodes
7241 		 *
7242 		 * if UT_KERN_RAGE_VNODES is set, then the
7243 		 * kernel internally wants vnodes to be rapidly
7244 		 * aged, even if the process hasn't requested
7245 		 * this
7246 		 */
7247 		vp->v_flag |= VRAGE;
7248 	}
7249 
7250 #if CONFIG_SECLUDED_MEMORY
7251 	switch (secluded_for_filecache) {
7252 	case SECLUDED_FILECACHE_NONE:
7253 		/*
7254 		 * secluded_for_filecache == 0:
7255 		 * + no file contents in secluded pool
7256 		 */
7257 		break;
7258 	case SECLUDED_FILECACHE_APPS:
7259 		/*
7260 		 * secluded_for_filecache == 1:
7261 		 * + no files from /
7262 		 * + files from /Applications/ are OK
7263 		 * + files from /Applications/Camera are not OK
7264 		 * + no files that are open for write
7265 		 */
7266 		if (vnode_vtype(vp) == VREG &&
7267 		    vnode_mount(vp) != NULL &&
7268 		    (!(vfs_flags(vnode_mount(vp)) & MNT_ROOTFS))) {
7269 			/* not from root filesystem: eligible for secluded pages */
7270 			memory_object_mark_eligible_for_secluded(
7271 				ubc_getobject(vp, UBC_FLAGS_NONE),
7272 				TRUE);
7273 		}
7274 		break;
7275 	case SECLUDED_FILECACHE_RDONLY:
7276 		/*
7277 		 * secluded_for_filecache == 2:
7278 		 * + all read-only files OK, except:
7279 		 *      + dyld_shared_cache_arm64*
7280 		 *      + Camera
7281 		 *      + mediaserverd
7282 		 *      + cameracaptured
7283 		 */
7284 		if (vnode_vtype(vp) == VREG) {
7285 			memory_object_mark_eligible_for_secluded(
7286 				ubc_getobject(vp, UBC_FLAGS_NONE),
7287 				TRUE);
7288 		}
7289 		break;
7290 	default:
7291 		break;
7292 	}
7293 #endif /* CONFIG_SECLUDED_MEMORY */
7294 
7295 	if (is_bdevvp) {
7296 		/*
		 * The v_flags and v_lflags fields for the vnode above are
		 * manipulated without the vnode lock. This is fine for
		 * everything because no other use of this vnode is occurring.
7300 		 * However the case of the bdevvp alias vnode reuse is different
7301 		 * and the flags end up being modified while a thread may be in
7302 		 * vnode_waitforwrites which sets VTHROTTLED and any one of the
7303 		 * non atomic modifications of v_flag in this function can race
7304 		 * with the setting of that flag and cause VTHROTTLED on vflag
7305 		 * to get "lost".
7306 		 *
7307 		 * This should ideally be fixed by making sure all modifications
7308 		 * in this function to the vnode flags are done under the
7309 		 * vnode lock but at this time, a much smaller workaround is
		 * being employed and the more correct (and potentially
7311 		 * much bigger) change will follow later.
7312 		 *
7313 		 * The effect of "losing" the VTHROTTLED flags would be a lost
7314 		 * wakeup so we just issue that wakeup here since this happens
7315 		 * only once per bdevvp vnode which are only one or two for a
7316 		 * given boot.
7317 		 */
7318 		wakeup(&vp->v_numoutput);
7319 
7320 		/*
		 * now make sure the flags that we were supposed to put aren't
7322 		 * lost.
7323 		 */
7324 		vnode_lock_spin(vp);
7325 		if (param->vnfs_flags & VNFS_ADDFSREF) {
7326 			vp->v_lflag |= VNAMED_FSHASH;
7327 		}
7328 		if (param->vnfs_mp && (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL)) {
7329 			vp->v_flag |= VLOCKLOCAL;
7330 		}
7331 		if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
7332 			vp->v_flag |= VNCACHEABLE;
7333 		}
7334 		vnode_unlock(vp);
7335 	}
7336 
7337 	return 0;
7338 
7339 error_out:
7340 	if (existing_vnode) {
7341 		vnode_put(vp);
7342 	}
7343 	return error;
7344 }
7345 
7346 int
vnode_create_ext(uint32_t flavor,uint32_t size,void * data,vnode_t * vpp,vnode_create_options_t vc_options)7347 vnode_create_ext(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, vnode_create_options_t vc_options)
7348 {
7349 	if (vc_options & ~(VNODE_CREATE_EMPTY | VNODE_CREATE_NODEALLOC)) {
7350 		return EINVAL;
7351 	}
7352 	*vpp = NULLVP;
7353 	return vnode_create_internal(flavor, size, data, vpp, vc_options);
7354 }
7355 
7356 /* USAGE:
7357  * The following api creates a vnode and associates all the parameter specified in vnode_fsparam
7358  * structure and returns a vnode handle with a reference. device aliasing is handled here so checkalias
7359  * is obsoleted by this.
7360  */
7361 int
vnode_create(uint32_t flavor,uint32_t size,void * data,vnode_t * vpp)7362 vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
7363 {
7364 	return vnode_create_ext(flavor, size, data, vpp, VNODE_CREATE_NODEALLOC);
7365 }
7366 
7367 int
vnode_create_empty(vnode_t * vpp)7368 vnode_create_empty(vnode_t *vpp)
7369 {
7370 	return vnode_create_ext(VNCREATE_FLAVOR, VCREATESIZE, NULL,
7371 	           vpp, VNODE_CREATE_EMPTY);
7372 }
7373 
/*
 * Initialize a pre-allocated vnode (one obtained via vnode_create_empty())
 * with the caller-supplied filesystem parameters.  Panics if *vpp is NULL.
 */
int
vnode_initialize(uint32_t __unused flavor, uint32_t size, void *data, vnode_t *vpp)
{
	if (*vpp == NULLVP) {
		panic("NULL vnode passed to vnode_initialize");
	}
#if DEVELOPMENT || DEBUG
	/*
	 * We lock to check that vnode is fit for unlocked use in
	 * vnode_create_internal: no users (usecount 0), still marked
	 * VL_DEAD, and no filesystem private data attached.
	 */
	vnode_lock_spin(*vpp);
	VNASSERT(((*vpp)->v_usecount == 0), *vpp,
	    ("vnode_initialize : usecount not 0, is %d", (*vpp)->v_usecount));
	VNASSERT(((*vpp)->v_lflag & VL_DEAD), *vpp,
	    ("vnode_initialize : v_lflag does not have VL_DEAD, is 0x%x",
	    (*vpp)->v_lflag));
	VNASSERT(((*vpp)->v_data == NULL), *vpp,
	    ("vnode_initialize : v_data not NULL"));
	vnode_unlock(*vpp);
#endif
	return vnode_create_internal(flavor, size, data, vpp, VNODE_CREATE_DEFAULT);
}
7397 
/*
 * Take the filesystem's named reference (VNAMED_FSHASH) on a vnode.
 * Panics if the reference is already held, or if the vnode appears to
 * be on the free list (tqe_prev != the 0xdeadb poison value that marks
 * a vnode as not enqueued).  Always returns 0.
 */
int
vnode_addfsref(vnode_t vp)
{
	vnode_lock_spin(vp);
	if (vp->v_lflag & VNAMED_FSHASH) {
		panic("add_fsref: vp already has named reference");
	}
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
		panic("addfsref: vp on the free list");
	}
	vp->v_lflag |= VNAMED_FSHASH;
	vnode_unlock(vp);
	return 0;
}
/*
 * Drop the filesystem's named reference (VNAMED_FSHASH) on a vnode.
 * Panics if no named reference is currently held.  Always returns 0.
 */
int
vnode_removefsref(vnode_t vp)
{
	vnode_lock_spin(vp);
	if ((vp->v_lflag & VNAMED_FSHASH) == 0) {
		panic("remove_fsref: no named reference");
	}
	vp->v_lflag &= ~VNAMED_FSHASH;
	vnode_unlock(vp);
	return 0;
}
7423 
/*
 * Serialize link changes on a vnode.  Blocks until no other thread holds
 * VE_LINKCHANGE, then takes an additional iocount and sets VE_LINKCHANGE.
 * The caller must already hold an iocount (panics otherwise).  Paired
 * with vnode_link_unlock().
 */
void
vnode_link_lock(vnode_t vp)
{
	vnode_lock_spin(vp);
	while (os_atomic_load(&vp->v_ext_flag, relaxed) & VE_LINKCHANGE) {
		/* Record a waiter so vnode_link_unlock() knows to wake us. */
		os_atomic_or(&vp->v_ext_flag, VE_LINKCHANGEWAIT, relaxed);
		msleep(&vp->v_ext_flag, &vp->v_lock, PVFS | PSPIN,
		    "vnode_link_lock_wait", 0);
	}
	if (vp->v_iocount == 0) {
		panic("%s called without an iocount on the vnode", __FUNCTION__);
	}
	/* Hold our own iocount for the duration of the link change. */
	vnode_get_locked(vp);
	os_atomic_or(&vp->v_ext_flag, VE_LINKCHANGE, relaxed);
	vnode_unlock(vp);
}
7440 
/*
 * Release the link-change "lock" taken by vnode_link_lock(): clear
 * VE_LINKCHANGE/VE_LINKCHANGEWAIT, wake any waiters, and drop the extra
 * iocount taken in vnode_link_lock().
 */
void
vnode_link_unlock(vnode_t vp)
{
	bool do_wakeup = false;
	bool do_vnode_put = false;

	vnode_lock_spin(vp);
	if (os_atomic_load(&vp->v_ext_flag, relaxed) & VE_LINKCHANGEWAIT) {
		do_wakeup = true;
	}
	os_atomic_andnot(&vp->v_ext_flag, VE_LINKCHANGE | VE_LINKCHANGEWAIT, relaxed);
	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
		/* Other holders remain; the quick locked drop is safe. */
		vnode_put_locked(vp);
	} else {
		/*
		 * This is the last iocount and there are no usecounts;
		 * defer the drop until after unlocking.  NOTE(review):
		 * presumably because dropping the final iocount can
		 * trigger reclaim-side work best done without v_lock
		 * held in spin mode -- confirm against vnode_put().
		 */
		do_vnode_put = true;
	}
	vnode_unlock(vp);
	if (do_wakeup) {
		wakeup(&vp->v_ext_flag);
	}
	if (do_vnode_put) {
		vnode_put(vp);
	}
}
7465 
7466 int
vfs_iterate(int flags,int (* callout)(mount_t,void *),void * arg)7467 vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg)
7468 {
7469 	mount_t mp;
7470 	int ret = 0;
7471 	fsid_t * fsid_list;
7472 	int count, actualcount, i;
7473 	void * allocmem;
7474 	int indx_start, indx_stop, indx_incr;
7475 	int cb_dropref = (flags & VFS_ITERATE_CB_DROPREF);
7476 	int noskip_unmount = (flags & VFS_ITERATE_NOSKIP_UNMOUNT);
7477 
7478 	count = mount_getvfscnt();
7479 	count += 10;
7480 
7481 	fsid_list = kalloc_data(count * sizeof(fsid_t), Z_WAITOK);
7482 	allocmem = (void *)fsid_list;
7483 
7484 	actualcount = mount_fillfsids(fsid_list, count);
7485 
7486 	/*
7487 	 * Establish the iteration direction
7488 	 * VFS_ITERATE_TAIL_FIRST overrides default head first order (oldest first)
7489 	 */
7490 	if (flags & VFS_ITERATE_TAIL_FIRST) {
7491 		indx_start = actualcount - 1;
7492 		indx_stop = -1;
7493 		indx_incr = -1;
7494 	} else { /* Head first by default */
7495 		indx_start = 0;
7496 		indx_stop = actualcount;
7497 		indx_incr = 1;
7498 	}
7499 
7500 	for (i = indx_start; i != indx_stop; i += indx_incr) {
7501 		/* obtain the mount point with iteration reference */
7502 		mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);
7503 
7504 		if (mp == (struct mount *)0) {
7505 			continue;
7506 		}
7507 		mount_lock(mp);
7508 		if ((mp->mnt_lflag & MNT_LDEAD) ||
7509 		    (!noskip_unmount && (mp->mnt_lflag & MNT_LUNMOUNT))) {
7510 			mount_unlock(mp);
7511 			mount_iterdrop(mp);
7512 			continue;
7513 		}
7514 		mount_unlock(mp);
7515 
7516 		/* iterate over all the vnodes */
7517 		ret = callout(mp, arg);
7518 
7519 		/*
7520 		 * Drop the iterref here if the callback didn't do it.
7521 		 * Note: If cb_dropref is set the mp may no longer exist.
7522 		 */
7523 		if (!cb_dropref) {
7524 			mount_iterdrop(mp);
7525 		}
7526 
7527 		switch (ret) {
7528 		case VFS_RETURNED:
7529 		case VFS_RETURNED_DONE:
7530 			if (ret == VFS_RETURNED_DONE) {
7531 				ret = 0;
7532 				goto out;
7533 			}
7534 			break;
7535 
7536 		case VFS_CLAIMED_DONE:
7537 			ret = 0;
7538 			goto out;
7539 		case VFS_CLAIMED:
7540 		default:
7541 			break;
7542 		}
7543 		ret = 0;
7544 	}
7545 
7546 out:
7547 	kfree_data(allocmem, count * sizeof(fsid_t));
7548 	return ret;
7549 }
7550 
7551 /*
7552  * Update the vfsstatfs structure in the mountpoint.
7553  * MAC: Parameter eventtype added, indicating whether the event that
7554  * triggered this update came from user space, via a system call
7555  * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
7556  */
7557 int
vfs_update_vfsstat(mount_t mp,vfs_context_t ctx,__unused int eventtype)7558 vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
7559 {
7560 	struct vfs_attr va;
7561 	int             error;
7562 
7563 	/*
7564 	 * Request the attributes we want to propagate into
7565 	 * the per-mount vfsstat structure.
7566 	 */
7567 	VFSATTR_INIT(&va);
7568 	VFSATTR_WANTED(&va, f_iosize);
7569 	VFSATTR_WANTED(&va, f_blocks);
7570 	VFSATTR_WANTED(&va, f_bfree);
7571 	VFSATTR_WANTED(&va, f_bavail);
7572 	VFSATTR_WANTED(&va, f_bused);
7573 	VFSATTR_WANTED(&va, f_files);
7574 	VFSATTR_WANTED(&va, f_ffree);
7575 	VFSATTR_WANTED(&va, f_bsize);
7576 	VFSATTR_WANTED(&va, f_fssubtype);
7577 
7578 	if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
7579 		KAUTH_DEBUG("STAT - filesystem returned error %d", error);
7580 		return error;
7581 	}
7582 #if CONFIG_MACF
7583 	if (eventtype == VFS_USER_EVENT) {
7584 		error = mac_mount_check_getattr(ctx, mp, &va);
7585 		if (error != 0) {
7586 			return error;
7587 		}
7588 	}
7589 #endif
7590 	/*
7591 	 * Unpack into the per-mount structure.
7592 	 *
7593 	 * We only overwrite these fields, which are likely to change:
7594 	 *	f_blocks
7595 	 *	f_bfree
7596 	 *	f_bavail
7597 	 *	f_bused
7598 	 *	f_files
7599 	 *	f_ffree
7600 	 *
7601 	 * And these which are not, but which the FS has no other way
7602 	 * of providing to us:
7603 	 *	f_bsize
7604 	 *	f_iosize
7605 	 *	f_fssubtype
7606 	 *
7607 	 */
7608 	if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
7609 		/* 4822056 - protect against malformed server mount */
7610 		mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
7611 	} else {
7612 		mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */
7613 	}
7614 	if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
7615 		mp->mnt_vfsstat.f_iosize = va.f_iosize;
7616 	} else {
7617 		mp->mnt_vfsstat.f_iosize = 1024 * 1024;         /* 1MB sensible I/O size */
7618 	}
7619 	if (VFSATTR_IS_SUPPORTED(&va, f_blocks)) {
7620 		mp->mnt_vfsstat.f_blocks = va.f_blocks;
7621 	}
7622 	if (VFSATTR_IS_SUPPORTED(&va, f_bfree)) {
7623 		mp->mnt_vfsstat.f_bfree = va.f_bfree;
7624 	}
7625 	if (VFSATTR_IS_SUPPORTED(&va, f_bavail)) {
7626 		mp->mnt_vfsstat.f_bavail = va.f_bavail;
7627 	}
7628 	if (VFSATTR_IS_SUPPORTED(&va, f_bused)) {
7629 		mp->mnt_vfsstat.f_bused = va.f_bused;
7630 	}
7631 	if (VFSATTR_IS_SUPPORTED(&va, f_files)) {
7632 		mp->mnt_vfsstat.f_files = va.f_files;
7633 	}
7634 	if (VFSATTR_IS_SUPPORTED(&va, f_ffree)) {
7635 		mp->mnt_vfsstat.f_ffree = va.f_ffree;
7636 	}
7637 
7638 	/* this is unlikely to change, but has to be queried for */
7639 	if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype)) {
7640 		mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
7641 	}
7642 
7643 	return 0;
7644 }
7645 
/*
 * Append a mount to the global mount list under the list lock.
 * Refuses (returns -1) while the system is shutting down so no new
 * mounts appear during shutdown; returns 0 on success.
 */
int
mount_list_add(mount_t mp)
{
	int res;

	mount_list_lock();
	if (get_system_inshutdown() != 0) {
		res = -1;
	} else {
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
		nummounts++;
		res = 0;
	}
	mount_list_unlock();

	return res;
}
7663 
/*
 * Unlink a mount from the global mount list under the list lock and
 * poison its list linkage so stale use is detectable.
 */
void
mount_list_remove(mount_t mp)
{
	mount_list_lock();
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	nummounts--;
	/* NULL the linkage so a double-remove or stale walk faults early. */
	mp->mnt_list.tqe_next = NULL;
	mp->mnt_list.tqe_prev = NULL;
	mount_list_unlock();
}
7674 
/*
 * Find a mount by its volfs id (f_fsid.val[0]) among mounts that have
 * MNTK_PATH_FROM_ID set and are not unmounting.
 *
 * If 'withref' is set, an iteration reference is taken under the list
 * lock and then converted to a vfs_busy() reference outside it; if the
 * mount cannot be busied (LK_NOWAIT), NULL is returned.  Without
 * 'withref' the caller gets a raw pointer with no reference.
 */
mount_t
mount_lookupby_volfsid(int volfs_id, int withref)
{
	mount_t cur_mount = (mount_t)0;
	mount_t mp;

	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
		    (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
		    (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
			cur_mount = mp;
			if (withref) {
				/* Pin the mount before dropping the list lock. */
				if (mount_iterref(cur_mount, 1)) {
					cur_mount = (mount_t)0;
					mount_list_unlock();
					goto out;
				}
			}
			break;
		}
	}
	mount_list_unlock();
	if (withref && (cur_mount != (mount_t)0)) {
		/* Upgrade the iterref to a busy reference for the caller. */
		mp = cur_mount;
		if (vfs_busy(mp, LK_NOWAIT) != 0) {
			cur_mount = (mount_t)0;
		}
		mount_iterdrop(mp);
	}
out:
	return cur_mount;
}
7708 
/*
 * Find a mount by full fsid (both val[0] and val[1]).
 *
 * 'locked' indicates the caller already holds the mount list lock.
 * If 'withref' is set, an iteration reference is taken on the match
 * (NULL is returned if that fails).  Returns NULL when no mount with
 * the given fsid exists.
 */
mount_t
mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
{
	mount_t retmp = (mount_t)0;
	mount_t mp;

	if (!locked) {
		mount_list_lock();
	}
	TAILQ_FOREACH(mp, &mountlist, mnt_list)
	if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
	    mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
		retmp = mp;
		if (withref) {
			if (mount_iterref(retmp, 1)) {
				retmp = (mount_t)0;
			}
		}
		goto out;
	}
out:
	if (!locked) {
		mount_list_unlock();
	}
	return retmp;
}
7735 
/*
 * Look up 'path' and return the resulting vnode (with an iocount) in
 * *vpp.  'flags' is a VNODE_LOOKUP_* mask controlling symlink following
 * and mount-point crossing.  If 'start_dvp' is non-NULL and the path is
 * relative, the lookup starts at that directory (USEDVP).  Returns
 * EINVAL if no context is supplied.
 */
errno_t
vnode_lookupat(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx,
    vnode_t start_dvp)
{
	struct nameidata *ndp;
	int error = 0;
	u_int32_t ndflags = 0;

	if (ctx == NULL) {
		return EINVAL;
	}

	/* nameidata is sizable; heap-allocate it (Z_NOFAIL cannot fail). */
	ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);

	/* Translate VNODE_LOOKUP_* flags into namei flags. */
	if (flags & VNODE_LOOKUP_NOFOLLOW) {
		ndflags = NOFOLLOW;
	} else {
		ndflags = FOLLOW;
	}

	if (flags & VNODE_LOOKUP_NOCROSSMOUNT) {
		ndflags |= NOCROSSMOUNT;
	}

	if (flags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
		ndflags |= CN_NBMOUNTLOOK;
	}

	/* XXX AUDITVNPATH1 needed ? */
	NDINIT(ndp, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(path), ctx);

	if (flags & VNODE_LOOKUP_NOFOLLOW_ANY) {
		ndp->ni_flag |= NAMEI_NOFOLLOW_ANY;
	}

	/* Relative lookups may start from a caller-supplied directory. */
	if (start_dvp && (path[0] != '/')) {
		ndp->ni_dvp = start_dvp;
		ndp->ni_cnd.cn_flags |= USEDVP;
	}

	if ((error = namei(ndp))) {
		goto out_free;
	}

	ndp->ni_cnd.cn_flags &= ~USEDVP;

	*vpp = ndp->ni_vp;
	nameidone(ndp);

out_free:
	kfree_type(struct nameidata, ndp);
	return error;
}
7790 
7791 errno_t
vnode_lookup(const char * path,int flags,vnode_t * vpp,vfs_context_t ctx)7792 vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
7793 {
7794 	return vnode_lookupat(path, flags, vpp, ctx, NULLVP);
7795 }
7796 
/*
 * Open the file at 'path' with open mode 'fmode' and creation mode
 * 'cmode', returning the opened vnode (with an iocount) in *vpp.
 * 'flags' is a VNODE_LOOKUP_* mask; O_NOFOLLOW in fmode also implies
 * VNODE_LOOKUP_NOFOLLOW.  On failure *vpp is set to NULL.  A NULL
 * context falls back to the current thread's context.
 */
errno_t
vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
{
	struct nameidata *ndp = NULL;
	int error;
	u_int32_t ndflags = 0;
	int lflags = flags;

	if (ctx == NULL) {              /* XXX technically an error */
		ctx = vfs_context_current();
	}

	/* nameidata is sizable; heap-allocate it (Z_NOFAIL cannot fail). */
	ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);

	/* O_NOFOLLOW in the open mode implies a no-follow lookup. */
	if (fmode & O_NOFOLLOW) {
		lflags |= VNODE_LOOKUP_NOFOLLOW;
	}

	if (lflags & VNODE_LOOKUP_NOFOLLOW) {
		ndflags = NOFOLLOW;
	} else {
		ndflags = FOLLOW;
	}

	/* NOFOLLOW_ANY is carried through the open mode, not namei flags. */
	if (lflags & VNODE_LOOKUP_NOFOLLOW_ANY) {
		fmode |= O_NOFOLLOW_ANY;
	}

	if (lflags & VNODE_LOOKUP_NOCROSSMOUNT) {
		ndflags |= NOCROSSMOUNT;
	}

	if (lflags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
		ndflags |= CN_NBMOUNTLOOK;
	}

	/* XXX AUDITVNPATH1 needed ? */
	NDINIT(ndp, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(path), ctx);

	if ((error = vn_open(ndp, fmode, cmode))) {
		*vpp = NULL;
	} else {
		*vpp = ndp->ni_vp;
	}

	kfree_type(struct nameidata, ndp);
	return error;
}
7846 
7847 errno_t
vnode_close(vnode_t vp,int flags,vfs_context_t ctx)7848 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
7849 {
7850 	int error;
7851 
7852 	if (ctx == NULL) {
7853 		ctx = vfs_context_current();
7854 	}
7855 
7856 	error = vn_close(vp, flags, ctx);
7857 	vnode_put(vp);
7858 	return error;
7859 }
7860 
7861 errno_t
vnode_mtime(vnode_t vp,struct timespec * mtime,vfs_context_t ctx)7862 vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx)
7863 {
7864 	struct vnode_attr       va;
7865 	int                     error;
7866 
7867 	VATTR_INIT(&va);
7868 	VATTR_WANTED(&va, va_modify_time);
7869 	error = vnode_getattr(vp, &va, ctx);
7870 	if (!error) {
7871 		*mtime = va.va_modify_time;
7872 	}
7873 	return error;
7874 }
7875 
7876 errno_t
vnode_flags(vnode_t vp,uint32_t * flags,vfs_context_t ctx)7877 vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx)
7878 {
7879 	struct vnode_attr       va;
7880 	int                     error;
7881 
7882 	VATTR_INIT(&va);
7883 	VATTR_WANTED(&va, va_flags);
7884 	error = vnode_getattr(vp, &va, ctx);
7885 	if (!error) {
7886 		*flags = va.va_flags;
7887 	}
7888 	return error;
7889 }
7890 
7891 /*
7892  * Returns:	0			Success
7893  *	vnode_getattr:???
7894  */
7895 errno_t
vnode_size(vnode_t vp,off_t * sizep,vfs_context_t ctx)7896 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
7897 {
7898 	struct vnode_attr       va;
7899 	int                     error;
7900 
7901 	VATTR_INIT(&va);
7902 	VATTR_WANTED(&va, va_data_size);
7903 	error = vnode_getattr(vp, &va, ctx);
7904 	if (!error) {
7905 		*sizep = va.va_data_size;
7906 	}
7907 	return error;
7908 }
7909 
7910 errno_t
vnode_setsize(vnode_t vp,off_t size,int ioflag,vfs_context_t ctx)7911 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
7912 {
7913 	struct vnode_attr       va;
7914 
7915 	VATTR_INIT(&va);
7916 	VATTR_SET(&va, va_data_size, size);
7917 	va.va_vaflags = ioflag & 0xffff;
7918 	return vnode_setattr(vp, &va, ctx);
7919 }
7920 
/*
 * Mark the vnode dirty (VISDIRTY) under the vnode lock.
 * Always returns 0.
 */
int
vnode_setdirty(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VISDIRTY;
	vnode_unlock(vp);
	return 0;
}
7929 
/*
 * Clear the vnode's dirty flag (VISDIRTY) under the vnode lock.
 * Always returns 0.
 */
int
vnode_cleardirty(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VISDIRTY;
	vnode_unlock(vp);
	return 0;
}
7938 
7939 int
vnode_isdirty(vnode_t vp)7940 vnode_isdirty(vnode_t vp)
7941 {
7942 	int dirty;
7943 
7944 	vnode_lock_spin(vp);
7945 	dirty = (vp->v_flag & VISDIRTY) ? 1 : 0;
7946 	vnode_unlock(vp);
7947 
7948 	return dirty;
7949 }
7950 
7951 static int
vn_create_reg(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,struct vnode_attr * vap,uint32_t flags,int fmode,uint32_t * statusp,vfs_context_t ctx)7952 vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
7953 {
7954 	/* Only use compound VNOP for compound operation */
7955 	if (vnode_compound_open_available(dvp) && ((flags & VN_CREATE_DOOPEN) != 0)) {
7956 		*vpp = NULLVP;
7957 		return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, O_CREAT, fmode, statusp, vap, ctx);
7958 	} else {
7959 		return VNOP_CREATE(dvp, vpp, &ndp->ni_cnd, vap, ctx);
7960 	}
7961 }
7962 
7963 /*
7964  * Create a filesystem object of arbitrary type with arbitrary attributes in
 * the specified directory with the specified name.
7966  *
7967  * Parameters:	dvp			Pointer to the vnode of the directory
7968  *					in which to create the object.
7969  *		vpp			Pointer to the area into which to
7970  *					return the vnode of the created object.
7971  *		cnp			Component name pointer from the namei
7972  *					data structure, containing the name to
7973  *					use for the create object.
7974  *		vap			Pointer to the vnode_attr structure
7975  *					describing the object to be created,
7976  *					including the type of object.
7977  *		flags			VN_* flags controlling ACL inheritance
7978  *					and whether or not authorization is to
7979  *					be required for the operation.
7980  *
7981  * Returns:	0			Success
7982  *		!0			errno value
7983  *
7984  * Implicit:	*vpp			Contains the vnode of the object that
7985  *					was created, if successful.
7986  *		*cnp			May be modified by the underlying VFS.
7987  *		*vap			May be modified by the underlying VFS.
 *					modified by either ACL inheritance or
 *					default attribute application; as a
 *					result, the caller's attributes may
 *					be modified, even if the operation is
 *					unsuccessful.
 *
7994  * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order.
7995  *
7996  *		Modification of '*cnp' and '*vap' by the underlying VFS is
7997  *		strongly discouraged.
7998  *
7999  * XXX:		This function is a 'vn_*' function; it belongs in vfs_vnops.c
8000  *
 * XXX:		We should enumerate the possible errno values here, and where
8002  *		in the code they originated.
8003  */
errno_t
vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
{
	errno_t error, old_error;
	vnode_t vp = (vnode_t)0;
	boolean_t batched;
	struct componentname *cnp;
	uint32_t defaulted;

	cnp = &ndp->ni_cnd;
	error = 0;
	/* "batched" == the filesystem supports compound (create+open) VNOPs. */
	batched = namei_compound_available(dvp, ndp) ? TRUE : FALSE;

	KAUTH_DEBUG("%p    CREATE - '%s'", dvp, cnp->cn_nameptr);

	if (flags & VN_CREATE_NOINHERIT) {
		vap->va_vaflags |= VA_NOINHERIT;
	}
	if (flags & VN_CREATE_NOAUTH) {
		vap->va_vaflags |= VA_NOAUTH;
	}
	/*
	 * Handle ACL inheritance, initialize vap.
	 */
	error = vn_attribute_prepare(dvp, vap, &defaulted, ctx);
	if (error) {
		return error;
	}

	/* Open-style parameters only make sense for regular files. */
	if (vap->va_type != VREG && (fmode != 0 || (flags & VN_CREATE_DOOPEN) || statusp)) {
		panic("Open parameters, but not a regular file.");
	}
	if ((fmode != 0) && ((flags & VN_CREATE_DOOPEN) == 0)) {
		panic("Mode for open, but not trying to open...");
	}


	/*
	 * Create the requested node.
	 */
	switch (vap->va_type) {
	case VREG:
		error = vn_create_reg(dvp, vpp, ndp, vap, flags, fmode, statusp, ctx);
		break;
	case VDIR:
		error = vn_mkdir(dvp, vpp, ndp, vap, ctx);
		break;
	case VSOCK:
	case VFIFO:
	case VBLK:
	case VCHR:
		error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
		break;
	default:
		panic("vnode_create: unknown vtype %d", vap->va_type);
	}
	if (error != 0) {
		KAUTH_DEBUG("%p    CREATE - error %d returned by filesystem", dvp, error);
		goto out;
	}

	vp = *vpp;
	/* Remember the creation result before post-processing may clobber it. */
	old_error = error;

	/*
	 * If some of the requested attributes weren't handled by the VNOP,
	 * use our fallback code.
	 */
	if ((error == 0) && !VATTR_ALL_SUPPORTED(vap) && *vpp) {
		KAUTH_DEBUG("     CREATE - doing fallback with ACL %p", vap->va_acl);
		error = vnode_setattr_fallback(*vpp, vap, ctx);
	}

#if CONFIG_MACF
	if ((error == 0) && !(flags & VN_CREATE_NOLABEL)) {
		error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
	}
#endif

	/* Post-create step failed: undo what we can. */
	if ((error != 0) && (vp != (vnode_t)0)) {
		/* If we've done a compound open, close */
		if (batched && (old_error == 0) && (vap->va_type == VREG)) {
			VNOP_CLOSE(vp, fmode, ctx);
		}

		/* Need to provide notifications if a create succeeded */
		if (!batched) {
			*vpp = (vnode_t) 0;
			vnode_put(vp);
			vp = NULLVP;
		}
	}

	/*
	 * For creation VNOPs, this is the equivalent of
	 * lookup_handle_found_vnode.
	 */
	if (kdebug_enable && *vpp) {
		kdebug_lookup(*vpp, cnp);
	}

out:
	/* Restore the caller's ACL and clear attributes we defaulted. */
	vn_attribute_cleanup(vap, defaulted);

	return error;
}
8110 
8111 static kauth_scope_t    vnode_scope;
8112 static int      vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
8113     uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
8114 static int vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
8115     vnode_t vp, vnode_t dvp, int *errorp);
8116 
/*
 * Per-evaluation context for the vnode authorization scope.  Caches
 * attributes and computed ownership/group-membership results so they
 * are only derived once per authorization; 'flags_valid' records which
 * cached bits in 'flags' have been computed.
 */
typedef struct _vnode_authorize_context {
	vnode_t         vp;             /* object of the authorization */
	struct vnode_attr *vap;         /* attributes of vp */
	vnode_t         dvp;            /* parent directory, when relevant */
	struct vnode_attr *dvap;        /* attributes of dvp */
	vfs_context_t   ctx;            /* caller's VFS context */
	int             flags;          /* cached _VAC_* results */
	int             flags_valid;    /* which _VAC_* bits are computed */
#define _VAC_IS_OWNER           (1<<0)
#define _VAC_IN_GROUP           (1<<1)
#define _VAC_IS_DIR_OWNER       (1<<2)
#define _VAC_IN_DIR_GROUP       (1<<3)
#define _VAC_NO_VNODE_POINTERS  (1<<4)
} *vauth_ctx;
8131 
/*
 * Register the vnode authorization scope with kauth; authorization
 * requests on KAUTH_SCOPE_VNODE are dispatched to
 * vnode_authorize_callback().
 */
void
vnode_authorize_init(void)
{
	vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
}
8137 
8138 #define VATTR_PREPARE_DEFAULTED_UID             0x1
8139 #define VATTR_PREPARE_DEFAULTED_GID             0x2
8140 #define VATTR_PREPARE_DEFAULTED_MODE            0x4
8141 
/*
 * Prepare creation attributes: apply ACL inheritance from the parent
 * directory and default any missing uid/gid/mode.
 *
 * On ACL inheritance, the caller's original ACL (if any) is stashed in
 * va_base_acl and replaced by the inherited product; vn_attribute_cleanup()
 * must be called later to restore it.  *defaulted_fieldsp reports which
 * of uid/gid/mode were defaulted (VATTR_PREPARE_DEFAULTED_*).
 */
int
vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
{
	kauth_acl_t nacl = NULL, oacl = NULL;
	int error;

	/*
	 * Handle ACL inheritance.
	 */
	if (!(vap->va_vaflags & VA_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
		/* save the original filesec */
		if (VATTR_IS_ACTIVE(vap, va_acl)) {
			oacl = vap->va_acl;
		}

		vap->va_acl = NULL;
		if ((error = kauth_acl_inherit(dvp,
		    oacl,
		    &nacl,
		    vap->va_type == VDIR,
		    ctx)) != 0) {
			KAUTH_DEBUG("%p    CREATE - error %d processing inheritance", dvp, error);
			return error;
		}

		/*
		 * If the generated ACL is NULL, then we can save ourselves some effort
		 * by clearing the active bit.
		 */
		if (nacl == NULL) {
			VATTR_CLEAR_ACTIVE(vap, va_acl);
		} else {
			/* Remember the original so cleanup can restore it. */
			vap->va_base_acl = oacl;
			VATTR_SET(vap, va_acl, nacl);
		}
	}

	/* Default uid/gid/mode and authorize the creation (unless VA_NOAUTH). */
	error = vnode_authattr_new_internal(dvp, vap, (vap->va_vaflags & VA_NOAUTH), defaulted_fieldsp, ctx);
	if (error) {
		vn_attribute_cleanup(vap, *defaulted_fieldsp);
	}

	return error;
}
8186 
/*
 * Undo vn_attribute_prepare(): restore the caller's original ACL,
 * free the inherited ACL product, and deactivate any uid/gid/mode
 * fields that were defaulted (per 'defaulted_fields').
 */
void
vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields)
{
	/*
	 * If the caller supplied a filesec in vap, it has been replaced
	 * now by the post-inheritance copy.  We need to put the original back
	 * and free the inherited product.
	 */
	kauth_acl_t nacl, oacl;

	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		nacl = vap->va_acl;
		oacl = vap->va_base_acl;

		if (oacl) {
			/* Restore the caller's original ACL. */
			VATTR_SET(vap, va_acl, oacl);
			vap->va_base_acl = NULL;
		} else {
			VATTR_CLEAR_ACTIVE(vap, va_acl);
		}

		if (nacl != NULL) {
			/*
			 * Only free the ACL buffer if 'VA_FILESEC_ACL' is not set as it
			 * should be freed by the caller or it is a post-inheritance copy.
			 */
			if (!(vap->va_vaflags & VA_FILESEC_ACL) ||
			    (oacl != NULL && nacl != oacl)) {
				kauth_acl_free(nacl);
			}
		}
	}

	/* Deactivate the fields we defaulted so the caller's vap is clean. */
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_MODE) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_mode);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_GID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_UID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
	}

	return;
}
8232 
8233 #if CONFIG_APPLEDOUBLE
8234 
8235 #define NATIVE_XATTR(VP)  \
8236 	((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
8237 
/*
 * For an AppleDouble "._name" file, look up the paired base file "name"
 * in the same directory and run the MAC check for deleting its
 * "com.apple.quarantine" extended attribute.  Returns 0 (no-op) when
 * the name is not a "._" name, the parent cannot be found, or the pair
 * lookup fails; otherwise returns the MAC check's result.
 */
static int
dot_underbar_check_paired_vnode(struct componentname *cnp, vnode_t vp,
    vnode_t dvp, vfs_context_t ctx)
{
	int error = 0;
	bool dvp_needs_put = false;

	/* Only names of the form "._X" (length > 2) are of interest. */
	if (cnp->cn_namelen <= 2 || cnp->cn_nameptr[0] != '.' || cnp->cn_nameptr[1] != '_') {
		return 0;
	}

	if (!dvp) {
		if ((dvp = vnode_getparent(vp)) == NULLVP) {
			return 0;
		}
		dvp_needs_put = true;
	}

	vnode_t dupairedvp = NULLVP;
	char lastchar = cnp->cn_nameptr[cnp->cn_namelen];

	/*
	 * Temporarily NUL-terminate the component name (the buffer may
	 * extend past cn_namelen), look up the base name with the "._"
	 * prefix stripped, then restore the byte we overwrote.
	 */
	cnp->cn_nameptr[cnp->cn_namelen] = '\0';
	error = vnode_lookupat(cnp->cn_nameptr + (sizeof("._") - 1), 0,
	    &dupairedvp, ctx, dvp);
	cnp->cn_nameptr[cnp->cn_namelen] = lastchar;
	if (dvp_needs_put) {
		vnode_put(dvp);
		dvp = NULLVP;
	}
	if (!error && dupairedvp) {
		/* MAC check on the paired base file's quarantine xattr. */
		error = mac_vnode_check_deleteextattr(ctx, dupairedvp,
		    "com.apple.quarantine");
		vnode_put(dupairedvp);
		dupairedvp = NULLVP;
	} else {
		/* A failed pair lookup is not an error for the caller. */
		error = 0;
	}

	return error;
}
8278 #endif /* CONFIG_APPLEDOUBLE */
8279 
/*
 * Authorize removal of the name 'cnp' referring to vnode 'vp' from
 * directory 'dvp'.  Returns 0 if the unlink may proceed, else an errno.
 */
int
vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved)
{
#if (!CONFIG_MACF && !NAMEDRSRCFORK)
#pragma unused(cnp)
#endif
	int error = 0;

	/*
	 * Normally, unlinking of directories is not supported.
	 * However, some file systems may have limited support.
	 */
	if ((vp->v_type == VDIR) &&
	    !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) {
		return EPERM; /* POSIX */
	}

	/* authorize the delete operation */
#if CONFIG_MACF
	if (!error) {
		error = mac_vnode_check_unlink(ctx, dvp, vp, cnp);
#if CONFIG_APPLEDOUBLE
		/* On non-native-xattr filesystems, also vet the "._" sidecar pairing. */
		if (!error && !NATIVE_XATTR(dvp)) {
			error = dot_underbar_check_paired_vnode(cnp, vp, dvp, ctx);
		}
#endif /* CONFIG_APPLEDOUBLE */
	}
#endif /* MAC */

	/* authorize file's resource fork */
#if NAMEDRSRCFORK
	if (!error && cnp && (cnp->cn_flags & CN_WANTSRSRCFORK)) {
		/* If CN_WANTSRSRCFORK is set, that implies that 'dvp' is the base file and 'vp' is the namedstream file */
#if CONFIG_MACF
		error = mac_vnode_check_deleteextattr(ctx, dvp, XATTR_RESOURCEFORK_NAME);
#endif /* MAC */
		if (!error) {
			error = vnode_authorize(dvp, NULL, KAUTH_VNODE_WRITE_EXTATTRIBUTES, ctx);
		}
	}
#endif /* NAMEDRSRCFORK */

	/* finally, the standard kauth DELETE check against the node itself */
	if (!error) {
		error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
	}

	return error;
}
8328 
/*
 * Authorize an open of existing vnode 'vp' with open flags 'fmode'.
 * Applies type/flag sanity checks, MAC checks, optional resource-fork
 * authorization, then the kauth action derived from the open mode.
 * Returns 0 if the open may proceed, else an errno.
 */
int
vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved)
{
	/* Open of existing case */
	kauth_action_t action;
	int error = 0;
	if (cnp->cn_ndp == NULL) {
		panic("NULL ndp");
	}
	if (reserved != NULL) {
		panic("reserved not NULL.");
	}

#if CONFIG_MACF
	/* XXX may do duplicate work here, but ignore that for now (idempotent) */
	if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) {
		error = vnode_label(vnode_mount(vp), NULL, vp, NULL, 0, ctx);
		if (error) {
			return error;
		}
	}
#endif

	if (vnode_isdir(vp)) {
		if ((fmode & (FWRITE | O_TRUNC)) || /* disallow write operations on directories */
		    ((fmode & FSEARCH) && !(fmode & O_DIRECTORY))) {
			return EISDIR;
		}
	} else {
		if (fmode & O_DIRECTORY) {
			return ENOTDIR;
		}

		if (vp->v_type == VSOCK && vp->v_tag != VT_FDESC) {
			return EOPNOTSUPP;    /* Operation not supported on socket */
		}

		if (vp->v_type == VLNK && (fmode & O_NOFOLLOW) != 0) {
			return ELOOP;         /* O_NOFOLLOW was specified and the target is a symbolic link */
		}

		/* a trailing slash on a non-directory name is invalid */
		if (cnp->cn_ndp->ni_flag & NAMEI_TRAILINGSLASH) {
			return ENOTDIR;
		}

		/* execute access is only meaningful on regular files here */
		if (!vnode_isreg(vp) && (fmode & FEXEC)) {
			return EACCES;
		}
	}

#if CONFIG_MACF
	/* If a file being opened is a shadow file containing
	 * namedstream data, ignore the macf checks because it
	 * is a kernel internal file and access should always
	 * be allowed.
	 */
	if (!(vnode_isshadow(vp) && vnode_isnamedstream(vp))) {
		error = mac_vnode_check_open(ctx, vp, fmode);
		if (error) {
			return error;
		}
	}
#if CONFIG_APPLEDOUBLE
	/* Writes on non-native-xattr filesystems also vet the "._" sidecar pairing. */
	if (fmode & (FWRITE | O_TRUNC) && !NATIVE_XATTR(vp)) {
		error = dot_underbar_check_paired_vnode(cnp, vp, NULLVP, ctx);
		if (error) {
			return error;
		}
	}
#endif /* CONFIG_APPLEDOUBLE */
#endif

	/* authorize file's resource fork */
#if NAMEDRSRCFORK
	if (cnp && (cnp->cn_flags & CN_WANTSRSRCFORK)) {
		/* If CN_WANTSRSRCFORK is set, that implies that 'pvp' is the base file and 'vp' is the namedstream file */
		vnode_t pvp = vnode_getparent(vp);
		if (pvp == NULLVP) {
			return ENOENT;
		}

#if CONFIG_MACF
		error = mac_vnode_check_getextattr(ctx, pvp, XATTR_RESOURCEFORK_NAME, NULL);
		if (error) {
			vnode_put(pvp);
			return error;
		}
#endif /* MAC */

		/* map the open mode onto extended-attribute rights on the base file */
		action = 0;
		if (fmode & FREAD) {
			action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
		}
		if (fmode & (FWRITE | O_TRUNC)) {
			action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
		}
		error = vnode_authorize(pvp, NULL, action, ctx);
		if (error) {
			vnode_put(pvp);
			return error;
		}
		vnode_put(pvp);
	}
#endif /* NAMEDRSRCFORK */

	/* compute action to be authorized */
	action = 0;
	if (fmode & FREAD) {
		action |= KAUTH_VNODE_READ_DATA;
	}
	if (fmode & (FWRITE | O_TRUNC)) {
		/*
		 * If we are writing, appending, and not truncating,
		 * indicate that we are appending so that if the
		 * UF_APPEND or SF_APPEND bits are set, we do not deny
		 * the open.
		 */
		if ((fmode & O_APPEND) && !(fmode & O_TRUNC)) {
			action |= KAUTH_VNODE_APPEND_DATA;
		} else {
			action |= KAUTH_VNODE_WRITE_DATA;
		}
	}
	if (fmode & (FSEARCH | FEXEC)) {
		if (vnode_isdir(vp)) {
			action |= KAUTH_VNODE_SEARCH;
		} else {
			action |= KAUTH_VNODE_EXECUTE;
		}
	}
	error = vnode_authorize(vp, NULL, action, ctx);
#if NAMEDSTREAMS
	if (error == EACCES) {
		/*
		 * Shadow files may exist on-disk with a different UID/GID
		 * than that of the current context.  Verify that this file
		 * is really a shadow file.  If it was created successfully
		 * then it should be authorized.
		 */
		if (vnode_isshadow(vp) && vnode_isnamedstream(vp)) {
			error = vnode_verifynamedstream(vp);
		}
	}
#endif

	return error;
}
8476 
/*
 * Authorize creation of a new file named by 'cnp' in directory 'dvp' with
 * initial attributes 'vap'.  Returns 0 if creation may proceed, else errno.
 */
int
vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
{
#if !CONFIG_MACF
#pragma unused(vap)
#endif
	/* Creation case */
	int error;
	kauth_action_t action = KAUTH_VNODE_ADD_FILE;

	if (cnp->cn_ndp == NULL) {
		panic("NULL cn_ndp");
	}
	if (reserved != NULL) {
		panic("reserved not NULL.");
	}

	/* Only validate path for creation if we didn't do a complete lookup */
	if (cnp->cn_ndp->ni_flag & NAMEI_UNFINISHED) {
		error = lookup_validate_creation_path(cnp->cn_ndp);
		if (error) {
			return error;
		}
	}

	/* authorize file's resource fork */
#if NAMEDRSRCFORK
	if (cnp && (cnp->cn_flags & CN_WANTSRSRCFORK)) {
		/* If CN_WANTSRSRCFORK is set, that implies that 'dvp' is the base file and 'vp' is the namedstream file */
#if CONFIG_MACF
		error = mac_vnode_check_setextattr(ctx, dvp, XATTR_RESOURCEFORK_NAME, NULL);
		if (error) {
			return error;
		}
#endif /* MAC */

		action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
	}
#endif /* NAMEDRSRCFORK */

#if CONFIG_MACF
	error = mac_vnode_check_create(ctx, dvp, cnp, vap);
	if (error) {
		return error;
	}
#endif /* CONFIG_MACF */

	/* finally, kauth: may we add a file (and possibly xattrs) to dvp? */
	return vnode_authorize(dvp, NULL, action, ctx);
}
8526 
8527 int
vn_authorize_rename(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_context_t ctx,void * reserved)8528 vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
8529     struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
8530     vfs_context_t ctx, void *reserved)
8531 {
8532 	return vn_authorize_renamex(fdvp, fvp, fcnp, tdvp, tvp, tcnp, ctx, 0, reserved);
8533 }
8534 
8535 int
vn_authorize_renamex(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_context_t ctx,vfs_rename_flags_t flags,void * reserved)8536 vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
8537     struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
8538     vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
8539 {
8540 	return vn_authorize_renamex_with_paths(fdvp, fvp, fcnp, NULL, tdvp, tvp, tcnp, NULL, ctx, flags, reserved);
8541 }
8542 
/*
 * Authorize a rename — or, with VFS_RENAME_SWAP, an exchange — of 'fvp'
 * (name 'fcnp' in 'fdvp') to name 'tcnp' in 'tdvp', where 'tvp', if
 * non-NULL, is the existing object at the destination.  'from_path' and
 * 'to_path', when supplied, are used to post KAUTH_FILEOP_WILL_RENAME
 * fileop notifications.  Returns 0 if the operation may proceed, else errno.
 */
int
vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path,
    struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path,
    vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
{
	int error = 0;
	int moving = 0;
	bool swap = flags & VFS_RENAME_SWAP;

	if (reserved != NULL) {
		panic("Passed something other than NULL as reserved field!");
	}

	/*
	 * Avoid renaming "." and "..".
	 *
	 * XXX No need to check for this in the FS.  We should always have the leaves
	 * in VFS in this case.
	 */
	if (fvp->v_type == VDIR &&
	    ((fdvp == fvp) ||
	    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
	    ((fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT))) {
		error = EINVAL;
		goto out;
	}

	/* no existing target and the FS does compound rename: vet the creation path */
	if (tvp == NULLVP && vnode_compound_rename_available(tdvp)) {
		error = lookup_validate_creation_path(tcnp->cn_ndp);
		if (error) {
			goto out;
		}
	}

	/***** <MACF> *****/
#if CONFIG_MACF
	if (swap) {
		error = mac_vnode_check_rename_swap(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
	} else {
		error = mac_vnode_check_rename(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
	}
#if CONFIG_APPLEDOUBLE
	if (!error && !NATIVE_XATTR(fdvp)) {
		error = dot_underbar_check_paired_vnode(fcnp, fvp, fdvp, ctx);
	}
	/* Currently no Filesystem that does not support native xattrs supports rename swap */
	if (!error && swap && !NATIVE_XATTR(tdvp)) {
		error = dot_underbar_check_paired_vnode(tcnp, tvp, tdvp, ctx);
	}
#endif /* CONFIG_APPLEDOUBLE */
	if (error) {
		goto out;
	}
#endif
	/***** </MACF> *****/

	/***** <MiscChecks> *****/
	if (tvp != NULL) {
		if (!swap) {
			/* a plain rename cannot replace an object of a different kind */
			if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
				error = ENOTDIR;
				goto out;
			} else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
				error = EISDIR;
				goto out;
			}
		}
	} else if (swap) {
		/*
		 * Caller should have already checked this and returned
		 * ENOENT.  If we send back ENOENT here, caller will retry
		 * which isn't what we want so we send back EINVAL here
		 * instead.
		 */
		error = EINVAL;
		goto out;
	}

	if (fvp == tdvp) {
		error = EINVAL;
		goto out;
	}

	/*
	 * The following edge case is caught here:
	 * (to cannot be a descendent of from)
	 *
	 *       o fdvp
	 *      /
	 *     /
	 *    o fvp
	 *     \
	 *      \
	 *       o tdvp
	 *      /
	 *     /
	 *    o tvp
	 */
	if (tdvp->v_parent == fvp) {
		error = EINVAL;
		goto out;
	}

	/* swap has the symmetric constraint: from cannot be a descendent of to */
	if (swap && fdvp->v_parent == tvp) {
		error = EINVAL;
		goto out;
	}
	/***** </MiscChecks> *****/

	/***** <Kauth> *****/

	/*
	 * As part of the Kauth step, we call out to allow 3rd-party
	 * fileop notification of "about to rename".  This is needed
	 * in the event that 3rd-parties need to know that the DELETE
	 * authorization is actually part of a rename.  It's important
	 * that we guarantee that the DELETE call-out will always be
	 * made if the WILL_RENAME call-out is made.  Another fileop
	 * call-out will be performed once the operation is completed.
	 * We can ignore the result of kauth_authorize_fileop().
	 *
	 * N.B. We are passing the vnode and *both* paths to each
	 * call; kauth_authorize_fileop() extracts the "from" path
	 * when posting a KAUTH_FILEOP_WILL_RENAME notification.
	 * As such, we only post these notifications if all of the
	 * information we need is provided.
	 */

	if (swap) {
		kauth_action_t f = 0, t = 0;

		/*
		 * Directories changing parents need ...ADD_SUBDIR...  to
		 * permit changing ".."
		 */
		if (fdvp != tdvp) {
			if (vnode_isdir(fvp)) {
				f = KAUTH_VNODE_ADD_SUBDIRECTORY;
			}
			if (vnode_isdir(tvp)) {
				t = KAUTH_VNODE_ADD_SUBDIRECTORY;
			}
		}
		if (to_path != NULL) {
			kauth_authorize_fileop(vfs_context_ucred(ctx),
			    KAUTH_FILEOP_WILL_RENAME,
			    (uintptr_t)fvp,
			    (uintptr_t)to_path);
		}
		/* fvp leaves fdvp: need DELETE there (plus ".." rewrite rights if a dir) */
		error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | f, ctx);
		if (error) {
			goto out;
		}
		if (from_path != NULL) {
			kauth_authorize_fileop(vfs_context_ucred(ctx),
			    KAUTH_FILEOP_WILL_RENAME,
			    (uintptr_t)tvp,
			    (uintptr_t)from_path);
		}
		/* and symmetrically, tvp leaves tdvp */
		error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE | t, ctx);
		if (error) {
			goto out;
		}
		/* now the add rights: tvp moves into fdvp, fvp moves into tdvp */
		f = vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
		t = vnode_isdir(tvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
		if (fdvp == tdvp) {
			error = vnode_authorize(fdvp, NULL, f | t, ctx);
		} else {
			error = vnode_authorize(fdvp, NULL, t, ctx);
			if (error) {
				goto out;
			}
			error = vnode_authorize(tdvp, NULL, f, ctx);
		}
		if (error) {
			goto out;
		}
	} else {
		error = 0;
		if ((tvp != NULL) && vnode_isdir(tvp)) {
			if (tvp != fdvp) {
				moving = 1;
			}
		} else if (tdvp != fdvp) {
			moving = 1;
		}

		/*
		 * must have delete rights to remove the old name even in
		 * the simple case of fdvp == tdvp.
		 *
		 * If fvp is a directory, and we are changing it's parent,
		 * then we also need rights to rewrite its ".." entry as well.
		 */
		if (to_path != NULL) {
			kauth_authorize_fileop(vfs_context_ucred(ctx),
			    KAUTH_FILEOP_WILL_RENAME,
			    (uintptr_t)fvp,
			    (uintptr_t)to_path);
		}
		if (vnode_isdir(fvp)) {
			if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
				goto out;
			}
		} else {
			if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0) {
				goto out;
			}
		}
		if (moving) {
			/* moving into tdvp or tvp, must have rights to add */
			if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp,
			    NULL,
			    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE,
			    ctx)) != 0) {
				goto out;
			}
		} else {
			/* node staying in same directory, must be allowed to add new name */
			if ((error = vnode_authorize(fdvp, NULL,
			    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0) {
				goto out;
			}
		}
		/* overwriting tvp */
		if ((tvp != NULL) && !vnode_isdir(tvp) &&
		    ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) {
			goto out;
		}
	}

	/***** </Kauth> *****/

	/* XXX more checks? */
out:
	return error;
}
8780 
8781 int
vn_authorize_mkdir(vnode_t dvp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx,void * reserved)8782 vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
8783 {
8784 #if !CONFIG_MACF
8785 #pragma unused(vap)
8786 #endif
8787 	int error;
8788 
8789 	if (reserved != NULL) {
8790 		panic("reserved not NULL in vn_authorize_mkdir()");
8791 	}
8792 
8793 	/* XXX A hack for now, to make shadow files work */
8794 	if (cnp->cn_ndp == NULL) {
8795 		return 0;
8796 	}
8797 
8798 	if (vnode_compound_mkdir_available(dvp)) {
8799 		error = lookup_validate_creation_path(cnp->cn_ndp);
8800 		if (error) {
8801 			goto out;
8802 		}
8803 	}
8804 
8805 #if CONFIG_MACF
8806 	error = mac_vnode_check_create(ctx,
8807 	    dvp, cnp, vap);
8808 	if (error) {
8809 		goto out;
8810 	}
8811 #endif
8812 
8813 	/* authorize addition of a directory to the parent */
8814 	if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
8815 		goto out;
8816 	}
8817 
8818 out:
8819 	return error;
8820 }
8821 
8822 int
vn_authorize_rmdir(vnode_t dvp,vnode_t vp,struct componentname * cnp,vfs_context_t ctx,void * reserved)8823 vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved)
8824 {
8825 #if CONFIG_MACF
8826 	int error;
8827 #else
8828 #pragma unused(cnp)
8829 #endif
8830 	if (reserved != NULL) {
8831 		panic("Non-NULL reserved argument to vn_authorize_rmdir()");
8832 	}
8833 
8834 	if (vp->v_type != VDIR) {
8835 		/*
8836 		 * rmdir only deals with directories
8837 		 */
8838 		return ENOTDIR;
8839 	}
8840 
8841 	if (dvp == vp) {
8842 		/*
8843 		 * No rmdir "." please.
8844 		 */
8845 		return EINVAL;
8846 	}
8847 
8848 #if CONFIG_MACF
8849 	error = mac_vnode_check_unlink(ctx, dvp,
8850 	    vp, cnp);
8851 	if (error) {
8852 		return error;
8853 	}
8854 #endif
8855 
8856 	return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
8857 }
8858 
8859 /*
8860  * Authorizer for directory cloning. This does not use vnodes but instead
8861  * uses prefilled vnode attributes from the filesystem.
8862  *
8863  * The same function is called to set up the attributes required, perform the
8864  * authorization and cleanup (if required)
8865  */
int
vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action,
    struct vnode_attr *dvap, __unused vnode_t sdvp, mount_t mp,
    dir_clone_authorizer_op_t vattr_op, uint32_t flags, vfs_context_t ctx,
    __unused void *reserved)
{
	int error;
	int is_suser = vfs_context_issuser(ctx);

	/* Setup phase: mark which attributes the filesystem must supply. */
	if (vattr_op == OP_VATTR_SETUP) {
		VATTR_INIT(vap);

		/*
		 * When ACL inheritence is implemented, both vap->va_acl and
		 * dvap->va_acl will be required (even as superuser).
		 */
		VATTR_WANTED(vap, va_type);
		VATTR_WANTED(vap, va_mode);
		VATTR_WANTED(vap, va_flags);
		VATTR_WANTED(vap, va_uid);
		VATTR_WANTED(vap, va_gid);
		if (dvap) {
			VATTR_INIT(dvap);
			VATTR_WANTED(dvap, va_flags);
		}

		if (!is_suser) {
			/*
			 * If not superuser, we have to evaluate ACLs and
			 * need the target directory gid to set the initial
			 * gid of the new object.
			 */
			VATTR_WANTED(vap, va_acl);
			if (dvap) {
				VATTR_WANTED(dvap, va_gid);
			}
		} else if (dvap && (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
			VATTR_WANTED(dvap, va_gid);
		}
		return 0;
	} else if (vattr_op == OP_VATTR_CLEANUP) {
		return 0; /* Nothing to do for now */
	}

	/* Authorize phase. dvap isn't used for authorization */
	error = vnode_attr_authorize(vap, NULL, mp, action, ctx);

	if (error) {
		return error;
	}

	/*
	 * vn_attribute_prepare should be able to accept attributes as well as
	 * vnodes but for now we do this inline.
	 */
	if (!is_suser || (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
		/*
		 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit
		 * owner is set, that owner takes ownership of all new files.
		 */
		if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
		    (mp->mnt_fsowner != KAUTH_UID_NONE)) {
			VATTR_SET(vap, va_uid, mp->mnt_fsowner);
		} else {
			/* default owner is current user */
			VATTR_SET(vap, va_uid,
			    kauth_cred_getuid(vfs_context_ucred(ctx)));
		}

		if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
		    (mp->mnt_fsgroup != KAUTH_GID_NONE)) {
			VATTR_SET(vap, va_gid, mp->mnt_fsgroup);
		} else {
			/*
			 * default group comes from parent object,
			 * fallback to current user
			 */
			/*
			 * NOTE(review): unlike the setup phase above, dvap is
			 * dereferenced here without a NULL check — presumably
			 * callers always supply dvap in the authorize phase;
			 * confirm against callers.
			 */
			if (VATTR_IS_SUPPORTED(dvap, va_gid)) {
				VATTR_SET(vap, va_gid, dvap->va_gid);
			} else {
				VATTR_SET(vap, va_gid,
				    kauth_cred_getgid(vfs_context_ucred(ctx)));
			}
		}
	}

	/* Inherit SF_RESTRICTED bit from destination directory only */
	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		VATTR_SET(vap, va_flags,
		    ((vap->va_flags & ~(UF_DATAVAULT | SF_RESTRICTED)))); /* Turn off from source */
		if (VATTR_IS_ACTIVE(dvap, va_flags)) {
			VATTR_SET(vap, va_flags,
			    vap->va_flags | (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
		}
	} else if (VATTR_IS_ACTIVE(dvap, va_flags)) {
		VATTR_SET(vap, va_flags, (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
	}

	return 0;
}
8966 
8967 
8968 /*
8969  * Authorize an operation on a vnode.
8970  *
8971  * This is KPI, but here because it needs vnode_scope.
8972  *
8973  * Returns:	0			Success
8974  *	kauth_authorize_action:EPERM	...
8975  *	xlate => EACCES			Permission denied
8976  *	kauth_authorize_action:0	Success
8977  *	kauth_authorize_action:		Depends on callback return; this is
8978  *					usually only vnode_authorize_callback(),
8979  *					but may include other listerners, if any
8980  *					exist.
8981  *		EROFS
8982  *		EACCES
8983  *		EPERM
8984  *		???
8985  */
8986 int
vnode_authorize(vnode_t vp,vnode_t dvp,kauth_action_t action,vfs_context_t ctx)8987 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
8988 {
8989 	int     error, result;
8990 
8991 	/*
8992 	 * We can't authorize against a dead vnode; allow all operations through so that
8993 	 * the correct error can be returned.
8994 	 */
8995 	if (vp->v_type == VBAD) {
8996 		return 0;
8997 	}
8998 
8999 	error = 0;
9000 	result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
9001 	    (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
9002 	if (result == EPERM) {          /* traditional behaviour */
9003 		result = EACCES;
9004 	}
9005 	/* did the lower layers give a better error return? */
9006 	if ((result != 0) && (error != 0)) {
9007 		return error;
9008 	}
9009 	return result;
9010 }
9011 
9012 /*
9013  * Test for vnode immutability.
9014  *
9015  * The 'append' flag is set when the authorization request is constrained
9016  * to operations which only request the right to append to a file.
9017  *
9018  * The 'ignore' flag is set when an operation modifying the immutability flags
9019  * is being authorized.  We check the system securelevel to determine which
9020  * immutability flags we can ignore.
9021  */
9022 static int
vnode_immutable(struct vnode_attr * vap,int append,int ignore)9023 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
9024 {
9025 	int     mask;
9026 
9027 	/* start with all bits precluding the operation */
9028 	mask = IMMUTABLE | APPEND;
9029 
9030 	/* if appending only, remove the append-only bits */
9031 	if (append) {
9032 		mask &= ~APPEND;
9033 	}
9034 
9035 	/* ignore only set when authorizing flags changes */
9036 	if (ignore) {
9037 		if (securelevel <= 0) {
9038 			/* in insecure state, flags do not inhibit changes */
9039 			mask = 0;
9040 		} else {
9041 			/* in secure state, user flags don't inhibit */
9042 			mask &= ~(UF_IMMUTABLE | UF_APPEND);
9043 		}
9044 	}
9045 	KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
9046 	if ((vap->va_flags & mask) != 0) {
9047 		return EPERM;
9048 	}
9049 	return 0;
9050 }
9051 
9052 static int
vauth_node_owner(struct vnode_attr * vap,kauth_cred_t cred)9053 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
9054 {
9055 	int result;
9056 
9057 	/* default assumption is not-owner */
9058 	result = 0;
9059 
9060 	/*
9061 	 * If the filesystem has given us a UID, we treat this as authoritative.
9062 	 */
9063 	if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
9064 		result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
9065 	}
9066 	/* we could test the owner UUID here if we had a policy for it */
9067 
9068 	return result;
9069 }
9070 
9071 /*
9072  * vauth_node_group
9073  *
9074  * Description:	Ask if a cred is a member of the group owning the vnode object
9075  *
9076  * Parameters:		vap		vnode attribute
9077  *				vap->va_gid	group owner of vnode object
9078  *			cred		credential to check
9079  *			ismember	pointer to where to put the answer
9080  *			idontknow	Return this if we can't get an answer
9081  *
9082  * Returns:		0		Success
9083  *			idontknow	Can't get information
9084  *	kauth_cred_ismember_gid:?	Error from kauth subsystem
9085  *	kauth_cred_ismember_gid:?	Error from kauth subsystem
9086  */
9087 static int
vauth_node_group(struct vnode_attr * vap,kauth_cred_t cred,int * ismember,int idontknow)9088 vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
9089 {
9090 	int     error;
9091 	int     result;
9092 
9093 	error = 0;
9094 	result = 0;
9095 
9096 	/*
9097 	 * The caller is expected to have asked the filesystem for a group
9098 	 * at some point prior to calling this function.  The answer may
9099 	 * have been that there is no group ownership supported for the
9100 	 * vnode object, in which case we return
9101 	 */
9102 	if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
9103 		error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
9104 		/*
9105 		 * Credentials which are opted into external group membership
9106 		 * resolution which are not known to the external resolver
9107 		 * will result in an ENOENT error.  We translate this into
9108 		 * the appropriate 'idontknow' response for our caller.
9109 		 *
9110 		 * XXX We do not make a distinction here between an ENOENT
9111 		 * XXX arising from a response from the external resolver,
9112 		 * XXX and an ENOENT which is internally generated.  This is
9113 		 * XXX a deficiency of the published kauth_cred_ismember_gid()
9114 		 * XXX KPI which can not be overcome without new KPI.  For
9115 		 * XXX all currently known cases, however, this wil result
9116 		 * XXX in correct behaviour.
9117 		 */
9118 		if (error == ENOENT) {
9119 			error = idontknow;
9120 		}
9121 	}
9122 	/*
9123 	 * XXX We could test the group UUID here if we had a policy for it,
9124 	 * XXX but this is problematic from the perspective of synchronizing
9125 	 * XXX group UUID and POSIX GID ownership of a file and keeping the
9126 	 * XXX values coherent over time.  The problem is that the local
9127 	 * XXX system will vend transient group UUIDs for unknown POSIX GID
9128 	 * XXX values, and these are not persistent, whereas storage of values
9129 	 * XXX is persistent.  One potential solution to this is a local
9130 	 * XXX (persistent) replica of remote directory entries and vended
9131 	 * XXX local ids in a local directory server (think in terms of a
9132 	 * XXX caching DNS server).
9133 	 */
9134 
9135 	if (!error) {
9136 		*ismember = result;
9137 	}
9138 	return error;
9139 }
9140 
9141 static int
vauth_file_owner(vauth_ctx vcp)9142 vauth_file_owner(vauth_ctx vcp)
9143 {
9144 	int result;
9145 
9146 	if (vcp->flags_valid & _VAC_IS_OWNER) {
9147 		result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
9148 	} else {
9149 		result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
9150 
9151 		/* cache our result */
9152 		vcp->flags_valid |= _VAC_IS_OWNER;
9153 		if (result) {
9154 			vcp->flags |= _VAC_IS_OWNER;
9155 		} else {
9156 			vcp->flags &= ~_VAC_IS_OWNER;
9157 		}
9158 	}
9159 	return result;
9160 }
9161 
9162 
9163 /*
9164  * vauth_file_ingroup
9165  *
9166  * Description:	Ask if a user is a member of the group owning the directory
9167  *
9168  * Parameters:		vcp		The vnode authorization context that
9169  *					contains the user and directory info
9170  *				vcp->flags_valid	Valid flags
9171  *				vcp->flags		Flags values
9172  *				vcp->vap		File vnode attributes
9173  *				vcp->ctx		VFS Context (for user)
9174  *			ismember	pointer to where to put the answer
9175  *			idontknow	Return this if we can't get an answer
9176  *
9177  * Returns:		0		Success
9178  *		vauth_node_group:?	Error from vauth_node_group()
9179  *
9180  * Implicit returns:	*ismember	0	The user is not a group member
9181  *					1	The user is a group member
9182  */
9183 static int
vauth_file_ingroup(vauth_ctx vcp,int * ismember,int idontknow)9184 vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
9185 {
9186 	int     error;
9187 
9188 	/* Check for a cached answer first, to avoid the check if possible */
9189 	if (vcp->flags_valid & _VAC_IN_GROUP) {
9190 		*ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
9191 		error = 0;
9192 	} else {
9193 		/* Otherwise, go look for it */
9194 		error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);
9195 
9196 		if (!error) {
9197 			/* cache our result */
9198 			vcp->flags_valid |= _VAC_IN_GROUP;
9199 			if (*ismember) {
9200 				vcp->flags |= _VAC_IN_GROUP;
9201 			} else {
9202 				vcp->flags &= ~_VAC_IN_GROUP;
9203 			}
9204 		}
9205 	}
9206 	return error;
9207 }
9208 
9209 static int
vauth_dir_owner(vauth_ctx vcp)9210 vauth_dir_owner(vauth_ctx vcp)
9211 {
9212 	int result;
9213 
9214 	if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
9215 		result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
9216 	} else {
9217 		result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
9218 
9219 		/* cache our result */
9220 		vcp->flags_valid |= _VAC_IS_DIR_OWNER;
9221 		if (result) {
9222 			vcp->flags |= _VAC_IS_DIR_OWNER;
9223 		} else {
9224 			vcp->flags &= ~_VAC_IS_DIR_OWNER;
9225 		}
9226 	}
9227 	return result;
9228 }
9229 
9230 /*
9231  * vauth_dir_ingroup
9232  *
9233  * Description:	Ask if a user is a member of the group owning the directory
9234  *
9235  * Parameters:		vcp		The vnode authorization context that
9236  *					contains the user and directory info
9237  *				vcp->flags_valid	Valid flags
9238  *				vcp->flags		Flags values
9239  *				vcp->dvap		Dir vnode attributes
9240  *				vcp->ctx		VFS Context (for user)
9241  *			ismember	pointer to where to put the answer
9242  *			idontknow	Return this if we can't get an answer
9243  *
9244  * Returns:		0		Success
9245  *		vauth_node_group:?	Error from vauth_node_group()
9246  *
9247  * Implicit returns:	*ismember	0	The user is not a group member
9248  *					1	The user is a group member
9249  */
9250 static int
vauth_dir_ingroup(vauth_ctx vcp,int * ismember,int idontknow)9251 vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
9252 {
9253 	int     error;
9254 
9255 	/* Check for a cached answer first, to avoid the check if possible */
9256 	if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
9257 		*ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
9258 		error = 0;
9259 	} else {
9260 		/* Otherwise, go look for it */
9261 		error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);
9262 
9263 		if (!error) {
9264 			/* cache our result */
9265 			vcp->flags_valid |= _VAC_IN_DIR_GROUP;
9266 			if (*ismember) {
9267 				vcp->flags |= _VAC_IN_DIR_GROUP;
9268 			} else {
9269 				vcp->flags &= ~_VAC_IN_DIR_GROUP;
9270 			}
9271 		}
9272 	}
9273 	return error;
9274 }
9275 
9276 static int
vfs_context_ignores_node_permissions(vfs_context_t ctx)9277 vfs_context_ignores_node_permissions(vfs_context_t ctx)
9278 {
9279 	if (proc_ignores_node_permissions(vfs_context_proc(ctx))) {
9280 		return 1;
9281 	}
9282 	if (get_bsdthread_info(vfs_context_thread(ctx))->uu_flag & UT_IGNORE_NODE_PERMISSIONS) {
9283 		return 1;
9284 	}
9285 	return 0;
9286 }
9287 
9288 /*
9289  * Test the posix permissions in (vap) to determine whether (credential)
9290  * may perform (action)
9291  */
static int
vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
{
	struct vnode_attr *vap;
	int needed, error, owner_ok, group_ok, world_ok, ismember;
#ifdef KAUTH_DEBUG_ENABLE
	/* 'where' records which permission class decided the result, for the debug log below */
	const char *where = "uninitialized";
# define _SETWHERE(c)   where = c;
#else
# define _SETWHERE(c)
#endif

	/* checking file or directory? */
	if (on_dir) {
		vap = vcp->dvap;
	} else {
		vap = vcp->vap;
	}

	error = 0;

	/*
	 * We want to do as little work here as possible.  So first we check
	 * which sets of permissions grant us the access we need, and avoid checking
	 * whether specific permissions grant access when more generic ones would.
	 */

	/* owner permissions */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IRUSR;
	}
	if (action & VWRITE) {
		needed |= S_IWUSR;
	}
	if (action & VEXEC) {
		needed |= S_IXUSR;
	}
	owner_ok = (needed & vap->va_mode) == needed;

	/*
	 * Processes with the appropriate entitlement can mark themselves as
	 * ignoring file/directory permissions if they own it.
	 */
	if (!owner_ok && vfs_context_ignores_node_permissions(vcp->ctx)) {
		owner_ok = 1;
	}

	/* group permissions */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IRGRP;
	}
	if (action & VWRITE) {
		needed |= S_IWGRP;
	}
	if (action & VEXEC) {
		needed |= S_IXGRP;
	}
	group_ok = (needed & vap->va_mode) == needed;

	/* world permissions */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IROTH;
	}
	if (action & VWRITE) {
		needed |= S_IWOTH;
	}
	if (action & VEXEC) {
		needed |= S_IXOTH;
	}
	world_ok = (needed & vap->va_mode) == needed;

	/* If granted/denied by all three, we're done */
	if (owner_ok && group_ok && world_ok) {
		_SETWHERE("all");
		goto out;
	}

	if (!owner_ok && !group_ok && !world_ok) {
		_SETWHERE("all");
		error = EACCES;
		goto out;
	}

	/* Check ownership (relatively cheap) */
	if ((on_dir && vauth_dir_owner(vcp)) ||
	    (!on_dir && vauth_file_owner(vcp))) {
		_SETWHERE("user");
		if (!owner_ok) {
			error = EACCES;
		}
		goto out;
	}

	/* Not owner; if group and world both grant it we're done */
	if (group_ok && world_ok) {
		_SETWHERE("group/world");
		goto out;
	}
	if (!group_ok && !world_ok) {
		_SETWHERE("group/world");
		error = EACCES;
		goto out;
	}

	/* Check group membership (most expensive) */
	ismember = 0;   /* Default to allow, if the target has no group owner */

	/*
	 * In the case we can't get an answer about the user from the call to
	 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
	 * the side of caution, rather than simply granting access, or we will
	 * fail to correctly implement exclusion groups, so we set the third
	 * parameter on the basis of the state of 'group_ok'.
	 */
	if (on_dir) {
		error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
	} else {
		error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
	}
	if (error) {
		/*
		 * Membership could not be determined: treat the user as a
		 * member only when that is the denying (safer) outcome.
		 */
		if (!group_ok) {
			ismember = 1;
		}
		error = 0;
	}
	if (ismember) {
		_SETWHERE("group");
		if (!group_ok) {
			error = EACCES;
		}
		goto out;
	}

	/* Not owner, not in group, use world result */
	_SETWHERE("world");
	if (!world_ok) {
		error = EACCES;
	}

	/* FALLTHROUGH */

out:
	/* NOTE: 'needed' here holds the world-permission mask computed last above */
	KAUTH_DEBUG("%p    %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
	    vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
	    (action & VREAD)  ? "r" : "-",
	    (action & VWRITE) ? "w" : "-",
	    (action & VEXEC)  ? "x" : "-",
	    needed,
	    (vap->va_mode & S_IRUSR) ? "r" : "-",
	    (vap->va_mode & S_IWUSR) ? "w" : "-",
	    (vap->va_mode & S_IXUSR) ? "x" : "-",
	    (vap->va_mode & S_IRGRP) ? "r" : "-",
	    (vap->va_mode & S_IWGRP) ? "w" : "-",
	    (vap->va_mode & S_IXGRP) ? "x" : "-",
	    (vap->va_mode & S_IROTH) ? "r" : "-",
	    (vap->va_mode & S_IWOTH) ? "w" : "-",
	    (vap->va_mode & S_IXOTH) ? "x" : "-",
	    kauth_cred_getuid(vcp->ctx->vc_ucred),
	    on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
	    on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
	return error;
}
9457 
9458 /*
9459  * Authorize the deletion of the node vp from the directory dvp.
9460  *
9461  * We assume that:
9462  * - Neither the node nor the directory are immutable.
9463  * - The user is not the superuser.
9464  *
9465  * The precedence of factors for authorizing or denying delete for a credential
9466  *
9467  * 1) Explicit ACE on the node. (allow or deny DELETE)
9468  * 2) Explicit ACE on the directory (allow or deny DELETE_CHILD).
9469  *
9470  *    If there are conflicting ACEs on the node and the directory, the node
9471  *    ACE wins.
9472  *
9473  * 3) Sticky bit on the directory.
9474  *    Deletion is not permitted if the directory is sticky and the caller is
9475  *    not owner of the node or directory. The sticky bit rules are like a deny
9476  *    delete ACE except lower in priority than ACL's either allowing or denying
9477  *    delete.
9478  *
9479  * 4) POSIX permissions on the directory.
9480  *
9481  * As an optimization, we cache whether or not delete child is permitted
9482  * on directories. This enables us to skip directory ACL and POSIX checks
9483  * as we already have the result from those checks. However, we always check the
9484  * node ACL and, if the directory has the sticky bit set, we always check its
9485  * ACL (even for a directory with an authorized delete child). Furthermore,
9486  * caching the delete child authorization is independent of the sticky bit
9487  * being set as it is only applicable in determining whether the node can be
9488  * deleted or not.
9489  */
static int
vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
{
	struct vnode_attr       *vap = vcp->vap;
	struct vnode_attr       *dvap = vcp->dvap;
	kauth_cred_t            cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval   eval;
	int                     error, ismember;

	/* Check the ACL on the node first; a node ACE outranks the directory's */
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}

		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* an entitled owner may override an ACL deny */
			if (vauth_file_owner(vcp) && vfs_context_ignores_node_permissions(vcp->ctx)) {
				KAUTH_DEBUG("%p    Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p    DENIED - denied by ACL", vcp->vp);
			return EACCES;
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p    ALLOWED - granted by ACL", vcp->vp);
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* Defer to directory */
			KAUTH_DEBUG("%p    DEFERRED - by file ACL", vcp->vp);
			break;
		}
	}

	/*
	 * Without a sticky bit, a previously authorized delete child is
	 * sufficient to authorize this delete.
	 *
	 * If the sticky bit is set, a directory ACL which allows delete child
	 * overrides a (potential) sticky bit deny. The authorized delete child
	 * cannot tell us if it was authorized because of an explicit delete
	 * child allow ACE or because of POSIX permissions so we have to check
	 * the directory ACL every time if the directory has a sticky bit.
	 */
	if (!(dvap->va_mode & S_ISTXT) && cached_delete_child) {
		KAUTH_DEBUG("%p    ALLOWED - granted by directory ACL or POSIX permissions and no sticky bit on directory", vcp->vp);
		return 0;
	}

	/* check the ACL on the directory */
	if (VATTR_IS_NOT(dvap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
		eval.ae_acl = &dvap->va_acl->acl_ace[0];
		eval.ae_count = dvap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_dir_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		/*
		 * If there is no entry, we are going to defer to other
		 * authorization mechanisms.
		 */
		error = kauth_acl_evaluate(cred, &eval);

		if (error != 0) {
			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}
		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* an entitled directory owner may override an ACL deny */
			if (vauth_dir_owner(vcp) && vfs_context_ignores_node_permissions(vcp->ctx)) {
				KAUTH_DEBUG("%p    Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p    DENIED - denied by directory ACL", vcp->vp);
			return EACCES;
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p    ALLOWED - granted by directory ACL", vcp->vp);
			/* remember the delete-child grant on the directory vnode */
			if (!cached_delete_child && vcp->dvp) {
				vnode_cache_authorized_action(vcp->dvp,
				    vcp->ctx, KAUTH_VNODE_DELETE_CHILD);
			}
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* Deferred by directory ACL */
			KAUTH_DEBUG("%p    DEFERRED - directory ACL", vcp->vp);
			break;
		}
	}

	/*
	 * From this point, we can't explicitly allow and if we reach the end
	 * of the function without a denial, then the delete is authorized.
	 */
	if (!cached_delete_child) {
		if (vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */) != 0) {
			KAUTH_DEBUG("%p    DENIED - denied by posix permisssions", vcp->vp);
			return EACCES;
		}
		/*
		 * Cache the authorized action on the vnode if allowed by the
		 * directory ACL or POSIX permissions. It is correct to cache
		 * this action even if sticky bit would deny deleting the node.
		 */
		if (vcp->dvp) {
			vnode_cache_authorized_action(vcp->dvp, vcp->ctx,
			    KAUTH_VNODE_DELETE_CHILD);
		}
	}

	/* enforce sticky bit behaviour */
	if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
		KAUTH_DEBUG("%p    DENIED - sticky bit rules (user %d  file %d  dir %d)",
		    vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid);
		return EACCES;
	}

	/* not denied, must be OK */
	return 0;
}
9657 
9658 
9659 /*
9660  * Authorize an operation based on the node's attributes.
9661  */
static int
vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
{
	struct vnode_attr       *vap = vcp->vap;
	kauth_cred_t            cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval   eval;
	int                     error, ismember;
	mode_t                  posix_action;

	/*
	 * If we are the file owner, we automatically have some rights.
	 *
	 * Do we need to expand this to support group ownership?
	 */
	if (vauth_file_owner(vcp)) {
		acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
	}

	/*
	 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
	 * mask the latter.  If TAKE_OWNERSHIP is requested the caller is about to
	 * change ownership to themselves, and WRITE_SECURITY is implicitly
	 * granted to the owner.  We need to do this because at this point
	 * WRITE_SECURITY may not be granted as the caller is not currently
	 * the owner.
	 */
	if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
	    (acl_rights & KAUTH_VNODE_WRITE_SECURITY)) {
		acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
	}

	if (acl_rights == 0) {
		KAUTH_DEBUG("%p    ALLOWED - implicit or no rights required", vcp->vp);
		return 0;
	}

	/* if we have an ACL, evaluate it */
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = acl_rights;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer.  Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}

		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* an entitled owner may override an ACL deny */
			if (vauth_file_owner(vcp) && vfs_context_ignores_node_permissions(vcp->ctx)) {
				KAUTH_DEBUG("%p    Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p    DENIED - by ACL", vcp->vp);
			return EACCES;         /* deny, deny, counter-allege */
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p    ALLOWED - all rights granted by ACL", vcp->vp);
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* fall through to POSIX evaluation of the residual rights */
			KAUTH_DEBUG("%p    DEFERRED - directory ACL", vcp->vp);
			break;
		}

		/* report to the caller whether the ACL contained a deny ACE */
		*found_deny = eval.ae_found_deny;

		/* fall through and evaluate residual rights */
	} else {
		/* no ACL, everything is residual */
		eval.ae_residual = acl_rights;
	}

	/*
	 * Grant residual rights that have been pre-authorized.
	 */
	eval.ae_residual &= ~preauth_rights;

	/*
	 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
	 */
	if (vauth_file_owner(vcp)) {
		eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
	}

	if (eval.ae_residual == 0) {
		KAUTH_DEBUG("%p    ALLOWED - rights already authorized", vcp->vp);
		return 0;
	}

	/*
	 * Bail if we have residual rights that can't be granted by posix permissions,
	 * or aren't presumed granted at this point.
	 *
	 * XXX these can be collapsed for performance
	 */
	if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
		KAUTH_DEBUG("%p    DENIED - CHANGE_OWNER not permitted", vcp->vp);
		return EACCES;
	}
	if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
		KAUTH_DEBUG("%p    DENIED - WRITE_SECURITY not permitted", vcp->vp);
		return EACCES;
	}

#if DIAGNOSTIC
	/* DELETE must go through vnode_authorize_delete(), never this path */
	if (eval.ae_residual & KAUTH_VNODE_DELETE) {
		panic("vnode_authorize: can't be checking delete permission here");
	}
#endif

	/*
	 * Compute the fallback posix permissions that will satisfy the remaining
	 * rights.
	 */
	posix_action = 0;
	if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
	    KAUTH_VNODE_LIST_DIRECTORY |
	    KAUTH_VNODE_READ_EXTATTRIBUTES)) {
		posix_action |= VREAD;
	}
	if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
	    KAUTH_VNODE_ADD_FILE |
	    KAUTH_VNODE_ADD_SUBDIRECTORY |
	    KAUTH_VNODE_DELETE_CHILD |
	    KAUTH_VNODE_WRITE_ATTRIBUTES |
	    KAUTH_VNODE_WRITE_EXTATTRIBUTES)) {
		posix_action |= VWRITE;
	}
	if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
	    KAUTH_VNODE_SEARCH)) {
		posix_action |= VEXEC;
	}

	if (posix_action != 0) {
		return vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */);
	} else {
		/* residual rights have no POSIX equivalent; grant them */
		KAUTH_DEBUG("%p    ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
		    vcp->vp,
		    (eval.ae_residual & KAUTH_VNODE_READ_DATA)
		    ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
		    ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_EXECUTE)
		    ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
		    (eval.ae_residual & KAUTH_VNODE_DELETE)
		    ? " DELETE" : "",
		    (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
		    ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
		    ? " DELETE_CHILD" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
		    ? " READ_ATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
		    ? " WRITE_ATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
		    ? " READ_EXTATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
		    ? " WRITE_EXTATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
		    ? " READ_SECURITY" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
		    ? " WRITE_SECURITY" : "",
		    (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
		    ? " CHECKIMMUTABLE" : "",
		    (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
		    ? " CHANGE_OWNER" : "");
	}

	/*
	 * Lack of required Posix permissions implies no reason to deny access.
	 */
	return 0;
}
9858 
9859 /*
9860  * Check for file immutability.
9861  */
9862 static int
vnode_authorize_checkimmutable(mount_t mp,vauth_ctx vcp,struct vnode_attr * vap,int rights,int ignore)9863 vnode_authorize_checkimmutable(mount_t mp, vauth_ctx vcp,
9864     struct vnode_attr *vap, int rights, int ignore)
9865 {
9866 	int error;
9867 	int append;
9868 
9869 	/*
9870 	 * Perform immutability checks for operations that change data.
9871 	 *
9872 	 * Sockets, fifos and devices require special handling.
9873 	 */
9874 	switch (vap->va_type) {
9875 	case VSOCK:
9876 	case VFIFO:
9877 	case VBLK:
9878 	case VCHR:
9879 		/*
9880 		 * Writing to these nodes does not change the filesystem data,
9881 		 * so forget that it's being tried.
9882 		 */
9883 		rights &= ~KAUTH_VNODE_WRITE_DATA;
9884 		break;
9885 	default:
9886 		break;
9887 	}
9888 
9889 	error = 0;
9890 	if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
9891 		/* check per-filesystem options if possible */
9892 		if (mp != NULL) {
9893 			/* check for no-EA filesystems */
9894 			if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
9895 			    (vfs_flags(mp) & MNT_NOUSERXATTR)) {
9896 				KAUTH_DEBUG("%p    DENIED - filesystem disallowed extended attributes", vap);
9897 				error = EACCES;  /* User attributes disabled */
9898 				goto out;
9899 			}
9900 		}
9901 
9902 		/*
9903 		 * check for file immutability. first, check if the requested rights are
9904 		 * allowable for a UF_APPEND file.
9905 		 */
9906 		append = 0;
9907 		if (vap->va_type == VDIR) {
9908 			if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES | ~KAUTH_VNODE_WRITE_RIGHTS)) == rights) {
9909 				append = 1;
9910 			}
9911 		} else {
9912 			if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES | ~KAUTH_VNODE_WRITE_RIGHTS)) == rights) {
9913 				append = 1;
9914 			}
9915 		}
9916 		if ((error = vnode_immutable(vap, append, ignore)) != 0) {
9917 			if (error && !ignore) {
9918 				/*
9919 				 * In case of a rename, we want to check ownership for dvp as well.
9920 				 */
9921 				int owner = 0;
9922 				if (rights & KAUTH_VNODE_DELETE_CHILD && vcp->dvp != NULL) {
9923 					owner = vauth_file_owner(vcp) && vauth_dir_owner(vcp);
9924 				} else {
9925 					owner = vauth_file_owner(vcp);
9926 				}
9927 				if (owner && vfs_context_ignores_node_permissions(vcp->ctx)) {
9928 					error = vnode_immutable(vap, append, 1);
9929 				}
9930 			}
9931 		}
9932 		if (error) {
9933 			KAUTH_DEBUG("%p    DENIED - file is immutable", vap);
9934 			goto out;
9935 		}
9936 	}
9937 out:
9938 	return error;
9939 }
9940 
9941 /*
9942  * Handle authorization actions for filesystems that advertise that the
9943  * server will be enforcing.
9944  *
9945  * Returns:	0			Authorization should be handled locally
9946  *		1			Authorization was handled by the FS
9947  *
9948  * Note:	Imputed returns will only occur if the authorization request
9949  *		was handled by the FS.
9950  *
9951  * Imputed:	*resultp, modified	Return code from FS when the request is
9952  *					handled by the FS.
9953  *		VNOP_ACCESS:???
9954  *		VNOP_OPEN:???
9955  */
9956 static int
vnode_authorize_opaque(vnode_t vp,int * resultp,kauth_action_t action,vfs_context_t ctx)9957 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
9958 {
9959 	int     error;
9960 
9961 	/*
9962 	 * If the vp is a device node, socket or FIFO it actually represents a local
9963 	 * endpoint, so we need to handle it locally.
9964 	 */
9965 	switch (vp->v_type) {
9966 	case VBLK:
9967 	case VCHR:
9968 	case VSOCK:
9969 	case VFIFO:
9970 		return 0;
9971 	default:
9972 		break;
9973 	}
9974 
9975 	/*
9976 	 * In the advisory request case, if the filesystem doesn't think it's reliable
9977 	 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
9978 	 */
9979 	if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount)) {
9980 		return 0;
9981 	}
9982 
9983 	/*
9984 	 * Let the filesystem have a say in the matter.  It's OK for it to not implemnent
9985 	 * VNOP_ACCESS, as most will authorise inline with the actual request.
9986 	 */
9987 	if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
9988 		*resultp = error;
9989 		KAUTH_DEBUG("%p    DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
9990 		return 1;
9991 	}
9992 
9993 	/*
9994 	 * Typically opaque filesystems do authorisation in-line, but exec is a special case.  In
9995 	 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
9996 	 */
9997 	if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
9998 		/* try a VNOP_OPEN for readonly access */
9999 		if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
10000 			*resultp = error;
10001 			KAUTH_DEBUG("%p    DENIED - EXECUTE denied because file could not be opened readonly", vp);
10002 			return 1;
10003 		}
10004 		VNOP_CLOSE(vp, FREAD, ctx);
10005 	}
10006 
10007 	/*
10008 	 * We don't have any reason to believe that the request has to be denied at this point,
10009 	 * so go ahead and allow it.
10010 	 */
10011 	*resultp = 0;
10012 	KAUTH_DEBUG("%p    ALLOWED - bypassing access check for non-local filesystem", vp);
10013 	return 1;
10014 }
10015 
10016 
10017 
10018 
10019 /*
10020  * Returns:	KAUTH_RESULT_ALLOW
10021  *		KAUTH_RESULT_DENY
10022  *
10023  * Imputed:	*arg3, modified		Error code in the deny case
10024  *		EROFS			Read-only file system
10025  *		EACCES			Permission denied
10026  *		EPERM			Operation not permitted [no execute]
10027  *	vnode_getattr:ENOMEM		Not enough space [only if has filesec]
10028  *	vnode_getattr:???
10029  *	vnode_authorize_opaque:*arg2	???
10030  *	vnode_authorize_checkimmutable:???
10031  *	vnode_authorize_delete:???
10032  *	vnode_authorize_simple:???
10033  */
10034 
10035 
static int
vnode_authorize_callback(__unused kauth_cred_t cred, __unused void *idata,
    kauth_action_t action, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2,
    uintptr_t arg3)
{
	vfs_context_t   ctx;
	vnode_t         cvp = NULLVP;   /* vnode whose rights cache we consult/update */
	vnode_t         vp, dvp;
	int             result = KAUTH_RESULT_DENY;
	int             parent_iocount = 0;     /* non-zero: we hold an iocount on cvp from vnode_getparent() */
	int             parent_action = 0; /* In case we need to use namedstream's data fork for cached rights*/

	ctx = (vfs_context_t)arg0;
	vp = (vnode_t)arg1;
	dvp = (vnode_t)arg2;

	/*
	 * if there are 2 vnodes passed in, we don't know at
	 * this point which rights to look at based on the
	 * combined action being passed in... defer until later...
	 * otherwise check the kauth 'rights' cache hung
	 * off of the vnode we're interested in... if we've already
	 * been granted the right we're currently interested in,
	 * we can just return success... otherwise we'll go through
	 * the process of authorizing the requested right(s)... if that
	 * succeeds, we'll add the right(s) to the cache.
	 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
	 */
	if (dvp && vp) {
		goto defer;
	}
	if (dvp) {
		cvp = dvp;
	} else {
		/*
		 * For named streams on local-authorization volumes, rights are cached on the parent;
		 * authorization is determined by looking at the parent's properties anyway, so storing
		 * on the parent means that we don't recompute for the named stream and that if
		 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
		 * stream to flush its cache separately.  If we miss in the cache, then we authorize
		 * as if there were no cached rights (passing the named stream vnode and desired rights to
		 * vnode_authorize_callback_int()).
		 *
		 * On an opaquely authorized volume, we don't know the relationship between the
		 * data fork's properties and the rights granted on a stream.  Thus, named stream vnodes
		 * on such a volume are authorized directly (rather than using the parent) and have their
		 * own caches.  When a named stream vnode is created, we mark the parent as having a named
		 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
		 * find the stream and flush its cache.
		 */
		if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
			/* vnode_getparent() takes an iocount; released at 'out' via vnode_put() */
			cvp = vnode_getparent(vp);
			if (cvp != NULLVP) {
				parent_iocount = 1;
			} else {
				cvp = NULL;
				goto defer; /* If we can't use the parent, take the slow path */
			}

			/* Have to translate some actions */
			parent_action = action;
			if (parent_action & KAUTH_VNODE_READ_DATA) {
				parent_action &= ~KAUTH_VNODE_READ_DATA;
				parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
			}
			if (parent_action & KAUTH_VNODE_WRITE_DATA) {
				parent_action &= ~KAUTH_VNODE_WRITE_DATA;
				parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
			}
		} else {
			cvp = vp;
		}
	}

	/* fast path: the requested right was previously granted and cached */
	if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
		result = KAUTH_RESULT_ALLOW;
		goto out;
	}
defer:
	result = vnode_authorize_callback_int(action, ctx, vp, dvp, (int *)arg3);

	if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP) {
		KAUTH_DEBUG("%p - caching action = %x", cvp, action);
		vnode_cache_authorized_action(cvp, ctx, action);
	}

out:
	if (parent_iocount) {
		vnode_put(cvp);
	}

	return result;
}
10129 
/*
 * Common rights check shared by vnode_authorize_callback_int() and
 * vnode_attr_authorize(): evaluate the requested rights against the
 * already-populated attributes in the authorization context.
 *
 * Parameters:
 *	vcp		Authorization context; vcp->vap must be populated,
 *			and vcp->dvap as well when KAUTH_VNODE_DELETE is
 *			among the rights.
 *	mp		Mount the object lives on (passed through to the
 *			immutability check).
 *	rights		Requested KAUTH_VNODE_* rights, control bits
 *			already stripped by the caller.
 *	is_suser	Nonzero if the caller holds superuser privilege.
 *	found_deny	Out: forced TRUE on the superuser path (see below);
 *			on the non-superuser path it is filled in by
 *			vnode_authorize_simple() — presumably recording
 *			whether an ACL DENY was encountered (confirm there).
 *	noimmutable	Nonzero to relax the immutability check
 *			(KAUTH_VNODE_NOIMMUTABLE was set).
 *	parent_authorized_for_delete_child
 *			Nonzero if the parent directory was already
 *			authorized for DELETE_CHILD, allowing the parent
 *			immutability re-check to be skipped.
 *
 * Returns:	0 if the rights are granted, otherwise an errno (EPERM,
 *		EACCES, ...) from the failing sub-check.
 */
static int
vnode_attr_authorize_internal(vauth_ctx vcp, mount_t mp,
    kauth_ace_rights_t rights, int is_suser, boolean_t *found_deny,
    int noimmutable, int parent_authorized_for_delete_child)
{
	int result;

	/*
	 * Check for immutability.
	 *
	 * In the deletion case, parent directory immutability vetoes specific
	 * file rights.
	 */
	if ((result = vnode_authorize_checkimmutable(mp, vcp, vcp->vap, rights,
	    noimmutable)) != 0) {
		goto out;
	}

	/* Deleting: the parent must also pass the immutability check, unless
	 * the parent was previously authorized for DELETE_CHILD. */
	if ((rights & KAUTH_VNODE_DELETE) &&
	    !parent_authorized_for_delete_child) {
		result = vnode_authorize_checkimmutable(mp, vcp, vcp->dvap,
		    KAUTH_VNODE_DELETE_CHILD, 0);
		if (result) {
			goto out;
		}
	}

	/*
	 * Clear rights that have been authorized by reaching this point, bail if nothing left to
	 * check.
	 */
	rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
	if (rights == 0) {
		/* result is 0 here: the first checkimmutable call succeeded */
		goto out;
	}

	/*
	 * If we're not the superuser, authorize based on file properties;
	 * note that even if parent_authorized_for_delete_child is TRUE, we
	 * need to check on the node itself.
	 */
	if (!is_suser) {
		/* process delete rights */
		if ((rights & KAUTH_VNODE_DELETE) &&
		    ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0)) {
			goto out;
		}

		/* process remaining rights */
		if ((rights & ~KAUTH_VNODE_DELETE) &&
		    (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, found_deny)) != 0) {
			goto out;
		}
	} else {
		/*
		 * Execute is only granted to root if one of the x bits is set.  This check only
		 * makes sense if the posix mode bits are actually supported.
		 */
		if ((rights & KAUTH_VNODE_EXECUTE) &&
		    (vcp->vap->va_type == VREG) &&
		    VATTR_IS_SUPPORTED(vcp->vap, va_mode) &&
		    !(vcp->vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
			result = EPERM;
			KAUTH_DEBUG("%p    DENIED - root execute requires at least one x bit in 0x%x", vcp, vcp->vap->va_mode);
			goto out;
		}

		/* Assume that there were DENYs so we don't wrongly cache KAUTH_VNODE_SEARCHBYANYONE */
		*found_deny = TRUE;

		KAUTH_DEBUG("%p    ALLOWED - caller is superuser", vcp);
	}
out:
	return result;
}
10205 
/*
 * Workhorse behind the vnode authorization KPI: authorize 'action' against
 * 'vp' (and parent 'dvp' when deleting), fetching the attributes needed via
 * vnode_getattr() and delegating the rights evaluation to
 * vnode_attr_authorize_internal().
 *
 * Parameters:
 *	action	Requested KAUTH_VNODE_* action bits, possibly including the
 *		KAUTH_VNODE_ACCESS / KAUTH_VNODE_NOIMMUTABLE control bits.
 *	ctx	Caller's VFS context; authorization is done against its cred.
 *	vp	Vnode the action is requested on.
 *	dvp	Parent directory vnode; required when KAUTH_VNODE_DELETE is
 *		requested (enforced under DIAGNOSTIC), otherwise ignored.
 *	errorp	Out: receives the errno when the result is a denial.
 *
 * Returns:	KAUTH_RESULT_ALLOW or KAUTH_RESULT_DENY (with *errorp set).
 *
 * Side effects: may seed the authorization cache with
 * KAUTH_VNODE_SEARCHBYANYONE on a successfully searched directory.
 */
static int
vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
    vnode_t vp, vnode_t dvp, int *errorp)
{
	struct _vnode_authorize_context auth_context;
	vauth_ctx               vcp;
	kauth_cred_t            cred;
	kauth_ace_rights_t      rights;
	struct vnode_attr       va, dva;
	int                     result;
	int                     noimmutable;
	boolean_t               parent_authorized_for_delete_child = FALSE;
	boolean_t               found_deny = FALSE;
	boolean_t               parent_ref = FALSE;
	boolean_t               is_suser = FALSE;

	vcp = &auth_context;
	vcp->ctx = ctx;
	vcp->vp = vp;
	vcp->dvp = dvp;
	/*
	 * Note that we authorize against the context, not the passed cred
	 * (the same thing anyway)
	 */
	cred = ctx->vc_ucred;

	/* Both attr structures are initialized even if dvp is unused, so the
	 * va_acl cleanup at 'out:' is always safe. */
	VATTR_INIT(&va);
	vcp->vap = &va;
	VATTR_INIT(&dva);
	vcp->dvap = &dva;

	vcp->flags = vcp->flags_valid = 0;

#if DIAGNOSTIC
	if ((ctx == NULL) || (vp == NULL) || (cred == NULL)) {
		panic("vnode_authorize: bad arguments (context %p  vp %p  cred %p)", ctx, vp, cred);
	}
#endif

	KAUTH_DEBUG("%p  AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
	    vp, vfs_context_proc(ctx)->p_comm,
	    (action & KAUTH_VNODE_ACCESS)               ? "access" : "auth",
	    (action & KAUTH_VNODE_READ_DATA)            ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
	    (action & KAUTH_VNODE_WRITE_DATA)           ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
	    (action & KAUTH_VNODE_EXECUTE)              ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
	    (action & KAUTH_VNODE_DELETE)               ? " DELETE" : "",
	    (action & KAUTH_VNODE_APPEND_DATA)          ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
	    (action & KAUTH_VNODE_DELETE_CHILD)         ? " DELETE_CHILD" : "",
	    (action & KAUTH_VNODE_READ_ATTRIBUTES)      ? " READ_ATTRIBUTES" : "",
	    (action & KAUTH_VNODE_WRITE_ATTRIBUTES)     ? " WRITE_ATTRIBUTES" : "",
	    (action & KAUTH_VNODE_READ_EXTATTRIBUTES)   ? " READ_EXTATTRIBUTES" : "",
	    (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES)  ? " WRITE_EXTATTRIBUTES" : "",
	    (action & KAUTH_VNODE_READ_SECURITY)        ? " READ_SECURITY" : "",
	    (action & KAUTH_VNODE_WRITE_SECURITY)       ? " WRITE_SECURITY" : "",
	    (action & KAUTH_VNODE_CHANGE_OWNER)         ? " CHANGE_OWNER" : "",
	    (action & KAUTH_VNODE_NOIMMUTABLE)          ? " (noimmutable)" : "",
	    vnode_isdir(vp) ? "directory" : "file",
	    vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);

	/*
	 * Extract the control bits from the action, everything else is
	 * requested rights.
	 */
	noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
	rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);

	if (rights & KAUTH_VNODE_DELETE) {
#if DIAGNOSTIC
		if (dvp == NULL) {
			panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
		}
#endif
		/*
		 * check to see if we've already authorized the parent
		 * directory for deletion of its children... if so, we
		 * can skip a whole bunch of work... we will still have to
		 * authorize that this specific child can be removed
		 */
		if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE) {
			parent_authorized_for_delete_child = TRUE;
		}
	} else {
		/* Not deleting: the parent is irrelevant, don't fetch its attrs */
		vcp->dvp = NULLVP;
		vcp->dvap = NULL;
	}

	/*
	 * Check for read-only filesystems.
	 */
	if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
	    (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
	    (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
		result = EROFS;
		goto out;
	}

	/*
	 * Check for noexec filesystems.
	 */
	if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
		result = EACCES;
		goto out;
	}

	/*
	 * Handle cases related to filesystems with non-local enforcement.
	 * This call can return 0, in which case we will fall through to perform a
	 * check based on VNOP_GETATTR data.  Otherwise it returns 1 and sets
	 * an appropriate result, at which point we can return immediately.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx)) {
		goto out;
	}

	/*
	 * If the vnode is a namedstream (extended attribute) data vnode (eg.
	 * a resource fork), *_DATA becomes *_EXTATTRIBUTES.
	 */
	if (vnode_isnamedstream(vp)) {
		if (rights & KAUTH_VNODE_READ_DATA) {
			rights &= ~KAUTH_VNODE_READ_DATA;
			rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
		}
		if (rights & KAUTH_VNODE_WRITE_DATA) {
			rights &= ~KAUTH_VNODE_WRITE_DATA;
			rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
		}

		/*
		 * Point 'vp' to the namedstream's parent for ACL checking
		 */
		if ((vp->v_parent != NULL) &&
		    (vget_internal(vp->v_parent, 0, VNODE_NODEAD | VNODE_DRAINO) == 0)) {
			/* parent_ref records the extra iocount taken here; it is
			 * dropped on both the error and success exit paths */
			parent_ref = TRUE;
			vcp->vp = vp = vp->v_parent;
		}
	}

	if (vfs_context_issuser(ctx)) {
		/*
		 * if we're not asking for execute permissions or modifications,
		 * then we're done, this action is authorized.
		 */
		if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
			goto success;
		}

		is_suser = TRUE;
	}

	/*
	 * Get vnode attributes and extended security information for the vnode
	 * and directory if required.
	 *
	 * If we're root we only want mode bits and flags for checking
	 * execute and immutability.
	 */
	VATTR_WANTED(&va, va_mode);
	VATTR_WANTED(&va, va_flags);
	if (!is_suser) {
		VATTR_WANTED(&va, va_uid);
		VATTR_WANTED(&va, va_gid);
		VATTR_WANTED(&va, va_acl);
	}
	if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
		KAUTH_DEBUG("%p    ERROR - failed to get vnode attributes - %d", vp, result);
		goto out;
	}
	/* va_type comes from the vnode itself, not from VNOP_GETATTR() */
	VATTR_WANTED(&va, va_type);
	VATTR_RETURN(&va, va_type, vnode_vtype(vp));

	if (vcp->dvp) {
		VATTR_WANTED(&dva, va_mode);
		VATTR_WANTED(&dva, va_flags);
		if (!is_suser) {
			VATTR_WANTED(&dva, va_uid);
			VATTR_WANTED(&dva, va_gid);
			VATTR_WANTED(&dva, va_acl);
		}
		if ((result = vnode_getattr(vcp->dvp, &dva, ctx)) != 0) {
			KAUTH_DEBUG("%p    ERROR - failed to get directory vnode attributes - %d", vp, result);
			goto out;
		}
		VATTR_WANTED(&dva, va_type);
		VATTR_RETURN(&dva, va_type, vnode_vtype(vcp->dvp));
	}

	result = vnode_attr_authorize_internal(vcp, vp->v_mount, rights, is_suser,
	    &found_deny, noimmutable, parent_authorized_for_delete_child);
out:
	/* Free any ACLs returned by vnode_getattr(); both attr structures were
	 * VATTR_INIT()ed, so these checks are safe on every exit path. */
	if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
		kauth_acl_free(va.va_acl);
	}
	if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL)) {
		kauth_acl_free(dva.va_acl);
	}

	if (result) {
		if (parent_ref) {
			vnode_put(vp);
		}
		*errorp = result;
		KAUTH_DEBUG("%p    DENIED - auth denied", vp);
		return KAUTH_RESULT_DENY;
	}
	if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
		/*
		 * if we were successfully granted the right to search this directory
		 * and there were NO ACL DENYs for search and the posix permissions also don't
		 * deny execute, we can synthesize a global right that allows anyone to
		 * traverse this directory during a pathname lookup without having to
		 * match the credential associated with this cache of rights.
		 *
		 * Note that we can correctly cache KAUTH_VNODE_SEARCHBYANYONE
		 * only if we actually check ACLs which we don't for root. As
		 * a workaround, the lookup fast path checks for root.
		 */
		if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
		    ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
		    (S_IXUSR | S_IXGRP | S_IXOTH))) {
			vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
		}
	}
success:
	if (parent_ref) {
		vnode_put(vp);
	}

	/*
	 * Note that this implies that we will allow requests for no rights, as well as
	 * for rights that we do not recognise.  There should be none of these.
	 */
	KAUTH_DEBUG("%p    ALLOWED - auth granted", vp);
	return KAUTH_RESULT_ALLOW;
}
10443 
10444 int
vnode_attr_authorize_init(struct vnode_attr * vap,struct vnode_attr * dvap,kauth_action_t action,vfs_context_t ctx)10445 vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap,
10446     kauth_action_t action, vfs_context_t ctx)
10447 {
10448 	VATTR_INIT(vap);
10449 	VATTR_WANTED(vap, va_type);
10450 	VATTR_WANTED(vap, va_mode);
10451 	VATTR_WANTED(vap, va_flags);
10452 	if (dvap) {
10453 		VATTR_INIT(dvap);
10454 		if (action & KAUTH_VNODE_DELETE) {
10455 			VATTR_WANTED(dvap, va_type);
10456 			VATTR_WANTED(dvap, va_mode);
10457 			VATTR_WANTED(dvap, va_flags);
10458 		}
10459 	} else if (action & KAUTH_VNODE_DELETE) {
10460 		return EINVAL;
10461 	}
10462 
10463 	if (!vfs_context_issuser(ctx)) {
10464 		VATTR_WANTED(vap, va_uid);
10465 		VATTR_WANTED(vap, va_gid);
10466 		VATTR_WANTED(vap, va_acl);
10467 		if (dvap && (action & KAUTH_VNODE_DELETE)) {
10468 			VATTR_WANTED(dvap, va_uid);
10469 			VATTR_WANTED(dvap, va_gid);
10470 			VATTR_WANTED(dvap, va_acl);
10471 		}
10472 	}
10473 
10474 	return 0;
10475 }
10476 
10477 #define VNODE_SEC_ATTRS_NO_ACL (VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | VNODE_ATTR_va_mode | VNODE_ATTR_va_flags | VNODE_ATTR_va_type)
10478 
/*
 * Authorize 'action' against a set of already-fetched attributes rather than
 * a live vnode.  'vap' must have been prepared with
 * vnode_attr_authorize_init() and then populated; 'dvap' likewise when
 * KAUTH_VNODE_DELETE is requested.  'mp' may be NULL, in which case the
 * read-only/noexec mount checks and uid/gid ownership translation are
 * skipped.
 *
 * Returns:	0 if authorized, otherwise an errno; EPERM from the internal
 *		check is mapped to EACCES for this KPI.
 */
int
vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp,
    kauth_action_t action, vfs_context_t ctx)
{
	struct _vnode_authorize_context auth_context;
	vauth_ctx vcp;
	kauth_ace_rights_t rights;
	int noimmutable;
	boolean_t found_deny;
	boolean_t is_suser = FALSE;
	int result = 0;
	/* Snapshot the caller's uid/gid: vnode_attr_handle_uid_and_gid() below
	 * may rewrite them in 'vap', and they are restored before returning. */
	uid_t ouid = vap->va_uid;
	gid_t ogid = vap->va_gid;

	vcp = &auth_context;
	vcp->ctx = ctx;
	vcp->vp = NULLVP;
	vcp->vap = vap;
	vcp->dvp = NULLVP;
	vcp->dvap = dvap;
	vcp->flags = vcp->flags_valid = 0;

	/* Split the control bits from the requested rights. */
	noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
	rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);

	/*
	 * Check for read-only filesystems.
	 */
	if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
	    mp && (mp->mnt_flag & MNT_RDONLY) &&
	    ((vap->va_type == VREG) || (vap->va_type == VDIR) ||
	    (vap->va_type == VLNK) || (rights & KAUTH_VNODE_DELETE) ||
	    (rights & KAUTH_VNODE_DELETE_CHILD))) {
		result = EROFS;
		goto out;
	}

	/*
	 * Check for noexec filesystems.
	 */
	if ((rights & KAUTH_VNODE_EXECUTE) &&
	    (vap->va_type == VREG) && mp && (mp->mnt_flag & MNT_NOEXEC)) {
		result = EACCES;
		goto out;
	}

	if (vfs_context_issuser(ctx)) {
		/*
		 * if we're not asking for execute permissions or modifications,
		 * then we're done, this action is authorized.
		 */
		if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
			goto out;
		}
		is_suser = TRUE;
	}

	if (mp) {
		/* The caller must have supplied an ACL if the mount requires one. */
		if (vfs_extendedsecurity(mp) && VATTR_IS_ACTIVE(vap, va_acl) && !VATTR_IS_SUPPORTED(vap, va_acl)) {
			panic("(1) vnode attrs not complete for vnode_attr_authorize");
		}
		vnode_attr_handle_uid_and_gid(vap, mp, ctx);
	}

	/* Every requested non-ACL security attribute must have been returned. */
	if ((vap->va_active & VNODE_SEC_ATTRS_NO_ACL) != (vap->va_supported & VNODE_SEC_ATTRS_NO_ACL)) {
		panic("(2) vnode attrs not complete for vnode_attr_authorize (2) vap->va_active = 0x%llx , vap->va_supported = 0x%llx",
		    vap->va_active, vap->va_supported);
	}

	/* found_deny is write-only here; its value is not consulted afterwards */
	result = vnode_attr_authorize_internal(vcp, mp, rights, is_suser,
	    &found_deny, noimmutable, FALSE);

	/* Undo any uid/gid translation done by vnode_attr_handle_uid_and_gid() */
	if (mp) {
		vap->va_uid = ouid;
		vap->va_gid = ogid;
	}

	if (result == EPERM) {
		result = EACCES;
	}
out:
	return result;
}
10562 
10563 
10564 int
vnode_authattr_new(vnode_t dvp,struct vnode_attr * vap,int noauth,vfs_context_t ctx)10565 vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
10566 {
10567 	return vnode_authattr_new_internal(dvp, vap, noauth, NULL, ctx);
10568 }
10569 
10570 /*
10571  * Check that the attribute information in vattr can be legally applied to
10572  * a new file by the context.
10573  */
10574 static int
vnode_authattr_new_internal(vnode_t dvp,struct vnode_attr * vap,int noauth,uint32_t * defaulted_fieldsp,vfs_context_t ctx)10575 vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
10576 {
10577 	int             error;
10578 	int             has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
10579 	uint32_t        inherit_flags;
10580 	kauth_cred_t    cred;
10581 	guid_t          changer;
10582 	mount_t         dmp;
10583 	struct vnode_attr dva;
10584 
10585 	error = 0;
10586 
10587 	if (defaulted_fieldsp) {
10588 		*defaulted_fieldsp = 0;
10589 	}
10590 
10591 	defaulted_owner = defaulted_group = defaulted_mode = 0;
10592 
10593 	inherit_flags = 0;
10594 
10595 	/*
10596 	 * Require that the filesystem support extended security to apply any.
10597 	 */
10598 	if (!vfs_extendedsecurity(dvp->v_mount) &&
10599 	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
10600 		error = EINVAL;
10601 		goto out;
10602 	}
10603 
10604 	/*
10605 	 * Default some fields.
10606 	 */
10607 	dmp = dvp->v_mount;
10608 
10609 	/*
10610 	 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
10611 	 * owner takes ownership of all new files.
10612 	 */
10613 	if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
10614 		VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
10615 		defaulted_owner = 1;
10616 	} else {
10617 		if (!VATTR_IS_ACTIVE(vap, va_uid)) {
10618 			/* default owner is current user */
10619 			VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
10620 			defaulted_owner = 1;
10621 		}
10622 	}
10623 
10624 	/*
10625 	 * We need the dvp's va_flags and *may* need the gid of the directory,
10626 	 * we ask for both here.
10627 	 */
10628 	VATTR_INIT(&dva);
10629 	VATTR_WANTED(&dva, va_gid);
10630 	VATTR_WANTED(&dva, va_flags);
10631 	if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) {
10632 		goto out;
10633 	}
10634 
10635 	/*
10636 	 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit grouo is set, that
10637 	 * group takes ownership of all new files.
10638 	 */
10639 	if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
10640 		VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
10641 		defaulted_group = 1;
10642 	} else {
10643 		if (!VATTR_IS_ACTIVE(vap, va_gid)) {
10644 			/* default group comes from parent object, fallback to current user */
10645 			if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
10646 				VATTR_SET(vap, va_gid, dva.va_gid);
10647 			} else {
10648 				VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
10649 			}
10650 			defaulted_group = 1;
10651 		}
10652 	}
10653 
10654 	if (!VATTR_IS_ACTIVE(vap, va_flags)) {
10655 		VATTR_SET(vap, va_flags, 0);
10656 	}
10657 
10658 	/* Determine if SF_RESTRICTED should be inherited from the parent
10659 	 * directory. */
10660 	if (VATTR_IS_SUPPORTED(&dva, va_flags)) {
10661 		inherit_flags = dva.va_flags & (UF_DATAVAULT | SF_RESTRICTED);
10662 	}
10663 
10664 	/* default mode is everything, masked with current umask */
10665 	if (!VATTR_IS_ACTIVE(vap, va_mode)) {
10666 		VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd.fd_cmask);
10667 		KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o",
10668 		    vap->va_mode, vfs_context_proc(ctx)->p_fd.fd_cmask);
10669 		defaulted_mode = 1;
10670 	}
10671 	/* set timestamps to now */
10672 	if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
10673 		nanotime(&vap->va_create_time);
10674 		VATTR_SET_ACTIVE(vap, va_create_time);
10675 	}
10676 
10677 	/*
10678 	 * Check for attempts to set nonsensical fields.
10679 	 */
10680 	if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
10681 		error = EINVAL;
10682 		KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
10683 		    vap->va_active & ~VNODE_ATTR_NEWOBJ);
10684 		goto out;
10685 	}
10686 
10687 	/*
10688 	 * Quickly check for the applicability of any enforcement here.
10689 	 * Tests below maintain the integrity of the local security model.
10690 	 */
10691 	if (vfs_authopaque(dvp->v_mount)) {
10692 		goto out;
10693 	}
10694 
10695 	/*
10696 	 * We need to know if the caller is the superuser, or if the work is
10697 	 * otherwise already authorised.
10698 	 */
10699 	cred = vfs_context_ucred(ctx);
10700 	if (noauth) {
10701 		/* doing work for the kernel */
10702 		has_priv_suser = 1;
10703 	} else {
10704 		has_priv_suser = vfs_context_issuser(ctx);
10705 	}
10706 
10707 
10708 	if (VATTR_IS_ACTIVE(vap, va_flags)) {
10709 		vap->va_flags &= ~SF_SYNTHETIC;
10710 		if (has_priv_suser) {
10711 			if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
10712 				error = EPERM;
10713 				KAUTH_DEBUG("  DENIED - superuser attempt to set illegal flag(s)");
10714 				goto out;
10715 			}
10716 		} else {
10717 			if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
10718 				error = EPERM;
10719 				KAUTH_DEBUG("  DENIED - user attempt to set illegal flag(s)");
10720 				goto out;
10721 			}
10722 		}
10723 	}
10724 
10725 	/* if not superuser, validate legality of new-item attributes */
10726 	if (!has_priv_suser) {
10727 		if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
10728 			/* setgid? */
10729 			if (vap->va_mode & S_ISGID) {
10730 				if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
10731 					KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
10732 					goto out;
10733 				}
10734 				if (!ismember) {
10735 					KAUTH_DEBUG("  DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
10736 					error = EPERM;
10737 					goto out;
10738 				}
10739 			}
10740 
10741 			/* setuid? */
10742 			if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
10743 				KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
10744 				error = EPERM;
10745 				goto out;
10746 			}
10747 		}
10748 		if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
10749 			KAUTH_DEBUG("  DENIED - cannot create new item owned by %d", vap->va_uid);
10750 			error = EPERM;
10751 			goto out;
10752 		}
10753 		if (!defaulted_group) {
10754 			if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
10755 				KAUTH_DEBUG("  ERROR - got %d checking for membership in %d", error, vap->va_gid);
10756 				goto out;
10757 			}
10758 			if (!ismember) {
10759 				KAUTH_DEBUG("  DENIED - cannot create new item with group %d - not a member", vap->va_gid);
10760 				error = EPERM;
10761 				goto out;
10762 			}
10763 		}
10764 
10765 		/* initialising owner/group UUID */
10766 		if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
10767 			if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
10768 				KAUTH_DEBUG("  ERROR - got %d trying to get caller UUID", error);
10769 				/* XXX ENOENT here - no GUID - should perhaps become EPERM */
10770 				goto out;
10771 			}
10772 			if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
10773 				KAUTH_DEBUG("  ERROR - cannot create item with supplied owner UUID - not us");
10774 				error = EPERM;
10775 				goto out;
10776 			}
10777 		}
10778 		if (VATTR_IS_ACTIVE(vap, va_guuid)) {
10779 			if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
10780 				KAUTH_DEBUG("  ERROR - got %d trying to check group membership", error);
10781 				goto out;
10782 			}
10783 			if (!ismember) {
10784 				KAUTH_DEBUG("  ERROR - cannot create item with supplied group UUID - not a member");
10785 				error = EPERM;
10786 				goto out;
10787 			}
10788 		}
10789 	}
10790 out:
10791 	if (inherit_flags) {
10792 		/* Apply SF_RESTRICTED to the file if its parent directory was
10793 		 * restricted.  This is done at the end so that root is not
10794 		 * required if this flag is only set due to inheritance. */
10795 		VATTR_SET(vap, va_flags, (vap->va_flags | inherit_flags));
10796 	}
10797 	if (defaulted_fieldsp) {
10798 		if (defaulted_mode) {
10799 			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_MODE;
10800 		}
10801 		if (defaulted_group) {
10802 			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_GID;
10803 		}
10804 		if (defaulted_owner) {
10805 			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_UID;
10806 		}
10807 	}
10808 	return error;
10809 }
10810 
10811 /*
10812  * Check that the attribute information in vap can be legally written by the
10813  * context.
10814  *
10815  * Call this when you're not sure about the vnode_attr; either its contents
10816  * have come from an unknown source, or when they are variable.
10817  *
10818  * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
10819  * must be authorized to be permitted to write the vattr.
10820  */
10821 int
vnode_authattr(vnode_t vp,struct vnode_attr * vap,kauth_action_t * actionp,vfs_context_t ctx)10822 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
10823 {
10824 	struct vnode_attr ova;
10825 	kauth_action_t  required_action;
10826 	int             error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
10827 	guid_t          changer;
10828 	gid_t           group;
10829 	uid_t           owner;
10830 	mode_t          newmode;
10831 	kauth_cred_t    cred;
10832 	uint32_t        fdelta;
10833 
10834 	VATTR_INIT(&ova);
10835 	required_action = 0;
10836 	error = 0;
10837 
10838 	/*
10839 	 * Quickly check for enforcement applicability.
10840 	 */
10841 	if (vfs_authopaque(vp->v_mount)) {
10842 		goto out;
10843 	}
10844 
10845 	/*
10846 	 * Check for attempts to set nonsensical fields.
10847 	 */
10848 	if (vap->va_active & VNODE_ATTR_RDONLY) {
10849 		KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
10850 		error = EINVAL;
10851 		goto out;
10852 	}
10853 
10854 	/*
10855 	 * We need to know if the caller is the superuser.
10856 	 */
10857 	cred = vfs_context_ucred(ctx);
10858 	has_priv_suser = kauth_cred_issuser(cred);
10859 
10860 	/*
10861 	 * If any of the following are changing, we need information from the old file:
10862 	 * va_uid
10863 	 * va_gid
10864 	 * va_mode
10865 	 * va_uuuid
10866 	 * va_guuid
10867 	 */
10868 	if (VATTR_IS_ACTIVE(vap, va_uid) ||
10869 	    VATTR_IS_ACTIVE(vap, va_gid) ||
10870 	    VATTR_IS_ACTIVE(vap, va_mode) ||
10871 	    VATTR_IS_ACTIVE(vap, va_uuuid) ||
10872 	    VATTR_IS_ACTIVE(vap, va_guuid)) {
10873 		VATTR_WANTED(&ova, va_mode);
10874 		VATTR_WANTED(&ova, va_uid);
10875 		VATTR_WANTED(&ova, va_gid);
10876 		VATTR_WANTED(&ova, va_uuuid);
10877 		VATTR_WANTED(&ova, va_guuid);
10878 		KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
10879 	}
10880 
10881 	/*
10882 	 * If timestamps are being changed, we need to know who the file is owned
10883 	 * by.
10884 	 */
10885 	if (VATTR_IS_ACTIVE(vap, va_create_time) ||
10886 	    VATTR_IS_ACTIVE(vap, va_change_time) ||
10887 	    VATTR_IS_ACTIVE(vap, va_modify_time) ||
10888 	    VATTR_IS_ACTIVE(vap, va_access_time) ||
10889 	    VATTR_IS_ACTIVE(vap, va_backup_time) ||
10890 	    VATTR_IS_ACTIVE(vap, va_addedtime)) {
10891 		VATTR_WANTED(&ova, va_uid);
10892 #if 0   /* enable this when we support UUIDs as official owners */
10893 		VATTR_WANTED(&ova, va_uuuid);
10894 #endif
10895 		KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
10896 	}
10897 
10898 	/*
10899 	 * If flags are being changed, we need the old flags.
10900 	 */
10901 	if (VATTR_IS_ACTIVE(vap, va_flags)) {
10902 		KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
10903 		VATTR_WANTED(&ova, va_flags);
10904 	}
10905 
10906 	/*
10907 	 * If ACLs are being changed, we need the old ACLs.
10908 	 */
10909 	if (VATTR_IS_ACTIVE(vap, va_acl)) {
10910 		KAUTH_DEBUG("ATTR - acl changing, fetching old flags");
10911 		VATTR_WANTED(&ova, va_acl);
10912 	}
10913 
10914 	/*
10915 	 * If the size is being set, make sure it's not a directory.
10916 	 */
10917 	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
10918 		/* size is only meaningful on regular files, don't permit otherwise */
10919 		if (!vnode_isreg(vp)) {
10920 			KAUTH_DEBUG("ATTR - ERROR: size change requested on non-file");
10921 			error = vnode_isdir(vp) ? EISDIR : EINVAL;
10922 			goto out;
10923 		}
10924 	}
10925 
10926 	/*
10927 	 * Get old data.
10928 	 */
10929 	KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
10930 	if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
10931 		KAUTH_DEBUG("  ERROR - got %d trying to get attributes", error);
10932 		goto out;
10933 	}
10934 
10935 	/*
10936 	 * Size changes require write access to the file data.
10937 	 */
10938 	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
10939 		/* if we can't get the size, or it's different, we need write access */
10940 		KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
10941 		required_action |= KAUTH_VNODE_WRITE_DATA;
10942 	}
10943 
10944 	/*
10945 	 * Changing timestamps?
10946 	 *
10947 	 * Note that we are only called to authorize user-requested time changes;
10948 	 * side-effect time changes are not authorized.  Authorisation is only
10949 	 * required for existing files.
10950 	 *
10951 	 * Non-owners are not permitted to change the time on an existing
10952 	 * file to anything other than the current time.
10953 	 */
10954 	if (VATTR_IS_ACTIVE(vap, va_create_time) ||
10955 	    VATTR_IS_ACTIVE(vap, va_change_time) ||
10956 	    VATTR_IS_ACTIVE(vap, va_modify_time) ||
10957 	    VATTR_IS_ACTIVE(vap, va_access_time) ||
10958 	    VATTR_IS_ACTIVE(vap, va_backup_time) ||
10959 	    VATTR_IS_ACTIVE(vap, va_addedtime)) {
10960 		/*
10961 		 * The owner and root may set any timestamps they like,
10962 		 * provided that the file is not immutable.  The owner still needs
10963 		 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
10964 		 */
10965 		if (has_priv_suser || vauth_node_owner(&ova, cred)) {
10966 			KAUTH_DEBUG("ATTR - root or owner changing timestamps");
10967 			required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
10968 		} else {
10969 			/* just setting the current time? */
10970 			if (vap->va_vaflags & VA_UTIMES_NULL) {
10971 				KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
10972 				required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
10973 			} else {
10974 				KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
10975 				error = EACCES;
10976 				goto out;
10977 			}
10978 		}
10979 	}
10980 
10981 	/*
10982 	 * Changing file mode?
10983 	 */
10984 	if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
10985 		KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
10986 
10987 		/*
10988 		 * Mode changes always have the same basic auth requirements.
10989 		 */
10990 		if (has_priv_suser) {
10991 			KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
10992 			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
10993 		} else {
10994 			/* need WRITE_SECURITY */
10995 			KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
10996 			required_action |= KAUTH_VNODE_WRITE_SECURITY;
10997 		}
10998 
10999 		/*
11000 		 * Can't set the setgid bit if you're not in the group and not root.  Have to have
11001 		 * existing group information in the case we're not setting it right now.
11002 		 */
11003 		if (vap->va_mode & S_ISGID) {
11004 			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;  /* always required */
11005 			if (!has_priv_suser) {
11006 				if (VATTR_IS_ACTIVE(vap, va_gid)) {
11007 					group = vap->va_gid;
11008 				} else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
11009 					group = ova.va_gid;
11010 				} else {
11011 					KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
11012 					error = EINVAL;
11013 					goto out;
11014 				}
11015 				/*
11016 				 * This might be too restrictive; WRITE_SECURITY might be implied by
11017 				 * membership in this case, rather than being an additional requirement.
11018 				 */
11019 				if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
11020 					KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
11021 					goto out;
11022 				}
11023 				if (!ismember) {
11024 					KAUTH_DEBUG("  DENIED - can't set SGID bit, not a member of %d", group);
11025 					error = EPERM;
11026 					goto out;
11027 				}
11028 			}
11029 		}
11030 
11031 		/*
11032 		 * Can't set the setuid bit unless you're root or the file's owner.
11033 		 */
11034 		if (vap->va_mode & S_ISUID) {
11035 			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;  /* always required */
11036 			if (!has_priv_suser) {
11037 				if (VATTR_IS_ACTIVE(vap, va_uid)) {
11038 					owner = vap->va_uid;
11039 				} else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
11040 					owner = ova.va_uid;
11041 				} else {
11042 					KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
11043 					error = EINVAL;
11044 					goto out;
11045 				}
11046 				if (owner != kauth_cred_getuid(cred)) {
11047 					/*
11048 					 * We could allow this if WRITE_SECURITY is permitted, perhaps.
11049 					 */
11050 					KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
11051 					error = EPERM;
11052 					goto out;
11053 				}
11054 			}
11055 		}
11056 	}
11057 
11058 	/*
11059 	 * Validate/mask flags changes.  This checks that only the flags in
11060 	 * the UF_SETTABLE mask are being set, and preserves the flags in
11061 	 * the SF_SETTABLE case.
11062 	 *
11063 	 * Since flags changes may be made in conjunction with other changes,
11064 	 * we will ask the auth code to ignore immutability in the case that
11065 	 * the SF_* flags are not set and we are only manipulating the file flags.
11066 	 *
11067 	 */
11068 	if (VATTR_IS_ACTIVE(vap, va_flags)) {
11069 		/* compute changing flags bits */
11070 		vap->va_flags &= ~SF_SYNTHETIC;
11071 		ova.va_flags &= ~SF_SYNTHETIC;
11072 		if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
11073 			fdelta = vap->va_flags ^ ova.va_flags;
11074 		} else {
11075 			fdelta = vap->va_flags;
11076 		}
11077 
11078 		if (fdelta != 0) {
11079 			KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
11080 			required_action |= KAUTH_VNODE_WRITE_SECURITY;
11081 
11082 			/* check that changing bits are legal */
11083 			if (has_priv_suser) {
11084 				/*
11085 				 * The immutability check will prevent us from clearing the SF_*
11086 				 * flags unless the system securelevel permits it, so just check
11087 				 * for legal flags here.
11088 				 */
11089 				if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
11090 					error = EPERM;
11091 					KAUTH_DEBUG("  DENIED - superuser attempt to set illegal flag(s)");
11092 					goto out;
11093 				}
11094 			} else {
11095 				if (fdelta & ~UF_SETTABLE) {
11096 					error = EPERM;
11097 					KAUTH_DEBUG("  DENIED - user attempt to set illegal flag(s)");
11098 					goto out;
11099 				}
11100 			}
11101 			/*
11102 			 * If the caller has the ability to manipulate file flags,
11103 			 * security is not reduced by ignoring them for this operation.
11104 			 *
11105 			 * A more complete test here would consider the 'after' states of the flags
11106 			 * to determine whether it would permit the operation, but this becomes
11107 			 * very complex.
11108 			 *
11109 			 * Ignoring immutability is conditional on securelevel; this does not bypass
11110 			 * the SF_* flags if securelevel > 0.
11111 			 */
11112 			required_action |= KAUTH_VNODE_NOIMMUTABLE;
11113 		}
11114 	}
11115 
11116 	/*
11117 	 * Validate ownership information.
11118 	 */
11119 	chowner = 0;
11120 	chgroup = 0;
11121 	clear_suid = 0;
11122 	clear_sgid = 0;
11123 
11124 	/*
11125 	 * uid changing
11126 	 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
11127 	 * support them in general, and will ignore it if/when we try to set it.
11128 	 * We might want to clear the uid out of vap completely here.
11129 	 */
11130 	if (VATTR_IS_ACTIVE(vap, va_uid)) {
11131 		if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
11132 			if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
11133 				KAUTH_DEBUG("  DENIED - non-superuser cannot change ownershipt to a third party");
11134 				error = EPERM;
11135 				goto out;
11136 			}
11137 			chowner = 1;
11138 		}
11139 		clear_suid = 1;
11140 	}
11141 
11142 	/*
11143 	 * gid changing
11144 	 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
11145 	 * support them in general, and will ignore it if/when we try to set it.
11146 	 * We might want to clear the gid out of vap completely here.
11147 	 */
11148 	if (VATTR_IS_ACTIVE(vap, va_gid)) {
11149 		if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
11150 			if (!has_priv_suser) {
11151 				if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
11152 					KAUTH_DEBUG("  ERROR - got %d checking for membership in %d", error, vap->va_gid);
11153 					goto out;
11154 				}
11155 				if (!ismember) {
11156 					KAUTH_DEBUG("  DENIED - group change from %d to %d but not a member of target group",
11157 					    ova.va_gid, vap->va_gid);
11158 					error = EPERM;
11159 					goto out;
11160 				}
11161 			}
11162 			chgroup = 1;
11163 		}
11164 		clear_sgid = 1;
11165 	}
11166 
11167 	/*
11168 	 * Owner UUID being set or changed.
11169 	 */
11170 	if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
11171 		/* if the owner UUID is not actually changing ... */
11172 		if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
11173 			if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid)) {
11174 				goto no_uuuid_change;
11175 			}
11176 
11177 			/*
11178 			 * If the current owner UUID is a null GUID, check
11179 			 * it against the UUID corresponding to the owner UID.
11180 			 */
11181 			if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
11182 			    VATTR_IS_SUPPORTED(&ova, va_uid)) {
11183 				guid_t uid_guid;
11184 
11185 				if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
11186 				    kauth_guid_equal(&vap->va_uuuid, &uid_guid)) {
11187 					goto no_uuuid_change;
11188 				}
11189 			}
11190 		}
11191 
11192 		/*
11193 		 * The owner UUID cannot be set by a non-superuser to anything other than
11194 		 * their own or a null GUID (to "unset" the owner UUID).
11195 		 * Note that file systems must be prepared to handle the
11196 		 * null UUID case in a manner appropriate for that file
11197 		 * system.
11198 		 */
11199 		if (!has_priv_suser) {
11200 			if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
11201 				KAUTH_DEBUG("  ERROR - got %d trying to get caller UUID", error);
11202 				/* XXX ENOENT here - no UUID - should perhaps become EPERM */
11203 				goto out;
11204 			}
11205 			if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
11206 			    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
11207 				KAUTH_DEBUG("  ERROR - cannot set supplied owner UUID - not us / null");
11208 				error = EPERM;
11209 				goto out;
11210 			}
11211 		}
11212 		chowner = 1;
11213 		clear_suid = 1;
11214 	}
11215 no_uuuid_change:
11216 	/*
11217 	 * Group UUID being set or changed.
11218 	 */
11219 	if (VATTR_IS_ACTIVE(vap, va_guuid)) {
11220 		/* if the group UUID is not actually changing ... */
11221 		if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
11222 			if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid)) {
11223 				goto no_guuid_change;
11224 			}
11225 
11226 			/*
11227 			 * If the current group UUID is a null UUID, check
11228 			 * it against the UUID corresponding to the group GID.
11229 			 */
11230 			if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
11231 			    VATTR_IS_SUPPORTED(&ova, va_gid)) {
11232 				guid_t gid_guid;
11233 
11234 				if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
11235 				    kauth_guid_equal(&vap->va_guuid, &gid_guid)) {
11236 					goto no_guuid_change;
11237 				}
11238 			}
11239 		}
11240 
11241 		/*
11242 		 * The group UUID cannot be set by a non-superuser to anything other than
11243 		 * one of which they are a member or a null GUID (to "unset"
11244 		 * the group UUID).
11245 		 * Note that file systems must be prepared to handle the
11246 		 * null UUID case in a manner appropriate for that file
11247 		 * system.
11248 		 */
11249 		if (!has_priv_suser) {
11250 			if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
11251 				ismember = 1;
11252 			} else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
11253 				KAUTH_DEBUG("  ERROR - got %d trying to check group membership", error);
11254 				goto out;
11255 			}
11256 			if (!ismember) {
11257 				KAUTH_DEBUG("  ERROR - cannot set supplied group UUID - not a member / null");
11258 				error = EPERM;
11259 				goto out;
11260 			}
11261 		}
11262 		chgroup = 1;
11263 	}
11264 no_guuid_change:
11265 
11266 	/*
11267 	 * Compute authorisation for group/ownership changes.
11268 	 */
11269 	if (chowner || chgroup || clear_suid || clear_sgid) {
11270 		if (has_priv_suser) {
11271 			KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
11272 			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
11273 		} else {
11274 			if (chowner) {
11275 				KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
11276 				required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
11277 			}
11278 			if (chgroup && !chowner) {
11279 				KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
11280 				required_action |= KAUTH_VNODE_WRITE_SECURITY;
11281 			}
11282 		}
11283 
11284 		/*
11285 		 * clear set-uid and set-gid bits. POSIX only requires this for
11286 		 * non-privileged processes but we do it even for root.
11287 		 */
11288 		if (VATTR_IS_ACTIVE(vap, va_mode)) {
11289 			newmode = vap->va_mode;
11290 		} else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
11291 			newmode = ova.va_mode;
11292 		} else {
11293 			KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
11294 			newmode = 0;
11295 		}
11296 
11297 		/* chown always clears setuid/gid bits. An exception is made for
11298 		 * setattrlist which can set both at the same time: <uid, gid, mode> on a file:
11299 		 * setattrlist is allowed to set the new mode on the file and change (chown)
11300 		 * uid/gid.
11301 		 */
11302 		if (newmode & (S_ISUID | S_ISGID)) {
11303 			if (!VATTR_IS_ACTIVE(vap, va_mode)) {
11304 				KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o",
11305 				    newmode, newmode & ~(S_ISUID | S_ISGID));
11306 				newmode &= ~(S_ISUID | S_ISGID);
11307 			}
11308 			VATTR_SET(vap, va_mode, newmode);
11309 		}
11310 	}
11311 
11312 	/*
11313 	 * Authorise changes in the ACL.
11314 	 */
11315 	if (VATTR_IS_ACTIVE(vap, va_acl)) {
11316 		/* no existing ACL */
11317 		if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
11318 			/* adding an ACL */
11319 			if (vap->va_acl != NULL) {
11320 				required_action |= KAUTH_VNODE_WRITE_SECURITY;
11321 				KAUTH_DEBUG("CHMOD - adding ACL");
11322 			}
11323 
11324 			/* removing an existing ACL */
11325 		} else if (vap->va_acl == NULL) {
11326 			required_action |= KAUTH_VNODE_WRITE_SECURITY;
11327 			KAUTH_DEBUG("CHMOD - removing ACL");
11328 
11329 			/* updating an existing ACL */
11330 		} else {
11331 			if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
11332 				/* entry count changed, must be different */
11333 				required_action |= KAUTH_VNODE_WRITE_SECURITY;
11334 				KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
11335 			} else if (vap->va_acl->acl_entrycount > 0) {
11336 				/* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
11337 				if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
11338 				    sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) {
11339 					required_action |= KAUTH_VNODE_WRITE_SECURITY;
11340 					KAUTH_DEBUG("CHMOD - changing ACL entries");
11341 				}
11342 			}
11343 		}
11344 	}
11345 
11346 	/*
11347 	 * Other attributes that require authorisation.
11348 	 */
11349 	if (VATTR_IS_ACTIVE(vap, va_encoding)) {
11350 		required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
11351 	}
11352 
11353 out:
11354 	if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL)) {
11355 		kauth_acl_free(ova.va_acl);
11356 	}
11357 	if (error == 0) {
11358 		*actionp = required_action;
11359 	}
11360 	return error;
11361 }
11362 
11363 static int
setlocklocal_callback(struct vnode * vp,__unused void * cargs)11364 setlocklocal_callback(struct vnode *vp, __unused void *cargs)
11365 {
11366 	vnode_lock_spin(vp);
11367 	vp->v_flag |= VLOCKLOCAL;
11368 	vnode_unlock(vp);
11369 
11370 	return VNODE_RETURNED;
11371 }
11372 
11373 void
vfs_setlocklocal(mount_t mp)11374 vfs_setlocklocal(mount_t mp)
11375 {
11376 	mount_lock_spin(mp);
11377 	mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
11378 	mount_unlock(mp);
11379 
11380 	/*
11381 	 * The number of active vnodes is expected to be
11382 	 * very small when vfs_setlocklocal is invoked.
11383 	 */
11384 	vnode_iterate(mp, 0, setlocklocal_callback, NULL);
11385 }
11386 
11387 void
vfs_setcompoundopen(mount_t mp)11388 vfs_setcompoundopen(mount_t mp)
11389 {
11390 	mount_lock_spin(mp);
11391 	mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN;
11392 	mount_unlock(mp);
11393 }
11394 
11395 void
vnode_setswapmount(vnode_t vp)11396 vnode_setswapmount(vnode_t vp)
11397 {
11398 	mount_lock(vp->v_mount);
11399 	vp->v_mount->mnt_kern_flag |= MNTK_SWAP_MOUNT;
11400 	mount_unlock(vp->v_mount);
11401 }
11402 
11403 void
vfs_setfskit(mount_t mp)11404 vfs_setfskit(mount_t mp)
11405 {
11406 	mount_lock_spin(mp);
11407 	mp->mnt_kern_flag |= MNTK_FSKIT;
11408 	mount_unlock(mp);
11409 }
11410 
11411 uint32_t
vfs_getextflags(mount_t mp)11412 vfs_getextflags(mount_t mp)
11413 {
11414 	uint32_t flags_ext = 0;
11415 
11416 	if (mp->mnt_kern_flag & MNTK_SYSTEMDATA) {
11417 		flags_ext |= MNT_EXT_ROOT_DATA_VOL;
11418 	}
11419 	if (mp->mnt_kern_flag & MNTK_FSKIT) {
11420 		flags_ext |= MNT_EXT_FSKIT;
11421 	}
11422 	return flags_ext;
11423 }
11424 
11425 char *
vfs_getfstypenameref_locked(mount_t mp,size_t * lenp)11426 vfs_getfstypenameref_locked(mount_t mp, size_t *lenp)
11427 {
11428 	char *name;
11429 
11430 	if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
11431 		name = mp->fstypename_override;
11432 	} else {
11433 		name = mp->mnt_vfsstat.f_fstypename;
11434 	}
11435 	if (lenp != NULL) {
11436 		*lenp = strlen(name);
11437 	}
11438 	return name;
11439 }
11440 
11441 void
vfs_getfstypename(mount_t mp,char * buf,size_t buflen)11442 vfs_getfstypename(mount_t mp, char *buf, size_t buflen)
11443 {
11444 	mount_lock_spin(mp);
11445 	strlcpy(buf, vfs_getfstypenameref_locked(mp, NULL), buflen);
11446 	mount_unlock(mp);
11447 }
11448 
11449 void
vfs_setfstypename_locked(mount_t mp,const char * name)11450 vfs_setfstypename_locked(mount_t mp, const char *name)
11451 {
11452 	if (name == NULL || name[0] == '\0') {
11453 		mp->mnt_kern_flag &= ~MNTK_TYPENAME_OVERRIDE;
11454 		mp->fstypename_override[0] = '\0';
11455 	} else {
11456 		strlcpy(mp->fstypename_override, name,
11457 		    sizeof(mp->fstypename_override));
11458 		mp->mnt_kern_flag |= MNTK_TYPENAME_OVERRIDE;
11459 	}
11460 }
11461 
11462 void
vfs_setfstypename(mount_t mp,const char * name)11463 vfs_setfstypename(mount_t mp, const char *name)
11464 {
11465 	mount_lock_spin(mp);
11466 	vfs_setfstypename_locked(mp, name);
11467 	mount_unlock(mp);
11468 }
11469 
11470 int64_t
vnode_getswappin_avail(vnode_t vp)11471 vnode_getswappin_avail(vnode_t vp)
11472 {
11473 	int64_t max_swappin_avail = 0;
11474 
11475 	mount_lock(vp->v_mount);
11476 	if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_SWAPPIN_SUPPORTED) {
11477 		max_swappin_avail = vp->v_mount->mnt_max_swappin_available;
11478 	}
11479 	mount_unlock(vp->v_mount);
11480 
11481 	return max_swappin_avail;
11482 }
11483 
11484 
11485 void
vn_setunionwait(vnode_t vp)11486 vn_setunionwait(vnode_t vp)
11487 {
11488 	vnode_lock_spin(vp);
11489 	vp->v_flag |= VISUNION;
11490 	vnode_unlock(vp);
11491 }
11492 
11493 
11494 void
vn_checkunionwait(vnode_t vp)11495 vn_checkunionwait(vnode_t vp)
11496 {
11497 	vnode_lock_spin(vp);
11498 	while ((vp->v_flag & VISUNION) == VISUNION) {
11499 		msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
11500 	}
11501 	vnode_unlock(vp);
11502 }
11503 
11504 void
vn_clearunionwait(vnode_t vp,int locked)11505 vn_clearunionwait(vnode_t vp, int locked)
11506 {
11507 	if (!locked) {
11508 		vnode_lock_spin(vp);
11509 	}
11510 	if ((vp->v_flag & VISUNION) == VISUNION) {
11511 		vp->v_flag &= ~VISUNION;
11512 		wakeup((caddr_t)&vp->v_flag);
11513 	}
11514 	if (!locked) {
11515 		vnode_unlock(vp);
11516 	}
11517 }
11518 
11519 /*
11520  * Removes orphaned apple double files during a rmdir
11521  * Works by:
11522  * 1. vnode_suspend().
11523  * 2. Call VNOP_READDIR() till the end of directory is reached.
11524  * 3. Check if the directory entries returned are regular files with name starting with "._".  If not, return ENOTEMPTY.
11525  * 4. Continue (2) and (3) till end of directory is reached.
11526  * 5. If all the entries in the directory were files with "._" name, delete all the files.
11527  * 6. vnode_resume()
11528  * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
11529  */
11530 
/*
 * Delete orphaned AppleDouble ("._*") files from directory 'vp' so a
 * subsequent VNOP_RMDIR() can succeed (full algorithm in the block
 * comment above).
 *
 * vp           - directory vnode (caller holds an iocount)
 * ctx          - VFS context for VNOP_OPEN/READDIR/CLOSE and unlink1()
 * restart_flag - set to 1 when vnode_suspend() returns EBUSY so the
 *                calling rmdir sleeps and retries
 *
 * Returns 0 if every entry was a "._" file and all were removed,
 * ENOTEMPTY if any other entry exists, or another errno on failure.
 */
errno_t
rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int * restart_flag)
{
#define UIO_BUFF_SIZE 2048
	uio_t auio = NULL;
	int eofflag, siz = UIO_BUFF_SIZE, alloc_size = 0, nentries = 0;
	int open_flag = 0, full_erase_flag = 0;
	UIO_STACKBUF(uio_buf, 1);
	char *rbuf = NULL;
	void *dir_pos;
	void *dir_end;
	struct dirent *dp;
	errno_t error;

	/* Suspend the vnode so no new entries appear while we scan/delete. */
	error = vnode_suspend(vp);

	/*
	 * restart_flag is set so that the calling rmdir sleeps and resets
	 */
	if (error == EBUSY) {
		*restart_flag = 1;
	}
	if (error != 0) {
		return error;
	}

	/*
	 * Prevent dataless fault materialization while we have
	 * a suspended vnode.
	 */
	uthread_t ut = current_uthread();
	bool saved_nodatalessfaults =
	    (ut->uu_flag & UT_NSPACE_NODATALESSFAULTS) ? true : false;
	ut->uu_flag |= UT_NSPACE_NODATALESSFAULTS;

	/*
	 * set up UIO
	 */
	rbuf = kalloc_data(siz, Z_WAITOK);
	alloc_size = siz;
	if (rbuf) {
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
		    &uio_buf[0], sizeof(uio_buf));
	}
	if (!rbuf || !auio) {
		error = ENOMEM;
		goto outsc;
	}

	uio_setoffset(auio, 0);

	eofflag = 0;

	if ((error = VNOP_OPEN(vp, FREAD, ctx))) {
		goto outsc;
	} else {
		open_flag = 1;
	}

	/*
	 * First pass checks if all files are appleDouble files.
	 */

	do {
		siz = UIO_BUFF_SIZE;
		uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
		uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);

		if ((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx))) {
			goto outsc;
		}

		/* siz becomes the number of bytes actually produced by READDIR */
		if (uio_resid(auio) != 0) {
			siz -= uio_resid(auio);
		}

		/*
		 * Iterate through directory
		 */
		dir_pos = (void*) rbuf;
		dir_end = (void*) (rbuf + siz);
		dp = (struct dirent*) (dir_pos);

		if (dir_pos == dir_end) {
			eofflag = 1;
		}

		while (dir_pos < dir_end) {
			/*
			 * Check for . and .. as well as directories
			 */
			if (dp->d_ino != 0 &&
			    !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
				/*
				 * Check for irregular files and ._ files
				 * If there is a ._._ file abort the op
				 */
				if (dp->d_namlen < 2 ||
				    strncmp(dp->d_name, "._", 2) ||
				    (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._", 2))) {
					error = ENOTEMPTY;
					goto outsc;
				}
			}
			/* advance to the next (variable-length) dirent */
			dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
			dp = (struct dirent*)dir_pos;
		}

		/*
		 * workaround for HFS/NFS setting eofflag before end of file
		 */
		if (vp->v_tag == VT_HFS && nentries > 2) {
			eofflag = 0;
		}

		if (vp->v_tag == VT_NFS) {
			if (eofflag && !full_erase_flag) {
				/* NFS: rescan once from offset 0 after the first EOF */
				full_erase_flag = 1;
				eofflag = 0;
				uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
			} else if (!eofflag && full_erase_flag) {
				full_erase_flag = 0;
			}
		}
	} while (!eofflag);
	/*
	 * If we've made it here all the files in the dir are ._ files.
	 * We can delete the files even though the node is suspended
	 * because we are the owner of the file.
	 */

	/* Second pass: rewind and delete each "._" entry. */
	uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
	eofflag = 0;
	full_erase_flag = 0;

	do {
		siz = UIO_BUFF_SIZE;
		uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
		uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);

		error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);

		if (error != 0) {
			goto outsc;
		}

		if (uio_resid(auio) != 0) {
			siz -= uio_resid(auio);
		}

		/*
		 * Iterate through directory
		 */
		dir_pos = (void*) rbuf;
		dir_end = (void*) (rbuf + siz);
		dp = (struct dirent*) dir_pos;

		if (dir_pos == dir_end) {
			eofflag = 1;
		}

		while (dir_pos < dir_end) {
			/*
			 * Check for . and .. as well as directories
			 */
			if (dp->d_ino != 0 &&
			    !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
			    ) {
				error = unlink1(ctx, vp,
				    CAST_USER_ADDR_T(dp->d_name), UIO_SYSSPACE,
				    VNODE_REMOVE_SKIP_NAMESPACE_EVENT |
				    VNODE_REMOVE_NO_AUDIT_PATH);

				/* ENOENT is benign: the entry already disappeared */
				if (error && error != ENOENT) {
					goto outsc;
				}
			}
			dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
			dp = (struct dirent*)dir_pos;
		}

		/*
		 * workaround for HFS/NFS setting eofflag before end of file
		 */
		if (vp->v_tag == VT_HFS && nentries > 2) {
			eofflag = 0;
		}

		if (vp->v_tag == VT_NFS) {
			if (eofflag && !full_erase_flag) {
				full_erase_flag = 1;
				eofflag = 0;
				uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
			} else if (!eofflag && full_erase_flag) {
				full_erase_flag = 0;
			}
		}
	} while (!eofflag);


	error = 0;

outsc:
	/* Common cleanup: close, free buffers, restore thread flag, resume. */
	if (open_flag) {
		VNOP_CLOSE(vp, FREAD, ctx);
	}

	if (auio) {
		uio_free(auio);
	}
	kfree_data(rbuf, alloc_size);

	if (saved_nodatalessfaults == false) {
		ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS;
	}

	vnode_resume(vp);

	return error;
}
11753 
11754 
11755 void
lock_vnode_and_post(vnode_t vp,int kevent_num)11756 lock_vnode_and_post(vnode_t vp, int kevent_num)
11757 {
11758 	/* Only take the lock if there's something there! */
11759 	if (vp->v_knotes.slh_first != NULL) {
11760 		vnode_lock(vp);
11761 		KNOTE(&vp->v_knotes, kevent_num);
11762 		vnode_unlock(vp);
11763 	}
11764 }
11765 
11766 void panic_print_vnodes(void);
11767 
11768 /* define PANIC_PRINTS_VNODES only if investigation is required. */
11769 #ifdef PANIC_PRINTS_VNODES
11770 
11771 static const char *
__vtype(uint16_t vtype)11772 __vtype(uint16_t vtype)
11773 {
11774 	switch (vtype) {
11775 	case VREG:
11776 		return "R";
11777 	case VDIR:
11778 		return "D";
11779 	case VBLK:
11780 		return "B";
11781 	case VCHR:
11782 		return "C";
11783 	case VLNK:
11784 		return "L";
11785 	case VSOCK:
11786 		return "S";
11787 	case VFIFO:
11788 		return "F";
11789 	case VBAD:
11790 		return "x";
11791 	case VSTR:
11792 		return "T";
11793 	case VCPLX:
11794 		return "X";
11795 	default:
11796 		return "?";
11797 	}
11798 }
11799 
11800 /*
11801  * build a path from the bottom up
11802  * NOTE: called from the panic path - no alloc'ing of memory and no locks!
11803  */
/*
 * Build a path fragment for 'vp' into the tail of 'str' (a buffer with
 * 'len' writable bytes before the pre-placed terminator), recursing up
 * v_parent links and across covered mount points.  Returns a pointer to
 * the first character written.  depth == 0 marks the leaf vnode, whose
 * own name is intentionally not copied (see below).
 */
static char *
__vpath(vnode_t vp, char *str, int len, int depth)
{
	int vnm_len;
	const char *src;
	char *dst;

	if (len <= 0) {
		return str;
	}
	/* str + len is the start of the string we created */
	if (!vp->v_name) {
		return str + len;
	}

	/* follow mount vnodes to get the full path */
	if ((vp->v_flag & VROOT)) {
		if (vp->v_mount != NULL && vp->v_mount->mnt_vnodecovered) {
			/* continue from the vnode this mount covers */
			return __vpath(vp->v_mount->mnt_vnodecovered,
			           str, len, depth + 1);
		}
		return str + len;
	}

	src = vp->v_name;
	vnm_len = strlen(src);
	if (vnm_len > len) {
		/* truncate the name to fit in the string */
		src += (vnm_len - len);
		vnm_len = len;
	}

	/* start from the back and copy just characters (no NULLs) */

	/* this will chop off leaf path (file) names */
	if (depth > 0) {
		dst = str + len - vnm_len;
		memcpy(dst, src, vnm_len);
		len -= vnm_len;
	} else {
		dst = str + len;
	}

	if (vp->v_parent && len > 1) {
		/* follow parents up the chain */
		len--;
		*(dst - 1) = '/';
		return __vpath(vp->v_parent, str, len, depth + 1);
	}

	return dst;
}
11856 
11857 #define SANE_VNODE_PRINT_LIMIT 5000
/*
 * Dump type, usecount, iocount and a best-effort path for every vnode
 * on every mount to the panic log.  Runs in panic context: each mount
 * and vnode pointer is validated with ml_validate_nofault() before it
 * is dereferenced, and output is capped at SANE_VNODE_PRINT_LIMIT.
 */
void
panic_print_vnodes(void)
{
	mount_t mnt;
	vnode_t vp;
	int nvnodes = 0;
	const char *type;
	char *nm;
	char vname[257];

	paniclog_append_noflush("\n***** VNODES *****\n"
	    "TYPE UREF ICNT PATH\n");

	/* NULL-terminate the path name */
	vname[sizeof(vname) - 1] = '\0';

	/*
	 * iterate all vnodelist items in all mounts (mntlist) -> mnt_vnodelist
	 */
	TAILQ_FOREACH(mnt, &mountlist, mnt_list) {
		if (!ml_validate_nofault((vm_offset_t)mnt, sizeof(mount_t))) {
			paniclog_append_noflush("Unable to iterate the mount list %p - encountered an invalid mount pointer %p \n",
			    &mountlist, mnt);
			break;
		}

		TAILQ_FOREACH(vp, &mnt->mnt_vnodelist, v_mntvnodes) {
			if (!ml_validate_nofault((vm_offset_t)vp, sizeof(vnode_t))) {
				paniclog_append_noflush("Unable to iterate the vnode list %p - encountered an invalid vnode pointer %p \n",
				    &mnt->mnt_vnodelist, vp);
				break;
			}

			/* stop entirely once the global print cap is hit */
			if (++nvnodes > SANE_VNODE_PRINT_LIMIT) {
				return;
			}
			type = __vtype(vp->v_type);
			/* __vpath fills vname from the back; nm points at the start */
			nm = __vpath(vp, vname, sizeof(vname) - 1, 0);
			paniclog_append_noflush("%s %0d %0d %s\n",
			    type, vp->v_usecount, vp->v_iocount, nm);
		}
	}
}
11901 
11902 #else /* !PANIC_PRINTS_VNODES */
/* Stub used when PANIC_PRINTS_VNODES is not defined. */
void
panic_print_vnodes(void)
{
	/* nothing to do */
}
11908 #endif
11909 
11910 
11911 #ifdef CONFIG_IOCOUNT_TRACE
11912 static void
record_iocount_trace_vnode(vnode_t vp,int type)11913 record_iocount_trace_vnode(vnode_t vp, int type)
11914 {
11915 	void *stacks[IOCOUNT_TRACE_MAX_FRAMES] = {0};
11916 	int idx = vp->v_iocount_trace[type].idx;
11917 
11918 	if (idx >= IOCOUNT_TRACE_MAX_IDX) {
11919 		return;
11920 	}
11921 
11922 	OSBacktrace((void **)&stacks[0], IOCOUNT_TRACE_MAX_FRAMES);
11923 
11924 	/*
11925 	 * To save index space, only store the unique backtraces. If dup is found,
11926 	 * just bump the count and return.
11927 	 */
11928 	for (int i = 0; i < idx; i++) {
11929 		if (memcmp(&stacks[0], &vp->v_iocount_trace[type].stacks[i][0],
11930 		    sizeof(stacks)) == 0) {
11931 			vp->v_iocount_trace[type].counts[i]++;
11932 			return;
11933 		}
11934 	}
11935 
11936 	memcpy(&vp->v_iocount_trace[type].stacks[idx][0], &stacks[0],
11937 	    sizeof(stacks));
11938 	vp->v_iocount_trace[type].counts[idx] = 1;
11939 	vp->v_iocount_trace[type].idx++;
11940 }
11941 
11942 static void
record_iocount_trace_uthread(vnode_t vp,int count)11943 record_iocount_trace_uthread(vnode_t vp, int count)
11944 {
11945 	struct uthread *ut;
11946 
11947 	ut = current_uthread();
11948 	ut->uu_iocount += count;
11949 
11950 	if (count == 1) {
11951 		if (ut->uu_vpindex < 32) {
11952 			OSBacktrace((void **)&ut->uu_pcs[ut->uu_vpindex][0], 10);
11953 
11954 			ut->uu_vps[ut->uu_vpindex] = vp;
11955 			ut->uu_vpindex++;
11956 		}
11957 	}
11958 }
11959 
11960 static void
record_vp(vnode_t vp,int count)11961 record_vp(vnode_t vp, int count)
11962 {
11963 	if (__probable(bootarg_vnode_iocount_trace == 0 &&
11964 	    bootarg_uthread_iocount_trace == 0)) {
11965 		return;
11966 	}
11967 
11968 #if CONFIG_TRIGGERS
11969 	if (vp->v_resolve) {
11970 		return;
11971 	}
11972 #endif
11973 	if ((vp->v_flag & VSYSTEM)) {
11974 		return;
11975 	}
11976 
11977 	if (bootarg_vnode_iocount_trace) {
11978 		record_iocount_trace_vnode(vp,
11979 		    (count > 0) ? IOCOUNT_TRACE_VGET : IOCOUNT_TRACE_VPUT);
11980 	}
11981 	if (bootarg_uthread_iocount_trace) {
11982 		record_iocount_trace_uthread(vp, count);
11983 	}
11984 }
11985 #endif /* CONFIG_IOCOUNT_TRACE */
11986 
11987 #if CONFIG_TRIGGERS
11988 #define __triggers_unused
11989 #else
11990 #define __triggers_unused       __unused
11991 #endif
11992 
/*
 * Pack a resolver callback's outcome (sequence number, status, auxiliary
 * value) into a single resolver_result_t.  The inverse accessors are
 * vfs_resolver_status(), vfs_resolver_sequence() and
 * vfs_resolver_auxiliary().  Without CONFIG_TRIGGERS this always encodes
 * RESOLVER_ERROR with aux ENOTSUP.
 */
resolver_result_t
vfs_resolver_result(__triggers_unused uint32_t seq, __triggers_unused enum resolver_status stat, __triggers_unused int aux)
{
#if CONFIG_TRIGGERS
	/*
	 * |<---   32   --->|<---  28  --->|<- 4 ->|
	 *      sequence        auxiliary    status
	 */
	return (((uint64_t)seq) << 32) |
	       (((uint64_t)(aux & 0x0fffffff)) << 4) |
	       (uint64_t)(stat & 0x0000000F);
#else
	return (0x0ULL) | (((uint64_t)ENOTSUP) << 4) | (((uint64_t)RESOLVER_ERROR) & 0xF);
#endif
}
12008 
12009 #if CONFIG_TRIGGERS
12010 
/* Set TRIG_DEBUG to 1 for verbose trigger-mount logging via TRIG_LOG(). */
#define TRIG_DEBUG 0

#if TRIG_DEBUG
#define TRIG_LOG(...) do { printf("%s: ", __FUNCTION__); printf(__VA_ARGS__); } while (0)
#else
#define TRIG_LOG(...)
#endif
12018 
12019 /*
12020  * Resolver result functions
12021  */
12022 
12023 
12024 enum resolver_status
vfs_resolver_status(resolver_result_t result)12025 vfs_resolver_status(resolver_result_t result)
12026 {
12027 	/* lower 4 bits is status */
12028 	return result & 0x0000000F;
12029 }
12030 
12031 uint32_t
vfs_resolver_sequence(resolver_result_t result)12032 vfs_resolver_sequence(resolver_result_t result)
12033 {
12034 	/* upper 32 bits is sequence */
12035 	return (uint32_t)(result >> 32);
12036 }
12037 
12038 int
vfs_resolver_auxiliary(resolver_result_t result)12039 vfs_resolver_auxiliary(resolver_result_t result)
12040 {
12041 	/* 28 bits of auxiliary */
12042 	return (int)(((uint32_t)(result & 0xFFFFFFF0)) >> 4);
12043 }
12044 
12045 /*
12046  * SPI
12047  * Call in for resolvers to update vnode trigger state
12048  */
12049 int
vnode_trigger_update(vnode_t vp,resolver_result_t result)12050 vnode_trigger_update(vnode_t vp, resolver_result_t result)
12051 {
12052 	vnode_resolve_t rp;
12053 	uint32_t seq;
12054 	enum resolver_status stat;
12055 
12056 	if (vp->v_resolve == NULL) {
12057 		return EINVAL;
12058 	}
12059 
12060 	stat = vfs_resolver_status(result);
12061 	seq = vfs_resolver_sequence(result);
12062 
12063 	if ((stat != RESOLVER_RESOLVED) && (stat != RESOLVER_UNRESOLVED)) {
12064 		return EINVAL;
12065 	}
12066 
12067 	rp = vp->v_resolve;
12068 	lck_mtx_lock(&rp->vr_lock);
12069 
12070 	if (seq > rp->vr_lastseq) {
12071 		if (stat == RESOLVER_RESOLVED) {
12072 			rp->vr_flags |= VNT_RESOLVED;
12073 		} else {
12074 			rp->vr_flags &= ~VNT_RESOLVED;
12075 		}
12076 
12077 		rp->vr_lastseq = seq;
12078 	}
12079 
12080 	lck_mtx_unlock(&rp->vr_lock);
12081 
12082 	return 0;
12083 }
12084 
12085 static int
vnode_resolver_attach(vnode_t vp,vnode_resolve_t rp,boolean_t ref)12086 vnode_resolver_attach(vnode_t vp, vnode_resolve_t rp, boolean_t ref)
12087 {
12088 	int error;
12089 
12090 	vnode_lock_spin(vp);
12091 	if (vp->v_resolve != NULL) {
12092 		vnode_unlock(vp);
12093 		return EINVAL;
12094 	} else {
12095 		vp->v_resolve = rp;
12096 	}
12097 	vnode_unlock(vp);
12098 
12099 	if (ref) {
12100 		error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE);
12101 		if (error != 0) {
12102 			panic("VNODE_REF_FORCE didn't help...");
12103 		}
12104 	}
12105 
12106 	return 0;
12107 }
12108 
12109 /*
12110  * VFS internal interfaces for vnode triggers
12111  *
12112  * vnode must already have an io count on entry
12113  * v_resolve is stable when io count is non-zero
12114  */
/*
 * Allocate and initialize resolver state from 'tinfo' and attach it to
 * 'vp'.  'external' marks resolvers registered from outside VFS (via
 * vfs_addtrigger()); these also hold a usecount reference on the vnode.
 * On success the mount's trigger count is bumped (when mp != NULL).
 */
static int
vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, boolean_t external)
{
	vnode_resolve_t rp;
	int result;
	char byte;

#if 1
	/* minimum pointer test (debugging) */
	if (tinfo->vnt_data) {
		byte = *((char *)tinfo->vnt_data);
	}
#endif
	rp = kalloc_type(struct vnode_resolve, Z_WAITOK | Z_NOFAIL);

	lck_mtx_init(&rp->vr_lock, &trigger_vnode_lck_grp, &trigger_vnode_lck_attr);

	/* Copy the caller's callbacks and private data into the resolver. */
	rp->vr_resolve_func = tinfo->vnt_resolve_func;
	rp->vr_unresolve_func = tinfo->vnt_unresolve_func;
	rp->vr_rearm_func = tinfo->vnt_rearm_func;
	rp->vr_reclaim_func = tinfo->vnt_reclaim_func;
	rp->vr_data = tinfo->vnt_data;
	rp->vr_lastseq = 0;
	rp->vr_flags = tinfo->vnt_flags & VNT_VALID_MASK;
	if (external) {
		rp->vr_flags |= VNT_EXTERNAL;
	}

	result = vnode_resolver_attach(vp, rp, external);
	if (result != 0) {
		goto out;
	}

	/* Keep count of active trigger vnodes per mount. */
	if (mp) {
		OSAddAtomic(1, &mp->mnt_numtriggers);
	}

	return result;

out:
	/* Attach failed (vnode already had a resolver); free our state. */
	kfree_type(struct vnode_resolve, rp);
	return result;
}
12158 
12159 static void
vnode_resolver_release(vnode_resolve_t rp)12160 vnode_resolver_release(vnode_resolve_t rp)
12161 {
12162 	/*
12163 	 * Give them a chance to free any private data
12164 	 */
12165 	if (rp->vr_data && rp->vr_reclaim_func) {
12166 		rp->vr_reclaim_func(NULLVP, rp->vr_data);
12167 	}
12168 
12169 	lck_mtx_destroy(&rp->vr_lock, &trigger_vnode_lck_grp);
12170 	kfree_type(struct vnode_resolve, rp);
12171 }
12172 
/* Called after the vnode has been drained */
static void
vnode_resolver_detach(vnode_t vp)
{
	vnode_resolve_t rp;
	mount_t mp;

	mp = vnode_mount(vp);

	/* The drain guarantees no new accessors; swap out v_resolve. */
	vnode_lock(vp);
	rp = vp->v_resolve;
	vp->v_resolve = NULL;
	vnode_unlock(vp);

	/* Drop the usecount reference taken at attach time for external resolvers. */
	if ((rp->vr_flags & VNT_EXTERNAL) != 0) {
		vnode_rele_ext(vp, O_EVTONLY, 1);
	}

	vnode_resolver_release(rp);

	/* Keep count of active trigger vnodes per mount */
	OSAddAtomic(-1, &mp->mnt_numtriggers);
}
12196 
/*
 * Re-arm a trigger vnode: invoke the resolver's rearm callback so the
 * vnode can transition back to the unresolved state, then record the
 * callback's result if it is newer than the last one seen.
 * Caller holds an iocount on vp, which keeps v_resolve stable.
 */
__private_extern__
void
vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	/* Nothing to do unless a rearm callback exists and auto-rearm was requested. */
	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_rearm_func == NULL) ||
	    (vp->v_resolve->vr_flags & VNT_AUTO_REARM) == 0) {
		return;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/*
	 * Check if VFS initiated this unmount. If so, we'll catch it after the unresolve completes.
	 */
	if (rp->vr_flags & VNT_VFS_UNMOUNTED) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	/* Check if this vnode is already armed */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	lck_mtx_unlock(&rp->vr_lock);

	/* Call out without the lock held; the callback may block. */
	result = rp->vr_rearm_func(vp, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* Apply the result only if newer than the last recorded sequence. */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED) {
			rp->vr_flags &= ~VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);
}
12244 
/*
 * Resolve a trigger vnode during lookup: invoke the resolver's resolve
 * callback (typically mounting something on top of vp), record the
 * result, and propagate any resolver error back to the lookup.
 * Caller holds an iocount on vp, which keeps v_resolve stable.
 */
__private_extern__
int
vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	enum path_operation op;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	/*
	 * N.B. we cannot call vfs_context_can_resolve_triggers()
	 * here because we really only want to suppress that in
	 * the event the trigger will be resolved by something in
	 * user-space.  Any triggers that are resolved by the kernel
	 * do not pose a threat of deadlock.
	 */

	/* Only trigger on topmost vnodes */
	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_resolve_func == NULL) ||
	    (vp->v_mountedhere != NULL)) {
		return 0;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if (rp->vr_flags & VNT_RESOLVED) {
		lck_mtx_unlock(&rp->vr_lock);
		return 0;
	}

	lck_mtx_unlock(&rp->vr_lock);

#if CONFIG_MACF
	if ((rp->vr_flags & VNT_KERN_RESOLVE) == 0) {
		/*
		 * VNT_KERN_RESOLVE indicates this trigger has no parameters
		 * at the discretion of the accessing process other than
		 * the act of access. All other triggers must be checked
		 */
		int rv = mac_vnode_check_trigger_resolve(ctx, vp, &ndp->ni_cnd);
		if (rv != 0) {
			return rv;
		}
	}
#endif

	/*
	 * XXX
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * is there anyway to know this???
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 *
	 * XXX - should we use ISLASTCN to pick the op value???  Perhaps only leafs should
	 * get the richer set and non-leafs should get generic OP_LOOKUP?  TBD
	 */
	op = (ndp->ni_op < OP_MAXOP) ? ndp->ni_op: OP_LOOKUP;

	/* Call out without the lock held; the callback may block (e.g. to mount). */
	result = rp->vr_resolve_func(vp, &ndp->ni_cnd, op, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* Apply the result only if newer than the last recorded sequence. */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_RESOLVED) {
			rp->vr_flags |= VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
}
12324 
/*
 * Unresolve a trigger vnode (typically unmounting whatever the resolver
 * mounted on it).  VNT_VFS_UNMOUNTED is set around the callback so that
 * vnode_trigger_rearm() can tell this unmount was VFS-initiated.
 */
static int
vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_unresolve_func == NULL)) {
		return 0;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		printf("vnode_trigger_unresolve: not currently resolved\n");
		lck_mtx_unlock(&rp->vr_lock);
		return 0;
	}

	/* Mark the unmount as VFS-initiated for the duration of the callback. */
	rp->vr_flags |= VNT_VFS_UNMOUNTED;

	lck_mtx_unlock(&rp->vr_lock);

	/*
	 * XXX
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 */

	result = rp->vr_unresolve_func(vp, flags, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* Apply the result only if newer than the last recorded sequence. */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED) {
			rp->vr_flags &= ~VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	rp->vr_flags &= ~VNT_VFS_UNMOUNTED;
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
}
12376 
12377 static int
triggerisdescendant(mount_t mp,mount_t rmp)12378 triggerisdescendant(mount_t mp, mount_t rmp)
12379 {
12380 	int match = FALSE;
12381 
12382 	/*
12383 	 * walk up vnode covered chain looking for a match
12384 	 */
12385 	name_cache_lock_shared();
12386 
12387 	while (1) {
12388 		vnode_t vp;
12389 
12390 		/* did we encounter "/" ? */
12391 		if (mp->mnt_flag & MNT_ROOTFS) {
12392 			break;
12393 		}
12394 
12395 		vp = mp->mnt_vnodecovered;
12396 		if (vp == NULLVP) {
12397 			break;
12398 		}
12399 
12400 		mp = vp->v_mount;
12401 		if (mp == rmp) {
12402 			match = TRUE;
12403 			break;
12404 		}
12405 	}
12406 
12407 	name_cache_unlock();
12408 
12409 	return match;
12410 }
12411 
/*
 * State carried across trigger_unmount_callback() invocations while
 * vfs_nested_trigger_unmounts() iterates the mount list.
 */
struct trigger_unmount_info {
	vfs_context_t   ctx;            /* context for the unresolve calls */
	mount_t         top_mp;         /* mount whose descendants are unmounted */
	vnode_t         trigger_vp;     /* covered vnode saved for deferred unresolve */
	mount_t         trigger_mp;     /* mount that was above trigger_vp at save time */
	uint32_t        trigger_vid;    /* v_id of trigger_vp at save time */
	int             flags;          /* unmount flags */
};
12420 
/*
 * vfs_iterate() callback (tail-first) that unresolves trigger mounts
 * nested under infop->top_mp.  Because the iterator holds a ref on the
 * current mount, each trigger's unresolve is deferred: its covered
 * vnode is saved (with v_id) and processed on the NEXT callback, once
 * the iterator has moved past it.
 */
static int
trigger_unmount_callback(mount_t mp, void * arg)
{
	struct trigger_unmount_info * infop = (struct trigger_unmount_info *)arg;
	boolean_t mountedtrigger = FALSE;

	/*
	 * When we encounter the top level mount we're done
	 */
	if (mp == infop->top_mp) {
		return VFS_RETURNED_DONE;
	}

	if ((mp->mnt_vnodecovered == NULL) ||
	    (vnode_getwithref(mp->mnt_vnodecovered) != 0)) {
		return VFS_RETURNED;
	}

	/* Is mp sitting on a resolved trigger vnode? */
	if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
	    (mp->mnt_vnodecovered->v_resolve != NULL) &&
	    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_RESOLVED)) {
		mountedtrigger = TRUE;
	}
	vnode_put(mp->mnt_vnodecovered);

	/*
	 * When we encounter a mounted trigger, check if its under the top level mount
	 */
	if (!mountedtrigger || !triggerisdescendant(mp, infop->top_mp)) {
		return VFS_RETURNED;
	}

	/*
	 * Process any pending nested mount (now that its not referenced)
	 */
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithvid(infop->trigger_vp, infop->trigger_vid) == 0)) {
		vnode_t vp = infop->trigger_vp;
		int error;

		/* Drop the hold taken when the vnode was saved. */
		vnode_drop(infop->trigger_vp);
		infop->trigger_vp = NULLVP;

		if (mp == vp->v_mountedhere) {
			/* The saved trigger should never cover the current mount. */
			vnode_put(vp);
			printf("trigger_unmount_callback: unexpected match '%s'\n",
			    mp->mnt_vfsstat.f_mntonname);
			return VFS_RETURNED;
		}
		if (infop->trigger_mp != vp->v_mountedhere) {
			/* The mount above the saved vnode changed since it was saved. */
			vnode_put(vp);
			printf("trigger_unmount_callback: trigger mnt changed! (%p != %p)\n",
			    infop->trigger_mp, vp->v_mountedhere);
			goto savenext;
		}

		error = vnode_trigger_unresolve(vp, infop->flags, infop->ctx);
		vnode_put(vp);
		if (error) {
			printf("unresolving: '%s', err %d\n",
			    vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname :
			    "???", error);
			return VFS_RETURNED_DONE; /* stop iteration on errors */
		}
	} else if (infop->trigger_vp != NULLVP) {
		/* Saved vnode was recycled (vid mismatch); just drop the hold. */
		vnode_drop(infop->trigger_vp);
	}

savenext:
	/*
	 * We can't call resolver here since we hold a mount iter
	 * ref on mp so save its covered vp for later processing
	 */
	infop->trigger_vp = mp->mnt_vnodecovered;
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithref(infop->trigger_vp) == 0)) {
		if (infop->trigger_vp->v_mountedhere == mp) {
			infop->trigger_vid = infop->trigger_vp->v_id;
			vnode_hold(infop->trigger_vp);
			infop->trigger_mp = mp;
		}
		vnode_put(infop->trigger_vp);
	}

	return VFS_RETURNED;
}
12507 
12508 /*
12509  * Attempt to unmount any trigger mounts nested underneath a mount.
12510  * This is a best effort attempt and no retries are performed here.
12511  *
12512  * Note: mp->mnt_rwlock is held exclusively on entry (so be carefull)
12513  */
__private_extern__
void
vfs_nested_trigger_unmounts(mount_t mp, int flags, vfs_context_t ctx)
{
	struct trigger_unmount_info info;

	/* Must have trigger vnodes */
	if (mp->mnt_numtriggers == 0) {
		return;
	}
	/* Avoid recursive requests (by checking covered vnode) */
	if ((mp->mnt_vnodecovered != NULL) &&
	    (vnode_getwithref(mp->mnt_vnodecovered) == 0)) {
		boolean_t recursive = FALSE;

		/*
		 * If our own covered trigger vnode is mid-unresolve, this call
		 * came from an unresolve callback; don't recurse.
		 */
		if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
		    (mp->mnt_vnodecovered->v_resolve != NULL) &&
		    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_VFS_UNMOUNTED)) {
			recursive = TRUE;
		}
		vnode_put(mp->mnt_vnodecovered);
		if (recursive) {
			return;
		}
	}

	/*
	 * Attempt to unmount any nested trigger mounts (best effort)
	 */
	info.ctx = ctx;
	info.top_mp = mp;
	info.trigger_vp = NULLVP;
	info.trigger_vid = 0;
	info.trigger_mp = NULL;
	info.flags = flags;

	(void) vfs_iterate(VFS_ITERATE_TAIL_FIRST, trigger_unmount_callback, &info);

	/*
	 * Process remaining nested mount (now that its not referenced)
	 */
	if ((info.trigger_vp != NULLVP) &&
	    (vnode_getwithvid(info.trigger_vp, info.trigger_vid) == 0)) {
		vnode_t vp = info.trigger_vp;

		/* Only unresolve if the saved mount is still the one on top. */
		if (info.trigger_mp == vp->v_mountedhere) {
			(void) vnode_trigger_unresolve(vp, flags, ctx);
		}
		vnode_put(vp);
		vnode_drop(vp);
	} else if (info.trigger_vp != NULLVP) {
		/* Saved vnode was recycled; just drop the hold. */
		vnode_drop(info.trigger_vp);
	}
}
12568 
12569 int
vfs_addtrigger(mount_t mp,const char * relpath,struct vnode_trigger_info * vtip,vfs_context_t ctx)12570 vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx)
12571 {
12572 	struct nameidata *ndp;
12573 	int res;
12574 	vnode_t rvp, vp;
12575 	struct vnode_trigger_param vtp;
12576 
12577 	/*
12578 	 * Must be called for trigger callback, wherein rwlock is held
12579 	 */
12580 	lck_rw_assert(&mp->mnt_rwlock, LCK_RW_ASSERT_HELD);
12581 
12582 	TRIG_LOG("Adding trigger at %s\n", relpath);
12583 	TRIG_LOG("Trying VFS_ROOT\n");
12584 
12585 	ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);
12586 
12587 	/*
12588 	 * We do a lookup starting at the root of the mountpoint, unwilling
12589 	 * to cross into other mountpoints.
12590 	 */
12591 	res = VFS_ROOT(mp, &rvp, ctx);
12592 	if (res != 0) {
12593 		goto out;
12594 	}
12595 
12596 	TRIG_LOG("Trying namei\n");
12597 
12598 	NDINIT(ndp, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE,
12599 	    CAST_USER_ADDR_T(relpath), ctx);
12600 	ndp->ni_dvp = rvp;
12601 	res = namei(ndp);
12602 	if (res != 0) {
12603 		vnode_put(rvp);
12604 		goto out;
12605 	}
12606 
12607 	vp = ndp->ni_vp;
12608 	nameidone(ndp);
12609 	vnode_put(rvp);
12610 
12611 	TRIG_LOG("Trying vnode_resolver_create()\n");
12612 
12613 	/*
12614 	 * Set up blob.  vnode_create() takes a larger structure
12615 	 * with creation info, and we needed something different
12616 	 * for this case.  One needs to win, or we need to munge both;
12617 	 * vnode_create() wins.
12618 	 */
12619 	bzero(&vtp, sizeof(vtp));
12620 	vtp.vnt_resolve_func = vtip->vti_resolve_func;
12621 	vtp.vnt_unresolve_func = vtip->vti_unresolve_func;
12622 	vtp.vnt_rearm_func = vtip->vti_rearm_func;
12623 	vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
12624 	vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
12625 	vtp.vnt_data = vtip->vti_data;
12626 	vtp.vnt_flags = vtip->vti_flags;
12627 
12628 	res = vnode_resolver_create(mp, vp, &vtp, TRUE);
12629 	vnode_put(vp);
12630 out:
12631 	kfree_type(struct nameidata, ndp);
12632 	TRIG_LOG("Returning %d\n", res);
12633 	return res;
12634 }
12635 
12636 #endif /* CONFIG_TRIGGERS */
12637 
/*
 * Return an address-permuted (kdebug-safe) representation of the vnode
 * pointer for use in trace records.
 */
vm_offset_t
kdebug_vnode(vnode_t vp)
{
	return VM_KERNEL_ADDRPERM(vp);
}
12643 
/* kern.flush_cache_on_write: force a drive-cache flush after uncached writes. */
static int flush_cache_on_write = 0;
SYSCTL_INT(_kern, OID_AUTO, flush_cache_on_write,
    CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0,
    "always flush the drive cache on writes to uncached files");
12648 
12649 int
vnode_should_flush_after_write(vnode_t vp,int ioflag)12650 vnode_should_flush_after_write(vnode_t vp, int ioflag)
12651 {
12652 	return flush_cache_on_write
12653 	       && (ISSET(ioflag, IO_NOCACHE) || vnode_isnocache(vp));
12654 }
12655 
12656 /*
12657  * sysctl for use by disk I/O tracing tools to get the list of existing
12658  * vnodes' paths
12659  */
12660 
/* Number of unsigned longs needed to hold a MAXPATHLEN path. */
#define NPATH_WORDS (MAXPATHLEN / sizeof(unsigned long))
struct vnode_trace_paths_context {
	uint64_t count; /* vnodes emitted since the last preemption yield */
	/*
	 * Must be a multiple of 4, then -1, for tracing!
	 */
	unsigned long path[NPATH_WORDS + (4 - (NPATH_WORDS % 4)) - 1];
};
12669 
12670 static int
vnode_trace_path_callback(struct vnode * vp,void * vctx)12671 vnode_trace_path_callback(struct vnode *vp, void *vctx)
12672 {
12673 	struct vnode_trace_paths_context *ctx = vctx;
12674 	size_t path_len = sizeof(ctx->path);
12675 
12676 	int getpath_len = (int)path_len;
12677 	if (vn_getpath(vp, (char *)ctx->path, &getpath_len) == 0) {
12678 		/* vn_getpath() NUL-terminates, and len includes the NUL. */
12679 		assert(getpath_len >= 0);
12680 		path_len = (size_t)getpath_len;
12681 
12682 		assert(path_len <= sizeof(ctx->path));
12683 		kdebug_vfs_lookup((const char *)ctx->path, path_len, vp,
12684 		    KDBG_VFSLKUP_LOOKUP | KDBG_VFS_LOOKUP_FLAG_NOPROCFILT);
12685 
12686 		if (++(ctx->count) == 1000) {
12687 			thread_yield_to_preemption();
12688 			ctx->count = 0;
12689 		}
12690 	}
12691 
12692 	return VNODE_RETURNED;
12693 }
12694 
12695 static int
vfs_trace_paths_callback(mount_t mp,void * arg)12696 vfs_trace_paths_callback(mount_t mp, void *arg)
12697 {
12698 	if (mp->mnt_flag & MNT_LOCAL) {
12699 		vnode_iterate(mp, VNODE_ITERATE_ALL, vnode_trace_path_callback, arg);
12700 	}
12701 
12702 	return VFS_RETURNED;
12703 }
12704 
12705 static int sysctl_vfs_trace_paths SYSCTL_HANDLER_ARGS {
12706 	struct vnode_trace_paths_context ctx;
12707 
12708 	(void)oidp;
12709 	(void)arg1;
12710 	(void)arg2;
12711 	(void)req;
12712 
12713 	if (!kauth_cred_issuser(kauth_cred_get())) {
12714 		return EPERM;
12715 	}
12716 
12717 	if (!kdebug_enable || !kdebug_debugid_enabled(VFS_LOOKUP)) {
12718 		return EINVAL;
12719 	}
12720 
12721 	bzero(&ctx, sizeof(struct vnode_trace_paths_context));
12722 
12723 	vfs_iterate(0, vfs_trace_paths_callback, &ctx);
12724 
12725 	return 0;
12726 }
12727 
SYSCTL_PROC(_vfs_generic, OID_AUTO, trace_paths, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, NULL, 0, &sysctl_vfs_trace_paths, "-", "trace_paths");

#if CONFIG_FILE_LEASES
#include <IOKit/IOBSD.h>
#include <sys/file_internal.h>

/* Entitlement required to place or release file leases. */
#define FILE_LEASES_ENTITLEMENT    "com.apple.private.vfs.file-leases"

/* How long a lease holder gets to respond to a lease break. */
static uint32_t lease_break_timeout = 60; /* secs */

#if (DEVELOPMENT || DEBUG)
static int lease_debug = 0;
static int lease_entitlement_override = 0;

SYSCTL_NODE(_vfs, OID_AUTO, lease, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs lease");
SYSCTL_UINT(_vfs_lease, OID_AUTO, break_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &lease_break_timeout, 0, "");
SYSCTL_INT(_vfs_lease, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lease_debug, 0, "");
SYSCTL_INT(_vfs_lease, OID_AUTO, entitlement_override, CTLFLAG_RW | CTLFLAG_LOCKED, &lease_entitlement_override, 0, "");

/* Debug-only logging, gated on the vfs.lease.debug sysctl. */
#define LEASEDBG(fmt, args...)                                       \
do {                                                                 \
	if (__improbable(lease_debug)) {                                 \
	        pid_t cur_pid = proc_getpid(current_proc());             \
	        printf("%s(%d): " fmt "\n", __func__, cur_pid, ##args);  \
	}                                                                \
} while(0)
#else
#define LEASEDBG(fmt, args...)  /**/
#endif /* (DEVELOPMENT || DEBUG) */
12757 
12758 static bool
allow_setlease(vfs_context_t ctx)12759 allow_setlease(vfs_context_t ctx)
12760 {
12761 	bool entitled;
12762 
12763 	entitled = IOTaskHasEntitlement(vfs_context_task(ctx),
12764 	    FILE_LEASES_ENTITLEMENT);
12765 
12766 #if (DEVELOPMENT || DEBUG)
12767 	if (!entitled) {
12768 		entitled = (lease_entitlement_override == 1);
12769 	}
12770 #endif
12771 
12772 	return entitled;
12773 }
12774 
12775 static file_lease_t
file_lease_alloc(struct fileglob * fg,int fl_type,pid_t pid)12776 file_lease_alloc(struct fileglob *fg, int fl_type, pid_t pid)
12777 {
12778 	file_lease_t fl;
12779 
12780 	fl = kalloc_type(struct file_lease, Z_WAITOK);
12781 	/*
12782 	 * Duplicated file descriptors created by dup() or fork() would have the
12783 	 * same 'fileglob' so the lease can be released or modified with the
12784 	 * duplicated fds. Opening the same file (by either same or different
12785 	 * process) would have different 'fileglob' so a lease always follows a
12786 	 * 'fileglob'.
12787 	 */
12788 	fl->fl_fg = fg;
12789 	fl->fl_type = fl_type;
12790 	fl->fl_pid = pid;
12791 	fl->fl_downgrade_start = fl->fl_release_start = 0;
12792 
12793 	return fl;
12794 }
12795 
/* Free a lease previously allocated by file_lease_alloc(). */
static void
file_lease_free(file_lease_t fl)
{
	kfree_type(struct file_lease, fl);
}
12801 
12802 /*
12803  * A read lease can be placed only on a file/directory that is opened for
12804  * read-only which means no other processes have the file/directory opened in
12805  * read-write/write-only mode or mmap'ed writable.
12806  * A write lease can be placed on a file only if there are no other opens
12807  * for the file.
12808  *
12809  * Needs to be called with vnode's lock held.
12810  */
static int
check_for_open_conflict(vnode_t vp, struct fileglob *fg, int fl_type,
    int expcounts)
{
	int error = 0;

	if (fl_type == F_RDLCK) {
		/*
		 * Read lease: fail if writers exist beyond the caller's expected
		 * count, unless the sole writer is the caller's own fg opened
		 * FWRITE, or if anyone has the file mmap'ed writable.
		 */
		if (vp->v_writecount > expcounts &&
		    !(vp->v_writecount == 1 && (fg->fg_flag & FWRITE))) {
			error = EAGAIN;
		} else if (ubc_is_mapped_writable(vp)) {
			error = EAGAIN;
		}
	} else if (fl_type == F_WRLCK && vp->v_usecount > expcounts) {
		/* Write lease: no opens allowed beyond the expected count. */
		error = EAGAIN;
	}

	return error;
}
12830 
12831 /* Needs to be called with vnode's lock held. */
12832 static void
modify_file_lease(vnode_t vp,file_lease_t fl,int new_fl_type,struct fileglob * new_fg)12833 modify_file_lease(vnode_t vp, file_lease_t fl, int new_fl_type,
12834     struct fileglob *new_fg)
12835 {
12836 	LEASEDBG("fl %p changing fl_type from %d to %d (flags 0x%x)",
12837 	    fl, fl->fl_type, new_fl_type, fl->fl_flags);
12838 
12839 	fl->fl_type = new_fl_type;
12840 
12841 	/*
12842 	 * The lease being modified may be using a different file
12843 	 * descriptor, so usurp the fileglob pointer here.  In this
12844 	 * case the old descriptor no longer holds the lease.
12845 	 */
12846 	if (new_fg != NULL) {
12847 		fl->fl_fg = new_fg;
12848 	}
12849 
12850 	if (fl->fl_flags & FL_FLAG_RELEASE_PENDING ||
12851 	    fl->fl_flags & FL_FLAG_DOWNGRADE_PENDING) {
12852 		wakeup(&vp->v_leases);
12853 	}
12854 }
12855 
12856 static int
acquire_file_lease(vnode_t vp,struct fileglob * fg,int fl_type,int expcounts,vfs_context_t ctx)12857 acquire_file_lease(vnode_t vp, struct fileglob *fg, int fl_type, int expcounts,
12858     vfs_context_t ctx)
12859 {
12860 	file_lease_t fl, new_fl, our_fl;
12861 	int error;
12862 
12863 	/* Make sure "expected count" looks sane. */
12864 	if (expcounts < 0 || expcounts > OPEN_MAX) {
12865 		return EINVAL;
12866 	}
12867 
12868 	new_fl = file_lease_alloc(fg, fl_type, vfs_context_pid(ctx));
12869 
12870 	vnode_lock(vp);
12871 
12872 	error = check_for_open_conflict(vp, fg, fl_type, expcounts);
12873 	if (error) {
12874 		LEASEDBG("open conflict on vp %p type %d writecnt %d usecnt %d "
12875 		    "fl_type %d expcounts %d",
12876 		    vp, vp->v_type, vp->v_writecount, vp->v_usecount, fl_type,
12877 		    expcounts);
12878 		goto out;
12879 	}
12880 
12881 	our_fl = NULL;
12882 	LIST_FOREACH(fl, &vp->v_leases, fl_link) {
12883 		/* Does the existing lease belong to us? */
12884 		if (fl->fl_fg == new_fl->fl_fg ||
12885 		    fl->fl_pid == new_fl->fl_pid) {
12886 			our_fl = fl;
12887 			continue;
12888 		}
12889 
12890 		/*
12891 		 * We don't allow placing a new write lease when there is an existing
12892 		 * read lease that doesn't belong to us. We also don't allow putting
12893 		 * a new read lease if there is a pending release on the lease.
12894 		 * Putting a new read lease when there is a pending downgrade on the
12895 		 * lease is fine as it won't cause lease conflict.
12896 		 */
12897 		if (fl_type == F_WRLCK || fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
12898 			break;
12899 		}
12900 	}
12901 
12902 	/*
12903 	 * Found an existing lease that we don't own and it conflicts with the
12904 	 * new lease.
12905 	 */
12906 	if (fl) {
12907 		LEASEDBG("lease conflict on vp %p fl %p fl_type %d cur_fl_type %d",
12908 		    vp, fl, fl_type, fl->fl_type);
12909 		goto out;
12910 	}
12911 
12912 	/* Found an existing lease that we own so just change the type. */
12913 	if (our_fl) {
12914 		LEASEDBG("replace lease on vp %p fl %p old_fl_type %d new_fl_type %d",
12915 		    vp, our_fl, our_fl->fl_type, fl_type);
12916 
12917 		modify_file_lease(vp, our_fl, new_fl->fl_type, new_fl->fl_fg);
12918 		goto out;
12919 	}
12920 
12921 	LEASEDBG("acquired lease on vp %p type %d fl %p fl_type %d fg %p",
12922 	    vp, vp->v_type, new_fl, new_fl->fl_type, new_fl->fl_fg);
12923 
12924 	LIST_INSERT_HEAD(&vp->v_leases, new_fl, fl_link);
12925 	new_fl = NULL;
12926 
12927 out:
12928 	vnode_unlock(vp);
12929 
12930 	if (new_fl) {
12931 		file_lease_free(new_fl);
12932 	}
12933 
12934 	return error;
12935 }
12936 
/*
 * Release the lease held by 'fg' on 'vp'.  Returns 0 on success or
 * ENOLCK when 'fg' holds no lease on this vnode.
 */
static int
release_file_lease(vnode_t vp, struct fileglob *fg)
{
	file_lease_t fl, fl_tmp;
	int error = 0;

	LEASEDBG("request to release lease on vp %p type %d fg %p",
	    vp, vp->v_type, fg);

	vnode_lock(vp);

	LIST_FOREACH_SAFE(fl, &vp->v_leases, fl_link, fl_tmp) {
		if (fl->fl_fg == fg) {
			LEASEDBG("released lease on vp %p fl %p type %d",
			    vp, fl, fl->fl_type);

			/* Unlink first; F_UNLCK wakes any pending lease breakers. */
			LIST_REMOVE(fl, fl_link);
			modify_file_lease(vp, fl, F_UNLCK, NULL);
			break;
		}
	}

	vnode_unlock(vp);

	/* 'fl' is non-NULL only if the loop broke on a match; NULL means no lease. */
	if (fl) {
		file_lease_free(fl);
	} else {
		error = ENOLCK;
	}

	return error;
}
12969 
12970 /*
12971  * Acquire or release a file lease according to the given type (F_RDLCK,
12972  * F_WRLCK or F_UNLCK).
12973  *
12974  * Returns:	0			Success
12975  *		EAGAIN			Failed to acquire a file lease due to conflicting opens
12976  *		ENOLCK			Failed to release a file lease due to lease not found
12977  *		EPERM           Current task doesn't have the entitlement
12978  */
12979 int
vnode_setlease(vnode_t vp,struct fileglob * fg,int fl_type,int expcounts,vfs_context_t ctx)12980 vnode_setlease(vnode_t vp, struct fileglob *fg, int fl_type, int expcounts,
12981     vfs_context_t ctx)
12982 {
12983 	int error;
12984 
12985 	if (!allow_setlease(ctx)) {
12986 		return EPERM;
12987 	}
12988 
12989 	error = (fl_type == F_UNLCK) ? release_file_lease(vp, fg) :
12990 	    acquire_file_lease(vp, fg, fl_type, expcounts, ctx);
12991 
12992 	return error;
12993 }
12994 
12995 /*
12996  * Retrieve the currently in place lease for the file.
12997  *
12998  * Returns:
12999  *		F_RDLCK			Read lease
13000  *		F_WRLCK			Write lease
13001  *		F_UNLCK			No lease
13002  */
13003 int
vnode_getlease(vnode_t vp)13004 vnode_getlease(vnode_t vp)
13005 {
13006 	file_lease_t fl;
13007 	int fl_type = F_UNLCK;
13008 
13009 	vnode_lock(vp);
13010 
13011 	/*
13012 	 * There should be only one type of lease in the list as read and write
13013 	 * leases can't co-exist for the same file.
13014 	 */
13015 	fl = LIST_FIRST(&vp->v_leases);
13016 	if (fl) {
13017 		fl_type = fl->fl_type;
13018 	}
13019 
13020 	vnode_unlock(vp);
13021 
13022 	LEASEDBG("vp %p fl %p fl_type %d", vp, fl, fl_type);
13023 
13024 	return fl_type;
13025 }
13026 
13027 /* Must be called with vnode's lock held. */
13028 static bool
check_for_lease_conflict(vnode_t vp,int breaker_fl_type,vfs_context_t ctx)13029 check_for_lease_conflict(vnode_t vp, int breaker_fl_type, vfs_context_t ctx)
13030 {
13031 	file_lease_t fl;
13032 	pid_t pid = vfs_context_pid(ctx);
13033 	bool is_conflict = false;
13034 
13035 	LIST_FOREACH(fl, &vp->v_leases, fl_link) {
13036 		if ((fl->fl_type == F_WRLCK && fl->fl_pid != pid) ||
13037 		    (breaker_fl_type == F_WRLCK && fl->fl_pid != pid)) {
13038 			LEASEDBG("conflict detected on vp %p type %d fl_type %d "
13039 			    "breaker_fl_type %d",
13040 			    vp, vp->v_type, fl->fl_type, breaker_fl_type);
13041 
13042 			is_conflict = true;
13043 			break;
13044 		}
13045 	}
13046 
13047 	return is_conflict;
13048 }
13049 
13050 static uint64_t
absolutetime_elapsed_in_secs(uint64_t start)13051 absolutetime_elapsed_in_secs(uint64_t start)
13052 {
13053 	uint64_t elapsed, elapsed_sec;
13054 	uint64_t now = mach_absolute_time();
13055 
13056 	elapsed = now - start;
13057 	absolutetime_to_nanoseconds(elapsed, &elapsed_sec);
13058 	elapsed_sec /= NSEC_PER_SEC;
13059 
13060 	return elapsed_sec;
13061 }
13062 
/*
 * Walk the vnode's lease list and forcibly downgrade or release any lease
 * whose pending lease break has been outstanding for at least
 * lease_break_timeout seconds, then wake up any blocked lease breaker(s).
 *
 * Must be called with vnode's lock held.
 */
static void
handle_lease_break_timedout(vnode_t vp)
{
	file_lease_t fl, fl_tmp;
	uint64_t elapsed_sec;

	/* SAFE variant: the release path below removes entries mid-iteration. */
	LIST_FOREACH_SAFE(fl, &vp->v_leases, fl_link, fl_tmp) {
		if (fl->fl_flags & FL_FLAG_DOWNGRADE_PENDING) {
			elapsed_sec = absolutetime_elapsed_in_secs(fl->fl_downgrade_start);

			if (elapsed_sec >= lease_break_timeout) {
				LEASEDBG("force downgrade on vp %p for fl %p elapsed %llu "
				    "timeout %u", vp, fl, elapsed_sec, lease_break_timeout);

				/* Clear the downgrade timer and force the lease to read. */
				fl->fl_flags &= ~FL_FLAG_DOWNGRADE_PENDING;
				fl->fl_downgrade_start = 0;
				modify_file_lease(vp, fl, F_RDLCK, NULL);
				/*
				 * Once forcibly downgraded, skip the release check for
				 * this lease on this pass.
				 */
				continue;
			}
		}
		if (fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
			elapsed_sec = absolutetime_elapsed_in_secs(fl->fl_release_start);

			if (elapsed_sec >= lease_break_timeout) {
				LEASEDBG("force release on vp %p for fl %p elapsed %llu "
				    "timeout %u", vp, fl, elapsed_sec, lease_break_timeout);

				/* Drop the lease entirely; holder ran out of time. */
				LIST_REMOVE(fl, fl_link);
				file_lease_free(fl);
				continue;
			}
		}
	}

	/* Wakeup the lease breaker(s). */
	wakeup(&vp->v_leases);
}
13101 
13102 /* Must be called with vnode's lock held. */
13103 static void
wait_for_lease_break(vnode_t vp,int breaker_fl_type,vfs_context_t ctx)13104 wait_for_lease_break(vnode_t vp, int breaker_fl_type, vfs_context_t ctx)
13105 {
13106 	file_lease_t fl;
13107 	struct timespec ts;
13108 	uint64_t elapsed_sec, start_time;
13109 	int error;
13110 
13111 restart:
13112 	fl = LIST_FIRST(&vp->v_leases);
13113 	assert(fl);
13114 
13115 	/*
13116 	 * In a rare case it is possible that the lease that we are blocked on has
13117 	 * been released and a new lease has been put in place after we are
13118 	 * signalled to wake up. In this particular, we would treat it as no
13119 	 * conflict and proceed. This could only happen for directory leasing.
13120 	 */
13121 	if ((fl->fl_flags & (FL_FLAG_DOWNGRADE_PENDING | FL_FLAG_RELEASE_PENDING)) == 0) {
13122 		LEASEDBG("new lease in place on vp %p fl %p fl_type %d "
13123 		    "breaker_fl_type %d",
13124 		    vp, fl, fl->fl_type, breaker_fl_type);
13125 
13126 		return;
13127 	}
13128 	/*
13129 	 * Figure out which timer to use for lease break timedout as we could have
13130 	 * both timers active. If both timers active, pick the one with earliest
13131 	 * start time.
13132 	 */
13133 	if (fl->fl_release_start) {
13134 		if (fl->fl_downgrade_start == 0 ||
13135 		    fl->fl_downgrade_start < fl->fl_release_start) {
13136 			start_time = fl->fl_release_start;
13137 		} else {
13138 			start_time = fl->fl_downgrade_start;
13139 		}
13140 	} else {
13141 		start_time = fl->fl_downgrade_start;
13142 	}
13143 	assert(start_time > 0);
13144 
13145 	elapsed_sec = absolutetime_elapsed_in_secs(start_time);
13146 
13147 	LEASEDBG("elapsed_sec %llu release_start %llu downgrade_start %llu",
13148 	    elapsed_sec, fl->fl_release_start, fl->fl_downgrade_start);
13149 
13150 	ts.tv_sec = (lease_break_timeout > elapsed_sec ?
13151 	    (lease_break_timeout - elapsed_sec) : 0);
13152 	ts.tv_nsec = (ts.tv_sec == 0 ? 1 : 0);
13153 	error = msleep(&vp->v_leases, &vp->v_lock, PVFS, __func__, &ts);
13154 
13155 	if (error == 0 || error != EWOULDBLOCK) {
13156 		/*
13157 		 * Woken up due to lease is released/downgraded by lease holder.
13158 		 * We don't expect any other error from msleep() beside EWOULDBLOCK.
13159 		 * Check if there is any further conflicts. If so, then continue to
13160 		 * wait for the next conflict to resolve.
13161 		 */
13162 		if (check_for_lease_conflict(vp, breaker_fl_type, ctx)) {
13163 			goto restart;
13164 		}
13165 	} else {
13166 		/*
13167 		 * Woken due to lease break timeout expired (EWOULDBLOCK returned).
13168 		 * Break/downgrade all conflicting leases.
13169 		 */
13170 		handle_lease_break_timedout(vp);
13171 
13172 		if (check_for_lease_conflict(vp, breaker_fl_type, ctx)) {
13173 			goto restart;
13174 		}
13175 	}
13176 }
13177 
13178 /* Must be called with vnode's lock held. */
13179 static void
send_lease_break_event(vnode_t vp,uint32_t event)13180 send_lease_break_event(vnode_t vp, uint32_t event)
13181 {
13182 	if (vp->v_knotes.slh_first != NULL) {
13183 		KNOTE(&vp->v_knotes, event);
13184 	}
13185 }
13186 
13187 static bool
is_dataless_file(vnode_t vp,vfs_context_t ctx)13188 is_dataless_file(vnode_t vp, vfs_context_t ctx)
13189 {
13190 	struct vnode_attr va;
13191 	bool is_dataless = false;
13192 	int error;
13193 
13194 	VATTR_INIT(&va);
13195 	VATTR_WANTED(&va, va_flags);
13196 
13197 	error = vnode_getattr(vp, &va, ctx);
13198 	if (!error && (va.va_flags & SF_DATALESS)) {
13199 		is_dataless = true;
13200 	}
13201 
13202 	return is_dataless;
13203 }
13204 
/*
 * Break lease(s) in place for the file when there is conflict.
 * This function would return 0 for almost all call sites. The only exception
 * is when it is called from open1() with O_NONBLOCK flag and it needs to block
 * waiting for the lease conflict(s) to resolve. In this case EWOULDBLOCK is
 * returned.
 */
int
vnode_breaklease(vnode_t vp, uint32_t oflags, vfs_context_t ctx)
{
	file_lease_t fl;
	uint64_t now;
	int fl_type;
	int error = 0;

	vnode_lock(vp);

	/* Common case: no lease in place; nothing to break. */
	if (__probable(LIST_EMPTY(&vp->v_leases))) {
		goto out_unlock;
	}

	/* Determine the access mode requested by the lease breaker. */
	fl_type = (oflags & (O_WRONLY | O_RDWR | O_CREAT | O_TRUNC)) ? F_WRLCK : F_RDLCK;

	/*
	 * If the lease-breaker is just reading, check that it can break
	 * leases first. If the lease-breaker is writing, or if the
	 * context was not specified, we always break.
	 * We skip lease break if the lease-breaker is dataless manipulator and
	 * the file is dataless.
	 */
	if ((fl_type == F_RDLCK && !vfs_context_can_break_leases(ctx)) ||
	    (vfs_context_is_dataless_manipulator(ctx) && (vp->v_type == VREG) &&
	    is_dataless_file(vp, ctx))) {
		goto out_unlock;
	}

	if (!check_for_lease_conflict(vp, fl_type, ctx)) {
		goto out_unlock;
	}

	/* Timestamp used as the start time for the lease break timer(s). */
	now = mach_absolute_time();

	LEASEDBG("break lease on vp %p type %d oflags 0x%x cur_time %llu",
	    vp, vp->v_type, oflags, now);

	/*
	 * If we get to this point, then all lease(s) in place conflict and
	 * we need to send the lease break event to the lease holder(s).
	 * It is possible that a lease could have both downgrade and release events
	 * pending triggered by multiple breakers trying to open the file in
	 * different modes. Both events would have different lease break timers.
	 * Consider the following case:
	 * 1. Process A holds the write lease on file X.
	 * 2. Process B opens the file X in read-only mode.
	 *    This triggers downgrade lease event to Process A.
	 * 3. While downgrade is pending, Process C opens the file X in read-write
	 *    mode. This triggers release lease event to Process A.
	 */
	LIST_FOREACH(fl, &vp->v_leases, fl_link) {
		if (fl_type == F_WRLCK) {
			/* File is opened for writing or truncate. */
			if (fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
				/* Release already requested; don't restart its timer. */
				continue;
			}
			fl->fl_release_start = now;
			fl->fl_flags |= FL_FLAG_RELEASE_PENDING;
			send_lease_break_event(vp, NOTE_LEASE_RELEASE);
		} else {
			/* File is opened for reading. */
			if (fl->fl_flags & FL_FLAG_DOWNGRADE_PENDING ||
			    fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
				/* A break event is already outstanding for this lease. */
				continue;
			}
			fl->fl_downgrade_start = now;
			fl->fl_flags |= FL_FLAG_DOWNGRADE_PENDING;
			send_lease_break_event(vp, NOTE_LEASE_DOWNGRADE);
		}
	}

	/*
	 * If open is requested with O_NONBLOCK, then we can't block and wait for
	 * the lease to be released/downgraded. Just bail out with EWOULDBLOCK.
	 */
	if (oflags & O_NONBLOCK) {
		error = EWOULDBLOCK;
		goto out;
	}

	/* Block (with vnode lock held) until the conflict(s) are resolved. */
	wait_for_lease_break(vp, fl_type, ctx);

out:
	LEASEDBG("break lease on vp %p oflags 0x%x, error %d", vp, oflags, error);

out_unlock:
	vnode_unlock(vp);

	return error;
}
13304 
13305 /*
13306  * Get parent vnode by parent ID (only for file system that supports
13307  * MNTK_PATH_FROM_ID).
13308  * On success, the parent's vnode is returned with iocount held.
13309  */
13310 static vnode_t
vnode_getparent_byid(vnode_t vp)13311 vnode_getparent_byid(vnode_t vp)
13312 {
13313 	struct vnode_attr va;
13314 	vnode_t dvp = NULLVP;
13315 	vfs_context_t ctx = vfs_context_current();
13316 	int error;
13317 
13318 	if (!(vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
13319 		goto out;
13320 	}
13321 
13322 	VATTR_INIT(&va);
13323 	VATTR_WANTED(&va, va_parentid);
13324 
13325 	/* Get the vnode's parent id from the file system. */
13326 	error = vnode_getattr(vp, &va, ctx);
13327 	if (error || !VATTR_IS_SUPPORTED(&va, va_parentid)) {
13328 		goto out;
13329 	}
13330 
13331 	/*
13332 	 * Ask the file system for the parent vnode.
13333 	 * We are ignoring the error here as we don't expect the parent vnode to be
13334 	 * populated on error.
13335 	 */
13336 	(void)VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx);
13337 
13338 out:
13339 	return dvp;
13340 }
13341 
13342 /*
13343  * Break directory's lease.
13344  * If 'need_parent' is true, then parent is obtained via vnode_getparent() (or
13345  * vnode_getparent_byid()) on the provided 'vp'.
13346  */
13347 void
vnode_breakdirlease(vnode_t vp,bool need_parent,uint32_t oflags)13348 vnode_breakdirlease(vnode_t vp, bool need_parent, uint32_t oflags)
13349 {
13350 	vnode_t dvp;
13351 
13352 	if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR) ||
13353 	    (vp == rootvnode)) {
13354 		return;
13355 	}
13356 
13357 	/*
13358 	 * If parent is not provided, first try to get it from the name cache.
13359 	 * If failed, then we will attempt to ask the file system for parent vnode.
13360 	 * This is just a best effort as both attempts could still fail.
13361 	 */
13362 	if (need_parent) {
13363 		dvp = vnode_getparent(vp);
13364 		if (__improbable(dvp == NULLVP)) {
13365 			dvp = vnode_getparent_byid(vp);
13366 		}
13367 	} else {
13368 		dvp = vp;
13369 	}
13370 
13371 	if (__probable(dvp != NULLVP)) {
13372 		/* Always break dir leases. */
13373 		(void)vnode_breaklease(dvp, oflags, vfs_context_current());
13374 	}
13375 
13376 	if (need_parent && (dvp != NULLVP)) {
13377 		vnode_put(dvp);
13378 	}
13379 }
13380 
13381 /*
13382  * Revoke all lease(s) in place for the file.
13383  * This is called when the vnode is reclaimed.
13384  */
13385 void
vnode_revokelease(vnode_t vp,bool locked)13386 vnode_revokelease(vnode_t vp, bool locked)
13387 {
13388 	file_lease_t fl, fl_tmp;
13389 	bool need_wakeup = false;
13390 
13391 	if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR)) {
13392 		return;
13393 	}
13394 
13395 	if (!locked) {
13396 		vnode_lock(vp);
13397 	}
13398 
13399 	LIST_FOREACH_SAFE(fl, &vp->v_leases, fl_link, fl_tmp) {
13400 		LIST_REMOVE(fl, fl_link);
13401 		file_lease_free(fl);
13402 		need_wakeup = true;
13403 	}
13404 
13405 	/* Wakeup any lease breaker(s) that might be currently blocked. */
13406 	if (__improbable(need_wakeup)) {
13407 		wakeup(&vp->v_leases);
13408 	}
13409 
13410 	if (!locked) {
13411 		vnode_unlock(vp);
13412 	}
13413 }
13414 
13415 #endif /* CONFIG_FILE_LEASES */
13416 
13417 errno_t
vnode_rdadvise(vnode_t vp,off_t offset,int len,vfs_context_t ctx)13418 vnode_rdadvise(vnode_t vp, off_t offset, int len, vfs_context_t ctx)
13419 {
13420 	struct radvisory ra_struct;
13421 
13422 	assert(vp);
13423 
13424 	if (offset < 0 || len < 0) {
13425 		return EINVAL;
13426 	}
13427 
13428 	ra_struct.ra_offset = offset;
13429 	ra_struct.ra_count = len;
13430 
13431 	return VNOP_IOCTL(vp, F_RDADVISE, (caddr_t)&ra_struct, 0, ctx);
13432 }
13433 
/*
 * Determine whether the vnode is reachable via multiple paths (i.e. is a
 * hard link, or a directory hard link). Returns non-zero when the link
 * count reported by the file system is greater than 1, 0 otherwise (or on
 * any failure to determine it). For local volumes the negative result is
 * cached in v_ext_flag (VE_NOT_HARDLINK) to skip the getattr next time.
 */
int
vnode_hasmultipath(vnode_t vp)
{
	struct vnode_attr va;
	bool is_local_volume = !!(vp->v_mount->mnt_flag & MNT_LOCAL);
	bool link_locked = false;
	int has_multipath = 0;
	int error;

	/*
	 * If the volume doesn't support directory hard link then the directory
	 * can't be a hard link.
	 */
	if ((vp->v_type == VDIR) && is_local_volume &&
	    !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) {
		goto out;
	}

	vnode_link_lock(vp);
	link_locked = true;

	/* Fast path: previously determined (and cached) as not a hard link. */
	if (is_local_volume &&
	    (os_atomic_load(&vp->v_ext_flag, relaxed) & VE_NOT_HARDLINK)) {
		goto out;
	}

	/*
	 * Not all file systems adopt vnode_setmultipath() to mark a vnode is
	 * hard link (VISHARDLINK) so we need to call into the file system to get
	 * the link count attributes to determine if the vnode has multiple paths.
	 */
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_nlink);
	VATTR_WANTED(&va, va_dirlinkcount);

	error = vnode_getattr(vp, &va, vfs_context_current());
	if (error) {
		/* Can't tell; report no multipath without caching the result. */
		goto out;
	}

	/* Directories use va_dirlinkcount when available; files use va_nlink. */
	if ((vp->v_type == VDIR) && VATTR_IS_SUPPORTED(&va, va_dirlinkcount)) {
		has_multipath = (va.va_dirlinkcount > 1);
	} else if (VATTR_IS_SUPPORTED(&va, va_nlink)) {
		has_multipath = (va.va_nlink > 1);
	}

	/* Cache the negative result so future calls skip the getattr. */
	if (has_multipath == 0) {
		os_atomic_or(&vp->v_ext_flag, VE_NOT_HARDLINK, relaxed);
	}

out:
	if (link_locked) {
		vnode_link_unlock(vp);
	}

	return has_multipath;
}
13491 
13492 bool
vnode_isappendonly(vnode_t vp)13493 vnode_isappendonly(vnode_t vp)
13494 {
13495 	return os_atomic_load(&vp->v_ext_flag, relaxed) & VE_APPENDONLY;
13496 }
13497