1 /*
2 *
3 * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
4 *
5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the License
11 * may not be used to create, or enable the creation or redistribution of,
12 * unlawful or unlicensed copies of an Apple operating system, or to
13 * circumvent, violate, or enable the circumvention or violation of, any
14 * terms of an Apple operating system software license agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 *
19 * The Original Code and all software distributed under the License are
20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24 * Please see the License for the specific language governing rights and
25 * limitations under the License.
26 *
27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 */
29 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 /*
31 * Copyright (c) 1989, 1993
32 * The Regents of the University of California. All rights reserved.
33 * (c) UNIX System Laboratories, Inc.
34 * All or some portions of this file are derived from material licensed
35 * to the University of California by American Telephone and Telegraph
36 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
37 * the permission of UNIX System Laboratories, Inc.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by the University of
50 * California, Berkeley and its contributors.
51 * 4. Neither the name of the University nor the names of its contributors
52 * may be used to endorse or promote products derived from this software
53 * without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * SUCH DAMAGE.
66 *
67 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
68 */
69 /*
70 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
71 * support for mandatory and extensible security protections. This notice
72 * is included in support of clause 2.2 (b) of the Apple Public License,
73 * Version 2.0.
74 */
75
76 /*
77 * External virtual filesystem routines
78 */
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount_internal.h>
85 #include <sys/time.h>
86 #include <sys/lock.h>
87 #include <sys/vnode.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf_internal.h>
93 #include <sys/errno.h>
94 #include <kern/kalloc.h>
95 #include <sys/uio_internal.h>
96 #include <sys/uio.h>
97 #include <sys/domain.h>
98 #include <sys/mbuf.h>
99 #include <sys/syslog.h>
100 #include <sys/ubc_internal.h>
101 #include <sys/vm.h>
102 #include <sys/xattr.h>
103 #include <sys/sysctl.h>
104 #include <sys/filedesc.h>
105 #include <sys/fcntl.h>
106 #include <sys/event.h>
107 #include <sys/kdebug.h>
108 #include <sys/kauth.h>
109 #include <sys/user.h>
110 #include <sys/systm.h>
111 #include <sys/kern_memorystatus_xnu.h>
112 #include <sys/lockf.h>
113 #include <sys/reboot.h>
114 #include <miscfs/fifofs/fifo.h>
115
116 #include <nfs/nfs.h>
117
118 #include <string.h>
119 #include <machine/machine_routines.h>
120
121 #include <kern/assert.h>
122 #include <mach/kern_return.h>
123 #include <kern/thread.h>
124 #include <kern/sched_prim.h>
125 #include <kern/smr.h>
126
127 #include <miscfs/specfs/specdev.h>
128
129 #include <mach/mach_types.h>
130 #include <mach/memory_object_types.h>
131 #include <mach/memory_object_control.h>
132
133 #include <kern/kalloc.h> /* kalloc()/kfree() */
134 #include <kern/clock.h> /* delay_for_interval() */
135 #include <libkern/coreanalytics/coreanalytics.h>
136 #include <libkern/OSAtomic.h> /* OSAddAtomic() */
137 #include <os/atomic_private.h>
138 #if defined(XNU_TARGET_OS_OSX)
139 #include <console/video_console.h>
140 #endif
141
142 #ifdef CONFIG_IOCOUNT_TRACE
143 #include <libkern/OSDebug.h>
144 #endif
145
146 #include <vm/vm_protos.h> /* vnode_pager_vrele() */
147 #include <vm/vm_ubc.h>
148 #include <vm/memory_object_xnu.h>
149
150 #if CONFIG_MACF
151 #include <security/mac_framework.h>
152 #endif
153
154 #include <vfs/vfs_disk_conditioner.h>
155 #include <libkern/section_keywords.h>
156
157 static LCK_GRP_DECLARE(vnode_lck_grp, "vnode");
158 static LCK_ATTR_DECLARE(vnode_lck_attr, 0, 0);
159
160 #if CONFIG_TRIGGERS
161 static LCK_GRP_DECLARE(trigger_vnode_lck_grp, "trigger_vnode");
162 static LCK_ATTR_DECLARE(trigger_vnode_lck_attr, 0, 0);
163 #endif
164
165 extern lck_mtx_t mnt_list_mtx_lock;
166
167 static KALLOC_TYPE_DEFINE(specinfo_zone, struct specinfo, KT_DEFAULT);
168
169 ZONE_DEFINE(vnode_zone, "vnodes",
170 sizeof(struct vnode), ZC_NOGC | ZC_ZFREE_CLEARMEM);
171
/*
 * Map the file-type nibble of an S_IFMT mode (mode >> 12) to a vnode type.
 * Unassigned slots map to VNON; 017 maps to VBAD.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
/* Inverse map: vnode type (enum vtype used as index) to S_IFMT mode bits. */
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
180
181 extern int paniclog_append_noflush(const char *format, ...);
182
183 /* XXX next prototytype should be from libsa/stdlib.h> but conflicts libkern */
184 __private_extern__ void qsort(
185 void * array,
186 size_t nmembers,
187 size_t member_size,
188 int (*)(const void *, const void *));
189
190 __private_extern__ void vntblinit(void);
191 __private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t,
192 enum uio_seg, int);
193
194 static void vnode_list_add(vnode_t);
195 static void vnode_async_list_add(vnode_t);
196 static void vnode_list_remove(vnode_t);
197 static void vnode_list_remove_locked(vnode_t);
198
199 static void vnode_abort_advlocks(vnode_t);
200 static errno_t vnode_drain(vnode_t);
201 static void vgone(vnode_t, int flags);
202 static void vclean(vnode_t vp, int flag);
203 static void vnode_reclaim_internal(vnode_t, int, int, int);
204
205 static void vnode_dropiocount(vnode_t);
206
207 static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
208 static int vnode_reload(vnode_t);
209
210 static int unmount_callback(mount_t, __unused void *);
211
212 static void insmntque(vnode_t vp, mount_t mp);
213 static int mount_getvfscnt(void);
214 static int mount_fillfsids(fsid_t *, int );
215 static void vnode_iterate_setup(mount_t);
216 int vnode_umount_preflight(mount_t, vnode_t, int);
217 static int vnode_iterate_prepare(mount_t);
218 static int vnode_iterate_reloadq(mount_t);
219 static void vnode_iterate_clear(mount_t);
220 static mount_t vfs_getvfs_locked(fsid_t *);
221 static int vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp,
222 struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx);
223 static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx);
224
225 errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);
226
227 #ifdef CONFIG_IOCOUNT_TRACE
228 static void record_vp(vnode_t vp, int count);
229 static TUNABLE(int, bootarg_vnode_iocount_trace, "vnode_iocount_trace", 0);
230 static TUNABLE(int, bootarg_uthread_iocount_trace, "uthread_iocount_trace", 0);
231 #endif /* CONFIG_IOCOUNT_TRACE */
232
233 #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
234 static TUNABLE(bool, bootarg_no_vnode_jetsam, "-no_vnode_jetsam", false);
235 #endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */
236
237 static TUNABLE(bool, bootarg_no_vnode_drain, "-no_vnode_drain", false);
238
/*
 * Policy for returning vnodes to the zone allocator, selected by the
 * "vn_dealloc_level" boot-arg and applied in vntblinit().
 */
__options_decl(freeable_vnode_level_t, uint32_t, {
	DEALLOC_VNODE_NONE = 0,          /* vnodes are never deallocated */
	DEALLOC_VNODE_ONLY_OVERFLOW = 1, /* only vnodes above desiredvnodes are freeable */
	DEALLOC_VNODE_ALL = 2            /* vnode population may shrink below desiredvnodes */
});

#if XNU_TARGET_OS_OSX
static TUNABLE(freeable_vnode_level_t, bootarg_vn_dealloc_level, "vn_dealloc_level", DEALLOC_VNODE_NONE);
#else
static TUNABLE(freeable_vnode_level_t, bootarg_vn_dealloc_level, "vn_dealloc_level", DEALLOC_VNODE_ONLY_OVERFLOW);
#endif /* XNU_TARGET_OS_OSX */

/* effective policy; set from bootarg_vn_dealloc_level in vntblinit() */
static freeable_vnode_level_t vn_dealloc_level = DEALLOC_VNODE_NONE;
252
253 boolean_t root_is_CF_drive = FALSE;
254
255 #if CONFIG_TRIGGERS
256 static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, boolean_t external);
257 static void vnode_resolver_detach(vnode_t);
258 #endif
259
260 TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
261 TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */
262 TAILQ_HEAD(async_work_lst, vnode) vnode_async_work_list;
263
264
265 TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */
266 struct timeval rage_tv;
267 int rage_limit = 0;
268 int ragevnodes = 0;
269
270 long reusablevnodes_max = LONG_MAX;
271 long reusablevnodes = 0;
272 int deadvnodes_low = 0;
273 int deadvnodes_high = 0;
274 int numvnodes_min = 0;
275 int numvnodes_max = 0;
276
277 uint64_t newvnode = 0;
278 unsigned long newvnode_nodead = 0;
279
280 static int vfs_unmountall_started = 0;
281 static int vfs_unmountall_finished = 0;
282 static uint64_t vfs_shutdown_last_completion_time;
283
284 #define RAGE_LIMIT_MIN 100
285 #define RAGE_TIME_LIMIT 5
286
287 VFS_SMR_DECLARE;
288 extern uint32_t nc_smr_enabled;
289
290 /*
291 * ROSV definitions
292 * NOTE: These are shadowed from PlatformSupport definitions, but XNU
293 * builds standalone.
294 */
295 #define PLATFORM_DATA_VOLUME_MOUNT_POINT "/System/Volumes/Data"
296
297 /*
298 * These could be in PlatformSupport but aren't yet
299 */
300 #define PLATFORM_PREBOOT_VOLUME_MOUNT_POINT "/System/Volumes/Preboot"
301 #define PLATFORM_RECOVERY_VOLUME_MOUNT_POINT "/System/Volumes/Recovery"
302
303 #if CONFIG_MOUNT_VM
304 #define PLATFORM_VM_VOLUME_MOUNT_POINT "/System/Volumes/VM"
305 #endif
306
307 struct mntlist mountlist; /* mounted filesystem list */
308 static int nummounts = 0;
309
310 static int print_busy_vnodes = 0; /* print out busy vnodes */
311
#if DIAGNOSTIC
/*
 * Debug-only sanity check: panic if vp is not on the list it is about to be
 * removed from. A vnode that is on no list has tqe_prev poisoned to 0xdeadb
 * (see VLISTNONE below).
 */
#define VLISTCHECK(fun, vp, list) \
	if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
	        panic("%s: %s vnode not on %slist", (fun), (list), (list));
#else
#define VLISTCHECK(fun, vp, list)
#endif /* DIAGNOSTIC */

/* mark vp as on no free/dead/rage/async list by poisoning its linkage */
#define VLISTNONE(vp) \
	do { \
	        (vp)->v_freelist.tqe_next = (struct vnode *)0; \
	        (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \
	} while(0)

/* true if vp is currently linked onto one of the vnode lists */
#define VONLIST(vp) \
	((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)

/* remove a vnode from free vnode list */
#define VREMFREE(fun, vp) \
	do { \
	        VLISTCHECK((fun), (vp), "free"); \
	        TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \
	        VLISTNONE((vp)); \
	        freevnodes--; \
	        reusablevnodes--; \
	} while(0)


/* remove a vnode from dead vnode list */
#define VREMDEAD(fun, vp) \
	do { \
	        VLISTCHECK((fun), (vp), "dead"); \
	        TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \
	        VLISTNONE((vp)); \
	        vp->v_listflag &= ~VLIST_DEAD; \
	        deadvnodes--; \
	        if (vp->v_listflag & VLIST_NO_REUSE) { \
	                deadvnodes_noreuse--; \
	        } \
	} while(0)


/* remove a vnode from async work vnode list */
#define VREMASYNC_WORK(fun, vp) \
	do { \
	        VLISTCHECK((fun), (vp), "async_work"); \
	        TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist); \
	        VLISTNONE((vp)); \
	        vp->v_listflag &= ~VLIST_ASYNC_WORK; \
	        async_work_vnodes--; \
	        if (!(vp->v_listflag & VLIST_NO_REUSE)) { \
	                reusablevnodes--; \
	        } \
	} while(0)


/* remove a vnode from rage vnode list */
#define VREMRAGE(fun, vp) \
	do { \
	        if ( !(vp->v_listflag & VLIST_RAGE)) \
	                panic("VREMRAGE: vp not on rage list"); \
	        VLISTCHECK((fun), (vp), "rage"); \
	        TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \
	        VLISTNONE((vp)); \
	        vp->v_listflag &= ~VLIST_RAGE; \
	        ragevnodes--; \
	        reusablevnodes--; \
	} while(0)
380
381 static void async_work_continue(void);
382 static void vn_laundry_continue(void);
383 static void wakeup_laundry_thread(void);
384 static void vnode_smr_free(void *, size_t);
385
386 CA_EVENT(freeable_vnodes,
387 CA_INT, numvnodes_min,
388 CA_INT, numvnodes_max,
389 CA_INT, desiredvnodes,
390 CA_INT, numvnodes,
391 CA_INT, freevnodes,
392 CA_INT, deadvnodes,
393 CA_INT, freeablevnodes,
394 CA_INT, busyvnodes,
395 CA_BOOL, threshold_crossed);
396 static CA_EVENT_TYPE(freeable_vnodes) freeable_vnodes_telemetry;
397
398 static bool freeablevnodes_threshold_crossed = false;
399
/*
 * Initialize the vnode management data structures.
 * Called once at boot before any vnodes exist: seeds the vnode lists,
 * sizing limits derived from desiredvnodes, the telemetry record, and
 * starts the two vnode worker threads.
 */
__private_extern__ void
vntblinit(void)
{
	thread_t thread = THREAD_NULL;
	int desiredvnodes_one_percent = desiredvnodes / 100;

	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_rage_list);
	TAILQ_INIT(&vnode_dead_list);
	TAILQ_INIT(&vnode_async_work_list);
	TAILQ_INIT(&mountlist);

	microuptime(&rage_tv);
	/* rapid-age list capacity: 1% of desiredvnodes, but at least RAGE_LIMIT_MIN */
	rage_limit = desiredvnodes_one_percent;
	if (rage_limit < RAGE_LIMIT_MIN) {
		rage_limit = RAGE_LIMIT_MIN;
	}

	/* dead-vnode watermarks: 1% of desiredvnodes capped at 300; high = 2x low */
	deadvnodes_low = desiredvnodes_one_percent;
	if (deadvnodes_low > 300) {
		deadvnodes_low = 300;
	}
	deadvnodes_high = deadvnodes_low * 2;

	/*
	 * Apply the boot-arg selected deallocation policy (see
	 * freeable_vnode_level_t) to the population limits.
	 */
	numvnodes_min = numvnodes_max = desiredvnodes;
	if (bootarg_vn_dealloc_level == DEALLOC_VNODE_ONLY_OVERFLOW) {
		numvnodes_max = desiredvnodes * 2;
		vn_dealloc_level = bootarg_vn_dealloc_level;
	} else if (bootarg_vn_dealloc_level == DEALLOC_VNODE_ALL) {
		numvnodes_min = desiredvnodes_one_percent * 40;
		numvnodes_max = desiredvnodes * 2;
		reusablevnodes_max = (desiredvnodes_one_percent * 20) - deadvnodes_low;
		vn_dealloc_level = bootarg_vn_dealloc_level;
	}

	/* seed the CoreAnalytics telemetry record with the chosen limits */
	bzero(&freeable_vnodes_telemetry, sizeof(CA_EVENT_TYPE(freeable_vnodes)));
	freeable_vnodes_telemetry.numvnodes_min = numvnodes_min;
	freeable_vnodes_telemetry.numvnodes_max = numvnodes_max;
	freeable_vnodes_telemetry.desiredvnodes = desiredvnodes;

	/* when the name cache uses SMR, vnode frees must go through it too */
	if (nc_smr_enabled) {
		zone_enable_smr(vnode_zone, VFS_SMR(), &vnode_smr_free);
	}

	/*
	 * create worker threads
	 */
	kernel_thread_start((thread_continue_t)async_work_continue, NULL, &thread);
	thread_deallocate(thread);
	kernel_thread_start((thread_continue_t)vn_laundry_continue, NULL, &thread);
	thread_deallocate(thread);
}
455
456 /* the timeout is in 10 msecs */
457 int
vnode_waitforwrites(vnode_t vp,int output_target,int slpflag,int slptimeout,const char * msg)458 vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg)
459 {
460 int error = 0;
461 struct timespec ts;
462
463 if (output_target < 0) {
464 return EINVAL;
465 }
466
467 KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);
468
469 if (vp->v_numoutput > output_target) {
470 slpflag |= PDROP;
471
472 vnode_lock_spin(vp);
473
474 while ((vp->v_numoutput > output_target) && error == 0) {
475 if (output_target) {
476 vp->v_flag |= VTHROTTLED;
477 } else {
478 vp->v_flag |= VBWAIT;
479 }
480
481 ts.tv_sec = (slptimeout / 100);
482 ts.tv_nsec = (slptimeout % 1000) * 10 * NSEC_PER_USEC * 1000;
483 error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);
484
485 vnode_lock_spin(vp);
486 }
487 vnode_unlock(vp);
488 }
489 KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);
490
491 return error;
492 }
493
494
/*
 * Account for the start of one write on vp: atomically bump v_numoutput.
 * Paired with vnode_writedone() when the write completes.
 */
void
vnode_startwrite(vnode_t vp)
{
	OSAddAtomic(1, &vp->v_numoutput);
}
500
501
502 void
vnode_writedone(vnode_t vp)503 vnode_writedone(vnode_t vp)
504 {
505 if (vp) {
506 int need_wakeup = 0;
507
508 OSAddAtomic(-1, &vp->v_numoutput);
509
510 vnode_lock_spin(vp);
511
512 if (vp->v_numoutput < 0) {
513 panic("vnode_writedone: numoutput < 0");
514 }
515
516 if ((vp->v_flag & VTHROTTLED)) {
517 vp->v_flag &= ~VTHROTTLED;
518 need_wakeup = 1;
519 }
520 if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
521 vp->v_flag &= ~VBWAIT;
522 need_wakeup = 1;
523 }
524 vnode_unlock(vp);
525
526 if (need_wakeup) {
527 wakeup((caddr_t)&vp->v_numoutput);
528 }
529 }
530 }
531
532
533
534 int
vnode_hasdirtyblks(vnode_t vp)535 vnode_hasdirtyblks(vnode_t vp)
536 {
537 struct cl_writebehind *wbp;
538
539 /*
540 * Not taking the buf_mtx as there is little
541 * point doing it. Even if the lock is taken the
542 * state can change right after that. If their
543 * needs to be a synchronization, it must be driven
544 * by the caller
545 */
546 if (vp->v_dirtyblkhd.lh_first) {
547 return 1;
548 }
549
550 if (!UBCINFOEXISTS(vp)) {
551 return 0;
552 }
553
554 wbp = vp->v_ubcinfo->cl_wbehind;
555
556 if (wbp && (wbp->cl_number || wbp->cl_scmap)) {
557 return 1;
558 }
559
560 return 0;
561 }
562
563 int
vnode_hascleanblks(vnode_t vp)564 vnode_hascleanblks(vnode_t vp)
565 {
566 /*
567 * Not taking the buf_mtx as there is little
568 * point doing it. Even if the lock is taken the
569 * state can change right after that. If their
570 * needs to be a synchronization, it must be driven
571 * by the caller
572 */
573 if (vp->v_cleanblkhd.lh_first) {
574 return 1;
575 }
576 return 0;
577 }
578
/*
 * Flag the mount as being iterated (see vnode_iterate()); undone by
 * vnode_iterate_clear(). Called with the mount lock held.
 */
void
vnode_iterate_setup(mount_t mp)
{
	mp->mnt_lflag |= MNT_LITER;
}
584
/*
 * Scan the mount's vnode list for vnodes that would block an unmount.
 * Returns 1 if any busy vnode is found, 0 otherwise. When
 * print_busy_vnodes is set and FORCECLOSE is not, every busy vnode is
 * logged before returning; otherwise we return on the first one.
 *
 * flags: SKIPSYSTEM  skip vnodes marked VSYSTEM or VNOFLUSH
 *        SKIPSWAP    skip vnodes marked VSWAP
 *        WRITECLOSE  only consider regular files open for writing
 */
int
vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
{
	vnode_t vp;
	int ret = 0;

	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		/* directories are never treated as busy by this check */
		if (vp->v_type == VDIR) {
			continue;
		}
		if (vp == skipvp) {
			continue;
		}
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) {
			continue;
		}
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			continue;
		}
		if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG)) {
			continue;
		}

		/* Look for busy vnode */
		/* busy == has use counts beyond kernel-internal (kusecount) ones */
		if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) {
			ret = 1;
			if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
				vprint_path("vnode_umount_preflight - busy vnode", vp);
			} else {
				return ret;
			}
		} else if (vp->v_iocount > 0) {
			/* Busy if iocount is > 0 for more than 3 seconds */
			tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz);
			if (vp->v_iocount > 0) {
				ret = 1;
				if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
					vprint_path("vnode_umount_preflight - busy vnode", vp);
				} else {
					return ret;
				}
			}
			continue;
		}
	}

	return ret;
}
633
/*
 * This routine prepares iteration by moving all the vnodes to worker queue
 * called with mount lock held
 *
 * Returns 0 if the mount has no vnodes, 1 otherwise. The whole
 * mnt_vnodelist is spliced onto mnt_workerqueue by direct TAILQ pointer
 * manipulation; vnode_iterate() then drains the worker queue and
 * vnode_iterate_reloadq() splices any leftovers back.
 */
int
vnode_iterate_prepare(mount_t mp)
{
	vnode_t vp;

	if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
		/* nothing to do */
		return 0;
	}

	/* splice the entire vnode list onto the (empty) worker queue */
	vp = TAILQ_FIRST(&mp->mnt_vnodelist);
	vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
	mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
	mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;

	TAILQ_INIT(&mp->mnt_vnodelist);
	/* mnt_newvnodes collects vnodes created during iteration; must start empty */
	if (mp->mnt_newvnodes.tqh_first != NULL) {
		panic("vnode_iterate_prepare: newvnode when entering vnode");
	}
	TAILQ_INIT(&mp->mnt_newvnodes);

	return 1;
}
661
662
/* called with mount lock held */
/*
 * Undo vnode_iterate_prepare(): splice any vnodes still on the worker
 * queue back onto the tail of the mount's vnode list, and any vnodes
 * created during the iteration (mnt_newvnodes) onto its head.
 * Returns 1 if new vnodes were moved, 0 otherwise.
 */
int
vnode_iterate_reloadq(mount_t mp)
{
	int moved = 0;

	/* add the remaining entries in workerq to the end of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		struct vnode * mvp;
		mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);

		/* Joining the workerque entities to mount vnode list */
		if (mvp) {
			mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
		} else {
			/* vnode list was empty: worker queue becomes the whole list */
			mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
		}
		mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
		mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
		TAILQ_INIT(&mp->mnt_workerqueue);
	}

	/* add the newvnodes to the head of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
		struct vnode * nlvp;
		nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);

		mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
		nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
		if (mp->mnt_vnodelist.tqh_first) {
			mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
		} else {
			/* list was empty: tail now points at the last new vnode */
			mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
		}
		mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
		TAILQ_INIT(&mp->mnt_newvnodes);
		moved = 1;
	}

	return moved;
}
704
705
/*
 * Clear the iteration-in-progress flag set by vnode_iterate_setup().
 * Called with the mount lock held.
 */
void
vnode_iterate_clear(mount_t mp)
{
	mp->mnt_lflag &= ~MNT_LITER;
}
711
712 #if defined(__x86_64__)
713
714 #include <i386/panic_hooks.h>
715
716 struct vnode_iterate_panic_hook {
717 panic_hook_t hook;
718 mount_t mp;
719 struct vnode *vp;
720 };
721
722 static void
vnode_iterate_panic_hook(panic_hook_t * hook_)723 vnode_iterate_panic_hook(panic_hook_t *hook_)
724 {
725 struct vnode_iterate_panic_hook *hook = (struct vnode_iterate_panic_hook *)hook_;
726 panic_phys_range_t range;
727 uint64_t phys;
728
729 if (panic_phys_range_before(hook->mp, &phys, &range)) {
730 paniclog_append_noflush("mp = %p, phys = %p, prev (%p: %p-%p)\n",
731 hook->mp, phys, range.type, range.phys_start,
732 range.phys_start + range.len);
733 } else {
734 paniclog_append_noflush("mp = %p, phys = %p, prev (!)\n", hook->mp, phys);
735 }
736
737 if (panic_phys_range_before(hook->vp, &phys, &range)) {
738 paniclog_append_noflush("vp = %p, phys = %p, prev (%p: %p-%p)\n",
739 hook->vp, phys, range.type, range.phys_start,
740 range.phys_start + range.len);
741 } else {
742 paniclog_append_noflush("vp = %p, phys = %p, prev (!)\n", hook->vp, phys);
743 }
744 panic_dump_mem((void *)(((vm_offset_t)hook->mp - 4096) & ~4095), 12288);
745 }
746 #endif /* defined(__x86_64__) */
747
/*
 * Apply 'callout(vp, arg)' to every vnode on mount 'mp' that can be
 * brought into a usable state (valid iocount via vget_internal).
 * The callout's return value controls disposition:
 *   VNODE_RETURNED       - iocount released here, continue
 *   VNODE_RETURNED_DONE  - iocount released here, stop iterating
 *   VNODE_CLAIMED        - callout kept the iocount, continue
 *   VNODE_CLAIMED_DONE   - callout kept the iocount, stop iterating
 * 'flags' is passed through to vget_internal (e.g. VNODE_RELOAD).
 */
int
vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
    void *arg)
{
	struct vnode *vp;
	int vid, retval;
	int ret = 0;

	/*
	 * The mount iterate mutex is held for the duration of the iteration.
	 * This can be done by a state flag on the mount structure but we can
	 * run into priority inversion issues sometimes.
	 * Using a mutex allows us to benefit from the priority donation
	 * mechanisms in the kernel for locks. This mutex should never be
	 * acquired in spin mode and it should be acquired before attempting to
	 * acquire the mount lock.
	 */
	mount_iterate_lock(mp);

	mount_lock(mp);

	vnode_iterate_setup(mp);

	/* If it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		mount_iterate_unlock(mp);
		return ret;
	}

#if defined(__x86_64__)
	struct vnode_iterate_panic_hook hook;
	hook.mp = mp;
	hook.vp = NULL;
	panic_hook(&hook.hook, vnode_iterate_panic_hook);
#endif
	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
#if defined(__x86_64__)
		hook.vp = vp;
#endif
		/* move the vnode back onto the mount list before touching it */
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
		vid = vp->v_id;
		/* skip vnodes being reclaimed or no longer belonging to this mount */
		if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
			continue;
		}
		/* hold the vnode so it survives dropping the mount lock */
		vnode_hold(vp);
		mount_unlock(mp);

		if (vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) {
			/* couldn't get an iocount (vnode changed identity); skip it */
			mount_lock(mp);
			vnode_drop(vp);
			continue;
		}
		vnode_drop(vp);
		if (flags & VNODE_RELOAD) {
			/*
			 * we're reloading the filesystem
			 * cast out any inactive vnodes...
			 */
			if (vnode_reload(vp)) {
				/* vnode will be recycled on the refcount drop */
				vnode_put(vp);
				mount_lock(mp);
				continue;
			}
		}

		retval = callout(vp, arg);

		switch (retval) {
		case VNODE_RETURNED:
		case VNODE_RETURNED_DONE:
			vnode_put(vp);
			if (retval == VNODE_RETURNED_DONE) {
				mount_lock(mp);
				ret = 0;
				goto out;
			}
			break;

		case VNODE_CLAIMED_DONE:
			/* callout kept the iocount; stop iterating */
			mount_lock(mp);
			ret = 0;
			goto out;
		case VNODE_CLAIMED:
		default:
			break;
		}
		mount_lock(mp);
	}

out:
#if defined(__x86_64__)
	panic_unhook(&hook.hook);
#endif
	/* put any unvisited vnodes back on the mount's list */
	(void)vnode_iterate_reloadq(mp);
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	mount_iterate_unlock(mp);
	return ret;
}
855
/* Serialize rename operations on this mount (mnt_renamelock). */
void
mount_lock_renames(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_renamelock);
}
861
/* Release the rename serialization lock taken by mount_lock_renames(). */
void
mount_unlock_renames(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_renamelock);
}
867
/*
 * Held for the whole of a vnode_iterate() over this mount; see the
 * rationale in vnode_iterate(). Acquire before the mount lock, never
 * in spin mode.
 */
void
mount_iterate_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_iter_lock);
}
873
/* Release the iteration lock taken by mount_iterate_lock(). */
void
mount_iterate_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_iter_lock);
}
879
/* Acquire the mount mutex (mnt_mlock). */
void
mount_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_mlock);
}
885
/* Acquire the mount mutex in spin mode (for short critical sections). */
void
mount_lock_spin(mount_t mp)
{
	lck_mtx_lock_spin(&mp->mnt_mlock);
}
891
/* Release the mount mutex (works for both normal and spin acquisition). */
void
mount_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_mlock);
}
897
898
/*
 * Take a reference on the mount (mnt_count), preventing it from being
 * drained by mount_refdrain(). 'locked' indicates the caller already
 * holds the mount lock.
 */
void
mount_ref(mount_t mp, int locked)
{
	if (!locked) {
		mount_lock_spin(mp);
	}

	mp->mnt_count++;

	if (!locked) {
		mount_unlock(mp);
	}
}
912
913
/*
 * Drop a reference taken by mount_ref(). If this was the last reference
 * and a drain is in progress (MNT_LDRAIN), wake the thread sleeping in
 * mount_refdrain().
 */
void
mount_drop(mount_t mp, int locked)
{
	if (!locked) {
		mount_lock_spin(mp);
	}

	mp->mnt_count--;

	if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN)) {
		wakeup(&mp->mnt_lflag);
	}

	if (!locked) {
		mount_unlock(mp);
	}
}
931
932
933 int
mount_iterref(mount_t mp,int locked)934 mount_iterref(mount_t mp, int locked)
935 {
936 int retval = 0;
937
938 if (!locked) {
939 mount_list_lock();
940 }
941 if (mp->mnt_iterref < 0) {
942 retval = 1;
943 } else {
944 mp->mnt_iterref++;
945 }
946 if (!locked) {
947 mount_list_unlock();
948 }
949 return retval;
950 }
951
952 int
mount_isdrained(mount_t mp,int locked)953 mount_isdrained(mount_t mp, int locked)
954 {
955 int retval;
956
957 if (!locked) {
958 mount_list_lock();
959 }
960 if (mp->mnt_iterref < 0) {
961 retval = 1;
962 } else {
963 retval = 0;
964 }
965 if (!locked) {
966 mount_list_unlock();
967 }
968 return retval;
969 }
970
/*
 * Release an iteration reference taken by mount_iterref() and wake any
 * thread blocked in mount_iterdrain().
 */
void
mount_iterdrop(mount_t mp)
{
	mount_list_lock();
	mp->mnt_iterref--;
	wakeup(&mp->mnt_iterref);
	mount_list_unlock();
}
979
/*
 * Wait for all outstanding iteration references to be dropped, then mark
 * the mount as drained (mnt_iterref == -1) so further mount_iterref()
 * calls fail. Undone by mount_iterreset().
 */
void
mount_iterdrain(mount_t mp)
{
	mount_list_lock();
	while (mp->mnt_iterref) {
		msleep((caddr_t)&mp->mnt_iterref, &mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
	}
	/* mount iterations drained */
	mp->mnt_iterref = -1;
	mount_list_unlock();
}
/*
 * Re-enable mount iteration after a mount_iterdrain() by clearing the
 * drained marker (-1).
 */
void
mount_iterreset(mount_t mp)
{
	mount_list_lock();
	if (mp->mnt_iterref == -1) {
		mp->mnt_iterref = 0;
	}
	mount_list_unlock();
}
1000
/* always called with mount lock held */
/*
 * Drain mount_ref() references: set MNT_LDRAIN and sleep until mnt_count
 * reaches zero (mount_drop() wakes us). Panics if a drain is already in
 * progress or if vnodes remain attached afterwards. Always returns 0.
 */
int
mount_refdrain(mount_t mp)
{
	if (mp->mnt_lflag & MNT_LDRAIN) {
		panic("already in drain");
	}
	mp->mnt_lflag |= MNT_LDRAIN;

	while (mp->mnt_count) {
		msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);
	}

	if (mp->mnt_vnodelist.tqh_first != NULL) {
		panic("mount_refdrain: dangling vnode");
	}

	mp->mnt_lflag &= ~MNT_LDRAIN;

	return 0;
}
1022
/* Tags the mount point as not supporting extended readdir for NFS exports */
void
mount_set_noreaddirext(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_DENY_READDIREXT;
	mount_unlock(mp);
}
1031
/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 *
 * On success holds mnt_rwlock shared (released by vfs_unbusy()) and
 * returns 0. Returns ENOENT if the mount is dead, or if an unmount is in
 * progress and LK_NOWAIT was passed; otherwise sleeps until the unmount
 * attempt finishes.
 */
int
vfs_busy(mount_t mp, int flags)
{
restart:
	/* unlocked fast path: a dead mount never comes back */
	if (mp->mnt_lflag & MNT_LDEAD) {
		return ENOENT;
	}

	mount_lock(mp);

	if (mp->mnt_lflag & MNT_LUNMOUNT) {
		if (flags & LK_NOWAIT || mp->mnt_lflag & MNT_LDEAD) {
			mount_unlock(mp);
			return ENOENT;
		}

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		mp->mnt_lflag |= MNT_LWAIT;
		/* PDROP: the mount lock is released by msleep */
		msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
		return ENOENT;
	}

	mount_unlock(mp);

	lck_rw_lock_shared(&mp->mnt_rwlock);

	/*
	 * Until we are granted the rwlock, it's possible for the mount point to
	 * change state, so re-evaluate before granting the vfs_busy.
	 */
	if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
		lck_rw_done(&mp->mnt_rwlock);
		goto restart;
	}
	return 0;
}
1077
/*
 * Free a busy filesystem.
 * Releases the shared hold on mnt_rwlock taken by vfs_busy().
 */
void
vfs_unbusy(mount_t mp)
{
	lck_rw_done(&mp->mnt_rwlock);
}
1086
1087
1088
/*
 * Undo vfs_rootmountalloc_internal() after a failed root mount attempt:
 * drop the vfstable reference, release the busy state, and free the mount
 * structure (synchronizing with SMR readers first when the name cache
 * uses SMR).
 */
static void
vfs_rootmountfailed(mount_t mp)
{
	mount_list_lock();
	mp->mnt_vtable->vfc_refcount--;
	mount_list_unlock();

	vfs_unbusy(mp);

	if (nc_smr_enabled) {
		vfs_smr_synchronize();
	}

	mount_lock_destroy(mp);

#if CONFIG_MACF
	mac_mount_label_destroy(mp);
#endif

	zfree(mount_zone, mp);
}
1110
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 *
 * Returns the new mount with a LK_NOWAIT busy grant held and the
 * vfstable refcount bumped; on failure paths the caller is expected to
 * use vfs_rootmountfailed() to undo both.
 */
static mount_t
vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
{
	mount_t mp;

	/* Z_ZERO: all fields not explicitly set below start out zeroed. */
	mp = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO);
	/* Initialize the default IO constraints */
	mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
	mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
	mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
	mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
	mp->mnt_devblocksize = DEV_BSIZE;
	mp->mnt_alignmentmask = PAGE_MASK;
	mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
	mp->mnt_ioscale = 1;
	mp->mnt_ioflags = 0;
	mp->mnt_realrootvp = NULLVP;
	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
	mp->mnt_throttle_mask = LOWPRI_MAX_NUM_DEV - 1;
	mp->mnt_devbsdunit = 0;

	mount_lock_init(mp);
	/* Cannot fail: the freshly created mount is not being unmounted. */
	(void)vfs_busy(mp, LK_NOWAIT);

	TAILQ_INIT(&mp->mnt_vnodelist);
	TAILQ_INIT(&mp->mnt_workerqueue);
	TAILQ_INIT(&mp->mnt_newvnodes);

	mp->mnt_vtable = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	/* Root is initially mounted read-only; remounted rw later. */
	mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
	mp->mnt_vnodecovered = NULLVP;
	//mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;

	mount_list_lock();
	vfsp->vfc_refcount++;
	mount_list_unlock();

	strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
	/* f_mntonname becomes "/" (remainder already zeroed by Z_ZERO). */
	mp->mnt_vfsstat.f_mntonname[0] = '/';
	/* XXX const poisoning layering violation */
	(void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);

#if CONFIG_MACF
	mac_mount_label_init(mp);
	mac_mount_label_associate(vfs_context_kernel(), mp);
#endif
	return mp;
}
1167
1168 errno_t
vfs_rootmountalloc(const char * fstypename,const char * devname,mount_t * mpp)1169 vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
1170 {
1171 struct vfstable *vfsp;
1172
1173 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
1174 if (!strncmp(vfsp->vfc_name, fstypename,
1175 sizeof(vfsp->vfc_name))) {
1176 break;
1177 }
1178 }
1179 if (vfsp == NULL) {
1180 return ENODEV;
1181 }
1182
1183 *mpp = vfs_rootmountalloc_internal(vfsp, devname);
1184
1185 if (*mpp) {
1186 return 0;
1187 }
1188
1189 return ENOMEM;
1190 }
1191
1192 #define DBG_MOUNTROOT (FSDBG_CODE(DBG_MOUNT, 0))
1193
/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
extern int (*mountroot)(void);

/*
 * Mount the root filesystem.
 *
 * Returns 0 on success; an error from the netboot hook, bdevvp(), or
 * (under CONFIG_MACF) the labeling path; ENODEV when every candidate
 * filesystem type fails to mount the root device.
 */
int
vfs_mountroot(void)
{
#if CONFIG_MACF
	struct vnode *vp;
#endif
	struct vfstable *vfsp;
	vfs_context_t ctx = vfs_context_kernel();
	struct vfs_attr vfsattr;
	int error;
	mount_t mp;
	vnode_t bdevvp_rootvp;

	/*
	 * Reset any prior "unmounting everything" state. This handles the
	 * situation where mount root and then unmountall and re-mountroot
	 * a new image (see bsd/kern/imageboot.c).
	 */
	vfs_unmountall_started = vfs_unmountall_finished = 0;
	OSMemoryBarrier();

	KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_START);
	if (mountroot != NULL) {
		/*
		 * used for netboot which follows a different set of rules
		 */
		error = (*mountroot)();

		KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 0);
		return error;
	}
	/* Create a block-device vnode for the root device. */
	if ((error = bdevvp(rootdev, &rootvp))) {
		printf("vfs_mountroot: can't setup bdevvp\n");

		KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 1);
		return error;
	}
	/*
	 * 4951998 - code we call in vfc_mountroot may replace rootvp
	 * so keep a local copy for some house keeping.
	 */
	bdevvp_rootvp = rootvp;

	/* Try each registered filesystem type that can mount a root. */
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL
		    && !ISSET(vfsp->vfc_vfsflags, VFC_VFSCANMOUNTROOT)) {
			continue;
		}

		mp = vfs_rootmountalloc_internal(vfsp, "root_device");
		mp->mnt_devvp = rootvp;

		if (vfsp->vfc_mountroot) {
			error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx);
		} else {
			error = VFS_MOUNT(mp, rootvp, 0, ctx);
		}

		if (!error) {
			if (bdevvp_rootvp != rootvp) {
				/*
				 * rootvp changed...
				 * bump the iocount and fix up mnt_devvp for the
				 * new rootvp (it will already have a usecount taken)...
				 * drop the iocount and the usecount on the original
				 * since we are no longer going to use it...
				 */
				vnode_getwithref(rootvp);
				mp->mnt_devvp = rootvp;

				vnode_rele(bdevvp_rootvp);
				vnode_put(bdevvp_rootvp);
			}
			mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;

			/* Drop the busy grant taken at allocation time. */
			vfs_unbusy(mp);

			mount_list_add(mp);

			/*
			 * cache the IO attributes for the underlying physical media...
			 * an error return indicates the underlying driver doesn't
			 * support all the queries necessary... however, reasonable
			 * defaults will have been set, so no reason to bail or care
			 */
			vfs_init_io_attributes(rootvp, mp);

			if (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) {
				root_is_CF_drive = TRUE;
			}

			/*
			 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
			 */
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
				mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
			}
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
				mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
			}

#if defined(XNU_TARGET_OS_OSX)
			/* Pick a boot-progress disk speed hint by media class. */
			uint32_t speed;

			if (MNTK_VIRTUALDEV & mp->mnt_kern_flag) {
				speed = 128;
			} else if (disk_conditioner_mount_is_ssd(mp)) {
				speed = 7 * 256;
			} else {
				speed = 256;
			}
			vc_progress_setdiskspeed(speed);
#endif /* XNU_TARGET_OS_OSX */
			/*
			 * Probe root file system for additional features.
			 */
			(void)VFS_START(mp, 0, ctx);

			VFSATTR_INIT(&vfsattr);
			VFSATTR_WANTED(&vfsattr, f_capabilities);
			/*
			 * A capability counts only when both the capability bit
			 * and its corresponding valid bit are set.
			 */
			if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
			    VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
					mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
				}
#if NAMEDSTREAMS
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
					mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
				}
#endif
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
					mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
				}

				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) {
					mp->mnt_kern_flag |= MNTK_DIR_HARDLINKS;
				}
			}

			/*
			 * get rid of iocount reference returned
			 * by bdevvp (or picked up by us on the substituted
			 * rootvp)... it (or we) will have also taken
			 * a usecount reference which we want to keep
			 */
			vnode_put(rootvp);

#if CONFIG_MACF
			if ((vfs_flags(mp) & MNT_MULTILABEL) == 0) {
				KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 2);
				return 0;
			}

			error = VFS_ROOT(mp, &vp, ctx);
			if (error) {
				printf("%s() VFS_ROOT() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
			error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
			/*
			 * get rid of reference provided by VFS_ROOT
			 */
			vnode_put(vp);

			if (error) {
				printf("%s() vnode_label() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
#endif
			KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 3);
			return 0;
		}
		vfs_rootmountfailed(mp);
#if CONFIG_MACF
fail:
#endif
		if (error != EINVAL) {
			printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
		}
	}
	KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error ? error : ENODEV, 4);
	return ENODEV;
}
1393
/*
 * vfs_iterate() callback: flush all name-cache entries for one mount.
 * Used after the rootvnode pivot, when cached names are stale.
 */
static int
cache_purge_callback(mount_t mp, __unused void * arg)
{
	cache_purgevfs(mp);
	return VFS_RETURNED;
}
1400
1401 extern lck_rw_t rootvnode_rw_lock;
1402 extern void set_rootvnode(vnode_t);
1403
1404
/*
 * vfs_iterate() callback: recompute f_mntonname for a mount after the
 * root pivot, by resolving the path of its covered vnode.
 *
 * Mounts at exactly "/" or "/dev" are skipped (their names are already
 * correct/maintained elsewhere).
 */
static int
mntonname_fixup_callback(mount_t mp, __unused void *arg)
{
	int error = 0;

	/*
	 * sizeof("...") includes the NUL, so these strncmp calls match the
	 * exact strings "/" and "/dev" only, not prefixes.
	 */
	if ((strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/", sizeof("/")) == 0) ||
	    (strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/dev", sizeof("/dev")) == 0)) {
		return 0;
	}

	/* NOTE(review): returns -1 (not an errno / VFS_RETURNED value) when
	 * the mount is busy — confirm how vfs_iterate treats this value. */
	if ((error = vfs_busy(mp, LK_NOWAIT))) {
		printf("vfs_busy failed with %d for %s\n", error, mp->mnt_vfsstat.f_mntonname);
		return -1;
	}

	size_t pathlen = MAXPATHLEN;
	if ((error = vn_getpath_ext(mp->mnt_vnodecovered, NULL, mp->mnt_vfsstat.f_mntonname, &pathlen, VN_GETPATH_FSENTER))) {
		printf("vn_getpath_ext failed with %d for mnt_vnodecovered of %s\n", error, mp->mnt_vfsstat.f_mntonname);
	}

	vfs_unbusy(mp);

	return error;
}
1429
/*
 * vfs_iterate() callback: clear MNTK_BACKS_ROOT on a mount, under an
 * exclusive hold of its rwlock.  After a root switch the caller is
 * responsible for re-setting the flag where appropriate.
 */
static int
clear_mntk_backs_root_callback(mount_t mp, __unused void *arg)
{
	lck_rw_lock_exclusive(&mp->mnt_rwlock);
	mp->mnt_kern_flag &= ~MNTK_BACKS_ROOT;
	lck_rw_done(&mp->mnt_rwlock);
	return VFS_RETURNED;
}
1438
1439 static int
verify_incoming_rootfs(vnode_t * incoming_rootvnodep,vfs_context_t ctx,vfs_switch_root_flags_t flags)1440 verify_incoming_rootfs(vnode_t *incoming_rootvnodep, vfs_context_t ctx,
1441 vfs_switch_root_flags_t flags)
1442 {
1443 mount_t mp;
1444 vnode_t tdp;
1445 vnode_t incoming_rootvnode_with_iocount = *incoming_rootvnodep;
1446 vnode_t incoming_rootvnode_with_usecount = NULLVP;
1447 int error = 0;
1448
1449 if (vnode_vtype(incoming_rootvnode_with_iocount) != VDIR) {
1450 printf("Incoming rootfs path not a directory\n");
1451 error = ENOTDIR;
1452 goto done;
1453 }
1454
1455 /*
1456 * Before we call VFS_ROOT, we have to let go of the iocount already
1457 * acquired, but before doing that get a usecount.
1458 */
1459 vnode_ref_ext(incoming_rootvnode_with_iocount, 0, VNODE_REF_FORCE);
1460 incoming_rootvnode_with_usecount = incoming_rootvnode_with_iocount;
1461 vnode_lock_spin(incoming_rootvnode_with_usecount);
1462 if ((mp = incoming_rootvnode_with_usecount->v_mount)) {
1463 mp->mnt_crossref++;
1464 vnode_unlock(incoming_rootvnode_with_usecount);
1465 } else {
1466 vnode_unlock(incoming_rootvnode_with_usecount);
1467 printf("Incoming rootfs root vnode does not have associated mount\n");
1468 error = ENOTDIR;
1469 goto done;
1470 }
1471
1472 if (vfs_busy(mp, LK_NOWAIT)) {
1473 printf("Incoming rootfs root vnode mount is busy\n");
1474 error = ENOENT;
1475 goto out;
1476 }
1477
1478 vnode_put(incoming_rootvnode_with_iocount);
1479 incoming_rootvnode_with_iocount = NULLVP;
1480
1481 error = VFS_ROOT(mp, &tdp, ctx);
1482
1483 if (error) {
1484 printf("Could not get rootvnode of incoming rootfs\n");
1485 } else if (tdp != incoming_rootvnode_with_usecount) {
1486 vnode_put(tdp);
1487 tdp = NULLVP;
1488 printf("Incoming rootfs root vnode mount is is not a mountpoint\n");
1489 error = EINVAL;
1490 goto out_busy;
1491 } else {
1492 incoming_rootvnode_with_iocount = tdp;
1493 tdp = NULLVP;
1494 }
1495
1496 if ((flags & VFSSR_VIRTUALDEV_PROHIBITED) != 0) {
1497 if (mp->mnt_kern_flag & MNTK_VIRTUALDEV) {
1498 error = ENODEV;
1499 }
1500 if (error) {
1501 printf("Incoming rootfs is backed by a virtual device; cannot switch to it");
1502 goto out_busy;
1503 }
1504 }
1505
1506 out_busy:
1507 vfs_unbusy(mp);
1508
1509 out:
1510 vnode_lock(incoming_rootvnode_with_usecount);
1511 mp->mnt_crossref--;
1512 if (mp->mnt_crossref < 0) {
1513 panic("mount cross refs -ve");
1514 }
1515 vnode_unlock(incoming_rootvnode_with_usecount);
1516
1517 done:
1518 if (incoming_rootvnode_with_usecount) {
1519 vnode_rele(incoming_rootvnode_with_usecount);
1520 incoming_rootvnode_with_usecount = NULLVP;
1521 }
1522
1523 if (error && incoming_rootvnode_with_iocount) {
1524 vnode_put(incoming_rootvnode_with_iocount);
1525 incoming_rootvnode_with_iocount = NULLVP;
1526 }
1527
1528 *incoming_rootvnodep = incoming_rootvnode_with_iocount;
1529 return error;
1530 }
1531
/*
 * vfs_switch_root()
 *
 * Move the current root volume, and put a different volume at the root.
 *
 * incoming_vol_old_path: This is the path where the incoming root volume
 * is mounted when this function begins.
 * outgoing_vol_new_path: This is the path where the outgoing root volume
 * will be mounted when this function (successfully) ends.
 * Note: Do not use a leading slash.
 *
 * Volumes mounted at several fixed points (including /dev) will be preserved
 * at the same absolute path. That means they will move within the folder
 * hierarchy during the pivot operation. For example, /dev before the pivot
 * will be at /dev after the pivot.
 *
 * If any filesystem has MNTK_BACKS_ROOT set, it will be cleared. If the
 * incoming root volume is actually a disk image backed by some other
 * filesystem, it is the caller's responsibility to re-set MNTK_BACKS_ROOT
 * as appropriate.
 *
 * Returns 0 on success; ENOENT/ENOTDIR/EINVAL (and others from
 * verify_incoming_rootfs) on validation failure.  Once the destructive
 * phase begins (after all lookups succeed) there is no rollback.
 */
int
vfs_switch_root(const char *incoming_vol_old_path,
    const char *outgoing_vol_new_path,
    vfs_switch_root_flags_t flags)
{
	// grumble grumble
#define countof(x) (sizeof(x) / sizeof(x[0]))

	/* Bookkeeping for one mount that must survive the pivot in place. */
	struct preserved_mount {
		vnode_t pm_rootvnode;       /* root vnode of the preserved mount (iocount held) */
		mount_t pm_mount;           /* the preserved mount itself */
		vnode_t pm_new_covered_vp;  /* its mountpoint dir on the incoming root */
		vnode_t pm_old_covered_vp;  /* its former mountpoint dir on the old root */
		const char *pm_path;        /* path relative to the root, no leading slash */
	};

	vfs_context_t ctx = vfs_context_kernel();
	vnode_t incoming_rootvnode = NULLVP;
	vnode_t outgoing_vol_new_covered_vp = NULLVP;
	vnode_t incoming_vol_old_covered_vp = NULLVP;
	mount_t outgoing = NULL;
	mount_t incoming = NULL;

	struct preserved_mount devfs = { NULLVP, NULL, NULLVP, NULLVP, "dev" };
	struct preserved_mount preboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Preboot" };
	struct preserved_mount recovery = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Recovery" };
	struct preserved_mount vm = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/VM" };
	struct preserved_mount update = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Update" };
	struct preserved_mount iscPreboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/iSCPreboot" };
	struct preserved_mount hardware = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Hardware" };
	struct preserved_mount xarts = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/xarts" };
	struct preserved_mount factorylogs = { NULLVP, NULL, NULLVP, NULLVP, "FactoryLogs" };
	struct preserved_mount idiags = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Diags" };

	struct preserved_mount *preserved[10];
	preserved[0] = &devfs;
	preserved[1] = &preboot;
	preserved[2] = &recovery;
	preserved[3] = &vm;
	preserved[4] = &update;
	preserved[5] = &iscPreboot;
	preserved[6] = &hardware;
	preserved[7] = &xarts;
	preserved[8] = &factorylogs;
	preserved[9] = &idiags;

	int error;

	printf("%s : shuffling mount points : %s <-> / <-> %s\n", __FUNCTION__, incoming_vol_old_path, outgoing_vol_new_path);

	if (outgoing_vol_new_path[0] == '/') {
		// I should have written this to be more helpful and just advance the pointer forward past the slash
		printf("Do not use a leading slash in outgoing_vol_new_path\n");
		return EINVAL;
	}

	// Set incoming_rootvnode.
	// Find the vnode representing the mountpoint of the new root
	// filesystem. That will be the new root directory.
	error = vnode_lookup(incoming_vol_old_path, 0, &incoming_rootvnode, ctx);
	if (error) {
		printf("Incoming rootfs root vnode not found\n");
		error = ENOENT;
		goto done;
	}

	/*
	 * This function drops the iocount and sets the vnode to NULL on error.
	 */
	error = verify_incoming_rootfs(&incoming_rootvnode, ctx, flags);
	if (error) {
		goto done;
	}

	/*
	 * Set outgoing_vol_new_covered_vp.
	 * Find the vnode representing the future mountpoint of the old
	 * root filesystem, inside the directory incoming_rootvnode.
	 * Right now it's at "/incoming_vol_old_path/outgoing_vol_new_path".
	 * soon it will become "/oldrootfs_path_after", which will be covered.
	 */
	error = vnode_lookupat(outgoing_vol_new_path, 0, &outgoing_vol_new_covered_vp, ctx, incoming_rootvnode);
	if (error) {
		printf("Outgoing rootfs path not found, abandoning / switch, error = %d\n", error);
		error = ENOENT;
		goto done;
	}
	if (vnode_vtype(outgoing_vol_new_covered_vp) != VDIR) {
		printf("Outgoing rootfs path is not a directory, abandoning / switch\n");
		error = ENOTDIR;
		goto done;
	}

	/*
	 * Find the preserved mounts - see if they are mounted. Get their root
	 * vnode if they are. If they aren't, leave rootvnode NULL which will
	 * be the signal to ignore this mount later on.
	 *
	 * Also get preserved mounts' new_covered_vp.
	 * Find the node representing the folder "dev" inside the directory newrootvnode.
	 * Right now it's at "/incoming_vol_old_path/dev".
	 * Soon it will become /dev, which will be covered by the devfs mountpoint.
	 */
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];

		error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_rootvnode, ctx, rootvnode);
		if (error) {
			printf("skipping preserved mountpoint because not found or error: %d: %s\n", error, pmi->pm_path);
			// not fatal. try the next one in the list.
			continue;
		}
		bool is_mountpoint = false;
		vnode_lock_spin(pmi->pm_rootvnode);
		if ((pmi->pm_rootvnode->v_flag & VROOT) != 0) {
			is_mountpoint = true;
		}
		vnode_unlock(pmi->pm_rootvnode);
		if (!is_mountpoint) {
			printf("skipping preserved mountpoint because not a mountpoint: %s\n", pmi->pm_path);
			vnode_put(pmi->pm_rootvnode);
			pmi->pm_rootvnode = NULLVP;
			// not fatal. try the next one in the list.
			continue;
		}

		error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_new_covered_vp, ctx, incoming_rootvnode);
		if (error) {
			printf("preserved new mount directory not found or error: %d: %s\n", error, pmi->pm_path);
			error = ENOENT;
			goto done;
		}
		if (vnode_vtype(pmi->pm_new_covered_vp) != VDIR) {
			printf("preserved new mount directory not directory: %s\n", pmi->pm_path);
			error = ENOTDIR;
			goto done;
		}

		printf("will preserve mountpoint across pivot: /%s\n", pmi->pm_path);
	}

	/*
	 * --
	 * At this point, everything has been prepared and all error conditions
	 * have been checked. We check everything we can before this point;
	 * from now on we start making destructive changes, and we can't stop
	 * until we reach the end.
	 * ----
	 */

	/* this usecount is transferred to the mnt_vnodecovered */
	vnode_ref_ext(outgoing_vol_new_covered_vp, 0, VNODE_REF_FORCE);
	/* this usecount is transferred to set_rootvnode */
	vnode_ref_ext(incoming_rootvnode, 0, VNODE_REF_FORCE);


	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		/* this usecount is transferred to the mnt_vnodecovered */
		vnode_ref_ext(pmi->pm_new_covered_vp, 0, VNODE_REF_FORCE);

		/* The new_covered_vp is a mountpoint from now on. */
		vnode_lock_spin(pmi->pm_new_covered_vp);
		pmi->pm_new_covered_vp->v_flag |= VMOUNTEDHERE;
		vnode_unlock(pmi->pm_new_covered_vp);
	}

	/* The outgoing_vol_new_covered_vp is a mountpoint from now on. */
	vnode_lock_spin(outgoing_vol_new_covered_vp);
	outgoing_vol_new_covered_vp->v_flag |= VMOUNTEDHERE;
	vnode_unlock(outgoing_vol_new_covered_vp);


	/*
	 * Identify the mount_ts of the mounted filesystems that are being
	 * manipulated: outgoing rootfs, incoming rootfs, and the preserved
	 * mounts.
	 */
	outgoing = rootvnode->v_mount;
	incoming = incoming_rootvnode->v_mount;
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		pmi->pm_mount = pmi->pm_rootvnode->v_mount;
	}

	/* Block concurrent vnode_lookups for the duration of the pivot. */
	lck_rw_lock_exclusive(&rootvnode_rw_lock);

	/* Setup incoming as the new rootfs */
	lck_rw_lock_exclusive(&incoming->mnt_rwlock);
	incoming_vol_old_covered_vp = incoming->mnt_vnodecovered;
	incoming->mnt_vnodecovered = NULLVP;
	strlcpy(incoming->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
	incoming->mnt_flag |= MNT_ROOTFS;
	lck_rw_done(&incoming->mnt_rwlock);

	/*
	 * The preserved mountpoints will now be moved to
	 * incoming_rootnode/pm_path, and then by the end of the function,
	 * since incoming_rootnode is going to /, the preserved mounts
	 * will be end up back at /pm_path
	 */
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		lck_rw_lock_exclusive(&pmi->pm_mount->mnt_rwlock);
		pmi->pm_old_covered_vp = pmi->pm_mount->mnt_vnodecovered;
		pmi->pm_mount->mnt_vnodecovered = pmi->pm_new_covered_vp;
		vnode_lock_spin(pmi->pm_new_covered_vp);
		pmi->pm_new_covered_vp->v_mountedhere = pmi->pm_mount;
		SET(pmi->pm_new_covered_vp->v_flag, VMOUNTEDHERE);
		vnode_unlock(pmi->pm_new_covered_vp);
		lck_rw_done(&pmi->pm_mount->mnt_rwlock);
	}

	/*
	 * The old root volume now covers outgoing_vol_new_covered_vp
	 * on the new root volume. Remove the ROOTFS marker.
	 * Now it is to be found at outgoing_vol_new_path
	 */
	lck_rw_lock_exclusive(&outgoing->mnt_rwlock);
	outgoing->mnt_vnodecovered = outgoing_vol_new_covered_vp;
	strlcpy(outgoing->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
	strlcat(outgoing->mnt_vfsstat.f_mntonname, outgoing_vol_new_path, MAXPATHLEN);
	outgoing->mnt_flag &= ~MNT_ROOTFS;
	vnode_lock_spin(outgoing_vol_new_covered_vp);
	outgoing_vol_new_covered_vp->v_mountedhere = outgoing;
	vnode_unlock(outgoing_vol_new_covered_vp);
	lck_rw_done(&outgoing->mnt_rwlock);

	if (!(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV) &&
	    (TAILQ_FIRST(&mountlist) == outgoing)) {
		vfs_setmntsystem(outgoing);
	}

	/*
	 * Finally, remove the mount_t linkage from the previously covered
	 * vnodes on the old root volume. These were incoming_vol_old_path,
	 * and each preserved mounts's "/pm_path". The filesystems previously
	 * mounted there have already been moved away.
	 */
	vnode_lock_spin(incoming_vol_old_covered_vp);
	incoming_vol_old_covered_vp->v_flag &= ~VMOUNT;
	incoming_vol_old_covered_vp->v_mountedhere = NULL;
	vnode_unlock(incoming_vol_old_covered_vp);

	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		vnode_lock_spin(pmi->pm_old_covered_vp);
		CLR(pmi->pm_old_covered_vp->v_flag, VMOUNTEDHERE);
		pmi->pm_old_covered_vp->v_mountedhere = NULL;
		vnode_unlock(pmi->pm_old_covered_vp);
	}

	/*
	 * Clear the name cache since many cached names are now invalid.
	 */
	vfs_iterate(0 /* flags */, cache_purge_callback, NULL);

	/*
	 * Actually change the rootvnode! And finally drop the lock that
	 * prevents concurrent vnode_lookups.
	 */
	set_rootvnode(incoming_rootvnode);
	lck_rw_unlock_exclusive(&rootvnode_rw_lock);

	if (!(incoming->mnt_kern_flag & MNTK_VIRTUALDEV) &&
	    !(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV)) {
		/*
		 * Switch the order of mount structures in the mountlist, new root
		 * mount moves to the head of the list followed by /dev and the other
		 * preserved mounts then all the preexisting mounts (old rootfs + any
		 * others)
		 */
		mount_list_lock();
		for (size_t i = 0; i < countof(preserved); i++) {
			struct preserved_mount *pmi = preserved[i];
			if (pmi->pm_rootvnode == NULLVP) {
				continue;
			}

			TAILQ_REMOVE(&mountlist, pmi->pm_mount, mnt_list);
			TAILQ_INSERT_HEAD(&mountlist, pmi->pm_mount, mnt_list);
		}
		TAILQ_REMOVE(&mountlist, incoming, mnt_list);
		TAILQ_INSERT_HEAD(&mountlist, incoming, mnt_list);
		mount_list_unlock();
	}

	/*
	 * Fixups across all volumes
	 */
	vfs_iterate(0 /* flags */, mntonname_fixup_callback, NULL);
	vfs_iterate(0 /* flags */, clear_mntk_backs_root_callback, NULL);

	error = 0;

done:
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];

		if (pmi->pm_rootvnode) {
			vnode_put(pmi->pm_rootvnode);
		}
		if (pmi->pm_new_covered_vp) {
			vnode_put(pmi->pm_new_covered_vp);
		}
		if (pmi->pm_old_covered_vp) {
			vnode_rele(pmi->pm_old_covered_vp);
		}
	}

	if (outgoing_vol_new_covered_vp) {
		vnode_put(outgoing_vol_new_covered_vp);
	}

	if (incoming_vol_old_covered_vp) {
		vnode_rele(incoming_vol_old_covered_vp);
	}

	if (incoming_rootvnode) {
		vnode_put(incoming_rootvnode);
	}

	printf("%s : done shuffling mount points with error: %d\n", __FUNCTION__, error);
	return error;
}
1894
/*
 * Mount the Recovery volume of a container
 *
 * Kernel-mounts the recovery volume at PLATFORM_RECOVERY_VOLUME_MOUNT_POINT
 * using the same filesystem type as the root mount.  Requires root to be
 * mounted already.  No-op (returns 0) when CONFIG_MOUNT_PREBOOTRECOVERY
 * is not configured.
 */
int
vfs_mount_recovery(void)
{
#if CONFIG_MOUNT_PREBOOTRECOVERY
	int error = 0;

	/* Hold an iocount on rootvnode across the kernel_mount call. */
	error = vnode_get(rootvnode);
	if (error) {
		/* root must be mounted first */
		printf("vnode_get(rootvnode) failed with error %d\n", error);
		return error;
	}

	char recoverypath[] = PLATFORM_RECOVERY_VOLUME_MOUNT_POINT; /* !const because of internal casting */

	/* Mount the recovery volume */
	printf("attempting kernel mount for recovery volume... \n");
	error = kernel_mount(rootvnode->v_mount->mnt_vfsstat.f_fstypename, NULLVP, NULLVP,
	    recoverypath, (rootvnode->v_mount), 0, 0, (KERNEL_MOUNT_RECOVERYVOL), vfs_context_kernel());

	if (error) {
		printf("Failed to mount recovery volume (%d)\n", error);
	} else {
		printf("mounted recovery volume\n");
	}

	vnode_put(rootvnode);
	return error;
#else
	return 0;
#endif
}
1930
/*
 * Lookup a mount point by filesystem identifier.
 *
 * Unlocked variant: does not take the mount list lock and does not take
 * an iter reference on the result.
 */

struct mount *
vfs_getvfs(fsid_t *fsid)
{
	return mount_list_lookupby_fsid(fsid, 0, 0);
}
1940
/*
 * Lookup a mount point by fsid with the mount list lock already held
 * by the caller (e.g. vfs_getnewfsid()).
 */
static struct mount *
vfs_getvfs_locked(fsid_t *fsid)
{
	return mount_list_lookupby_fsid(fsid, 1, 0);
}
1946
1947 struct mount *
vfs_getvfs_with_vfsops(fsid_t * fsid,const struct vfsops * const ops)1948 vfs_getvfs_with_vfsops(fsid_t *fsid, const struct vfsops * const ops)
1949 {
1950 mount_t mp = mount_list_lookupby_fsid(fsid, 0, 0);
1951
1952 if (mp != NULL && mp->mnt_op != ops) {
1953 mp = NULL;
1954 }
1955 return mp;
1956 }
1957
1958 struct mount *
vfs_getvfs_by_mntonname(char * path)1959 vfs_getvfs_by_mntonname(char *path)
1960 {
1961 mount_t retmp = (mount_t)0;
1962 mount_t mp;
1963
1964 mount_list_lock();
1965 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1966 if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
1967 sizeof(mp->mnt_vfsstat.f_mntonname))) {
1968 retmp = mp;
1969 if (mount_iterref(retmp, 1)) {
1970 retmp = NULL;
1971 }
1972 goto out;
1973 }
1974 }
1975 out:
1976 mount_list_unlock();
1977 return retmp;
1978 }
1979
/* generation number for creation of new fsids */
u_short mntid_gen = 0;
/*
 * Get a new unique fsid
 *
 * val[0] is a fake device number built from the filesystem type and a
 * generation counter; val[1] is the type number.  Uniqueness is checked
 * against existing mounts while holding the mount list lock.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;

	mount_list_lock();

	/* generate a new fsid */
	mtype = mp->mnt_vtable->vfc_typenum;
	/* mntid_gen skips 0 so a valid fsid never has a zero minor. */
	if (++mntid_gen == 0) {
		mntid_gen++;
	}
	tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
	tfsid.val[1] = mtype;

	/* Keep bumping the generation until the fsid is unused. */
	while (vfs_getvfs_locked(&tfsid)) {
		if (++mntid_gen == 0) {
			mntid_gen++;
		}
		tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
	}

	mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
	mount_list_unlock();
}
2012
2013 /*
2014 * Routines having to do with the management of the vnode table.
2015 */
2016 extern int(**dead_vnodeop_p)(void *);
2017 long numvnodes, freevnodes, deadvnodes, async_work_vnodes;
2018 long busyvnodes = 0;
2019 long deadvnodes_noreuse = 0;
2020 int32_t freeablevnodes = 0;
2021 uint64_t allocedvnodes = 0;
2022 uint64_t deallocedvnodes = 0;
2023
2024
2025 int async_work_timed_out = 0;
2026 int async_work_handled = 0;
2027 int dead_vnode_wanted = 0;
2028 int dead_vnode_waited = 0;
2029
/*
 * Move a vnode from one mount queue to another.
 *
 * With mp == NULL, only removes vp from its current mount's vnode list
 * (dropping the mount ref and the vnode hold that VNAMED_MOUNT implies).
 * Otherwise inserts vp on mp's list — onto mnt_newvnodes while an
 * iteration (MNT_LITER) is in progress, else onto mnt_vnodelist — and
 * takes a vnode hold plus a mount ref.
 */
static void
insmntque(vnode_t vp, mount_t mp)
{
	mount_t lmp;
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
		if ((vp->v_lflag & VNAMED_MOUNT) == 0) {
			panic("insmntque: vp not in mount vnode list");
		}
		vp->v_lflag &= ~VNAMED_MOUNT;

		mount_lock_spin(lmp);

		mount_drop(lmp, 1);

		if (vp->v_mntvnodes.tqe_next == NULL) {
			/*
			 * vp is (apparently) the tail of one of the three
			 * per-mount queues; find which and remove it.
			 */
			if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
			} else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
			} else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp) {
				TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
			}
		} else {
			/* Mid-list: unlink manually without knowing which queue. */
			vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
			*vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
		}
		vp->v_mntvnodes.tqe_next = NULL;
		vp->v_mntvnodes.tqe_prev = NULL;
		mount_unlock(lmp);
		/* Drop the hold taken when vp was put on the mount list. */
		vnode_drop(vp);
		return;
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL) {
		mount_lock_spin(mp);
		if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0)) {
			panic("vp already in mount list");
		}
		/*
		 * While an iteration is in progress, park new vnodes on
		 * mnt_newvnodes so the iterator's view stays stable.
		 */
		if (mp->mnt_lflag & MNT_LITER) {
			TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
		}
		if (vp->v_lflag & VNAMED_MOUNT) {
			panic("insmntque: vp already in mount vnode list");
		}
		vnode_hold(vp);
		vp->v_lflag |= VNAMED_MOUNT;
		mount_ref(mp, 1);
		mount_unlock(mp);
	}
}
2091
2092
/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 *
 * Parameters:	dev	device number (NODEV is rejected)
 *		vpp	out: the new VBLK vnode with a usecount reference,
 *			opened FREAD; set to NULLVP on failure
 *
 * Returns:	0	Success
 *		ENODEV	dev was NODEV
 *		errno	vnode_create failure
 *
 * Note: failures after vnode creation (ref/fsync/invalidate/open) panic
 * rather than unwind, since callers are early-boot paths that cannot
 * proceed without this vnode; the return statements after each panic are
 * unreachable in practice.
 */
int
bdevvp(dev_t dev, vnode_t *vpp)
{
	vnode_t nvp;
	int error;
	struct vnode_fsparam vfsp;
	struct vfs_context context;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return ENODEV;
	}

	context.vc_thread = current_thread();
	context.vc_ucred = FSCRED;

	/* no backing mount; the vnode is wired directly to the spec vnops */
	vfsp.vnfs_mp = (struct mount *)0;
	vfsp.vnfs_vtype = VBLK;
	vfsp.vnfs_str = "bdevvp";
	vfsp.vnfs_dvp = NULL;
	vfsp.vnfs_fsnode = NULL;
	vfsp.vnfs_cnp = NULL;
	vfsp.vnfs_vops = spec_vnodeop_p;
	vfsp.vnfs_rdev = dev;
	vfsp.vnfs_filesize = 0;

	vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;

	vfsp.vnfs_marksystem = 0;
	vfsp.vnfs_markroot = 0;

	if ((error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp))) {
		*vpp = NULLVP;
		return error;
	}
	vnode_lock_spin(nvp);
	nvp->v_flag |= VBDEVVP;
	nvp->v_tag = VT_NON;	/* set this to VT_NON so during aliasing it can be replaced */
	vnode_unlock(nvp);
	if ((error = vnode_ref(nvp))) {
		panic("bdevvp failed: vnode_ref");
		return error;
	}
	if ((error = VNOP_FSYNC(nvp, MNT_WAIT, &context))) {
		panic("bdevvp failed: fsync");
		return error;
	}
	/* start with a clean buffer cache for the device */
	if ((error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0))) {
		panic("bdevvp failed: invalidateblks");
		return error;
	}

#if CONFIG_MACF
	/*
	 * XXXMAC: We can't put a MAC check here, the system will
	 * panic without this vnode.
	 */
#endif /* MAC */

	if ((error = VNOP_OPEN(nvp, FREAD, &context))) {
		panic("bdevvp failed: open");
		return error;
	}
	*vpp = nvp;

	return 0;
}
2165
/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 *
 * Returns NULLVP when nvp was inserted into the spec hash (caller keeps
 * using nvp), or the pre-existing alias vnode (with an iocount held)
 * when one should be reused.
 */
static vnode_t
checkalias(struct vnode *nvp, dev_t nvp_rdev)
{
	struct vnode *vp;
	struct vnode **vpp;
	struct specinfo *sin = NULL;
	int vid = 0;

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	SPECHASH_LOCK();

	/* look for an existing vnode with the same dev/type */
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
			vid = vp->v_id;
			vnode_hold(vp);
			break;
		}
	}
	SPECHASH_UNLOCK();

	if (vp) {
found_alias:
		/* vid guards against the vnode being recycled since the scan */
		if (vnode_getwithvid(vp, vid)) {
			vnode_drop(vp);
			goto loop;
		}
		vnode_drop(vp);
		/*
		 * Termination state is checked in vnode_getwithvid
		 */
		vnode_lock(vp);

		/*
		 * Alias, but not in use, so flush it out.
		 */
		if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
			vnode_hold(vp);
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_put_locked(vp);
			vnode_drop_and_unlock(vp);
			goto loop;
		}
	}
	/* no alias, or the alias is in active use (non-VT_NON): set up nvp */
	if (vp == NULL || vp->v_tag != VT_NON) {
		if (sin == NULL) {
			sin = zalloc_flags(specinfo_zone, Z_WAITOK | Z_ZERO);
		} else {
			/* reuse the specinfo allocated on a previous pass */
			bzero(sin, sizeof(struct specinfo));
		}

		nvp->v_specinfo = sin;
		nvp->v_rdev = nvp_rdev;
		nvp->v_specflags = 0;
		nvp->v_speclastr = -1;
		nvp->v_specinfo->si_opencount = 0;
		nvp->v_specinfo->si_initted = 0;
		nvp->v_specinfo->si_throttleable = 0;
		nvp->v_specinfo->si_devbsdunit = LOWPRI_MAX_NUM_DEV - 1;

		SPECHASH_LOCK();

		/* We dropped the lock, someone could have added */
		if (vp == NULLVP) {
			for (vp = *vpp; vp; vp = vp->v_specnext) {
				if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
					vid = vp->v_id;
					vnode_hold(vp);
					SPECHASH_UNLOCK();
					goto found_alias;
				}
			}
		}

		/* insert nvp at the head of its hash chain */
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		*vpp = nvp;

		if (vp != NULLVP) {
			/* both vnodes now refer to the same device */
			nvp->v_specflags |= SI_ALIASED;
			vp->v_specflags |= SI_ALIASED;
			SPECHASH_UNLOCK();
			vnode_put_locked(vp);
			vnode_unlock(vp);
		} else {
			SPECHASH_UNLOCK();
		}

		return NULLVP;
	}

	/* reusing the existing alias: free the speculatively allocated specinfo */
	if (sin) {
		zfree(specinfo_zone, sin);
	}

	if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0) {
		return vp;
	}

	panic("checkalias with VT_NON vp that shouldn't: %p", vp);

	return vp;
}
2277
2278
/*
 * Get a reference on a particular vnode and lock it if requested.
 * If the vnode was on the inactive list, remove it from the list.
 * If the vnode was on the free list, remove it from the list and
 * move it to inactive list as needed.
 * The vnode lock bit is set if the vnode is being eliminated in
 * vgone. The process is awakened when the transition is completed,
 * and an error returned to indicate that the vnode is no longer
 * usable (possibly having been changed to a new file system type).
 *
 * Parameters:	vp	vnode to acquire an iocount on
 *		vid	expected v_id (validated by vnode_getiocount)
 *		vflags	VNODE_* flags (e.g. VNODE_WITHREF, VNODE_WRITEABLE)
 *
 * Returns:	0	Success
 *		EINVAL	VNODE_WRITEABLE requested but no writers are open
 *		errno	from vnode_getiocount
 */
int
vget_internal(vnode_t vp, int vid, int vflags)
{
	int error = 0;

	vnode_lock_spin(vp);

	if ((vflags & VNODE_WITHREF) && (vp->v_usecount == 0) && (vp->v_iocount == 0)) {
		panic("Expected to have usecount or iocount on vnode");
	}

	if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0)) {
		/*
		 * vnode to be returned only if it has writers opened
		 */
		error = EINVAL;
	} else {
		error = vnode_getiocount(vp, vid, vflags);
	}

	vnode_unlock(vp);

	return error;
}
2313
/*
 * Take a usecount reference on vp; convenience wrapper around
 * vnode_ref_ext() with no open mode and no flags.
 *
 * Returns: 0 Success
 * ENOENT No such file or directory [terminating]
 */
int
vnode_ref(vnode_t vp)
{
	return vnode_ref_ext(vp, 0, 0);
}
2323
/*
 * Take a usecount reference on vp, adjusting the write/kernel counts
 * according to fmode (FWRITE bumps v_writecount, O_EVTONLY bumps
 * v_kusecount). Fails with ENOENT if the vnode is draining/terminating
 * and the caller neither owns the drain nor passed VNODE_REF_FORCE.
 *
 * Returns: 0 Success
 * ENOENT No such file or directory [terminating]
 */
int
vnode_ref_ext(vnode_t vp, int fmode, int flags)
{
	int error = 0;

	vnode_lock_spin(vp);

	/*
	 * once all the current call sites have been fixed to insure they have
	 * taken an iocount, we can toughen this assert up and insist that the
	 * iocount is non-zero... a non-zero usecount doesn't insure correctness
	 */
	if (vp->v_iocount <= 0 && vp->v_usecount <= 0) {
		panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
	}

	/*
	 * if you are the owner of drain/termination, can acquire usecount
	 */
	if (((flags & VNODE_REF_FORCE) == 0) &&
	    ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) &&
	    !(vp->v_lflag & VL_OPSCHANGE) &&
	    (vp->v_owner != current_thread())) {
		error = ENOENT;
		goto out;
	}

	/* Enable atomic ops on v_usecount without the vnode lock */
	os_atomic_inc(&vp->v_usecount, relaxed);

	if (fmode & FWRITE) {
		if (++vp->v_writecount <= 0) {
			panic("vnode_ref_ext: v_writecount");
		}
	}
	if (fmode & O_EVTONLY) {
		if (++vp->v_kusecount <= 0) {
			panic("vnode_ref_ext: v_kusecount");
		}
	}
	if (vp->v_flag & VRAGE) {
		struct uthread *ut;

		ut = current_uthread();

		if (!(current_proc()->p_lflag & P_LRAGE_VNODES) &&
		    !(ut->uu_flag & UT_RAGE_VNODES)) {
			/*
			 * a 'normal' process accessed this vnode
			 * so make sure its no longer marked
			 * for rapid aging... also, make sure
			 * it gets removed from the rage list...
			 * when v_usecount drops back to 0, it
			 * will be put back on the real free list
			 */
			vp->v_flag &= ~VRAGE;
			vp->v_references = 0;
			vnode_list_remove(vp);
		}
	}
	/* first usecount on a regular file: tell VM its pager object is in use */
	if (vp->v_usecount == 1 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
		if (vp->v_ubcinfo) {
			vnode_lock_convert(vp);
			memory_object_mark_used(vp->v_ubcinfo->ui_control);
		}
	}
out:
	vnode_unlock(vp);

	return error;
}
2399
2400
2401 boolean_t
vnode_on_reliable_media(vnode_t vp)2402 vnode_on_reliable_media(vnode_t vp)
2403 {
2404 mount_t mp = vp->v_mount;
2405
2406 /*
2407 * A NULL mountpoint would imply it's not attached to a any filesystem.
2408 * This can only happen with a vnode created by bdevvp(). We'll consider
2409 * those as not unreliable as the primary use of this function is determine
2410 * which vnodes are to be handed off to the async cleaner thread for
2411 * reclaim.
2412 */
2413 if (!mp || (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV) && (mp->mnt_flag & MNT_LOCAL))) {
2414 return TRUE;
2415 }
2416
2417 return FALSE;
2418 }
2419
/*
 * Place vp at the head of the async work list and update the counters.
 * Caller must hold the vnode list lock; vp must not already be on any
 * list and must not be terminating or dead.
 */
static void
vnode_async_list_add_locked(vnode_t vp)
{
	if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
		panic("vnode_async_list_add: %p is in wrong state", vp);
	}

	TAILQ_INSERT_HEAD(&vnode_async_work_list, vp, v_freelist);
	vp->v_listflag |= VLIST_ASYNC_WORK;

	async_work_vnodes++;
	if (!(vp->v_listflag & VLIST_NO_REUSE)) {
		reusablevnodes++;
	}
	/* vp is now on a list, so it no longer counts as busy */
	if (vp->v_flag & VCANDEALLOC) {
		os_atomic_dec(&busyvnodes, relaxed);
	}
}
2438
2439 static void
vnode_async_list_add(vnode_t vp)2440 vnode_async_list_add(vnode_t vp)
2441 {
2442 vnode_list_lock();
2443
2444 if (VONLIST(vp)) {
2445 if (!(vp->v_listflag & VLIST_ASYNC_WORK)) {
2446 vnode_list_remove_locked(vp);
2447 vnode_async_list_add_locked(vp);
2448 }
2449 } else {
2450 vnode_async_list_add_locked(vp);
2451 }
2452
2453 vnode_list_unlock();
2454
2455 wakeup(&vnode_async_work_list);
2456 }
2457
2458
/*
 * put the vnode on appropriate free list.
 * called with vnode LOCKED
 *
 * Destination is chosen by vnode state: VLIST_NO_REUSE (and not dead)
 * goes to the async work list; VRAGE goes to the rapid-age list;
 * VL_DEAD goes to the dead list (waking any waiter in need of a dead
 * vnode); everything else goes to the regular free list (head if VAGE,
 * tail otherwise).
 */
static void
vnode_list_add(vnode_t vp)
{
	boolean_t need_dead_wakeup = FALSE;
	bool no_busy_decrement = false;

#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif

again:

	/*
	 * if it is already on a list or non zero references return
	 */
	if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE)) {
		return;
	}

	/*
	 * In vclean, we might have deferred ditching locked buffers
	 * because something was still referencing them (indicated by
	 * usecount). We can ditch them now.
	 */
	if (ISSET(vp->v_lflag, VL_DEAD)
	    && (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))) {
		++vp->v_iocount;        // Probably not necessary, but harmless
#ifdef CONFIG_IOCOUNT_TRACE
		record_vp(vp, 1);
#endif
		/* must drop the vnode lock to flush buffers, then re-check state */
		vnode_unlock(vp);
		buf_invalidateblks(vp, BUF_INVALIDATE_LOCKED, 0, 0);
		vnode_lock(vp);
		vnode_dropiocount(vp);
		goto again;
	}

	vnode_list_lock();

	if (!(vp->v_lflag & VL_DEAD) && (vp->v_listflag & VLIST_NO_REUSE)) {
		/*
		 * not reusable yet: hand it to the async cleaner.
		 * vnode_async_list_add_locked already adjusts busyvnodes,
		 * so skip the decrement at the bottom.
		 */
		if (!(vp->v_listflag & VLIST_ASYNC_WORK)) {
			vnode_async_list_add_locked(vp);
		}
		no_busy_decrement = true;
	} else if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
		/*
		 * add the new guy to the appropriate end of the RAGE list
		 */
		if ((vp->v_flag & VAGE)) {
			TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
		} else {
			TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
		}

		vp->v_listflag |= VLIST_RAGE;
		ragevnodes++;
		reusablevnodes++;
		wakeup_laundry_thread();

		/*
		 * reset the timestamp for the last inserted vp on the RAGE
		 * queue to let new_vnode know that its not ok to start stealing
		 * from this list... as long as we're actively adding to this list
		 * we'll push out the vnodes we want to donate to the real free list
		 * once we stop pushing, we'll let some time elapse before we start
		 * stealing them in the new_vnode routine
		 */
		microuptime(&rage_tv);
	} else {
		/*
		 * if VL_DEAD, insert it at head of the dead list
		 * else insert at tail of LRU list or at head if VAGE is set
		 */
		if ((vp->v_lflag & VL_DEAD)) {
			if (vp->v_flag & VCANDEALLOC) {
				TAILQ_INSERT_TAIL(&vnode_dead_list, vp, v_freelist);
				if (vp->v_listflag & VLIST_NO_REUSE) {
					deadvnodes_noreuse++;
				}
			} else {
				TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
			}
			vp->v_listflag |= VLIST_DEAD;
			deadvnodes++;

			/* satisfy one waiter that is blocked on a dead vnode */
			if (dead_vnode_wanted) {
				dead_vnode_wanted--;
				need_dead_wakeup = TRUE;
			}
		} else if ((vp->v_flag & VAGE)) {
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
			vp->v_flag &= ~VAGE;
			freevnodes++;
			reusablevnodes++;
			wakeup_laundry_thread();
		} else {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			freevnodes++;
			reusablevnodes++;
			wakeup_laundry_thread();
		}
	}
	if ((vp->v_flag & VCANDEALLOC) && !no_busy_decrement) {
		os_atomic_dec(&busyvnodes, relaxed);
	}
	vnode_list_unlock();

	/* wake outside the list lock */
	if (need_dead_wakeup == TRUE) {
		wakeup_one((caddr_t)&dead_vnode_wanted);
	}
}
2574
2575
/*
 * remove the vnode from appropriate free list.
 * called with vnode LOCKED and
 * the list lock held
 *
 * No-op if vp is not on any list; otherwise the VLIST_* flag tells us
 * which list it is on. A removed VCANDEALLOC vnode counts as busy again.
 */
static void
vnode_list_remove_locked(vnode_t vp)
{
	if (VONLIST(vp)) {
		/*
		 * the v_listflag field is
		 * protected by the vnode_list_lock
		 */
		if (vp->v_listflag & VLIST_RAGE) {
			VREMRAGE("vnode_list_remove", vp);
		} else if (vp->v_listflag & VLIST_DEAD) {
			VREMDEAD("vnode_list_remove", vp);
			wakeup_laundry_thread();
		} else if (vp->v_listflag & VLIST_ASYNC_WORK) {
			VREMASYNC_WORK("vnode_list_remove", vp);
		} else {
			VREMFREE("vnode_list_remove", vp);
		}
		if (vp->v_flag & VCANDEALLOC) {
			os_atomic_inc(&busyvnodes, relaxed);
		}
	}
}
2604
2605
/*
 * remove the vnode from appropriate free list.
 * called with vnode LOCKED
 */
static void
vnode_list_remove(vnode_t vp)
{
#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	/*
	 * we want to avoid taking the list lock
	 * in the case where we're not on the free
	 * list... this will be true for most
	 * directories and any currently in use files
	 *
	 * we're guaranteed that we can't go from
	 * the not-on-list state to the on-list
	 * state since we hold the vnode lock...
	 * all calls to vnode_list_add are done
	 * under the vnode lock... so we can
	 * check for that condition (the prevalent one)
	 * without taking the list lock
	 */
	if (VONLIST(vp)) {
		vnode_list_lock();
		/*
		 * however, we're not guaranteed that
		 * we won't go from the on-list state
		 * to the not-on-list state until we
		 * hold the vnode_list_lock... this
		 * is due to "new_vnode" removing vnodes
		 * from the free list under the list_lock
		 * w/o the vnode lock... so we need to
		 * check again whether we're currently
		 * on the free list
		 */
		vnode_list_remove_locked(vp);

		vnode_list_unlock();
	}
}
2648
2649
/* Release a usecount reference; wrapper for vnode_rele_internal(). */
void
vnode_rele(vnode_t vp)
{
	vnode_rele_internal(vp, 0, 0, 0);
}
2655
2656
/*
 * Release a usecount reference taken with the given open mode (fmode);
 * dont_reenter suppresses the inline VNOP_INACTIVE call on last release.
 */
void
vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
{
	vnode_rele_internal(vp, fmode, dont_reenter, 0);
}
2662
2663
/*
 * Drop a usecount reference on vp, adjusting write/kernel counts per
 * fmode. When this was the last use and io count, either mark the vnode
 * for deferred inactive processing (terminating or dont_reenter), or
 * call VNOP_INACTIVE here and possibly reclaim the vnode.
 *
 * Parameters:	vp		the vnode
 *		fmode		FWRITE / O_EVTONLY counts to drop
 *		dont_reenter	don't call back into the filesystem on this
 *				release; the vnode is aged instead
 *		locked		nonzero if caller already holds the vnode lock
 */
void
vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
{
	int32_t old_usecount;

	if (!locked) {
		vnode_hold(vp);
		vnode_lock_spin(vp);
	}
#if DIAGNOSTIC
	else {
		lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
	}
#endif
	/* Enable atomic ops on v_usecount without the vnode lock */
	old_usecount = os_atomic_dec_orig(&vp->v_usecount, relaxed);
	if (old_usecount < 1) {
		/*
		 * Because we allow atomic ops on usecount (in lookup only, under
		 * specific conditions of already having a usecount) it is
		 * possible that when the vnode is examined, its usecount is
		 * different than what will be printed in this panic message.
		 */
		panic("vnode_rele_ext: vp %p usecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.",
		    vp, old_usecount - 1, vp->v_tag, vp->v_type, vp->v_flag);
	}

	if (fmode & FWRITE) {
		if (--vp->v_writecount < 0) {
			panic("vnode_rele_ext: vp %p writecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
		}
	}
	if (fmode & O_EVTONLY) {
		if (--vp->v_kusecount < 0) {
			panic("vnode_rele_ext: vp %p kusecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
		}
	}
	if (vp->v_kusecount > vp->v_usecount) {
		panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d).  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
	}

	if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
		/*
		 * vnode is still busy... if we're the last
		 * usecount, mark for a future call to VNOP_INACTIVE
		 * when the iocount finally drops to 0
		 */
		if (vp->v_usecount == 0) {
			vp->v_lflag |= VL_NEEDINACTIVE;
			vp->v_flag  &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
		}
		goto done;
	}
	vp->v_flag  &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);

	if (ISSET(vp->v_lflag, VL_TERMINATE | VL_DEAD) || dont_reenter) {
		/*
		 * vnode is being cleaned, or
		 * we've requested that we don't reenter
		 * the filesystem on this release...in
		 * the latter case, we'll mark the vnode aged
		 */
		if (dont_reenter) {
			if (!(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM))) {
				vp->v_lflag |= VL_NEEDINACTIVE;

				/* unreliable/dirty vnodes are reclaimed by the async thread */
				if (vnode_on_reliable_media(vp) == FALSE || vp->v_flag & VISDIRTY) {
					vnode_async_list_add(vp);
					goto done;
				}
			}
			vp->v_flag |= VAGE;
		}
		vnode_list_add(vp);

		goto done;
	}
	/*
	 * at this point both the iocount and usecount
	 * are zero
	 * pick up an iocount so that we can call
	 * VNOP_INACTIVE with the vnode lock unheld
	 */
	vp->v_iocount++;
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	vp->v_lflag &= ~VL_NEEDINACTIVE;

	if (UBCINFOEXISTS(vp)) {
		/* drops the vnode lock after freeing code-signing state */
		ubc_cs_free_and_vnode_unlock(vp);
	} else {
		vnode_unlock(vp);
	}

	VNOP_INACTIVE(vp, vfs_context_current());

	vnode_lock_spin(vp);

	/*
	 * because we dropped the vnode lock to call VNOP_INACTIVE
	 * the state of the vnode may have changed... we may have
	 * picked up an iocount, usecount or the MARKTERM may have
	 * been set... we need to reevaluate the reference counts
	 * to determine if we can call vnode_reclaim_internal at
	 * this point... if the reference counts are up, we'll pick
	 * up the MARKTERM state when they get subsequently dropped
	 */
	if ((vp->v_iocount == 1) && (vp->v_usecount == 0) &&
	    ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
		struct uthread *ut;

		ut = current_uthread();

		if (ut->uu_defer_reclaims) {
			/* queue on the per-thread deferred reclaim list instead */
			vp->v_defer_reclaimlist = ut->uu_vreclaims;
			ut->uu_vreclaims = vp;
			goto done;
		}
		vnode_lock_convert(vp);
		vnode_reclaim_internal(vp, 1, 1, 0);
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);
done:
	/* last usecount on a regular file: tell VM the pager object is unused */
	if (vp->v_usecount == 0 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
		if (vp->v_ubcinfo) {
			vnode_lock_convert(vp);
			memory_object_mark_unused(vp->v_ubcinfo->ui_control, (vp->v_flag & VRAGE) == VRAGE);
		}
	}
	if (!locked) {
		vnode_drop_and_unlock(vp);
	}
	return;
}
2800
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 *
 * Parameters:	mp	mount whose vnodes are to be flushed
 *		skipvp	vnode to leave untouched (may be NULL)
 *		flags	FORCECLOSE / WRITECLOSE / SKIPSYSTEM / SKIPSWAP /
 *			SKIPROOT
 *
 * Returns:	0	Success
 *		EBUSY	busy vnodes remain and FORCECLOSE not set
 */

int
vflush(struct mount *mp, struct vnode *skipvp, int flags)
{
	struct vnode *vp;
	int busy = 0;
	int reclaimed = 0;
	int retval;
	unsigned int vid;
	bool first_try = true;

	/*
	 * See comments in vnode_iterate() for the rationale for this lock
	 */
	mount_iterate_lock(mp);

	mount_lock(mp);
	vnode_iterate_setup(mp);
	/*
	 * On regular unmounts(not forced) do a
	 * quick check for vnodes to be in use. This
	 * preserves the caching of vnodes. automounter
	 * tries unmounting every so often to see whether
	 * it is still busy or not.
	 */
	if (((flags & FORCECLOSE) == 0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
		if (vnode_umount_preflight(mp, skipvp, flags)) {
			vnode_iterate_clear(mp);
			mount_unlock(mp);
			mount_iterate_unlock(mp);
			return EBUSY;
		}
	}
loop:
	/* If it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		mount_iterate_unlock(mp);
		return retval;
	}

	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		/* move each vnode back from the worker queue to the main list */
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);

		if ((vp->v_mount != mp) || (vp == skipvp)) {
			continue;
		}
		vid = vp->v_id;
		mount_unlock(mp);

		vnode_lock_spin(vp);

		// If vnode is already terminating, wait for it...
		while (vp->v_id == vid && ISSET(vp->v_lflag, VL_TERMINATE)) {
			vp->v_lflag |= VL_TERMWANT;
			msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vflush", NULL);
		}

		/* recycled out from under us, or already dead: nothing to do */
		if ((vp->v_id != vid) || ISSET(vp->v_lflag, VL_DEAD)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}

		/*
		 * If requested, skip over vnodes marked VSYSTEM.
		 * Skip over all vnodes marked VNOFLUSH.
		 */
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
		    (vp->v_flag & VNOFLUSH))) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VSWAP.
		 */
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If requested, skip over vnodes marked VROOT.
		 */
		if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			vnode_unlock(vp);
			mount_lock(mp);
			continue;
		}
		/*
		 * If the real usecount is 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (((vp->v_usecount == 0) ||
		    ((vp->v_usecount - vp->v_kusecount) == 0))) {
			vnode_lock_convert(vp);
			vnode_hold(vp);
			vp->v_iocount++;        /* so that drain waits for * other iocounts */
#ifdef CONFIG_IOCOUNT_TRACE
			record_vp(vp, 1);
#endif
			vnode_reclaim_internal(vp, 1, 1, 0);
			vnode_dropiocount(vp);
			vnode_list_add(vp);
			vnode_drop_and_unlock(vp);

			reclaimed++;
			mount_lock(mp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			vnode_lock_convert(vp);

			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vp->v_iocount++;        /* so that drain waits * for other iocounts */
				vnode_hold(vp);
#ifdef CONFIG_IOCOUNT_TRACE
				record_vp(vp, 1);
#endif
				vnode_abort_advlocks(vp);
				vnode_reclaim_internal(vp, 1, 1, 0);
				vnode_dropiocount(vp);
				vnode_list_add(vp);
				vnode_drop_and_unlock(vp);
			} else {
				/* device node: clean it but keep it usable via spec ops */
				vnode_hold(vp);
				vp->v_lflag |= VL_OPSCHANGE;
				vclean(vp, 0);
				vp->v_lflag &= ~VL_DEAD;
				vp->v_op = spec_vnodeop_p;
				vp->v_flag |= VDEVFLUSH;
				vnode_drop_and_unlock(vp);
				wakeup(&vp->v_lflag); /* chkvnlock is waitng for VL_DEAD to get unset */
			}
			mount_lock(mp);
			continue;
		}

		vnode_unlock(vp);
		/* log vnodes blocking unforced unmounts */
		if (print_busy_vnodes && first_try && ((flags & FORCECLOSE) == 0)) {
			vprint_path("vflush - busy vnode", vp);
		}

		mount_lock(mp);
		busy++;
	}

	/* At this point the worker queue is completed */
	if (busy && ((flags & FORCECLOSE) == 0) && reclaimed) {
		/* reclaims may have freed up busy vnodes: take another pass */
		busy = 0;
		reclaimed = 0;
		(void)vnode_iterate_reloadq(mp);
		first_try = false;
		/* returned with mount lock held */
		goto loop;
	}

	/* if new vnodes were created in between retry the reclaim */
	if (vnode_iterate_reloadq(mp) != 0) {
		if (!(busy && ((flags & FORCECLOSE) == 0))) {
			first_try = false;
			goto loop;
		}
	}
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	mount_iterate_unlock(mp);

	if (busy && ((flags & FORCECLOSE) == 0)) {
		return EBUSY;
	}
	return 0;
}
3005
long num_recycledvnodes = 0;    /* lifetime count of vnodes cleaned via vclean() (atomic) */
/*
 * Disassociate the underlying file system from a vnode.
 * The vnode lock is held on entry.
 *
 * flags: DOCLOSE closes the vnode and invalidates its buffers/pages;
 * REVOKEALL marks the close as a revoke. On return the vnode is marked
 * VL_DEAD, detached from its mount, and wired to the dead vnops. The
 * vnode lock is dropped and reacquired internally.
 */
static void
vclean(vnode_t vp, int flags)
{
	vfs_context_t ctx = vfs_context_current();
	int active;
	int need_inactive;
	int already_terminating;
	int clflags = 0;
#if NAMEDSTREAMS
	int is_namedstream;
#endif

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	active = vp->v_usecount;

	/*
	 * just in case we missed sending a needed
	 * VNOP_INACTIVE, we'll do it now
	 */
	need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);

	vp->v_lflag &= ~VL_NEEDINACTIVE;

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	already_terminating = (vp->v_lflag & VL_TERMINATE);

	vp->v_lflag |= VL_TERMINATE;

#if NAMEDSTREAMS
	is_namedstream = vnode_isnamedstream(vp);
#endif

	vnode_unlock(vp);

	OSAddAtomicLong(1, &num_recycledvnodes);

	if (flags & DOCLOSE) {
		clflags |= IO_NDELAY;
	}
	if (flags & REVOKEALL) {
		clflags |= IO_REVOKE;
	}

#if CONFIG_MACF
	if (vp->v_mount) {
		/*
		 * It is possible for bdevvp vnodes to not have a mount
		 * pointer. It's fine to let it get reclaimed without
		 * notifying.
		 */
		mac_vnode_notify_reclaim(vp);
	}
#endif

	if (active && (flags & DOCLOSE)) {
		VNOP_CLOSE(vp, clflags, ctx);
	}

	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE) {
		if (vp->v_tag == VT_NFS) {
			/* NFS keeps its own buffer state */
			nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
		} else {
			VNOP_FSYNC(vp, MNT_WAIT, ctx);

			/*
			 * If the vnode is still in use (by the journal for
			 * example) we don't want to invalidate locked buffers
			 * here.  In that case, either the journal will tidy them
			 * up, or we will deal with it when the usecount is
			 * finally released in vnode_rele_internal.
			 */
			buf_invalidateblks(vp, BUF_WRITE_DATA | (active ? 0 : BUF_INVALIDATE_LOCKED), 0, 0);
		}
		if (UBCINFOEXISTS(vp)) {
			/*
			 * Clean the pages in VM.
			 */
			(void)ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
		}
	}
	if (active || need_inactive) {
		VNOP_INACTIVE(vp, ctx);
	}

#if NAMEDSTREAMS
	if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
		vnode_t pvp = vp->v_parent;

		/* Delete the shadow stream file before we reclaim its vnode */
		if (vnode_isshadow(vp)) {
			vnode_relenamedstream(pvp, vp);
		}

		/*
		 * No more streams associated with the parent.  We
		 * have a ref on it, so its identity is stable.
		 * If the parent is on an opaque volume, then we need to know
		 * whether it has associated named streams.
		 */
		if (vfs_authopaque(pvp->v_mount)) {
			vnode_lock_spin(pvp);
			pvp->v_lflag &= ~VL_HASSTREAMS;
			vnode_unlock(pvp);
		}
	}
#endif

	/* classify why the backing VM object is going away, for VM accounting */
	vm_object_destroy_reason_t reason = VM_OBJECT_DESTROY_RECLAIM;
	bool forced_unmount = vnode_mount(vp) != NULL && (vnode_mount(vp)->mnt_lflag & MNT_LFORCE) != 0;
	bool ungraft_heuristic = flags & REVOKEALL;
	bool unmount = vnode_mount(vp) != NULL && (vnode_mount(vp)->mnt_lflag & MNT_LUNMOUNT) != 0;
	if (forced_unmount) {
		reason = VM_OBJECT_DESTROY_FORCED_UNMOUNT;
	} else if (ungraft_heuristic) {
		reason = VM_OBJECT_DESTROY_UNGRAFT;
	} else if (unmount) {
		reason = VM_OBJECT_DESTROY_UNMOUNT;
	}

	/*
	 * Destroy ubc named reference
	 * cluster_release is done on this path
	 * along with dropping the reference on the ucred
	 * (and in the case of forced unmount of an mmap-ed file,
	 * the ubc reference on the vnode is dropped here too).
	 */
	ubc_destroy_named(vp, reason);

#if CONFIG_TRIGGERS
	/*
	 * cleanup trigger info from vnode (if any)
	 */
	if (vp->v_resolve) {
		vnode_resolver_detach(vp);
	}
#endif

#if CONFIG_IO_COMPRESSION_STATS
	if ((vp->io_compression_stats)) {
		vnode_iocs_record_and_free(vp);
	}
#endif /* CONFIG_IO_COMPRESSION_STATS */

	/*
	 * Reclaim the vnode.
	 */
	if (VNOP_RECLAIM(vp, ctx)) {
		panic("vclean: cannot reclaim");
	}

	// make sure the name & parent ptrs get cleaned out!
	vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE | VNODE_UPDATE_PURGEFIRMLINK);

	vnode_lock(vp);

	/*
	 * Remove the vnode from any mount list it might be on.  It is not
	 * safe to do this any earlier because unmount needs to wait for
	 * any vnodes to terminate and it cannot do that if it cannot find
	 * them.
	 */
	insmntque(vp, (struct mount *)0);

	/* from here on the vnode is dead: all vnops route to the dead ops */
	vp->v_lflag |= VL_DEAD;
	vp->v_mount = dead_mountp;
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_data = NULL;

	vp->v_flag &= ~VISDIRTY;

	if (already_terminating == 0) {
		vp->v_lflag &= ~VL_TERMINATE;
		/*
		 * Done with purge, notify sleepers of the grim news.
		 */
		if (vp->v_lflag & VL_TERMWANT) {
			vp->v_lflag &= ~VL_TERMWANT;
			wakeup(&vp->v_lflag);
		}
	}
}
3204
3205 /*
3206 * Eliminate all activity associated with the requested vnode
3207 * and with all vnodes aliased to the requested vnode.
3208 */
int
#if DIAGNOSTIC
vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
#else
vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
#endif
{
	struct vnode *vq;
	int vid;

#if DIAGNOSTIC
	if ((flags & REVOKEALL) == 0) {
		panic("vnop_revoke");
	}
#endif

	if (vnode_isaliased(vp)) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * return an immediate error
		 */
		if (vp->v_lflag & VL_TERMINATE) {
			return ENOENT;
		}

		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		SPECHASH_LOCK();
		while ((vp->v_specflags & SI_ALIASED)) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				/* only aliases of vp: same rdev and type, not vp itself */
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq) {
					continue;
				}
				/* capture identity and hold the alias before dropping the hash lock */
				vid = vq->v_id;
				vnode_hold(vq);
				SPECHASH_UNLOCK();
				if (vnode_getwithvid(vq, vid)) {
					/* alias was recycled underneath us; rescan the chain */
					vq = vnode_drop(vq);
					SPECHASH_LOCK();
					break;
				}
				vnode_lock(vq);
				if (!(vq->v_lflag & VL_TERMINATE)) {
					/* reclaim this alias unless someone else already is */
					vnode_reclaim_internal(vq, 1, 1, 0);
				}
				vnode_put_locked(vq);
				vq = vnode_drop_and_unlock(vq);
				SPECHASH_LOCK();
				break;
			}
		}
		SPECHASH_UNLOCK();
	}
	vnode_lock(vp);
	if (vp->v_lflag & VL_TERMINATE) {
		/* a vgone/vclean is already in progress on vp itself */
		vnode_unlock(vp);
		return ENOENT;
	}
	vnode_reclaim_internal(vp, 1, 0, REVOKEALL);
	vnode_unlock(vp);

	return 0;
}
3275
/*
 * Attempt to recycle an unused vnode immediately.
 * Returns 1 if the vnode was reclaimed, or 0 if it was busy, in which
 * case it is only marked (VL_MARKTERM) for termination on last release.
 */
int
vnode_recycle(struct vnode *vp)
{
	vnode_lock_spin(vp);

	if (vp->v_iocount || vp->v_usecount) {
		/*
		 * Vnode is busy: mark it so it is reclaimed when the last
		 * reference goes away, and report "not recycled".
		 */
		vp->v_lflag |= VL_MARKTERM;
		vnode_unlock(vp);
		return 0;
	}
	/* upgrade the spin lock to a full mutex before reclaiming */
	vnode_lock_convert(vp);
	vnode_hold(vp);
	vnode_reclaim_internal(vp, 1, 0, 0);

	vnode_drop_and_unlock(vp);

	return 1;
}
3298
/*
 * Mark a vnode for termination when its (sole) iocount is released.
 * Returns 1 on success, 0 if the vnode is busy (extra iocount or any
 * usecount).  The caller is expected to hold exactly one iocount.
 */
static int
vnode_reload(vnode_t vp)
{
	vnode_lock_spin(vp);

	if ((vp->v_iocount > 1) || vp->v_usecount) {
		/* busy: another iocount holder or a usecount exists */
		vnode_unlock(vp);
		return 0;
	}
	if (vp->v_iocount <= 0) {
		panic("vnode_reload with no iocount %d", vp->v_iocount);
	}

	/* mark for release when iocount is dropped */
	vp->v_lflag |= VL_MARKTERM;
	vnode_unlock(vp);

	return 1;
}
3318
3319
/*
 * Eliminate all activity associated with a vnode: run vclean() on it
 * and, for special devices, remove it from the spec alias hash chain
 * and free its specinfo.
 */
static void
vgone(vnode_t vp, int flags)
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * Clean out the filesystem specific data.
	 * vclean also takes care of removing the
	 * vnode from any mount list it might be on
	 */
	vclean(vp, flags | DOCLOSE);

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		SPECHASH_LOCK();
		/* unlink vp from its spec hash chain */
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp) {
					continue;
				}
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL) {
				panic("missing bdev");
			}
		}
		if (vp->v_specflags & SI_ALIASED) {
			/*
			 * Scan for remaining aliases with the same (rdev, type).
			 * vx is the first remaining alias; if the scan found no
			 * second one (vq == NULL), vx is the last alias left and
			 * its SI_ALIASED flag is cleared too.
			 */
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type) {
					continue;
				}
				if (vx) {
					break;
				}
				vx = vq;
			}
			if (vx == NULL) {
				panic("missing alias");
			}
			if (vq == NULL) {
				vx->v_specflags &= ~SI_ALIASED;
			}
			vp->v_specflags &= ~SI_ALIASED;
		}
		SPECHASH_UNLOCK();
		{
			/* detach the specinfo from the vnode before freeing it */
			struct specinfo *tmp = vp->v_specinfo;
			vp->v_specinfo = NULL;
			zfree(specinfo_zone, tmp);
		}
	}
}
3381
3382 /*
3383 * internal helper function only!
3384 * vend an _iocounted_ vnode via output argument, or return an error if unable.
3385 */
static int
get_vp_from_dev(dev_t dev, enum vtype type, vnode_t *outvp)
{
	vnode_t vp;
	int vid;

loop:
	SPECHASH_LOCK();
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type) {
			continue;
		}
		/* capture identity and hold vp so it can't be freed once we unlock */
		vid = vp->v_id;
		vnode_hold(vp);
		SPECHASH_UNLOCK();

		/* acquire iocount */
		if (vnode_getwithvid(vp, vid)) {
			/* vnode was recycled before we got the iocount; rescan */
			vnode_drop(vp);
			goto loop;
		}
		vnode_drop(vp);

		/* Vend iocounted vnode */
		*outvp = vp;
		return 0;
	}

	/* vnode not found, error out */
	SPECHASH_UNLOCK();
	return ENOENT;
}
3418
3419
3420
/*
 * Look up the vnode for a device number and, if it is busy, report
 * (via the return value and *errorp) whether a filesystem is mounted
 * on it.
 */
int
check_mountedon(dev_t dev, enum vtype type, int *errorp)
{
	vnode_t vp = NULLVP;
	int rc = 0;

	rc = get_vp_from_dev(dev, type, &vp);
	if (rc) {
		/* if no vnode found, it cannot be mounted on */
		return 0;
	}

	/* otherwise, examine it */
	vnode_lock_spin(vp);
	/* note: exclude the iocount we JUST got (e.g. >1, not >0) */
	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
		vnode_unlock(vp);
		/* busy vnode: check whether something is mounted on it */
		if ((*errorp = vfs_mountedon(vp)) != 0) {
			rc = 1;
		}
	} else {
		vnode_unlock(vp);
	}
	/* release iocount! */
	vnode_put(vp);

	return rc;
}
3452
3453 extern dev_t chrtoblk(dev_t d);
3454
3455 /*
3456 * Examine the supplied vnode's dev_t and find its counterpart
3457 * (e.g. VCHR => VDEV) to compare against.
3458 */
3459 static int
vnode_cmp_paired_dev(vnode_t vp,vnode_t bdev_vp,enum vtype in_type,enum vtype out_type)3460 vnode_cmp_paired_dev(vnode_t vp, vnode_t bdev_vp, enum vtype in_type,
3461 enum vtype out_type)
3462 {
3463 if (!vp || !bdev_vp) {
3464 return EINVAL;
3465 }
3466 /* Verify iocounts */
3467 if (vnode_iocount(vp) <= 0 ||
3468 vnode_iocount(bdev_vp) <= 0) {
3469 return EINVAL;
3470 }
3471
3472 /* check for basic matches */
3473 if (vnode_vtype(vp) != in_type) {
3474 return EINVAL;
3475 }
3476 if (vnode_vtype(bdev_vp) != out_type) {
3477 return EINVAL;
3478 }
3479
3480 dev_t dev = vnode_specrdev(vp);
3481 dev_t blk_devt = vnode_specrdev(bdev_vp);
3482
3483 if (in_type == VCHR) {
3484 if (out_type != VBLK) {
3485 return EINVAL;
3486 }
3487 dev_t bdev = chrtoblk(dev);
3488 if (bdev == NODEV) {
3489 return EINVAL;
3490 } else if (bdev == blk_devt) {
3491 return 0;
3492 }
3493 //fall through
3494 }
3495 /*
3496 * else case:
3497 *
3498 * in_type == VBLK? => VCHR?
3499 * not implemented...
3500 * exercise to the reader: this can be built by
3501 * taking the device's major, and iterating the `chrtoblktab`
3502 * array to look for a value that matches.
3503 */
3504 return EINVAL;
3505 }
3506 /*
3507 * Vnode compare: does the supplied vnode's CHR device, match the dev_t
3508 * of the accompanying `blk_vp` ?
3509 * NOTE: vnodes MUST be iocounted BEFORE calling this!
3510 */
3511
int
vnode_cmp_chrtoblk(vnode_t vp, vnode_t blk_vp)
{
	/* delegate to the generic paired-device comparison: VCHR vs VBLK */
	return vnode_cmp_paired_dev(vp, blk_vp, VCHR, VBLK);
}
3517
3518
3519
3520 /*
3521 * Calculate the total number of references to a special device.
3522 */
int
vcount(vnode_t vp)
{
	vnode_t vq, vnext;
	int count;
	int vid;

	if (!vnode_isspec(vp)) {
		/* non-special vnode: usecount minus kernel-internal usecount */
		return vp->v_usecount - vp->v_kusecount;
	}

loop:
	if (!vnode_isaliased(vp)) {
		/* single vnode for this device: its open count is the answer */
		return vp->v_specinfo->si_opencount;
	}
	count = 0;

	SPECHASH_LOCK();
	/*
	 * Grab first vnode and its vid.
	 */
	vq = *vp->v_hashchain;
	if (vq) {
		vid = vq->v_id;
		vnode_hold(vq);
	} else {
		vid = 0;
	}
	SPECHASH_UNLOCK();

	while (vq) {
		/*
		 * Attempt to get the vnode outside the SPECHASH lock.
		 * Don't take iocount on 'vp' as iocount is already held by the caller.
		 */
		if ((vq != vp) && vnode_getwithvid(vq, vid)) {
			/* vq was recycled; restart the whole scan */
			vnode_drop(vq);
			goto loop;
		}
		vnode_drop(vq);
		vnode_lock(vq);

		if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
			if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
				/*
				 * Alias, but not in use, so flush it out.
				 */
				vnode_hold(vq);
				vnode_reclaim_internal(vq, 1, 1, 0);
				vnode_put_locked(vq);
				vnode_drop_and_unlock(vq);
				goto loop;
			}
			/* accumulate opens across all aliases of the device */
			count += vq->v_specinfo->si_opencount;
		}
		vnode_unlock(vq);

		SPECHASH_LOCK();
		/*
		 * must do this with the reference still held on 'vq'
		 * so that it can't be destroyed while we're poking
		 * through v_specnext
		 */
		vnext = vq->v_specnext;
		if (vnext) {
			vid = vnext->v_id;
			vnode_hold(vnext);
		} else {
			vid = 0;
		}
		SPECHASH_UNLOCK();

		if (vq != vp) {
			vnode_put(vq);
		}

		vq = vnext;
	}

	return count;
}
3604
int prtactive = 0; /* 1 => print out reclaim of active vnodes */

/*
 * Print out a description of a vnode.
 */
/* human-readable names indexed by enum vtype (used by vprint_internal) */
static const char *typename[] =
{ "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
3612
3613 static void
vprint_internal(const char * label,struct vnode * vp,bool with_path)3614 vprint_internal(const char *label, struct vnode *vp, bool with_path)
3615 {
3616 char sbuf[64];
3617
3618 if (label != NULL) {
3619 printf("%s: ", label);
3620 }
3621
3622 if (with_path) {
3623 char const *path = NULL;
3624 char *vn_path = NULL;
3625 vm_size_t vn_pathlen = MAXPATHLEN;
3626
3627 vn_path = zalloc(ZV_NAMEI);
3628 if (vn_getpath(vp, vn_path, (int*)&vn_pathlen) == 0) {
3629 path = vn_path;
3630 } else {
3631 path = "(get vnode path failed)";
3632 }
3633
3634 printf("name %s, type %s, usecount %d, writecount %d, path %s\n",
3635 vp->v_name, typename[vp->v_type],
3636 vp->v_usecount, vp->v_writecount, path);
3637
3638 if (vn_path) {
3639 zfree(ZV_NAMEI, vn_path);
3640 }
3641 } else {
3642 printf("name %s, type %s, usecount %d, writecount %d\n",
3643 vp->v_name, typename[vp->v_type],
3644 vp->v_usecount, vp->v_writecount);
3645 }
3646 sbuf[0] = '\0';
3647 if (vp->v_flag & VROOT) {
3648 strlcat(sbuf, "|VROOT", sizeof(sbuf));
3649 }
3650 if (vp->v_flag & VTEXT) {
3651 strlcat(sbuf, "|VTEXT", sizeof(sbuf));
3652 }
3653 if (vp->v_flag & VSYSTEM) {
3654 strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
3655 }
3656 if (vp->v_flag & VNOFLUSH) {
3657 strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
3658 }
3659 if (vp->v_flag & VBWAIT) {
3660 strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
3661 }
3662 if (vnode_isaliased(vp)) {
3663 strlcat(sbuf, "|VALIASED", sizeof(sbuf));
3664 }
3665 if (sbuf[0] != '\0') {
3666 printf("vnode flags (%s)\n", &sbuf[1]);
3667 }
3668 }
3669
void
vprint(const char *label, struct vnode *vp)
{
	/* description without the (allocating) path lookup */
	vprint_internal(label, vp, false);
}
3675
void
vprint_path(const char *label, struct vnode *vp)
{
	/* description including the vnode's path */
	vprint_internal(label, vp, true);
}
3681
3682 static int
vn_getpath_flags_to_buildpath_flags(int flags)3683 vn_getpath_flags_to_buildpath_flags(int flags)
3684 {
3685 int bpflags = (flags & VN_GETPATH_FSENTER) ? 0 : BUILDPATH_NO_FS_ENTER;
3686
3687 if (flags && (flags != VN_GETPATH_FSENTER)) {
3688 if (flags & VN_GETPATH_NO_FIRMLINK) {
3689 bpflags |= BUILDPATH_NO_FIRMLINK;
3690 }
3691 if (flags & VN_GETPATH_VOLUME_RELATIVE) {
3692 bpflags |= (BUILDPATH_VOLUME_RELATIVE |
3693 BUILDPATH_NO_FIRMLINK);
3694 }
3695 if (flags & VN_GETPATH_NO_PROCROOT) {
3696 bpflags |= BUILDPATH_NO_PROCROOT;
3697 }
3698 if (flags & VN_GETPATH_CHECK_MOVED) {
3699 bpflags |= BUILDPATH_CHECK_MOVED;
3700 }
3701 }
3702
3703 return bpflags;
3704 }
3705
int
vn_getpath_ext_with_mntlen(struct vnode *vp, struct vnode *dvp, char *pathbuf,
    size_t *len, size_t *mntlen, int flags)
{
	int bpflags = vn_getpath_flags_to_buildpath_flags(flags);
	int local_len;
	int error;

	/* build_path_with_parent() takes an int length; reject larger buffers */
	if (*len > INT_MAX) {
		return EINVAL;
	}

	local_len = *len;

	error = build_path_with_parent(vp, dvp, pathbuf, local_len, &local_len,
	    mntlen, bpflags, vfs_context_current());

	/* propagate the resulting length only when it is sane */
	if (local_len >= 0 && local_len <= (int)*len) {
		*len = (size_t)local_len;
	}

	return error;
}
3729
int
vn_getpath_ext(struct vnode *vp, struct vnode *dvp, char *pathbuf, size_t *len,
    int flags)
{
	/* convenience wrapper: same as the _with_mntlen variant, no mntlen */
	return vn_getpath_ext_with_mntlen(vp, dvp, pathbuf, len, NULL, flags);
}
3736
3737 /*
3738 * Wrapper around vn_getpath_ext() that takes care of the int * <-> size_t *
3739 * conversion for the legacy KPIs.
3740 */
3741 static int
vn_getpath_ext_int(struct vnode * vp,struct vnode * dvp,char * pathbuf,int * len,int flags)3742 vn_getpath_ext_int(struct vnode *vp, struct vnode *dvp, char *pathbuf,
3743 int *len, int flags)
3744 {
3745 size_t slen = *len;
3746 int error;
3747
3748 if (*len < 0) {
3749 return EINVAL;
3750 }
3751
3752 error = vn_getpath_ext(vp, dvp, pathbuf, &slen, flags);
3753
3754 if (slen <= INT_MAX) {
3755 *len = (int)slen;
3756 }
3757
3758 return error;
3759 }
3760
int
vn_getpath(struct vnode *vp, char *pathbuf, int *len)
{
	/* legacy KPI: int-length variant, default flags (no FS reentry) */
	return vn_getpath_ext_int(vp, NULL, pathbuf, len, 0);
}
3766
int
vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
{
	/* legacy KPI: like vn_getpath(), but allowed to reenter the filesystem */
	return vn_getpath_ext_int(vp, NULL, pathbuf, len, VN_GETPATH_FSENTER);
}
3772
3773 /*
 * vn_getpath_fsenter_with_parent will reenter the file system to find the path of the
3775 * vnode. It requires that there are IO counts on both the vnode and the directory vnode.
3776 *
 * vn_getpath_fsenter is called by MAC hooks to authorize operations for everything but
3778 * unlink, rmdir and rename. For these operation the MAC hook calls vn_getpath. This presents
3779 * problems where if the path can not be found from the name cache, those operations can
3780 * erroneously fail with EPERM even though the call should succeed. When removing or moving
3781 * file system objects with operations such as unlink or rename, those operations need to
3782 * take IO counts on the target and containing directory. Calling vn_getpath_fsenter from a
3783 * MAC hook from these operations during forced unmount operations can lead to dead
3784 * lock. This happens when the operation starts, IO counts are taken on the containing
3785 * directories and targets. Before the MAC hook is called a forced unmount from another
3786 * thread takes place and blocks on the on going operation's directory vnode in vdrain.
3787 * After which, the MAC hook gets called and calls vn_getpath_fsenter. vn_getpath_fsenter
3788 * is called with the understanding that there is an IO count on the target. If in
3789 * build_path the directory vnode is no longer in the cache, then the parent object id via
 * vnode_getattr from the target is obtained and used to call VFS_VGET to get the parent
3791 * vnode. The file system's VFS_VGET then looks up by inode in its hash and tries to get
3792 * an IO count. But VFS_VGET "sees" the directory vnode is in vdrain and can block
3793 * depending on which version and how it calls the vnode_get family of interfaces.
3794 *
3795 * N.B. A reasonable interface to use is vnode_getwithvid. This interface was modified to
3796 * call vnode_getiocount with VNODE_DRAINO, so it will happily get an IO count and not
3797 * cause issues, but there is no guarantee that all or any file systems are doing that.
3798 *
3799 * vn_getpath_fsenter_with_parent can enter the file system safely since there is a known
3800 * IO count on the directory vnode by calling build_path_with_parent.
3801 */
3802
int
vn_getpath_fsenter_with_parent(struct vnode *dvp, struct vnode *vp, char *pathbuf, int *len)
{
	/*
	 * Safe to enter the filesystem: the caller guarantees an iocount
	 * on the parent directory dvp (see the block comment above).
	 */
	return build_path_with_parent(vp, dvp, pathbuf, *len, len, NULL, 0, vfs_context_current());
}
3808
int
vn_getpath_no_firmlink(struct vnode *vp, char *pathbuf, int *len)
{
	/* legacy KPI: resolve the path without crossing firmlinks */
	return vn_getpath_ext_int(vp, NULLVP, pathbuf, len,
	           VN_GETPATH_NO_FIRMLINK);
}
3815
int
vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash, uint8_t *type)
{
	/* delegate to the UBC code-signing layer */
	return ubc_cs_getcdhash(vp, offset, cdhash, type);
}
3821
3822
/* package-extension table installed via set_package_extensions_table() */
static char *extension_table = NULL; /* nexts slots of max_ext_width bytes each */
static int nexts;                    /* number of entries in extension_table */
static int max_ext_width;            /* fixed width of each table slot */
3826
/*
 * qsort() comparator ordering package extensions by string length
 * (ascending).  Uses an explicit comparison rather than subtracting
 * size_t values: strlen(a) - strlen(b) wraps around when b is longer,
 * and the resulting huge unsigned value truncated to int could report
 * the wrong sign.
 */
static int
extension_cmp(const void *a, const void *b)
{
	size_t la = strlen((const char *)a);
	size_t lb = strlen((const char *)b);

	return (la > lb) - (la < lb);
}
3832
3833
3834 //
3835 // This is the api LaunchServices uses to inform the kernel
3836 // the list of package extensions to ignore.
3837 //
// Internally we keep the list sorted by the length of the
// extension (ascending -- see extension_cmp). We sort the
3840 // list of extensions so that we can speed up our searches
3841 // when comparing file names -- we only compare extensions
3842 // that could possibly fit into the file name, not all of
3843 // them (i.e. a short 8 character name can't have an 8
3844 // character extension).
3845 //
3846 extern lck_mtx_t pkg_extensions_lck;
3847
__private_extern__ int
set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
{
	char *new_exts, *old_exts;
	int old_nentries = 0, old_maxwidth = 0;
	int error;

	/* bound the table; this also keeps nentries * maxwidth from overflowing */
	if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
		return EINVAL;
	}


	// allocate one byte extra so we can guarantee null termination
	new_exts = kalloc_data((nentries * maxwidth) + 1, Z_WAITOK);
	if (new_exts == NULL) {
		return ENOMEM;
	}

	error = copyin(data, new_exts, nentries * maxwidth);
	if (error) {
		kfree_data(new_exts, (nentries * maxwidth) + 1);
		return error;
	}

	new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block

	/* order entries by extension length (see extension_cmp) */
	qsort(new_exts, nentries, maxwidth, extension_cmp);

	lck_mtx_lock(&pkg_extensions_lck);

	/* swap the new table in under the lock, remembering the old one */
	old_exts = extension_table;
	old_nentries = nexts;
	old_maxwidth = max_ext_width;
	extension_table = new_exts;
	nexts = nentries;
	max_ext_width = maxwidth;

	lck_mtx_unlock(&pkg_extensions_lck);

	/* free the previous table outside the lock */
	kfree_data(old_exts, (old_nentries * old_maxwidth) + 1);

	return 0;
}
3891
3892
/*
 * Return 1 if 'name' ends in one of the registered package extensions
 * (case-insensitive), 0 otherwise.
 */
int
is_package_name(const char *name, int len)
{
	int i;
	size_t extlen;
	const char *ptr, *name_ext;

	// if the name is less than 3 bytes it can't be of the
	// form A.B and if it begins with a "." then it is also
	// not a package.
	if (len <= 3 || name[0] == '.') {
		return 0;
	}

	/* find the last '.' in the name (scan relies on NUL termination) */
	name_ext = NULL;
	for (ptr = name; *ptr != '\0'; ptr++) {
		if (*ptr == '.') {
			name_ext = ptr;
		}
	}

	// if there is no "." extension, it can't match
	if (name_ext == NULL) {
		return 0;
	}

	// advance over the "."
	name_ext++;

	lck_mtx_lock(&pkg_extensions_lck);

	// now iterate over all the extensions to see if any match
	ptr = &extension_table[0];
	for (i = 0; i < nexts; i++, ptr += max_ext_width) {
		extlen = strlen(ptr);
		if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
			// aha, a match!
			lck_mtx_unlock(&pkg_extensions_lck);
			return 1;
		}
	}

	lck_mtx_unlock(&pkg_extensions_lck);

	// if we get here, no extension matched
	return 0;
}
3940
/*
 * Scan 'path' component by component and set *component to the index of
 * the first component that is a package name, or -1 if none is found.
 *
 * NOTE: this writes '\0' over component separators in place and does not
 * restore them, so the caller's 'path' buffer is modified.
 */
int
vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
{
	char *ptr, *end;
	int comp = 0;

	if (pathlen < 0) {
		return EINVAL;
	}

	*component = -1;
	if (*path != '/') {
		return EINVAL;
	}

	end = path + 1;
	while (end < path + pathlen && *end != '\0') {
		/* skip any run of '/' separators */
		while (end < path + pathlen && *end == '/' && *end != '\0') {
			end++;
		}

		ptr = end;

		/* advance to the end of this component */
		while (end < path + pathlen && *end != '/' && *end != '\0') {
			end++;
		}

		if (end > path + pathlen) {
			// hmm, string wasn't null terminated
			return EINVAL;
		}

		*end = '\0';
		if (is_package_name(ptr, (int)(end - ptr))) {
			*component = comp;
			break;
		}

		end++;
		comp++;
	}

	return 0;
}
3985
3986 /*
3987 * Determine if a name is inappropriate for a searchfs query.
3988 * This list consists of /System currently.
3989 */
3990
int
vn_searchfs_inappropriate_name(const char *name, int len)
{
	/* names that must not be matched by a searchfs query, with lengths */
	static const char *bad_names[] = { "System" };
	static const int bad_len[] = { 6 };

	if (len < 0) {
		return EINVAL;
	}

	for (size_t i = 0; i < sizeof(bad_names) / sizeof(bad_names[0]); i++) {
		if (len == bad_len[i] &&
		    strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
			return 1;
		}
	}

	/* no disallowed name matched */
	return 0;
}
4011
4012 /*
4013 * Top level filesystem related information gathering.
4014 */
4015 extern unsigned int vfs_nummntops;
4016
4017 /*
 * The VFS_NUMMNTOPS shouldn't be at name[1] since it
 * is a VFS generic variable. Since we no longer support
4020 * VT_UFS, we reserve its value to support this sysctl node.
4021 *
4022 * It should have been:
4023 * name[0]: VFS_GENERIC
4024 * name[1]: VFS_NUMMNTOPS
4025 */
4026 SYSCTL_INT(_vfs, VFS_NUMMNTOPS, nummntops,
4027 CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
4028 &vfs_nummntops, 0, "");
4029
/* prototype for the definition below */
int
vfs_sysctl(int *name __unused, u_int namelen __unused,
    user_addr_t oldp __unused, size_t *oldlenp __unused,
    user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused);

/* old-style top-level vfs sysctl entry point: always rejects the request */
int
vfs_sysctl(int *name __unused, u_int namelen __unused,
    user_addr_t oldp __unused, size_t *oldlenp __unused,
    user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused)
{
	return EINVAL;
}
4042
4043
4044 //
4045 // The following code disallows specific sysctl's that came through
4046 // the direct sysctl interface (vfs_sysctl_node) instead of the newer
4047 // sysctl_vfs_ctlbyfsid() interface. We can not allow these selectors
4048 // through vfs_sysctl_node() because it passes the user's oldp pointer
4049 // directly to the file system which (for these selectors) casts it
4050 // back to a struct sysctl_req and then proceed to use SYSCTL_IN()
4051 // which jumps through an arbitrary function pointer. When called
4052 // through the sysctl_vfs_ctlbyfsid() interface this does not happen
4053 // and so it's safe.
4054 //
4055 // Unfortunately we have to pull in definitions from AFP and SMB and
4056 // perform explicit name checks on the file system to determine if
4057 // these selectors are being used.
4058 //
4059
4060 #define AFPFS_VFS_CTL_GETID 0x00020001
4061 #define AFPFS_VFS_CTL_NETCHANGE 0x00020002
4062 #define AFPFS_VFS_CTL_VOLCHANGE 0x00020003
4063
4064 #define SMBFS_SYSCTL_REMOUNT 1
4065 #define SMBFS_SYSCTL_REMOUNT_INFO 2
4066 #define SMBFS_SYSCTL_GET_SERVER_SHARE 3
4067
4068
4069 static int
is_bad_sysctl_name(struct vfstable * vfsp,int selector_name)4070 is_bad_sysctl_name(struct vfstable *vfsp, int selector_name)
4071 {
4072 switch (selector_name) {
4073 case VFS_CTL_QUERY:
4074 case VFS_CTL_TIMEO:
4075 case VFS_CTL_NOLOCKS:
4076 case VFS_CTL_NSTATUS:
4077 case VFS_CTL_SADDR:
4078 case VFS_CTL_DISC:
4079 case VFS_CTL_SERVERINFO:
4080 return 1;
4081
4082 default:
4083 break;
4084 }
4085
4086 // the more complicated check for some of SMB's special values
4087 if (strcmp(vfsp->vfc_name, "smbfs") == 0) {
4088 switch (selector_name) {
4089 case SMBFS_SYSCTL_REMOUNT:
4090 case SMBFS_SYSCTL_REMOUNT_INFO:
4091 case SMBFS_SYSCTL_GET_SERVER_SHARE:
4092 return 1;
4093 }
4094 } else if (strcmp(vfsp->vfc_name, "afpfs") == 0) {
4095 switch (selector_name) {
4096 case AFPFS_VFS_CTL_GETID:
4097 case AFPFS_VFS_CTL_NETCHANGE:
4098 case AFPFS_VFS_CTL_VOLCHANGE:
4099 return 1;
4100 }
4101 }
4102
4103 //
4104 // If we get here we passed all the checks so the selector is ok
4105 //
4106 return 0;
4107 }
4108
4109
int vfs_sysctl_node SYSCTL_HANDLER_ARGS
{
	int *name, namelen;
	struct vfstable *vfsp;
	int error;
	int fstypenum;

	fstypenum = oidp->oid_number;
	name = arg1;
	namelen = arg2;

	/* all sysctl names at this level should have at least one name slot for the FS */
	if (namelen < 1) {
		return EISDIR; /* overloaded */
	}
	/* find the filesystem's vfstable entry and bump its refcount */
	mount_list_lock();
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_typenum == fstypenum) {
			vfsp->vfc_refcount++;
			break;
		}
	}
	mount_list_unlock();

	if (vfsp == NULL) {
		return ENOTSUP;
	}

	/* reject selectors that are unsafe on this legacy path (see above) */
	if (is_bad_sysctl_name(vfsp, name[0])) {
		printf("vfs: bad selector 0x%.8x for old-style sysctl(). use the sysctl-by-fsid interface instead\n", name[0]);
		error = EPERM;
	} else {
		error = (vfsp->vfc_vfsops->vfs_sysctl)(name, namelen,
		    req->oldptr, &req->oldlen, req->newptr, req->newlen,
		    vfs_context_current());
	}

	/* drop the reference taken above */
	mount_list_lock();
	vfsp->vfc_refcount--;
	mount_list_unlock();

	return error;
}
4153
4154 /*
4155 * Check to see if a filesystem is mounted on a block device.
4156 */
int
vfs_mountedon(struct vnode *vp)
{
	struct vnode *vq;
	int error = 0;

restart:
	SPECHASH_LOCK();
	/*
	 * If another thread is in the middle of mounting on this vnode,
	 * wait for it to finish (msleep drops the hash lock via PDROP).
	 */
	if (vp->v_specflags & SI_MOUNTING && (vp->v_specinfo->si_mountingowner != current_thread())) {
		msleep((caddr_t)&vp->v_specflags, SPECHASH_LOCK_ADDR(), PVFS | PDROP, "vnode_waitformounting", NULL);
		goto restart;
	}
	if (vp->v_specflags & SI_MOUNTEDON) {
		error = EBUSY;
		goto out;
	}
	/* also check every alias of this special device */
	if (vp->v_specflags & SI_ALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type || vq == vp) {
				continue;
			}
			if (vq->v_specflags & SI_MOUNTING) {
				msleep((caddr_t)&vq->v_specflags, SPECHASH_LOCK_ADDR(), PVFS | PDROP, "vnode_waitformounting", NULL);
				goto restart;
			}
			if (vq->v_specflags & SI_MOUNTEDON) {
				error = EBUSY;
				break;
			}
		}
	}
out:
	SPECHASH_UNLOCK();
	return error;
}
4193
void
vfs_setmountedon(vnode_t vp)
{
	vnode_lock(vp);
	SPECHASH_LOCK();
	/* transition SI_MOUNTING -> SI_MOUNTEDON and release ownership */
	vp->v_specflags |= SI_MOUNTEDON;
	vp->v_specflags &= ~SI_MOUNTING;
	vp->v_specinfo->si_mountingowner = NULL;
	SPECHASH_UNLOCK();
	vnode_unlock(vp);
	/* wake anyone sleeping in vfs_mountedon()/vfs_setmounting() */
	wakeup(&vp->v_specflags);
}
4206
void
vfs_clearmounting(vnode_t vp)
{
	vnode_lock(vp);
	SPECHASH_LOCK();
	/* abandon the mount attempt: clear SI_MOUNTING and ownership */
	vp->v_specflags &= ~SI_MOUNTING;
	vp->v_specinfo->si_mountingowner = NULL;
	SPECHASH_UNLOCK();
	vnode_unlock(vp);
	/* wake anyone sleeping in vfs_mountedon()/vfs_setmounting() */
	wakeup(&vp->v_specflags);
}
4218
/*
 * Mark a block device vnode as having a mount in progress (SI_MOUNTING),
 * waiting out any concurrent mount attempt and failing with EBUSY if the
 * vnode or one of its aliases is already mounted on.
 */
int
vfs_setmounting(vnode_t vp)
{
	struct vnode *vq;
	int error = 0;

	vnode_lock(vp);
	/* wait for any other thread already mounting on this vnode */
	while (vp->v_specflags & SI_MOUNTING) {
		msleep((caddr_t)&vp->v_specflags, &vp->v_lock, PVFS, "vnode_waitformounting", NULL);
	}
	if (vp->v_specflags & SI_MOUNTEDON) {
		vnode_unlock(vp);
		return EBUSY;
	}
	/* claim the vnode for this mount attempt */
	SPECHASH_LOCK();
	vp->v_specflags |= SI_MOUNTING;
	vp->v_specinfo->si_mountingowner = current_thread();
	vnode_unlock(vp);
restart:
	/* check every alias of the device for competing or established mounts */
	if (vp->v_specflags & SI_ALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type || vq == vp) {
				continue;
			}
			if (vq->v_specflags & SI_MOUNTING) {
				/* msleep drops the hash lock (PDROP); retake and rescan */
				msleep((caddr_t)&vq->v_specflags, SPECHASH_LOCK_ADDR(), PVFS | PDROP, "vnode_waitformounting", NULL);
				SPECHASH_LOCK();
				goto restart;
			}
			if (vq->v_specflags & SI_MOUNTEDON) {
				error = EBUSY;
				break;
			}
		}
	}
	SPECHASH_UNLOCK();
	if (error) {
		/* failed: back out SI_MOUNTING and wake any waiters */
		vnode_lock(vp);
		SPECHASH_LOCK();
		vp->v_specflags &= ~SI_MOUNTING;
		SPECHASH_UNLOCK();
		vnode_unlock(vp);
		wakeup(&vp->v_specflags);
	}
	return error;
}
4269
/* accumulator passed to unmount_callback() by vfs_unmountall() */
struct unmount_info {
	int u_errs; // Total failed unmounts
	int u_busy; // EBUSY failed unmounts
	int u_count; // Total volumes iterated
	int u_only_non_system; // nonzero => skip root/system mounts
};
4276
static int
unmount_callback(mount_t mp, void *arg)
{
	int error;
	char *mntname;
	struct unmount_info *uip = arg;

	uip->u_count++;

	/* copy the mount point name before dounmount() can tear mp down */
	mntname = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);
	strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);

	if (uip->u_only_non_system
	    && ((mp->mnt_flag & MNT_ROOTFS) || (mp->mnt_kern_flag & MNTK_SYSTEM))) { //MNTK_BACKS_ROOT
		printf("unmount(%d) %s skipped\n", uip->u_only_non_system, mntname);
		mount_iterdrop(mp); // VFS_ITERATE_CB_DROPREF
	} else {
		printf("unmount(%d) %s\n", uip->u_only_non_system, mntname);

		/* take our own ref, drop the iterator's, then force-unmount */
		mount_ref(mp, 0);
		mount_iterdrop(mp); // VFS_ITERATE_CB_DROPREF
		error = dounmount(mp, MNT_FORCE, 1, vfs_context_current());
		if (error) {
			uip->u_errs++;
			printf("Unmount of %s failed (%d)\n", mntname ? mntname:"?", error);
			if (error == EBUSY) {
				uip->u_busy++;
			}
		}
	}
	zfree(ZV_NAMEI, mntname);

	return VFS_RETURNED;
}
4311
4312 /*
4313 * Unmount all filesystems. The list is traversed in reverse order
4314 * of mounting to avoid dependencies.
4315 * Busy mounts are retried.
4316 */
__private_extern__ void
vfs_unmountall(int only_non_system)
{
	int mounts, sec = 1;
	struct unmount_info ui;

	/*
	 * Ensure last-completion-time is valid before anyone can see that
	 * VFS shutdown has started.
	 */
	vfs_shutdown_last_completion_time = mach_absolute_time();
	OSMemoryBarrier();
	vfs_unmountall_started = 1;
	printf("vfs_unmountall(%ssystem) start\n", only_non_system ? "non" : "");

retry:
	ui.u_errs = ui.u_busy = ui.u_count = 0;
	ui.u_only_non_system = only_non_system;
	// avoid vfs_iterate deadlock in dounmount(), use VFS_ITERATE_CB_DROPREF
	vfs_iterate(VFS_ITERATE_CB_DROPREF | VFS_ITERATE_TAIL_FIRST, unmount_callback, &ui);
	mounts = mount_getvfscnt();
	if (mounts == 0) {
		goto out;
	}
	if (ui.u_busy > 0) { // Busy mounts - wait & retry
		/* exponential backoff: 1s, 2s, ... up to 32s before giving up */
		tsleep(&nummounts, PVFS, "busy mount", sec * hz);
		sec *= 2;
		if (sec <= 32) {
			goto retry;
		}
		printf("Unmounting timed out\n");
	} else if (ui.u_count < mounts) {
		// If the vfs_iterate missed mounts in progress - wait a bit
		tsleep(&nummounts, PVFS, "missed mount", 2 * hz);
	}

out:
	printf("vfs_unmountall(%ssystem) end\n", only_non_system ? "non" : "");

	/*
	 * reboot_kernel() calls us twice; once to deal with non-system
	 * mounts, and again to sweep up anything left after terminating
	 * DEXTs.  We're only finished once we've completed the second pass.
	 */
	if (!only_non_system) {
		vfs_unmountall_finished = 1;
	}
}
4365
4366 /*
4367 * vfs_shutdown_in_progress --
4368 *
4369 * Returns whether or not the VFS is shutting down the file systems.
4370 */
4371 boolean_t
vfs_shutdown_in_progress(void)4372 vfs_shutdown_in_progress(void)
4373 {
4374 return vfs_unmountall_started && !vfs_unmountall_finished;
4375 }
4376
4377 /*
4378 * vfs_shutdown_finished --
4379 *
4380 * Returns whether or not the VFS shutdown has completed.
4381 */
4382 boolean_t
vfs_shutdown_finished(void)4383 vfs_shutdown_finished(void)
4384 {
4385 return !!vfs_unmountall_finished;
4386 }
4387
4388 /*
4389 * vfs_update_last_completion_time --
4390 *
4391 * Updates the "last I/O completion time" timestamp used by the watchdog
4392 * to monitor VFS shutdown progress. Called by various I/O stack layers
4393 * as operations complete and progress moves forward.
4394 */
4395 void
vfs_update_last_completion_time(void)4396 vfs_update_last_completion_time(void)
4397 {
4398 if (vfs_unmountall_started) {
4399 vfs_shutdown_last_completion_time = mach_absolute_time();
4400 }
4401 }
4402
4403 /*
4404 * vfs_last_completion_time --
4405 *
4406 * Returns the "last I/O completion time" timestamp. Return
4407 * value is a mach_absolute_time() value, and is not meaningful
4408 * unless vfs_is_shutting_down() also returns true.
4409 */
4410 uint64_t
vfs_last_completion_time(void)4411 vfs_last_completion_time(void)
4412 {
4413 return vfs_unmountall_started ? vfs_shutdown_last_completion_time : 0;
4414 }
4415
4416 /*
4417 * This routine is called from vnode_pager_deallocate out of the VM
4418 * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
4419 * on a vnode that has a UBCINFO
4420 */
__private_extern__ void
vnode_pager_vrele(vnode_t vp)
{
	struct ubc_info *uip;

	vnode_lock_spin(vp);

	/* Pager is going away: drop the named-UBC marker on the vnode. */
	vp->v_lflag &= ~VNAMED_UBC;
	if (vp->v_usecount != 0) {
		/*
		 * At the eleventh hour, just before the ubcinfo is
		 * destroyed, ensure the ubc-specific v_usecount
		 * reference has gone. We use v_usecount != 0 as a hint;
		 * ubc_unmap() does nothing if there's no mapping.
		 *
		 * This case is caused by coming here via forced unmount,
		 * versus the usual vm_object_deallocate() path.
		 * In the forced unmount case, ubc_destroy_named()
		 * releases the pager before memory_object_last_unmap()
		 * can be called.
		 */
		vnode_unlock(vp);
		ubc_unmap(vp); /* may block; must not hold the vnode lock */
		vnode_lock_spin(vp);
	}

	/* Detach the ubc_info from the vnode before releasing it. */
	uip = vp->v_ubcinfo;
	vp->v_ubcinfo = UBC_INFO_NULL;

	vnode_unlock(vp);

	ubc_info_deallocate(uip);
}
4454
4455
#include <sys/disk.h>

/* BSD disk unit number of the root device; (u_int32_t)-1 until set in vfs_init_io_attributes(). */
u_int32_t rootunit = (u_int32_t)-1;

#if CONFIG_IOSCHED
extern int lowpri_throttle_enabled;
extern int iosched_enabled;
#endif
4464
/*
 * Probe the underlying block device for its I/O characteristics
 * (block size, max transfer byte/block/segment counts, alignment,
 * command queue depth, feature flags, storage topology) and record
 * them in the mount structure.  Returns 0 on success or the errno of
 * a failed required ioctl; several probes are best-effort and their
 * failures are ignored.
 */
errno_t
vfs_init_io_attributes(vnode_t devvp, mount_t mp)
{
	int error;
	off_t readblockcnt = 0;
	off_t writeblockcnt = 0;
	off_t readmaxcnt = 0;
	off_t writemaxcnt = 0;
	off_t readsegcnt = 0;
	off_t writesegcnt = 0;
	off_t readsegsize = 0;
	off_t writesegsize = 0;
	off_t alignment = 0;
	u_int32_t minsaturationbytecount = 0;
	u_int32_t ioqueue_depth = 0;
	u_int32_t blksize;
	u_int64_t temp;
	u_int32_t features;
	u_int64_t location = 0;
	vfs_context_t ctx = vfs_context_current();
	dk_corestorage_info_t cs_info;
	boolean_t cs_present = FALSE;
	int isssd = 0;
	int isvirtual = 0;


	/* Best-effort probe; failure leaves mnt_throttle_mask unchanged. */
	VNOP_IOCTL(devvp, DKIOCGETTHROTTLEMASK, (caddr_t)&mp->mnt_throttle_mask, 0, NULL);
	/*
	 * as a reasonable approximation, only use the lowest bit of the mask
	 * to generate a disk unit number
	 */
	mp->mnt_devbsdunit = mp->mnt_throttle_mask ?
	    num_trailing_0(mp->mnt_throttle_mask) : (LOWPRI_MAX_NUM_DEV - 1);

	if (devvp == rootvp) {
		rootunit = mp->mnt_devbsdunit;
	}

	if (mp->mnt_devbsdunit == rootunit) {
		/*
		 * this mount point exists on the same device as the root
		 * partition, so it comes under the hard throttle control...
		 * this is true even for the root mount point itself
		 */
		mp->mnt_kern_flag |= MNTK_ROOTDEV;
	}
	/*
	 * force the spec device to re-cache
	 * the underlying block size in case
	 * the filesystem overrode the initial value
	 */
	set_fsblocksize(devvp);


	if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
	    (caddr_t)&blksize, 0, ctx))) {
		return error;
	}

	mp->mnt_devblocksize = blksize;

	/*
	 * set the maximum possible I/O size
	 * this may get clipped to a smaller value
	 * based on which constraints are being advertised
	 * and if those advertised constraints result in a smaller
	 * limit for a given I/O
	 */
	mp->mnt_maxreadcnt = MAX_UPL_SIZE_BYTES;
	mp->mnt_maxwritecnt = MAX_UPL_SIZE_BYTES;

	/* Virtual and solid-state probes are optional; failures ignored. */
	if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
		if (isvirtual) {
			mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
			mp->mnt_flag |= MNT_REMOVABLE;
		}
	}
	if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
		if (isssd) {
			mp->mnt_kern_flag |= MNTK_SSD;
		}
	}
	/* The remaining probes below are required; any failure aborts. */
	if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
	    (caddr_t)&features, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
	    (caddr_t)&readblockcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
	    (caddr_t)&writeblockcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
	    (caddr_t)&readmaxcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
	    (caddr_t)&writemaxcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
	    (caddr_t)&readsegcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
	    (caddr_t)&writesegcnt, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
	    (caddr_t)&readsegsize, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
	    (caddr_t)&writesegsize, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
	    (caddr_t)&alignment, 0, ctx))) {
		return error;
	}

	if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
	    (caddr_t)&ioqueue_depth, 0, ctx))) {
		return error;
	}

	/*
	 * Clamp the advertised limits into the 32-bit mount fields; a zero
	 * value from the driver means "no limit advertised" and leaves the
	 * MAX_UPL_SIZE_BYTES default (or derives a value from it) instead.
	 */
	if (readmaxcnt) {
		mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX :(uint32_t) readmaxcnt;
	}

	if (readblockcnt) {
		temp = readblockcnt * blksize;
		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;

		/* The block-count limit can only further restrict the byte limit. */
		if (temp < mp->mnt_maxreadcnt) {
			mp->mnt_maxreadcnt = (u_int32_t)temp;
		}
	}

	if (writemaxcnt) {
		mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : (uint32_t)writemaxcnt;
	}

	if (writeblockcnt) {
		temp = writeblockcnt * blksize;
		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;

		if (temp < mp->mnt_maxwritecnt) {
			mp->mnt_maxwritecnt = (u_int32_t)temp;
		}
	}

	if (readsegcnt) {
		temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
	} else {
		/* No advertised segment count: assume one segment per page. */
		temp = mp->mnt_maxreadcnt / PAGE_SIZE;

		if (temp > UINT16_MAX) {
			temp = UINT16_MAX;
		}
	}
	mp->mnt_segreadcnt = (u_int16_t)temp;

	if (writesegcnt) {
		temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
	} else {
		temp = mp->mnt_maxwritecnt / PAGE_SIZE;

		if (temp > UINT16_MAX) {
			temp = UINT16_MAX;
		}
	}
	mp->mnt_segwritecnt = (u_int16_t)temp;

	if (readsegsize) {
		temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
	} else {
		temp = mp->mnt_maxreadcnt;
	}
	mp->mnt_maxsegreadsize = (u_int32_t)temp;

	if (writesegsize) {
		temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
	} else {
		temp = mp->mnt_maxwritecnt;
	}
	mp->mnt_maxsegwritesize = (u_int32_t)temp;

	/* Convert the alignment byte count to a mask, capped at a page. */
	if (alignment) {
		temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
	} else {
		temp = 0;
	}
	mp->mnt_alignmentmask = (uint32_t)temp;


	/* Never go below the default queue depth, even if the device reports less. */
	if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH) {
		temp = ioqueue_depth;
	} else {
		temp = MNT_DEFAULT_IOQUEUE_DEPTH;
	}

	mp->mnt_ioqueue_depth = (uint32_t)temp;
	mp->mnt_ioscale = MNT_IOSCALE(mp->mnt_ioqueue_depth);

	if (mp->mnt_ioscale > 1) {
		printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
	}

	if (features & DK_FEATURE_FORCE_UNIT_ACCESS) {
		mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
	}

	if (VNOP_IOCTL(devvp, DKIOCGETIOMINSATURATIONBYTECOUNT, (caddr_t)&minsaturationbytecount, 0, ctx) == 0) {
		mp->mnt_minsaturationbytecount = minsaturationbytecount;
	} else {
		mp->mnt_minsaturationbytecount = 0;
	}

	/* CoreStorage presence influences unmap and Fusion detection below. */
	if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, ctx) == 0) {
		cs_present = TRUE;
	}

	if (features & DK_FEATURE_UNMAP) {
		mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED;

		if (cs_present == TRUE) {
			mp->mnt_ioflags |= MNT_IOFLAGS_CSUNMAP_SUPPORTED;
		}
	}
	if (cs_present == TRUE) {
		/*
		 * for now we'll use the following test as a proxy for
		 * the underlying drive being FUSION in nature
		 */
		if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) {
			mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
		}
	} else {
		/* Check for APFS Fusion */
		dk_apfs_flavour_t flavour;
		if ((VNOP_IOCTL(devvp, DKIOCGETAPFSFLAVOUR, (caddr_t)&flavour, 0, ctx) == 0) &&
		    (flavour == DK_APFS_FUSION)) {
			mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
		}
	}

	/* External/peripheral devices are treated as removable. */
	if (VNOP_IOCTL(devvp, DKIOCGETLOCATION, (caddr_t)&location, 0, ctx) == 0) {
		if (location & DK_LOCATION_EXTERNAL) {
			mp->mnt_ioflags |= MNT_IOFLAGS_PERIPHERAL_DRIVE;
			mp->mnt_flag |= MNT_REMOVABLE;
		}
	}

#if CONFIG_IOSCHED
	if (iosched_enabled && (features & DK_FEATURE_PRIORITY)) {
		mp->mnt_ioflags |= MNT_IOFLAGS_IOSCHED_SUPPORTED;
		throttle_info_disable_throttle(mp->mnt_devbsdunit, (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) != 0);
	}
#endif /* CONFIG_IOSCHED */
	/* error is 0 here: every required ioctl above succeeded. */
	return error;
}
4738
/* Knote list for EVFILT_FS events; all access is serialized by fs_klist_lock. */
static struct klist fs_klist;
static LCK_GRP_DECLARE(fs_klist_lck_grp, "fs_klist");
static LCK_MTX_DECLARE(fs_klist_lock, &fs_klist_lck_grp);
4742
/*
 * Initialize the knote list backing the EVFILT_FS kevent filter.
 */
void
vfs_event_init(void)
{
	klist_init(&fs_klist);
}
4748
4749 void
vfs_event_signal(fsid_t * fsid,u_int32_t event,intptr_t data)4750 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data)
4751 {
4752 if (event == VQ_DEAD || event == VQ_NOTRESP) {
4753 struct mount *mp = vfs_getvfs(fsid);
4754 if (mp) {
4755 mount_lock_spin(mp);
4756 if (data) {
4757 mp->mnt_lflag &= ~MNT_LNOTRESP; // Now responding
4758 } else {
4759 mp->mnt_lflag |= MNT_LNOTRESP; // Not responding
4760 }
4761 mount_unlock(mp);
4762 }
4763 }
4764
4765 lck_mtx_lock(&fs_klist_lock);
4766 KNOTE(&fs_klist, event);
4767 lck_mtx_unlock(&fs_klist_lock);
4768 }
4769
4770 /*
4771 * return the number of mounted filesystems.
4772 */
/* sysctl helper: thin wrapper around mount_getvfscnt(). */
static int
sysctl_vfs_getvfscnt(void)
{
	return mount_getvfscnt();
}
4778
4779
4780 static int
mount_getvfscnt(void)4781 mount_getvfscnt(void)
4782 {
4783 int ret;
4784
4785 mount_list_lock();
4786 ret = nummounts;
4787 mount_list_unlock();
4788 return ret;
4789 }
4790
4791
4792
4793 static int
mount_fillfsids(fsid_t * fsidlst,int count)4794 mount_fillfsids(fsid_t *fsidlst, int count)
4795 {
4796 struct mount *mp;
4797 int actual = 0;
4798
4799 actual = 0;
4800 mount_list_lock();
4801 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4802 if (actual < count) {
4803 fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
4804 actual++;
4805 }
4806 }
4807 mount_list_unlock();
4808 return actual;
4809 }
4810
4811 /*
4812 * fill in the array of fsid_t's up to a max of 'count', the actual
4813 * number filled in will be set in '*actual'. If there are more fsid_t's
4814 * than room in fsidlst then ENOMEM will be returned and '*actual' will
4815 * have the actual count.
4816 * having *actual filled out even in the error case is depended upon.
4817 */
4818 static int
sysctl_vfs_getvfslist(fsid_t * fsidlst,unsigned long count,unsigned long * actual)4819 sysctl_vfs_getvfslist(fsid_t *fsidlst, unsigned long count, unsigned long *actual)
4820 {
4821 struct mount *mp;
4822
4823 *actual = 0;
4824 mount_list_lock();
4825 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4826 (*actual)++;
4827 if (*actual <= count) {
4828 fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
4829 }
4830 }
4831 mount_list_unlock();
4832 return *actual <= count ? 0 : ENOMEM;
4833 }
4834
/*
 * sysctl handler: copy out the fsid_t of every mounted filesystem.
 * Read-only node; a query with no old pointer just reports the space
 * required.  Retries if a mount is added between sizing and copying.
 */
static int
sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	unsigned long actual;
	int error;
	size_t space;
	fsid_t *fsidlst;

	/* This is a readonly node. */
	if (req->newptr != USER_ADDR_NULL) {
		return EPERM;
	}

	/* they are querying us so just return the space required. */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
		return 0;
	}
again:
	/*
	 * Retrieve an accurate count of the amount of space required to copy
	 * out all the fsids in the system.
	 */
	space = req->oldlen;
	req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);

	/* they didn't give us enough space. */
	if (space < req->oldlen) {
		return ENOMEM;
	}

	fsidlst = kalloc_data(req->oldlen, Z_WAITOK | Z_ZERO);
	if (fsidlst == NULL) {
		return ENOMEM;
	}

	error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
	    &actual);
	/*
	 * If we get back ENOMEM, then another mount has been added while we
	 * slept in malloc above. If this is the case then try again.
	 */
	if (error == ENOMEM) {
		kfree_data(fsidlst, req->oldlen);
		req->oldlen = space;
		goto again;
	}
	if (error == 0) {
		error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
	}
	kfree_data(fsidlst, req->oldlen);
	return error;
}
4889
4890 /*
4891 * Do a sysctl by fsid.
4892 */
/*
 * sysctl handler for vfs.generic.ctlbyfsid: dispatch a VFS_CTL_*
 * operation (unmount, statfs, or a filesystem-private sysctl) to the
 * mount identified by the fsid in the incoming vfsidctl structure.
 * The filesystem's own vfs_sysctl hook gets first crack; ENOTSUP
 * falls through to the generic handlers below.
 */
static int
sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
    struct sysctl_req *req)
{
	union union_vfsidctl vc;
	struct mount *mp = NULL;
	struct vfsstatfs *sp;
	int *name, namelen;
	int flags = 0;
	int error = 0, gotref = 0;
	vfs_context_t ctx = vfs_context_current();
	proc_t p = req->p; /* XXX req->p != current_proc()? */
	boolean_t is_64_bit;
	union {
		struct statfs64 sfs64;
		struct user64_statfs osfs64;
		struct user32_statfs osfs32;
	} *sfsbuf;

	if (req->newptr == USER_ADDR_NULL) {
		error = EINVAL;
		goto out;
	}

	name = arg1;
	namelen = arg2;
	is_64_bit = proc_is64bit(p);

	/* Copy in the 32- or 64-bit vfsidctl as appropriate for the caller. */
	error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
	if (error) {
		goto out;
	}
	if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
		error = EINVAL;
		goto out;
	}
	mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
	if (mp == NULL) {
		error = ENOENT;
		goto out;
	}
	/* We now hold an iteration reference; dropped at 'out' unless consumed. */
	gotref = 1;
	/* reset so that the fs specific code can fetch it. */
	req->newidx = 0;
	/*
	 * Note if this is a VFS_CTL then we pass the actual sysctl req
	 * in for "oldp" so that the lower layer can DTRT and use the
	 * SYSCTL_IN/OUT routines.
	 */
	if (mp->mnt_op->vfs_sysctl != NULL) {
		if (is_64_bit) {
			if (vfs_64bitready(mp)) {
				error = mp->mnt_op->vfs_sysctl(name, namelen,
				    CAST_USER_ADDR_T(req),
				    NULL, USER_ADDR_NULL, 0,
				    ctx);
			} else {
				error = ENOTSUP;
			}
		} else {
			error = mp->mnt_op->vfs_sysctl(name, namelen,
			    CAST_USER_ADDR_T(req),
			    NULL, USER_ADDR_NULL, 0,
			    ctx);
		}
		if (error != ENOTSUP) {
			goto out;
		}
	}
	switch (name[0]) {
	case VFS_CTL_UMOUNT:
#if CONFIG_MACF
		error = mac_mount_check_umount(ctx, mp);
		if (error != 0) {
			goto out;
		}
#endif
		req->newidx = 0;
		if (is_64_bit) {
			req->newptr = vc.vc64.vc_ptr;
			req->newlen = (size_t)vc.vc64.vc_len;
		} else {
			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
			req->newlen = vc.vc32.vc_len;
		}
		error = SYSCTL_IN(req, &flags, sizeof(flags));
		if (error) {
			break;
		}

		/* Convert the iteration ref into a real mount ref for unmount. */
		mount_ref(mp, 0);
		mount_iterdrop(mp);
		gotref = 0;
		/* safedounmount consumes a ref */
		error = safedounmount(mp, flags, ctx);
		break;
	case VFS_CTL_OSTATFS:
	case VFS_CTL_STATFS64:
#if CONFIG_MACF
		error = mac_mount_check_stat(ctx, mp);
		if (error != 0) {
			break;
		}
#endif
		req->newidx = 0;
		if (is_64_bit) {
			req->newptr = vc.vc64.vc_ptr;
			req->newlen = (size_t)vc.vc64.vc_len;
		} else {
			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
			req->newlen = vc.vc32.vc_len;
		}
		error = SYSCTL_IN(req, &flags, sizeof(flags));
		if (error) {
			break;
		}
		sp = &mp->mnt_vfsstat;
		/* Refresh the stats from the filesystem unless MNT_NOWAIT alone was given. */
		if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
		    (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT))) {
			goto out;
		}

		sfsbuf = kalloc_type(typeof(*sfsbuf), Z_WAITOK);

		if (name[0] == VFS_CTL_STATFS64) {
			struct statfs64 *sfs = &sfsbuf->sfs64;

			vfs_get_statfs64(mp, sfs);
			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		} else if (is_64_bit) {
			struct user64_statfs *sfs = &sfsbuf->osfs64;

			bzero(sfs, sizeof(*sfs));
			sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;
			sfs->f_bsize = (user64_long_t)sp->f_bsize;
			sfs->f_iosize = (user64_long_t)sp->f_iosize;
			sfs->f_blocks = (user64_long_t)sp->f_blocks;
			sfs->f_bfree = (user64_long_t)sp->f_bfree;
			sfs->f_bavail = (user64_long_t)sp->f_bavail;
			sfs->f_files = (user64_long_t)sp->f_files;
			sfs->f_ffree = (user64_long_t)sp->f_ffree;
			sfs->f_fsid = sp->f_fsid;
			sfs->f_owner = sp->f_owner;
			vfs_getfstypename(mp, sfs->f_fstypename, MFSNAMELEN);
			strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
			strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);

			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		} else {
			struct user32_statfs *sfs = &sfsbuf->osfs32;
			long temp;

			bzero(sfs, sizeof(*sfs));
			sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
			sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;

			/*
			 * It's possible for there to be more than 2^^31 blocks in the filesystem, so we
			 * have to fudge the numbers here in that case. We inflate the blocksize in order
			 * to reflect the filesystem size as best we can.
			 */
			if (sp->f_blocks > INT_MAX) {
				int shift;

				/*
				 * Work out how far we have to shift the block count down to make it fit.
				 * Note that it's possible to have to shift so far that the resulting
				 * blocksize would be unreportably large. At that point, we will clip
				 * any values that don't fit.
				 *
				 * For safety's sake, we also ensure that f_iosize is never reported as
				 * being smaller than f_bsize.
				 */
				for (shift = 0; shift < 32; shift++) {
					if ((sp->f_blocks >> shift) <= INT_MAX) {
						break;
					}
					if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX) {
						break;
					}
				}
#define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
				sfs->f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
				sfs->f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
				sfs->f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
#undef __SHIFT_OR_CLIP
				sfs->f_bsize = (user32_long_t)(sp->f_bsize << shift);
				temp = lmax(sp->f_iosize, sp->f_bsize);
				if (temp > INT32_MAX) {
					error = EINVAL;
					kfree_type(typeof(*sfsbuf), sfsbuf);
					goto out;
				}
				sfs->f_iosize = (user32_long_t)temp;
			} else {
				sfs->f_bsize = (user32_long_t)sp->f_bsize;
				sfs->f_iosize = (user32_long_t)sp->f_iosize;
				sfs->f_blocks = (user32_long_t)sp->f_blocks;
				sfs->f_bfree = (user32_long_t)sp->f_bfree;
				sfs->f_bavail = (user32_long_t)sp->f_bavail;
			}
			sfs->f_files = (user32_long_t)sp->f_files;
			sfs->f_ffree = (user32_long_t)sp->f_ffree;
			sfs->f_fsid = sp->f_fsid;
			sfs->f_owner = sp->f_owner;

			vfs_getfstypename(mp, sfs->f_fstypename, MFSNAMELEN);
			strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
			strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);

			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
		}
		kfree_type(typeof(*sfsbuf), sfsbuf);
		break;
	default:
		error = ENOTSUP;
		goto out;
	}
out:
	if (gotref != 0) {
		mount_iterdrop(mp);
	}
	return error;
}
5118
/* Forward declarations for the EVFILT_FS filter operations defined below. */
static int filt_fsattach(struct knote *kn, struct kevent_qos_s *kev);
static void filt_fsdetach(struct knote *kn);
static int filt_fsevent(struct knote *kn, long hint);
static int filt_fstouch(struct knote *kn, struct kevent_qos_s *kev);
static int filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev);
/* Filter ops backing the EVFILT_FS kevent filter. */
SECURITY_READ_ONLY_EARLY(struct filterops) fs_filtops = {
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent,
	.f_touch = filt_fstouch,
	.f_process = filt_fsprocess,
};
5131
/*
 * EVFILT_FS attach: register the knote on the global fs_klist.
 * EV_CLEAR is forced (state resets on each retrieval) and any
 * caller-supplied data is ignored.
 */
static int
filt_fsattach(struct knote *kn, __unused struct kevent_qos_s *kev)
{
	kn->kn_flags |= EV_CLEAR; /* automatic */
	kn->kn_sdata = 0; /* incoming data is ignored */

	lck_mtx_lock(&fs_klist_lock);
	KNOTE_ATTACH(&fs_klist, kn);
	lck_mtx_unlock(&fs_klist_lock);

	/*
	 * filter only sees future events,
	 * so it can't be fired already.
	 */
	return 0;
}
5148
/*
 * EVFILT_FS detach: remove the knote from the global fs_klist.
 */
static void
filt_fsdetach(struct knote *kn)
{
	lck_mtx_lock(&fs_klist_lock);
	KNOTE_DETACH(&fs_klist, kn);
	lck_mtx_unlock(&fs_klist_lock);
}
5156
5157 static int
filt_fsevent(struct knote * kn,long hint)5158 filt_fsevent(struct knote *kn, long hint)
5159 {
5160 /*
5161 * Backwards compatibility:
5162 * Other filters would do nothing if kn->kn_sfflags == 0
5163 */
5164
5165 if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
5166 kn->kn_fflags |= hint;
5167 }
5168
5169 return kn->kn_fflags != 0;
5170 }
5171
/*
 * EVFILT_FS touch: update the knote's interest mask from userspace and
 * report whether events are already pending.
 */
static int
filt_fstouch(struct knote *kn, struct kevent_qos_s *kev)
{
	int res;

	lck_mtx_lock(&fs_klist_lock);

	kn->kn_sfflags = kev->fflags;

	/*
	 * the above filter function sets bits even if nobody is looking for them.
	 * Just preserve those bits even if the new mask is more selective
	 * than before.
	 *
	 * For compatibility with previous implementations, we leave kn_fflags
	 * as they were before.
	 */
	//if (kn->kn_sfflags)
	//	kn->kn_fflags &= kn->kn_sfflags;
	res = (kn->kn_fflags != 0);

	lck_mtx_unlock(&fs_klist_lock);

	return res;
}
5197
5198 static int
filt_fsprocess(struct knote * kn,struct kevent_qos_s * kev)5199 filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev)
5200 {
5201 int res = 0;
5202
5203 lck_mtx_lock(&fs_klist_lock);
5204 if (kn->kn_fflags) {
5205 knote_fill_kevent(kn, kev, 0);
5206 res = 1;
5207 }
5208 lck_mtx_unlock(&fs_klist_lock);
5209 return res;
5210 }
5211
5212 static int
sysctl_vfs_noremotehang(__unused struct sysctl_oid * oidp,__unused void * arg1,__unused int arg2,struct sysctl_req * req)5213 sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
5214 __unused void *arg1, __unused int arg2, struct sysctl_req *req)
5215 {
5216 int out, error;
5217 pid_t pid;
5218 proc_t p;
5219
5220 /* We need a pid. */
5221 if (req->newptr == USER_ADDR_NULL) {
5222 return EINVAL;
5223 }
5224
5225 error = SYSCTL_IN(req, &pid, sizeof(pid));
5226 if (error) {
5227 return error;
5228 }
5229
5230 p = proc_find(pid < 0 ? -pid : pid);
5231 if (p == NULL) {
5232 return ESRCH;
5233 }
5234
5235 /*
5236 * Fetching the value is ok, but we only fetch if the old
5237 * pointer is given.
5238 */
5239 if (req->oldptr != USER_ADDR_NULL) {
5240 out = !((p->p_flag & P_NOREMOTEHANG) == 0);
5241 proc_rele(p);
5242 error = SYSCTL_OUT(req, &out, sizeof(out));
5243 return error;
5244 }
5245
5246 /* cansignal offers us enough security. */
5247 if (p != req->p && proc_suser(req->p) != 0) {
5248 proc_rele(p);
5249 return EPERM;
5250 }
5251
5252 if (pid < 0) {
5253 OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
5254 } else {
5255 OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
5256 }
5257 proc_rele(p);
5258
5259 return 0;
5260 }
5261
5262 static int
5263 sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS
5264 {
5265 int *name, namelen;
5266 struct vfstable *vfsp;
5267 struct vfsconf vfsc = {};
5268
5269 (void)oidp;
5270 name = arg1;
5271 namelen = arg2;
5272
5273 if (namelen < 1) {
5274 return EISDIR;
5275 } else if (namelen > 1) {
5276 return ENOTDIR;
5277 }
5278
5279 mount_list_lock();
5280 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
5281 if (vfsp->vfc_typenum == name[0]) {
5282 break;
5283 }
5284 }
5285
5286 if (vfsp == NULL) {
5287 mount_list_unlock();
5288 return ENOTSUP;
5289 }
5290
5291 vfsc.vfc_reserved1 = 0;
5292 bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
5293 vfsc.vfc_typenum = vfsp->vfc_typenum;
5294 vfsc.vfc_refcount = vfsp->vfc_refcount;
5295 vfsc.vfc_flags = vfsp->vfc_flags;
5296 vfsc.vfc_reserved2 = 0;
5297 vfsc.vfc_reserved3 = 0;
5298
5299 mount_list_unlock();
5300 return SYSCTL_OUT(req, &vfsc, sizeof(struct vfsconf));
5301 }
5302
/* the vfs.generic. branch. */
SYSCTL_EXTENSIBLE_NODE(_vfs, VFS_GENERIC, generic,
    CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs generic hinge");
/* retrieve a list of mounted filesystem fsid_t */
SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
/* perform operations on filesystem via fsid_t */
SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED,
    sysctl_vfs_ctlbyfsid, "ctlbyfsid");
/* set or clear P_NOREMOTEHANG on a process by pid */
SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY,
    NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
SYSCTL_INT(_vfs_generic, VFS_MAXTYPENUM, maxtypenum,
    CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
    &maxvfstypenum, 0, "");
SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout_seconds, 0, "");
/* per-filesystem-type configuration records (see sysctl_vfs_generic_conf) */
SYSCTL_NODE(_vfs_generic, VFS_CONF, conf,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_vfs_generic_conf, "");
#if DEVELOPMENT || DEBUG
SYSCTL_INT(_vfs_generic, OID_AUTO, print_busy_vnodes,
    CTLTYPE_INT | CTLFLAG_RW,
    &print_busy_vnodes, 0,
    "VFS log busy vnodes blocking unmount");
#endif

/* Indicate that the root file system unmounted cleanly */
static int vfs_root_unmounted_cleanly = 0;
SYSCTL_INT(_vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &vfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly");
5332
/*
 * Record that the root filesystem was unmounted cleanly; reported to
 * userspace via the vfs.generic.root_unmounted_cleanly sysctl above.
 */
void
vfs_set_root_unmounted_cleanly(void)
{
	vfs_root_unmounted_cleanly = 1;
}
5338
5339 /*
5340 * Print vnode state.
5341 */
/*
 * Debug helper: print a caller-supplied printf-style prefix followed by
 * the vnode's type/tag, reference counts, flags, and (permuted) data and
 * mount pointers.
 */
void
vn_print_state(struct vnode *vp, const char *fmt, ...)
{
	va_list ap;
	char perm_str[] = "(VM_KERNEL_ADDRPERM pointer)";
	char fs_name[MFSNAMELEN];

	/* Caller's prefix message first. */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("vp 0x%0llx %s: ", (uint64_t)VM_KERNEL_ADDRPERM(vp), perm_str);
	printf("tag %d, type %d\n", vp->v_tag, vp->v_type);
	/* Counts .. */
	printf(" iocount %d, usecount %d, kusecount %d references %d\n",
	    vp->v_iocount, vp->v_usecount, vp->v_kusecount, vp->v_references);
	printf(" writecount %d, numoutput %d\n", vp->v_writecount,
	    vp->v_numoutput);
	/* Flags */
	printf(" flag 0x%x, lflag 0x%x, listflag 0x%x\n", vp->v_flag,
	    vp->v_lflag, vp->v_listflag);

	if (vp->v_mount == NULL || vp->v_mount == dead_mountp) {
		strlcpy(fs_name, "deadfs", MFSNAMELEN);
	} else {
		vfs_name(vp->v_mount, fs_name);
	}

	printf(" v_data 0x%0llx %s\n",
	    (vp->v_data ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_data) : 0),
	    perm_str);
	printf(" v_mount 0x%0llx %s vfs_name %s\n",
	    (vp->v_mount ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_mount) : 0),
	    perm_str, fs_name);
}
5376
5377 long num_reusedvnodes = 0;
5378
5379
/*
 * Attempt to claim a vnode removed from the free/async lists for reuse.
 * Entered with the vnode list lock held; it is dropped here.  After the
 * lock handoff the candidate is re-validated: if it was re-used, is
 * being terminated, or gets deferred to the async work list (*deferred
 * set to 1), NULLVP is returned and the caller should pick another
 * candidate.  On success with want_vp set, the reclaimed vnode is
 * returned still locked and held for the caller.
 */
static vnode_t
process_vp(vnode_t vp, int want_vp, bool can_defer, int *deferred)
{
	unsigned int vpid;

	*deferred = 0;

	/* Capture the identity before giving up the list lock. */
	vpid = vp->v_id;

	vnode_list_remove_locked(vp);

	vnode_hold(vp);
	vnode_list_unlock();

	vnode_lock_spin(vp);

	/*
	 * We could wait for the vnode_lock after removing the vp from the freelist
	 * and the vid is bumped only at the very end of reclaim. So it is possible
	 * that we are looking at a vnode that is being terminated. If so skip it.
	 */
	if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
	    VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
		/*
		 * we lost the race between dropping the list lock
		 * and picking up the vnode_lock... someone else
		 * used this vnode and it is now in a new state
		 */
		vnode_drop_and_unlock(vp);

		return NULLVP;
	}
	if ((vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE) {
		/*
		 * we did a vnode_rele_ext that asked for
		 * us not to reenter the filesystem during
		 * the release even though VL_NEEDINACTIVE was
		 * set... we'll do it here by doing a
		 * vnode_get/vnode_put
		 *
		 * pick up an iocount so that we can call
		 * vnode_put and drive the VNOP_INACTIVE...
		 * vnode_put will either leave us off
		 * the freelist if a new ref comes in,
		 * or put us back on the end of the freelist
		 * or recycle us if we were marked for termination...
		 * so we'll just go grab a new candidate
		 */
		vp->v_iocount++;
#ifdef CONFIG_IOCOUNT_TRACE
		record_vp(vp, 1);
#endif
		vnode_put_locked(vp);
		vnode_drop_and_unlock(vp);

		return NULLVP;
	}
	/*
	 * Checks for anyone racing us for recycle
	 */
	if (vp->v_type != VBAD) {
		/* Dirty or unreliable-media vnodes get deferred to the async list. */
		if ((want_vp || can_defer) && (vnode_on_reliable_media(vp) == FALSE || (vp->v_flag & VISDIRTY))) {
			vnode_async_list_add(vp);
			vnode_drop_and_unlock(vp);

			*deferred = 1;

			return NULLVP;
		}
		if (vp->v_lflag & VL_DEAD) {
			panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
		}

		vnode_lock_convert(vp);
		(void)vnode_reclaim_internal(vp, 1, want_vp, 0);

		if (want_vp) {
			/* Sanity checks: a reclaimed vnode must be fully disentangled. */
			if ((VONLIST(vp))) {
				panic("new_vnode(%p): vp on list", vp);
			}
			if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
			    (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH))) {
				panic("new_vnode(%p): free vnode still referenced", vp);
			}
			if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0)) {
				panic("new_vnode(%p): vnode seems to be on mount list", vp);
			}
			if (!LIST_EMPTY(&vp->v_nclinks) || !TAILQ_EMPTY(&vp->v_ncchildren)) {
				panic("new_vnode(%p): vnode still hooked into the name cache", vp);
			}
		} else {
			vnode_drop_and_unlock(vp);
			vp = NULLVP;
		}
	}
	return vp;
}
5477
/*
 * Worker-thread continuation that drains the async work list.  Vnodes
 * parked there (see vnode_async_list_add()) are reclaimed one at a time
 * via process_vp(); when the queue is empty the thread sleeps on the
 * queue head and restarts itself through thread_block() with this
 * function as the continuation.  Never returns.
 */
__attribute__((noreturn))
static void
async_work_continue(void)
{
	struct async_work_lst *q;
	int deferred;
	vnode_t vp;

	q = &vnode_async_work_list;

	for (;;) {
		vnode_list_lock();

		if (TAILQ_EMPTY(q)) {
			/* nothing queued: sleep until a wakeup on the queue head */
			assert_wait(q, (THREAD_UNINT));

			vnode_list_unlock();

			thread_block((thread_continue_t)async_work_continue);

			continue;
		}
		async_work_handled++;

		vp = TAILQ_FIRST(q);

		/* want_vp == 0: reclaim in place, do not hand the vnode back */
		vp = process_vp(vp, 0, false, &deferred);

		if (vp != NULLVP) {
			/* process_vp only returns non-NULL for want_vp != 0 */
			panic("found VBAD vp (%p) on async queue", vp);
		}
	}
}
5511
#if CONFIG_JETSAM
/* Set by new_vnode() to ask the laundry thread to initiate a vnode-exhaustion jetsam. */
bool do_async_jetsam = false;
#endif
5515
5516 __attribute__((noreturn))
5517 static void
vn_laundry_continue(void)5518 vn_laundry_continue(void)
5519 {
5520 struct freelst *free_q;
5521 struct ragelst *rage_q;
5522 vnode_t vp;
5523 int deferred;
5524 bool rage_q_empty;
5525 bool free_q_empty;
5526
5527
5528 free_q = &vnode_free_list;
5529 rage_q = &vnode_rage_list;
5530
5531 for (;;) {
5532 vnode_list_lock();
5533
5534 #if CONFIG_JETSAM
5535 if (do_async_jetsam) {
5536 do_async_jetsam = false;
5537 if (deadvnodes <= deadvnodes_low) {
5538 vnode_list_unlock();
5539
5540 log(LOG_EMERG, "Initiating vnode jetsam : %d desired, %ld numvnodes, "
5541 "%ld free, %ld dead, %ld async, %d rage\n",
5542 desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes);
5543
5544 memorystatus_kill_on_vnode_exhaustion();
5545
5546 continue;
5547 }
5548 }
5549 #endif
5550
5551 if (!TAILQ_EMPTY(&vnode_async_work_list)) {
5552 vp = TAILQ_FIRST(&vnode_async_work_list);
5553 async_work_handled++;
5554
5555 vp = process_vp(vp, 0, false, &deferred);
5556
5557 if (vp != NULLVP) {
5558 panic("found VBAD vp (%p) on async queue", vp);
5559 }
5560 continue;
5561 }
5562
5563 free_q_empty = TAILQ_EMPTY(free_q);
5564 rage_q_empty = TAILQ_EMPTY(rage_q);
5565
5566 if (!rage_q_empty && !free_q_empty) {
5567 struct timeval current_tv;
5568
5569 microuptime(¤t_tv);
5570 if (ragevnodes < rage_limit &&
5571 ((current_tv.tv_sec - rage_tv.tv_sec) < RAGE_TIME_LIMIT)) {
5572 rage_q_empty = true;
5573 }
5574 }
5575
5576 if (numvnodes < numvnodes_min || (rage_q_empty && free_q_empty) ||
5577 (reusablevnodes <= reusablevnodes_max && deadvnodes >= deadvnodes_high)) {
5578 assert_wait(free_q, (THREAD_UNINT));
5579
5580 vnode_list_unlock();
5581
5582 thread_block((thread_continue_t)vn_laundry_continue);
5583
5584 continue;
5585 }
5586
5587 if (!rage_q_empty) {
5588 vp = TAILQ_FIRST(rage_q);
5589 } else {
5590 vp = TAILQ_FIRST(free_q);
5591 }
5592
5593 vp = process_vp(vp, 0, false, &deferred);
5594
5595 if (vp != NULLVP) {
5596 /* If process_vp returns a vnode, it is locked and has a holdcount */
5597 vnode_drop_and_unlock(vp);
5598 vp = NULLVP;
5599 }
5600 }
5601 }
5602
5603 static inline void
wakeup_laundry_thread()5604 wakeup_laundry_thread()
5605 {
5606 if (deadvnodes_noreuse || (numvnodes >= numvnodes_min && deadvnodes < deadvnodes_low &&
5607 (reusablevnodes > reusablevnodes_max || numvnodes >= desiredvnodes))) {
5608 wakeup(&vnode_free_list);
5609 }
5610 }
5611
5612 /*
5613 * This must be called under vnode_list_lock() to prevent race when accessing
5614 * various vnode stats.
5615 */
5616 static void
send_freeable_vnodes_telemetry(void)5617 send_freeable_vnodes_telemetry(void)
5618 {
5619 bool send_event = false;
5620
5621 /*
5622 * Log an event when the 'numvnodes' is above the freeable vnodes threshold
5623 * or when it falls back within the threshold.
5624 * When the 'numvnodes' is above the threshold, log an event when it has
5625 * been incrementally growing by 25%.
5626 */
5627 if ((numvnodes > desiredvnodes) && (freevnodes + deadvnodes) == 0) {
5628 long last_numvnodes = freeable_vnodes_telemetry.numvnodes;
5629
5630 if (numvnodes > (last_numvnodes + ((last_numvnodes * 25) / 100)) ||
5631 numvnodes >= numvnodes_max) {
5632 send_event = true;
5633 }
5634 freeablevnodes_threshold_crossed = true;
5635 } else if (freeablevnodes_threshold_crossed &&
5636 (freevnodes + deadvnodes) > busyvnodes) {
5637 freeablevnodes_threshold_crossed = false;
5638 send_event = true;
5639 }
5640
5641 if (__improbable(send_event)) {
5642 ca_event_t event = CA_EVENT_ALLOCATE_FLAGS(freeable_vnodes, Z_NOWAIT);
5643
5644 if (event) {
5645 /*
5646 * Update the stats except the 'numvnodes_max' and 'desiredvnodes'
5647 * as they are immutable after init.
5648 */
5649 freeable_vnodes_telemetry.numvnodes_min = numvnodes_min;
5650 freeable_vnodes_telemetry.numvnodes = numvnodes;
5651 freeable_vnodes_telemetry.freevnodes = freevnodes;
5652 freeable_vnodes_telemetry.deadvnodes = deadvnodes;
5653 freeable_vnodes_telemetry.freeablevnodes = freeablevnodes;
5654 freeable_vnodes_telemetry.busyvnodes = busyvnodes;
5655 freeable_vnodes_telemetry.threshold_crossed =
5656 freeablevnodes_threshold_crossed;
5657
5658 memcpy(event->data, &freeable_vnodes_telemetry,
5659 sizeof(CA_EVENT_TYPE(freeable_vnodes)));
5660
5661 if (!freeablevnodes_threshold_crossed) {
5662 freeable_vnodes_telemetry.numvnodes = 0;
5663 }
5664 CA_EVENT_SEND(event);
5665 }
5666 }
5667 }
5668
5669 static int
new_vnode(vnode_t * vpp,bool can_free)5670 new_vnode(vnode_t *vpp, bool can_free)
5671 {
5672 long force_alloc_min;
5673 vnode_t vp;
5674 #if CONFIG_JETSAM
5675 uint32_t retries = 0, max_retries = 2; /* retry incase of tablefull */
5676 #else
5677 uint32_t retries = 0, max_retries = 100; /* retry incase of tablefull */
5678 #endif
5679 int force_alloc = 0, walk_count = 0;
5680 boolean_t need_reliable_vp = FALSE;
5681 int deferred;
5682 struct timeval initial_tv;
5683 struct timeval current_tv;
5684 proc_t curproc = current_proc();
5685 bool force_alloc_freeable = false;
5686
5687 if (vn_dealloc_level == DEALLOC_VNODE_NONE) {
5688 can_free = false;
5689 }
5690
5691 initial_tv.tv_sec = 0;
5692 retry:
5693 vp = NULLVP;
5694
5695 vnode_list_lock();
5696 newvnode++;
5697
5698 if (need_reliable_vp == TRUE) {
5699 async_work_timed_out++;
5700 }
5701
5702 /*
5703 * The vnode list lock was dropped after force_alloc_freeable was set,
5704 * reevaluate.
5705 */
5706 force_alloc_min = MAX(desiredvnodes, numvnodes_min);
5707 if (force_alloc_freeable &&
5708 (numvnodes < force_alloc_min || numvnodes >= numvnodes_max)) {
5709 force_alloc_freeable = false;
5710 }
5711
5712 #if CONFIG_JETSAM
5713 if ((numvnodes_max > desiredvnodes) && numvnodes > (numvnodes_max - 100)
5714 #if (DEVELOPMENT || DEBUG)
5715 && !bootarg_no_vnode_jetsam
5716 #endif
5717 ) {
5718 do_async_jetsam = true;
5719 wakeup(&vnode_free_list);
5720 }
5721 #endif /* CONFIG_JETSAM */
5722
5723 if (((numvnodes - deadvnodes + deadvnodes_noreuse) < desiredvnodes) ||
5724 force_alloc || force_alloc_freeable) {
5725 struct timespec ts;
5726 uint32_t vflag = 0;
5727
5728 /*
5729 * Can always reuse a dead one except if it is in the process of
5730 * being freed or the FS cannot handle freeable vnodes.
5731 */
5732 if (!TAILQ_EMPTY(&vnode_dead_list)) {
5733 /* Select an appropriate deadvnode */
5734 if (numvnodes <= numvnodes_min || !can_free) {
5735 /* all vnodes upto numvnodes_min are not freeable */
5736 vp = TAILQ_FIRST(&vnode_dead_list);
5737 if (numvnodes > numvnodes_min &&
5738 (vp->v_flag & VCANDEALLOC)) {
5739 /*
5740 * Freeable vnodes are added to the
5741 * back of the queue, so if the first
5742 * from the front is freeable, then
5743 * there are none on the dead list.
5744 */
5745 vp = NULLVP;
5746 }
5747 } else {
5748 /*
5749 * Filesystems which opt in to freeable vnodes
5750 * can get either one.
5751 */
5752 TAILQ_FOREACH_REVERSE(vp, &vnode_dead_list,
5753 deadlst, v_freelist) {
5754 if (!(vp->v_listflag & VLIST_NO_REUSE)) {
5755 break;
5756 }
5757 }
5758 }
5759
5760 if (vp) {
5761 force_alloc_freeable = false;
5762 goto steal_this_vp;
5763 }
5764 }
5765
5766 /*
5767 * no dead vnodes available... if we're under
5768 * the limit, we'll create a new vnode
5769 */
5770 numvnodes++;
5771 if (force_alloc) {
5772 numvnodes_min++;
5773 } else if (can_free && (numvnodes > numvnodes_min)) {
5774 allocedvnodes++;
5775 freeablevnodes++;
5776 vflag = VCANDEALLOC;
5777
5778 send_freeable_vnodes_telemetry();
5779 }
5780 vnode_list_unlock();
5781
5782 if (nc_smr_enabled) {
5783 vp = zalloc_smr(vnode_zone, Z_WAITOK_ZERO_NOFAIL);
5784 } else {
5785 vp = zalloc_flags(vnode_zone, Z_WAITOK_ZERO_NOFAIL);
5786 }
5787
5788 VLISTNONE(vp); /* avoid double queue removal */
5789 lck_mtx_init(&vp->v_lock, &vnode_lck_grp, &vnode_lck_attr);
5790
5791 TAILQ_INIT(&vp->v_ncchildren);
5792
5793 klist_init(&vp->v_knotes);
5794 nanouptime(&ts);
5795 vp->v_id = (uint32_t)ts.tv_nsec;
5796 vp->v_flag = VSTANDARD | vflag;
5797 if (force_alloc_freeable) {
5798 /* This vnode should be recycled and freed immediately */
5799 vp->v_lflag = VL_MARKTERM;
5800 vp->v_listflag = VLIST_NO_REUSE;
5801 }
5802
5803 if (vflag & VCANDEALLOC) {
5804 os_atomic_inc(&busyvnodes, relaxed);
5805 }
5806
5807 #if CONFIG_MACF
5808 if (mac_vnode_label_init_needed(vp)) {
5809 mac_vnode_label_init(vp);
5810 }
5811 #endif /* MAC */
5812
5813 #if CONFIG_IOCOUNT_TRACE
5814 if (__improbable(bootarg_vnode_iocount_trace)) {
5815 vp->v_iocount_trace = (vnode_iocount_trace_t)zalloc_permanent(
5816 IOCOUNT_TRACE_MAX_TYPES * sizeof(struct vnode_iocount_trace),
5817 ZALIGN(struct vnode_iocount_trace));
5818 }
5819 #endif /* CONFIG_IOCOUNT_TRACE */
5820
5821 #if CONFIG_FILE_LEASES
5822 LIST_INIT(&vp->v_leases);
5823 #endif
5824
5825 vp->v_iocount = 1;
5826
5827 goto done;
5828 }
5829
5830 microuptime(¤t_tv);
5831
5832 #define MAX_WALK_COUNT 1000
5833
5834 if (!TAILQ_EMPTY(&vnode_rage_list) &&
5835 (ragevnodes >= rage_limit ||
5836 (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
5837 TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
5838 if (!(vp->v_listflag & VLIST_RAGE)) {
5839 panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
5840 }
5841
5842 // if we're a dependency-capable process, skip vnodes that can
5843 // cause recycling deadlocks. (i.e. this process is diskimages
5844 // helper and the vnode is in a disk image). Querying the
5845 // mnt_kern_flag for the mount's virtual device status
5846 // is safer than checking the mnt_dependent_process, which
5847 // may not be updated if there are multiple devnode layers
5848 // in between the disk image and the final consumer.
5849
5850 if (((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
5851 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) &&
5852 !(vp->v_listflag & VLIST_NO_REUSE) &&
5853 (can_free || !(vp->v_flag & VCANDEALLOC))) {
5854 /*
5855 * if need_reliable_vp == TRUE, then we've already sent one or more
5856 * non-reliable vnodes to the async thread for processing and timed
5857 * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
5858 * mechanism to first scan for a reliable vnode before forcing
5859 * a new vnode to be created
5860 */
5861 if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
5862 break;
5863 }
5864 }
5865
5866 // don't iterate more than MAX_WALK_COUNT vnodes to
5867 // avoid keeping the vnode list lock held for too long.
5868
5869 if (walk_count++ > MAX_WALK_COUNT) {
5870 vp = NULL;
5871 break;
5872 }
5873 }
5874 }
5875
5876 if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
5877 /*
5878 * Pick the first vp for possible reuse
5879 */
5880 walk_count = 0;
5881 TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
5882 // if we're a dependency-capable process, skip vnodes that can
5883 // cause recycling deadlocks. (i.e. this process is diskimages
5884 // helper and the vnode is in a disk image). Querying the
5885 // mnt_kern_flag for the mount's virtual device status
5886 // is safer than checking the mnt_dependent_process, which
5887 // may not be updated if there are multiple devnode layers
5888 // in between the disk image and the final consumer.
5889
5890 if (((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
5891 (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) &&
5892 !(vp->v_listflag & VLIST_NO_REUSE) &&
5893 (can_free || !(vp->v_flag & VCANDEALLOC))) {
5894 /*
5895 * if need_reliable_vp == TRUE, then we've already sent one or more
5896 * non-reliable vnodes to the async thread for processing and timed
5897 * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT
5898 * mechanism to first scan for a reliable vnode before forcing
5899 * a new vnode to be created
5900 */
5901 if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
5902 break;
5903 }
5904 }
5905
5906 // don't iterate more than MAX_WALK_COUNT vnodes to
5907 // avoid keeping the vnode list lock held for too long.
5908
5909 if (walk_count++ > MAX_WALK_COUNT) {
5910 vp = NULL;
5911 break;
5912 }
5913 }
5914 }
5915
5916 //
5917 // if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
5918 // then we're trying to create a vnode on behalf of a
5919 // process like diskimages-helper that has file systems
5920 // mounted on top of itself (and thus we can't reclaim
5921 // vnodes in the file systems on top of us). if we can't
5922 // find a vnode to reclaim then we'll just have to force
5923 // the allocation.
5924 //
5925 if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
5926 force_alloc = 1;
5927 vnode_list_unlock();
5928 goto retry;
5929 }
5930
5931 if (vp == NULL) {
5932 if (can_free && (vn_dealloc_level > DEALLOC_VNODE_NONE) &&
5933 (numvnodes >= force_alloc_min) && (numvnodes < numvnodes_max)) {
5934 force_alloc_freeable = true;
5935 vnode_list_unlock();
5936 goto retry;
5937 }
5938 vnode_list_unlock();
5939
5940 /*
5941 * we've reached the system imposed maximum number of vnodes
5942 * but there isn't a single one available
5943 * wait a bit and then retry... if we can't get a vnode
5944 * after our target number of retries, than log a complaint
5945 */
5946 if (++retries <= max_retries) {
5947 delay_for_interval(1, 1000 * 1000);
5948 goto retry;
5949 }
5950
5951 tablefull("vnode");
5952 log(LOG_EMERG, "%d desired, %ld numvnodes, "
5953 "%ld free, %ld dead, %ld async, %d rage\n",
5954 desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes);
5955
5956 #if CONFIG_JETSAM
5957 /*
5958 * Running out of vnodes tends to make a system unusable. Start killing
5959 * processes that jetsam knows are killable.
5960 */
5961 if (!memorystatus_kill_on_vnode_exhaustion()
5962 #if DEVELOPMENT || DEBUG
5963 || bootarg_no_vnode_jetsam
5964 #endif
5965 ) {
5966 /*
5967 * If jetsam can't find any more processes to kill and there
5968 * still aren't any free vnodes, panic. Hopefully we'll get a
5969 * panic log to tell us why we ran out.
5970 */
5971 panic("vnode table is full");
5972 }
5973
5974 /*
5975 * Now that we've killed someone, wait a bit and continue looking
5976 */
5977 delay_for_interval(3, 1000 * 1000);
5978 retries = 0;
5979 goto retry;
5980 #endif
5981
5982 *vpp = NULL;
5983 return ENFILE;
5984 }
5985 newvnode_nodead++;
5986 steal_this_vp:
5987 if ((vp = process_vp(vp, 1, true, &deferred)) == NULLVP) {
5988 if (deferred) {
5989 int elapsed_msecs;
5990 struct timeval elapsed_tv;
5991
5992 if (initial_tv.tv_sec == 0) {
5993 microuptime(&initial_tv);
5994 }
5995
5996 vnode_list_lock();
5997
5998 dead_vnode_waited++;
5999 dead_vnode_wanted++;
6000
6001 /*
6002 * note that we're only going to explicitly wait 10ms
6003 * for a dead vnode to become available, since even if one
6004 * isn't available, a reliable vnode might now be available
6005 * at the head of the VRAGE or free lists... if so, we
6006 * can satisfy the new_vnode request with less latency then waiting
6007 * for the full 100ms duration we're ultimately willing to tolerate
6008 */
6009 assert_wait_timeout((caddr_t)&dead_vnode_wanted, (THREAD_INTERRUPTIBLE), 10000, NSEC_PER_USEC);
6010
6011 vnode_list_unlock();
6012
6013 thread_block(THREAD_CONTINUE_NULL);
6014
6015 microuptime(&elapsed_tv);
6016
6017 timevalsub(&elapsed_tv, &initial_tv);
6018 elapsed_msecs = (int)(elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000);
6019
6020 if (elapsed_msecs >= 100) {
6021 /*
6022 * we've waited long enough... 100ms is
6023 * somewhat arbitrary for this case, but the
6024 * normal worst case latency used for UI
6025 * interaction is 100ms, so I've chosen to
6026 * go with that.
6027 *
6028 * setting need_reliable_vp to TRUE
6029 * forces us to find a reliable vnode
6030 * that we can process synchronously, or
6031 * to create a new one if the scan for
6032 * a reliable one hits the scan limit
6033 */
6034 need_reliable_vp = TRUE;
6035 }
6036 }
6037 goto retry;
6038 }
6039 OSAddAtomicLong(1, &num_reusedvnodes);
6040
6041
6042 #if CONFIG_MACF
6043 /*
6044 * We should never see VL_LABELWAIT or VL_LABEL here.
6045 * as those operations hold a reference.
6046 */
6047 assert((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
6048 assert((vp->v_lflag & VL_LABEL) != VL_LABEL);
6049 if (vp->v_lflag & VL_LABELED || mac_vnode_label(vp) != NULL) {
6050 vnode_lock_convert(vp);
6051 mac_vnode_label_recycle(vp);
6052 } else if (mac_vnode_label_init_needed(vp)) {
6053 vnode_lock_convert(vp);
6054 mac_vnode_label_init(vp);
6055 }
6056
6057 #endif /* MAC */
6058
6059 vp->v_iocount = 1;
6060 vp->v_lflag = 0;
6061 vp->v_writecount = 0;
6062 vp->v_references = 0;
6063 vp->v_iterblkflags = 0;
6064 if (can_free && (vp->v_flag & VCANDEALLOC)) {
6065 vp->v_flag = VSTANDARD | VCANDEALLOC;
6066 } else {
6067 vp->v_flag = VSTANDARD;
6068 }
6069
6070 /* vbad vnodes can point to dead_mountp */
6071 vp->v_mount = NULL;
6072 vp->v_defer_reclaimlist = (vnode_t)0;
6073
6074 /* process_vp returns a locked vnode with a holdcount */
6075 vnode_drop_and_unlock(vp);
6076
6077 done:
6078 *vpp = vp;
6079
6080 return 0;
6081 }
6082
/* Take the vnode's mutex as a full (sleepable) lock. */
void
vnode_lock(vnode_t vp)
{
	lck_mtx_lock(&vp->v_lock);
}
6088
/* Take the vnode's mutex in spin mode (convert with vnode_lock_convert before sleeping). */
void
vnode_lock_spin(vnode_t vp)
{
	lck_mtx_lock_spin(&vp->v_lock);
}
6094
/* Release the vnode's mutex. */
void
vnode_unlock(vnode_t vp)
{
	lck_mtx_unlock(&vp->v_lock);
}
6100
6101 void
vnode_hold(vnode_t vp)6102 vnode_hold(vnode_t vp)
6103 {
6104 int32_t old_holdcount = os_atomic_inc_orig(&vp->v_holdcount, relaxed);
6105
6106 if (old_holdcount == INT32_MAX) {
6107 /*
6108 * Because we allow atomic ops on the holdcount it is
6109 * possible that when the vnode is examined, its holdcount
6110 * is different than what will be printed in this
6111 * panic message.
6112 */
6113 panic("%s: vp %p holdcount overflow from : %d v_tag = %d, v_type = %d, v_flag = %x.",
6114 __FUNCTION__, vp, old_holdcount, vp->v_tag, vp->v_type, vp->v_flag);
6115 }
6116 }
6117
#define VNODE_HOLD_NO_SMR (1<<29) /* holdcount flag bit: makes vnode_hold_smr() fail so the vnode can be freed */
6119
6120 /*
6121 * To be used when smr is the only protection (cache_lookup and cache_lookup_path)
6122 */
6123 bool
vnode_hold_smr(vnode_t vp)6124 vnode_hold_smr(vnode_t vp)
6125 {
6126 int32_t holdcount;
6127
6128 /*
6129 * For "high traffic" vnodes like rootvnode, the atomic
6130 * cmpexcg loop below can turn into a infinite loop, no need
6131 * to do it for vnodes that won't be dealloc'ed
6132 */
6133 if (!(os_atomic_load(&vp->v_flag, relaxed) & VCANDEALLOC)) {
6134 vnode_hold(vp);
6135 return true;
6136 }
6137
6138 for (;;) {
6139 holdcount = os_atomic_load(&vp->v_holdcount, relaxed);
6140
6141 if (holdcount & VNODE_HOLD_NO_SMR) {
6142 return false;
6143 }
6144
6145 if ((os_atomic_cmpxchg(&vp->v_holdcount, holdcount, holdcount + 1, relaxed) != 0)) {
6146 return true;
6147 }
6148 }
6149 }
6150
6151 /*
6152 * free callback from smr enabled zones
6153 */
6154 static void
vnode_smr_free(void * _vp,__unused size_t _size)6155 vnode_smr_free(void *_vp, __unused size_t _size)
6156 {
6157 vnode_t vp = _vp;
6158
6159 bzero(vp, sizeof(*vp));
6160 }
6161
/*
 * Drop a holdcount reference.  When the last holdcount on a dead,
 * deallocable (VCANDEALLOC) vnode is dropped, this may take the vnode
 * off the dead list and free the structure entirely, returning NULLVP;
 * otherwise the vnode is returned.  'locked' says whether the caller
 * holds the vnode lock; the lock is never held on return.
 */
static vnode_t
vnode_drop_internal(vnode_t vp, bool locked)
{
	int32_t old_holdcount = os_atomic_dec_orig(&vp->v_holdcount, relaxed);

	if (old_holdcount < 1) {
		if (locked) {
			vnode_unlock(vp);
		}

		/*
		 * Because we allow atomic ops on the holdcount it is possible
		 * that when the vnode is examined, its holdcount is different
		 * than what will be printed in this panic message.
		 */
		panic("%s : vp %p holdcount -ve: %d.  v_tag = %d, v_type = %d, v_flag = %x.",
		    __FUNCTION__, vp, old_holdcount - 1, vp->v_tag, vp->v_type, vp->v_flag);
	}

	/* fast path: not the last hold, or the vnode can't be deallocated anyway */
	if (vn_dealloc_level == DEALLOC_VNODE_NONE || old_holdcount > 1 ||
	    !(vp->v_flag & VCANDEALLOC) || !(vp->v_lflag & VL_DEAD)) {
		if (locked) {
			vnode_unlock(vp);
		}
		return vp;
	}

	if (!locked) {
		vnode_lock(vp);
	}

	/* re-check under the vnode lock: a new hold/iocount/usecount revives it */
	if ((os_atomic_load(&vp->v_holdcount, relaxed) != 0) || vp->v_iocount ||
	    vp->v_usecount || !(vp->v_flag & VCANDEALLOC) || !(vp->v_lflag & VL_DEAD)) {
		vnode_unlock(vp);
		return vp;
	}

	vnode_lock_convert(vp);
	vnode_list_lock();

	/*
	 * the v_listflag field is protected by the vnode_list_lock
	 *
	 * The final cmpxchg publishes VNODE_HOLD_NO_SMR so that concurrent
	 * vnode_hold_smr() callers fail instead of resurrecting the vnode.
	 */
	if (VONLIST(vp) && (vp->v_listflag & VLIST_DEAD) &&
	    (numvnodes > desiredvnodes || (vp->v_listflag & VLIST_NO_REUSE) ||
	    vn_dealloc_level != DEALLOC_VNODE_ALL || deadvnodes >= deadvnodes_high) &&
	    (os_atomic_cmpxchg(&vp->v_holdcount, 0, VNODE_HOLD_NO_SMR, relaxed) != 0)) {
		VREMDEAD("vnode_list_remove", vp);
		numvnodes--;
		freeablevnodes--;
		deallocedvnodes++;
		vp->v_listflag = 0;

		send_freeable_vnodes_telemetry();
		vnode_list_unlock();

#if CONFIG_MACF
		struct label *tmpl = mac_vnode_label(vp);
		os_atomic_store(&vp->v_label, NULL, release);
#endif /* CONFIG_MACF */

		vnode_unlock(vp);

#if CONFIG_MACF
		if (tmpl) {
			mac_vnode_label_free(tmpl);
		}
#endif /* CONFIG_MACF */

		if (nc_smr_enabled) {
			zfree_smr(vnode_zone, vp);
		} else {
			zfree(vnode_zone, vp);
		}

		vp = NULLVP;
	} else {
		vnode_list_unlock();
		vnode_unlock(vp);
	}

	return vp;
}
6245
/* Drop a holdcount and release the vnode lock the caller holds; may free the vnode (returns NULLVP). */
vnode_t
vnode_drop_and_unlock(vnode_t vp)
{
	return vnode_drop_internal(vp, true);
}
6251
/* Drop a holdcount (vnode lock not held by caller); may free the vnode (returns NULLVP). */
vnode_t
vnode_drop(vnode_t vp)
{
	return vnode_drop_internal(vp, false);
}
6257
/*
 * Read-only sysctls exporting the vnode accounting counters under
 * vfs.vnstats (population, free/dead/rage/async list sizes, and
 * cumulative allocation/recycle counters).
 */
SYSCTL_NODE(_vfs, OID_AUTO, vnstats, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "vfs vnode stats");

SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, vn_dealloc_level,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &vn_dealloc_level, 0, "");
SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, desired_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &desiredvnodes, 0, "");
/* current population and its bounds */
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &numvnodes, "");
SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_vnodes_min,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &numvnodes_min, 0, "");
SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_vnodes_max,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &numvnodes_max, 0, "");
/* deallocable (VCANDEALLOC) vnode accounting */
SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_deallocable_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &freeablevnodes, 0, "");
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_deallocable_busy_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &busyvnodes, "");
/* per-list population */
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_dead_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &deadvnodes, "");
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_dead_vnodes_to_dealloc,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &deadvnodes_noreuse, "");
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_async_work_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &async_work_vnodes, "");
SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_rapid_aging_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ragevnodes, 0, "");
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_free_vnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &freevnodes, "");
/* cumulative counters */
SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_recycledvnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &num_recycledvnodes, "");
SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_allocedvnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &allocedvnodes, "");
SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_deallocedvnodes,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &deallocedvnodes, "");
SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_newvnode_calls,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &newvnode, "");
SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_newvnode_calls_nodead,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &newvnode_nodead, "");
6311
/*
 * Take an iocount on a vnode, acquiring and releasing the vnode lock
 * around vnode_get_locked().  Returns 0 on success or ENOENT if the
 * vnode is dead/terminating with no outstanding iocount.
 */
int
vnode_get(struct vnode *vp)
{
	int error;

	vnode_lock_spin(vp);
	error = vnode_get_locked(vp);
	vnode_unlock(vp);

	return error;
}
6323
/*
 * Take an iocount with the vnode lock already held.  Fails with ENOENT
 * when the vnode is terminating or dead and has no outstanding iocount;
 * panics on iocount overflow.
 */
int
vnode_get_locked(struct vnode *vp)
{
#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
	/* a dead/terminating vnode with no iocount can't be revived this way */
	if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
		return ENOENT;
	}

	if (os_add_overflow(vp->v_iocount, 1, &vp->v_iocount)) {
		panic("v_iocount overflow");
	}

#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	return 0;
}
6343
6344 /*
6345 * vnode_getwithvid() cuts in line in front of a vnode drain (that is,
6346 * while the vnode is draining, but at no point after that) to prevent
6347 * deadlocks when getting vnodes from filesystem hashes while holding
6348 * resources that may prevent other iocounts from being released.
6349 */
6350 int
vnode_getwithvid(vnode_t vp,uint32_t vid)6351 vnode_getwithvid(vnode_t vp, uint32_t vid)
6352 {
6353 return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID | VNODE_DRAINO));
6354 }
6355
6356 /*
6357 * vnode_getwithvid_drainok() is like vnode_getwithvid(), but *does* block behind a vnode
6358 * drain; it exists for use in the VFS name cache, where we really do want to block behind
6359 * vnode drain to prevent holding off an unmount.
6360 */
6361 int
vnode_getwithvid_drainok(vnode_t vp,uint32_t vid)6362 vnode_getwithvid_drainok(vnode_t vp, uint32_t vid)
6363 {
6364 return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID));
6365 }
6366
/* Take an iocount on a vnode the caller already holds a reference on (no vid check). */
int
vnode_getwithref(vnode_t vp)
{
	return vget_internal(vp, 0, 0);
}
6372
6373 /*
6374 * This is not a noblock variant of vnode_getwithref, this also returns an error
6375 * if the vnode is dead. It should only be called if the calling context already
6376 * has a usecount or iocount.
6377 */
6378 int
vnode_getwithref_noblock(vnode_t vp)6379 vnode_getwithref_noblock(vnode_t vp)
6380 {
6381 return vget_internal(vp, 0, (VNODE_NOBLOCK | VNODE_NODEAD | VNODE_WITHREF));
6382 }
6383
/* Take an iocount unconditionally, even on a draining/terminating vnode. */
__private_extern__ int
vnode_getalways(vnode_t vp)
{
	return vget_internal(vp, 0, VNODE_ALWAYS);
}
6389
/* As vnode_getalways(), but flags the iocount as taken from the pager path. */
__private_extern__ int
vnode_getalways_from_pager(vnode_t vp)
{
	return vget_internal(vp, 0, VNODE_ALWAYS | VNODE_PAGER);
}
6395
/*
 * Put a vnode into the canonical "dead" state: detached from any mount,
 * ops vector pointed at the dead vnodeops, no fs-private data, type VBAD,
 * VL_DEAD set.
 */
static inline void
vn_set_dead(vnode_t vp)
{
	vp->v_mount = NULL;
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_data = NULL;
	vp->v_type = VBAD;
	vp->v_lflag |= VL_DEAD;
}
6406
/*
 * Drop an iocount with the vnode lock held (spin mode on entry; may be
 * converted, dropped and retaken internally).  When the last iocount
 * goes away this drives VNOP_INACTIVE if VL_NEEDINACTIVE is set, and
 * initiates reclaim for vnodes marked VL_MARKTERM — or defers reclaim to
 * the async worker when called from the pager.  Always returns 0.
 */
static int
vnode_put_internal_locked(vnode_t vp, bool from_pager)
{
	vfs_context_t ctx = vfs_context_current();      /* hoist outside loop */

#if DIAGNOSTIC
	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
#endif
retry:
	if (vp->v_iocount < 1) {
		panic("vnode_put(%p): iocount < 1", vp);
	}

	/* not the last reference: just drop the iocount and return */
	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
		vnode_dropiocount(vp);
		return 0;
	}

	if (((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE)) {
		vp->v_lflag &= ~VL_NEEDINACTIVE;

		if (UBCINFOEXISTS(vp)) {
			/* releases the vnode lock as a side effect */
			ubc_cs_free_and_vnode_unlock(vp);
		} else {
			vnode_unlock(vp);
		}

		VNOP_INACTIVE(vp, ctx);

		vnode_lock_spin(vp);
		/*
		 * because we had to drop the vnode lock before calling
		 * VNOP_INACTIVE, the state of this vnode may have changed...
		 * we may pick up both VL_MARTERM and either
		 * an iocount or a usecount while in the VNOP_INACTIVE call
		 * we don't want to call vnode_reclaim_internal on a vnode
		 * that has active references on it... so loop back around
		 * and reevaluate the state
		 */
		goto retry;
	}
	vp->v_lflag &= ~VL_NEEDINACTIVE;

	vnode_lock_convert(vp);
	if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
		if (from_pager) {
			/*
			 * We can't initiate reclaim when called from the pager
			 * because it will deadlock with itself so we hand it
			 * off to the async cleaner thread.
			 */
			vnode_async_list_add(vp);
		} else {
			vnode_reclaim_internal(vp, 1, 1, 0);
		}
	}
	vnode_dropiocount(vp);
	vnode_list_add(vp);

	return 0;
}
6468
/* Drop an iocount with the vnode lock held (non-pager path). */
int
vnode_put_locked(vnode_t vp)
{
	return vnode_put_internal_locked(vp, false);
}
6474
6475 int
vnode_put(vnode_t vp)6476 vnode_put(vnode_t vp)
6477 {
6478 int retval;
6479
6480 vnode_lock_spin(vp);
6481 vnode_hold(vp);
6482 retval = vnode_put_internal_locked(vp, false);
6483 vnode_drop_and_unlock(vp);
6484
6485 return retval;
6486 }
6487
6488 int
vnode_put_from_pager(vnode_t vp)6489 vnode_put_from_pager(vnode_t vp)
6490 {
6491 int retval;
6492
6493 vnode_lock_spin(vp);
6494 vnode_hold(vp);
6495 /* Cannot initiate reclaim while paging */
6496 retval = vnode_put_internal_locked(vp, true);
6497 vnode_drop_and_unlock(vp);
6498
6499 return retval;
6500 }
6501
/* Return the vnode's current writecount (unsynchronized snapshot). */
int
vnode_writecount(vnode_t vp)
{
	return vp->v_writecount;
}
6507
/* is vnode_t in use by others? */
int
vnode_isinuse(vnode_t vp, int refcnt)
{
	/* Unlocked entry point; vnode_isinuse_locked() takes the lock itself. */
	return vnode_isinuse_locked(vp, refcnt, 0);
}
6514
/*
 * Return the vnode's current usecount (v_usecount).
 * Read without the vnode lock, so callers get a snapshot only.
 */
int
vnode_usecount(vnode_t vp)
{
	return vp->v_usecount;
}
6520
/*
 * Return the vnode's iocount as seen by the caller.
 *
 * If a link change is in flight (VE_LINKCHANGE set), the holder of the
 * "link lock" owns one extra iocount of its own (see vnode_link_lock),
 * so take the vnode lock, re-check the flag, and subtract that iocount
 * so the caller sees only genuine references.
 */
int
vnode_iocount(vnode_t vp)
{
	/* Fast path: no link change in progress, report v_iocount directly. */
	if (!(os_atomic_load(&vp->v_ext_flag, relaxed) & VE_LINKCHANGE)) {
		return vp->v_iocount;
	} else {
		int iocount = 0;
		vnode_lock_spin(vp);
		/* Re-check under the lock; the flag may have cleared meanwhile. */
		if (!(os_atomic_load(&vp->v_ext_flag, relaxed) & VE_LINKCHANGE)) {
			iocount = vp->v_iocount;
		} else {
			/* the "link lock" takes its own iocount */
			iocount = vp->v_iocount - 1;
		}
		vnode_unlock(vp);
		return iocount;
	}
}
6539
6540 int
vnode_isinuse_locked(vnode_t vp,int refcnt,int locked)6541 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
6542 {
6543 int retval = 0;
6544
6545 if (!locked) {
6546 vnode_lock_spin(vp);
6547 }
6548 if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
6549 retval = 1;
6550 goto out;
6551 }
6552 if (vp->v_type == VREG) {
6553 retval = ubc_isinuse_locked(vp, refcnt, 1);
6554 }
6555
6556 out:
6557 if (!locked) {
6558 vnode_unlock(vp);
6559 }
6560 return retval;
6561 }
6562
6563 kauth_cred_t
vnode_cred(vnode_t vp)6564 vnode_cred(vnode_t vp)
6565 {
6566 if (vp->v_cred) {
6567 return kauth_cred_require(vp->v_cred);
6568 }
6569
6570 return NULL;
6571 }
6572
6573
/* resume vnode_t */
errno_t
vnode_resume(vnode_t vp)
{
	/*
	 * NOTE(review): v_lflag and v_owner are examined before the vnode
	 * lock is taken; since only the suspending thread can match
	 * v_owner == current_thread(), the unlocked check is presumably
	 * safe — confirm against vnode_suspend()/vnode_drain().
	 */
	if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
		vnode_lock_spin(vp);
		vp->v_lflag &= ~VL_SUSPENDED;
		vp->v_owner = NULL;
		vnode_unlock(vp);

		/* unblock threads sleeping on v_iocount in vnode_getiocount() */
		wakeup(&vp->v_iocount);
	}
	return 0;
}
6588
/* suspend vnode_t
 * Please do not use on more than one vnode at a time as it may
 * cause deadlocks.
 * xxx should we explicitly prevent this from happening?
 */
6594
errno_t
vnode_suspend(vnode_t vp)
{
	/*
	 * Unlocked pre-check: racy, but a lost race is handled by the
	 * v_owner test under the lock below.
	 */
	if (vp->v_lflag & VL_SUSPENDED) {
		return EBUSY;
	}

	vnode_lock_spin(vp);

	/*
	 * xxx is this sufficient to check if a vnode_drain is
	 * in progress?
	 */

	/* only claim the suspension if no drain/suspend owner exists */
	if (vp->v_owner == NULL) {
		vp->v_lflag |= VL_SUSPENDED;
		vp->v_owner = current_thread();
	}
	vnode_unlock(vp);

	/*
	 * NOTE(review): 0 is returned even when v_owner was already set and
	 * no suspension was actually taken — callers cannot distinguish the
	 * two outcomes; confirm this is intended.
	 */
	return 0;
}
6617
6618 /*
6619 * Release any blocked locking requests on the vnode.
6620 * Used for forced-unmounts.
6621 *
6622 * XXX What about network filesystems?
6623 */
6624 static void
vnode_abort_advlocks(vnode_t vp)6625 vnode_abort_advlocks(vnode_t vp)
6626 {
6627 if (vp->v_flag & VLOCKLOCAL) {
6628 lf_abort_advlocks(vp);
6629 }
6630 }
6631
6632
/*
 * Wait for all other iocounts on the vnode to drain away.
 *
 * Caller holds the vnode lock (msleep below releases/reacquires v_lock)
 * and one iocount of its own — hence the "> 1" test. VL_DRAIN is set and
 * v_owner recorded so vnode_getiocount() admits only this thread while
 * the drain is in progress.
 *
 * With bootarg_no_vnode_drain, each sleep is bounded (10s, shortened to
 * 1s once vfs_unmountall_started) and a presumed-leaked iocount is
 * forcibly cleared when the timeout fires with no pending output.
 */
static errno_t
vnode_drain(vnode_t vp)
{
	if (vp->v_lflag & VL_DRAIN) {
		panic("vnode_drain: recursive drain");
		return ENOENT; /* not reached; panic() does not return */
	}
	vp->v_lflag |= VL_DRAIN;
	vp->v_owner = current_thread();

	while (vp->v_iocount > 1) {
		if (bootarg_no_vnode_drain) {
			struct timespec ts = {.tv_sec = 10, .tv_nsec = 0};
			int error;

			if (vfs_unmountall_started) {
				ts.tv_sec = 1;
			}

			error = msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain_with_timeout", &ts);

			/* Try to deal with leaked iocounts under bootarg and shutting down */
			if (vp->v_iocount > 1 && error == EWOULDBLOCK &&
			    ts.tv_sec == 1 && vp->v_numoutput == 0) {
				vp->v_iocount = 1;
				break;
			}
		} else {
			msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
		}
	}

	vp->v_lflag &= ~VL_DRAIN;

	return 0;
}
6669
6670
6671 /*
6672 * if the number of recent references via vnode_getwithvid or vnode_getwithref
6673 * exceeds this threshold, than 'UN-AGE' the vnode by removing it from
6674 * the LRU list if it's currently on it... once the iocount and usecount both drop
6675 * to 0, it will get put back on the end of the list, effectively making it younger
6676 * this allows us to keep actively referenced vnodes in the list without having
6677 * to constantly remove and add to the list each time a vnode w/o a usecount is
6678 * referenced which costs us taking and dropping a global lock twice.
6679 * However, if the vnode is marked DIRTY, we want to pull it out much earlier
6680 */
6681 #define UNAGE_THRESHHOLD 25
6682 #define UNAGE_DIRTYTHRESHHOLD 6
6683
/*
 * Take an iocount on a vnode, honoring the VNODE_* behavior flags.
 *
 * Loops until the vnode is neither suspended, draining, nor terminating
 * (or a flag says to fail fast / barge ahead), then bumps v_iocount.
 *
 * Returns:
 *   0      - iocount taken
 *   ENOENT - dead deadfs vnode (VNODE_NODEAD), suspended vnode
 *            (VNODE_NOSUSPEND), would have to block (VNODE_NOBLOCK),
 *            or vid mismatch (VNODE_WITHID)
 *   ENODEV - vnode's mount is unmounting (VNODE_WITHID callers only)
 *   other  - interrupted sleep (tty vnodes sleep with PCATCH)
 */
errno_t
vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
{
	int nodead = vflags & VNODE_NODEAD;
	int nosusp = vflags & VNODE_NOSUSPEND;
	int always = vflags & VNODE_ALWAYS;
	int beatdrain = vflags & VNODE_DRAINO;
	int withvid = vflags & VNODE_WITHID;
	int forpager = vflags & VNODE_PAGER;
	int noblock = vflags & VNODE_NOBLOCK;

	for (;;) {
		int sleepflg = 0;

		/*
		 * if it is a dead vnode with deadfs
		 */
		if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
			return ENOENT;
		}
		/*
		 * will return VL_DEAD ones
		 */
		if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0) {
			break;
		}
		/*
		 * if suspended vnodes are to be failed
		 */
		if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
			return ENOENT;
		}
		/*
		 * if you are the owner of drain/suspend/termination , can acquire iocount
		 * check for VL_TERMINATE; it does not set owner
		 */
		if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
		    (vp->v_owner == current_thread())) {
			break;
		}

		/* VNODE_ALWAYS: barge through regardless of drain/suspend state */
		if (always != 0) {
			break;
		}

		/* VNODE_NOBLOCK: fail instead of sleeping below */
		if (noblock && (vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE))) {
			return ENOENT;
		}

		/*
		 * If this vnode is getting drained, there are some cases where
		 * we can't block or, in case of tty vnodes, want to be
		 * interruptible.
		 */
		if (vp->v_lflag & VL_DRAIN) {
			/*
			 * In some situations, we want to get an iocount
			 * even if the vnode is draining to prevent deadlock,
			 * e.g. if we're in the filesystem, potentially holding
			 * resources that could prevent other iocounts from
			 * being released.
			 */
			if (beatdrain) {
				break;
			}
			/*
			 * Don't block if the vnode's mount point is unmounting,
			 * as we may be the thread the unmount itself is waiting
			 * on. Only callers who pass in vids (at this point we've
			 * already handled nosusp and nodead) are expecting error
			 * returns from this function, so we can only return
			 * errors for those. ENODEV is intended to inform callers
			 * that the call failed because an unmount is in progress.
			 */
			if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount)) {
				return ENODEV;
			}

			/* tty vnodes: allow signals to interrupt the wait */
			if (vnode_istty(vp)) {
				sleepflg = PCATCH;
			}
		}

		/* upgrade from spin to full mutex hold before sleeping */
		vnode_lock_convert(vp);

		if (vp->v_lflag & VL_TERMINATE) {
			int error;

			/* vnode_reclaim_internal() wakes &v_lflag when done */
			vp->v_lflag |= VL_TERMWANT;

			error = msleep(&vp->v_lflag, &vp->v_lock,
			    (PVFS | sleepflg), "vnode getiocount", NULL);
			if (error) {
				return error;
			}
		} else {
			msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
		}
	}
	if (withvid && vid != vp->v_id) {
		return ENOENT;
	}
	/*
	 * 'UN-AGE' a frequently re-referenced vnode by pulling it off the
	 * LRU list once the reference count crosses the threshold (earlier
	 * for dirty vnodes); see the UNAGE_THRESHHOLD comment above.
	 */
	if (!forpager && (++vp->v_references >= UNAGE_THRESHHOLD ||
	    (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD))) {
		vp->v_references = 0;
		vnode_list_remove(vp);
	}
	vp->v_iocount++;
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif
	return 0;
}
6797
/*
 * Drop one iocount. Caller holds the vnode lock.
 * If a drain or suspend is pending and this leaves at most the
 * drainer's own iocount outstanding, wake the thread sleeping on
 * v_iocount (see vnode_drain / vnode_getiocount).
 */
static void
vnode_dropiocount(vnode_t vp)
{
	if (vp->v_iocount < 1) {
		panic("vnode_dropiocount(%p): v_iocount < 1", vp);
	}

	vp->v_iocount--;
/*
 * NOTE(review): this file mixes "#ifdef CONFIG_IOCOUNT_TRACE" (here) with
 * "#if CONFIG_IOCOUNT_TRACE" (e.g. in vnode_reclaim_internal) — confirm
 * which form is intended.
 */
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, -1);
#endif
	if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1)) {
		wakeup(&vp->v_iocount);
	}
}
6813
6814
/*
 * Reclaim a vnode: unlocked external entry point for
 * vnode_reclaim_internal() (not locked, not reused, no flags).
 */
void
vnode_reclaim(struct vnode * vp)
{
	vnode_reclaim_internal(vp, 0, 0, 0);
}
6820
/*
 * Terminate a vnode: drain other iocounts, clean it via vgone(), bump
 * v_id so stale vnode_getwithvid() callers fail, and reset the vnode to
 * a VBAD shell.
 *
 * locked - caller already holds the vnode lock
 * reuse  - caller will reuse the vnode itself; skip putting it back on
 *          a list at the end
 * flags  - passed to vgone(); REVOKEALL additionally forces tty readers
 *          to give up their iocounts before draining
 */
__private_extern__
void
vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
{
	int isfifo = 0;

	if (!locked) {
		vnode_lock(vp);
	}

	if (vp->v_lflag & VL_TERMINATE) {
		panic("vnode reclaim in progress");
	}
	vp->v_lflag |= VL_TERMINATE;
	vp->v_lflag &= ~VL_OPSCHANGE;

	vn_clearunionwait(vp, 1);

	/*
	 * We have to force any terminals in reads to return and give up
	 * their iocounts. It's important to do this after VL_TERMINATE
	 * has been set to ensure new reads are blocked while the
	 * revoke is in progress.
	 */
	if (vnode_istty(vp) && (flags & REVOKEALL) && (vp->v_iocount > 1)) {
		vnode_unlock(vp);
		VNOP_IOCTL(vp, TIOCREVOKE, (caddr_t)NULL, 0, vfs_context_kernel());
		vnode_lock(vp);
	}

	/* wait for all other iocounts to drain away (we keep our own) */
	vnode_drain(vp);

#if CONFIG_FILE_LEASES
	/*
	 * Revoke all leases in place for this vnode as it is about to be reclaimed.
	 * In normal case, there shouldn't be any leases in place by the time we
	 * get here as there shouldn't be any opens on the vnode (usecount == 0).
	 * However, in the case of force unmount or unmount of a volume that
	 * contains file that was opened with O_EVTONLY then the vnode can be
	 * reclaimed while the file is still opened.
	 */
	vnode_revokelease(vp, true);
#endif

	isfifo = (vp->v_type == VFIFO);

	if (vp->v_type != VBAD) {
		vgone(vp, flags);               /* clean and reclaim the vnode */
	}
	/*
	 * give the vnode a new identity so that vnode_getwithvid will fail
	 * on any stale cache accesses...
	 * grab the list_lock so that if we're in "new_vnode"
	 * behind the list_lock trying to steal this vnode, the v_id is stable...
	 * once new_vnode drops the list_lock, it will block trying to take
	 * the vnode lock until we release it... at that point it will evaluate
	 * whether the v_id has changed
	 * also need to make sure that the vnode isn't on a list where "new_vnode"
	 * can find it after the v_id has been bumped until we are completely done
	 * with the vnode (i.e. putting it back on a list has to be the very last
	 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
	 * are holding an io_count on the vnode... they need to drop the io_count
	 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
	 * they are completely done with the vnode
	 */
	vnode_list_lock();

	vnode_list_remove_locked(vp);
	vp->v_id++;

	vnode_list_unlock();

	if (isfifo) {
		struct fifoinfo * fip;

		fip = vp->v_fifoinfo;
		vp->v_fifoinfo = NULL;
		kfree_type(struct fifoinfo, fip);
	}
	vp->v_type = VBAD;

	/* sanity: vgone() must have fully detached everything from the vnode */
	if (vp->v_data) {
		panic("vnode_reclaim_internal: cleaned vnode isn't");
	}
	if (vp->v_numoutput) {
		panic("vnode_reclaim_internal: clean vnode has pending I/O's");
	}
	if (UBCINFOEXISTS(vp)) {
		panic("vnode_reclaim_internal: ubcinfo not cleaned");
	}
	if (vp->v_parent) {
		panic("vnode_reclaim_internal: vparent not removed");
	}
	if (vp->v_name) {
		panic("vnode_reclaim_internal: vname not removed");
	}

#if CONFIG_FILE_LEASES
	if (__improbable(!LIST_EMPTY(&vp->v_leases))) {
		panic("vnode_reclaim_internal: vleases NOT empty");
	}
#endif

	vp->v_socket = NULL;

	vp->v_lflag &= ~VL_TERMINATE;
	vp->v_owner = NULL;

#if CONFIG_IOCOUNT_TRACE
	if (__improbable(bootarg_vnode_iocount_trace)) {
		bzero(vp->v_iocount_trace,
		    IOCOUNT_TRACE_MAX_TYPES * sizeof(struct vnode_iocount_trace));
	}
#endif /* CONFIG_IOCOUNT_TRACE */

	KNOTE(&vp->v_knotes, NOTE_REVOKE);

	/* Make sure that when we reuse the vnode, no knotes left over */
	klist_init(&vp->v_knotes);

	if (vp->v_lflag & VL_TERMWANT) {
		vp->v_lflag &= ~VL_TERMWANT;
		/* wake threads parked in vnode_getiocount() on &v_lflag */
		wakeup(&vp->v_lflag);
	}
	if (!reuse) {
		/*
		 * make sure we get on the
		 * dead list if appropriate
		 */
		vnode_list_add(vp);
	}
	if (!locked) {
		vnode_unlock(vp);
	}
}
6956
/*
 * Common implementation behind vnode_create(), vnode_create_ext() and
 * vnode_initialize().
 *
 * flavor/size - must be VNCREATE_FLAVOR/VCREATESIZE; VNCREATE_TRIGGER
 *               creates are validated and then normalized to these
 * data        - struct vnode_fsparam (or vnode_trigger_param) from the FS
 * vpp         - in: optionally a vnode from vnode_create_empty();
 *               out: the initialized (or empty, dead-marked) vnode
 * vc_options  - VNODE_CREATE_EMPTY: allocate only, mark dead, skip init;
 *               VNODE_CREATE_NODEALLOC: passed through to new_vnode()
 *
 * On success *vpp carries the reference obtained from new_vnode(); the
 * error paths release it via vnode_put*, implying an iocount is held.
 * Device vnodes (VCHR/VBLK) are checked for aliases via checkalias();
 * when an alias exists, the freshly obtained vnode is discarded and the
 * aliased vnode is re-initialized in its place.
 */
static int
vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp,
    vnode_create_options_t vc_options)
{
	int error;
	int insert = 1;
	vnode_t vp = NULLVP;
	vnode_t nvp;
	vnode_t dvp;
	struct  uthread *ut;
	struct componentname *cnp;
	struct vnode_fsparam *param = (struct vnode_fsparam *)data;
#if CONFIG_TRIGGERS
	struct vnode_trigger_param *tinfo = NULL;
#endif
	bool existing_vnode;
	bool init_vnode = !(vc_options & VNODE_CREATE_EMPTY);
	bool is_bdevvp = false;

	if (*vpp) {
		vp = *vpp;
		*vpp = NULLVP;
		existing_vnode = true;
	} else {
		existing_vnode = false;
	}

	if (init_vnode) {
		/* Do quick sanity check on the parameters. */
		if ((param == NULL) || (param->vnfs_vtype == VBAD)) {
			error = EINVAL;
			goto error_out;
		}

#if CONFIG_TRIGGERS
		if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) {
			tinfo = (struct vnode_trigger_param *)data;

			/* Validate trigger vnode input */
			if ((param->vnfs_vtype != VDIR) ||
			    (tinfo->vnt_resolve_func == NULL) ||
			    (tinfo->vnt_flags & ~VNT_VALID_MASK)) {
				error = EINVAL;
				goto error_out;
			}
			/* Fall through a normal create (params will be the same) */
			flavor = VNCREATE_FLAVOR;
			size = VCREATESIZE;
		}
#endif
		if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE)) {
			error = EINVAL;
			goto error_out;
		}
	}

	if (!existing_vnode) {
		if ((error = new_vnode(&vp, !(vc_options & VNODE_CREATE_NODEALLOC)))) {
			return error;
		}
		if (!init_vnode) {
			/* Make it so that it can be released by a vnode_put() */
			vnode_lock(vp);
			vn_set_dead(vp);
			vnode_unlock(vp);
			*vpp = vp;
			return 0;
		}
	} else {
		/*
		 * A vnode obtained by vnode_create_empty has been passed to
		 * vnode_initialize - Unset VL_DEAD set by vn_set_dead. After
		 * this point, it is set back on any error.
		 */
		vnode_lock(vp);
		vp->v_lflag &= ~VL_DEAD;
		vnode_unlock(vp);
	}

	dvp = param->vnfs_dvp;
	cnp = param->vnfs_cnp;

	/* wire up the FS-supplied identity: ops vector, type, FS node */
	vp->v_op = param->vnfs_vops;
	vp->v_type = (uint8_t)param->vnfs_vtype;
	vp->v_data = param->vnfs_fsnode;

	if (param->vnfs_markroot) {
		vp->v_flag |= VROOT;
	}
	if (param->vnfs_marksystem) {
		vp->v_flag |= VSYSTEM;
	}
	if (vp->v_type == VREG) {
		/* regular files get UBC pager state sized to the file */
		error = ubc_info_init_withsize(vp, param->vnfs_filesize);
		if (error) {
#ifdef CONFIG_IOCOUNT_TRACE
			record_vp(vp, 1);
#endif
			vnode_hold(vp);
			vnode_lock(vp);
			vn_set_dead(vp);

			vnode_put_locked(vp);
			vnode_drop_and_unlock(vp);
			return error;
		}
		if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED) {
			memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control);
		}
	}
#ifdef CONFIG_IOCOUNT_TRACE
	record_vp(vp, 1);
#endif

#if CONFIG_FIRMLINKS
	vp->v_fmlink = NULLVP;
#endif
	vp->v_flag &= ~VFMLINKTARGET;

#if CONFIG_TRIGGERS
	/*
	 * For trigger vnodes, attach trigger info to vnode
	 */
	if ((vp->v_type == VDIR) && (tinfo != NULL)) {
		/*
		 * Note: has a side effect of incrementing trigger count on the
		 * mount if successful, which we would need to undo on a
		 * subsequent failure.
		 */
#ifdef CONFIG_IOCOUNT_TRACE
		record_vp(vp, -1);
#endif
		error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE);
		if (error) {
			printf("vnode_create: vnode_resolver_create() err %d\n", error);
			vnode_hold(vp);
			vnode_lock(vp);
			vn_set_dead(vp);
#ifdef CONFIG_IOCOUNT_TRACE
			record_vp(vp, 1);
#endif
			vnode_put_locked(vp);
			vnode_drop_and_unlock(vp);
			return error;
		}
	}
#endif
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		vp->v_tag = VT_DEVFS;           /* callers will reset if needed (bdevvp) */

		if ((nvp = checkalias(vp, param->vnfs_rdev))) {
			/*
			 * if checkalias returns a vnode, it will be locked
			 *
			 * first get rid of the unneeded vnode we acquired
			 */
			vp->v_data = NULL;
			vp->v_op = spec_vnodeop_p;
			vp->v_type = VBAD;
			vp->v_lflag = VL_DEAD;
			vp->v_data = NULL;
			vp->v_tag = VT_NON;
			vnode_put(vp);

			/*
			 * switch to aliased vnode and finish
			 * preparing it
			 */
			vp = nvp;

			is_bdevvp = (vp->v_flag & VBDEVVP);

			if (is_bdevvp) {
				printf("%s: alias vnode (vid = %u) is in state of change (start) v_flags = 0x%x v_numoutput = %d\n",
				    __func__, vp->v_id, vp->v_flag, vp->v_numoutput);
			}

			vnode_hold(vp);
			vp->v_lflag |= VL_OPSCHANGE;
			vclean(vp, 0);
			vp->v_op = param->vnfs_vops;
			vp->v_type = (uint8_t)param->vnfs_vtype;
			vp->v_data = param->vnfs_fsnode;
			vp->v_lflag = VL_OPSCHANGE;
			vp->v_mount = NULL;
			insmntque(vp, param->vnfs_mp);
			insert = 0;

			if (is_bdevvp) {
				printf("%s: alias vnode (vid = %u), is in state of change (end) v_flags = 0x%x v_numoutput = %d\n",
				    __func__, vp->v_id, vp->v_flag, vp->v_numoutput);
			}

			vnode_drop_and_unlock(vp);
			wakeup(&vp->v_lflag); /* chkvnlock is waiting for VL_DEAD to get unset */
		}

		if (VCHR == vp->v_type) {
			u_int maj = major(vp->v_rdev);

			/* character devices backed by a tty driver are marked VISTTY */
			if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY) {
				vp->v_flag |= VISTTY;
			}
		}
	}

	if (vp->v_type == VFIFO) {
		struct fifoinfo *fip;

		fip = kalloc_type(struct fifoinfo, Z_WAITOK | Z_ZERO);
		vp->v_fifoinfo = fip;
	}
	/* The file systems must pass the address of the location where
	 * they store the vnode pointer. When we add the vnode into the mount
	 * list and name cache they become discoverable. So the file system node
	 * must have the connection to vnode setup by then
	 */
	*vpp = vp;

	/* Add fs named reference. */
	if (param->vnfs_flags & VNFS_ADDFSREF) {
		vp->v_lflag |= VNAMED_FSHASH;
	}
	if (param->vnfs_mp) {
		if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL) {
			vp->v_flag |= VLOCKLOCAL;
		}
		if (insert) {
			if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
				panic("insmntque: vp on the free list");
			}

			/*
			 * enter in mount vnode list
			 */
			insmntque(vp, param->vnfs_mp);
		}
	}
	if (dvp && vnode_ref(dvp) == 0) {
		vp->v_parent = dvp;
	}
	if (cnp) {
		if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
			/*
			 * enter into name cache
			 * we've got the info to enter it into the name cache now
			 * cache_enter_create will pick up an extra reference on
			 * the name entered into the string cache
			 */
			vp->v_name = cache_enter_create(dvp, vp, cnp);
		} else {
			vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
		}

#if NAMEDSTREAMS
		if (cnp->cn_flags & MARKISSHADOW) {
			vp->v_flag |= VISSHADOW;
		}
#endif
	}
	if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
		/*
		 * this vnode is being created as cacheable in the name cache
		 * this allows us to re-enter it in the cache
		 */
		vp->v_flag |= VNCACHEABLE;
	}
	ut = current_uthread();

	if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
	    (ut->uu_flag & (UT_RAGE_VNODES | UT_KERN_RAGE_VNODES))) {
		/*
		 * process has indicated that it wants any
		 * vnodes created on its behalf to be rapidly
		 * aged to reduce the impact on the cached set
		 * of vnodes
		 *
		 * if UT_KERN_RAGE_VNODES is set, then the
		 * kernel internally wants vnodes to be rapidly
		 * aged, even if the process hasn't requested
		 * this
		 */
		vp->v_flag |= VRAGE;
	}

#if CONFIG_SECLUDED_MEMORY
	switch (secluded_for_filecache) {
	case SECLUDED_FILECACHE_NONE:
		/*
		 * secluded_for_filecache == 0:
		 * + no file contents in secluded pool
		 */
		break;
	case SECLUDED_FILECACHE_APPS:
		/*
		 * secluded_for_filecache == 1:
		 * + no files from /
		 * + files from /Applications/ are OK
		 * + files from /Applications/Camera are not OK
		 * + no files that are open for write
		 */
		if (vnode_vtype(vp) == VREG &&
		    vnode_mount(vp) != NULL &&
		    (!(vfs_flags(vnode_mount(vp)) & MNT_ROOTFS))) {
			/* not from root filesystem: eligible for secluded pages */
			memory_object_mark_eligible_for_secluded(
				ubc_getobject(vp, UBC_FLAGS_NONE),
				TRUE);
		}
		break;
	case SECLUDED_FILECACHE_RDONLY:
		/*
		 * secluded_for_filecache == 2:
		 * + all read-only files OK, except:
		 *   + dyld_shared_cache_arm64*
		 *   + Camera
		 *   + mediaserverd
		 *   + cameracaptured
		 */
		if (vnode_vtype(vp) == VREG) {
			memory_object_mark_eligible_for_secluded(
				ubc_getobject(vp, UBC_FLAGS_NONE),
				TRUE);
		}
		break;
	default:
		break;
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	if (is_bdevvp) {
		/*
		 * The v_flag and v_lflag fields for the vnode above are
		 * manipulated without the vnode lock. This is fine for
		 * everything because no other use of this vnode is occurring.
		 * However the case of the bdevvp alias vnode reuse is different
		 * and the flags end up being modified while a thread may be in
		 * vnode_waitforwrites which sets VTHROTTLED and any one of the
		 * non atomic modifications of v_flag in this function can race
		 * with the setting of that flag and cause VTHROTTLED on vflag
		 * to get "lost".
		 *
		 * This should ideally be fixed by making sure all modifications
		 * in this function to the vnode flags are done under the
		 * vnode lock but at this time, a much smaller workaround is
		 * being employed and the more correct (and potentially
		 * much bigger) change will follow later.
		 *
		 * The effect of "losing" the VTHROTTLED flags would be a lost
		 * wakeup so we just issue that wakeup here since this happens
		 * only once per bdevvp vnode which are only one or two for a
		 * given boot.
		 */
		wakeup(&vp->v_numoutput);

		/*
		 * now make sure the flags that we were supposed to put aren't
		 * lost.
		 */
		vnode_lock_spin(vp);
		if (param->vnfs_flags & VNFS_ADDFSREF) {
			vp->v_lflag |= VNAMED_FSHASH;
		}
		if (param->vnfs_mp && (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL)) {
			vp->v_flag |= VLOCKLOCAL;
		}
		if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
			vp->v_flag |= VNCACHEABLE;
		}
		vnode_unlock(vp);
	}

	return 0;

error_out:
	if (existing_vnode) {
		vnode_put(vp);
	}
	return error;
}
7337
7338 int
vnode_create_ext(uint32_t flavor,uint32_t size,void * data,vnode_t * vpp,vnode_create_options_t vc_options)7339 vnode_create_ext(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, vnode_create_options_t vc_options)
7340 {
7341 if (vc_options & ~(VNODE_CREATE_EMPTY | VNODE_CREATE_NODEALLOC)) {
7342 return EINVAL;
7343 }
7344 *vpp = NULLVP;
7345 return vnode_create_internal(flavor, size, data, vpp, vc_options);
7346 }
7347
/* USAGE:
 * The following API creates a vnode, associates all the parameters specified in the
 * vnode_fsparam structure, and returns a vnode handle with a reference. Device aliasing
 * is handled here, so checkalias is obsoleted by this.
 */
int
vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
{
	/* Legacy entry point: always permits allocation of a new vnode. */
	return vnode_create_ext(flavor, size, data, vpp, VNODE_CREATE_NODEALLOC);
}
7358
/*
 * Allocate an uninitialized ("empty") vnode. The vnode is marked dead
 * (vn_set_dead) until vnode_initialize() is later called on it.
 */
int
vnode_create_empty(vnode_t *vpp)
{
	return vnode_create_ext(VNCREATE_FLAVOR, VCREATESIZE, NULL,
	           vpp, VNODE_CREATE_EMPTY);
}
7365
/*
 * Initialize a vnode previously obtained from vnode_create_empty().
 * *vpp must be a dead, unused vnode (asserted on DEVELOPMENT/DEBUG);
 * the real work happens in vnode_create_internal().
 */
int
vnode_initialize(uint32_t __unused flavor, uint32_t size, void *data, vnode_t *vpp)
{
	if (*vpp == NULLVP) {
		panic("NULL vnode passed to vnode_initialize");
	}
#if DEVELOPMENT || DEBUG
	/*
	 * We lock to check that vnode is fit for unlocked use in
	 * vnode_create_internal.
	 */
	vnode_lock_spin(*vpp);
	VNASSERT(((*vpp)->v_usecount == 0), *vpp,
	    ("vnode_initialize : usecount not 0, is %d", (*vpp)->v_usecount));
	VNASSERT(((*vpp)->v_lflag & VL_DEAD), *vpp,
	    ("vnode_initialize : v_lflag does not have VL_DEAD, is 0x%x",
	    (*vpp)->v_lflag));
	VNASSERT(((*vpp)->v_data == NULL), *vpp,
	    ("vnode_initialize : v_data not NULL"));
	vnode_unlock(*vpp);
#endif
	return vnode_create_internal(flavor, size, data, vpp, VNODE_CREATE_DEFAULT);
}
7389
7390 int
vnode_addfsref(vnode_t vp)7391 vnode_addfsref(vnode_t vp)
7392 {
7393 vnode_lock_spin(vp);
7394 if (vp->v_lflag & VNAMED_FSHASH) {
7395 panic("add_fsref: vp already has named reference");
7396 }
7397 if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
7398 panic("addfsref: vp on the free list");
7399 }
7400 vp->v_lflag |= VNAMED_FSHASH;
7401 vnode_unlock(vp);
7402 return 0;
7403 }
7404 int
vnode_removefsref(vnode_t vp)7405 vnode_removefsref(vnode_t vp)
7406 {
7407 vnode_lock_spin(vp);
7408 if ((vp->v_lflag & VNAMED_FSHASH) == 0) {
7409 panic("remove_fsref: no named reference");
7410 }
7411 vp->v_lflag &= ~VNAMED_FSHASH;
7412 vnode_unlock(vp);
7413 return 0;
7414 }
7415
/*
 * Acquire the per-vnode "link lock" used to serialize link changes.
 * The caller must already hold an iocount (panic otherwise); an
 * additional iocount is taken for the holder and VE_LINKCHANGE is set.
 * Contending threads mark VE_LINKCHANGEWAIT and sleep on v_ext_flag
 * until the current holder clears the flag in vnode_link_unlock().
 */
void
vnode_link_lock(vnode_t vp)
{
	vnode_lock_spin(vp);
	while (os_atomic_load(&vp->v_ext_flag, relaxed) & VE_LINKCHANGE) {
		/* record a waiter so the unlock path knows to wakeup() */
		os_atomic_or(&vp->v_ext_flag, VE_LINKCHANGEWAIT, relaxed);
		msleep(&vp->v_ext_flag, &vp->v_lock, PVFS | PSPIN,
		    "vnode_link_lock_wait", 0);
	}
	if (vp->v_iocount == 0) {
		panic("%s called without an iocount on the vnode", __FUNCTION__);
	}
	/* take the link lock's own iocount (see vnode_iocount adjustment) */
	vnode_get_locked(vp);
	os_atomic_or(&vp->v_ext_flag, VE_LINKCHANGE, relaxed);
	vnode_unlock(vp);
}
7432
/*
 * Release the per-vnode "link lock": clear VE_LINKCHANGE (and the
 * waiter flag), wake any waiters, and drop the iocount taken by
 * vnode_link_lock().
 */
void
vnode_link_unlock(vnode_t vp)
{
	bool do_wakeup = false;
	bool do_vnode_put = false;

	vnode_lock_spin(vp);
	if (os_atomic_load(&vp->v_ext_flag, relaxed) & VE_LINKCHANGEWAIT) {
		do_wakeup = true;
	}
	os_atomic_andnot(&vp->v_ext_flag, VE_LINKCHANGE | VE_LINKCHANGEWAIT, relaxed);
	/*
	 * If other references remain, the put is cheap and done under the
	 * lock; otherwise defer it until after the unlock — presumably
	 * because dropping the last iocount can trigger inactive/reclaim
	 * work that must not run with the lock held (confirm against
	 * vnode_put_internal_locked).
	 */
	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
		vnode_put_locked(vp);
	} else {
		do_vnode_put = true;
	}
	vnode_unlock(vp);
	if (do_wakeup) {
		wakeup(&vp->v_ext_flag);
	}
	if (do_vnode_put) {
		vnode_put(vp);
	}
}
7457
/*
 * Iterate over all mounted filesystems, invoking 'callout' on each with
 * an iteration reference held.
 *
 * A snapshot of fsids is taken up front (with slack of 10 for mounts
 * racing in); mounts that vanish before their turn simply fail the
 * per-fsid lookup and are skipped, as are dead mounts and (unless
 * VFS_ITERATE_NOSKIP_UNMOUNT) mounts mid-unmount.
 *
 * flags:
 *   VFS_ITERATE_TAIL_FIRST     - newest-first instead of oldest-first
 *   VFS_ITERATE_CB_DROPREF     - the callout drops the iter ref itself
 *                                (mp may be gone once it returns)
 *   VFS_ITERATE_NOSKIP_UNMOUNT - also visit unmounting mounts
 *
 * The callout's VFS_RETURNED_DONE / VFS_CLAIMED_DONE results stop the
 * iteration early; the function returns 0.
 */
int
vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg)
{
	mount_t mp;
	int ret = 0;
	fsid_t * fsid_list;
	int count, actualcount, i;
	void * allocmem;
	int indx_start, indx_stop, indx_incr;
	int cb_dropref = (flags & VFS_ITERATE_CB_DROPREF);
	int noskip_unmount = (flags & VFS_ITERATE_NOSKIP_UNMOUNT);

	/* snapshot size: current mount count plus slack for new arrivals */
	count = mount_getvfscnt();
	count += 10;

	fsid_list = kalloc_data(count * sizeof(fsid_t), Z_WAITOK);
	allocmem = (void *)fsid_list;

	actualcount = mount_fillfsids(fsid_list, count);

	/*
	 * Establish the iteration direction
	 * VFS_ITERATE_TAIL_FIRST overrides default head first order (oldest first)
	 */
	if (flags & VFS_ITERATE_TAIL_FIRST) {
		indx_start = actualcount - 1;
		indx_stop = -1;
		indx_incr = -1;
	} else { /* Head first by default */
		indx_start = 0;
		indx_stop = actualcount;
		indx_incr = 1;
	}

	for (i = indx_start; i != indx_stop; i += indx_incr) {
		/* obtain the mount point with iteration reference */
		mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);

		if (mp == (struct mount *)0) {
			continue;
		}
		mount_lock(mp);
		if ((mp->mnt_lflag & MNT_LDEAD) ||
		    (!noskip_unmount && (mp->mnt_lflag & MNT_LUNMOUNT))) {
			mount_unlock(mp);
			mount_iterdrop(mp);
			continue;
		}
		mount_unlock(mp);

		/* iterate over all the vnodes */
		ret = callout(mp, arg);

		/*
		 * Drop the iterref here if the callback didn't do it.
		 * Note: If cb_dropref is set the mp may no longer exist.
		 */
		if (!cb_dropref) {
			mount_iterdrop(mp);
		}

		switch (ret) {
		case VFS_RETURNED:
		case VFS_RETURNED_DONE:
			if (ret == VFS_RETURNED_DONE) {
				ret = 0;
				goto out;
			}
			break;

		case VFS_CLAIMED_DONE:
			ret = 0;
			goto out;
		case VFS_CLAIMED:
		default:
			break;
		}
		ret = 0;
	}

out:
	kfree_data(allocmem, count * sizeof(fsid_t));
	return ret;
}
7542
7543 /*
7544 * Update the vfsstatfs structure in the mountpoint.
7545 * MAC: Parameter eventtype added, indicating whether the event that
7546 * triggered this update came from user space, via a system call
7547 * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
7548 */
7549 int
vfs_update_vfsstat(mount_t mp,vfs_context_t ctx,__unused int eventtype)7550 vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
7551 {
7552 struct vfs_attr va;
7553 int error;
7554
7555 /*
7556 * Request the attributes we want to propagate into
7557 * the per-mount vfsstat structure.
7558 */
7559 VFSATTR_INIT(&va);
7560 VFSATTR_WANTED(&va, f_iosize);
7561 VFSATTR_WANTED(&va, f_blocks);
7562 VFSATTR_WANTED(&va, f_bfree);
7563 VFSATTR_WANTED(&va, f_bavail);
7564 VFSATTR_WANTED(&va, f_bused);
7565 VFSATTR_WANTED(&va, f_files);
7566 VFSATTR_WANTED(&va, f_ffree);
7567 VFSATTR_WANTED(&va, f_bsize);
7568 VFSATTR_WANTED(&va, f_fssubtype);
7569
7570 if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
7571 KAUTH_DEBUG("STAT - filesystem returned error %d", error);
7572 return error;
7573 }
7574 #if CONFIG_MACF
7575 if (eventtype == VFS_USER_EVENT) {
7576 error = mac_mount_check_getattr(ctx, mp, &va);
7577 if (error != 0) {
7578 return error;
7579 }
7580 }
7581 #endif
7582 /*
7583 * Unpack into the per-mount structure.
7584 *
7585 * We only overwrite these fields, which are likely to change:
7586 * f_blocks
7587 * f_bfree
7588 * f_bavail
7589 * f_bused
7590 * f_files
7591 * f_ffree
7592 *
7593 * And these which are not, but which the FS has no other way
7594 * of providing to us:
7595 * f_bsize
7596 * f_iosize
7597 * f_fssubtype
7598 *
7599 */
7600 if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
7601 /* 4822056 - protect against malformed server mount */
7602 mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
7603 } else {
7604 mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */
7605 }
7606 if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
7607 mp->mnt_vfsstat.f_iosize = va.f_iosize;
7608 } else {
7609 mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */
7610 }
7611 if (VFSATTR_IS_SUPPORTED(&va, f_blocks)) {
7612 mp->mnt_vfsstat.f_blocks = va.f_blocks;
7613 }
7614 if (VFSATTR_IS_SUPPORTED(&va, f_bfree)) {
7615 mp->mnt_vfsstat.f_bfree = va.f_bfree;
7616 }
7617 if (VFSATTR_IS_SUPPORTED(&va, f_bavail)) {
7618 mp->mnt_vfsstat.f_bavail = va.f_bavail;
7619 }
7620 if (VFSATTR_IS_SUPPORTED(&va, f_bused)) {
7621 mp->mnt_vfsstat.f_bused = va.f_bused;
7622 }
7623 if (VFSATTR_IS_SUPPORTED(&va, f_files)) {
7624 mp->mnt_vfsstat.f_files = va.f_files;
7625 }
7626 if (VFSATTR_IS_SUPPORTED(&va, f_ffree)) {
7627 mp->mnt_vfsstat.f_ffree = va.f_ffree;
7628 }
7629
7630 /* this is unlikely to change, but has to be queried for */
7631 if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype)) {
7632 mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
7633 }
7634
7635 return 0;
7636 }
7637
7638 int
mount_list_add(mount_t mp)7639 mount_list_add(mount_t mp)
7640 {
7641 int res;
7642
7643 mount_list_lock();
7644 if (get_system_inshutdown() != 0) {
7645 res = -1;
7646 } else {
7647 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
7648 nummounts++;
7649 res = 0;
7650 }
7651 mount_list_unlock();
7652
7653 return res;
7654 }
7655
7656 void
mount_list_remove(mount_t mp)7657 mount_list_remove(mount_t mp)
7658 {
7659 mount_list_lock();
7660 TAILQ_REMOVE(&mountlist, mp, mnt_list);
7661 nummounts--;
7662 mp->mnt_list.tqe_next = NULL;
7663 mp->mnt_list.tqe_prev = NULL;
7664 mount_list_unlock();
7665 }
7666
/*
 * Look up a mount point by its volfs id (matched against f_fsid.val[0]).
 * Only mounts that have MNTK_PATH_FROM_ID set and are not currently being
 * unmounted are considered.  When 'withref' is set, the returned mount has
 * additionally been marked busy via vfs_busy(); NULL is returned if no
 * mount matches or the iteration/busy reference cannot be obtained.
 */
mount_t
mount_lookupby_volfsid(int volfs_id, int withref)
{
	mount_t cur_mount = (mount_t)0;
	mount_t mp;

	mount_list_lock();
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		/* skip mounts being torn down or without volfs-id support */
		if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
		    (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
		    (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
			cur_mount = mp;
			if (withref) {
				if (mount_iterref(cur_mount, 1)) {
					/* can't take an iteration ref; give up */
					cur_mount = (mount_t)0;
					mount_list_unlock();
					goto out;
				}
			}
			break;
		}
	}
	mount_list_unlock();
	if (withref && (cur_mount != (mount_t)0)) {
		mp = cur_mount;
		/*
		 * Try to mark the mount busy (non-blocking) now that the list
		 * lock is dropped; the iteration ref is dropped either way.
		 */
		if (vfs_busy(mp, LK_NOWAIT) != 0) {
			cur_mount = (mount_t)0;
		}
		mount_iterdrop(mp);
	}
out:
	return cur_mount;
}
7700
7701 mount_t
mount_list_lookupby_fsid(fsid_t * fsid,int locked,int withref)7702 mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
7703 {
7704 mount_t retmp = (mount_t)0;
7705 mount_t mp;
7706
7707 if (!locked) {
7708 mount_list_lock();
7709 }
7710 TAILQ_FOREACH(mp, &mountlist, mnt_list)
7711 if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
7712 mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
7713 retmp = mp;
7714 if (withref) {
7715 if (mount_iterref(retmp, 1)) {
7716 retmp = (mount_t)0;
7717 }
7718 }
7719 goto out;
7720 }
7721 out:
7722 if (!locked) {
7723 mount_list_unlock();
7724 }
7725 return retmp;
7726 }
7727
7728 errno_t
vnode_lookupat(const char * path,int flags,vnode_t * vpp,vfs_context_t ctx,vnode_t start_dvp)7729 vnode_lookupat(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx,
7730 vnode_t start_dvp)
7731 {
7732 struct nameidata *ndp;
7733 int error = 0;
7734 u_int32_t ndflags = 0;
7735
7736 if (ctx == NULL) {
7737 return EINVAL;
7738 }
7739
7740 ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);
7741
7742 if (flags & VNODE_LOOKUP_NOFOLLOW) {
7743 ndflags = NOFOLLOW;
7744 } else {
7745 ndflags = FOLLOW;
7746 }
7747
7748 if (flags & VNODE_LOOKUP_NOCROSSMOUNT) {
7749 ndflags |= NOCROSSMOUNT;
7750 }
7751
7752 if (flags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
7753 ndflags |= CN_NBMOUNTLOOK;
7754 }
7755
7756 /* XXX AUDITVNPATH1 needed ? */
7757 NDINIT(ndp, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE,
7758 CAST_USER_ADDR_T(path), ctx);
7759
7760 if (flags & VNODE_LOOKUP_NOFOLLOW_ANY) {
7761 ndp->ni_flag |= NAMEI_NOFOLLOW_ANY;
7762 }
7763
7764 if (start_dvp && (path[0] != '/')) {
7765 ndp->ni_dvp = start_dvp;
7766 ndp->ni_cnd.cn_flags |= USEDVP;
7767 }
7768
7769 if ((error = namei(ndp))) {
7770 goto out_free;
7771 }
7772
7773 ndp->ni_cnd.cn_flags &= ~USEDVP;
7774
7775 *vpp = ndp->ni_vp;
7776 nameidone(ndp);
7777
7778 out_free:
7779 kfree_type(struct nameidata, ndp);
7780 return error;
7781 }
7782
/*
 * Convenience wrapper around vnode_lookupat() with no starting directory
 * vnode (paths resolve from the context's usual root/cwd).
 */
errno_t
vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
{
	return vnode_lookupat(path, flags, vpp, ctx, NULLVP);
}
7788
7789 errno_t
vnode_open(const char * path,int fmode,int cmode,int flags,vnode_t * vpp,vfs_context_t ctx)7790 vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
7791 {
7792 struct nameidata *ndp = NULL;
7793 int error;
7794 u_int32_t ndflags = 0;
7795 int lflags = flags;
7796
7797 if (ctx == NULL) { /* XXX technically an error */
7798 ctx = vfs_context_current();
7799 }
7800
7801 ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);
7802
7803 if (fmode & O_NOFOLLOW) {
7804 lflags |= VNODE_LOOKUP_NOFOLLOW;
7805 }
7806
7807 if (lflags & VNODE_LOOKUP_NOFOLLOW) {
7808 ndflags = NOFOLLOW;
7809 } else {
7810 ndflags = FOLLOW;
7811 }
7812
7813 if (lflags & VNODE_LOOKUP_NOFOLLOW_ANY) {
7814 fmode |= O_NOFOLLOW_ANY;
7815 }
7816
7817 if (lflags & VNODE_LOOKUP_NOCROSSMOUNT) {
7818 ndflags |= NOCROSSMOUNT;
7819 }
7820
7821 if (lflags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
7822 ndflags |= CN_NBMOUNTLOOK;
7823 }
7824
7825 /* XXX AUDITVNPATH1 needed ? */
7826 NDINIT(ndp, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE,
7827 CAST_USER_ADDR_T(path), ctx);
7828
7829 if ((error = vn_open(ndp, fmode, cmode))) {
7830 *vpp = NULL;
7831 } else {
7832 *vpp = ndp->ni_vp;
7833 }
7834
7835 kfree_type(struct nameidata, ndp);
7836 return error;
7837 }
7838
7839 errno_t
vnode_close(vnode_t vp,int flags,vfs_context_t ctx)7840 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
7841 {
7842 int error;
7843
7844 if (ctx == NULL) {
7845 ctx = vfs_context_current();
7846 }
7847
7848 error = vn_close(vp, flags, ctx);
7849 vnode_put(vp);
7850 return error;
7851 }
7852
7853 errno_t
vnode_mtime(vnode_t vp,struct timespec * mtime,vfs_context_t ctx)7854 vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx)
7855 {
7856 struct vnode_attr va;
7857 int error;
7858
7859 VATTR_INIT(&va);
7860 VATTR_WANTED(&va, va_modify_time);
7861 error = vnode_getattr(vp, &va, ctx);
7862 if (!error) {
7863 *mtime = va.va_modify_time;
7864 }
7865 return error;
7866 }
7867
7868 errno_t
vnode_flags(vnode_t vp,uint32_t * flags,vfs_context_t ctx)7869 vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx)
7870 {
7871 struct vnode_attr va;
7872 int error;
7873
7874 VATTR_INIT(&va);
7875 VATTR_WANTED(&va, va_flags);
7876 error = vnode_getattr(vp, &va, ctx);
7877 if (!error) {
7878 *flags = va.va_flags;
7879 }
7880 return error;
7881 }
7882
7883 /*
7884 * Returns: 0 Success
7885 * vnode_getattr:???
7886 */
7887 errno_t
vnode_size(vnode_t vp,off_t * sizep,vfs_context_t ctx)7888 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
7889 {
7890 struct vnode_attr va;
7891 int error;
7892
7893 VATTR_INIT(&va);
7894 VATTR_WANTED(&va, va_data_size);
7895 error = vnode_getattr(vp, &va, ctx);
7896 if (!error) {
7897 *sizep = va.va_data_size;
7898 }
7899 return error;
7900 }
7901
7902 errno_t
vnode_setsize(vnode_t vp,off_t size,int ioflag,vfs_context_t ctx)7903 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
7904 {
7905 struct vnode_attr va;
7906
7907 VATTR_INIT(&va);
7908 VATTR_SET(&va, va_data_size, size);
7909 va.va_vaflags = ioflag & 0xffff;
7910 return vnode_setattr(vp, &va, ctx);
7911 }
7912
/*
 * Mark 'vp' dirty by setting VISDIRTY under the vnode spinlock.
 * Always returns 0.
 */
int
vnode_setdirty(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VISDIRTY;
	vnode_unlock(vp);
	return 0;
}
7921
/*
 * Clear the VISDIRTY flag on 'vp' under the vnode spinlock.
 * Always returns 0.
 */
int
vnode_cleardirty(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VISDIRTY;
	vnode_unlock(vp);
	return 0;
}
7930
7931 int
vnode_isdirty(vnode_t vp)7932 vnode_isdirty(vnode_t vp)
7933 {
7934 int dirty;
7935
7936 vnode_lock_spin(vp);
7937 dirty = (vp->v_flag & VISDIRTY) ? 1 : 0;
7938 vnode_unlock(vp);
7939
7940 return dirty;
7941 }
7942
7943 static int
vn_create_reg(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,struct vnode_attr * vap,uint32_t flags,int fmode,uint32_t * statusp,vfs_context_t ctx)7944 vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
7945 {
7946 /* Only use compound VNOP for compound operation */
7947 if (vnode_compound_open_available(dvp) && ((flags & VN_CREATE_DOOPEN) != 0)) {
7948 *vpp = NULLVP;
7949 return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, O_CREAT, fmode, statusp, vap, ctx);
7950 } else {
7951 return VNOP_CREATE(dvp, vpp, &ndp->ni_cnd, vap, ctx);
7952 }
7953 }
7954
7955 /*
7956 * Create a filesystem object of arbitrary type with arbitrary attributes in
 * the specified directory with the specified name.
7958 *
7959 * Parameters: dvp Pointer to the vnode of the directory
7960 * in which to create the object.
7961 * vpp Pointer to the area into which to
7962 * return the vnode of the created object.
7963 * cnp Component name pointer from the namei
7964 * data structure, containing the name to
7965 * use for the create object.
7966 * vap Pointer to the vnode_attr structure
7967 * describing the object to be created,
7968 * including the type of object.
7969 * flags VN_* flags controlling ACL inheritance
7970 * and whether or not authorization is to
7971 * be required for the operation.
7972 *
7973 * Returns: 0 Success
7974 * !0 errno value
7975 *
 * Implicit:	*vpp		Contains the vnode of the object that
 *				was created, if successful.
 *		*cnp		May be modified by the underlying VFS.
 *		*vap		May be modified by the underlying VFS;
 *				it may also be modified by ACL inheritance
 *				or attribute defaulting.  Note that it may
 *				be modified, even if the operation is
 *				unsuccessful.
7986 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
7987 *
7988 * Modification of '*cnp' and '*vap' by the underlying VFS is
7989 * strongly discouraged.
7990 *
7991 * XXX: This function is a 'vn_*' function; it belongs in vfs_vnops.c
7992 *
7993 * XXX: We should enummerate the possible errno values here, and where
7994 * in the code they originated.
7995 */
errno_t
vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
{
	errno_t error, old_error;
	vnode_t vp = (vnode_t)0;
	boolean_t batched;
	struct componentname *cnp;
	uint32_t defaulted;

	cnp = &ndp->ni_cnd;
	error = 0;
	/* compound (batched) create+open VNOPs manage the open themselves */
	batched = namei_compound_available(dvp, ndp) ? TRUE : FALSE;

	KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr);

	if (flags & VN_CREATE_NOINHERIT) {
		vap->va_vaflags |= VA_NOINHERIT;
	}
	if (flags & VN_CREATE_NOAUTH) {
		vap->va_vaflags |= VA_NOAUTH;
	}
	/*
	 * Handle ACL inheritance, initialize vap.
	 */
	error = vn_attribute_prepare(dvp, vap, &defaulted, ctx);
	if (error) {
		return error;
	}

	/* open-style arguments only make sense for regular files */
	if (vap->va_type != VREG && (fmode != 0 || (flags & VN_CREATE_DOOPEN) || statusp)) {
		panic("Open parameters, but not a regular file.");
	}
	if ((fmode != 0) && ((flags & VN_CREATE_DOOPEN) == 0)) {
		panic("Mode for open, but not trying to open...");
	}


	/*
	 * Create the requested node.
	 */
	switch (vap->va_type) {
	case VREG:
		error = vn_create_reg(dvp, vpp, ndp, vap, flags, fmode, statusp, ctx);
		break;
	case VDIR:
		error = vn_mkdir(dvp, vpp, ndp, vap, ctx);
		break;
	case VSOCK:
	case VFIFO:
	case VBLK:
	case VCHR:
		error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
		break;
	default:
		panic("vnode_create: unknown vtype %d", vap->va_type);
	}
	if (error != 0) {
		KAUTH_DEBUG("%p CREATE - error %d returned by filesystem", dvp, error);
		goto out;
	}

	vp = *vpp;
	/* remember the (successful) create status before post-processing */
	old_error = error;

	/*
	 * If some of the requested attributes weren't handled by the VNOP,
	 * use our fallback code.
	 */
	if ((error == 0) && !VATTR_ALL_SUPPORTED(vap) && *vpp) {
		KAUTH_DEBUG(" CREATE - doing fallback with ACL %p", vap->va_acl);
		error = vnode_setattr_fallback(*vpp, vap, ctx);
	}

#if CONFIG_MACF
	if ((error == 0) && !(flags & VN_CREATE_NOLABEL)) {
		error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
	}
#endif

	/* post-create step failed: undo what we can */
	if ((error != 0) && (vp != (vnode_t)0)) {
		/* If we've done a compound open, close */
		if (batched && (old_error == 0) && (vap->va_type == VREG)) {
			VNOP_CLOSE(vp, fmode, ctx);
		}

		/* Need to provide notifications if a create succeeded */
		if (!batched) {
			*vpp = (vnode_t) 0;
			vnode_put(vp);
			vp = NULLVP;
		}
	}

	/*
	 * For creation VNOPs, this is the equivalent of
	 * lookup_handle_found_vnode.
	 */
	if (kdebug_enable && *vpp) {
		kdebug_lookup(*vpp, cnp);
	}

out:
	/* restore/free any ACL state installed by vn_attribute_prepare() */
	vn_attribute_cleanup(vap, defaulted);

	return error;
}
8102
/* kauth scope handle used for all vnode authorization requests */
static kauth_scope_t vnode_scope;
static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
static int vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
    vnode_t vp, vnode_t dvp, int *errorp);

/*
 * Working state for a single vnode authorization request: the target
 * vnode (and optionally its parent directory) with their attributes,
 * the requesting context, plus a small cache of per-request results in
 * 'flags' whose validity is tracked bit-for-bit in 'flags_valid'.
 */
typedef struct _vnode_authorize_context {
	vnode_t vp;
	struct vnode_attr *vap;
	vnode_t dvp;
	struct vnode_attr *dvap;
	vfs_context_t ctx;
	int flags;
	int flags_valid;
/* bits cached in 'flags' (validity tracked via 'flags_valid') */
#define _VAC_IS_OWNER (1<<0)
#define _VAC_IN_GROUP (1<<1)
#define _VAC_IS_DIR_OWNER (1<<2)
#define _VAC_IN_DIR_GROUP (1<<3)
#define _VAC_NO_VNODE_POINTERS (1<<4)
} *vauth_ctx;
8123
/*
 * Register the vnode scope with kauth so that vnode authorization
 * requests are dispatched to vnode_authorize_callback().
 */
void
vnode_authorize_init(void)
{
	vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
}
8129
/* bits returned via 'defaulted_fieldsp': which attributes were defaulted */
#define VATTR_PREPARE_DEFAULTED_UID 0x1
#define VATTR_PREPARE_DEFAULTED_GID 0x2
#define VATTR_PREPARE_DEFAULTED_MODE 0x4

/*
 * Prepare a vnode_attr structure for an object-creation VNOP: apply ACL
 * inheritance from the parent directory 'dvp' (unless VA_NOINHERIT is set
 * or the mount lacks extended security), then run
 * vnode_authattr_new_internal() to validate/default the remaining fields.
 * On success the caller must eventually call vn_attribute_cleanup() with
 * the returned '*defaulted_fieldsp' bits; on failure cleanup has already
 * been performed here.
 */
int
vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
{
	kauth_acl_t nacl = NULL, oacl = NULL;
	int error;

	/*
	 * Handle ACL inheritance.
	 */
	if (!(vap->va_vaflags & VA_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
		/* save the original filesec */
		if (VATTR_IS_ACTIVE(vap, va_acl)) {
			oacl = vap->va_acl;
		}

		vap->va_acl = NULL;
		if ((error = kauth_acl_inherit(dvp,
		    oacl,
		    &nacl,
		    vap->va_type == VDIR,
		    ctx)) != 0) {
			KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error);
			return error;
		}

		/*
		 * If the generated ACL is NULL, then we can save ourselves some effort
		 * by clearing the active bit.
		 */
		if (nacl == NULL) {
			VATTR_CLEAR_ACTIVE(vap, va_acl);
		} else {
			/* stash the caller's ACL so cleanup can restore it */
			vap->va_base_acl = oacl;
			VATTR_SET(vap, va_acl, nacl);
		}
	}

	/* validate and default uid/gid/mode (VA_NOAUTH skips authorization) */
	error = vnode_authattr_new_internal(dvp, vap, (vap->va_vaflags & VA_NOAUTH), defaulted_fieldsp, ctx);
	if (error) {
		vn_attribute_cleanup(vap, *defaulted_fieldsp);
	}

	return error;
}
8178
/*
 * Undo the transformations performed by vn_attribute_prepare(): restore
 * the caller's original ACL (if one was saved in va_base_acl), free the
 * inherited ACL where we own it, and clear the active bits for any
 * uid/gid/mode fields that were only defaulted (per 'defaulted_fields').
 */
void
vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields)
{
	/*
	 * If the caller supplied a filesec in vap, it has been replaced
	 * now by the post-inheritance copy. We need to put the original back
	 * and free the inherited product.
	 */
	kauth_acl_t nacl, oacl;

	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		nacl = vap->va_acl;
		oacl = vap->va_base_acl;

		if (oacl) {
			/* restore the caller's original ACL */
			VATTR_SET(vap, va_acl, oacl);
			vap->va_base_acl = NULL;
		} else {
			VATTR_CLEAR_ACTIVE(vap, va_acl);
		}

		if (nacl != NULL) {
			/*
			 * Only free the ACL buffer if 'VA_FILESEC_ACL' is not set as it
			 * should be freed by the caller or it is a post-inheritance copy.
			 */
			if (!(vap->va_vaflags & VA_FILESEC_ACL) ||
			    (oacl != NULL && nacl != oacl)) {
				kauth_acl_free(nacl);
			}
		}
	}

	/* deactivate any fields that were merely defaulted, not requested */
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_MODE) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_mode);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_GID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}
	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_UID) != 0) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
	}

	return;
}
8224
#if CONFIG_APPLEDOUBLE

/*
 * True when the filesystem backing VP stores extended attributes natively
 * (MNTK_EXTENDED_ATTRS), i.e. no "._" AppleDouble sidecar files are used.
 */
#define NATIVE_XATTR(VP)  \
	((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)

/*
 * If the component name refers to an AppleDouble sidecar file ("._name"),
 * locate the paired data file ("name") in the same directory and run the
 * MAC deleteextattr check for "com.apple.quarantine" against it.  A
 * failure to find the paired file is not an error.  If 'dvp' is NULL, the
 * parent directory is derived from 'vp'.
 */
static int
dot_underbar_check_paired_vnode(struct componentname *cnp, vnode_t vp,
    vnode_t dvp, vfs_context_t ctx)
{
	int error = 0;
	bool dvp_needs_put = false;

	/* name is not of the form "._*": nothing to pair */
	if (cnp->cn_namelen <= 2 || cnp->cn_nameptr[0] != '.' || cnp->cn_nameptr[1] != '_') {
		return 0;
	}

	if (!dvp) {
		if ((dvp = vnode_getparent(vp)) == NULLVP) {
			return 0;
		}
		dvp_needs_put = true;
	}

	vnode_t dupairedvp = NULLVP;
	/* temporarily NUL-terminate the component for the relative lookup */
	char lastchar = cnp->cn_nameptr[cnp->cn_namelen];

	cnp->cn_nameptr[cnp->cn_namelen] = '\0';
	/* strip the "._" prefix to find the paired data file */
	error = vnode_lookupat(cnp->cn_nameptr + (sizeof("._") - 1), 0,
	    &dupairedvp, ctx, dvp);
	cnp->cn_nameptr[cnp->cn_namelen] = lastchar;
	if (dvp_needs_put) {
		vnode_put(dvp);
		dvp = NULLVP;
	}
	if (!error && dupairedvp) {
		error = mac_vnode_check_deleteextattr(ctx, dupairedvp,
		    "com.apple.quarantine");
		vnode_put(dupairedvp);
		dupairedvp = NULLVP;
	} else {
		/* no paired file found: treat as success */
		error = 0;
	}

	return error;
}
#endif /* CONFIG_APPLEDOUBLE */
8271
/*
 * Authorize removal of 'vp' from directory 'dvp': rejects directory
 * unlinks (unless the filesystem advertises MNTK_DIR_HARDLINKS), runs MAC
 * and AppleDouble pairing checks, authorizes the resource fork when
 * CN_WANTSRSRCFORK is set, and finally checks KAUTH_VNODE_DELETE.
 * Returns 0 if the unlink is permitted, else an errno.
 */
int
vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved)
{
#if (!CONFIG_MACF && !NAMEDRSRCFORK)
#pragma unused(cnp)
#endif
	int error = 0;

	/*
	 * Normally, unlinking of directories is not supported.
	 * However, some file systems may have limited support.
	 */
	if ((vp->v_type == VDIR) &&
	    !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) {
		return EPERM;   /* POSIX */
	}

	/* authorize the delete operation */
#if CONFIG_MACF
	if (!error) {
		error = mac_vnode_check_unlink(ctx, dvp, vp, cnp);
#if CONFIG_APPLEDOUBLE
		/* on non-native-xattr filesystems, also vet the "._" pair */
		if (!error && !NATIVE_XATTR(dvp)) {
			error = dot_underbar_check_paired_vnode(cnp, vp, dvp, ctx);
		}
#endif /* CONFIG_APPLEDOUBLE */
	}
#endif /* MAC */

	/* authorize file's resource fork */
#if NAMEDRSRCFORK
	if (!error && cnp && (cnp->cn_flags & CN_WANTSRSRCFORK)) {
		/* If CN_WANTSRSRCFORK is set, that implies that 'dvp' is the base file and 'vp' is the namedstream file */
#if CONFIG_MACF
		error = mac_vnode_check_deleteextattr(ctx, dvp, XATTR_RESOURCEFORK_NAME);
#endif /* MAC */
		if (!error) {
			error = vnode_authorize(dvp, NULL, KAUTH_VNODE_WRITE_EXTATTRIBUTES, ctx);
		}
	}
#endif /* NAMEDRSRCFORK */

	if (!error) {
		error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
	}

	return error;
}
8320
/*
 * Authorize an open(2) of an existing vnode 'vp' with open flags 'fmode'.
 * Performs type sanity checks (writes to directories, O_DIRECTORY
 * mismatch, sockets, O_NOFOLLOW on symlinks, trailing slash on
 * non-directories), MAC and AppleDouble checks, resource-fork
 * authorization when requested, and finally the kauth action derived
 * from the read/write/exec/search bits.  Returns 0 if the open is
 * permitted, else an errno.
 */
int
vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved)
{
	/* Open of existing case */
	kauth_action_t action;
	int error = 0;
	if (cnp->cn_ndp == NULL) {
		panic("NULL ndp");
	}
	if (reserved != NULL) {
		panic("reserved not NULL.");
	}

#if CONFIG_MACF
	/* XXX may do duplicate work here, but ignore that for now (idempotent) */
	if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) {
		error = vnode_label(vnode_mount(vp), NULL, vp, NULL, 0, ctx);
		if (error) {
			return error;
		}
	}
#endif

	if (vnode_isdir(vp)) {
		if ((fmode & (FWRITE | O_TRUNC)) ||             /* disallow write operations on directories */
		    ((fmode & FSEARCH) && !(fmode & O_DIRECTORY))) {
			return EISDIR;
		}
	} else {
		if (fmode & O_DIRECTORY) {
			return ENOTDIR;
		}

		if (vp->v_type == VSOCK && vp->v_tag != VT_FDESC) {
			return EOPNOTSUPP;      /* Operation not supported on socket */
		}

		if (vp->v_type == VLNK && (fmode & O_NOFOLLOW) != 0) {
			return ELOOP;   /* O_NOFOLLOW was specified and the target is a symbolic link */
		}

		/* "path/" on a non-directory cannot succeed */
		if (cnp->cn_ndp->ni_flag & NAMEI_TRAILINGSLASH) {
			return ENOTDIR;
		}

		if (!vnode_isreg(vp) && (fmode & FEXEC)) {
			return EACCES;
		}
	}

#if CONFIG_MACF
	/* If a file being opened is a shadow file containing
	 * namedstream data, ignore the macf checks because it
	 * is a kernel internal file and access should always
	 * be allowed.
	 */
	if (!(vnode_isshadow(vp) && vnode_isnamedstream(vp))) {
		error = mac_vnode_check_open(ctx, vp, fmode);
		if (error) {
			return error;
		}
	}
#if CONFIG_APPLEDOUBLE
	/* writes to "._" sidecars on non-native-xattr filesystems get the
	 * paired-vnode quarantine check */
	if (fmode & (FWRITE | O_TRUNC) && !NATIVE_XATTR(vp)) {
		error = dot_underbar_check_paired_vnode(cnp, vp, NULLVP, ctx);
		if (error) {
			return error;
		}
	}
#endif /* CONFIG_APPLEDOUBLE */
#endif

	/* authorize file's resource fork */
#if NAMEDRSRCFORK
	if (cnp && (cnp->cn_flags & CN_WANTSRSRCFORK)) {
		/* If CN_WANTSRSRCFORK is set, that implies that 'pvp' is the base file and 'vp' is the namedstream file */
		vnode_t pvp = vnode_getparent(vp);
		if (pvp == NULLVP) {
			return ENOENT;
		}

#if CONFIG_MACF
		error = mac_vnode_check_getextattr(ctx, pvp, XATTR_RESOURCEFORK_NAME, NULL);
		if (error) {
			vnode_put(pvp);
			return error;
		}
#endif /* MAC */

		/* authorize extattr access on the base file per open mode */
		action = 0;
		if (fmode & FREAD) {
			action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
		}
		if (fmode & (FWRITE | O_TRUNC)) {
			action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
		}
		error = vnode_authorize(pvp, NULL, action, ctx);
		if (error) {
			vnode_put(pvp);
			return error;
		}
		vnode_put(pvp);
	}
#endif /* NAMEDRSRCFORK */

	/* compute action to be authorized */
	action = 0;
	if (fmode & FREAD) {
		action |= KAUTH_VNODE_READ_DATA;
	}
	if (fmode & (FWRITE | O_TRUNC)) {
		/*
		 * If we are writing, appending, and not truncating,
		 * indicate that we are appending so that if the
		 * UF_APPEND or SF_APPEND bits are set, we do not deny
		 * the open.
		 */
		if ((fmode & O_APPEND) && !(fmode & O_TRUNC)) {
			action |= KAUTH_VNODE_APPEND_DATA;
		} else {
			action |= KAUTH_VNODE_WRITE_DATA;
		}
	}
	if (fmode & (FSEARCH | FEXEC)) {
		if (vnode_isdir(vp)) {
			action |= KAUTH_VNODE_SEARCH;
		} else {
			action |= KAUTH_VNODE_EXECUTE;
		}
	}
	error = vnode_authorize(vp, NULL, action, ctx);
#if NAMEDSTREAMS
	if (error == EACCES) {
		/*
		 * Shadow files may exist on-disk with a different UID/GID
		 * than that of the current context. Verify that this file
		 * is really a shadow file. If it was created successfully
		 * then it should be authorized.
		 */
		if (vnode_isshadow(vp) && vnode_isnamedstream(vp)) {
			error = vnode_verifynamedstream(vp);
		}
	}
#endif

	return error;
}
8468
/*
 * Authorize creation of a new object named by 'cnp' in directory 'dvp'
 * with attributes 'vap': validates the creation path when the lookup was
 * left unfinished, authorizes the resource fork if requested, runs the
 * MAC create check, and finally checks KAUTH_VNODE_ADD_FILE (plus any
 * extattr bits) on the directory.  Returns 0 if permitted, else an errno.
 */
int
vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
{
#if !CONFIG_MACF
#pragma unused(vap)
#endif
	/* Creation case */
	int error;
	kauth_action_t action = KAUTH_VNODE_ADD_FILE;

	if (cnp->cn_ndp == NULL) {
		panic("NULL cn_ndp");
	}
	if (reserved != NULL) {
		panic("reserved not NULL.");
	}

	/* Only validate path for creation if we didn't do a complete lookup */
	if (cnp->cn_ndp->ni_flag & NAMEI_UNFINISHED) {
		error = lookup_validate_creation_path(cnp->cn_ndp);
		if (error) {
			return error;
		}
	}

	/* authorize file's resource fork */
#if NAMEDRSRCFORK
	if (cnp && (cnp->cn_flags & CN_WANTSRSRCFORK)) {
		/* If CN_WANTSRSRCFORK is set, that implies that 'dvp' is the base file and 'vp' is the namedstream file */
#if CONFIG_MACF
		error = mac_vnode_check_setextattr(ctx, dvp, XATTR_RESOURCEFORK_NAME, NULL);
		if (error) {
			return error;
		}
#endif /* MAC */

		action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
	}
#endif /* NAMEDRSRCFORK */

#if CONFIG_MACF
	error = mac_vnode_check_create(ctx, dvp, cnp, vap);
	if (error) {
		return error;
	}
#endif /* CONFIG_MACF */

	return vnode_authorize(dvp, NULL, action, ctx);
}
8518
/*
 * Authorize a plain rename; thin wrapper over vn_authorize_renamex()
 * with no rename flags.
 */
int
vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
    struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
    vfs_context_t ctx, void *reserved)
{
	return vn_authorize_renamex(fdvp, fvp, fcnp, tdvp, tvp, tcnp, ctx, 0, reserved);
}
8526
/*
 * Authorize a rename with flags; thin wrapper over
 * vn_authorize_renamex_with_paths() with no path strings (so no
 * WILL_RENAME fileop notifications are posted).
 */
int
vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
    struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
    vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
{
	return vn_authorize_renamex_with_paths(fdvp, fvp, fcnp, NULL, tdvp, tvp, tcnp, NULL, ctx, flags, reserved);
}
8534
8535 int
vn_authorize_renamex_with_paths(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,const char * from_path,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,const char * to_path,vfs_context_t ctx,vfs_rename_flags_t flags,void * reserved)8536 vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path,
8537 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path,
8538 vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
8539 {
8540 int error = 0;
8541 int moving = 0;
8542 bool swap = flags & VFS_RENAME_SWAP;
8543
8544 if (reserved != NULL) {
8545 panic("Passed something other than NULL as reserved field!");
8546 }
8547
8548 /*
8549 * Avoid renaming "." and "..".
8550 *
8551 * XXX No need to check for this in the FS. We should always have the leaves
8552 * in VFS in this case.
8553 */
8554 if (fvp->v_type == VDIR &&
8555 ((fdvp == fvp) ||
8556 (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
8557 ((fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT))) {
8558 error = EINVAL;
8559 goto out;
8560 }
8561
8562 if (tvp == NULLVP && vnode_compound_rename_available(tdvp)) {
8563 error = lookup_validate_creation_path(tcnp->cn_ndp);
8564 if (error) {
8565 goto out;
8566 }
8567 }
8568
8569 /***** <MACF> *****/
8570 #if CONFIG_MACF
8571 if (swap) {
8572 error = mac_vnode_check_rename_swap(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
8573 } else {
8574 error = mac_vnode_check_rename(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
8575 }
8576 #if CONFIG_APPLEDOUBLE
8577 if (!error && !NATIVE_XATTR(fdvp)) {
8578 error = dot_underbar_check_paired_vnode(fcnp, fvp, fdvp, ctx);
8579 }
8580 /* Currently no Filesystem that does not support native xattrs supports rename swap */
8581 if (!error && swap && !NATIVE_XATTR(tdvp)) {
8582 error = dot_underbar_check_paired_vnode(tcnp, tvp, tdvp, ctx);
8583 }
8584 #endif /* CONFIG_APPLEDOUBLE */
8585 if (error) {
8586 goto out;
8587 }
8588 #endif
8589 /***** </MACF> *****/
8590
8591 /***** <MiscChecks> *****/
8592 if (tvp != NULL) {
8593 if (!swap) {
8594 if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
8595 error = ENOTDIR;
8596 goto out;
8597 } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
8598 error = EISDIR;
8599 goto out;
8600 }
8601 }
8602 } else if (swap) {
8603 /*
8604 * Caller should have already checked this and returned
8605 * ENOENT. If we send back ENOENT here, caller will retry
8606 * which isn't what we want so we send back EINVAL here
8607 * instead.
8608 */
8609 error = EINVAL;
8610 goto out;
8611 }
8612
8613 if (fvp == tdvp) {
8614 error = EINVAL;
8615 goto out;
8616 }
8617
8618 /*
8619 * The following edge case is caught here:
8620 * (to cannot be a descendent of from)
8621 *
8622 * o fdvp
8623 * /
8624 * /
8625 * o fvp
8626 * \
8627 * \
8628 * o tdvp
8629 * /
8630 * /
8631 * o tvp
8632 */
8633 if (tdvp->v_parent == fvp) {
8634 error = EINVAL;
8635 goto out;
8636 }
8637
8638 if (swap && fdvp->v_parent == tvp) {
8639 error = EINVAL;
8640 goto out;
8641 }
8642 /***** </MiscChecks> *****/
8643
8644 /***** <Kauth> *****/
8645
8646 /*
8647 * As part of the Kauth step, we call out to allow 3rd-party
8648 * fileop notification of "about to rename". This is needed
8649 * in the event that 3rd-parties need to know that the DELETE
8650 * authorization is actually part of a rename. It's important
8651 * that we guarantee that the DELETE call-out will always be
8652 * made if the WILL_RENAME call-out is made. Another fileop
8653 * call-out will be performed once the operation is completed.
8654 * We can ignore the result of kauth_authorize_fileop().
8655 *
8656 * N.B. We are passing the vnode and *both* paths to each
8657 * call; kauth_authorize_fileop() extracts the "from" path
8658 * when posting a KAUTH_FILEOP_WILL_RENAME notification.
8659 * As such, we only post these notifications if all of the
8660 * information we need is provided.
8661 */
8662
8663 if (swap) {
8664 kauth_action_t f = 0, t = 0;
8665
8666 /*
8667 * Directories changing parents need ...ADD_SUBDIR... to
8668 * permit changing ".."
8669 */
8670 if (fdvp != tdvp) {
8671 if (vnode_isdir(fvp)) {
8672 f = KAUTH_VNODE_ADD_SUBDIRECTORY;
8673 }
8674 if (vnode_isdir(tvp)) {
8675 t = KAUTH_VNODE_ADD_SUBDIRECTORY;
8676 }
8677 }
8678 if (to_path != NULL) {
8679 kauth_authorize_fileop(vfs_context_ucred(ctx),
8680 KAUTH_FILEOP_WILL_RENAME,
8681 (uintptr_t)fvp,
8682 (uintptr_t)to_path);
8683 }
8684 error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | f, ctx);
8685 if (error) {
8686 goto out;
8687 }
8688 if (from_path != NULL) {
8689 kauth_authorize_fileop(vfs_context_ucred(ctx),
8690 KAUTH_FILEOP_WILL_RENAME,
8691 (uintptr_t)tvp,
8692 (uintptr_t)from_path);
8693 }
8694 error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE | t, ctx);
8695 if (error) {
8696 goto out;
8697 }
8698 f = vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
8699 t = vnode_isdir(tvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
8700 if (fdvp == tdvp) {
8701 error = vnode_authorize(fdvp, NULL, f | t, ctx);
8702 } else {
8703 error = vnode_authorize(fdvp, NULL, t, ctx);
8704 if (error) {
8705 goto out;
8706 }
8707 error = vnode_authorize(tdvp, NULL, f, ctx);
8708 }
8709 if (error) {
8710 goto out;
8711 }
8712 } else {
8713 error = 0;
8714 if ((tvp != NULL) && vnode_isdir(tvp)) {
8715 if (tvp != fdvp) {
8716 moving = 1;
8717 }
8718 } else if (tdvp != fdvp) {
8719 moving = 1;
8720 }
8721
8722 /*
8723 * must have delete rights to remove the old name even in
8724 * the simple case of fdvp == tdvp.
8725 *
	 * If fvp is a directory, and we are changing its parent,
8727 * then we also need rights to rewrite its ".." entry as well.
8728 */
8729 if (to_path != NULL) {
8730 kauth_authorize_fileop(vfs_context_ucred(ctx),
8731 KAUTH_FILEOP_WILL_RENAME,
8732 (uintptr_t)fvp,
8733 (uintptr_t)to_path);
8734 }
8735 if (vnode_isdir(fvp)) {
8736 if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
8737 goto out;
8738 }
8739 } else {
8740 if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0) {
8741 goto out;
8742 }
8743 }
8744 if (moving) {
8745 /* moving into tdvp or tvp, must have rights to add */
8746 if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp,
8747 NULL,
8748 vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE,
8749 ctx)) != 0) {
8750 goto out;
8751 }
8752 } else {
8753 /* node staying in same directory, must be allowed to add new name */
8754 if ((error = vnode_authorize(fdvp, NULL,
8755 vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0) {
8756 goto out;
8757 }
8758 }
8759 /* overwriting tvp */
8760 if ((tvp != NULL) && !vnode_isdir(tvp) &&
8761 ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) {
8762 goto out;
8763 }
8764 }
8765
8766 /***** </Kauth> *****/
8767
8768 /* XXX more checks? */
8769 out:
8770 return error;
8771 }
8772
8773 int
vn_authorize_mkdir(vnode_t dvp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx,void * reserved)8774 vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
8775 {
8776 #if !CONFIG_MACF
8777 #pragma unused(vap)
8778 #endif
8779 int error;
8780
8781 if (reserved != NULL) {
8782 panic("reserved not NULL in vn_authorize_mkdir()");
8783 }
8784
8785 /* XXX A hack for now, to make shadow files work */
8786 if (cnp->cn_ndp == NULL) {
8787 return 0;
8788 }
8789
8790 if (vnode_compound_mkdir_available(dvp)) {
8791 error = lookup_validate_creation_path(cnp->cn_ndp);
8792 if (error) {
8793 goto out;
8794 }
8795 }
8796
8797 #if CONFIG_MACF
8798 error = mac_vnode_check_create(ctx,
8799 dvp, cnp, vap);
8800 if (error) {
8801 goto out;
8802 }
8803 #endif
8804
8805 /* authorize addition of a directory to the parent */
8806 if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
8807 goto out;
8808 }
8809
8810 out:
8811 return error;
8812 }
8813
8814 int
vn_authorize_rmdir(vnode_t dvp,vnode_t vp,struct componentname * cnp,vfs_context_t ctx,void * reserved)8815 vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved)
8816 {
8817 #if CONFIG_MACF
8818 int error;
8819 #else
8820 #pragma unused(cnp)
8821 #endif
8822 if (reserved != NULL) {
8823 panic("Non-NULL reserved argument to vn_authorize_rmdir()");
8824 }
8825
8826 if (vp->v_type != VDIR) {
8827 /*
8828 * rmdir only deals with directories
8829 */
8830 return ENOTDIR;
8831 }
8832
8833 if (dvp == vp) {
8834 /*
8835 * No rmdir "." please.
8836 */
8837 return EINVAL;
8838 }
8839
8840 #if CONFIG_MACF
8841 error = mac_vnode_check_unlink(ctx, dvp,
8842 vp, cnp);
8843 if (error) {
8844 return error;
8845 }
8846 #endif
8847
8848 return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
8849 }
8850
8851 /*
8852 * Authorizer for directory cloning. This does not use vnodes but instead
8853 * uses prefilled vnode attributes from the filesystem.
8854 *
8855 * The same function is called to set up the attributes required, perform the
8856 * authorization and cleanup (if required)
8857 */
int
vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action,
    struct vnode_attr *dvap, __unused vnode_t sdvp, mount_t mp,
    dir_clone_authorizer_op_t vattr_op, uint32_t flags, vfs_context_t ctx,
    __unused void *reserved)
{
	int error;
	int is_suser = vfs_context_issuser(ctx);

	/*
	 * OP_VATTR_SETUP: mark in vap (and dvap, when supplied) which
	 * attributes the filesystem must pre-fill before the subsequent
	 * authorization call.
	 */
	if (vattr_op == OP_VATTR_SETUP) {
		VATTR_INIT(vap);

		/*
		 * When ACL inheritance is implemented, both vap->va_acl and
		 * dvap->va_acl will be required (even as superuser).
		 */
		VATTR_WANTED(vap, va_type);
		VATTR_WANTED(vap, va_mode);
		VATTR_WANTED(vap, va_flags);
		VATTR_WANTED(vap, va_uid);
		VATTR_WANTED(vap, va_gid);
		if (dvap) {
			VATTR_INIT(dvap);
			VATTR_WANTED(dvap, va_flags);
		}

		if (!is_suser) {
			/*
			 * If not superuser, we have to evaluate ACLs and
			 * need the target directory gid to set the initial
			 * gid of the new object.
			 */
			VATTR_WANTED(vap, va_acl);
			if (dvap) {
				VATTR_WANTED(dvap, va_gid);
			}
		} else if (dvap && (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
			/*
			 * Superuser not copying ownership still needs the
			 * parent gid to derive the new object's group below.
			 */
			VATTR_WANTED(dvap, va_gid);
		}
		return 0;
	} else if (vattr_op == OP_VATTR_CLEANUP) {
		return 0; /* Nothing to do for now */
	}

	/* Neither setup nor cleanup: perform the actual authorization. */
	/* dvap isn't used for authorization */
	error = vnode_attr_authorize(vap, NULL, mp, action, ctx);

	if (error) {
		return error;
	}

	/*
	 * vn_attribute_prepare should be able to accept attributes as well as
	 * vnodes but for now we do this inline.
	 */
	if (!is_suser || (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
		/*
		 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit
		 * owner is set, that owner takes ownership of all new files.
		 */
		if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
		    (mp->mnt_fsowner != KAUTH_UID_NONE)) {
			VATTR_SET(vap, va_uid, mp->mnt_fsowner);
		} else {
			/* default owner is current user */
			VATTR_SET(vap, va_uid,
			    kauth_cred_getuid(vfs_context_ucred(ctx)));
		}

		if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
		    (mp->mnt_fsgroup != KAUTH_GID_NONE)) {
			VATTR_SET(vap, va_gid, mp->mnt_fsgroup);
		} else {
			/*
			 * default group comes from parent object,
			 * fallback to current user
			 *
			 * NOTE(review): dvap is dereferenced unconditionally
			 * here and in the va_flags inheritance below, unlike
			 * the NULL-checked setup phase; callers on this path
			 * appear to always supply dvap -- confirm.
			 */
			if (VATTR_IS_SUPPORTED(dvap, va_gid)) {
				VATTR_SET(vap, va_gid, dvap->va_gid);
			} else {
				VATTR_SET(vap, va_gid,
				    kauth_cred_getgid(vfs_context_ucred(ctx)));
			}
		}
	}

	/* Inherit SF_RESTRICTED bit from destination directory only */
	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		VATTR_SET(vap, va_flags,
		    ((vap->va_flags & ~(UF_DATAVAULT | SF_RESTRICTED)))); /* Turn off from source */
		if (VATTR_IS_ACTIVE(dvap, va_flags)) {
			VATTR_SET(vap, va_flags,
			    vap->va_flags | (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
		}
	} else if (VATTR_IS_ACTIVE(dvap, va_flags)) {
		VATTR_SET(vap, va_flags, (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
	}

	return 0;
}
8958
8959
8960 /*
8961 * Authorize an operation on a vnode.
8962 *
8963 * This is KPI, but here because it needs vnode_scope.
8964 *
8965 * Returns: 0 Success
8966 * kauth_authorize_action:EPERM ...
8967 * xlate => EACCES Permission denied
8968 * kauth_authorize_action:0 Success
8969 * kauth_authorize_action: Depends on callback return; this is
8970 * usually only vnode_authorize_callback(),
8971 * but may include other listerners, if any
8972 * exist.
8973 * EROFS
8974 * EACCES
8975 * EPERM
8976 * ???
8977 */
8978 int
vnode_authorize(vnode_t vp,vnode_t dvp,kauth_action_t action,vfs_context_t ctx)8979 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
8980 {
8981 int error, result;
8982
8983 /*
8984 * We can't authorize against a dead vnode; allow all operations through so that
8985 * the correct error can be returned.
8986 */
8987 if (vp->v_type == VBAD) {
8988 return 0;
8989 }
8990
8991 error = 0;
8992 result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
8993 (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
8994 if (result == EPERM) { /* traditional behaviour */
8995 result = EACCES;
8996 }
8997 /* did the lower layers give a better error return? */
8998 if ((result != 0) && (error != 0)) {
8999 return error;
9000 }
9001 return result;
9002 }
9003
9004 /*
9005 * Test for vnode immutability.
9006 *
9007 * The 'append' flag is set when the authorization request is constrained
9008 * to operations which only request the right to append to a file.
9009 *
9010 * The 'ignore' flag is set when an operation modifying the immutability flags
9011 * is being authorized. We check the system securelevel to determine which
9012 * immutability flags we can ignore.
9013 */
9014 static int
vnode_immutable(struct vnode_attr * vap,int append,int ignore)9015 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
9016 {
9017 int mask;
9018
9019 /* start with all bits precluding the operation */
9020 mask = IMMUTABLE | APPEND;
9021
9022 /* if appending only, remove the append-only bits */
9023 if (append) {
9024 mask &= ~APPEND;
9025 }
9026
9027 /* ignore only set when authorizing flags changes */
9028 if (ignore) {
9029 if (securelevel <= 0) {
9030 /* in insecure state, flags do not inhibit changes */
9031 mask = 0;
9032 } else {
9033 /* in secure state, user flags don't inhibit */
9034 mask &= ~(UF_IMMUTABLE | UF_APPEND);
9035 }
9036 }
9037 KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
9038 if ((vap->va_flags & mask) != 0) {
9039 return EPERM;
9040 }
9041 return 0;
9042 }
9043
9044 static int
vauth_node_owner(struct vnode_attr * vap,kauth_cred_t cred)9045 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
9046 {
9047 int result;
9048
9049 /* default assumption is not-owner */
9050 result = 0;
9051
9052 /*
9053 * If the filesystem has given us a UID, we treat this as authoritative.
9054 */
9055 if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
9056 result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
9057 }
9058 /* we could test the owner UUID here if we had a policy for it */
9059
9060 return result;
9061 }
9062
9063 /*
9064 * vauth_node_group
9065 *
9066 * Description: Ask if a cred is a member of the group owning the vnode object
9067 *
9068 * Parameters: vap vnode attribute
9069 * vap->va_gid group owner of vnode object
9070 * cred credential to check
9071 * ismember pointer to where to put the answer
9072 * idontknow Return this if we can't get an answer
9073 *
9074 * Returns: 0 Success
9075 * idontknow Can't get information
9076 * kauth_cred_ismember_gid:? Error from kauth subsystem
9077 * kauth_cred_ismember_gid:? Error from kauth subsystem
9078 */
9079 static int
vauth_node_group(struct vnode_attr * vap,kauth_cred_t cred,int * ismember,int idontknow)9080 vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
9081 {
9082 int error;
9083 int result;
9084
9085 error = 0;
9086 result = 0;
9087
9088 /*
9089 * The caller is expected to have asked the filesystem for a group
9090 * at some point prior to calling this function. The answer may
9091 * have been that there is no group ownership supported for the
9092 * vnode object, in which case we return
9093 */
9094 if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
9095 error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
9096 /*
9097 * Credentials which are opted into external group membership
9098 * resolution which are not known to the external resolver
9099 * will result in an ENOENT error. We translate this into
9100 * the appropriate 'idontknow' response for our caller.
9101 *
9102 * XXX We do not make a distinction here between an ENOENT
9103 * XXX arising from a response from the external resolver,
9104 * XXX and an ENOENT which is internally generated. This is
9105 * XXX a deficiency of the published kauth_cred_ismember_gid()
9106 * XXX KPI which can not be overcome without new KPI. For
9107 * XXX all currently known cases, however, this wil result
9108 * XXX in correct behaviour.
9109 */
9110 if (error == ENOENT) {
9111 error = idontknow;
9112 }
9113 }
9114 /*
9115 * XXX We could test the group UUID here if we had a policy for it,
9116 * XXX but this is problematic from the perspective of synchronizing
9117 * XXX group UUID and POSIX GID ownership of a file and keeping the
9118 * XXX values coherent over time. The problem is that the local
9119 * XXX system will vend transient group UUIDs for unknown POSIX GID
9120 * XXX values, and these are not persistent, whereas storage of values
9121 * XXX is persistent. One potential solution to this is a local
9122 * XXX (persistent) replica of remote directory entries and vended
9123 * XXX local ids in a local directory server (think in terms of a
9124 * XXX caching DNS server).
9125 */
9126
9127 if (!error) {
9128 *ismember = result;
9129 }
9130 return error;
9131 }
9132
9133 static int
vauth_file_owner(vauth_ctx vcp)9134 vauth_file_owner(vauth_ctx vcp)
9135 {
9136 int result;
9137
9138 if (vcp->flags_valid & _VAC_IS_OWNER) {
9139 result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
9140 } else {
9141 result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
9142
9143 /* cache our result */
9144 vcp->flags_valid |= _VAC_IS_OWNER;
9145 if (result) {
9146 vcp->flags |= _VAC_IS_OWNER;
9147 } else {
9148 vcp->flags &= ~_VAC_IS_OWNER;
9149 }
9150 }
9151 return result;
9152 }
9153
9154
9155 /*
9156 * vauth_file_ingroup
9157 *
9158 * Description: Ask if a user is a member of the group owning the directory
9159 *
9160 * Parameters: vcp The vnode authorization context that
9161 * contains the user and directory info
9162 * vcp->flags_valid Valid flags
9163 * vcp->flags Flags values
9164 * vcp->vap File vnode attributes
9165 * vcp->ctx VFS Context (for user)
9166 * ismember pointer to where to put the answer
9167 * idontknow Return this if we can't get an answer
9168 *
9169 * Returns: 0 Success
9170 * vauth_node_group:? Error from vauth_node_group()
9171 *
9172 * Implicit returns: *ismember 0 The user is not a group member
9173 * 1 The user is a group member
9174 */
9175 static int
vauth_file_ingroup(vauth_ctx vcp,int * ismember,int idontknow)9176 vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
9177 {
9178 int error;
9179
9180 /* Check for a cached answer first, to avoid the check if possible */
9181 if (vcp->flags_valid & _VAC_IN_GROUP) {
9182 *ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
9183 error = 0;
9184 } else {
9185 /* Otherwise, go look for it */
9186 error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);
9187
9188 if (!error) {
9189 /* cache our result */
9190 vcp->flags_valid |= _VAC_IN_GROUP;
9191 if (*ismember) {
9192 vcp->flags |= _VAC_IN_GROUP;
9193 } else {
9194 vcp->flags &= ~_VAC_IN_GROUP;
9195 }
9196 }
9197 }
9198 return error;
9199 }
9200
9201 static int
vauth_dir_owner(vauth_ctx vcp)9202 vauth_dir_owner(vauth_ctx vcp)
9203 {
9204 int result;
9205
9206 if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
9207 result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
9208 } else {
9209 result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
9210
9211 /* cache our result */
9212 vcp->flags_valid |= _VAC_IS_DIR_OWNER;
9213 if (result) {
9214 vcp->flags |= _VAC_IS_DIR_OWNER;
9215 } else {
9216 vcp->flags &= ~_VAC_IS_DIR_OWNER;
9217 }
9218 }
9219 return result;
9220 }
9221
9222 /*
9223 * vauth_dir_ingroup
9224 *
9225 * Description: Ask if a user is a member of the group owning the directory
9226 *
9227 * Parameters: vcp The vnode authorization context that
9228 * contains the user and directory info
9229 * vcp->flags_valid Valid flags
9230 * vcp->flags Flags values
9231 * vcp->dvap Dir vnode attributes
9232 * vcp->ctx VFS Context (for user)
9233 * ismember pointer to where to put the answer
9234 * idontknow Return this if we can't get an answer
9235 *
9236 * Returns: 0 Success
9237 * vauth_node_group:? Error from vauth_node_group()
9238 *
9239 * Implicit returns: *ismember 0 The user is not a group member
9240 * 1 The user is a group member
9241 */
9242 static int
vauth_dir_ingroup(vauth_ctx vcp,int * ismember,int idontknow)9243 vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
9244 {
9245 int error;
9246
9247 /* Check for a cached answer first, to avoid the check if possible */
9248 if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
9249 *ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
9250 error = 0;
9251 } else {
9252 /* Otherwise, go look for it */
9253 error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);
9254
9255 if (!error) {
9256 /* cache our result */
9257 vcp->flags_valid |= _VAC_IN_DIR_GROUP;
9258 if (*ismember) {
9259 vcp->flags |= _VAC_IN_DIR_GROUP;
9260 } else {
9261 vcp->flags &= ~_VAC_IN_DIR_GROUP;
9262 }
9263 }
9264 }
9265 return error;
9266 }
9267
9268 static int
vfs_context_ignores_node_permissions(vfs_context_t ctx)9269 vfs_context_ignores_node_permissions(vfs_context_t ctx)
9270 {
9271 if (proc_ignores_node_permissions(vfs_context_proc(ctx))) {
9272 return 1;
9273 }
9274 if (get_bsdthread_info(vfs_context_thread(ctx))->uu_flag & UT_IGNORE_NODE_PERMISSIONS) {
9275 return 1;
9276 }
9277 return 0;
9278 }
9279
9280 /*
9281 * Test the posix permissions in (vap) to determine whether (credential)
9282 * may perform (action)
9283 */
static int
vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
{
	struct vnode_attr *vap;
	int needed, error, owner_ok, group_ok, world_ok, ismember;
#ifdef KAUTH_DEBUG_ENABLE
	/* 'where' records which permission class decided the outcome (debug only) */
	const char *where = "uninitialized";
# define _SETWHERE(c) where = c;
#else
# define _SETWHERE(c)
#endif

	/* checking file or directory? */
	if (on_dir) {
		vap = vcp->dvap;
	} else {
		vap = vcp->vap;
	}

	error = 0;

	/*
	 * We want to do as little work here as possible.  So first we check
	 * which sets of permissions grant us the access we need, and avoid checking
	 * whether specific permissions grant access when more generic ones would.
	 */

	/* owner permissions */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IRUSR;
	}
	if (action & VWRITE) {
		needed |= S_IWUSR;
	}
	if (action & VEXEC) {
		needed |= S_IXUSR;
	}
	owner_ok = (needed & vap->va_mode) == needed;

	/*
	 * Processes with the appropriate entitlement can mark themselves as
	 * ignoring file/directory permissions if they own it.
	 */
	if (!owner_ok && vfs_context_ignores_node_permissions(vcp->ctx)) {
		owner_ok = 1;
	}

	/* group permissions */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IRGRP;
	}
	if (action & VWRITE) {
		needed |= S_IWGRP;
	}
	if (action & VEXEC) {
		needed |= S_IXGRP;
	}
	group_ok = (needed & vap->va_mode) == needed;

	/* world permissions */
	needed = 0;
	if (action & VREAD) {
		needed |= S_IROTH;
	}
	if (action & VWRITE) {
		needed |= S_IWOTH;
	}
	if (action & VEXEC) {
		needed |= S_IXOTH;
	}
	world_ok = (needed & vap->va_mode) == needed;

	/* If granted/denied by all three, we're done */
	if (owner_ok && group_ok && world_ok) {
		_SETWHERE("all");
		goto out;
	}

	if (!owner_ok && !group_ok && !world_ok) {
		_SETWHERE("all");
		error = EACCES;
		goto out;
	}

	/* Check ownership (relatively cheap); owner class decides if we own it */
	if ((on_dir && vauth_dir_owner(vcp)) ||
	    (!on_dir && vauth_file_owner(vcp))) {
		_SETWHERE("user");
		if (!owner_ok) {
			error = EACCES;
		}
		goto out;
	}

	/* Not owner; if group and world both grant it we're done */
	if (group_ok && world_ok) {
		_SETWHERE("group/world");
		goto out;
	}
	if (!group_ok && !world_ok) {
		_SETWHERE("group/world");
		error = EACCES;
		goto out;
	}

	/* Check group membership (most expensive) */
	ismember = 0; /* Default to allow, if the target has no group owner */

	/*
	 * In the case we can't get an answer about the user from the call to
	 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
	 * the side of caution, rather than simply granting access, or we will
	 * fail to correctly implement exclusion groups, so we set the third
	 * parameter on the basis of the state of 'group_ok'.
	 */
	if (on_dir) {
		error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
	} else {
		error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
	}
	if (error) {
		/* unresolvable membership: treat as "in group" when group would deny */
		if (!group_ok) {
			ismember = 1;
		}
		error = 0;
	}
	if (ismember) {
		_SETWHERE("group");
		if (!group_ok) {
			error = EACCES;
		}
		goto out;
	}

	/* Not owner, not in group, use world result */
	_SETWHERE("world");
	if (!world_ok) {
		error = EACCES;
	}

	/* FALLTHROUGH */

out:
	KAUTH_DEBUG("%p %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
	    vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
	    (action & VREAD) ? "r" : "-",
	    (action & VWRITE) ? "w" : "-",
	    (action & VEXEC) ? "x" : "-",
	    needed,
	    (vap->va_mode & S_IRUSR) ? "r" : "-",
	    (vap->va_mode & S_IWUSR) ? "w" : "-",
	    (vap->va_mode & S_IXUSR) ? "x" : "-",
	    (vap->va_mode & S_IRGRP) ? "r" : "-",
	    (vap->va_mode & S_IWGRP) ? "w" : "-",
	    (vap->va_mode & S_IXGRP) ? "x" : "-",
	    (vap->va_mode & S_IROTH) ? "r" : "-",
	    (vap->va_mode & S_IWOTH) ? "w" : "-",
	    (vap->va_mode & S_IXOTH) ? "x" : "-",
	    kauth_cred_getuid(vcp->ctx->vc_ucred),
	    on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
	    on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
	return error;
}
9449
9450 /*
9451 * Authorize the deletion of the node vp from the directory dvp.
9452 *
9453 * We assume that:
9454 * - Neither the node nor the directory are immutable.
9455 * - The user is not the superuser.
9456 *
9457 * The precedence of factors for authorizing or denying delete for a credential
9458 *
9459 * 1) Explicit ACE on the node. (allow or deny DELETE)
9460 * 2) Explicit ACE on the directory (allow or deny DELETE_CHILD).
9461 *
9462 * If there are conflicting ACEs on the node and the directory, the node
9463 * ACE wins.
9464 *
9465 * 3) Sticky bit on the directory.
9466 * Deletion is not permitted if the directory is sticky and the caller is
9467 * not owner of the node or directory. The sticky bit rules are like a deny
9468 * delete ACE except lower in priority than ACL's either allowing or denying
9469 * delete.
9470 *
 * 4) POSIX permissions on the directory.
9472 *
9473 * As an optimization, we cache whether or not delete child is permitted
9474 * on directories. This enables us to skip directory ACL and POSIX checks
9475 * as we already have the result from those checks. However, we always check the
9476 * node ACL and, if the directory has the sticky bit set, we always check its
9477 * ACL (even for a directory with an authorized delete child). Furthermore,
9478 * caching the delete child authorization is independent of the sticky bit
9479 * being set as it is only applicable in determining whether the node can be
9480 * deleted or not.
9481 */
static int
vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
{
	struct vnode_attr *vap = vcp->vap;
	struct vnode_attr *dvap = vcp->dvap;
	kauth_cred_t cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval eval;
	int error, ismember;

	/* Check the ACL on the node first; a node ACE outranks the directory */
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer. Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}

		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* entitled owners may override an ACL deny */
			if (vauth_file_owner(vcp) && vfs_context_ignores_node_permissions(vcp->ctx)) {
				KAUTH_DEBUG("%p Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp);
			return EACCES;
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p ALLOWED - granted by ACL", vcp->vp);
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* Defer to directory */
			KAUTH_DEBUG("%p DEFERRED - by file ACL", vcp->vp);
			break;
		}
	}

	/*
	 * Without a sticky bit, a previously authorized delete child is
	 * sufficient to authorize this delete.
	 *
	 * If the sticky bit is set, a directory ACL which allows delete child
	 * overrides a (potential) sticky bit deny. The authorized delete child
	 * cannot tell us if it was authorized because of an explicit delete
	 * child allow ACE or because of POSIX permissions so we have to check
	 * the directory ACL every time if the directory has a sticky bit.
	 */
	if (!(dvap->va_mode & S_ISTXT) && cached_delete_child) {
		KAUTH_DEBUG("%p ALLOWED - granted by directory ACL or POSIX permissions and no sticky bit on directory", vcp->vp);
		return 0;
	}

	/* check the ACL on the directory */
	if (VATTR_IS_NOT(dvap, va_acl, NULL)) {
		eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
		eval.ae_acl = &dvap->va_acl->acl_ace[0];
		eval.ae_count = dvap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_dir_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer. Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		/*
		 * If there is no entry, we are going to defer to other
		 * authorization mechanisms.
		 */
		error = kauth_acl_evaluate(cred, &eval);

		if (error != 0) {
			KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}
		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* entitled owners may override an ACL deny */
			if (vauth_dir_owner(vcp) && vfs_context_ignores_node_permissions(vcp->ctx)) {
				KAUTH_DEBUG("%p Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p DENIED - denied by directory ACL", vcp->vp);
			return EACCES;
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp);
			/* remember the grant so future deletes in this dir skip the checks */
			if (!cached_delete_child && vcp->dvp) {
				vnode_cache_authorized_action(vcp->dvp,
				    vcp->ctx, KAUTH_VNODE_DELETE_CHILD);
			}
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* Deferred by directory ACL */
			KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
			break;
		}
	}

	/*
	 * From this point, we can't explicitly allow and if we reach the end
	 * of the function without a denial, then the delete is authorized.
	 */
	if (!cached_delete_child) {
		if (vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */) != 0) {
			KAUTH_DEBUG("%p DENIED - denied by posix permisssions", vcp->vp);
			return EACCES;
		}
		/*
		 * Cache the authorized action on the vnode if allowed by the
		 * directory ACL or POSIX permissions. It is correct to cache
		 * this action even if sticky bit would deny deleting the node.
		 */
		if (vcp->dvp) {
			vnode_cache_authorized_action(vcp->dvp, vcp->ctx,
			    KAUTH_VNODE_DELETE_CHILD);
		}
	}

	/* enforce sticky bit behaviour: only owners may delete from a sticky dir */
	if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
		KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)",
		    vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid);
		return EACCES;
	}

	/* not denied, must be OK */
	return 0;
}
9649
9650
9651 /*
9652 * Authorize an operation based on the node's attributes.
9653 */
/*
 * Authorize 'acl_rights' against the node's attributes.
 *
 * Parameters:	vcp		Authorization context (vnode, directory
 *				vnode, their attributes, and the vfs context).
 *		acl_rights	Rights to be checked here (ACL first, then
 *				POSIX fallback for the residual).
 *		preauth_rights	Rights the caller has already authorized by
 *				other means; subtracted from the residual.
 *		found_deny	Out: set from the ACL evaluation when an ACL
 *				was evaluated; left untouched on the no-ACL
 *				path.
 *
 * Returns:	0		Authorized
 *		EACCES/errno	Denied or evaluation error
 */
static int
vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
{
	struct vnode_attr *vap = vcp->vap;
	kauth_cred_t cred = vcp->ctx->vc_ucred;
	struct kauth_acl_eval eval;
	int error, ismember;
	mode_t posix_action;

	/*
	 * If we are the file owner, we automatically have some rights.
	 *
	 * Do we need to expand this to support group ownership?
	 */
	if (vauth_file_owner(vcp)) {
		acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
	}

	/*
	 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
	 * mask the latter. If TAKE_OWNERSHIP is requested the caller is about to
	 * change ownership to themselves, and WRITE_SECURITY is implicitly
	 * granted to the owner. We need to do this because at this point
	 * WRITE_SECURITY may not be granted as the caller is not currently
	 * the owner.
	 */
	if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
	    (acl_rights & KAUTH_VNODE_WRITE_SECURITY)) {
		acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
	}

	if (acl_rights == 0) {
		KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp);
		return 0;
	}

	/* if we have an ACL, evaluate it */
	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
		eval.ae_requested = acl_rights;
		eval.ae_acl = &vap->va_acl->acl_ace[0];
		eval.ae_count = vap->va_acl->acl_entrycount;
		eval.ae_options = 0;
		if (vauth_file_owner(vcp)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		/*
		 * We use ENOENT as a marker to indicate we could not get
		 * information in order to delay evaluation until after we
		 * have the ACL evaluation answer. Previously, we would
		 * always deny the operation at this point.
		 */
		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
			return error;
		}
		if (error == ENOENT) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
		} else if (ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}
		/* expansion masks mapping "generic" ACE bits to vnode rights */
		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
			KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error);
			return error;
		}

		switch (eval.ae_result) {
		case KAUTH_RESULT_DENY:
			/* entitled owners may override an ACL deny */
			if (vauth_file_owner(vcp) && vfs_context_ignores_node_permissions(vcp->ctx)) {
				KAUTH_DEBUG("%p Override DENY due to entitlement", vcp->vp);
				return 0;
			}
			KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp);
			return EACCES;                  /* deny, deny, counter-allege */
		case KAUTH_RESULT_ALLOW:
			KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp);
			return 0;
		case KAUTH_RESULT_DEFER:
		default:
			/* Effectively the same as !delete_child_denied */
			KAUTH_DEBUG("%p DEFERRED - directory ACL", vcp->vp);
			break;
		}

		/* only updated on the ACL path; the no-ACL path leaves it alone */
		*found_deny = eval.ae_found_deny;

		/* fall through and evaluate residual rights */
	} else {
		/* no ACL, everything is residual */
		eval.ae_residual = acl_rights;
	}

	/*
	 * Grant residual rights that have been pre-authorized.
	 */
	eval.ae_residual &= ~preauth_rights;

	/*
	 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
	 */
	if (vauth_file_owner(vcp)) {
		eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
	}

	if (eval.ae_residual == 0) {
		KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp);
		return 0;
	}

	/*
	 * Bail if we have residual rights that can't be granted by posix permissions,
	 * or aren't presumed granted at this point.
	 *
	 * XXX these can be collapsed for performance
	 */
	if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
		KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp);
		return EACCES;
	}
	if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
		KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp);
		return EACCES;
	}

#if DIAGNOSTIC
	if (eval.ae_residual & KAUTH_VNODE_DELETE) {
		panic("vnode_authorize: can't be checking delete permission here");
	}
#endif

	/*
	 * Compute the fallback posix permissions that will satisfy the remaining
	 * rights.
	 */
	posix_action = 0;
	if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
	    KAUTH_VNODE_LIST_DIRECTORY |
	    KAUTH_VNODE_READ_EXTATTRIBUTES)) {
		posix_action |= VREAD;
	}
	if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
	    KAUTH_VNODE_ADD_FILE |
	    KAUTH_VNODE_ADD_SUBDIRECTORY |
	    KAUTH_VNODE_DELETE_CHILD |
	    KAUTH_VNODE_WRITE_ATTRIBUTES |
	    KAUTH_VNODE_WRITE_EXTATTRIBUTES)) {
		posix_action |= VWRITE;
	}
	if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
	    KAUTH_VNODE_SEARCH)) {
		posix_action |= VEXEC;
	}

	if (posix_action != 0) {
		return vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */);
	} else {
		/* residual rights with no posix mapping are granted below */
		KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
		    vcp->vp,
		    (eval.ae_residual & KAUTH_VNODE_READ_DATA)
		    ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
		    ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_EXECUTE)
		    ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
		    (eval.ae_residual & KAUTH_VNODE_DELETE)
		    ? " DELETE" : "",
		    (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
		    ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
		    (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
		    ? " DELETE_CHILD" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
		    ? " READ_ATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
		    ? " WRITE_ATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
		    ? " READ_EXTATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
		    ? " WRITE_EXTATTRIBUTES" : "",
		    (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
		    ? " READ_SECURITY" : "",
		    (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
		    ? " WRITE_SECURITY" : "",
		    (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
		    ? " CHECKIMMUTABLE" : "",
		    (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
		    ? " CHANGE_OWNER" : "");
	}

	/*
	 * Lack of required Posix permissions implies no reason to deny access.
	 */
	return 0;
}
9850
9851 /*
9852 * Check for file immutability.
9853 */
/*
 * Deny 'rights' that would modify an immutable (or append-only) node.
 *
 * Parameters:	mp	Mount the node lives on, or NULL (skips
 *			per-filesystem option checks).
 *		vcp	Authorization context; used for ownership checks
 *			when deciding whether an entitlement may override.
 *		vap	Attributes of the node being checked (may be the
 *			file's or, in the delete case, the directory's).
 *		rights	Requested kauth vnode rights.
 *		ignore	Nonzero when the caller asked to bypass
 *			immutability (KAUTH_VNODE_NOIMMUTABLE).
 *
 * Returns:	0	Not blocked by immutability
 *		EACCES	Write blocked by MNT_NOUSERXATTR or immutable flags
 */
static int
vnode_authorize_checkimmutable(mount_t mp, vauth_ctx vcp,
    struct vnode_attr *vap, int rights, int ignore)
{
	int error;
	int append;

	/*
	 * Perform immutability checks for operations that change data.
	 *
	 * Sockets, fifos and devices require special handling.
	 */
	switch (vap->va_type) {
	case VSOCK:
	case VFIFO:
	case VBLK:
	case VCHR:
		/*
		 * Writing to these nodes does not change the filesystem data,
		 * so forget that it's being tried.
		 */
		rights &= ~KAUTH_VNODE_WRITE_DATA;
		break;
	default:
		break;
	}

	error = 0;
	if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
		/* check per-filesystem options if possible */
		if (mp != NULL) {
			/* check for no-EA filesystems */
			if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
			    (vfs_flags(mp) & MNT_NOUSERXATTR)) {
				KAUTH_DEBUG("%p DENIED - filesystem disallowed extended attributes", vap);
				error = EACCES;  /* User attributes disabled */
				goto out;
			}
		}

		/*
		 * check for file immutability. first, check if the requested rights are
		 * allowable for a UF_APPEND file.
		 *
		 * The '| ~KAUTH_VNODE_WRITE_RIGHTS' term lets non-write bits in
		 * 'rights' pass the equality test, so 'append' is set exactly when
		 * every requested *write* right is one that UF_APPEND permits.
		 */
		append = 0;
		if (vap->va_type == VDIR) {
			if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES | ~KAUTH_VNODE_WRITE_RIGHTS)) == rights) {
				append = 1;
			}
		} else {
			if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES | ~KAUTH_VNODE_WRITE_RIGHTS)) == rights) {
				append = 1;
			}
		}
		if ((error = vnode_immutable(vap, append, ignore)) != 0) {
			/* NOTE: 'error' is known non-zero here; the inner test is redundant */
			if (error && !ignore) {
				/*
				 * In case of a rename, we want to check ownership for dvp as well.
				 */
				int owner = 0;
				if (rights & KAUTH_VNODE_DELETE_CHILD && vcp->dvp != NULL) {
					owner = vauth_file_owner(vcp) && vauth_dir_owner(vcp);
				} else {
					owner = vauth_file_owner(vcp);
				}
				/* entitled owners get a second chance with immutability ignored */
				if (owner && vfs_context_ignores_node_permissions(vcp->ctx)) {
					error = vnode_immutable(vap, append, 1);
				}
			}
		}
		if (error) {
			KAUTH_DEBUG("%p DENIED - file is immutable", vap);
			goto out;
		}
	}
out:
	return error;
}
9932
9933 /*
9934 * Handle authorization actions for filesystems that advertise that the
9935 * server will be enforcing.
9936 *
9937 * Returns: 0 Authorization should be handled locally
9938 * 1 Authorization was handled by the FS
9939 *
9940 * Note: Imputed returns will only occur if the authorization request
9941 * was handled by the FS.
9942 *
9943 * Imputed: *resultp, modified Return code from FS when the request is
9944 * handled by the FS.
9945 * VNOP_ACCESS:???
9946 * VNOP_OPEN:???
9947 */
static int
vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
{
	int error;

	/*
	 * If the vp is a device node, socket or FIFO it actually represents a local
	 * endpoint, so we need to handle it locally.
	 */
	switch (vp->v_type) {
	case VBLK:
	case VCHR:
	case VSOCK:
	case VFIFO:
		return 0;
	default:
		break;
	}

	/*
	 * In the advisory request case, if the filesystem doesn't think it's reliable
	 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
	 */
	if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount)) {
		return 0;
	}

	/*
	 * Let the filesystem have a say in the matter. It's OK for it to not implement
	 * VNOP_ACCESS, as most will authorise inline with the actual request.
	 *
	 * Any result other than ENOTSUP - including 0 (allow) - means the FS
	 * handled the request, so we report it via *resultp and return 1.
	 * NOTE(review): the DENIED debug message below is also emitted when
	 * error == 0 (an allow), which is misleading but harmless.
	 */
	if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
		*resultp = error;
		KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
		return 1;
	}

	/*
	 * Typically opaque filesystems do authorisation in-line, but exec is a special case. In
	 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
	 */
	if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
		/* try a VNOP_OPEN for readonly access */
		if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
			*resultp = error;
			KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp);
			return 1;
		}
		VNOP_CLOSE(vp, FREAD, ctx);
	}

	/*
	 * We don't have any reason to believe that the request has to be denied at this point,
	 * so go ahead and allow it.
	 */
	*resultp = 0;
	KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp);
	return 1;
}
10007
10008
10009
10010
10011 /*
10012 * Returns: KAUTH_RESULT_ALLOW
10013 * KAUTH_RESULT_DENY
10014 *
10015 * Imputed: *arg3, modified Error code in the deny case
10016 * EROFS Read-only file system
10017 * EACCES Permission denied
10018 * EPERM Operation not permitted [no execute]
10019 * vnode_getattr:ENOMEM Not enough space [only if has filesec]
10020 * vnode_getattr:???
10021 * vnode_authorize_opaque:*arg2 ???
10022 * vnode_authorize_checkimmutable:???
10023 * vnode_authorize_delete:???
10024 * vnode_authorize_simple:???
10025 */
10026
10027
/*
 * kauth scope listener for the vnode scope.
 *
 * arg0: vfs_context_t for the request
 * arg1: vnode the action applies to
 * arg2: directory vnode (deletes/renames), or NULLVP
 * arg3: int * that receives the error code on deny
 *
 * Consults the per-vnode cache of previously-authorized actions before
 * falling back to the full check in vnode_authorize_callback_int(), and
 * populates the cache on success.
 */
static int
vnode_authorize_callback(__unused kauth_cred_t cred, __unused void *idata,
    kauth_action_t action, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2,
    uintptr_t arg3)
{
	vfs_context_t ctx;
	vnode_t cvp = NULLVP;   /* vnode whose rights cache we consult/update */
	vnode_t vp, dvp;
	int result = KAUTH_RESULT_DENY;
	int parent_iocount = 0; /* nonzero iff we hold an iocount on cvp (namedstream parent) */
	int parent_action = 0; /* In case we need to use namedstream's data fork for cached rights*/

	ctx = (vfs_context_t)arg0;
	vp = (vnode_t)arg1;
	dvp = (vnode_t)arg2;

	/*
	 * if there are 2 vnodes passed in, we don't know at
	 * this point which rights to look at based on the
	 * combined action being passed in... defer until later...
	 * otherwise check the kauth 'rights' cache hung
	 * off of the vnode we're interested in... if we've already
	 * been granted the right we're currently interested in,
	 * we can just return success... otherwise we'll go through
	 * the process of authorizing the requested right(s)... if that
	 * succeeds, we'll add the right(s) to the cache.
	 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
	 */
	if (dvp && vp) {
		goto defer;
	}
	if (dvp) {
		cvp = dvp;
	} else {
		/*
		 * For named streams on local-authorization volumes, rights are cached on the parent;
		 * authorization is determined by looking at the parent's properties anyway, so storing
		 * on the parent means that we don't recompute for the named stream and that if
		 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
		 * stream to flush its cache separately. If we miss in the cache, then we authorize
		 * as if there were no cached rights (passing the named stream vnode and desired rights to
		 * vnode_authorize_callback_int()).
		 *
		 * On an opaquely authorized volume, we don't know the relationship between the
		 * data fork's properties and the rights granted on a stream. Thus, named stream vnodes
		 * on such a volume are authorized directly (rather than using the parent) and have their
		 * own caches. When a named stream vnode is created, we mark the parent as having a named
		 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
		 * find the stream and flush its cache.
		 */
		if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
			/* vnode_getparent() returns the parent with an iocount held */
			cvp = vnode_getparent(vp);
			if (cvp != NULLVP) {
				parent_iocount = 1;
			} else {
				cvp = NULL;
				goto defer; /* If we can't use the parent, take the slow path */
			}

			/* Have to translate some actions */
			parent_action = action;
			if (parent_action & KAUTH_VNODE_READ_DATA) {
				parent_action &= ~KAUTH_VNODE_READ_DATA;
				parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
			}
			if (parent_action & KAUTH_VNODE_WRITE_DATA) {
				parent_action &= ~KAUTH_VNODE_WRITE_DATA;
				parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
			}
		} else {
			cvp = vp;
		}
	}

	if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
		result = KAUTH_RESULT_ALLOW;
		goto out;
	}
defer:
	result = vnode_authorize_callback_int(action, ctx, vp, dvp, (int *)arg3);

	if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP) {
		KAUTH_DEBUG("%p - caching action = %x", cvp, action);
		vnode_cache_authorized_action(cvp, ctx, action);
	}

out:
	/* drop the iocount taken by vnode_getparent() */
	if (parent_iocount) {
		vnode_put(cvp);
	}

	return result;
}
10121
/*
 * Core attribute-based authorization shared by the vnode path
 * (vnode_authorize_callback_int) and the attribute-only path
 * (vnode_attr_authorize).
 *
 * Parameters:	vcp		Authorization context; vcp->vap (and
 *				vcp->dvap for deletes) must be populated.
 *		mp		Mount, or NULL when none is available.
 *		rights		Requested kauth vnode rights.
 *		is_suser	Nonzero for the superuser fast path.
 *		found_deny	Out: whether a deny ACE was encountered
 *				(forced TRUE for superuser so callers don't
 *				wrongly cache SEARCHBYANYONE).
 *		noimmutable	Caller requested immutability bypass.
 *		parent_authorized_for_delete_child
 *				Parent dir already authorized for
 *				DELETE_CHILD; skips the dir immutability check.
 *
 * Returns:	0 on success, errno on denial/failure.
 */
static int
vnode_attr_authorize_internal(vauth_ctx vcp, mount_t mp,
    kauth_ace_rights_t rights, int is_suser, boolean_t *found_deny,
    int noimmutable, int parent_authorized_for_delete_child)
{
	int result;

	/*
	 * Check for immutability.
	 *
	 * In the deletion case, parent directory immutability vetoes specific
	 * file rights.
	 */
	if ((result = vnode_authorize_checkimmutable(mp, vcp, vcp->vap, rights,
	    noimmutable)) != 0) {
		goto out;
	}

	if ((rights & KAUTH_VNODE_DELETE) &&
	    !parent_authorized_for_delete_child) {
		/* the parent directory must not be immutable either */
		result = vnode_authorize_checkimmutable(mp, vcp, vcp->dvap,
		    KAUTH_VNODE_DELETE_CHILD, 0);
		if (result) {
			goto out;
		}
	}

	/*
	 * Clear rights that have been authorized by reaching this point, bail if nothing left to
	 * check.
	 */
	rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
	if (rights == 0) {
		goto out;
	}

	/*
	 * If we're not the superuser, authorize based on file properties;
	 * note that even if parent_authorized_for_delete_child is TRUE, we
	 * need to check on the node itself.
	 */
	if (!is_suser) {
		/* process delete rights */
		if ((rights & KAUTH_VNODE_DELETE) &&
		    ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0)) {
			goto out;
		}

		/* process remaining rights */
		if ((rights & ~KAUTH_VNODE_DELETE) &&
		    (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, found_deny)) != 0) {
			goto out;
		}
	} else {
		/*
		 * Execute is only granted to root if one of the x bits is set. This check only
		 * makes sense if the posix mode bits are actually supported.
		 */
		if ((rights & KAUTH_VNODE_EXECUTE) &&
		    (vcp->vap->va_type == VREG) &&
		    VATTR_IS_SUPPORTED(vcp->vap, va_mode) &&
		    !(vcp->vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
			result = EPERM;
			KAUTH_DEBUG("%p DENIED - root execute requires at least one x bit in 0x%x", vcp, vcp->vap->va_mode);
			goto out;
		}

		/* Assume that there were DENYs so we don't wrongly cache KAUTH_VNODE_SEARCHBYANYONE */
		*found_deny = TRUE;

		KAUTH_DEBUG("%p ALLOWED - caller is superuser", vcp);
	}
out:
	return result;
}
10197
/*
 * Full (cache-miss) authorization of 'action' against vnode 'vp' in
 * context 'ctx', with optional parent 'dvp' for delete checks.
 *
 * On deny, writes the errno into *errorp and returns KAUTH_RESULT_DENY;
 * otherwise returns KAUTH_RESULT_ALLOW (possibly caching
 * KAUTH_VNODE_SEARCHBYANYONE on world-searchable directories).
 */
static int
vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
    vnode_t vp, vnode_t dvp, int *errorp)
{
	struct _vnode_authorize_context auth_context;
	vauth_ctx vcp;
	kauth_cred_t cred;
	kauth_ace_rights_t rights;
	struct vnode_attr va, dva;
	int result;
	int noimmutable;
	boolean_t parent_authorized_for_delete_child = FALSE;
	boolean_t found_deny = FALSE;
	boolean_t parent_ref = FALSE;   /* TRUE iff we hold an extra ref on vp (namedstream parent swap) */
	boolean_t is_suser = FALSE;

	vcp = &auth_context;
	vcp->ctx = ctx;
	vcp->vp = vp;
	vcp->dvp = dvp;
	/*
	 * Note that we authorize against the context, not the passed cred
	 * (the same thing anyway)
	 */
	cred = ctx->vc_ucred;

	VATTR_INIT(&va);
	vcp->vap = &va;
	VATTR_INIT(&dva);
	vcp->dvap = &dva;

	vcp->flags = vcp->flags_valid = 0;

#if DIAGNOSTIC
	if ((ctx == NULL) || (vp == NULL) || (cred == NULL)) {
		panic("vnode_authorize: bad arguments (context %p  vp %p  cred %p)", ctx, vp, cred);
	}
#endif

	KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
	    vp, vfs_context_proc(ctx)->p_comm,
	    (action & KAUTH_VNODE_ACCESS)               ? "access" : "auth",
	    (action & KAUTH_VNODE_READ_DATA)            ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
	    (action & KAUTH_VNODE_WRITE_DATA)           ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
	    (action & KAUTH_VNODE_EXECUTE)              ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
	    (action & KAUTH_VNODE_DELETE)               ? " DELETE" : "",
	    (action & KAUTH_VNODE_APPEND_DATA)          ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
	    (action & KAUTH_VNODE_DELETE_CHILD)         ? " DELETE_CHILD" : "",
	    (action & KAUTH_VNODE_READ_ATTRIBUTES)      ? " READ_ATTRIBUTES" : "",
	    (action & KAUTH_VNODE_WRITE_ATTRIBUTES)     ? " WRITE_ATTRIBUTES" : "",
	    (action & KAUTH_VNODE_READ_EXTATTRIBUTES)   ? " READ_EXTATTRIBUTES" : "",
	    (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES)  ? " WRITE_EXTATTRIBUTES" : "",
	    (action & KAUTH_VNODE_READ_SECURITY)        ? " READ_SECURITY" : "",
	    (action & KAUTH_VNODE_WRITE_SECURITY)       ? " WRITE_SECURITY" : "",
	    (action & KAUTH_VNODE_CHANGE_OWNER)         ? " CHANGE_OWNER" : "",
	    (action & KAUTH_VNODE_NOIMMUTABLE)          ? " (noimmutable)" : "",
	    vnode_isdir(vp) ? "directory" : "file",
	    vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);

	/*
	 * Extract the control bits from the action, everything else is
	 * requested rights.
	 */
	noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
	rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);

	if (rights & KAUTH_VNODE_DELETE) {
#if DIAGNOSTIC
		if (dvp == NULL) {
			panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
		}
#endif
		/*
		 * check to see if we've already authorized the parent
		 * directory for deletion of its children... if so, we
		 * can skip a whole bunch of work... we will still have to
		 * authorize that this specific child can be removed
		 */
		if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE) {
			parent_authorized_for_delete_child = TRUE;
		}
	} else {
		/* no delete: the directory is irrelevant from here on */
		vcp->dvp = NULLVP;
		vcp->dvap = NULL;
	}

	/*
	 * Check for read-only filesystems.
	 */
	if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
	    (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
	    (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
		result = EROFS;
		goto out;
	}

	/*
	 * Check for noexec filesystems.
	 */
	if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
		result = EACCES;
		goto out;
	}

	/*
	 * Handle cases related to filesystems with non-local enforcement.
	 * This call can return 0, in which case we will fall through to perform a
	 * check based on VNOP_GETATTR data.  Otherwise it returns 1 and sets
	 * an appropriate result, at which point we can return immediately.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx)) {
		goto out;
	}

	/*
	 * If the vnode is a namedstream (extended attribute) data vnode (eg.
	 * a resource fork), *_DATA becomes *_EXTATTRIBUTES.
	 */
	if (vnode_isnamedstream(vp)) {
		if (rights & KAUTH_VNODE_READ_DATA) {
			rights &= ~KAUTH_VNODE_READ_DATA;
			rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
		}
		if (rights & KAUTH_VNODE_WRITE_DATA) {
			rights &= ~KAUTH_VNODE_WRITE_DATA;
			rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
		}

		/*
		 * Point 'vp' to the namedstream's parent for ACL checking
		 */
		if ((vp->v_parent != NULL) &&
		    (vget_internal(vp->v_parent, 0, VNODE_NODEAD | VNODE_DRAINO) == 0)) {
			parent_ref = TRUE;      /* must vnode_put() on every exit path */
			vcp->vp = vp = vp->v_parent;
		}
	}

	if (vfs_context_issuser(ctx)) {
		/*
		 * if we're not asking for execute permissions or modifications,
		 * then we're done, this action is authorized.
		 */
		if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
			goto success;
		}

		is_suser = TRUE;
	}

	/*
	 * Get vnode attributes and extended security information for the vnode
	 * and directory if required.
	 *
	 * If we're root we only want mode bits and flags for checking
	 * execute and immutability.
	 */
	VATTR_WANTED(&va, va_mode);
	VATTR_WANTED(&va, va_flags);
	if (!is_suser) {
		VATTR_WANTED(&va, va_uid);
		VATTR_WANTED(&va, va_gid);
		VATTR_WANTED(&va, va_acl);
	}
	if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
		KAUTH_DEBUG("%p    ERROR - failed to get vnode attributes - %d", vp, result);
		goto out;
	}
	/* the type is taken from the vnode itself, not the filesystem */
	VATTR_WANTED(&va, va_type);
	VATTR_RETURN(&va, va_type, vnode_vtype(vp));

	if (vcp->dvp) {
		VATTR_WANTED(&dva, va_mode);
		VATTR_WANTED(&dva, va_flags);
		if (!is_suser) {
			VATTR_WANTED(&dva, va_uid);
			VATTR_WANTED(&dva, va_gid);
			VATTR_WANTED(&dva, va_acl);
		}
		if ((result = vnode_getattr(vcp->dvp, &dva, ctx)) != 0) {
			KAUTH_DEBUG("%p    ERROR - failed to get directory vnode attributes - %d", vp, result);
			goto out;
		}
		VATTR_WANTED(&dva, va_type);
		VATTR_RETURN(&dva, va_type, vnode_vtype(vcp->dvp));
	}

	result = vnode_attr_authorize_internal(vcp, vp->v_mount, rights, is_suser,
	    &found_deny, noimmutable, parent_authorized_for_delete_child);
out:
	/* free any ACLs returned by vnode_getattr(); dva is safe even if never fetched (VATTR_INIT'd) */
	if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
		kauth_acl_free(va.va_acl);
	}
	if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL)) {
		kauth_acl_free(dva.va_acl);
	}

	if (result) {
		if (parent_ref) {
			vnode_put(vp);
		}
		*errorp = result;
		KAUTH_DEBUG("%p DENIED - auth denied", vp);
		return KAUTH_RESULT_DENY;
	}
	if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
		/*
		 * if we were successfully granted the right to search this directory
		 * and there were NO ACL DENYs for search and the posix permissions also don't
		 * deny execute, we can synthesize a global right that allows anyone to
		 * traverse this directory during a pathname lookup without having to
		 * match the credential associated with this cache of rights.
		 *
		 * Note that we can correctly cache KAUTH_VNODE_SEARCHBYANYONE
		 * only if we actually check ACLs which we don't for root. As
		 * a workaround, the lookup fast path checks for root.
		 */
		if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
		    ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
		    (S_IXUSR | S_IXGRP | S_IXOTH))) {
			vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
		}
	}
success:
	if (parent_ref) {
		vnode_put(vp);
	}

	/*
	 * Note that this implies that we will allow requests for no rights, as well as
	 * for rights that we do not recognise.  There should be none of these.
	 */
	KAUTH_DEBUG("%p ALLOWED - auth granted", vp);
	return KAUTH_RESULT_ALLOW;
}
10435
10436 int
vnode_attr_authorize_init(struct vnode_attr * vap,struct vnode_attr * dvap,kauth_action_t action,vfs_context_t ctx)10437 vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap,
10438 kauth_action_t action, vfs_context_t ctx)
10439 {
10440 VATTR_INIT(vap);
10441 VATTR_WANTED(vap, va_type);
10442 VATTR_WANTED(vap, va_mode);
10443 VATTR_WANTED(vap, va_flags);
10444 if (dvap) {
10445 VATTR_INIT(dvap);
10446 if (action & KAUTH_VNODE_DELETE) {
10447 VATTR_WANTED(dvap, va_type);
10448 VATTR_WANTED(dvap, va_mode);
10449 VATTR_WANTED(dvap, va_flags);
10450 }
10451 } else if (action & KAUTH_VNODE_DELETE) {
10452 return EINVAL;
10453 }
10454
10455 if (!vfs_context_issuser(ctx)) {
10456 VATTR_WANTED(vap, va_uid);
10457 VATTR_WANTED(vap, va_gid);
10458 VATTR_WANTED(vap, va_acl);
10459 if (dvap && (action & KAUTH_VNODE_DELETE)) {
10460 VATTR_WANTED(dvap, va_uid);
10461 VATTR_WANTED(dvap, va_gid);
10462 VATTR_WANTED(dvap, va_acl);
10463 }
10464 }
10465
10466 return 0;
10467 }
10468
10469 #define VNODE_SEC_ATTRS_NO_ACL (VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | VNODE_ATTR_va_mode | VNODE_ATTR_va_flags | VNODE_ATTR_va_type)
10470
/*
 * Authorize 'action' against a node described only by its attributes
 * (no vnode); used when the caller has a vnode_attr but no vnode, e.g.
 * bulk operations.  'vap'/'dvap' must have been prepared with
 * vnode_attr_authorize_init() and filled in by the caller.
 *
 * Returns:	0	Authorized
 *		EROFS	Read-only filesystem
 *		EACCES	Denied (EPERM from the internal check is
 *			mapped to EACCES for this entry point)
 */
int
vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp,
    kauth_action_t action, vfs_context_t ctx)
{
	struct _vnode_authorize_context auth_context;
	vauth_ctx vcp;
	kauth_ace_rights_t rights;
	int noimmutable;
	boolean_t found_deny;
	boolean_t is_suser = FALSE;
	int result = 0;
	/* saved so uid/gid can be restored after any IGNORE_OWNERSHIP rewrite */
	uid_t ouid = vap->va_uid;
	gid_t ogid = vap->va_gid;

	vcp = &auth_context;
	vcp->ctx = ctx;
	vcp->vp = NULLVP;
	vcp->vap = vap;
	vcp->dvp = NULLVP;
	vcp->dvap = dvap;
	vcp->flags = vcp->flags_valid = 0;

	/* split the action into control bits and requested rights */
	noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
	rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);

	/*
	 * Check for read-only filesystems.
	 */
	if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
	    mp && (mp->mnt_flag & MNT_RDONLY) &&
	    ((vap->va_type == VREG) || (vap->va_type == VDIR) ||
	    (vap->va_type == VLNK) || (rights & KAUTH_VNODE_DELETE) ||
	    (rights & KAUTH_VNODE_DELETE_CHILD))) {
		result = EROFS;
		goto out;
	}

	/*
	 * Check for noexec filesystems.
	 */
	if ((rights & KAUTH_VNODE_EXECUTE) &&
	    (vap->va_type == VREG) && mp && (mp->mnt_flag & MNT_NOEXEC)) {
		result = EACCES;
		goto out;
	}

	if (vfs_context_issuser(ctx)) {
		/*
		 * if we're not asking for execute permissions or modifications,
		 * then we're done, this action is authorized.
		 */
		if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
			goto out;
		}
		is_suser = TRUE;
	}

	if (mp) {
		/* the caller must have supplied the ACL if the FS supports them */
		if (vfs_extendedsecurity(mp) && VATTR_IS_ACTIVE(vap, va_acl) && !VATTR_IS_SUPPORTED(vap, va_acl)) {
			panic("(1) vnode attrs not complete for vnode_attr_authorize");
		}
		/* may rewrite va_uid/va_gid for MNT_IGNORE_OWNERSHIP mounts */
		vnode_attr_handle_uid_and_gid(vap, mp, ctx);
	}

	if ((vap->va_active & VNODE_SEC_ATTRS_NO_ACL) != (vap->va_supported & VNODE_SEC_ATTRS_NO_ACL)) {
		panic("(2) vnode attrs not complete for vnode_attr_authorize (2) vap->va_active = 0x%llx , vap->va_supported = 0x%llx",
		    vap->va_active, vap->va_supported);
	}

	result = vnode_attr_authorize_internal(vcp, mp, rights, is_suser,
	    &found_deny, noimmutable, FALSE);

	/* undo any uid/gid substitution performed above */
	if (mp) {
		vap->va_uid = ouid;
		vap->va_gid = ogid;
	}

	if (result == EPERM) {
		result = EACCES;
	}
out:
	return result;
}
10554
10555
10556 int
vnode_authattr_new(vnode_t dvp,struct vnode_attr * vap,int noauth,vfs_context_t ctx)10557 vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
10558 {
10559 return vnode_authattr_new_internal(dvp, vap, noauth, NULL, ctx);
10560 }
10561
10562 /*
10563 * Check that the attribute information in vattr can be legally applied to
10564 * a new file by the context.
10565 */
10566 static int
vnode_authattr_new_internal(vnode_t dvp,struct vnode_attr * vap,int noauth,uint32_t * defaulted_fieldsp,vfs_context_t ctx)10567 vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
10568 {
10569 int error;
10570 int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
10571 uint32_t inherit_flags;
10572 kauth_cred_t cred;
10573 guid_t changer;
10574 mount_t dmp;
10575 struct vnode_attr dva;
10576
10577 error = 0;
10578
10579 if (defaulted_fieldsp) {
10580 *defaulted_fieldsp = 0;
10581 }
10582
10583 defaulted_owner = defaulted_group = defaulted_mode = 0;
10584
10585 inherit_flags = 0;
10586
10587 /*
10588 * Require that the filesystem support extended security to apply any.
10589 */
10590 if (!vfs_extendedsecurity(dvp->v_mount) &&
10591 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
10592 error = EINVAL;
10593 goto out;
10594 }
10595
10596 /*
10597 * Default some fields.
10598 */
10599 dmp = dvp->v_mount;
10600
10601 /*
10602 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
10603 * owner takes ownership of all new files.
10604 */
10605 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
10606 VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
10607 defaulted_owner = 1;
10608 } else {
10609 if (!VATTR_IS_ACTIVE(vap, va_uid)) {
10610 /* default owner is current user */
10611 VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
10612 defaulted_owner = 1;
10613 }
10614 }
10615
10616 /*
10617 * We need the dvp's va_flags and *may* need the gid of the directory,
10618 * we ask for both here.
10619 */
10620 VATTR_INIT(&dva);
10621 VATTR_WANTED(&dva, va_gid);
10622 VATTR_WANTED(&dva, va_flags);
10623 if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) {
10624 goto out;
10625 }
10626
/*
 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
 * group takes ownership of all new files.
 */
10631 if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
10632 VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
10633 defaulted_group = 1;
10634 } else {
10635 if (!VATTR_IS_ACTIVE(vap, va_gid)) {
10636 /* default group comes from parent object, fallback to current user */
10637 if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
10638 VATTR_SET(vap, va_gid, dva.va_gid);
10639 } else {
10640 VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
10641 }
10642 defaulted_group = 1;
10643 }
10644 }
10645
10646 if (!VATTR_IS_ACTIVE(vap, va_flags)) {
10647 VATTR_SET(vap, va_flags, 0);
10648 }
10649
10650 /* Determine if SF_RESTRICTED should be inherited from the parent
10651 * directory. */
10652 if (VATTR_IS_SUPPORTED(&dva, va_flags)) {
10653 inherit_flags = dva.va_flags & (UF_DATAVAULT | SF_RESTRICTED);
10654 }
10655
10656 /* default mode is everything, masked with current umask */
10657 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
10658 VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd.fd_cmask);
10659 KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o",
10660 vap->va_mode, vfs_context_proc(ctx)->p_fd.fd_cmask);
10661 defaulted_mode = 1;
10662 }
10663 /* set timestamps to now */
10664 if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
10665 nanotime(&vap->va_create_time);
10666 VATTR_SET_ACTIVE(vap, va_create_time);
10667 }
10668
10669 /*
10670 * Check for attempts to set nonsensical fields.
10671 */
10672 if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
10673 error = EINVAL;
10674 KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
10675 vap->va_active & ~VNODE_ATTR_NEWOBJ);
10676 goto out;
10677 }
10678
10679 /*
10680 * Quickly check for the applicability of any enforcement here.
10681 * Tests below maintain the integrity of the local security model.
10682 */
10683 if (vfs_authopaque(dvp->v_mount)) {
10684 goto out;
10685 }
10686
10687 /*
10688 * We need to know if the caller is the superuser, or if the work is
10689 * otherwise already authorised.
10690 */
10691 cred = vfs_context_ucred(ctx);
10692 if (noauth) {
10693 /* doing work for the kernel */
10694 has_priv_suser = 1;
10695 } else {
10696 has_priv_suser = vfs_context_issuser(ctx);
10697 }
10698
10699
10700 if (VATTR_IS_ACTIVE(vap, va_flags)) {
10701 vap->va_flags &= ~SF_SYNTHETIC;
10702 if (has_priv_suser) {
10703 if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
10704 error = EPERM;
10705 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
10706 goto out;
10707 }
10708 } else {
10709 if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
10710 error = EPERM;
10711 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
10712 goto out;
10713 }
10714 }
10715 }
10716
10717 /* if not superuser, validate legality of new-item attributes */
10718 if (!has_priv_suser) {
10719 if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
10720 /* setgid? */
10721 if (vap->va_mode & S_ISGID) {
10722 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
10723 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
10724 goto out;
10725 }
10726 if (!ismember) {
10727 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
10728 error = EPERM;
10729 goto out;
10730 }
10731 }
10732
10733 /* setuid? */
10734 if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
10735 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
10736 error = EPERM;
10737 goto out;
10738 }
10739 }
10740 if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
10741 KAUTH_DEBUG(" DENIED - cannot create new item owned by %d", vap->va_uid);
10742 error = EPERM;
10743 goto out;
10744 }
10745 if (!defaulted_group) {
10746 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
10747 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
10748 goto out;
10749 }
10750 if (!ismember) {
10751 KAUTH_DEBUG(" DENIED - cannot create new item with group %d - not a member", vap->va_gid);
10752 error = EPERM;
10753 goto out;
10754 }
10755 }
10756
10757 /* initialising owner/group UUID */
10758 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
10759 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
10760 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
10761 /* XXX ENOENT here - no GUID - should perhaps become EPERM */
10762 goto out;
10763 }
10764 if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
10765 KAUTH_DEBUG(" ERROR - cannot create item with supplied owner UUID - not us");
10766 error = EPERM;
10767 goto out;
10768 }
10769 }
10770 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
10771 if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
10772 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
10773 goto out;
10774 }
10775 if (!ismember) {
10776 KAUTH_DEBUG(" ERROR - cannot create item with supplied group UUID - not a member");
10777 error = EPERM;
10778 goto out;
10779 }
10780 }
10781 }
10782 out:
10783 if (inherit_flags) {
10784 /* Apply SF_RESTRICTED to the file if its parent directory was
10785 * restricted. This is done at the end so that root is not
10786 * required if this flag is only set due to inheritance. */
10787 VATTR_SET(vap, va_flags, (vap->va_flags | inherit_flags));
10788 }
10789 if (defaulted_fieldsp) {
10790 if (defaulted_mode) {
10791 *defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_MODE;
10792 }
10793 if (defaulted_group) {
10794 *defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_GID;
10795 }
10796 if (defaulted_owner) {
10797 *defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_UID;
10798 }
10799 }
10800 return error;
10801 }
10802
10803 /*
10804 * Check that the attribute information in vap can be legally written by the
10805 * context.
10806 *
10807 * Call this when you're not sure about the vnode_attr; either its contents
10808 * have come from an unknown source, or when they are variable.
10809 *
10810 * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
10811 * must be authorized to be permitted to write the vattr.
10812 */
10813 int
vnode_authattr(vnode_t vp,struct vnode_attr * vap,kauth_action_t * actionp,vfs_context_t ctx)10814 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
10815 {
10816 struct vnode_attr ova;
10817 kauth_action_t required_action;
10818 int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
10819 guid_t changer;
10820 gid_t group;
10821 uid_t owner;
10822 mode_t newmode;
10823 kauth_cred_t cred;
10824 uint32_t fdelta;
10825
10826 VATTR_INIT(&ova);
10827 required_action = 0;
10828 error = 0;
10829
10830 /*
10831 * Quickly check for enforcement applicability.
10832 */
10833 if (vfs_authopaque(vp->v_mount)) {
10834 goto out;
10835 }
10836
10837 /*
10838 * Check for attempts to set nonsensical fields.
10839 */
10840 if (vap->va_active & VNODE_ATTR_RDONLY) {
10841 KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
10842 error = EINVAL;
10843 goto out;
10844 }
10845
10846 /*
10847 * We need to know if the caller is the superuser.
10848 */
10849 cred = vfs_context_ucred(ctx);
10850 has_priv_suser = kauth_cred_issuser(cred);
10851
10852 /*
10853 * If any of the following are changing, we need information from the old file:
10854 * va_uid
10855 * va_gid
10856 * va_mode
10857 * va_uuuid
10858 * va_guuid
10859 */
10860 if (VATTR_IS_ACTIVE(vap, va_uid) ||
10861 VATTR_IS_ACTIVE(vap, va_gid) ||
10862 VATTR_IS_ACTIVE(vap, va_mode) ||
10863 VATTR_IS_ACTIVE(vap, va_uuuid) ||
10864 VATTR_IS_ACTIVE(vap, va_guuid)) {
10865 VATTR_WANTED(&ova, va_mode);
10866 VATTR_WANTED(&ova, va_uid);
10867 VATTR_WANTED(&ova, va_gid);
10868 VATTR_WANTED(&ova, va_uuuid);
10869 VATTR_WANTED(&ova, va_guuid);
10870 KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
10871 }
10872
10873 /*
10874 * If timestamps are being changed, we need to know who the file is owned
10875 * by.
10876 */
10877 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
10878 VATTR_IS_ACTIVE(vap, va_change_time) ||
10879 VATTR_IS_ACTIVE(vap, va_modify_time) ||
10880 VATTR_IS_ACTIVE(vap, va_access_time) ||
10881 VATTR_IS_ACTIVE(vap, va_backup_time) ||
10882 VATTR_IS_ACTIVE(vap, va_addedtime)) {
10883 VATTR_WANTED(&ova, va_uid);
10884 #if 0 /* enable this when we support UUIDs as official owners */
10885 VATTR_WANTED(&ova, va_uuuid);
10886 #endif
10887 KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
10888 }
10889
10890 /*
10891 * If flags are being changed, we need the old flags.
10892 */
10893 if (VATTR_IS_ACTIVE(vap, va_flags)) {
10894 KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
10895 VATTR_WANTED(&ova, va_flags);
10896 }
10897
10898 /*
10899 * If ACLs are being changed, we need the old ACLs.
10900 */
10901 if (VATTR_IS_ACTIVE(vap, va_acl)) {
10902 KAUTH_DEBUG("ATTR - acl changing, fetching old flags");
10903 VATTR_WANTED(&ova, va_acl);
10904 }
10905
10906 /*
10907 * If the size is being set, make sure it's not a directory.
10908 */
10909 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
10910 /* size is only meaningful on regular files, don't permit otherwise */
10911 if (!vnode_isreg(vp)) {
10912 KAUTH_DEBUG("ATTR - ERROR: size change requested on non-file");
10913 error = vnode_isdir(vp) ? EISDIR : EINVAL;
10914 goto out;
10915 }
10916 }
10917
10918 /*
10919 * Get old data.
10920 */
10921 KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
10922 if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
10923 KAUTH_DEBUG(" ERROR - got %d trying to get attributes", error);
10924 goto out;
10925 }
10926
10927 /*
10928 * Size changes require write access to the file data.
10929 */
10930 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
10931 /* if we can't get the size, or it's different, we need write access */
10932 KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
10933 required_action |= KAUTH_VNODE_WRITE_DATA;
10934 }
10935
10936 /*
10937 * Changing timestamps?
10938 *
10939 * Note that we are only called to authorize user-requested time changes;
10940 * side-effect time changes are not authorized. Authorisation is only
10941 * required for existing files.
10942 *
10943 * Non-owners are not permitted to change the time on an existing
10944 * file to anything other than the current time.
10945 */
10946 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
10947 VATTR_IS_ACTIVE(vap, va_change_time) ||
10948 VATTR_IS_ACTIVE(vap, va_modify_time) ||
10949 VATTR_IS_ACTIVE(vap, va_access_time) ||
10950 VATTR_IS_ACTIVE(vap, va_backup_time) ||
10951 VATTR_IS_ACTIVE(vap, va_addedtime)) {
10952 /*
10953 * The owner and root may set any timestamps they like,
10954 * provided that the file is not immutable. The owner still needs
10955 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
10956 */
10957 if (has_priv_suser || vauth_node_owner(&ova, cred)) {
10958 KAUTH_DEBUG("ATTR - root or owner changing timestamps");
10959 required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
10960 } else {
10961 /* just setting the current time? */
10962 if (vap->va_vaflags & VA_UTIMES_NULL) {
10963 KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
10964 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
10965 } else {
10966 KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
10967 error = EACCES;
10968 goto out;
10969 }
10970 }
10971 }
10972
10973 /*
10974 * Changing file mode?
10975 */
10976 if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
10977 KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
10978
10979 /*
10980 * Mode changes always have the same basic auth requirements.
10981 */
10982 if (has_priv_suser) {
10983 KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
10984 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
10985 } else {
10986 /* need WRITE_SECURITY */
10987 KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
10988 required_action |= KAUTH_VNODE_WRITE_SECURITY;
10989 }
10990
10991 /*
10992 * Can't set the setgid bit if you're not in the group and not root. Have to have
10993 * existing group information in the case we're not setting it right now.
10994 */
10995 if (vap->va_mode & S_ISGID) {
10996 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
10997 if (!has_priv_suser) {
10998 if (VATTR_IS_ACTIVE(vap, va_gid)) {
10999 group = vap->va_gid;
11000 } else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
11001 group = ova.va_gid;
11002 } else {
11003 KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
11004 error = EINVAL;
11005 goto out;
11006 }
11007 /*
11008 * This might be too restrictive; WRITE_SECURITY might be implied by
11009 * membership in this case, rather than being an additional requirement.
11010 */
11011 if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
11012 KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
11013 goto out;
11014 }
11015 if (!ismember) {
11016 KAUTH_DEBUG(" DENIED - can't set SGID bit, not a member of %d", group);
11017 error = EPERM;
11018 goto out;
11019 }
11020 }
11021 }
11022
11023 /*
11024 * Can't set the setuid bit unless you're root or the file's owner.
11025 */
11026 if (vap->va_mode & S_ISUID) {
11027 required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */
11028 if (!has_priv_suser) {
11029 if (VATTR_IS_ACTIVE(vap, va_uid)) {
11030 owner = vap->va_uid;
11031 } else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
11032 owner = ova.va_uid;
11033 } else {
11034 KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
11035 error = EINVAL;
11036 goto out;
11037 }
11038 if (owner != kauth_cred_getuid(cred)) {
11039 /*
11040 * We could allow this if WRITE_SECURITY is permitted, perhaps.
11041 */
11042 KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
11043 error = EPERM;
11044 goto out;
11045 }
11046 }
11047 }
11048 }
11049
11050 /*
11051 * Validate/mask flags changes. This checks that only the flags in
11052 * the UF_SETTABLE mask are being set, and preserves the flags in
11053 * the SF_SETTABLE case.
11054 *
11055 * Since flags changes may be made in conjunction with other changes,
11056 * we will ask the auth code to ignore immutability in the case that
11057 * the SF_* flags are not set and we are only manipulating the file flags.
11058 *
11059 */
11060 if (VATTR_IS_ACTIVE(vap, va_flags)) {
11061 /* compute changing flags bits */
11062 vap->va_flags &= ~SF_SYNTHETIC;
11063 ova.va_flags &= ~SF_SYNTHETIC;
11064 if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
11065 fdelta = vap->va_flags ^ ova.va_flags;
11066 } else {
11067 fdelta = vap->va_flags;
11068 }
11069
11070 if (fdelta != 0) {
11071 KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
11072 required_action |= KAUTH_VNODE_WRITE_SECURITY;
11073
11074 /* check that changing bits are legal */
11075 if (has_priv_suser) {
11076 /*
11077 * The immutability check will prevent us from clearing the SF_*
11078 * flags unless the system securelevel permits it, so just check
11079 * for legal flags here.
11080 */
11081 if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
11082 error = EPERM;
11083 KAUTH_DEBUG(" DENIED - superuser attempt to set illegal flag(s)");
11084 goto out;
11085 }
11086 } else {
11087 if (fdelta & ~UF_SETTABLE) {
11088 error = EPERM;
11089 KAUTH_DEBUG(" DENIED - user attempt to set illegal flag(s)");
11090 goto out;
11091 }
11092 }
11093 /*
11094 * If the caller has the ability to manipulate file flags,
11095 * security is not reduced by ignoring them for this operation.
11096 *
11097 * A more complete test here would consider the 'after' states of the flags
11098 * to determine whether it would permit the operation, but this becomes
11099 * very complex.
11100 *
11101 * Ignoring immutability is conditional on securelevel; this does not bypass
11102 * the SF_* flags if securelevel > 0.
11103 */
11104 required_action |= KAUTH_VNODE_NOIMMUTABLE;
11105 }
11106 }
11107
11108 /*
11109 * Validate ownership information.
11110 */
11111 chowner = 0;
11112 chgroup = 0;
11113 clear_suid = 0;
11114 clear_sgid = 0;
11115
11116 /*
11117 * uid changing
11118 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
11119 * support them in general, and will ignore it if/when we try to set it.
11120 * We might want to clear the uid out of vap completely here.
11121 */
11122 if (VATTR_IS_ACTIVE(vap, va_uid)) {
11123 if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
11124 if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
11125 KAUTH_DEBUG(" DENIED - non-superuser cannot change ownershipt to a third party");
11126 error = EPERM;
11127 goto out;
11128 }
11129 chowner = 1;
11130 }
11131 clear_suid = 1;
11132 }
11133
11134 /*
11135 * gid changing
11136 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
11137 * support them in general, and will ignore it if/when we try to set it.
11138 * We might want to clear the gid out of vap completely here.
11139 */
11140 if (VATTR_IS_ACTIVE(vap, va_gid)) {
11141 if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
11142 if (!has_priv_suser) {
11143 if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
11144 KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid);
11145 goto out;
11146 }
11147 if (!ismember) {
11148 KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group",
11149 ova.va_gid, vap->va_gid);
11150 error = EPERM;
11151 goto out;
11152 }
11153 }
11154 chgroup = 1;
11155 }
11156 clear_sgid = 1;
11157 }
11158
11159 /*
11160 * Owner UUID being set or changed.
11161 */
11162 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
11163 /* if the owner UUID is not actually changing ... */
11164 if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
11165 if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid)) {
11166 goto no_uuuid_change;
11167 }
11168
11169 /*
11170 * If the current owner UUID is a null GUID, check
11171 * it against the UUID corresponding to the owner UID.
11172 */
11173 if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
11174 VATTR_IS_SUPPORTED(&ova, va_uid)) {
11175 guid_t uid_guid;
11176
11177 if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
11178 kauth_guid_equal(&vap->va_uuuid, &uid_guid)) {
11179 goto no_uuuid_change;
11180 }
11181 }
11182 }
11183
11184 /*
11185 * The owner UUID cannot be set by a non-superuser to anything other than
11186 * their own or a null GUID (to "unset" the owner UUID).
11187 * Note that file systems must be prepared to handle the
11188 * null UUID case in a manner appropriate for that file
11189 * system.
11190 */
11191 if (!has_priv_suser) {
11192 if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
11193 KAUTH_DEBUG(" ERROR - got %d trying to get caller UUID", error);
11194 /* XXX ENOENT here - no UUID - should perhaps become EPERM */
11195 goto out;
11196 }
11197 if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
11198 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
11199 KAUTH_DEBUG(" ERROR - cannot set supplied owner UUID - not us / null");
11200 error = EPERM;
11201 goto out;
11202 }
11203 }
11204 chowner = 1;
11205 clear_suid = 1;
11206 }
11207 no_uuuid_change:
11208 /*
11209 * Group UUID being set or changed.
11210 */
11211 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
11212 /* if the group UUID is not actually changing ... */
11213 if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
11214 if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid)) {
11215 goto no_guuid_change;
11216 }
11217
11218 /*
11219 * If the current group UUID is a null UUID, check
11220 * it against the UUID corresponding to the group GID.
11221 */
11222 if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
11223 VATTR_IS_SUPPORTED(&ova, va_gid)) {
11224 guid_t gid_guid;
11225
11226 if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
11227 kauth_guid_equal(&vap->va_guuid, &gid_guid)) {
11228 goto no_guuid_change;
11229 }
11230 }
11231 }
11232
11233 /*
11234 * The group UUID cannot be set by a non-superuser to anything other than
11235 * one of which they are a member or a null GUID (to "unset"
11236 * the group UUID).
11237 * Note that file systems must be prepared to handle the
11238 * null UUID case in a manner appropriate for that file
11239 * system.
11240 */
11241 if (!has_priv_suser) {
11242 if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
11243 ismember = 1;
11244 } else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
11245 KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error);
11246 goto out;
11247 }
11248 if (!ismember) {
11249 KAUTH_DEBUG(" ERROR - cannot set supplied group UUID - not a member / null");
11250 error = EPERM;
11251 goto out;
11252 }
11253 }
11254 chgroup = 1;
11255 }
11256 no_guuid_change:
11257
11258 /*
11259 * Compute authorisation for group/ownership changes.
11260 */
11261 if (chowner || chgroup || clear_suid || clear_sgid) {
11262 if (has_priv_suser) {
11263 KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
11264 required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
11265 } else {
11266 if (chowner) {
11267 KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
11268 required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
11269 }
11270 if (chgroup && !chowner) {
11271 KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
11272 required_action |= KAUTH_VNODE_WRITE_SECURITY;
11273 }
11274 }
11275
11276 /*
11277 * clear set-uid and set-gid bits. POSIX only requires this for
11278 * non-privileged processes but we do it even for root.
11279 */
11280 if (VATTR_IS_ACTIVE(vap, va_mode)) {
11281 newmode = vap->va_mode;
11282 } else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
11283 newmode = ova.va_mode;
11284 } else {
11285 KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
11286 newmode = 0;
11287 }
11288
11289 /* chown always clears setuid/gid bits. An exception is made for
11290 * setattrlist which can set both at the same time: <uid, gid, mode> on a file:
11291 * setattrlist is allowed to set the new mode on the file and change (chown)
11292 * uid/gid.
11293 */
11294 if (newmode & (S_ISUID | S_ISGID)) {
11295 if (!VATTR_IS_ACTIVE(vap, va_mode)) {
11296 KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o",
11297 newmode, newmode & ~(S_ISUID | S_ISGID));
11298 newmode &= ~(S_ISUID | S_ISGID);
11299 }
11300 VATTR_SET(vap, va_mode, newmode);
11301 }
11302 }
11303
11304 /*
11305 * Authorise changes in the ACL.
11306 */
11307 if (VATTR_IS_ACTIVE(vap, va_acl)) {
11308 /* no existing ACL */
11309 if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
11310 /* adding an ACL */
11311 if (vap->va_acl != NULL) {
11312 required_action |= KAUTH_VNODE_WRITE_SECURITY;
11313 KAUTH_DEBUG("CHMOD - adding ACL");
11314 }
11315
11316 /* removing an existing ACL */
11317 } else if (vap->va_acl == NULL) {
11318 required_action |= KAUTH_VNODE_WRITE_SECURITY;
11319 KAUTH_DEBUG("CHMOD - removing ACL");
11320
11321 /* updating an existing ACL */
11322 } else {
11323 if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
11324 /* entry count changed, must be different */
11325 required_action |= KAUTH_VNODE_WRITE_SECURITY;
11326 KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
11327 } else if (vap->va_acl->acl_entrycount > 0) {
11328 /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
11329 if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
11330 sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) {
11331 required_action |= KAUTH_VNODE_WRITE_SECURITY;
11332 KAUTH_DEBUG("CHMOD - changing ACL entries");
11333 }
11334 }
11335 }
11336 }
11337
11338 /*
11339 * Other attributes that require authorisation.
11340 */
11341 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
11342 required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
11343 }
11344
11345 out:
11346 if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL)) {
11347 kauth_acl_free(ova.va_acl);
11348 }
11349 if (error == 0) {
11350 *actionp = required_action;
11351 }
11352 return error;
11353 }
11354
static int
setlocklocal_callback(struct vnode *vp, __unused void *cargs)
{
	/*
	 * vnode_iterate() callback: mark a single vnode as using local
	 * (VFS-provided) advisory locking.  The spin-lock variant is
	 * sufficient because we only flip a flag bit.
	 */
	vnode_lock_spin(vp);
	vp->v_flag |= VLOCKLOCAL;
	vnode_unlock(vp);

	return VNODE_RETURNED;
}
11364
void
vfs_setlocklocal(mount_t mp)
{
	/*
	 * Mark the mount (and every vnode currently on it) as using local
	 * advisory locking, i.e. the VFS layer handles byte-range locks
	 * rather than the filesystem.  New vnodes pick the behaviour up
	 * from MNTK_LOCK_LOCAL; existing ones are flagged via iteration.
	 */
	mount_lock_spin(mp);
	mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
	mount_unlock(mp);

	/*
	 * The number of active vnodes is expected to be
	 * very small when vfs_setlocklocal is invoked.
	 */
	vnode_iterate(mp, 0, setlocklocal_callback, NULL);
}
11378
void
vfs_setcompoundopen(mount_t mp)
{
	/*
	 * Advertise that this filesystem implements the compound
	 * lookup+open VNOP, letting namei/open combine the operations.
	 */
	mount_lock_spin(mp);
	mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN;
	mount_unlock(mp);
}
11386
11387 void
vnode_setswapmount(vnode_t vp)11388 vnode_setswapmount(vnode_t vp)
11389 {
11390 mount_lock(vp->v_mount);
11391 vp->v_mount->mnt_kern_flag |= MNTK_SWAP_MOUNT;
11392 mount_unlock(vp->v_mount);
11393 }
11394
void
vfs_setfskit(mount_t mp)
{
	/*
	 * Mark this mount as being backed by a FSKit (user-space)
	 * filesystem implementation.
	 */
	mount_lock_spin(mp);
	mp->mnt_kern_flag |= MNTK_FSKIT;
	mount_unlock(mp);
}
11402
11403 uint32_t
vfs_getextflags(mount_t mp)11404 vfs_getextflags(mount_t mp)
11405 {
11406 uint32_t flags_ext = 0;
11407
11408 if (mp->mnt_kern_flag & MNTK_SYSTEMDATA) {
11409 flags_ext |= MNT_EXT_ROOT_DATA_VOL;
11410 }
11411 if (mp->mnt_kern_flag & MNTK_FSKIT) {
11412 flags_ext |= MNT_EXT_FSKIT;
11413 }
11414 return flags_ext;
11415 }
11416
11417 char *
vfs_getfstypenameref_locked(mount_t mp,size_t * lenp)11418 vfs_getfstypenameref_locked(mount_t mp, size_t *lenp)
11419 {
11420 char *name;
11421
11422 if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
11423 name = mp->fstypename_override;
11424 } else {
11425 name = mp->mnt_vfsstat.f_fstypename;
11426 }
11427 if (lenp != NULL) {
11428 *lenp = strlen(name);
11429 }
11430 return name;
11431 }
11432
void
vfs_getfstypename(mount_t mp, char *buf, size_t buflen)
{
	/*
	 * Copy the mount's filesystem type name (honouring any override)
	 * into the caller's buffer, truncating to 'buflen' with NUL
	 * termination.  The mount lock keeps the source string stable
	 * for the duration of the copy.
	 */
	mount_lock_spin(mp);
	strlcpy(buf, vfs_getfstypenameref_locked(mp, NULL), buflen);
	mount_unlock(mp);
}
11440
11441 void
vfs_setfstypename_locked(mount_t mp,const char * name)11442 vfs_setfstypename_locked(mount_t mp, const char *name)
11443 {
11444 if (name == NULL || name[0] == '\0') {
11445 mp->mnt_kern_flag &= ~MNTK_TYPENAME_OVERRIDE;
11446 mp->fstypename_override[0] = '\0';
11447 } else {
11448 strlcpy(mp->fstypename_override, name,
11449 sizeof(mp->fstypename_override));
11450 mp->mnt_kern_flag |= MNTK_TYPENAME_OVERRIDE;
11451 }
11452 }
11453
void
vfs_setfstypename(mount_t mp, const char *name)
{
	/*
	 * Locked wrapper around vfs_setfstypename_locked(); see that
	 * function for the override semantics.
	 */
	mount_lock_spin(mp);
	vfs_setfstypename_locked(mp, name);
	mount_unlock(mp);
}
11461
11462 int64_t
vnode_getswappin_avail(vnode_t vp)11463 vnode_getswappin_avail(vnode_t vp)
11464 {
11465 int64_t max_swappin_avail = 0;
11466
11467 mount_lock(vp->v_mount);
11468 if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_SWAPPIN_SUPPORTED) {
11469 max_swappin_avail = vp->v_mount->mnt_max_swappin_available;
11470 }
11471 mount_unlock(vp->v_mount);
11472
11473 return max_swappin_avail;
11474 }
11475
11476
void
vn_setunionwait(vnode_t vp)
{
	/*
	 * Mark the vnode as busy with a union-mount transition; other
	 * threads block in vn_checkunionwait() until the flag is cleared
	 * by vn_clearunionwait().
	 */
	vnode_lock_spin(vp);
	vp->v_flag |= VISUNION;
	vnode_unlock(vp);
}
11484
11485
void
vn_checkunionwait(vnode_t vp)
{
	/*
	 * Block until any in-progress union-mount transition on this
	 * vnode completes.  Sleeps on &vp->v_flag; the vnode lock is
	 * dropped by msleep() while waiting and re-acquired before the
	 * flag is re-checked, so the loop guards against spurious wakeups.
	 */
	vnode_lock_spin(vp);
	while ((vp->v_flag & VISUNION) == VISUNION) {
		msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
	}
	vnode_unlock(vp);
}
11495
void
vn_clearunionwait(vnode_t vp, int locked)
{
	/*
	 * Clear the VISUNION busy flag set by vn_setunionwait() and wake
	 * all threads sleeping in vn_checkunionwait().  'locked' is
	 * non-zero when the caller already holds the vnode lock.
	 */
	if (!locked) {
		vnode_lock_spin(vp);
	}
	if ((vp->v_flag & VISUNION) == VISUNION) {
		vp->v_flag &= ~VISUNION;
		wakeup((caddr_t)&vp->v_flag);
	}
	if (!locked) {
		vnode_unlock(vp);
	}
}
11510
11511 /*
11512 * Removes orphaned apple double files during a rmdir
11513 * Works by:
11514 * 1. vnode_suspend().
11515 * 2. Call VNOP_READDIR() till the end of directory is reached.
11516 * 3. Check if the directory entries returned are regular files with name starting with "._". If not, return ENOTEMPTY.
11517 * 4. Continue (2) and (3) till end of directory is reached.
11518 * 5. If all the entries in the directory were files with "._" name, delete all the files.
11519 * 6. vnode_resume()
11520 * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
11521 */
11522
11523 errno_t
rmdir_remove_orphaned_appleDouble(vnode_t vp,vfs_context_t ctx,int * restart_flag)11524 rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int * restart_flag)
11525 {
11526 #define UIO_BUFF_SIZE 2048
11527 uio_t auio = NULL;
11528 int eofflag, siz = UIO_BUFF_SIZE, alloc_size = 0, nentries = 0;
11529 int open_flag = 0, full_erase_flag = 0;
11530 UIO_STACKBUF(uio_buf, 1);
11531 char *rbuf = NULL;
11532 void *dir_pos;
11533 void *dir_end;
11534 struct dirent *dp;
11535 errno_t error;
11536
11537 error = vnode_suspend(vp);
11538
11539 /*
11540 * restart_flag is set so that the calling rmdir sleeps and resets
11541 */
11542 if (error == EBUSY) {
11543 *restart_flag = 1;
11544 }
11545 if (error != 0) {
11546 return error;
11547 }
11548
11549 /*
11550 * Prevent dataless fault materialization while we have
11551 * a suspended vnode.
11552 */
11553 uthread_t ut = current_uthread();
11554 bool saved_nodatalessfaults =
11555 (ut->uu_flag & UT_NSPACE_NODATALESSFAULTS) ? true : false;
11556 ut->uu_flag |= UT_NSPACE_NODATALESSFAULTS;
11557
11558 /*
11559 * set up UIO
11560 */
11561 rbuf = kalloc_data(siz, Z_WAITOK);
11562 alloc_size = siz;
11563 if (rbuf) {
11564 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
11565 &uio_buf[0], sizeof(uio_buf));
11566 }
11567 if (!rbuf || !auio) {
11568 error = ENOMEM;
11569 goto outsc;
11570 }
11571
11572 uio_setoffset(auio, 0);
11573
11574 eofflag = 0;
11575
11576 if ((error = VNOP_OPEN(vp, FREAD, ctx))) {
11577 goto outsc;
11578 } else {
11579 open_flag = 1;
11580 }
11581
11582 /*
11583 * First pass checks if all files are appleDouble files.
11584 */
11585
11586 do {
11587 siz = UIO_BUFF_SIZE;
11588 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
11589 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
11590
11591 if ((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx))) {
11592 goto outsc;
11593 }
11594
11595 if (uio_resid(auio) != 0) {
11596 siz -= uio_resid(auio);
11597 }
11598
11599 /*
11600 * Iterate through directory
11601 */
11602 dir_pos = (void*) rbuf;
11603 dir_end = (void*) (rbuf + siz);
11604 dp = (struct dirent*) (dir_pos);
11605
11606 if (dir_pos == dir_end) {
11607 eofflag = 1;
11608 }
11609
11610 while (dir_pos < dir_end) {
11611 /*
11612 * Check for . and .. as well as directories
11613 */
11614 if (dp->d_ino != 0 &&
11615 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
11616 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
11617 /*
11618 * Check for irregular files and ._ files
11619 * If there is a ._._ file abort the op
11620 */
11621 if (dp->d_namlen < 2 ||
11622 strncmp(dp->d_name, "._", 2) ||
11623 (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._", 2))) {
11624 error = ENOTEMPTY;
11625 goto outsc;
11626 }
11627 }
11628 dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
11629 dp = (struct dirent*)dir_pos;
11630 }
11631
11632 /*
11633 * workaround for HFS/NFS setting eofflag before end of file
11634 */
11635 if (vp->v_tag == VT_HFS && nentries > 2) {
11636 eofflag = 0;
11637 }
11638
11639 if (vp->v_tag == VT_NFS) {
11640 if (eofflag && !full_erase_flag) {
11641 full_erase_flag = 1;
11642 eofflag = 0;
11643 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
11644 } else if (!eofflag && full_erase_flag) {
11645 full_erase_flag = 0;
11646 }
11647 }
11648 } while (!eofflag);
11649 /*
11650 * If we've made it here all the files in the dir are ._ files.
11651 * We can delete the files even though the node is suspended
11652 * because we are the owner of the file.
11653 */
11654
11655 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
11656 eofflag = 0;
11657 full_erase_flag = 0;
11658
11659 do {
11660 siz = UIO_BUFF_SIZE;
11661 uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
11662 uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
11663
11664 error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);
11665
11666 if (error != 0) {
11667 goto outsc;
11668 }
11669
11670 if (uio_resid(auio) != 0) {
11671 siz -= uio_resid(auio);
11672 }
11673
11674 /*
11675 * Iterate through directory
11676 */
11677 dir_pos = (void*) rbuf;
11678 dir_end = (void*) (rbuf + siz);
11679 dp = (struct dirent*) dir_pos;
11680
11681 if (dir_pos == dir_end) {
11682 eofflag = 1;
11683 }
11684
11685 while (dir_pos < dir_end) {
11686 /*
11687 * Check for . and .. as well as directories
11688 */
11689 if (dp->d_ino != 0 &&
11690 !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
11691 (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
11692 ) {
11693 error = unlink1(ctx, vp,
11694 CAST_USER_ADDR_T(dp->d_name), UIO_SYSSPACE,
11695 VNODE_REMOVE_SKIP_NAMESPACE_EVENT |
11696 VNODE_REMOVE_NO_AUDIT_PATH);
11697
11698 if (error && error != ENOENT) {
11699 goto outsc;
11700 }
11701 }
11702 dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
11703 dp = (struct dirent*)dir_pos;
11704 }
11705
11706 /*
11707 * workaround for HFS/NFS setting eofflag before end of file
11708 */
11709 if (vp->v_tag == VT_HFS && nentries > 2) {
11710 eofflag = 0;
11711 }
11712
11713 if (vp->v_tag == VT_NFS) {
11714 if (eofflag && !full_erase_flag) {
11715 full_erase_flag = 1;
11716 eofflag = 0;
11717 uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
11718 } else if (!eofflag && full_erase_flag) {
11719 full_erase_flag = 0;
11720 }
11721 }
11722 } while (!eofflag);
11723
11724
11725 error = 0;
11726
11727 outsc:
11728 if (open_flag) {
11729 VNOP_CLOSE(vp, FREAD, ctx);
11730 }
11731
11732 if (auio) {
11733 uio_free(auio);
11734 }
11735 kfree_data(rbuf, alloc_size);
11736
11737 if (saved_nodatalessfaults == false) {
11738 ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS;
11739 }
11740
11741 vnode_resume(vp);
11742
11743 return error;
11744 }
11745
11746
11747 void
lock_vnode_and_post(vnode_t vp,int kevent_num)11748 lock_vnode_and_post(vnode_t vp, int kevent_num)
11749 {
11750 /* Only take the lock if there's something there! */
11751 if (vp->v_knotes.slh_first != NULL) {
11752 vnode_lock(vp);
11753 KNOTE(&vp->v_knotes, kevent_num);
11754 vnode_unlock(vp);
11755 }
11756 }
11757
11758 void panic_print_vnodes(void);
11759
11760 /* define PANIC_PRINTS_VNODES only if investigation is required. */
11761 #ifdef PANIC_PRINTS_VNODES
11762
11763 static const char *
__vtype(uint16_t vtype)11764 __vtype(uint16_t vtype)
11765 {
11766 switch (vtype) {
11767 case VREG:
11768 return "R";
11769 case VDIR:
11770 return "D";
11771 case VBLK:
11772 return "B";
11773 case VCHR:
11774 return "C";
11775 case VLNK:
11776 return "L";
11777 case VSOCK:
11778 return "S";
11779 case VFIFO:
11780 return "F";
11781 case VBAD:
11782 return "x";
11783 case VSTR:
11784 return "T";
11785 case VCPLX:
11786 return "X";
11787 default:
11788 return "?";
11789 }
11790 }
11791
11792 /*
11793 * build a path from the bottom up
11794 * NOTE: called from the panic path - no alloc'ing of memory and no locks!
11795 */
static char *
__vpath(vnode_t vp, char *str, int len, int depth)
{
	int vnm_len;
	const char *src;
	char *dst;

	/* no room left in the output buffer */
	if (len <= 0) {
		return str;
	}
	/* str + len is the start of the string we created */
	if (!vp->v_name) {
		return str + len;
	}

	/* follow mount vnodes to get the full path */
	if ((vp->v_flag & VROOT)) {
		if (vp->v_mount != NULL && vp->v_mount->mnt_vnodecovered) {
			return __vpath(vp->v_mount->mnt_vnodecovered,
			           str, len, depth + 1);
		}
		return str + len;
	}

	src = vp->v_name;
	/* NOTE(review): strlen() returns size_t; assumes the name length fits in an int */
	vnm_len = strlen(src);
	if (vnm_len > len) {
		/* truncate the name to fit in the string */
		src += (vnm_len - len);
		vnm_len = len;
	}

	/* start from the back and copy just characters (no NULLs) */

	/* this will chop off leaf path (file) names */
	if (depth > 0) {
		/* copy this component into the tail of the remaining space */
		dst = str + len - vnm_len;
		memcpy(dst, src, vnm_len);
		len -= vnm_len;
	} else {
		dst = str + len;
	}

	if (vp->v_parent && len > 1) {
		/* follow parents up the chain */
		len--;
		*(dst - 1) = '/';
		return __vpath(vp->v_parent, str, len, depth + 1);
	}

	return dst;
}
11848
11849 #define SANE_VNODE_PRINT_LIMIT 5000
void
panic_print_vnodes(void)
{
	mount_t mnt;
	vnode_t vp;
	int nvnodes = 0;
	const char *type;
	char *nm;
	char vname[257];

	paniclog_append_noflush("\n***** VNODES *****\n"
	    "TYPE UREF ICNT PATH\n");

	/* NULL-terminate the path name */
	vname[sizeof(vname) - 1] = '\0';

	/*
	 * iterate all vnodelist items in all mounts (mntlist) -> mnt_vnodelist
	 */
	TAILQ_FOREACH(mnt, &mountlist, mnt_list) {
		/*
		 * NOTE(review): sizeof(mount_t) is the size of a pointer, not
		 * of struct mount, so only the first pointer-sized chunk of
		 * the structure is validated here -- confirm this is intended.
		 */
		if (!ml_validate_nofault((vm_offset_t)mnt, sizeof(mount_t))) {
			paniclog_append_noflush("Unable to iterate the mount list %p - encountered an invalid mount pointer %p \n",
			    &mountlist, mnt);
			break;
		}

		TAILQ_FOREACH(vp, &mnt->mnt_vnodelist, v_mntvnodes) {
			/* same pointer-size validation caveat as for the mount above */
			if (!ml_validate_nofault((vm_offset_t)vp, sizeof(vnode_t))) {
				paniclog_append_noflush("Unable to iterate the vnode list %p - encountered an invalid vnode pointer %p \n",
				    &mnt->mnt_vnodelist, vp);
				break;
			}

			/* cap the output so the panic log is not flooded */
			if (++nvnodes > SANE_VNODE_PRINT_LIMIT) {
				return;
			}
			type = __vtype(vp->v_type);
			nm = __vpath(vp, vname, sizeof(vname) - 1, 0);
			paniclog_append_noflush("%s %0d %0d %s\n",
			    type, vp->v_usecount, vp->v_iocount, nm);
		}
	}
}
11893
11894 #else /* !PANIC_PRINTS_VNODES */
void
panic_print_vnodes(void)
{
	/* vnode dumping compiled out; see PANIC_PRINTS_VNODES above */
}
11900 #endif
11901
11902
11903 #ifdef CONFIG_IOCOUNT_TRACE
11904 static void
record_iocount_trace_vnode(vnode_t vp,int type)11905 record_iocount_trace_vnode(vnode_t vp, int type)
11906 {
11907 void *stacks[IOCOUNT_TRACE_MAX_FRAMES] = {0};
11908 int idx = vp->v_iocount_trace[type].idx;
11909
11910 if (idx >= IOCOUNT_TRACE_MAX_IDX) {
11911 return;
11912 }
11913
11914 OSBacktrace((void **)&stacks[0], IOCOUNT_TRACE_MAX_FRAMES);
11915
11916 /*
11917 * To save index space, only store the unique backtraces. If dup is found,
11918 * just bump the count and return.
11919 */
11920 for (int i = 0; i < idx; i++) {
11921 if (memcmp(&stacks[0], &vp->v_iocount_trace[type].stacks[i][0],
11922 sizeof(stacks)) == 0) {
11923 vp->v_iocount_trace[type].counts[i]++;
11924 return;
11925 }
11926 }
11927
11928 memcpy(&vp->v_iocount_trace[type].stacks[idx][0], &stacks[0],
11929 sizeof(stacks));
11930 vp->v_iocount_trace[type].counts[idx] = 1;
11931 vp->v_iocount_trace[type].idx++;
11932 }
11933
11934 static void
record_iocount_trace_uthread(vnode_t vp,int count)11935 record_iocount_trace_uthread(vnode_t vp, int count)
11936 {
11937 struct uthread *ut;
11938
11939 ut = current_uthread();
11940 ut->uu_iocount += count;
11941
11942 if (count == 1) {
11943 if (ut->uu_vpindex < 32) {
11944 OSBacktrace((void **)&ut->uu_pcs[ut->uu_vpindex][0], 10);
11945
11946 ut->uu_vps[ut->uu_vpindex] = vp;
11947 ut->uu_vpindex++;
11948 }
11949 }
11950 }
11951
11952 static void
record_vp(vnode_t vp,int count)11953 record_vp(vnode_t vp, int count)
11954 {
11955 if (__probable(bootarg_vnode_iocount_trace == 0 &&
11956 bootarg_uthread_iocount_trace == 0)) {
11957 return;
11958 }
11959
11960 #if CONFIG_TRIGGERS
11961 if (vp->v_resolve) {
11962 return;
11963 }
11964 #endif
11965 if ((vp->v_flag & VSYSTEM)) {
11966 return;
11967 }
11968
11969 if (bootarg_vnode_iocount_trace) {
11970 record_iocount_trace_vnode(vp,
11971 (count > 0) ? IOCOUNT_TRACE_VGET : IOCOUNT_TRACE_VPUT);
11972 }
11973 if (bootarg_uthread_iocount_trace) {
11974 record_iocount_trace_uthread(vp, count);
11975 }
11976 }
11977 #endif /* CONFIG_IOCOUNT_TRACE */
11978
11979 #if CONFIG_TRIGGERS
11980 #define __triggers_unused
11981 #else
11982 #define __triggers_unused __unused
11983 #endif
11984
11985 resolver_result_t
vfs_resolver_result(__triggers_unused uint32_t seq,__triggers_unused enum resolver_status stat,__triggers_unused int aux)11986 vfs_resolver_result(__triggers_unused uint32_t seq, __triggers_unused enum resolver_status stat, __triggers_unused int aux)
11987 {
11988 #if CONFIG_TRIGGERS
11989 /*
11990 * |<--- 32 --->|<--- 28 --->|<- 4 ->|
11991 * sequence auxiliary status
11992 */
11993 return (((uint64_t)seq) << 32) |
11994 (((uint64_t)(aux & 0x0fffffff)) << 4) |
11995 (uint64_t)(stat & 0x0000000F);
11996 #else
11997 return (0x0ULL) | (((uint64_t)ENOTSUP) << 4) | (((uint64_t)RESOLVER_ERROR) & 0xF);
11998 #endif
11999 }
12000
12001 #if CONFIG_TRIGGERS
12002
12003 #define TRIG_DEBUG 0
12004
12005 #if TRIG_DEBUG
12006 #define TRIG_LOG(...) do { printf("%s: ", __FUNCTION__); printf(__VA_ARGS__); } while (0)
12007 #else
12008 #define TRIG_LOG(...)
12009 #endif
12010
12011 /*
12012 * Resolver result functions
12013 */
12014
12015
12016 enum resolver_status
vfs_resolver_status(resolver_result_t result)12017 vfs_resolver_status(resolver_result_t result)
12018 {
12019 /* lower 4 bits is status */
12020 return result & 0x0000000F;
12021 }
12022
12023 uint32_t
vfs_resolver_sequence(resolver_result_t result)12024 vfs_resolver_sequence(resolver_result_t result)
12025 {
12026 /* upper 32 bits is sequence */
12027 return (uint32_t)(result >> 32);
12028 }
12029
12030 int
vfs_resolver_auxiliary(resolver_result_t result)12031 vfs_resolver_auxiliary(resolver_result_t result)
12032 {
12033 /* 28 bits of auxiliary */
12034 return (int)(((uint32_t)(result & 0xFFFFFFF0)) >> 4);
12035 }
12036
12037 /*
12038 * SPI
12039 * Call in for resolvers to update vnode trigger state
12040 */
12041 int
vnode_trigger_update(vnode_t vp,resolver_result_t result)12042 vnode_trigger_update(vnode_t vp, resolver_result_t result)
12043 {
12044 vnode_resolve_t rp;
12045 uint32_t seq;
12046 enum resolver_status stat;
12047
12048 if (vp->v_resolve == NULL) {
12049 return EINVAL;
12050 }
12051
12052 stat = vfs_resolver_status(result);
12053 seq = vfs_resolver_sequence(result);
12054
12055 if ((stat != RESOLVER_RESOLVED) && (stat != RESOLVER_UNRESOLVED)) {
12056 return EINVAL;
12057 }
12058
12059 rp = vp->v_resolve;
12060 lck_mtx_lock(&rp->vr_lock);
12061
12062 if (seq > rp->vr_lastseq) {
12063 if (stat == RESOLVER_RESOLVED) {
12064 rp->vr_flags |= VNT_RESOLVED;
12065 } else {
12066 rp->vr_flags &= ~VNT_RESOLVED;
12067 }
12068
12069 rp->vr_lastseq = seq;
12070 }
12071
12072 lck_mtx_unlock(&rp->vr_lock);
12073
12074 return 0;
12075 }
12076
12077 static int
vnode_resolver_attach(vnode_t vp,vnode_resolve_t rp,boolean_t ref)12078 vnode_resolver_attach(vnode_t vp, vnode_resolve_t rp, boolean_t ref)
12079 {
12080 int error;
12081
12082 vnode_lock_spin(vp);
12083 if (vp->v_resolve != NULL) {
12084 vnode_unlock(vp);
12085 return EINVAL;
12086 } else {
12087 vp->v_resolve = rp;
12088 }
12089 vnode_unlock(vp);
12090
12091 if (ref) {
12092 error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE);
12093 if (error != 0) {
12094 panic("VNODE_REF_FORCE didn't help...");
12095 }
12096 }
12097
12098 return 0;
12099 }
12100
12101 /*
12102 * VFS internal interfaces for vnode triggers
12103 *
12104 * vnode must already have an io count on entry
12105 * v_resolve is stable when io count is non-zero
12106 */
static int
vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, boolean_t external)
{
	vnode_resolve_t rp;
	int result;
	char byte;

#if 1
	/* minimum pointer test (debugging) */
	/* deliberately read through vnt_data so a bogus pointer faults here, early */
	if (tinfo->vnt_data) {
		byte = *((char *)tinfo->vnt_data);
	}
#endif
	/* Z_NOFAIL: this allocation cannot return NULL */
	rp = kalloc_type(struct vnode_resolve, Z_WAITOK | Z_NOFAIL);

	lck_mtx_init(&rp->vr_lock, &trigger_vnode_lck_grp, &trigger_vnode_lck_attr);

	/* snapshot the caller-supplied callbacks and state */
	rp->vr_resolve_func = tinfo->vnt_resolve_func;
	rp->vr_unresolve_func = tinfo->vnt_unresolve_func;
	rp->vr_rearm_func = tinfo->vnt_rearm_func;
	rp->vr_reclaim_func = tinfo->vnt_reclaim_func;
	rp->vr_data = tinfo->vnt_data;
	rp->vr_lastseq = 0;
	rp->vr_flags = tinfo->vnt_flags & VNT_VALID_MASK;
	/* external resolvers hold a (forced) usecount; see vnode_resolver_attach() */
	if (external) {
		rp->vr_flags |= VNT_EXTERNAL;
	}

	result = vnode_resolver_attach(vp, rp, external);
	if (result != 0) {
		goto out;
	}

	/* keep count of active trigger vnodes per mount */
	if (mp) {
		OSAddAtomic(1, &mp->mnt_numtriggers);
	}

	return result;

out:
	kfree_type(struct vnode_resolve, rp);
	return result;
}
12150
12151 static void
vnode_resolver_release(vnode_resolve_t rp)12152 vnode_resolver_release(vnode_resolve_t rp)
12153 {
12154 /*
12155 * Give them a chance to free any private data
12156 */
12157 if (rp->vr_data && rp->vr_reclaim_func) {
12158 rp->vr_reclaim_func(NULLVP, rp->vr_data);
12159 }
12160
12161 lck_mtx_destroy(&rp->vr_lock, &trigger_vnode_lck_grp);
12162 kfree_type(struct vnode_resolve, rp);
12163 }
12164
/* Called after the vnode has been drained */
static void
vnode_resolver_detach(vnode_t vp)
{
	vnode_resolve_t rp;
	mount_t mp;

	mp = vnode_mount(vp);

	/* unhook the resolver from the vnode under the vnode lock */
	vnode_lock(vp);
	rp = vp->v_resolve;
	vp->v_resolve = NULL;
	vnode_unlock(vp);

	/* NOTE(review): assumes a resolver is attached (rp != NULL) when called */
	/* drop the forced usecount taken at attach time for external resolvers */
	if ((rp->vr_flags & VNT_EXTERNAL) != 0) {
		vnode_rele_ext(vp, O_EVTONLY, 1);
	}

	vnode_resolver_release(rp);

	/* Keep count of active trigger vnodes per mount */
	OSAddAtomic(-1, &mp->mnt_numtriggers);
}
12188
/*
 * Re-arm an auto-rearm trigger vnode by invoking its rearm callback,
 * so it can transition back to the unresolved state.
 */
__private_extern__
void
vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	/* nothing to do without a resolver that requested auto-rearm */
	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_rearm_func == NULL) ||
	    (vp->v_resolve->vr_flags & VNT_AUTO_REARM) == 0) {
		return;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/*
	 * Check if VFS initiated this unmount. If so, we'll catch it after the unresolve completes.
	 */
	if (rp->vr_flags & VNT_VFS_UNMOUNTED) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	/* Check if this vnode is already armed */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		lck_mtx_unlock(&rp->vr_lock);
		return;
	}

	lck_mtx_unlock(&rp->vr_lock);

	/*
	 * NOTE(review): vr_lock is dropped across the callback, so the state
	 * checked above may change before the result is applied below; the
	 * sequence-number check is what keeps stale results from landing.
	 */
	result = rp->vr_rearm_func(vp, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* apply the result only if it is newer than the last one recorded */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED) {
			rp->vr_flags &= ~VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);
}
12236
/*
 * Resolve a trigger vnode during lookup: invoke its resolve callback
 * (which typically mounts something on it). Returns 0 if the vnode is
 * not a (topmost, unresolved) trigger, or the resolver's error.
 */
__private_extern__
int
vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	enum path_operation op;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	/*
	 * N.B. we cannot call vfs_context_can_resolve_triggers()
	 * here because we really only want to suppress that in
	 * the event the trigger will be resolved by something in
	 * user-space.  Any triggers that are resolved by the kernel
	 * do not pose a threat of deadlock.
	 */

	/* Only trigger on topmost vnodes */
	if ((vp->v_resolve == NULL) ||
	    (vp->v_resolve->vr_resolve_func == NULL) ||
	    (vp->v_mountedhere != NULL)) {
		return 0;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if (rp->vr_flags & VNT_RESOLVED) {
		lck_mtx_unlock(&rp->vr_lock);
		return 0;
	}

	lck_mtx_unlock(&rp->vr_lock);

#if CONFIG_MACF
	/* NOTE(review): vr_flags is read here without vr_lock held */
	if ((rp->vr_flags & VNT_KERN_RESOLVE) == 0) {
		/*
		 * VNT_KERN_RESOLVE indicates this trigger has no parameters
		 * at the discression of the accessing process other than
		 * the act of access. All other triggers must be checked
		 */
		int rv = mac_vnode_check_trigger_resolve(ctx, vp, &ndp->ni_cnd);
		if (rv != 0) {
			return rv;
		}
	}
#endif

	/*
	 * XXX
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * is there anyway to know this???
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 *
	 * XXX - should we use ISLASTCN to pick the op value???  Perhaps only leafs should
	 * get the richer set and non-leafs should get generic OP_LOOKUP?  TBD
	 */
	op = (ndp->ni_op < OP_MAXOP) ? ndp->ni_op: OP_LOOKUP;

	result = rp->vr_resolve_func(vp, &ndp->ni_cnd, op, 0, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* apply the result only if it is newer than the last one recorded */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_RESOLVED) {
			rp->vr_flags |= VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
}
12316
/*
 * Unresolve a trigger vnode by invoking its unresolve callback (which
 * typically unmounts what the trigger mounted). VNT_VFS_UNMOUNTED is
 * set for the duration so auto-rearm does not fire on this unmount.
 */
static int
vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx)
{
	vnode_resolve_t rp;
	resolver_result_t result;
	enum resolver_status status;
	uint32_t seq;

	if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_unresolve_func == NULL)) {
		return 0;
	}

	rp = vp->v_resolve;
	lck_mtx_lock(&rp->vr_lock);

	/* Check if this vnode is already resolved */
	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
		printf("vnode_trigger_unresolve: not currently resolved\n");
		lck_mtx_unlock(&rp->vr_lock);
		return 0;
	}

	/* mark the unmount as VFS-initiated; checked by vnode_trigger_rearm() */
	rp->vr_flags |= VNT_VFS_UNMOUNTED;

	lck_mtx_unlock(&rp->vr_lock);

	/*
	 * XXX
	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
	 * there can also be other legitimate lookups in parallel
	 *
	 * XXX - should we call this on a separate thread with a timeout?
	 */

	result = rp->vr_unresolve_func(vp, flags, rp->vr_data, ctx);
	status = vfs_resolver_status(result);
	seq = vfs_resolver_sequence(result);

	/* apply the result only if it is newer than the last one recorded */
	lck_mtx_lock(&rp->vr_lock);
	if (seq > rp->vr_lastseq) {
		if (status == RESOLVER_UNRESOLVED) {
			rp->vr_flags &= ~VNT_RESOLVED;
		}
		rp->vr_lastseq = seq;
	}
	rp->vr_flags &= ~VNT_VFS_UNMOUNTED;
	lck_mtx_unlock(&rp->vr_lock);

	/* On resolver errors, propagate the error back up */
	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
}
12368
12369 static int
triggerisdescendant(mount_t mp,mount_t rmp)12370 triggerisdescendant(mount_t mp, mount_t rmp)
12371 {
12372 int match = FALSE;
12373
12374 /*
12375 * walk up vnode covered chain looking for a match
12376 */
12377 name_cache_lock_shared();
12378
12379 while (1) {
12380 vnode_t vp;
12381
12382 /* did we encounter "/" ? */
12383 if (mp->mnt_flag & MNT_ROOTFS) {
12384 break;
12385 }
12386
12387 vp = mp->mnt_vnodecovered;
12388 if (vp == NULLVP) {
12389 break;
12390 }
12391
12392 mp = vp->v_mount;
12393 if (mp == rmp) {
12394 match = TRUE;
12395 break;
12396 }
12397 }
12398
12399 name_cache_unlock();
12400
12401 return match;
12402 }
12403
/* State carried across trigger_unmount_callback() iterations. */
struct trigger_unmount_info {
	vfs_context_t ctx;		/* context passed to vnode_trigger_unresolve() */
	mount_t top_mp;			/* top-level mount; iteration stops here */
	vnode_t trigger_vp;		/* covered vnode of a deferred trigger (held) */
	mount_t trigger_mp;		/* mount covering trigger_vp at capture time */
	uint32_t trigger_vid;		/* v_id of trigger_vp at capture time */
	int flags;			/* unmount flags for the unresolve call */
};
12412
/*
 * vfs_iterate() callback: find mounted trigger mounts nested beneath
 * infop->top_mp and unresolve them. Because a mount-iteration reference
 * is held on 'mp', its own covered vnode is only *saved* here and the
 * actual unresolve happens on the next iteration (or in the caller).
 */
static int
trigger_unmount_callback(mount_t mp, void * arg)
{
	struct trigger_unmount_info * infop = (struct trigger_unmount_info *)arg;
	boolean_t mountedtrigger = FALSE;

	/*
	 * When we encounter the top level mount we're done
	 */
	if (mp == infop->top_mp) {
		return VFS_RETURNED_DONE;
	}

	if ((mp->mnt_vnodecovered == NULL) ||
	    (vnode_getwithref(mp->mnt_vnodecovered) != 0)) {
		return VFS_RETURNED;
	}

	/* is 'mp' sitting on a resolved trigger vnode? */
	if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
	    (mp->mnt_vnodecovered->v_resolve != NULL) &&
	    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_RESOLVED)) {
		mountedtrigger = TRUE;
	}
	vnode_put(mp->mnt_vnodecovered);

	/*
	 * When we encounter a mounted trigger, check if its under the top level mount
	 */
	if (!mountedtrigger || !triggerisdescendant(mp, infop->top_mp)) {
		return VFS_RETURNED;
	}

	/*
	 * Process any pending nested mount (now that its not referenced)
	 */
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithvid(infop->trigger_vp, infop->trigger_vid) == 0)) {
		vnode_t vp = infop->trigger_vp;
		int error;

		/* drop the hold taken when the vnode was saved */
		vnode_drop(infop->trigger_vp);
		infop->trigger_vp = NULLVP;

		if (mp == vp->v_mountedhere) {
			vnode_put(vp);
			printf("trigger_unmount_callback: unexpected match '%s'\n",
			    mp->mnt_vfsstat.f_mntonname);
			return VFS_RETURNED;
		}
		if (infop->trigger_mp != vp->v_mountedhere) {
			vnode_put(vp);
			printf("trigger_unmount_callback: trigger mnt changed! (%p != %p)\n",
			    infop->trigger_mp, vp->v_mountedhere);
			goto savenext;
		}

		error = vnode_trigger_unresolve(vp, infop->flags, infop->ctx);
		vnode_put(vp);
		if (error) {
			printf("unresolving: '%s', err %d\n",
			    vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname :
			    "???", error);
			return VFS_RETURNED_DONE; /* stop iteration on errors */
		}
	} else if (infop->trigger_vp != NULLVP) {
		/* saved vnode was recycled (vid mismatch); just drop the hold */
		vnode_drop(infop->trigger_vp);
	}

savenext:
	/*
	 * We can't call resolver here since we hold a mount iter
	 * ref on mp so save its covered vp for later processing
	 */
	infop->trigger_vp = mp->mnt_vnodecovered;
	if ((infop->trigger_vp != NULLVP) &&
	    (vnode_getwithref(infop->trigger_vp) == 0)) {
		if (infop->trigger_vp->v_mountedhere == mp) {
			/* hold + vid so we can validate it when we come back */
			infop->trigger_vid = infop->trigger_vp->v_id;
			vnode_hold(infop->trigger_vp);
			infop->trigger_mp = mp;
		}
		vnode_put(infop->trigger_vp);
	}

	return VFS_RETURNED;
}
12499
12500 /*
12501 * Attempt to unmount any trigger mounts nested underneath a mount.
12502 * This is a best effort attempt and no retries are performed here.
12503 *
12504 * Note: mp->mnt_rwlock is held exclusively on entry (so be carefull)
12505 */
__private_extern__
void
vfs_nested_trigger_unmounts(mount_t mp, int flags, vfs_context_t ctx)
{
	struct trigger_unmount_info info;

	/* Must have trigger vnodes */
	if (mp->mnt_numtriggers == 0) {
		return;
	}
	/* Avoid recursive requests (by checking covered vnode) */
	if ((mp->mnt_vnodecovered != NULL) &&
	    (vnode_getwithref(mp->mnt_vnodecovered) == 0)) {
		boolean_t recursive = FALSE;

		/* VNT_VFS_UNMOUNTED on our own covered vnode means we're already inside an unresolve */
		if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
		    (mp->mnt_vnodecovered->v_resolve != NULL) &&
		    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_VFS_UNMOUNTED)) {
			recursive = TRUE;
		}
		vnode_put(mp->mnt_vnodecovered);
		if (recursive) {
			return;
		}
	}

	/*
	 * Attempt to unmount any nested trigger mounts (best effort)
	 */
	info.ctx = ctx;
	info.top_mp = mp;
	info.trigger_vp = NULLVP;
	info.trigger_vid = 0;
	info.trigger_mp = NULL;
	info.flags = flags;

	(void) vfs_iterate(VFS_ITERATE_TAIL_FIRST, trigger_unmount_callback, &info);

	/*
	 * Process remaining nested mount (now that its not referenced)
	 */
	if ((info.trigger_vp != NULLVP) &&
	    (vnode_getwithvid(info.trigger_vp, info.trigger_vid) == 0)) {
		vnode_t vp = info.trigger_vp;

		/* only unresolve if the saved vnode still covers the saved mount */
		if (info.trigger_mp == vp->v_mountedhere) {
			(void) vnode_trigger_unresolve(vp, flags, ctx);
		}
		vnode_put(vp);
		vnode_drop(vp);
	} else if (info.trigger_vp != NULLVP) {
		/* saved vnode was recycled (vid mismatch); just drop the hold */
		vnode_drop(info.trigger_vp);
	}
}
12560
12561 int
vfs_addtrigger(mount_t mp,const char * relpath,struct vnode_trigger_info * vtip,vfs_context_t ctx)12562 vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx)
12563 {
12564 struct nameidata *ndp;
12565 int res;
12566 vnode_t rvp, vp;
12567 struct vnode_trigger_param vtp;
12568
12569 /*
12570 * Must be called for trigger callback, wherein rwlock is held
12571 */
12572 lck_rw_assert(&mp->mnt_rwlock, LCK_RW_ASSERT_HELD);
12573
12574 TRIG_LOG("Adding trigger at %s\n", relpath);
12575 TRIG_LOG("Trying VFS_ROOT\n");
12576
12577 ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);
12578
12579 /*
12580 * We do a lookup starting at the root of the mountpoint, unwilling
12581 * to cross into other mountpoints.
12582 */
12583 res = VFS_ROOT(mp, &rvp, ctx);
12584 if (res != 0) {
12585 goto out;
12586 }
12587
12588 TRIG_LOG("Trying namei\n");
12589
12590 NDINIT(ndp, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE,
12591 CAST_USER_ADDR_T(relpath), ctx);
12592 ndp->ni_dvp = rvp;
12593 res = namei(ndp);
12594 if (res != 0) {
12595 vnode_put(rvp);
12596 goto out;
12597 }
12598
12599 vp = ndp->ni_vp;
12600 nameidone(ndp);
12601 vnode_put(rvp);
12602
12603 TRIG_LOG("Trying vnode_resolver_create()\n");
12604
12605 /*
12606 * Set up blob. vnode_create() takes a larger structure
12607 * with creation info, and we needed something different
12608 * for this case. One needs to win, or we need to munge both;
12609 * vnode_create() wins.
12610 */
12611 bzero(&vtp, sizeof(vtp));
12612 vtp.vnt_resolve_func = vtip->vti_resolve_func;
12613 vtp.vnt_unresolve_func = vtip->vti_unresolve_func;
12614 vtp.vnt_rearm_func = vtip->vti_rearm_func;
12615 vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
12616 vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
12617 vtp.vnt_data = vtip->vti_data;
12618 vtp.vnt_flags = vtip->vti_flags;
12619
12620 res = vnode_resolver_create(mp, vp, &vtp, TRUE);
12621 vnode_put(vp);
12622 out:
12623 kfree_type(struct nameidata, ndp);
12624 TRIG_LOG("Returning %d\n", res);
12625 return res;
12626 }
12627
12628 #endif /* CONFIG_TRIGGERS */
12629
/*
 * Return an address-permuted token for 'vp' suitable for kdebug trace
 * records, so raw kernel pointers are not exposed in the trace stream.
 */
vm_offset_t
kdebug_vnode(vnode_t vp)
{
	return VM_KERNEL_ADDRPERM(vp);
}
12635
/* kern.flush_cache_on_write: when set, writes to uncached files also flush the drive cache */
static int flush_cache_on_write = 0;
SYSCTL_INT(_kern, OID_AUTO, flush_cache_on_write,
    CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0,
    "always flush the drive cache on writes to uncached files");
12640
12641 int
vnode_should_flush_after_write(vnode_t vp,int ioflag)12642 vnode_should_flush_after_write(vnode_t vp, int ioflag)
12643 {
12644 return flush_cache_on_write
12645 && (ISSET(ioflag, IO_NOCACHE) || vnode_isnocache(vp));
12646 }
12647
12648 /*
12649 * sysctl for use by disk I/O tracing tools to get the list of existing
12650 * vnodes' paths
12651 */
12652
/* number of unsigned longs needed to hold a MAXPATHLEN path */
#define NPATH_WORDS (MAXPATHLEN / sizeof(unsigned long))
struct vnode_trace_paths_context {
	uint64_t count;	/* vnodes visited since the last preemption yield */
	/*
	 * Must be a multiple of 4, then -1, for tracing!
	 */
	unsigned long path[NPATH_WORDS + (4 - (NPATH_WORDS % 4)) - 1];
};
12661
12662 static int
vnode_trace_path_callback(struct vnode * vp,void * vctx)12663 vnode_trace_path_callback(struct vnode *vp, void *vctx)
12664 {
12665 struct vnode_trace_paths_context *ctx = vctx;
12666 size_t path_len = sizeof(ctx->path);
12667
12668 int getpath_len = (int)path_len;
12669 if (vn_getpath(vp, (char *)ctx->path, &getpath_len) == 0) {
12670 /* vn_getpath() NUL-terminates, and len includes the NUL. */
12671 assert(getpath_len >= 0);
12672 path_len = (size_t)getpath_len;
12673
12674 assert(path_len <= sizeof(ctx->path));
12675 kdebug_vfs_lookup((const char *)ctx->path, path_len, vp,
12676 KDBG_VFSLKUP_LOOKUP | KDBG_VFS_LOOKUP_FLAG_NOPROCFILT);
12677
12678 if (++(ctx->count) == 1000) {
12679 thread_yield_to_preemption();
12680 ctx->count = 0;
12681 }
12682 }
12683
12684 return VNODE_RETURNED;
12685 }
12686
12687 static int
vfs_trace_paths_callback(mount_t mp,void * arg)12688 vfs_trace_paths_callback(mount_t mp, void *arg)
12689 {
12690 if (mp->mnt_flag & MNT_LOCAL) {
12691 vnode_iterate(mp, VNODE_ITERATE_ALL, vnode_trace_path_callback, arg);
12692 }
12693
12694 return VFS_RETURNED;
12695 }
12696
12697 static int sysctl_vfs_trace_paths SYSCTL_HANDLER_ARGS {
12698 struct vnode_trace_paths_context ctx;
12699
12700 (void)oidp;
12701 (void)arg1;
12702 (void)arg2;
12703 (void)req;
12704
12705 if (!kauth_cred_issuser(kauth_cred_get())) {
12706 return EPERM;
12707 }
12708
12709 if (!kdebug_enable || !kdebug_debugid_enabled(VFS_LOOKUP)) {
12710 return EINVAL;
12711 }
12712
12713 bzero(&ctx, sizeof(struct vnode_trace_paths_context));
12714
12715 vfs_iterate(0, vfs_trace_paths_callback, &ctx);
12716
12717 return 0;
12718 }
12719
/* vfs.generic.trace_paths: root-only trigger that dumps all vnode paths into kdebug */
SYSCTL_PROC(_vfs_generic, OID_AUTO, trace_paths, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, NULL, 0, &sysctl_vfs_trace_paths, "-", "trace_paths");
12721
12722 #if CONFIG_FILE_LEASES
12723 #include <IOKit/IOBSD.h>
12724 #include <sys/file_internal.h>
12725
/* entitlement required to place file leases (see allow_setlease()) */
#define FILE_LEASES_ENTITLEMENT "com.apple.private.vfs.file-leases"

/* how long a lease breaker waits for the lease holder to release (secs) */
static uint32_t lease_break_timeout = 60; /* secs */

#if (DEVELOPMENT || DEBUG)
/* debug-only knobs, exposed under vfs.lease.* */
static int lease_debug = 0;
static int lease_entitlement_override = 0;

SYSCTL_NODE(_vfs, OID_AUTO, lease, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs lease");
SYSCTL_UINT(_vfs_lease, OID_AUTO, break_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &lease_break_timeout, 0, "");
SYSCTL_INT(_vfs_lease, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lease_debug, 0, "");
SYSCTL_INT(_vfs_lease, OID_AUTO, entitlement_override, CTLFLAG_RW | CTLFLAG_LOCKED, &lease_entitlement_override, 0, "");

/* printf-style debug logging, active only when vfs.lease.debug is set */
#define LEASEDBG(fmt, args...) \
	do { \
	        if (__improbable(lease_debug)) { \
	                pid_t cur_pid = proc_getpid(current_proc()); \
	                printf("%s(%d): " fmt "\n", __func__, cur_pid, ##args); \
	        } \
	} while(0)
#else
#define LEASEDBG(fmt, args...) /**/
#endif /* (DEVELOPMENT || DEBUG) */
12749
12750 static bool
allow_setlease(vfs_context_t ctx)12751 allow_setlease(vfs_context_t ctx)
12752 {
12753 bool entitled;
12754
12755 entitled = IOTaskHasEntitlement(vfs_context_task(ctx),
12756 FILE_LEASES_ENTITLEMENT);
12757
12758 #if (DEVELOPMENT || DEBUG)
12759 if (!entitled) {
12760 entitled = (lease_entitlement_override == 1);
12761 }
12762 #endif
12763
12764 return entitled;
12765 }
12766
12767 static file_lease_t
file_lease_alloc(struct fileglob * fg,int fl_type,pid_t pid)12768 file_lease_alloc(struct fileglob *fg, int fl_type, pid_t pid)
12769 {
12770 file_lease_t fl;
12771
12772 fl = kalloc_type(struct file_lease, Z_WAITOK);
12773 /*
12774 * Duplicated file descriptors created by dup() or fork() would have the
12775 * same 'fileglob' so the lease can be released or modified with the
12776 * duplicated fds. Opening the same file (by either same or different
12777 * process) would have different 'fileglob' so a lease always follows a
12778 * 'fileglob'.
12779 */
12780 fl->fl_fg = fg;
12781 fl->fl_type = fl_type;
12782 fl->fl_pid = pid;
12783 fl->fl_downgrade_start = fl->fl_release_start = 0;
12784
12785 return fl;
12786 }
12787
/* Release a lease record allocated by file_lease_alloc(). */
static void
file_lease_free(file_lease_t fl)
{
	kfree_type(struct file_lease, fl);
}
12793
12794 /*
12795 * A read lease can be placed only on a file/directory that is opened for
12796 * read-only which means no other processes have the file/directory opened in
12797 * read-write/write-only mode or mmap'ed writable.
12798 * A write lease can be placed on a file only if there are no other opens
12799 * for the file.
12800 *
12801 * Needs to be called with vnode's lock held.
12802 */
static int
check_for_open_conflict(vnode_t vp, struct fileglob *fg, int fl_type,
    int expcounts)
{
	int error = 0;

	if (fl_type == F_RDLCK) {
		/*
		 * Read lease: conflict if there are more writable opens than the
		 * caller told us to expect, unless the sole writer is the very fd
		 * the lease is being placed on (fg opened with FWRITE). A writable
		 * memory mapping counts as a writer too.
		 */
		if (vp->v_writecount > expcounts &&
		    !(vp->v_writecount == 1 && (fg->fg_flag & FWRITE))) {
			error = EAGAIN;
		} else if (ubc_is_mapped_writable(vp)) {
			error = EAGAIN;
		}
	} else if (fl_type == F_WRLCK && vp->v_usecount > expcounts) {
		/* Write lease: any open beyond the expected count conflicts. */
		error = EAGAIN;
	}

	return error;
}
12822
/*
 * Change an existing lease's type (and optionally the owning fileglob).
 * Needs to be called with vnode's lock held.
 */
static void
modify_file_lease(vnode_t vp, file_lease_t fl, int new_fl_type,
    struct fileglob *new_fg)
{
	LEASEDBG("fl %p changing fl_type from %d to %d (flags 0x%x)",
	    fl, fl->fl_type, new_fl_type, fl->fl_flags);

	fl->fl_type = new_fl_type;

	/*
	 * The lease being modified may be using a different file
	 * descriptor, so usurp the fileglob pointer here. In this
	 * case the old descriptor no longer holds the lease.
	 */
	if (new_fg != NULL) {
		fl->fl_fg = new_fg;
	}

	/*
	 * If a lease breaker is blocked waiting for this lease to be
	 * downgraded or released, wake it so it can re-check for conflicts.
	 */
	if (fl->fl_flags & FL_FLAG_RELEASE_PENDING ||
	    fl->fl_flags & FL_FLAG_DOWNGRADE_PENDING) {
		wakeup(&vp->v_leases);
	}
}
12847
12848 static int
acquire_file_lease(vnode_t vp,struct fileglob * fg,int fl_type,int expcounts,vfs_context_t ctx)12849 acquire_file_lease(vnode_t vp, struct fileglob *fg, int fl_type, int expcounts,
12850 vfs_context_t ctx)
12851 {
12852 file_lease_t fl, new_fl, our_fl;
12853 int error;
12854
12855 /* Make sure "expected count" looks sane. */
12856 if (expcounts < 0 || expcounts > OPEN_MAX) {
12857 return EINVAL;
12858 }
12859
12860 new_fl = file_lease_alloc(fg, fl_type, vfs_context_pid(ctx));
12861
12862 vnode_lock(vp);
12863
12864 error = check_for_open_conflict(vp, fg, fl_type, expcounts);
12865 if (error) {
12866 LEASEDBG("open conflict on vp %p type %d writecnt %d usecnt %d "
12867 "fl_type %d expcounts %d",
12868 vp, vp->v_type, vp->v_writecount, vp->v_usecount, fl_type,
12869 expcounts);
12870 goto out;
12871 }
12872
12873 our_fl = NULL;
12874 LIST_FOREACH(fl, &vp->v_leases, fl_link) {
12875 /* Does the existing lease belong to us? */
12876 if (fl->fl_fg == new_fl->fl_fg ||
12877 fl->fl_pid == new_fl->fl_pid) {
12878 our_fl = fl;
12879 continue;
12880 }
12881
12882 /*
12883 * We don't allow placing a new write lease when there is an existing
12884 * read lease that doesn't belong to us. We also don't allow putting
12885 * a new read lease if there is a pending release on the lease.
12886 * Putting a new read lease when there is a pending downgrade on the
12887 * lease is fine as it won't cause lease conflict.
12888 */
12889 if (fl_type == F_WRLCK || fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
12890 break;
12891 }
12892 }
12893
12894 /*
12895 * Found an existing lease that we don't own and it conflicts with the
12896 * new lease.
12897 */
12898 if (fl) {
12899 LEASEDBG("lease conflict on vp %p fl %p fl_type %d cur_fl_type %d",
12900 vp, fl, fl_type, fl->fl_type);
12901 goto out;
12902 }
12903
12904 /* Found an existing lease that we own so just change the type. */
12905 if (our_fl) {
12906 LEASEDBG("replace lease on vp %p fl %p old_fl_type %d new_fl_type %d",
12907 vp, our_fl, our_fl->fl_type, fl_type);
12908
12909 modify_file_lease(vp, our_fl, new_fl->fl_type, new_fl->fl_fg);
12910 goto out;
12911 }
12912
12913 LEASEDBG("acquired lease on vp %p type %d fl %p fl_type %d fg %p",
12914 vp, vp->v_type, new_fl, new_fl->fl_type, new_fl->fl_fg);
12915
12916 LIST_INSERT_HEAD(&vp->v_leases, new_fl, fl_link);
12917 new_fl = NULL;
12918
12919 out:
12920 vnode_unlock(vp);
12921
12922 if (new_fl) {
12923 file_lease_free(new_fl);
12924 }
12925
12926 return error;
12927 }
12928
/*
 * Remove the lease placed via 'fg' from 'vp'.
 * Returns 0 on success, ENOLCK if no lease for this fileglob exists.
 */
static int
release_file_lease(vnode_t vp, struct fileglob *fg)
{
	file_lease_t fl, fl_tmp;
	int error = 0;

	LEASEDBG("request to release lease on vp %p type %d fg %p",
	    vp, vp->v_type, fg);

	vnode_lock(vp);

	/* Leases are keyed by fileglob; find the one placed via 'fg'. */
	LIST_FOREACH_SAFE(fl, &vp->v_leases, fl_link, fl_tmp) {
		if (fl->fl_fg == fg) {
			LEASEDBG("released lease on vp %p fl %p type %d",
			    vp, fl, fl->fl_type);

			LIST_REMOVE(fl, fl_link);
			/* F_UNLCK retype also wakes any blocked lease breaker. */
			modify_file_lease(vp, fl, F_UNLCK, NULL);
			break;
		}
	}

	vnode_unlock(vp);

	/*
	 * LIST_FOREACH_SAFE leaves 'fl' NULL when the loop runs to completion,
	 * so non-NULL here means a lease was found and unlinked above; free it
	 * now that the lock is dropped.
	 */
	if (fl) {
		file_lease_free(fl);
	} else {
		error = ENOLCK;
	}

	return error;
}
12961
12962 /*
12963 * Acquire or release a file lease according to the given type (F_RDLCK,
12964 * F_WRLCK or F_UNLCK).
12965 *
12966 * Returns: 0 Success
12967 * EAGAIN Failed to acquire a file lease due to conflicting opens
12968 * ENOLCK Failed to release a file lease due to lease not found
12969 * EPERM Current task doesn't have the entitlement
12970 */
12971 int
vnode_setlease(vnode_t vp,struct fileglob * fg,int fl_type,int expcounts,vfs_context_t ctx)12972 vnode_setlease(vnode_t vp, struct fileglob *fg, int fl_type, int expcounts,
12973 vfs_context_t ctx)
12974 {
12975 int error;
12976
12977 if (!allow_setlease(ctx)) {
12978 return EPERM;
12979 }
12980
12981 error = (fl_type == F_UNLCK) ? release_file_lease(vp, fg) :
12982 acquire_file_lease(vp, fg, fl_type, expcounts, ctx);
12983
12984 return error;
12985 }
12986
12987 /*
12988 * Retrieve the currently in place lease for the file.
12989 *
12990 * Returns:
12991 * F_RDLCK Read lease
12992 * F_WRLCK Write lease
12993 * F_UNLCK No lease
12994 */
12995 int
vnode_getlease(vnode_t vp)12996 vnode_getlease(vnode_t vp)
12997 {
12998 file_lease_t fl;
12999 int fl_type = F_UNLCK;
13000
13001 vnode_lock(vp);
13002
13003 /*
13004 * There should be only one type of lease in the list as read and write
13005 * leases can't co-exist for the same file.
13006 */
13007 fl = LIST_FIRST(&vp->v_leases);
13008 if (fl) {
13009 fl_type = fl->fl_type;
13010 }
13011
13012 vnode_unlock(vp);
13013
13014 LEASEDBG("vp %p fl %p fl_type %d", vp, fl, fl_type);
13015
13016 return fl_type;
13017 }
13018
/*
 * Return true when any lease on 'vp' conflicts with a breaker requesting
 * 'breaker_fl_type' access: another process holds a write lease, or the
 * breaker wants write access and another process holds any lease. A process
 * never conflicts with its own lease. Must be called with vnode's lock held.
 */
static bool
check_for_lease_conflict(vnode_t vp, int breaker_fl_type, vfs_context_t ctx)
{
	file_lease_t fl;
	pid_t pid = vfs_context_pid(ctx);
	bool is_conflict = false;

	LIST_FOREACH(fl, &vp->v_leases, fl_link) {
		if ((fl->fl_type == F_WRLCK && fl->fl_pid != pid) ||
		    (breaker_fl_type == F_WRLCK && fl->fl_pid != pid)) {
			LEASEDBG("conflict detected on vp %p type %d fl_type %d "
			    "breaker_fl_type %d",
			    vp, vp->v_type, fl->fl_type, breaker_fl_type);

			is_conflict = true;
			break;
		}
	}

	return is_conflict;
}
13041
13042 static uint64_t
absolutetime_elapsed_in_secs(uint64_t start)13043 absolutetime_elapsed_in_secs(uint64_t start)
13044 {
13045 uint64_t elapsed, elapsed_sec;
13046 uint64_t now = mach_absolute_time();
13047
13048 elapsed = now - start;
13049 absolutetime_to_nanoseconds(elapsed, &elapsed_sec);
13050 elapsed_sec /= NSEC_PER_SEC;
13051
13052 return elapsed_sec;
13053 }
13054
/*
 * Force-resolve every pending lease break whose grace period has expired:
 * a timed-out downgrade becomes a read lease, a timed-out release removes
 * the lease outright. Must be called with vnode's lock held.
 */
static void
handle_lease_break_timedout(vnode_t vp)
{
	file_lease_t fl, fl_tmp;
	uint64_t elapsed_sec;

	LIST_FOREACH_SAFE(fl, &vp->v_leases, fl_link, fl_tmp) {
		if (fl->fl_flags & FL_FLAG_DOWNGRADE_PENDING) {
			elapsed_sec = absolutetime_elapsed_in_secs(fl->fl_downgrade_start);

			if (elapsed_sec >= lease_break_timeout) {
				LEASEDBG("force downgrade on vp %p for fl %p elapsed %llu "
				    "timeout %u", vp, fl, elapsed_sec, lease_break_timeout);

				/* Holder didn't act in time: downgrade on its behalf. */
				fl->fl_flags &= ~FL_FLAG_DOWNGRADE_PENDING;
				fl->fl_downgrade_start = 0;
				modify_file_lease(vp, fl, F_RDLCK, NULL);
				continue;
			}
		}
		if (fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
			elapsed_sec = absolutetime_elapsed_in_secs(fl->fl_release_start);

			if (elapsed_sec >= lease_break_timeout) {
				LEASEDBG("force release on vp %p for fl %p elapsed %llu "
				    "timeout %u", vp, fl, elapsed_sec, lease_break_timeout);

				/* Holder didn't act in time: drop the lease entirely. */
				LIST_REMOVE(fl, fl_link);
				file_lease_free(fl);
				continue;
			}
		}
	}

	/* Wakeup the lease breaker(s). */
	wakeup(&vp->v_leases);
}
13093
13094 /* Must be called with vnode's lock held. */
13095 static void
wait_for_lease_break(vnode_t vp,int breaker_fl_type,vfs_context_t ctx)13096 wait_for_lease_break(vnode_t vp, int breaker_fl_type, vfs_context_t ctx)
13097 {
13098 file_lease_t fl;
13099 struct timespec ts;
13100 uint64_t elapsed_sec, start_time;
13101 int error;
13102
13103 restart:
13104 fl = LIST_FIRST(&vp->v_leases);
13105 assert(fl);
13106
13107 /*
13108 * In a rare case it is possible that the lease that we are blocked on has
13109 * been released and a new lease has been put in place after we are
13110 * signalled to wake up. In this particular, we would treat it as no
13111 * conflict and proceed. This could only happen for directory leasing.
13112 */
13113 if ((fl->fl_flags & (FL_FLAG_DOWNGRADE_PENDING | FL_FLAG_RELEASE_PENDING)) == 0) {
13114 LEASEDBG("new lease in place on vp %p fl %p fl_type %d "
13115 "breaker_fl_type %d",
13116 vp, fl, fl->fl_type, breaker_fl_type);
13117
13118 return;
13119 }
13120 /*
13121 * Figure out which timer to use for lease break timedout as we could have
13122 * both timers active. If both timers active, pick the one with earliest
13123 * start time.
13124 */
13125 if (fl->fl_release_start) {
13126 if (fl->fl_downgrade_start == 0 ||
13127 fl->fl_downgrade_start < fl->fl_release_start) {
13128 start_time = fl->fl_release_start;
13129 } else {
13130 start_time = fl->fl_downgrade_start;
13131 }
13132 } else {
13133 start_time = fl->fl_downgrade_start;
13134 }
13135 assert(start_time > 0);
13136
13137 elapsed_sec = absolutetime_elapsed_in_secs(start_time);
13138
13139 LEASEDBG("elapsed_sec %llu release_start %llu downgrade_start %llu",
13140 elapsed_sec, fl->fl_release_start, fl->fl_downgrade_start);
13141
13142 ts.tv_sec = (lease_break_timeout > elapsed_sec ?
13143 (lease_break_timeout - elapsed_sec) : 0);
13144 ts.tv_nsec = (ts.tv_sec == 0 ? 1 : 0);
13145 error = msleep(&vp->v_leases, &vp->v_lock, PVFS, __func__, &ts);
13146
13147 if (error == 0 || error != EWOULDBLOCK) {
13148 /*
13149 * Woken up due to lease is released/downgraded by lease holder.
13150 * We don't expect any other error from msleep() beside EWOULDBLOCK.
13151 * Check if there is any further conflicts. If so, then continue to
13152 * wait for the next conflict to resolve.
13153 */
13154 if (check_for_lease_conflict(vp, breaker_fl_type, ctx)) {
13155 goto restart;
13156 }
13157 } else {
13158 /*
13159 * Woken due to lease break timeout expired (EWOULDBLOCK returned).
13160 * Break/downgrade all conflicting leases.
13161 */
13162 handle_lease_break_timedout(vp);
13163
13164 if (check_for_lease_conflict(vp, breaker_fl_type, ctx)) {
13165 goto restart;
13166 }
13167 }
13168 }
13169
13170 /* Must be called with vnode's lock held. */
13171 static void
send_lease_break_event(vnode_t vp,uint32_t event)13172 send_lease_break_event(vnode_t vp, uint32_t event)
13173 {
13174 if (vp->v_knotes.slh_first != NULL) {
13175 KNOTE(&vp->v_knotes, event);
13176 }
13177 }
13178
13179 static bool
is_dataless_file(vnode_t vp,vfs_context_t ctx)13180 is_dataless_file(vnode_t vp, vfs_context_t ctx)
13181 {
13182 struct vnode_attr va;
13183 bool is_dataless = false;
13184 int error;
13185
13186 VATTR_INIT(&va);
13187 VATTR_WANTED(&va, va_flags);
13188
13189 error = vnode_getattr(vp, &va, ctx);
13190 if (!error && (va.va_flags & SF_DATALESS)) {
13191 is_dataless = true;
13192 }
13193
13194 return is_dataless;
13195 }
13196
13197 /*
13198 * Break lease(s) in place for the file when there is conflict.
13199 * This function would return 0 for almost all call sites. The only exception
13200 * is when it is called from open1() with O_NONBLOCK flag and it needs to block
13201 * waiting for the lease conflict(s) to resolve. In this case EWOULDBLOCK is
13202 * returned.
13203 */
int
vnode_breaklease(vnode_t vp, uint32_t oflags, vfs_context_t ctx)
{
	file_lease_t fl;
	uint64_t now;
	int fl_type;
	int error = 0;

	vnode_lock(vp);

	/* Fast path: no leases on this vnode at all. */
	if (__probable(LIST_EMPTY(&vp->v_leases))) {
		goto out_unlock;
	}

	/* Determine the access mode requested by the lease breaker. */
	fl_type = (oflags & (O_WRONLY | O_RDWR | O_CREAT | O_TRUNC)) ? F_WRLCK : F_RDLCK;

	/*
	 * If the lease-breaker is just reading, check that it can break
	 * leases first. If the lease-breaker is writing, or if the
	 * context was not specified, we always break.
	 * We skip lease break if the lease-breaker is dataless manipulator and
	 * the file is dataless.
	 */
	if ((fl_type == F_RDLCK && !vfs_context_can_break_leases(ctx)) ||
	    (vfs_context_is_dataless_manipulator(ctx) && (vp->v_type == VREG) &&
	    is_dataless_file(vp, ctx))) {
		goto out_unlock;
	}

	if (!check_for_lease_conflict(vp, fl_type, ctx)) {
		goto out_unlock;
	}

	now = mach_absolute_time();

	LEASEDBG("break lease on vp %p type %d oflags 0x%x cur_time %llu",
	    vp, vp->v_type, oflags, now);

	/*
	 * We get to this point then this means all lease(s) are conflict and
	 * we need to send the lease break event to the lease holder(s).
	 * It is possible that a lease could have both downgrade and release events
	 * pending triggered by multiple breakers trying to open the file in
	 * different modes. Both events would have different lease break timers.
	 * Consider the following case:
	 * 1. Process A holds the write lease on file X.
	 * 2. Process B opens the file X in read-only mode.
	 *    This triggers downgrade lease event to Process A.
	 * 3. While downgrade is pending, Process C opens the file X in read-write
	 *    mode. This triggers release lease event to Process A.
	 */
	LIST_FOREACH(fl, &vp->v_leases, fl_link) {
		if (fl_type == F_WRLCK) {
			/* File is opened for writing or truncate. */
			if (fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
				continue;
			}
			fl->fl_release_start = now;
			fl->fl_flags |= FL_FLAG_RELEASE_PENDING;
			send_lease_break_event(vp, NOTE_LEASE_RELEASE);
		} else {
			/* File is opened for reading. */
			if (fl->fl_flags & FL_FLAG_DOWNGRADE_PENDING ||
			    fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
				continue;
			}
			fl->fl_downgrade_start = now;
			fl->fl_flags |= FL_FLAG_DOWNGRADE_PENDING;
			send_lease_break_event(vp, NOTE_LEASE_DOWNGRADE);
		}
	}

	/*
	 * If open is requested with O_NONBLOCK, then we can't block and wait for
	 * the lease to be released/downgraded. Just bail out with EWOULDBLOCK.
	 */
	if (oflags & O_NONBLOCK) {
		error = EWOULDBLOCK;
		goto out;
	}

	/* Sleeps via msleep(), which drops/retakes the vnode lock. */
	wait_for_lease_break(vp, fl_type, ctx);

out:
	LEASEDBG("break lease on vp %p oflags 0x%x, error %d", vp, oflags, error);

out_unlock:
	vnode_unlock(vp);

	return error;
}
13296
13297 /*
13298 * Get parent vnode by parent ID (only for file system that supports
13299 * MNTK_PATH_FROM_ID).
13300 * On success, the parent's vnode is returned with iocount held.
13301 */
static vnode_t
vnode_getparent_byid(vnode_t vp)
{
	struct vnode_attr va;
	vnode_t dvp = NULLVP;
	vfs_context_t ctx = vfs_context_current();
	int error;

	/* Only file systems that can map an id back to a path support this. */
	if (!(vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
		goto out;
	}

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_parentid);

	/* Get the vnode's parent id from the file system. */
	error = vnode_getattr(vp, &va, ctx);
	if (error || !VATTR_IS_SUPPORTED(&va, va_parentid)) {
		goto out;
	}

	/*
	 * Ask the file system for the parent vnode.
	 * We are ignoring the error here as we don't expect the parent vnode to be
	 * populated on error.
	 */
	(void)VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx);

out:
	/* NULLVP on any failure; on success the iocount is held (see VFS_VGET). */
	return dvp;
}
13333
13334 /*
13335 * Break directory's lease.
13336 * If 'need_parent' is true, then parent is obtained via vnode_getparent() (or
13337 * vnode_getparent_byid()) on the provided 'vp'.
13338 */
13339 void
vnode_breakdirlease(vnode_t vp,bool need_parent,uint32_t oflags)13340 vnode_breakdirlease(vnode_t vp, bool need_parent, uint32_t oflags)
13341 {
13342 vnode_t dvp;
13343
13344 if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR) ||
13345 (vp == rootvnode)) {
13346 return;
13347 }
13348
13349 /*
13350 * If parent is not provided, first try to get it from the name cache.
13351 * If failed, then we will attempt to ask the file system for parent vnode.
13352 * This is just a best effort as both attempts could still fail.
13353 */
13354 if (need_parent) {
13355 dvp = vnode_getparent(vp);
13356 if (__improbable(dvp == NULLVP)) {
13357 dvp = vnode_getparent_byid(vp);
13358 }
13359 } else {
13360 dvp = vp;
13361 }
13362
13363 if (__probable(dvp != NULLVP)) {
13364 /* Always break dir leases. */
13365 (void)vnode_breaklease(dvp, oflags, vfs_context_current());
13366 }
13367
13368 if (need_parent && (dvp != NULLVP)) {
13369 vnode_put(dvp);
13370 }
13371 }
13372
13373 /*
13374 * Revoke all lease(s) in place for the file.
13375 * This is called when the vnode is reclaimed.
13376 */
void
vnode_revokelease(vnode_t vp, bool locked)
{
	file_lease_t fl, fl_tmp;
	bool need_wakeup = false;

	/* Only regular files and directories can carry leases. */
	if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR)) {
		return;
	}

	if (!locked) {
		vnode_lock(vp);
	}

	/* Tear down every lease unconditionally. */
	LIST_FOREACH_SAFE(fl, &vp->v_leases, fl_link, fl_tmp) {
		LIST_REMOVE(fl, fl_link);
		file_lease_free(fl);
		need_wakeup = true;
	}

	/* Wakeup any lease breaker(s) that might be currently blocked. */
	if (__improbable(need_wakeup)) {
		wakeup(&vp->v_leases);
	}

	if (!locked) {
		vnode_unlock(vp);
	}
}
13406
13407 #endif /* CONFIG_FILE_LEASES */
13408
13409 errno_t
vnode_rdadvise(vnode_t vp,off_t offset,int len,vfs_context_t ctx)13410 vnode_rdadvise(vnode_t vp, off_t offset, int len, vfs_context_t ctx)
13411 {
13412 struct radvisory ra_struct;
13413
13414 assert(vp);
13415
13416 if (offset < 0 || len < 0) {
13417 return EINVAL;
13418 }
13419
13420 ra_struct.ra_offset = offset;
13421 ra_struct.ra_count = len;
13422
13423 return VNOP_IOCTL(vp, F_RDADVISE, (caddr_t)&ra_struct, 0, ctx);
13424 }
13425
int
vnode_hasmultipath(vnode_t vp)
{
	struct vnode_attr va;
	bool is_local_volume = !!(vp->v_mount->mnt_flag & MNT_LOCAL);
	bool link_locked = false;
	int has_multipath = 0;
	int error;

	/*
	 * If the volume doesn't support directory hard link then the directory
	 * can't be a hard link.
	 */
	if ((vp->v_type == VDIR) && is_local_volume &&
	    !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) {
		goto out;
	}

	vnode_link_lock(vp);
	link_locked = true;

	/* Cached negative result: vnode was previously proven not a hardlink. */
	if (is_local_volume &&
	    (os_atomic_load(&vp->v_ext_flag, relaxed) & VE_NOT_HARDLINK)) {
		goto out;
	}

	/*
	 * Not all file systems adopt vnode_setmultipath() to mark a vnode is
	 * hard link (VISHARDLINK) so we need to call into the file system to get
	 * the link count attributes to determine if the vnode has multiple paths.
	 */
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_nlink);
	VATTR_WANTED(&va, va_dirlinkcount);

	/* On getattr failure we fall through and report 0 (best effort). */
	error = vnode_getattr(vp, &va, vfs_context_current());
	if (error) {
		goto out;
	}

	if ((vp->v_type == VDIR) && VATTR_IS_SUPPORTED(&va, va_dirlinkcount)) {
		has_multipath = (va.va_dirlinkcount > 1);
	} else if (VATTR_IS_SUPPORTED(&va, va_nlink)) {
		has_multipath = (va.va_nlink > 1);
	}

	/* Cache the negative answer so future calls can skip the getattr. */
	if (has_multipath == 0) {
		os_atomic_or(&vp->v_ext_flag, VE_NOT_HARDLINK, relaxed);
	}

out:
	if (link_locked) {
		vnode_link_unlock(vp);
	}

	return has_multipath;
}
13483
13484 bool
vnode_isappendonly(vnode_t vp)13485 vnode_isappendonly(vnode_t vp)
13486 {
13487 return os_atomic_load(&vp->v_ext_flag, relaxed) & VE_APPENDONLY;
13488 }
13489