xref: /xnu-8020.121.3/bsd/vfs/kpi_vfs.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1989, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kpi_vfs.c
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 /*
76  * External virtual filesystem routines
77  */
78 
79 
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/disk.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf.h>
93 #include <sys/errno.h>
94 #include <kern/kalloc.h>
95 #include <sys/domain.h>
96 #include <sys/mbuf.h>
97 #include <sys/syslog.h>
98 #include <sys/ubc.h>
99 #include <sys/vm.h>
100 #include <sys/sysctl.h>
101 #include <sys/filedesc.h>
102 #include <sys/event.h>
103 #include <sys/fsevents.h>
104 #include <sys/user.h>
105 #include <sys/lockf.h>
106 #include <sys/xattr.h>
107 #include <sys/kdebug.h>
108 
109 #include <kern/assert.h>
110 #include <kern/zalloc.h>
111 #include <kern/task.h>
112 #include <kern/policy_internal.h>
113 
114 #include <libkern/OSByteOrder.h>
115 
116 #include <miscfs/specfs/specdev.h>
117 
118 #include <mach/mach_types.h>
119 #include <mach/memory_object_types.h>
120 #include <mach/task.h>
121 
122 #if CONFIG_MACF
123 #include <security/mac_framework.h>
124 #endif
125 
126 #if NULLFS
127 #include <miscfs/nullfs/nullfs.h>
128 #endif
129 
130 #include <sys/sdt.h>
131 
132 #define ESUCCESS 0
133 #undef mount_t
134 #undef vnode_t
135 
136 #define COMPAT_ONLY
137 
138 #define NATIVE_XATTR(VP)  \
139 	((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
140 
141 #if CONFIG_APPLEDOUBLE
142 static void xattrfile_remove(vnode_t dvp, const char *basename,
143     vfs_context_t ctx, int force);
144 static void xattrfile_setattr(vnode_t dvp, const char * basename,
145     struct vnode_attr * vap, vfs_context_t ctx);
146 #endif /* CONFIG_APPLEDOUBLE */
147 
148 extern lck_rw_t rootvnode_rw_lock;
149 
150 static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
151 
152 KALLOC_TYPE_DEFINE(KT_VFS_CONTEXT, struct vfs_context, KT_PRIV_ACCT);
153 
154 extern int fstypenumstart;
155 char vfs_typenum_arr[13];
156 
157 LCK_GRP_DECLARE(typenum_arr_grp, "typenum array group");
158 LCK_MTX_DECLARE(vfs_typenum_mtx, &typenum_arr_grp);
159 /*
160  * vnode_setneedinactive
161  *
162  * Description: Indicate that when the last iocount on this vnode goes away,
163  *              and the usecount is also zero, we should inform the filesystem
164  *              via VNOP_INACTIVE.
165  *
166  * Parameters:  vnode_t		vnode to mark
167  *
168  * Returns:     Nothing
169  *
170  * Notes:       Notably used when we're deleting a file--we need not have a
171  *              usecount, so VNOP_INACTIVE may not get called by anyone.  We
172  *              want it called when we drop our iocount.
173  */
174 void
vnode_setneedinactive(vnode_t vp)175 vnode_setneedinactive(vnode_t vp)
176 {
177 	cache_purge(vp);
178 
179 	vnode_lock_spin(vp);
180 	vp->v_lflag |= VL_NEEDINACTIVE;
181 	vnode_unlock(vp);
182 }
183 
184 
185 /* ====================================================================== */
186 /* ************  EXTERNAL KERNEL APIS  ********************************** */
187 /* ====================================================================== */
188 
189 /*
190  * implementations of exported VFS operations
191  */
192 int
VFS_MOUNT(mount_t mp,vnode_t devvp,user_addr_t data,vfs_context_t ctx)193 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
194 {
195 	int error;
196 
197 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) {
198 		return ENOTSUP;
199 	}
200 
201 	if (vfs_context_is64bit(ctx)) {
202 		if (vfs_64bitready(mp)) {
203 			error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
204 		} else {
205 			error = ENOTSUP;
206 		}
207 	} else {
208 		error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
209 	}
210 
211 	return error;
212 }
213 
214 int
VFS_START(mount_t mp,int flags,vfs_context_t ctx)215 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
216 {
217 	int error;
218 
219 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) {
220 		return ENOTSUP;
221 	}
222 
223 	error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
224 
225 	return error;
226 }
227 
228 int
VFS_UNMOUNT(mount_t mp,int flags,vfs_context_t ctx)229 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
230 {
231 	int error;
232 
233 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) {
234 		return ENOTSUP;
235 	}
236 
237 	error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
238 
239 	return error;
240 }
241 
242 /*
243  * Returns:	0			Success
244  *		ENOTSUP			Not supported
245  *		<vfs_root>:ENOENT
246  *		<vfs_root>:???
247  *
248  * Note:	The return codes from the underlying VFS's root routine can't
249  *		be fully enumerated here, since third party VFS authors may not
250  *		limit their error returns to the ones documented here, even
251  *		though this may result in some programs functioning incorrectly.
252  *
253  *		The return codes documented above are those which may currently
254  *		be returned by HFS from hfs_vfs_root, which is a simple wrapper
255  *		for a call to hfs_vget on the volume mount point, not including
256  *		additional error codes which may be propagated from underlying
257  *		routines called by hfs_vget.
258  */
259 int
VFS_ROOT(mount_t mp,struct vnode ** vpp,vfs_context_t ctx)260 VFS_ROOT(mount_t mp, struct vnode  ** vpp, vfs_context_t ctx)
261 {
262 	int error;
263 
264 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) {
265 		return ENOTSUP;
266 	}
267 
268 	if (ctx == NULL) {
269 		ctx = vfs_context_current();
270 	}
271 
272 	error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
273 
274 	return error;
275 }
276 
277 int
VFS_QUOTACTL(mount_t mp,int cmd,uid_t uid,caddr_t datap,vfs_context_t ctx)278 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
279 {
280 	int error;
281 
282 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) {
283 		return ENOTSUP;
284 	}
285 
286 	error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
287 
288 	return error;
289 }
290 
291 int
VFS_GETATTR(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)292 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
293 {
294 	int error;
295 
296 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) {
297 		return ENOTSUP;
298 	}
299 
300 	if (ctx == NULL) {
301 		ctx = vfs_context_current();
302 	}
303 
304 	error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
305 
306 	return error;
307 }
308 
309 int
VFS_SETATTR(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)310 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
311 {
312 	int error;
313 
314 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) {
315 		return ENOTSUP;
316 	}
317 
318 	if (ctx == NULL) {
319 		ctx = vfs_context_current();
320 	}
321 
322 	error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
323 
324 	return error;
325 }
326 
327 int
VFS_SYNC(mount_t mp,int flags,vfs_context_t ctx)328 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
329 {
330 	int error;
331 
332 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) {
333 		return ENOTSUP;
334 	}
335 
336 	if (ctx == NULL) {
337 		ctx = vfs_context_current();
338 	}
339 
340 	error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
341 
342 	return error;
343 }
344 
345 int
VFS_VGET(mount_t mp,ino64_t ino,struct vnode ** vpp,vfs_context_t ctx)346 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
347 {
348 	int error;
349 
350 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) {
351 		return ENOTSUP;
352 	}
353 
354 	if (ctx == NULL) {
355 		ctx = vfs_context_current();
356 	}
357 
358 	error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
359 
360 	return error;
361 }
362 
363 int
VFS_FHTOVP(mount_t mp,int fhlen,unsigned char * fhp,vnode_t * vpp,vfs_context_t ctx)364 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
365 {
366 	int error;
367 
368 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) {
369 		return ENOTSUP;
370 	}
371 
372 	if (ctx == NULL) {
373 		ctx = vfs_context_current();
374 	}
375 
376 	error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
377 
378 	return error;
379 }
380 
381 int
VFS_VPTOFH(struct vnode * vp,int * fhlenp,unsigned char * fhp,vfs_context_t ctx)382 VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
383 {
384 	int error;
385 
386 	if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) {
387 		return ENOTSUP;
388 	}
389 
390 	if (ctx == NULL) {
391 		ctx = vfs_context_current();
392 	}
393 
394 	error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
395 
396 	return error;
397 }
398 
399 int
VFS_IOCTL(struct mount * mp,u_long command,caddr_t data,int flags,vfs_context_t context)400 VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
401     int flags, vfs_context_t context)
402 {
403 	if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl) {
404 		return ENOTSUP;
405 	}
406 
407 	return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
408 	           context ?: vfs_context_current());
409 }
410 
411 int
VFS_VGET_SNAPDIR(mount_t mp,vnode_t * vpp,vfs_context_t ctx)412 VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
413 {
414 	int error;
415 
416 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0)) {
417 		return ENOTSUP;
418 	}
419 
420 	if (ctx == NULL) {
421 		ctx = vfs_context_current();
422 	}
423 
424 	error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
425 
426 	return error;
427 }
428 
429 /* returns the cached throttle mask for the mount_t */
430 uint64_t
vfs_throttle_mask(mount_t mp)431 vfs_throttle_mask(mount_t mp)
432 {
433 	return mp->mnt_throttle_mask;
434 }
435 
436 /* returns a  copy of vfs type name for the mount_t */
437 void
vfs_name(mount_t mp,char * buffer)438 vfs_name(mount_t mp, char *buffer)
439 {
440 	strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
441 }
442 
443 /* returns  vfs type number for the mount_t */
444 int
vfs_typenum(mount_t mp)445 vfs_typenum(mount_t mp)
446 {
447 	return mp->mnt_vtable->vfc_typenum;
448 }
449 
450 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers.  */
451 void*
vfs_mntlabel(mount_t mp)452 vfs_mntlabel(mount_t mp)
453 {
454 	return (void*)mac_mount_label(mp);
455 }
456 
457 uint64_t
vfs_mount_id(mount_t mp)458 vfs_mount_id(mount_t mp)
459 {
460 	return mp->mnt_mount_id;
461 }
462 
463 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
464 uint64_t
vfs_flags(mount_t mp)465 vfs_flags(mount_t mp)
466 {
467 	return (uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
468 }
469 
470 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
471 void
vfs_setflags(mount_t mp,uint64_t flags)472 vfs_setflags(mount_t mp, uint64_t flags)
473 {
474 	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
475 
476 	mount_lock(mp);
477 	mp->mnt_flag |= lflags;
478 	mount_unlock(mp);
479 }
480 
481 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
482 void
vfs_clearflags(mount_t mp,uint64_t flags)483 vfs_clearflags(mount_t mp, uint64_t flags)
484 {
485 	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
486 
487 	mount_lock(mp);
488 	mp->mnt_flag &= ~lflags;
489 	mount_unlock(mp);
490 }
491 
492 /* Is the mount_t ronly and upgrade read/write requested? */
493 int
vfs_iswriteupgrade(mount_t mp)494 vfs_iswriteupgrade(mount_t mp) /* ronly &&  MNTK_WANTRDWR */
495 {
496 	return (mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR);
497 }
498 
499 
500 /* Is the mount_t mounted ronly */
501 int
vfs_isrdonly(mount_t mp)502 vfs_isrdonly(mount_t mp)
503 {
504 	return mp->mnt_flag & MNT_RDONLY;
505 }
506 
507 /* Is the mount_t mounted for filesystem synchronous writes? */
508 int
vfs_issynchronous(mount_t mp)509 vfs_issynchronous(mount_t mp)
510 {
511 	return mp->mnt_flag & MNT_SYNCHRONOUS;
512 }
513 
514 /* Is the mount_t mounted read/write? */
515 int
vfs_isrdwr(mount_t mp)516 vfs_isrdwr(mount_t mp)
517 {
518 	return (mp->mnt_flag & MNT_RDONLY) == 0;
519 }
520 
521 
522 /* Is mount_t marked for update (ie MNT_UPDATE) */
523 int
vfs_isupdate(mount_t mp)524 vfs_isupdate(mount_t mp)
525 {
526 	return mp->mnt_flag & MNT_UPDATE;
527 }
528 
529 
530 /* Is mount_t marked for reload (ie MNT_RELOAD) */
531 int
vfs_isreload(mount_t mp)532 vfs_isreload(mount_t mp)
533 {
534 	return (mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD);
535 }
536 
537 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
538 int
vfs_isforce(mount_t mp)539 vfs_isforce(mount_t mp)
540 {
541 	if (mp->mnt_lflag & MNT_LFORCE) {
542 		return 1;
543 	} else {
544 		return 0;
545 	}
546 }
547 
548 int
vfs_isunmount(mount_t mp)549 vfs_isunmount(mount_t mp)
550 {
551 	if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
552 		return 1;
553 	} else {
554 		return 0;
555 	}
556 }
557 
558 int
vfs_64bitready(mount_t mp)559 vfs_64bitready(mount_t mp)
560 {
561 	if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
562 		return 1;
563 	} else {
564 		return 0;
565 	}
566 }
567 
568 
569 int
vfs_authcache_ttl(mount_t mp)570 vfs_authcache_ttl(mount_t mp)
571 {
572 	if ((mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
573 		return mp->mnt_authcache_ttl;
574 	} else {
575 		return CACHED_RIGHT_INFINITE_TTL;
576 	}
577 }
578 
579 void
vfs_setauthcache_ttl(mount_t mp,int ttl)580 vfs_setauthcache_ttl(mount_t mp, int ttl)
581 {
582 	mount_lock(mp);
583 	mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
584 	mp->mnt_authcache_ttl = ttl;
585 	mount_unlock(mp);
586 }
587 
588 void
vfs_clearauthcache_ttl(mount_t mp)589 vfs_clearauthcache_ttl(mount_t mp)
590 {
591 	mount_lock(mp);
592 	mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
593 	/*
594 	 * back to the default TTL value in case
595 	 * MNTK_AUTH_OPAQUE is set on this mount
596 	 */
597 	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
598 	mount_unlock(mp);
599 }
600 
601 int
vfs_authopaque(mount_t mp)602 vfs_authopaque(mount_t mp)
603 {
604 	if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE)) {
605 		return 1;
606 	} else {
607 		return 0;
608 	}
609 }
610 
611 int
vfs_authopaqueaccess(mount_t mp)612 vfs_authopaqueaccess(mount_t mp)
613 {
614 	if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS)) {
615 		return 1;
616 	} else {
617 		return 0;
618 	}
619 }
620 
621 void
vfs_setauthopaque(mount_t mp)622 vfs_setauthopaque(mount_t mp)
623 {
624 	mount_lock(mp);
625 	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
626 	mount_unlock(mp);
627 }
628 
629 void
vfs_setauthopaqueaccess(mount_t mp)630 vfs_setauthopaqueaccess(mount_t mp)
631 {
632 	mount_lock(mp);
633 	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
634 	mount_unlock(mp);
635 }
636 
637 void
vfs_clearauthopaque(mount_t mp)638 vfs_clearauthopaque(mount_t mp)
639 {
640 	mount_lock(mp);
641 	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
642 	mount_unlock(mp);
643 }
644 
645 void
vfs_clearauthopaqueaccess(mount_t mp)646 vfs_clearauthopaqueaccess(mount_t mp)
647 {
648 	mount_lock(mp);
649 	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
650 	mount_unlock(mp);
651 }
652 
653 void
vfs_setextendedsecurity(mount_t mp)654 vfs_setextendedsecurity(mount_t mp)
655 {
656 	mount_lock(mp);
657 	mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
658 	mount_unlock(mp);
659 }
660 
661 void
vfs_setmntsystem(mount_t mp)662 vfs_setmntsystem(mount_t mp)
663 {
664 	mount_lock(mp);
665 	mp->mnt_kern_flag |= MNTK_SYSTEM;
666 	mount_unlock(mp);
667 }
668 
669 void
vfs_setmntsystemdata(mount_t mp)670 vfs_setmntsystemdata(mount_t mp)
671 {
672 	mount_lock(mp);
673 	mp->mnt_kern_flag |= MNTK_SYSTEMDATA;
674 	mount_unlock(mp);
675 }
676 
677 void
vfs_setmntswap(mount_t mp)678 vfs_setmntswap(mount_t mp)
679 {
680 	mount_lock(mp);
681 	mp->mnt_kern_flag |= (MNTK_SYSTEM | MNTK_SWAP_MOUNT);
682 	mount_unlock(mp);
683 }
684 
685 void
vfs_clearextendedsecurity(mount_t mp)686 vfs_clearextendedsecurity(mount_t mp)
687 {
688 	mount_lock(mp);
689 	mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
690 	mount_unlock(mp);
691 }
692 
693 void
vfs_setnoswap(mount_t mp)694 vfs_setnoswap(mount_t mp)
695 {
696 	mount_lock(mp);
697 	mp->mnt_kern_flag |= MNTK_NOSWAP;
698 	mount_unlock(mp);
699 }
700 
701 void
vfs_clearnoswap(mount_t mp)702 vfs_clearnoswap(mount_t mp)
703 {
704 	mount_lock(mp);
705 	mp->mnt_kern_flag &= ~MNTK_NOSWAP;
706 	mount_unlock(mp);
707 }
708 
709 int
vfs_extendedsecurity(mount_t mp)710 vfs_extendedsecurity(mount_t mp)
711 {
712 	return mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY;
713 }
714 
715 /* returns the max size of short symlink in this mount_t */
716 uint32_t
vfs_maxsymlen(mount_t mp)717 vfs_maxsymlen(mount_t mp)
718 {
719 	return mp->mnt_maxsymlinklen;
720 }
721 
722 /* set  max size of short symlink on mount_t */
723 void
vfs_setmaxsymlen(mount_t mp,uint32_t symlen)724 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
725 {
726 	mp->mnt_maxsymlinklen = symlen;
727 }
728 
729 boolean_t
vfs_is_basesystem(mount_t mp)730 vfs_is_basesystem(mount_t mp)
731 {
732 	return ((mp->mnt_supl_kern_flag & MNTK_SUPL_BASESYSTEM) == 0) ? false : true;
733 }
734 
735 /* return a pointer to the RO vfs_statfs associated with mount_t */
736 struct vfsstatfs *
vfs_statfs(mount_t mp)737 vfs_statfs(mount_t mp)
738 {
739 	return &mp->mnt_vfsstat;
740 }
741 
742 int
vfs_getattr(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)743 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
744 {
745 	int             error;
746 
747 	if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) {
748 		return error;
749 	}
750 
751 	/*
752 	 * If we have a filesystem create time, use it to default some others.
753 	 */
754 	if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
755 		if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time)) {
756 			VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
757 		}
758 	}
759 
760 	return 0;
761 }
762 
763 int
vfs_setattr(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)764 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
765 {
766 	int error;
767 
768 	/*
769 	 * with a read-only system volume, we need to allow rename of the root volume
770 	 * even if it's read-only.  Don't return EROFS here if setattr changes only
771 	 * the volume name
772 	 */
773 	if (vfs_isrdonly(mp) &&
774 	    !((strcmp(mp->mnt_vfsstat.f_fstypename, "apfs") == 0) && (vfa->f_active == VFSATTR_f_vol_name))) {
775 		return EROFS;
776 	}
777 
778 	error = VFS_SETATTR(mp, vfa, ctx);
779 
780 	/*
781 	 * If we had alternate ways of setting vfs attributes, we'd
782 	 * fall back here.
783 	 */
784 
785 	return error;
786 }
787 
788 /* return the private data handle stored in mount_t */
789 void *
vfs_fsprivate(mount_t mp)790 vfs_fsprivate(mount_t mp)
791 {
792 	return mp->mnt_data;
793 }
794 
795 /* set the private data handle in mount_t */
796 void
vfs_setfsprivate(mount_t mp,void * mntdata)797 vfs_setfsprivate(mount_t mp, void *mntdata)
798 {
799 	mount_lock(mp);
800 	mp->mnt_data = mntdata;
801 	mount_unlock(mp);
802 }
803 
804 /* query whether the mount point supports native EAs */
805 int
vfs_nativexattrs(mount_t mp)806 vfs_nativexattrs(mount_t mp)
807 {
808 	return mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS;
809 }
810 
811 /*
812  * return the block size of the underlying
813  * device associated with mount_t
814  */
815 int
vfs_devblocksize(mount_t mp)816 vfs_devblocksize(mount_t mp)
817 {
818 	return mp->mnt_devblocksize;
819 }
820 
821 /*
822  * Returns vnode with an iocount that must be released with vnode_put()
823  */
824 vnode_t
vfs_vnodecovered(mount_t mp)825 vfs_vnodecovered(mount_t mp)
826 {
827 	vnode_t vp = mp->mnt_vnodecovered;
828 	if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
829 		return NULL;
830 	} else {
831 		return vp;
832 	}
833 }
834 
835 /*
836  * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
837  * The iocount must be released with vnode_put().  Note that this KPI is subtle
838  * with respect to the validity of using this device vnode for anything substantial
839  * (which is discouraged).  If commands are sent to the device driver without
840  * taking proper steps to ensure that the device is still open, chaos may ensue.
841  * Similarly, this routine should only be called if there is some guarantee that
842  * the mount itself is still valid.
843  */
844 vnode_t
vfs_devvp(mount_t mp)845 vfs_devvp(mount_t mp)
846 {
847 	vnode_t vp = mp->mnt_devvp;
848 
849 	if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
850 		return vp;
851 	}
852 
853 	return NULLVP;
854 }
855 
856 /*
857  * return the io attributes associated with mount_t
858  */
859 void
vfs_ioattr(mount_t mp,struct vfsioattr * ioattrp)860 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
861 {
862 	ioattrp->io_reserved[0] = NULL;
863 	ioattrp->io_reserved[1] = NULL;
864 	if (mp == NULL) {
865 		ioattrp->io_maxreadcnt  = MAXPHYS;
866 		ioattrp->io_maxwritecnt = MAXPHYS;
867 		ioattrp->io_segreadcnt  = 32;
868 		ioattrp->io_segwritecnt = 32;
869 		ioattrp->io_maxsegreadsize  = MAXPHYS;
870 		ioattrp->io_maxsegwritesize = MAXPHYS;
871 		ioattrp->io_devblocksize = DEV_BSIZE;
872 		ioattrp->io_flags = 0;
873 		ioattrp->io_max_swappin_available = 0;
874 	} else {
875 		ioattrp->io_maxreadcnt  = mp->mnt_maxreadcnt;
876 		ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
877 		ioattrp->io_segreadcnt  = mp->mnt_segreadcnt;
878 		ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
879 		ioattrp->io_maxsegreadsize  = mp->mnt_maxsegreadsize;
880 		ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
881 		ioattrp->io_devblocksize = mp->mnt_devblocksize;
882 		ioattrp->io_flags = mp->mnt_ioflags;
883 		ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
884 	}
885 }
886 
887 
888 /*
889  * set the IO attributes associated with mount_t
890  */
891 void
vfs_setioattr(mount_t mp,struct vfsioattr * ioattrp)892 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
893 {
894 	if (mp == NULL) {
895 		return;
896 	}
897 	mp->mnt_maxreadcnt  = ioattrp->io_maxreadcnt;
898 	mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
899 	mp->mnt_segreadcnt  = ioattrp->io_segreadcnt;
900 	mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
901 	mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
902 	mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
903 	mp->mnt_devblocksize = ioattrp->io_devblocksize;
904 	mp->mnt_ioflags = ioattrp->io_flags;
905 	mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
906 }
907 
908 /*
909  * Add a new filesystem into the kernel specified in passed in
910  * vfstable structure. It fills in the vnode
911  * dispatch vector that is to be passed to when vnodes are created.
912  * It returns a handle which is to be used to when the FS is to be removed
913  */
914 typedef int (*PFI)(void *);
915 extern int vfs_opv_numops;
916 errno_t
vfs_fsadd(struct vfs_fsentry * vfe,vfstable_t * handle)917 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
918 {
919 	struct vfstable *newvfstbl = NULL;
920 	int     i, j;
921 	int(***opv_desc_vector_p)(void *);
922 	int(**opv_desc_vector)(void *);
923 	const struct vnodeopv_entry_desc        *opve_descp;
924 	int desccount;
925 	int descsize;
926 	PFI *descptr;
927 
928 	/*
929 	 * This routine is responsible for all the initialization that would
930 	 * ordinarily be done as part of the system startup;
931 	 */
932 
933 	if (vfe == (struct vfs_fsentry *)0) {
934 		return EINVAL;
935 	}
936 
937 	desccount = vfe->vfe_vopcnt;
938 	if ((desccount <= 0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
939 	    || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) {
940 		return EINVAL;
941 	}
942 
943 	/* Non-threadsafe filesystems are not supported */
944 	if ((vfe->vfe_flags &  (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
945 		return EINVAL;
946 	}
947 
948 	newvfstbl = kalloc_type(struct vfstable, Z_WAITOK | Z_ZERO);
949 	newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
950 	strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
951 	if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) {
952 		int tmp;
953 		int found = 0;
954 		lck_mtx_lock(&vfs_typenum_mtx);
955 		for (tmp = fstypenumstart; tmp < OID_AUTO_START; tmp++) {
956 			if (isclr(vfs_typenum_arr, tmp)) {
957 				newvfstbl->vfc_typenum = tmp;
958 				setbit(vfs_typenum_arr, tmp);
959 				found = 1;
960 				break;
961 			}
962 		}
963 		if (!found) {
964 			lck_mtx_unlock(&vfs_typenum_mtx);
965 			return EINVAL;
966 		}
967 		if (maxvfstypenum < OID_AUTO_START) {
968 			/* getvfsbyname checks up to but not including maxvfstypenum */
969 			maxvfstypenum = newvfstbl->vfc_typenum + 1;
970 		}
971 		lck_mtx_unlock(&vfs_typenum_mtx);
972 	} else {
973 		newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
974 		lck_mtx_lock(&vfs_typenum_mtx);
975 		setbit(vfs_typenum_arr, newvfstbl->vfc_typenum);
976 		if (newvfstbl->vfc_typenum >= maxvfstypenum) {
977 			maxvfstypenum = newvfstbl->vfc_typenum + 1;
978 		}
979 		lck_mtx_unlock(&vfs_typenum_mtx);
980 	}
981 
982 
983 	newvfstbl->vfc_refcount = 0;
984 	newvfstbl->vfc_flags = 0;
985 	newvfstbl->vfc_mountroot = NULL;
986 	newvfstbl->vfc_next = NULL;
987 	newvfstbl->vfc_vfsflags = 0;
988 	if (vfe->vfe_flags &  VFS_TBL64BITREADY) {
989 		newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
990 	}
991 	if (vfe->vfe_flags &  VFS_TBLVNOP_PAGEINV2) {
992 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
993 	}
994 	if (vfe->vfe_flags &  VFS_TBLVNOP_PAGEOUTV2) {
995 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
996 	}
997 	if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) {
998 		newvfstbl->vfc_flags |= MNT_LOCAL;
999 	}
1000 	if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) {
1001 		newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
1002 	} else {
1003 		newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
1004 	}
1005 
1006 	if (vfe->vfe_flags &  VFS_TBLNATIVEXATTR) {
1007 		newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1008 	}
1009 	if (vfe->vfe_flags &  VFS_TBLUNMOUNT_PREFLIGHT) {
1010 		newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
1011 	}
1012 	if (vfe->vfe_flags &  VFS_TBLREADDIR_EXTENDED) {
1013 		newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
1014 	}
1015 	if (vfe->vfe_flags & VFS_TBLNOMACLABEL) {
1016 		newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
1017 	}
1018 	if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) {
1019 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
1020 	}
1021 	if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME) {
1022 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
1023 	}
1024 	if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT) {
1025 		newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
1026 	}
1027 
1028 	/*
1029 	 * Allocate and init the vectors.
1030 	 * Also handle backwards compatibility.
1031 	 *
1032 	 * We allocate one large block to hold all <desccount>
1033 	 * vnode operation vectors stored contiguously.
1034 	 */
1035 	/* XXX - shouldn't be M_TEMP */
1036 
1037 	descsize = desccount * vfs_opv_numops;
1038 	descptr = kalloc_type(PFI, descsize, Z_WAITOK | Z_ZERO);
1039 
1040 	newvfstbl->vfc_descptr = descptr;
1041 	newvfstbl->vfc_descsize = descsize;
1042 
1043 	newvfstbl->vfc_sysctl = NULL;
1044 
1045 	for (i = 0; i < desccount; i++) {
1046 		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1047 		/*
1048 		 * Fill in the caller's pointer to the start of the i'th vector.
1049 		 * They'll need to supply it when calling vnode_create.
1050 		 */
1051 		opv_desc_vector = descptr + i * vfs_opv_numops;
1052 		*opv_desc_vector_p = opv_desc_vector;
1053 
1054 		for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
1055 			opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
1056 
1057 			/* Silently skip known-disabled operations */
1058 			if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
1059 				printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
1060 				    vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
1061 				continue;
1062 			}
1063 
1064 			/*
1065 			 * Sanity check:  is this operation listed
1066 			 * in the list of operations?  We check this
1067 			 * by seeing if its offset is zero.  Since
1068 			 * the default routine should always be listed
1069 			 * first, it should be the only one with a zero
1070 			 * offset.  Any other operation with a zero
1071 			 * offset is probably not listed in
1072 			 * vfs_op_descs, and so is probably an error.
1073 			 *
1074 			 * A panic here means the layer programmer
1075 			 * has committed the all-too common bug
1076 			 * of adding a new operation to the layer's
1077 			 * list of vnode operations but
1078 			 * not adding the operation to the system-wide
1079 			 * list of supported operations.
1080 			 */
1081 			if (opve_descp->opve_op->vdesc_offset == 0 &&
1082 			    opve_descp->opve_op != VDESC(vnop_default)) {
1083 				printf("vfs_fsadd: operation %s not listed in %s.\n",
1084 				    opve_descp->opve_op->vdesc_name,
1085 				    "vfs_op_descs");
1086 				panic("vfs_fsadd: bad operation");
1087 			}
1088 			/*
1089 			 * Fill in this entry.
1090 			 */
1091 			opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1092 			    opve_descp->opve_impl;
1093 		}
1094 
1095 		/*
1096 		 * Finally, go back and replace unfilled routines
1097 		 * with their default.  (Sigh, an O(n^3) algorithm.  I
1098 		 * could make it better, but that'd be work, and n is small.)
1099 		 */
1100 		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1101 
1102 		/*
1103 		 * Force every operations vector to have a default routine.
1104 		 */
1105 		opv_desc_vector = *opv_desc_vector_p;
1106 		if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
1107 			panic("vfs_fsadd: operation vector without default routine.");
1108 		}
1109 		for (j = 0; j < vfs_opv_numops; j++) {
1110 			if (opv_desc_vector[j] == NULL) {
1111 				opv_desc_vector[j] =
1112 				    opv_desc_vector[VOFFSET(vnop_default)];
1113 			}
1114 		}
1115 	} /* end of each vnodeopv_desc parsing */
1116 
1117 	*handle = vfstable_add(newvfstbl);
1118 
1119 	if (newvfstbl->vfc_vfsops->vfs_init) {
1120 		struct vfsconf vfsc;
1121 		bzero(&vfsc, sizeof(struct vfsconf));
1122 		vfsc.vfc_reserved1 = 0;
1123 		bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1124 		vfsc.vfc_typenum = (*handle)->vfc_typenum;
1125 		vfsc.vfc_refcount = (*handle)->vfc_refcount;
1126 		vfsc.vfc_flags = (*handle)->vfc_flags;
1127 		vfsc.vfc_reserved2 = 0;
1128 		vfsc.vfc_reserved3 = 0;
1129 
1130 		(*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1131 	}
1132 
1133 	kfree_type(struct vfstable, newvfstbl);
1134 
1135 	return 0;
1136 }
1137 
1138 /*
1139  * Removes the filesystem from kernel.
1140  * The argument passed in is the handle that was given when
1141  * file system was added
1142  */
1143 errno_t
vfs_fsremove(vfstable_t handle)1144 vfs_fsremove(vfstable_t handle)
1145 {
1146 	struct vfstable * vfstbl =  (struct vfstable *)handle;
1147 	void *old_desc = NULL;
1148 	size_t descsize = 0;
1149 	errno_t err;
1150 
1151 	/* Preflight check for any mounts */
1152 	mount_list_lock();
1153 	if (vfstbl->vfc_refcount != 0) {
1154 		mount_list_unlock();
1155 		return EBUSY;
1156 	}
1157 
1158 	/* Free the spot in vfs_typenum_arr */
1159 	lck_mtx_lock(&vfs_typenum_mtx);
1160 	clrbit(vfs_typenum_arr, handle->vfc_typenum);
1161 	if (maxvfstypenum == handle->vfc_typenum) {
1162 		maxvfstypenum--;
1163 	}
1164 	lck_mtx_unlock(&vfs_typenum_mtx);
1165 
1166 	/*
1167 	 * save the old descriptor; the free cannot occur unconditionally,
1168 	 * since vfstable_del() may fail.
1169 	 */
1170 	if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1171 		old_desc = vfstbl->vfc_descptr;
1172 		descsize = vfstbl->vfc_descsize;
1173 	}
1174 	err = vfstable_del(vfstbl);
1175 
1176 	mount_list_unlock();
1177 
1178 	/* free the descriptor if the delete was successful */
1179 	if (err == 0) {
1180 		kfree_type(PFI, descsize, old_desc);
1181 	}
1182 
1183 	return err;
1184 }
1185 
/* Record the owning uid/gid on the mount (no locking; plain stores). */
void
vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
{
	mp->mnt_fsowner = uid;
	mp->mnt_fsgroup = gid;
}
1192 
1193 /*
1194  * Callers should be careful how they use this; accessing
1195  * mnt_last_write_completed_timestamp is not thread-safe.  Writing to
1196  * it isn't either.  Point is: be prepared to deal with strange values
1197  * being returned.
1198  */
1199 uint64_t
vfs_idle_time(mount_t mp)1200 vfs_idle_time(mount_t mp)
1201 {
1202 	if (mp->mnt_pending_write_size) {
1203 		return 0;
1204 	}
1205 
1206 	struct timeval now;
1207 
1208 	microuptime(&now);
1209 
1210 	return (now.tv_sec
1211 	       - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
1212 	       + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec;
1213 }
1214 
1215 int
vfs_context_pid(vfs_context_t ctx)1216 vfs_context_pid(vfs_context_t ctx)
1217 {
1218 	return proc_pid(vfs_context_proc(ctx));
1219 }
1220 
1221 int
vfs_context_copy_audit_token(vfs_context_t ctx,audit_token_t * token)1222 vfs_context_copy_audit_token(vfs_context_t ctx, audit_token_t *token)
1223 {
1224 	kern_return_t           err;
1225 	task_t                  task = NULL;
1226 	mach_msg_type_number_t  info_size = TASK_AUDIT_TOKEN_COUNT;
1227 
1228 	if (ctx != NULL && ctx->vc_thread != NULL) {
1229 		task = get_threadtask(ctx->vc_thread);
1230 	}
1231 
1232 	if (task == NULL) {
1233 		// Not sure how this would happen; we are supposed to be
1234 		// in the middle of using the context. Regardless, don't
1235 		// wander off a NULL pointer.
1236 		return ESRCH;
1237 	}
1238 
1239 	err = task_info(task, TASK_AUDIT_TOKEN, (integer_t *)token, &info_size);
1240 	return (err) ? ESRCH : 0;
1241 }
1242 
/* Superuser check on the context's credential (via suser()); ctx must be non-NULL. */
int
vfs_context_suser(vfs_context_t ctx)
{
	return suser(ctx->vc_ucred, NULL);
}
1248 
1249 /*
1250  * Return bit field of signals posted to all threads in the context's process.
1251  *
1252  * XXX Signals should be tied to threads, not processes, for most uses of this
1253  * XXX call.
1254  */
1255 int
vfs_context_issignal(vfs_context_t ctx,sigset_t mask)1256 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1257 {
1258 	proc_t p = vfs_context_proc(ctx);
1259 	if (p) {
1260 		return proc_pendingsignals(p, mask);
1261 	}
1262 	return 0;
1263 }
1264 
1265 int
vfs_context_is64bit(vfs_context_t ctx)1266 vfs_context_is64bit(vfs_context_t ctx)
1267 {
1268 	uthread_t uth;
1269 
1270 	if (ctx != NULL && ctx->vc_thread != NULL) {
1271 		uth = get_bsdthread_info(ctx->vc_thread);
1272 	} else {
1273 		uth = current_uthread();
1274 	}
1275 	return uthread_is64bit(uth);
1276 }
1277 
1278 boolean_t
vfs_context_can_resolve_triggers(vfs_context_t ctx)1279 vfs_context_can_resolve_triggers(vfs_context_t ctx)
1280 {
1281 	proc_t proc = vfs_context_proc(ctx);
1282 
1283 	if (proc) {
1284 		if (proc->p_vfs_iopolicy &
1285 		    P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE) {
1286 			return false;
1287 		}
1288 		return true;
1289 	}
1290 	return false;
1291 }
1292 
1293 /*
1294  * vfs_context_proc
1295  *
1296  * Description:	Given a vfs_context_t, return the proc_t associated with it.
1297  *
1298  * Parameters:	vfs_context_t			The context to use
1299  *
1300  * Returns:	proc_t				The process for this context
1301  *
1302  * Notes:	This function will return the current_proc() if any of the
1303  *		following conditions are true:
1304  *
1305  *		o	The supplied context pointer is NULL
1306  *		o	There is no Mach thread associated with the context
1307  *		o	There is no Mach task associated with the Mach thread
1308  *		o	There is no proc_t associated with the Mach task
1309  *		o	The proc_t has no per process open file table
1310  *
1311  *		This causes this function to return a value matching as
1312  *		closely as possible the previous behaviour.
1313  */
1314 proc_t
vfs_context_proc(vfs_context_t ctx)1315 vfs_context_proc(vfs_context_t ctx)
1316 {
1317 	proc_t  proc = NULL;
1318 
1319 	if (ctx != NULL && ctx->vc_thread != NULL) {
1320 		proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread);
1321 	}
1322 
1323 	return proc == NULL ? current_proc() : proc;
1324 }
1325 
1326 /*
1327  * vfs_context_get_special_port
1328  *
1329  * Description: Return the requested special port from the task associated
1330  *              with the given context.
1331  *
1332  * Parameters:	vfs_context_t			The context to use
1333  *              int				Index of special port
1334  *              ipc_port_t *			Pointer to returned port
1335  *
1336  * Returns:	kern_return_t			see task_get_special_port()
1337  */
1338 kern_return_t
vfs_context_get_special_port(vfs_context_t ctx,int which,ipc_port_t * portp)1339 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1340 {
1341 	task_t                  task = NULL;
1342 
1343 	if (ctx != NULL && ctx->vc_thread != NULL) {
1344 		task = get_threadtask(ctx->vc_thread);
1345 	}
1346 
1347 	return task_get_special_port(task, which, portp);
1348 }
1349 
1350 /*
1351  * vfs_context_set_special_port
1352  *
1353  * Description: Set the requested special port in the task associated
1354  *              with the given context.
1355  *
1356  * Parameters:	vfs_context_t			The context to use
1357  *              int				Index of special port
1358  *              ipc_port_t			New special port
1359  *
1360  * Returns:	kern_return_t			see task_set_special_port_internal()
1361  */
1362 kern_return_t
vfs_context_set_special_port(vfs_context_t ctx,int which,ipc_port_t port)1363 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1364 {
1365 	task_t                  task = NULL;
1366 
1367 	if (ctx != NULL && ctx->vc_thread != NULL) {
1368 		task = get_threadtask(ctx->vc_thread);
1369 	}
1370 
1371 	return task_set_special_port_internal(task, which, port);
1372 }
1373 
1374 /*
1375  * vfs_context_thread
1376  *
1377  * Description:	Return the Mach thread associated with a vfs_context_t
1378  *
1379  * Parameters:	vfs_context_t			The context to use
1380  *
1381  * Returns:	thread_t			The thread for this context, or
1382  *						NULL, if there is not one.
1383  *
1384  * Notes:	NULL thread_t's are legal, but discouraged.  They occur only
1385  *		as a result of a static vfs_context_t declaration in a function
1386  *		and will result in this function returning NULL.
1387  *
1388  *		This is intentional; this function should NOT return the
1389  *		current_thread() in this case.
1390  */
thread_t
vfs_context_thread(vfs_context_t ctx)
{
	/* May be NULL for statically-declared contexts; see comment above. */
	return ctx->vc_thread;
}
1396 
1397 
1398 /*
1399  * vfs_context_cwd
1400  *
1401  * Description:	Returns a reference on the vnode for the current working
1402  *		directory for the supplied context
1403  *
1404  * Parameters:	vfs_context_t			The context to use
1405  *
1406  * Returns:	vnode_t				The current working directory
1407  *						for this context
1408  *
1409  * Notes:	The function first attempts to obtain the current directory
1410  *		from the thread, and if it is not present there, falls back
1411  *		to obtaining it from the process instead.  If it can't be
1412  *		obtained from either place, we return NULLVP.
1413  */
1414 vnode_t
vfs_context_cwd(vfs_context_t ctx)1415 vfs_context_cwd(vfs_context_t ctx)
1416 {
1417 	vnode_t cwd = NULLVP;
1418 
1419 	if (ctx != NULL && ctx->vc_thread != NULL) {
1420 		uthread_t uth = get_bsdthread_info(ctx->vc_thread);
1421 		proc_t proc;
1422 
1423 		/*
1424 		 * Get the cwd from the thread; if there isn't one, get it
1425 		 * from the process, instead.
1426 		 */
1427 		if ((cwd = uth->uu_cdir) == NULLVP &&
1428 		    (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL) {
1429 			cwd = proc->p_fd.fd_cdir;
1430 		}
1431 	}
1432 
1433 	return cwd;
1434 }
1435 
1436 /*
1437  * vfs_context_create
1438  *
1439  * Description: Allocate and initialize a new context.
1440  *
1441  * Parameters:  vfs_context_t:                  Context to copy, or NULL for new
1442  *
1443  * Returns:     Pointer to new context
1444  *
1445  * Notes:       Copy cred and thread from argument, if available; else
1446  *              initialize with current thread and new cred.  Returns
1447  *              with a reference held on the credential.
1448  */
1449 vfs_context_t
vfs_context_create(vfs_context_t ctx)1450 vfs_context_create(vfs_context_t ctx)
1451 {
1452 	vfs_context_t newcontext;
1453 
1454 	newcontext = zalloc_flags(KT_VFS_CONTEXT, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1455 
1456 	if (ctx == NULL) {
1457 		ctx = vfs_context_current();
1458 	}
1459 	*newcontext = *ctx;
1460 	if (IS_VALID_CRED(ctx->vc_ucred)) {
1461 		kauth_cred_ref(ctx->vc_ucred);
1462 	}
1463 
1464 	return newcontext;
1465 }
1466 
1467 
/*
 * Return the current thread's context by reinterpreting its thread_ro
 * structure; the static_asserts prove the two layouts line up.
 */
vfs_context_t
vfs_context_current(void)
{
	static_assert(offsetof(struct thread_ro, tro_owner) ==
	    offsetof(struct vfs_context, vc_thread));
	static_assert(offsetof(struct thread_ro, tro_cred) ==
	    offsetof(struct vfs_context, vc_ucred));

	return (vfs_context_t)current_thread_ro();
}
1478 
/* Return the global kernel context (vfs_context0). */
vfs_context_t
vfs_context_kernel(void)
{
	return &vfs_context0;
}
1484 
1485 int
vfs_context_rele(vfs_context_t ctx)1486 vfs_context_rele(vfs_context_t ctx)
1487 {
1488 	if (ctx) {
1489 		if (IS_VALID_CRED(ctx->vc_ucred)) {
1490 			kauth_cred_unref(&ctx->vc_ucred);
1491 		}
1492 		zfree(KT_VFS_CONTEXT, ctx);
1493 	}
1494 	return 0;
1495 }
1496 
1497 
/* Borrowed (unreferenced) credential of the context; ctx must be non-NULL. */
kauth_cred_t
vfs_context_ucred(vfs_context_t ctx)
{
	return ctx->vc_ucred;
}
1503 
1504 /*
1505  * Return true if the context is owned by the superuser.
1506  */
1507 int
vfs_context_issuser(vfs_context_t ctx)1508 vfs_context_issuser(vfs_context_t ctx)
1509 {
1510 	return kauth_cred_issuser(vfs_context_ucred(ctx));
1511 }
1512 
/* True iff ctx is exactly the global kernel context. */
int
vfs_context_iskernel(vfs_context_t ctx)
{
	return ctx == &vfs_context0;
}
1518 
1519 /*
1520  * Given a context, for all fields of vfs_context_t which
1521  * are not held with a reference, set those fields to the
1522  * values for the current execution context.  Currently, this
1523  * just means the vc_thread.
1524  *
1525  * Returns: 0 for success, nonzero for failure
1526  *
1527  * The intended use is:
1528  * 1. vfs_context_create()	gets the caller a context
1529  * 2. vfs_context_bind()        sets the unrefcounted data
1530  * 3. vfs_context_rele()        releases the context
1531  *
1532  */
int
vfs_context_bind(vfs_context_t ctx)
{
	/* Rebind the unrefcounted thread field; see block comment above. */
	ctx->vc_thread = current_thread();
	return 0;
}
1539 
1540 int
vfs_set_thread_fs_private(uint8_t tag,uint64_t fs_private)1541 vfs_set_thread_fs_private(uint8_t tag, uint64_t fs_private)
1542 {
1543 	struct uthread *ut;
1544 
1545 	if (tag != FS_PRIVATE_TAG_APFS) {
1546 		return ENOTSUP;
1547 	}
1548 
1549 	ut = current_uthread();
1550 	ut->t_fs_private = fs_private;
1551 
1552 	return 0;
1553 }
1554 
1555 int
vfs_get_thread_fs_private(uint8_t tag,uint64_t * fs_private)1556 vfs_get_thread_fs_private(uint8_t tag, uint64_t *fs_private)
1557 {
1558 	struct uthread *ut;
1559 
1560 	if (tag != FS_PRIVATE_TAG_APFS) {
1561 		return ENOTSUP;
1562 	}
1563 
1564 	ut = current_uthread();
1565 	*fs_private = ut->t_fs_private;
1566 
1567 	return 0;
1568 }
1569 
1570 int
vfs_isswapmount(mount_t mnt)1571 vfs_isswapmount(mount_t mnt)
1572 {
1573 	return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
1574 }
1575 
1576 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1577 
1578 
1579 /*
1580  * Convert between vnode types and inode formats (since POSIX.1
1581  * defines mode word of stat structure in terms of inode formats).
1582  */
1583 enum vtype
vnode_iftovt(int mode)1584 vnode_iftovt(int mode)
1585 {
1586 	return iftovt_tab[((mode) & S_IFMT) >> 12];
1587 }
1588 
1589 int
vnode_vttoif(enum vtype indx)1590 vnode_vttoif(enum vtype indx)
1591 {
1592 	return vttoif_tab[(int)(indx)];
1593 }
1594 
/* Combine a vnode type (as S_IFMT bits) with permission bits into a mode. */
int
vnode_makeimode(int indx, int mode)
{
	int fmt = VTTOIF(indx);

	return (int)(fmt | mode);
}
1600 
1601 
1602 /*
1603  * vnode manipulation functions.
1604  */
1605 
/* returns system root vnode iocount; It should be released using vnode_put() */
vnode_t
vfs_rootvnode(void)
{
	vnode_t vp = NULLVP;

	/* Cheap unlocked pre-check; the real read happens under the lock. */
	if (rootvnode) {
		lck_rw_lock_shared(&rootvnode_rw_lock);
		vp = rootvnode;
		/* vnode_get() failure means the vnode is going away; report none. */
		if (vp && (vnode_get(vp) != 0)) {
			vp = NULLVP;
		}
		lck_rw_unlock_shared(&rootvnode_rw_lock);
	}

	return vp;
}
1623 
/* Return the vnode's id (generation-style identity value v_id). */
uint32_t
vnode_vid(vnode_t vp)
{
	return (uint32_t)(vp->v_id);
}
1629 
/* Return the mount the vnode belongs to. */
mount_t
vnode_mount(vnode_t vp)
{
	return vp->v_mount;
}
1635 
#if CONFIG_IOSCHED
/* Device vnode backing vp's mount, or NULLVP if vp has no mount. */
vnode_t
vnode_mountdevvp(vnode_t vp)
{
	mount_t mp = vp->v_mount;

	return (mp != NULL) ? mp->mnt_devvp : NULLVP;
}
#endif
1647 
1648 boolean_t
vnode_isonexternalstorage(vnode_t vp)1649 vnode_isonexternalstorage(vnode_t vp)
1650 {
1651 	if (vp) {
1652 		if (vp->v_mount) {
1653 			if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_PERIPHERAL_DRIVE) {
1654 				return TRUE;
1655 			}
1656 		}
1657 	}
1658 	return FALSE;
1659 }
1660 
1661 boolean_t
vnode_isonssd(vnode_t vp)1662 vnode_isonssd(vnode_t vp)
1663 {
1664 	if (vp) {
1665 		if (vp->v_mount) {
1666 			if (vp->v_mount->mnt_kern_flag & MNTK_SSD) {
1667 				return TRUE;
1668 			}
1669 		}
1670 	}
1671 	return FALSE;
1672 }
1673 
1674 mount_t
vnode_mountedhere(vnode_t vp)1675 vnode_mountedhere(vnode_t vp)
1676 {
1677 	mount_t mp;
1678 
1679 	if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1680 	    (mp->mnt_vnodecovered == vp)) {
1681 		return mp;
1682 	} else {
1683 		return (mount_t)NULL;
1684 	}
1685 }
1686 
/* returns vnode type of vnode_t */
enum vtype
vnode_vtype(vnode_t vp)
{
	return vp->v_type;
}
1693 
/* returns FS specific node saved in vnode (v_data) */
void *
vnode_fsnode(vnode_t vp)
{
	return vp->v_data;
}
1700 
/* Detach the FS-specific node: clear v_data. */
void
vnode_clearfsnode(vnode_t vp)
{
	vp->v_data = NULL;
}
1706 
/* Return the raw device number (v_rdev) of the vnode. */
dev_t
vnode_specrdev(vnode_t vp)
{
	return vp->v_rdev;
}
1712 
1713 
1714 /* Accessor functions */
1715 /* is vnode_t a root vnode */
1716 int
vnode_isvroot(vnode_t vp)1717 vnode_isvroot(vnode_t vp)
1718 {
1719 	return (vp->v_flag & VROOT)? 1 : 0;
1720 }
1721 
1722 /* is vnode_t a system vnode */
1723 int
vnode_issystem(vnode_t vp)1724 vnode_issystem(vnode_t vp)
1725 {
1726 	return (vp->v_flag & VSYSTEM)? 1 : 0;
1727 }
1728 
1729 /* is vnode_t a swap file vnode */
1730 int
vnode_isswap(vnode_t vp)1731 vnode_isswap(vnode_t vp)
1732 {
1733 	return (vp->v_flag & VSWAP)? 1 : 0;
1734 }
1735 
1736 /* is vnode_t a tty */
1737 int
vnode_istty(vnode_t vp)1738 vnode_istty(vnode_t vp)
1739 {
1740 	return (vp->v_flag & VISTTY) ? 1 : 0;
1741 }
1742 
1743 /* if vnode_t mount operation in progress */
1744 int
vnode_ismount(vnode_t vp)1745 vnode_ismount(vnode_t vp)
1746 {
1747 	return (vp->v_flag & VMOUNT)? 1 : 0;
1748 }
1749 
1750 /* is this vnode under recyle now */
1751 int
vnode_isrecycled(vnode_t vp)1752 vnode_isrecycled(vnode_t vp)
1753 {
1754 	int ret;
1755 
1756 	vnode_lock_spin(vp);
1757 	ret =  (vp->v_lflag & (VL_TERMINATE | VL_DEAD))? 1 : 0;
1758 	vnode_unlock(vp);
1759 	return ret;
1760 }
1761 
1762 /* is this vnode marked for termination */
1763 int
vnode_willberecycled(vnode_t vp)1764 vnode_willberecycled(vnode_t vp)
1765 {
1766 	return (vp->v_lflag & VL_MARKTERM) ? 1 : 0;
1767 }
1768 
1769 
1770 /* vnode was created by background task requesting rapid aging
1771  *  and has not since been referenced by a normal task */
1772 int
vnode_israge(vnode_t vp)1773 vnode_israge(vnode_t vp)
1774 {
1775 	return (vp->v_flag & VRAGE)? 1 : 0;
1776 }
1777 
1778 int
vnode_needssnapshots(vnode_t vp)1779 vnode_needssnapshots(vnode_t vp)
1780 {
1781 	return (vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0;
1782 }
1783 
1784 
1785 /* Check the process/thread to see if we should skip atime updates */
1786 int
vfs_ctx_skipatime(vfs_context_t ctx)1787 vfs_ctx_skipatime(vfs_context_t ctx)
1788 {
1789 	struct uthread *ut;
1790 	proc_t proc;
1791 	thread_t thr;
1792 
1793 	proc = vfs_context_proc(ctx);
1794 	thr = vfs_context_thread(ctx);
1795 
1796 	/* Validate pointers in case we were invoked via a kernel context */
1797 	if (thr && proc) {
1798 		ut = get_bsdthread_info(thr);
1799 
1800 		if (proc->p_lflag & P_LRAGE_VNODES) {
1801 			return 1;
1802 		}
1803 
1804 		if (ut) {
1805 			if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) {
1806 				return 1;
1807 			}
1808 		}
1809 
1810 		if (proc->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) {
1811 			return 1;
1812 		}
1813 	}
1814 	return 0;
1815 }
1816 
1817 /* is vnode_t marked to not keep data cached once it's been consumed */
1818 int
vnode_isnocache(vnode_t vp)1819 vnode_isnocache(vnode_t vp)
1820 {
1821 	return (vp->v_flag & VNOCACHE_DATA)? 1 : 0;
1822 }
1823 
1824 /*
1825  * has sequential readahead been disabled on this vnode
1826  */
1827 int
vnode_isnoreadahead(vnode_t vp)1828 vnode_isnoreadahead(vnode_t vp)
1829 {
1830 	return (vp->v_flag & VRAOFF)? 1 : 0;
1831 }
1832 
1833 int
vnode_is_openevt(vnode_t vp)1834 vnode_is_openevt(vnode_t vp)
1835 {
1836 	return (vp->v_flag & VOPENEVT)? 1 : 0;
1837 }
1838 
1839 /* is vnode_t a standard one? */
1840 int
vnode_isstandard(vnode_t vp)1841 vnode_isstandard(vnode_t vp)
1842 {
1843 	return (vp->v_flag & VSTANDARD)? 1 : 0;
1844 }
1845 
1846 /* don't vflush() if SKIPSYSTEM */
1847 int
vnode_isnoflush(vnode_t vp)1848 vnode_isnoflush(vnode_t vp)
1849 {
1850 	return (vp->v_flag & VNOFLUSH)? 1 : 0;
1851 }
1852 
1853 /* is vnode_t a regular file */
1854 int
vnode_isreg(vnode_t vp)1855 vnode_isreg(vnode_t vp)
1856 {
1857 	return (vp->v_type == VREG)? 1 : 0;
1858 }
1859 
1860 /* is vnode_t a directory? */
1861 int
vnode_isdir(vnode_t vp)1862 vnode_isdir(vnode_t vp)
1863 {
1864 	return (vp->v_type == VDIR)? 1 : 0;
1865 }
1866 
1867 /* is vnode_t a symbolic link ? */
1868 int
vnode_islnk(vnode_t vp)1869 vnode_islnk(vnode_t vp)
1870 {
1871 	return (vp->v_type == VLNK)? 1 : 0;
1872 }
1873 
/*
 * Decide whether a lookup that reached vp must continue in the kernel:
 * directories covered by a mount (or with a trigger resolver), and
 * symlinks that must be followed.  On "yes", NAMEI_CONTLOOKUP is set in
 * the nameidata and EKEEPLOOKING is returned; otherwise returns 0.
 */
int
vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
{
	struct nameidata *ndp = cnp->cn_ndp;

	if (ndp == NULL) {
		panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL");
	}

	if (vnode_isdir(vp)) {
		/* A mount covers this directory: lookup must cross into it. */
		if (vp->v_mountedhere != NULL) {
			goto yes;
		}

#if CONFIG_TRIGGERS
		/* Trigger vnodes need resolution before lookup can proceed. */
		if (vp->v_resolve) {
			goto yes;
		}
#endif /* CONFIG_TRIGGERS */
	}


	if (vnode_islnk(vp)) {
		/* From lookup():  || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
		if (cnp->cn_flags & FOLLOW) {
			goto yes;
		}
		if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
			goto yes;
		}
	}

	return 0;

yes:
	ndp->ni_flag |= NAMEI_CONTLOOKUP;
	return EKEEPLOOKING;
}
1912 
1913 /* is vnode_t a fifo ? */
1914 int
vnode_isfifo(vnode_t vp)1915 vnode_isfifo(vnode_t vp)
1916 {
1917 	return (vp->v_type == VFIFO)? 1 : 0;
1918 }
1919 
1920 /* is vnode_t a block device? */
1921 int
vnode_isblk(vnode_t vp)1922 vnode_isblk(vnode_t vp)
1923 {
1924 	return (vp->v_type == VBLK)? 1 : 0;
1925 }
1926 
1927 int
vnode_isspec(vnode_t vp)1928 vnode_isspec(vnode_t vp)
1929 {
1930 	return ((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0;
1931 }
1932 
1933 /* is vnode_t a char device? */
1934 int
vnode_ischr(vnode_t vp)1935 vnode_ischr(vnode_t vp)
1936 {
1937 	return (vp->v_type == VCHR)? 1 : 0;
1938 }
1939 
1940 /* is vnode_t a socket? */
1941 int
vnode_issock(vnode_t vp)1942 vnode_issock(vnode_t vp)
1943 {
1944 	return (vp->v_type == VSOCK)? 1 : 0;
1945 }
1946 
1947 /* is vnode_t a device with multiple active vnodes referring to it? */
1948 int
vnode_isaliased(vnode_t vp)1949 vnode_isaliased(vnode_t vp)
1950 {
1951 	enum vtype vt = vp->v_type;
1952 	if (!((vt == VCHR) || (vt == VBLK))) {
1953 		return 0;
1954 	} else {
1955 		return vp->v_specflags & SI_ALIASED;
1956 	}
1957 }
1958 
/* is vnode_t a named stream? Always 0 when NAMEDSTREAMS is not configured. */
int
vnode_isnamedstream(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	return (vp->v_flag & VISNAMEDSTREAM) ? 1 : 0;
#else
	return 0;
#endif
}
1975 
/* Is vp a shadow (VISSHADOW) vnode? Always 0 when NAMEDSTREAMS is off. */
int
vnode_isshadow(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	return (vp->v_flag & VISSHADOW) ? 1 : 0;
#else
	return 0;
#endif
}
1991 
/* does vnode have associated named stream vnodes ? Always 0 when NAMEDSTREAMS is off. */
int
vnode_hasnamedstreams(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	return (vp->v_lflag & VL_HASSTREAMS) ? 1 : 0;
#else
	return 0;
#endif
}
/* Set VNOCACHE_DATA (don't cache data after it is consumed once) under the
 * vnode lock; used for quota */
void
vnode_setnocache(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VNOCACHE_DATA;
	vnode_unlock(vp);
}
2016 
/* Clear VNOCACHE_DATA under the vnode lock. */
void
vnode_clearnocache(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOCACHE_DATA;
	vnode_unlock(vp);
}
2024 
/* Set VOPENEVT under the vnode lock. */
void
vnode_set_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VOPENEVT;
	vnode_unlock(vp);
}
2032 
/* Clear VOPENEVT under the vnode lock. */
void
vnode_clear_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VOPENEVT;
	vnode_unlock(vp);
}
2040 
2041 
/* Disable sequential readahead: set VRAOFF under the vnode lock. */
void
vnode_setnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VRAOFF;
	vnode_unlock(vp);
}
2049 
/* Re-enable sequential readahead: clear VRAOFF under the vnode lock. */
void
vnode_clearnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VRAOFF;
	vnode_unlock(vp);
}
2057 
2058 int
vnode_isfastdevicecandidate(vnode_t vp)2059 vnode_isfastdevicecandidate(vnode_t vp)
2060 {
2061 	return (vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0;
2062 }
2063 
/* Set VFASTDEVCANDIDATE under the vnode lock. */
void
vnode_setfastdevicecandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VFASTDEVCANDIDATE;
	vnode_unlock(vp);
}
2071 
/* Clear VFASTDEVCANDIDATE under the vnode lock. */
void
vnode_clearfastdevicecandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VFASTDEVCANDIDATE;
	vnode_unlock(vp);
}
2079 
2080 int
vnode_isautocandidate(vnode_t vp)2081 vnode_isautocandidate(vnode_t vp)
2082 {
2083 	return (vp->v_flag & VAUTOCANDIDATE)? 1 : 0;
2084 }
2085 
/* Set VAUTOCANDIDATE under the vnode lock. */
void
vnode_setautocandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VAUTOCANDIDATE;
	vnode_unlock(vp);
}
2093 
/* Clear VAUTOCANDIDATE under the vnode lock. */
void
vnode_clearautocandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VAUTOCANDIDATE;
	vnode_unlock(vp);
}
2101 
2102 
2103 
2104 
/* mark vnode_t to skip vflush() if SKIPSYSTEM: set VNOFLUSH under the vnode lock */
void
vnode_setnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VNOFLUSH;
	vnode_unlock(vp);
}
2113 
/* Clear VNOFLUSH under the vnode lock. */
void
vnode_clearnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOFLUSH;
	vnode_unlock(vp);
}
2121 
2122 
2123 /* is vnode_t a blkdevice and has a FS mounted on it */
2124 int
vnode_ismountedon(vnode_t vp)2125 vnode_ismountedon(vnode_t vp)
2126 {
2127 	return (vp->v_specflags & SI_MOUNTEDON)? 1 : 0;
2128 }
2129 
/* Set SI_MOUNTEDON in v_specflags under the vnode lock. */
void
vnode_setmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags |= SI_MOUNTEDON;
	vnode_unlock(vp);
}
2137 
/* Clear SI_MOUNTEDON in v_specflags under the vnode lock. */
void
vnode_clearmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags &= ~SI_MOUNTEDON;
	vnode_unlock(vp);
}
2145 
2146 
/* Set the vnode's v_tag; the value is stored truncated to 16 bits. */
void
vnode_settag(vnode_t vp, int tag)
{
	/*
	 * We only assign enum values to v_tag, but add an assert to make sure we
	 * catch it in dev/debug builds if this ever change.
	 */
	assert(tag >= SHRT_MIN && tag <= SHRT_MAX);
	vp->v_tag = (uint16_t)tag;
}
2157 
/* Return the vnode's v_tag. */
int
vnode_tag(vnode_t vp)
{
	return vp->v_tag;
}
2163 
/* Return the vnode's cached parent pointer (may be NULL; no reference taken). */
vnode_t
vnode_parent(vnode_t vp)
{
	return vp->v_parent;
}
2169 
/* Set the vnode's cached parent pointer (plain store; no locking here). */
void
vnode_setparent(vnode_t vp, vnode_t dvp)
{
	vp->v_parent = dvp;
}
2175 
/* Set the vnode's name pointer (plain store; caller manages the string's lifetime). */
void
vnode_setname(vnode_t vp, char * name)
{
	vp->v_name = name;
}
2181 
/* return the registered  FS name when adding the FS to kernel;
 * buf must hold at least MFSNAMELEN bytes */
void
vnode_vfsname(vnode_t vp, char * buf)
{
	strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
}
2188 
/* return the FS type number */
int
vnode_vfstypenum(vnode_t vp)
{
	return vp->v_mount->mnt_vtable->vfc_typenum;
}
2195 
2196 int
vnode_vfs64bitready(vnode_t vp)2197 vnode_vfs64bitready(vnode_t vp)
2198 {
2199 	/*
2200 	 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2201 	 */
2202 	if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
2203 		return 1;
2204 	} else {
2205 		return 0;
2206 	}
2207 }
2208 
2209 
2210 
/* return the visible flags on associated mount point of vnode_t */
uint32_t
vnode_vfsvisflags(vnode_t vp)
{
	return vp->v_mount->mnt_flag & MNT_VISFLAGMASK;
}
2217 
/* return the command modifier flags on associated mount point of vnode_t */
uint32_t
vnode_vfscmdflags(vnode_t vp)
{
	return vp->v_mount->mnt_flag & MNT_CMDFLAGS;
}
2224 
/* return the max symlink length (mnt_maxsymlinklen) of vnode_t's mount */
uint32_t
vnode_vfsmaxsymlen(vnode_t vp)
{
	return vp->v_mount->mnt_maxsymlinklen;
}
2231 
/* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
struct vfsstatfs *
vnode_vfsstatfs(vnode_t vp)
{
	return &vp->v_mount->mnt_vfsstat;
}
2238 
/* return a handle to the FSs specific private handle associated with vnode_t's mount point */
void *
vnode_vfsfsprivate(vnode_t vp)
{
	return vp->v_mount->mnt_data;
}
2245 
2246 /* is vnode_t in a rdonly mounted  FS */
2247 int
vnode_vfsisrdonly(vnode_t vp)2248 vnode_vfsisrdonly(vnode_t vp)
2249 {
2250 	return (vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0;
2251 }
2252 
/* true if vp's filesystem advertises the compound rename VNOP */
int
vnode_compound_rename_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
}
/* true if vp's filesystem advertises the compound rmdir VNOP */
int
vnode_compound_rmdir_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
}
/* true if vp's filesystem advertises the compound mkdir VNOP */
int
vnode_compound_mkdir_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
}
/* true if vp's filesystem advertises the compound remove VNOP */
int
vnode_compound_remove_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
}
/* true if vp's filesystem advertises the compound open VNOP */
int
vnode_compound_open_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
}
2278 
2279 int
vnode_compound_op_available(vnode_t vp,compound_vnop_id_t opid)2280 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2281 {
2282 	return (vp->v_mount->mnt_compound_ops & opid) != 0;
2283 }
2284 
2285 /*
2286  * Returns vnode ref to current working directory; if a per-thread current
2287  * working directory is in effect, return that instead of the per process one.
2288  *
2289  * XXX Published, but not used.
2290  */
2291 vnode_t
current_workingdir(void)2292 current_workingdir(void)
2293 {
2294 	return vfs_context_cwd(vfs_context_current());
2295 }
2296 
2297 /*
2298  * Get a filesec and optional acl contents from an extended attribute.
 * Function will attempt to retrieve ACL, UUID, and GUID information using a
2300  * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2301  *
2302  * Parameters:	vp			The vnode on which to operate.
2303  *		fsecp			The filesec (and ACL, if any) being
2304  *					retrieved.
2305  *		ctx			The vnode context in which the
2306  *					operation is to be attempted.
2307  *
2308  * Returns:	0			Success
2309  *		!0			errno value
2310  *
2311  * Notes:	The kauth_filesec_t in '*fsecp', if retrieved, will be in
2312  *		host byte order, as will be the ACL contents, if any.
 *		Internally, we will canonicalize these values from network (PPC)
2314  *		byte order after we retrieve them so that the on-disk contents
2315  *		of the extended attribute are identical for both PPC and Intel
2316  *		(if we were not being required to provide this service via
2317  *		fallback, this would be the job of the filesystem
2318  *		'VNOP_GETATTR' call).
2319  *
2320  *		We use ntohl() because it has a transitive property on Intel
 *		machines and no effect on PPC machines.  This guarantees us
2322  *
 * XXX:		Deleting rather than ignoring a corrupt security structure is
 *		probably the only way to reset it without assistance from a
 *		file system integrity checking tool.  Right now we ignore it.
2326  *
 * XXX:		We should enumerate the possible errno values here, and where
2328  *		in the code they originated.
2329  */
static int
vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
{
	kauth_filesec_t fsec;           /* allocated buffer; ownership passes to caller on success */
	uio_t   fsec_uio;
	size_t  fsec_size;
	size_t  xsize, rsize;           /* advertised EA size / bytes actually read */
	int     error;
	uint32_t        host_fsec_magic;
	uint32_t        host_acl_entrycount;

	fsec = NULL;
	fsec_uio = NULL;

	/* find out how big the EA is */
	error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
	if (error != 0) {
		/* no EA, no filesec */
		if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
			error = 0;
		}
		/* either way, we are done */
		goto out;
	}

	/*
	 * To be valid, a kauth_filesec_t must be large enough to hold a zero
	 * ACE entry ACL, and if it's larger than that, it must have the right
	 * number of bytes such that it contains an atomic number of ACEs,
	 * rather than partial entries.  Otherwise, we ignore it.
	 */
	if (!KAUTH_FILESEC_VALID(xsize)) {
		KAUTH_DEBUG("    ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
		error = 0;
		goto out;
	}

	/* how many entries would fit? */
	fsec_size = KAUTH_FILESEC_COUNT(xsize);
	if (fsec_size > KAUTH_ACL_MAX_ENTRIES) {
		KAUTH_DEBUG("    ERROR - Bogus (too large) kauth_fiilesec_t: %ld bytes", xsize);
		error = 0;
		goto out;
	}

	/* get buffer and uio */
	if (((fsec = kauth_filesec_alloc((int)fsec_size)) == NULL) ||
	    ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
	    uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
		KAUTH_DEBUG("    ERROR - could not allocate iov to read ACL");
		error = ENOMEM;
		goto out;
	}

	/* read security attribute */
	rsize = xsize;
	if ((error = vn_getxattr(vp,
	    KAUTH_FILESEC_XATTR,
	    fsec_uio,
	    &rsize,
	    XATTR_NOSECURITY,
	    ctx)) != 0) {
		/* no attribute - no security data */
		if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
			error = 0;
		}
		/* either way, we are done */
		goto out;
	}

	/*
	 * Validate security structure; the validation must take place in host
	 * byte order.  If it's corrupt, we will just ignore it.
	 *
	 * Note: error is 0 at this point, so each of the goto's below returns
	 * success with no filesec, deliberately ignoring corrupt data.
	 */

	/* Validate the size before trying to convert it */
	if (rsize < KAUTH_FILESEC_SIZE(0)) {
		KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
		goto out;
	}

	/* Validate the magic number before trying to convert it */
	host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
	if (fsec->fsec_magic != host_fsec_magic) {
		KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
		goto out;
	}

	/* Validate the entry count before trying to convert it. */
	host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
	if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
		if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
			KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
			goto out;
		}
		/* the claimed entry count must fit in what we actually read */
		if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
			KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
			goto out;
		}
	}

	/* all checks passed; convert filesec/ACL to host byte order in place */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);

	/* transfer ownership to the caller, who must kauth_filesec_free() it */
	*fsecp = fsec;
	fsec = NULL;
	error = 0;
out:
	if (fsec != NULL) {
		kauth_filesec_free(fsec);
	}
	if (fsec_uio != NULL) {
		uio_free(fsec_uio);
	}
	if (error) {
		*fsecp = NULL;
	}
	return error;
}
2448 
2449 /*
2450  * Set a filesec and optional acl contents into an extended attribute.
2451  * function will attempt to store ACL, UUID, and GUID information using a
2452  * write to a named extended attribute (KAUTH_FILESEC_XATTR).  The 'acl'
2453  * may or may not point to the `fsec->fsec_acl`, depending on whether the
2454  * original caller supplied an acl.
2455  *
2456  * Parameters:	vp			The vnode on which to operate.
2457  *		fsec			The filesec being set.
2458  *		acl			The acl to be associated with 'fsec'.
2459  *		ctx			The vnode context in which the
2460  *					operation is to be attempted.
2461  *
2462  * Returns:	0			Success
2463  *		!0			errno value
2464  *
2465  * Notes:	Both the fsec and the acl are always valid.
2466  *
2467  *		The kauth_filesec_t in 'fsec', if any, is in host byte order,
2468  *		as are the acl contents, if they are used.  Internally, we will
 *		canonicalize these values into network (PPC) byte order before we
2470  *		attempt to write them so that the on-disk contents of the
2471  *		extended attribute are identical for both PPC and Intel (if we
2472  *		were not being required to provide this service via fallback,
2473  *		this would be the job of the filesystem 'VNOP_SETATTR' call).
2474  *		We reverse this process on the way out, so we leave with the
2475  *		same byte order we started with.
2476  *
 * XXX:		We should enumerate the possible errno values here, and where
2478  *		in the code they originated.
2479  */
static int
vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
{
	uio_t           fsec_uio;
	int             error;
	uint32_t        saved_acl_copysize;

	fsec_uio = NULL;

	/* two iovecs: the filesec header portion, then the ACL */
	if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
		KAUTH_DEBUG("    ERROR - could not allocate iov to write ACL");
		error = ENOMEM;
		goto out;
	}
	/*
	 * Save the pre-converted ACL copysize, because it gets swapped too
	 * if we are running with the wrong endianness.
	 */
	saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);

	/* convert fsec and acl to on-disk (network) byte order before writing */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);

	uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
	error = vn_setxattr(vp,
	    KAUTH_FILESEC_XATTR,
	    fsec_uio,
	    XATTR_NOSECURITY,           /* we have auth'ed already */
	    ctx);
	VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);

	/* restore the caller's structures to host byte order (see header comment) */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);

out:
	if (fsec_uio != NULL) {
		uio_free(fsec_uio);
	}
	return error;
}
2519 
2520 /*
2521  * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2522  */
void
vnode_attr_handle_mnt_ignore_ownership(struct vnode_attr *vap, mount_t mp, vfs_context_t ctx)
{
	uid_t   nuid;
	gid_t   ngid;

	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) {
			/* the superuser always sees the real on-disk owner */
			nuid = vap->va_uid;
		} else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			/* ownership ignored on this mount: substitute the mount's owner */
			nuid = mp->mnt_fsowner;
			if (nuid == KAUTH_UID_NONE) {
				nuid = 99;      /* 99 == the "unknown user" uid */
			}
		} else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
			nuid = vap->va_uid;
		} else {
			/* this will always be something sensible */
			nuid = mp->mnt_fsowner;
		}
		/* non-root callers see "unknown" (99) files as owned by themselves */
		if ((nuid == 99) && !vfs_context_issuser(ctx)) {
			nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
		}
		VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) {
			/* the superuser always sees the real on-disk group */
			ngid = vap->va_gid;
		} else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			/* ownership ignored on this mount: substitute the mount's group */
			ngid = mp->mnt_fsgroup;
			if (ngid == KAUTH_GID_NONE) {
				ngid = 99;      /* 99 == the "unknown group" gid */
			}
		} else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
			ngid = vap->va_gid;
		} else {
			/* this will always be something sensible */
			ngid = mp->mnt_fsgroup;
		}
		/* non-root callers see "unknown" (99) files as owned by their own group */
		if ((ngid == 99) && !vfs_context_issuser(ctx)) {
			ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
		}
		VATTR_RETURN(vap, va_gid, ngid);
	}
}
2568 
2569 /*
2570  * Returns:	0			Success
2571  *		ENOMEM			Not enough space [only if has filesec]
2572  *		EINVAL			Requested unknown attributes
2573  *		VNOP_GETATTR:		???
2574  *		vnode_get_filesec:	???
2575  *		kauth_cred_guid2uid:	???
2576  *		kauth_cred_guid2gid:	???
2577  *		vfs_update_vfsstat:	???
2578  */
int
vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	int     error;

	/*
	 * Reject attempts to fetch unknown attributes.
	 */
	if (vap->va_active & ~VNODE_ATTR_ALL) {
		return EINVAL;
	}

	/* don't ask for extended security data if the filesystem doesn't support it */
	if (!vfs_extendedsecurity(vnode_mount(vp))) {
		VATTR_CLEAR_ACTIVE(vap, va_acl);
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
		VATTR_CLEAR_ACTIVE(vap, va_guuid);
	}

	/*
	 * If the caller wants size values we might have to synthesise, give the
	 * filesystem the opportunity to supply better intermediate results.
	 */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		VATTR_SET_ACTIVE(vap, va_data_size);
		VATTR_SET_ACTIVE(vap, va_data_alloc);
		VATTR_SET_ACTIVE(vap, va_total_size);
		VATTR_SET_ACTIVE(vap, va_total_alloc);
	}

	/* clear VA_USEFSID on entry; the filesystem may set it back */
	vap->va_vaflags &= ~VA_USEFSID;

	error = VNOP_GETATTR(vp, vap, ctx);
	if (error) {
		KAUTH_DEBUG("ERROR - returning %d", error);
		goto out;
	}

	/*
	 * If extended security data was requested but not returned, try the fallback
	 * path.
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
		fsec = NULL;

		if (XATTR_VNODE_SUPPORTED(vp)) {
			/* try to get the filesec */
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
				goto out;
			}
		}
		/* if no filesec, no attributes */
		if (fsec == NULL) {
			VATTR_RETURN(vap, va_acl, NULL);
			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
		} else {
			/* looks good, try to return what we were asked for */
			VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
			VATTR_RETURN(vap, va_guuid, fsec->fsec_group);

			/* only return the ACL if we were actually asked for it */
			if (VATTR_IS_ACTIVE(vap, va_acl)) {
				if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
					VATTR_RETURN(vap, va_acl, NULL);
				} else {
					/* caller receives a separately-allocated ACL copy */
					facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
					if (facl == NULL) {
						kauth_filesec_free(fsec);
						error = ENOMEM;
						goto out;
					}
					__nochk_bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
					VATTR_RETURN(vap, va_acl, facl);
				}
			}
			kauth_filesec_free(fsec);
		}
	}
	/*
	 * If someone gave us an unsolicited filesec, toss it.  We promise that
	 * we're OK with a filesystem giving us anything back, but our callers
	 * only expect what they asked for.
	 */
	if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
		if (vap->va_acl != NULL) {
			kauth_acl_free(vap->va_acl);
		}
		VATTR_CLEAR_SUPPORTED(vap, va_acl);
	}

#if 0   /* enable when we have a filesystem only supporting UUIDs */
	/*
	 * Handle the case where we need a UID/GID, but only have extended
	 * security information.
	 */
	if (VATTR_NOT_RETURNED(vap, va_uid) &&
	    VATTR_IS_SUPPORTED(vap, va_uuuid) &&
	    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0) {
			VATTR_RETURN(vap, va_uid, nuid);
		}
	}
	if (VATTR_NOT_RETURNED(vap, va_gid) &&
	    VATTR_IS_SUPPORTED(vap, va_guuid) &&
	    !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0) {
			VATTR_RETURN(vap, va_gid, ngid);
		}
	}
#endif

	/* apply uid/gid == 99 and MNT_IGNORE_OWNERSHIP substitution */
	vnode_attr_handle_mnt_ignore_ownership(vap, vp->v_mount, ctx);

	/*
	 * Synthesise some values that can be reasonably guessed.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_iosize)) {
		assert(vp->v_mount->mnt_vfsstat.f_iosize <= UINT32_MAX);
		VATTR_RETURN(vap, va_iosize, (uint32_t)vp->v_mount->mnt_vfsstat.f_iosize);
	}

	if (!VATTR_IS_SUPPORTED(vap, va_flags)) {
		VATTR_RETURN(vap, va_flags, 0);
	}

	if (!VATTR_IS_SUPPORTED(vap, va_filerev)) {
		VATTR_RETURN(vap, va_filerev, 0);
	}

	if (!VATTR_IS_SUPPORTED(vap, va_gen)) {
		VATTR_RETURN(vap, va_gen, 0);
	}

	/*
	 * Default sizes.  Ordering here is important, as later defaults build on earlier ones.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_data_size)) {
		VATTR_RETURN(vap, va_data_size, 0);
	}

	/* do we want any of the possibly-computed values? */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		/* make sure f_bsize is valid */
		if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
			if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) {
				goto out;
			}
		}

		/* default va_data_alloc from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_data_alloc)) {
			VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
		}

		/* default va_total_size from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_total_size)) {
			VATTR_RETURN(vap, va_total_size, vap->va_data_size);
		}

		/* default va_total_alloc from va_total_size which is guaranteed at this point */
		if (!VATTR_IS_SUPPORTED(vap, va_total_alloc)) {
			VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
		}
	}

	/*
	 * If we don't have a change time, pull it from the modtime.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time)) {
		VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
	}

	/*
	 * This is really only supported for the creation VNOPs, but since the field is there
	 * we should populate it correctly.
	 */
	VATTR_RETURN(vap, va_type, vp->v_type);

	/*
	 * The fsid can be obtained from the mountpoint directly.
	 */
	if (VATTR_IS_ACTIVE(vap, va_fsid) &&
	    (!VATTR_IS_SUPPORTED(vap, va_fsid) ||
	    vap->va_vaflags & VA_REALFSID || !(vap->va_vaflags & VA_USEFSID))) {
		VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
	}

out:
	/* VA_USEFSID is an internal round-trip flag; never leak it to the caller */
	vap->va_vaflags &= ~VA_USEFSID;

	return error;
}
2778 
2779 /*
2780  * Choose 32 bit or 64 bit fsid
2781  */
2782 uint64_t
vnode_get_va_fsid(struct vnode_attr * vap)2783 vnode_get_va_fsid(struct vnode_attr *vap)
2784 {
2785 	if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
2786 		return (uint64_t)vap->va_fsid64.val[0] + ((uint64_t)vap->va_fsid64.val[1] << 32);
2787 	}
2788 	return vap->va_fsid;
2789 }
2790 
2791 /*
2792  * Set the attributes on a vnode in a vnode context.
2793  *
2794  * Parameters:	vp			The vnode whose attributes to set.
2795  *		vap			A pointer to the attributes to set.
2796  *		ctx			The vnode context in which the
2797  *					operation is to be attempted.
2798  *
2799  * Returns:	0			Success
2800  *		!0			errno value
2801  *
2802  * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order.
2803  *
2804  *		The contents of the data area pointed to by 'vap' may be
2805  *		modified if the vnode is on a filesystem which has been
 *		mounted with ignore ownership flags, or by the underlying
2807  *		VFS itself, or by the fallback code, if the underlying VFS
2808  *		does not support ACL, UUID, or GUUID attributes directly.
2809  *
 * XXX:		We should enumerate the possible errno values here, and where
2811  *		in the code they originated.
2812  */
int
vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int     error;
#if CONFIG_FSE
	uint64_t active;
	int     is_perm_change = 0;
	int     is_stat_change = 0;
#endif

	/*
	 * Reject attempts to set unknown attributes.
	 */
	if (vap->va_active & ~VNODE_ATTR_ALL) {
		return EINVAL;
	}

	/*
	 * Make sure the filesystem is mounted R/W.
	 * If not, return an error.
	 */
	if (vfs_isrdonly(vp->v_mount)) {
		error = EROFS;
		goto out;
	}

#if DEVELOPMENT || DEBUG
	/*
	 * XXX VSWAP: Check for entitlements or special flag here
	 * so we can restrict access appropriately.
	 */
#else /* DEVELOPMENT || DEBUG */

	/* only the kernel context may alter swap-backing vnodes */
	if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
		error = EPERM;
		goto out;
	}
#endif /* DEVELOPMENT || DEBUG */

#if NAMEDSTREAMS
	/* For streams, va_data_size is the only settable attribute. */
	if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
		error = EPERM;
		goto out;
	}
#endif
	/* Check for truncation */
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		switch (vp->v_type) {
		case VREG:
			/* For regular files it's ok */
			break;
		case VDIR:
			/* Not allowed to truncate directories */
			error = EISDIR;
			goto out;
		default:
			/* For everything else we will clear the bit and let underlying FS decide on the rest */
			VATTR_CLEAR_ACTIVE(vap, va_data_size);
			if (vap->va_active) {
				break;
			}
			/* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
			return 0;
		}
	}

	/*
	 * If ownership is being ignored on this volume, we silently discard
	 * ownership changes.
	 */
	if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}

	/*
	 * Make sure that extended security is enabled if we're going to try
	 * to set any.
	 */
	if (!vfs_extendedsecurity(vnode_mount(vp)) &&
	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
		KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
		error = ENOTSUP;
		goto out;
	}

	/* Never allow the setting of any unsupported superuser flags. */
	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
	}

#if CONFIG_FSE
	/*
	 * Remember all of the active attributes that we're
	 * attempting to modify.
	 */
	active = vap->va_active & ~VNODE_ATTR_RDONLY;
#endif

	error = VNOP_SETATTR(vp, vap, ctx);

	/* fall back to EA-based storage for anything the filesystem didn't handle */
	if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) {
		error = vnode_setattr_fallback(vp, vap, ctx);
	}

#if CONFIG_FSE
#define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \
	                 VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \
	                 VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl))

	/*
	 * Now that we've changed them, decide whether to send an
	 * FSevent.
	 */
	if ((active & PERMISSION_BITS) & vap->va_supported) {
		is_perm_change = 1;
	} else {
		/*
		 * We've already checked the permission bits, and we
		 * also want to filter out access time / backup time
		 * changes.
		 */
		active &= ~(PERMISSION_BITS |
		    VNODE_ATTR_BIT(va_access_time) |
		    VNODE_ATTR_BIT(va_backup_time));

		/* Anything left to notify about? */
		if (active & vap->va_supported) {
			is_stat_change = 1;
		}
	}

	/* only notify if the attribute change actually succeeded */
	if (error == 0) {
		if (is_perm_change) {
			if (need_fsevent(FSE_CHOWN, vp)) {
				add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
			}
		} else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) {
			add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
		}
	}
#undef PERMISSION_BITS
#endif

out:
	return error;
}
2961 
2962 /*
2963  * Fallback for setting the attributes on a vnode in a vnode context.  This
2964  * Function will attempt to store ACL, UUID, and GUID information utilizing
2965  * a read/modify/write operation against an EA used as a backing store for
2966  * the object.
2967  *
2968  * Parameters:	vp			The vnode whose attributes to set.
2969  *		vap			A pointer to the attributes to set.
2970  *		ctx			The vnode context in which the
2971  *					operation is to be attempted.
2972  *
2973  * Returns:	0			Success
2974  *		!0			errno value
2975  *
2976  * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order,
2977  *		as are the fsec and lfsec, if they are used.
2978  *
2979  *		The contents of the data area pointed to by 'vap' may be
2980  *		modified to indicate that the attribute is supported for
2981  *		any given requested attribute.
2982  *
 * XXX:		We should enumerate the possible errno values here, and where
2984  *		in the code they originated.
2985  */
int
vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	struct kauth_filesec lfsec;     /* stack-local filesec for full (non-RMW) updates */
	int     error;

	error = 0;

	/*
	 * Extended security fallback via extended attributes.
	 *
	 * Note that we do not free the filesec; the caller is expected to
	 * do this.
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) ||
	    VATTR_NOT_RETURNED(vap, va_uuuid) ||
	    VATTR_NOT_RETURNED(vap, va_guuid)) {
		VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");

		/*
		 * Fail for file types that we don't permit extended security
		 * to be set on.
		 */
		if (!XATTR_VNODE_SUPPORTED(vp)) {
			VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
			error = EINVAL;
			goto out;
		}

		/*
		 * If we don't have all the extended security items, we need
		 * to fetch the existing data to perform a read-modify-write
		 * operation.
		 */
		fsec = NULL;
		if (!VATTR_IS_ACTIVE(vap, va_acl) ||
		    !VATTR_IS_ACTIVE(vap, va_uuuid) ||
		    !VATTR_IS_ACTIVE(vap, va_guuid)) {
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
				KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
				goto out;
			}
		}
		/* if we didn't get a filesec, use our local one */
		if (fsec == NULL) {
			KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
			fsec = &lfsec;
		} else {
			KAUTH_DEBUG("SETATTR - updating existing filesec");
		}
		/* find the ACL */
		facl = &fsec->fsec_acl;

		/* if we're using the local filesec, we need to initialise it */
		if (fsec == &lfsec) {
			fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
			fsec->fsec_owner = kauth_null_guid;
			fsec->fsec_group = kauth_null_guid;
			facl->acl_entrycount = KAUTH_FILESEC_NOACL;
			facl->acl_flags = 0;
		}

		/*
		 * Update with the supplied attributes.
		 */
		if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
			KAUTH_DEBUG("SETATTR - updating owner UUID");
			fsec->fsec_owner = vap->va_uuuid;
			VATTR_SET_SUPPORTED(vap, va_uuuid);
		}
		if (VATTR_IS_ACTIVE(vap, va_guuid)) {
			KAUTH_DEBUG("SETATTR - updating group UUID");
			fsec->fsec_group = vap->va_guuid;
			VATTR_SET_SUPPORTED(vap, va_guuid);
		}
		if (VATTR_IS_ACTIVE(vap, va_acl)) {
			if (vap->va_acl == NULL) {
				KAUTH_DEBUG("SETATTR - removing ACL");
				facl->acl_entrycount = KAUTH_FILESEC_NOACL;
			} else {
				KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
				/* point at the caller's ACL rather than copying it */
				facl = vap->va_acl;
			}
			VATTR_SET_SUPPORTED(vap, va_acl);
		}

		/*
		 * If the filesec data is all invalid, we can just remove
		 * the EA completely.
		 */
		if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
		    kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
		    kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
			error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
			/* no attribute is ok, nothing to delete */
			if (error == ENOATTR) {
				error = 0;
			}
			VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
		} else {
			/* write the EA */
			error = vnode_set_filesec(vp, fsec, facl, ctx);
			VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
		}

		/* if we fetched a filesec, dispose of the buffer */
		if (fsec != &lfsec) {
			kauth_filesec_free(fsec);
		}
	}
out:

	return error;
}
3102 
3103 /*
3104  * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
3105  * event on a vnode.
3106  */
int
vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
{
	/* These are the same as the corresponding knotes, at least for now.  Cheating a little. */
	uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
	    | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
	uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
	    | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
	uint32_t knote_events = (events & knote_mask);

	/* Permissions are not explicitly part of the kqueue model */
	if (events & VNODE_EVENT_PERMS) {
		knote_events |= NOTE_ATTRIB;
	}

	/* Directory contents information just becomes NOTE_WRITE */
	if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
		knote_events |= NOTE_WRITE;
	}

	if (knote_events) {
		/* deliver to any attached knotes */
		lock_vnode_and_post(vp, knote_events);
#if CONFIG_FSE
		/* a non-NULL vap additionally generates an fsevent */
		if (vap != NULL) {
			create_fsevent_from_kevent(vp, events, vap);
		}
#else
		(void)vap;
#endif
	}

	/* always reports success, even when no event was posted */
	return 0;
}
3140 
3141 
3142 
3143 int
vnode_isdyldsharedcache(vnode_t vp)3144 vnode_isdyldsharedcache(vnode_t vp)
3145 {
3146 	return (vp->v_flag & VSHARED_DYLD) ? 1 : 0;
3147 }
3148 
3149 
3150 /*
3151  * For a filesystem that isn't tracking its own vnode watchers:
3152  * check whether a vnode is being monitored.
3153  */
3154 int
vnode_ismonitored(vnode_t vp)3155 vnode_ismonitored(vnode_t vp)
3156 {
3157 	return vp->v_knotes.slh_first != NULL;
3158 }
3159 
/*
 * Resolve the vnode that 'in_vp' shadows (nullfs only); *out_vpp is
 * preset to NULLVP on every path before any lookup is attempted.
 */
int
vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
{
	/* out_vpp may be NULL; initialize the result defensively */
	if (out_vpp) {
		*out_vpp = NULLVP;
	}
#if NULLFS
	return nullfs_getbackingvnode(in_vp, out_vpp);
#else
#pragma unused(in_vp)
	/* no nullfs in this configuration, so there is never a backing vnode */
	return ENOENT;
#endif
}
3173 
3174 /*
3175  * Initialize a struct vnode_attr and activate the attributes required
3176  * by the vnode_notify() call.
3177  */
int
vfs_get_notify_attributes(struct vnode_attr *vap)
{
	VATTR_INIT(vap);
	/* request exactly the attribute set consumed by vnode_notify() */
	vap->va_active = VNODE_NOTIFY_ATTRS;
	return 0;
}
3185 
3186 #if CONFIG_TRIGGERS
3187 int
vfs_settriggercallback(fsid_t * fsid,vfs_trigger_callback_t vtc,void * data,uint32_t flags __unused,vfs_context_t ctx)3188 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
3189 {
3190 	int error;
3191 	mount_t mp;
3192 
3193 	mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
3194 	if (mp == NULL) {
3195 		return ENOENT;
3196 	}
3197 
3198 	error = vfs_busy(mp, LK_NOWAIT);
3199 	mount_iterdrop(mp);
3200 
3201 	if (error != 0) {
3202 		return ENOENT;
3203 	}
3204 
3205 	mount_lock(mp);
3206 	if (mp->mnt_triggercallback != NULL) {
3207 		error = EBUSY;
3208 		mount_unlock(mp);
3209 		goto out;
3210 	}
3211 
3212 	mp->mnt_triggercallback = vtc;
3213 	mp->mnt_triggerdata = data;
3214 	mount_unlock(mp);
3215 
3216 	mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
3217 
3218 out:
3219 	vfs_unbusy(mp);
3220 	return 0;
3221 }
3222 #endif /* CONFIG_TRIGGERS */
3223 
3224 /*
3225  *  Definition of vnode operations.
3226  */
3227 
3228 #if 0
3229 /*
3230 *#
3231 *#% lookup       dvp     L ? ?
3232 *#% lookup       vpp     - L -
3233 */
3234 struct vnop_lookup_args {
3235 	struct vnodeop_desc *a_desc;
3236 	vnode_t a_dvp;
3237 	vnode_t *a_vpp;
3238 	struct componentname *a_cnp;
3239 	vfs_context_t a_context;
3240 };
3241 #endif /* 0*/
3242 
3243 /*
3244  * Returns:	0			Success
3245  *	lock_fsnode:ENOENT		No such file or directory [only for VFS
3246  *					 that is not thread safe & vnode is
3247  *					 currently being/has been terminated]
3248  *	<vfs_lookup>:ENAMETOOLONG
3249  *	<vfs_lookup>:ENOENT
3250  *	<vfs_lookup>:EJUSTRETURN
3251  *	<vfs_lookup>:EPERM
3252  *	<vfs_lookup>:EISDIR
3253  *	<vfs_lookup>:ENOTDIR
3254  *	<vfs_lookup>:???
3255  *
3256  * Note:	The return codes from the underlying VFS's lookup routine can't
3257  *		be fully enumerated here, since third party VFS authors may not
3258  *		limit their error returns to the ones documented here, even
3259  *		though this may result in some programs functioning incorrectly.
3260  *
3261  *		The return codes documented above are those which may currently
3262  *		be returned by HFS from hfs_lookup, not including additional
3263  *		error code which may be propagated from underlying routines.
3264  */
3265 errno_t
VNOP_LOOKUP(vnode_t dvp,vnode_t * vpp,struct componentname * cnp,vfs_context_t ctx)3266 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
3267 {
3268 	int _err;
3269 	struct vnop_lookup_args a;
3270 
3271 	a.a_desc = &vnop_lookup_desc;
3272 	a.a_dvp = dvp;
3273 	a.a_vpp = vpp;
3274 	a.a_cnp = cnp;
3275 	a.a_context = ctx;
3276 
3277 	_err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
3278 	if (_err == 0 && *vpp) {
3279 		DTRACE_FSINFO(lookup, vnode_t, *vpp);
3280 	}
3281 
3282 	return _err;
3283 }
3284 
3285 #if 0
3286 struct vnop_compound_open_args {
3287 	struct vnodeop_desc *a_desc;
3288 	vnode_t a_dvp;
3289 	vnode_t *a_vpp;
3290 	struct componentname *a_cnp;
3291 	int32_t a_flags;
3292 	int32_t a_fmode;
3293 	struct vnode_attr *a_vap;
3294 	vfs_context_t a_context;
3295 	void *a_reserved;
3296 };
3297 #endif /* 0 */
3298 
/*
 * Combined lookup + (optional) create + open operation, for filesystems
 * that support compound VNOPs.  'flags' carries open flags (O_CREAT
 * requests a create); 'vap' supplies creation attributes and must be
 * present exactly when O_CREAT is set (enforced by the panics below).
 */
int
VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_open_args a;
	int did_create = 0;
	int want_create;
	uint32_t tmp_status = 0;
	struct componentname *cnp = &ndp->ni_cnd;

	want_create = (flags & O_CREAT);

	a.a_desc = &vnop_compound_open_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp; /* Could be NULL */
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_fmode = fmode;
	/* Caller may not care about status bits; alias a local in that case. */
	a.a_status = (statusp != NULL) ? statusp : &tmp_status;
	a.a_vap = vap;
	a.a_context = ctx;
	a.a_open_create_authorizer = vn_authorize_create;
	a.a_open_existing_authorizer = vn_authorize_open_existing;
	a.a_reserved = NULL;

	/* Invariants: dvp required; vap present iff a create was requested. */
	if (dvp == NULLVP) {
		panic("No dvp?");
	}
	if (want_create && !vap) {
		panic("Want create, but no vap?");
	}
	if (!want_create && vap) {
		panic("Don't want create, but have a vap?");
	}

	_err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
	if (want_create) {
		if (_err == 0 && *vpp) {
			DTRACE_FSINFO(compound_open, vnode_t, *vpp);
		} else {
			DTRACE_FSINFO(compound_open, vnode_t, dvp);
		}
	} else {
		DTRACE_FSINFO(compound_open, vnode_t, *vpp);
	}

	/* The filesystem reports via the status word whether it created the file. */
	did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);

	if (did_create && !want_create) {
		panic("Filesystem did a create, even though none was requested?");
	}

	if (did_create) {
#if CONFIG_APPLEDOUBLE
		if (!NATIVE_XATTR(dvp)) {
			/*
			 * Remove stale Apple Double file (if any).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
		}
#endif /* CONFIG_APPLEDOUBLE */
		/* On create, provide kqueue notification */
		post_event_if_success(dvp, _err, NOTE_WRITE);
	}

	/* NOTE(review): *vpp is dereferenced here even though a_vpp is noted as
	 * possibly NULL above — confirm all callers supply a non-NULL vpp. */
	lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
#if 0 /* FSEvents... */
	if (*vpp && _err && _err != EKEEPLOOKING) {
		vnode_put(*vpp);
		*vpp = NULLVP;
	}
#endif /* 0 */

	return _err;
}
3374 
3375 #if 0
3376 struct vnop_create_args {
3377 	struct vnodeop_desc *a_desc;
3378 	vnode_t a_dvp;
3379 	vnode_t *a_vpp;
3380 	struct componentname *a_cnp;
3381 	struct vnode_attr *a_vap;
3382 	vfs_context_t a_context;
3383 };
3384 #endif /* 0*/
3385 errno_t
VNOP_CREATE(vnode_t dvp,vnode_t * vpp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx)3386 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3387 {
3388 	int _err;
3389 	struct vnop_create_args a;
3390 
3391 	a.a_desc = &vnop_create_desc;
3392 	a.a_dvp = dvp;
3393 	a.a_vpp = vpp;
3394 	a.a_cnp = cnp;
3395 	a.a_vap = vap;
3396 	a.a_context = ctx;
3397 
3398 	_err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
3399 	if (_err == 0 && *vpp) {
3400 		DTRACE_FSINFO(create, vnode_t, *vpp);
3401 	}
3402 
3403 #if CONFIG_APPLEDOUBLE
3404 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
3405 		/*
3406 		 * Remove stale Apple Double file (if any).
3407 		 */
3408 		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3409 	}
3410 #endif /* CONFIG_APPLEDOUBLE */
3411 
3412 	post_event_if_success(dvp, _err, NOTE_WRITE);
3413 
3414 	return _err;
3415 }
3416 
3417 #if 0
3418 /*
3419 *#
3420 *#% whiteout     dvp     L L L
3421 *#% whiteout     cnp     - - -
3422 *#% whiteout     flag    - - -
3423 *#
3424 */
3425 struct vnop_whiteout_args {
3426 	struct vnodeop_desc *a_desc;
3427 	vnode_t a_dvp;
3428 	struct componentname *a_cnp;
3429 	int a_flags;
3430 	vfs_context_t a_context;
3431 };
3432 #endif /* 0*/
errno_t
VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
    __unused int flags, __unused vfs_context_t ctx)
{
	/* Obsolete VNOP: whiteouts are unconditionally reported unsupported. */
	return ENOTSUP;       // XXX OBSOLETE
}
3439 
3440 #if 0
3441 /*
3442 *#
3443 *#% mknod        dvp     L U U
3444 *#% mknod        vpp     - X -
3445 *#
3446 */
3447 struct vnop_mknod_args {
3448 	struct vnodeop_desc *a_desc;
3449 	vnode_t a_dvp;
3450 	vnode_t *a_vpp;
3451 	struct componentname *a_cnp;
3452 	struct vnode_attr *a_vap;
3453 	vfs_context_t a_context;
3454 };
3455 #endif /* 0*/
3456 errno_t
VNOP_MKNOD(vnode_t dvp,vnode_t * vpp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx)3457 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3458 {
3459 	int _err;
3460 	struct vnop_mknod_args a;
3461 
3462 	a.a_desc = &vnop_mknod_desc;
3463 	a.a_dvp = dvp;
3464 	a.a_vpp = vpp;
3465 	a.a_cnp = cnp;
3466 	a.a_vap = vap;
3467 	a.a_context = ctx;
3468 
3469 	_err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3470 	if (_err == 0 && *vpp) {
3471 		DTRACE_FSINFO(mknod, vnode_t, *vpp);
3472 	}
3473 
3474 	post_event_if_success(dvp, _err, NOTE_WRITE);
3475 
3476 	return _err;
3477 }
3478 
3479 #if 0
3480 /*
3481 *#
3482 *#% open         vp      L L L
3483 *#
3484 */
3485 struct vnop_open_args {
3486 	struct vnodeop_desc *a_desc;
3487 	vnode_t a_vp;
3488 	int a_mode;
3489 	vfs_context_t a_context;
3490 };
3491 #endif /* 0*/
3492 errno_t
VNOP_OPEN(vnode_t vp,int mode,vfs_context_t ctx)3493 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3494 {
3495 	int _err;
3496 	struct vnop_open_args a;
3497 
3498 	if (ctx == NULL) {
3499 		ctx = vfs_context_current();
3500 	}
3501 	a.a_desc = &vnop_open_desc;
3502 	a.a_vp = vp;
3503 	a.a_mode = mode;
3504 	a.a_context = ctx;
3505 
3506 	_err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3507 	DTRACE_FSINFO(open, vnode_t, vp);
3508 
3509 	return _err;
3510 }
3511 
3512 #if 0
3513 /*
3514 *#
3515 *#% close        vp      U U U
3516 *#
3517 */
3518 struct vnop_close_args {
3519 	struct vnodeop_desc *a_desc;
3520 	vnode_t a_vp;
3521 	int a_fflag;
3522 	vfs_context_t a_context;
3523 };
3524 #endif /* 0*/
3525 errno_t
VNOP_CLOSE(vnode_t vp,int fflag,vfs_context_t ctx)3526 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3527 {
3528 	int _err;
3529 	struct vnop_close_args a;
3530 
3531 	if (ctx == NULL) {
3532 		ctx = vfs_context_current();
3533 	}
3534 	a.a_desc = &vnop_close_desc;
3535 	a.a_vp = vp;
3536 	a.a_fflag = fflag;
3537 	a.a_context = ctx;
3538 
3539 	_err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3540 	DTRACE_FSINFO(close, vnode_t, vp);
3541 
3542 	return _err;
3543 }
3544 
3545 #if 0
3546 /*
3547 *#
3548 *#% access       vp      L L L
3549 *#
3550 */
3551 struct vnop_access_args {
3552 	struct vnodeop_desc *a_desc;
3553 	vnode_t a_vp;
3554 	int a_action;
3555 	vfs_context_t a_context;
3556 };
3557 #endif /* 0*/
3558 errno_t
VNOP_ACCESS(vnode_t vp,int action,vfs_context_t ctx)3559 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3560 {
3561 	int _err;
3562 	struct vnop_access_args a;
3563 
3564 	if (ctx == NULL) {
3565 		ctx = vfs_context_current();
3566 	}
3567 	a.a_desc = &vnop_access_desc;
3568 	a.a_vp = vp;
3569 	a.a_action = action;
3570 	a.a_context = ctx;
3571 
3572 	_err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3573 	DTRACE_FSINFO(access, vnode_t, vp);
3574 
3575 	return _err;
3576 }
3577 
3578 #if 0
3579 /*
3580 *#
3581 *#% getattr      vp      = = =
3582 *#
3583 */
3584 struct vnop_getattr_args {
3585 	struct vnodeop_desc *a_desc;
3586 	vnode_t a_vp;
3587 	struct vnode_attr *a_vap;
3588 	vfs_context_t a_context;
3589 };
3590 #endif /* 0*/
3591 errno_t
VNOP_GETATTR(vnode_t vp,struct vnode_attr * vap,vfs_context_t ctx)3592 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3593 {
3594 	int _err;
3595 	struct vnop_getattr_args a;
3596 
3597 	a.a_desc = &vnop_getattr_desc;
3598 	a.a_vp = vp;
3599 	a.a_vap = vap;
3600 	a.a_context = ctx;
3601 
3602 	_err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3603 	DTRACE_FSINFO(getattr, vnode_t, vp);
3604 
3605 	return _err;
3606 }
3607 
3608 #if 0
3609 /*
3610 *#
3611 *#% setattr      vp      L L L
3612 *#
3613 */
3614 struct vnop_setattr_args {
3615 	struct vnodeop_desc *a_desc;
3616 	vnode_t a_vp;
3617 	struct vnode_attr *a_vap;
3618 	vfs_context_t a_context;
3619 };
3620 #endif /* 0*/
/*
 * Set attributes on a vnode via the filesystem, then perform KPI-level
 * follow-ups: mirror ownership/mode changes into a non-native
 * extended-attribute (AppleDouble) file, invalidate cached authorization
 * results when security-relevant attributes changed, and post NOTE_ATTRIB.
 */
errno_t
VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_setattr_args a;

	a.a_desc = &vnop_setattr_desc;
	a.a_vp = vp;
	a.a_vap = vap;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(setattr, vnode_t, vp);

#if CONFIG_APPLEDOUBLE
	/*
	 * Shadow uid/gid/mod change to extended attribute file.
	 */
	if (_err == 0 && !NATIVE_XATTR(vp)) {
		struct vnode_attr va;
		int change = 0;

		/* Copy only the ownership/mode attributes the caller actually set. */
		VATTR_INIT(&va);
		if (VATTR_IS_ACTIVE(vap, va_uid)) {
			VATTR_SET(&va, va_uid, vap->va_uid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_gid)) {
			VATTR_SET(&va, va_gid, vap->va_gid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_mode)) {
			VATTR_SET(&va, va_mode, vap->va_mode);
			change = 1;
		}
		if (change) {
			vnode_t dvp;
			const char   *vname;

			/* The "._" file lives next to vp; find parent and name to reach it. */
			dvp = vnode_getparent(vp);
			vname = vnode_getname(vp);

			xattrfile_setattr(dvp, vname, &va, ctx);
			if (dvp != NULLVP) {
				vnode_put(dvp);
			}
			if (vname != NULL) {
				vnode_putname(vname);
			}
		}
	}
#endif /* CONFIG_APPLEDOUBLE */

	/*
	 * If we have changed any of the things about the file that are likely
	 * to result in changes to authorization results, blow the vnode auth
	 * cache
	 */
	if (_err == 0 && (
		    VATTR_IS_SUPPORTED(vap, va_mode) ||
		    VATTR_IS_SUPPORTED(vap, va_uid) ||
		    VATTR_IS_SUPPORTED(vap, va_gid) ||
		    VATTR_IS_SUPPORTED(vap, va_flags) ||
		    VATTR_IS_SUPPORTED(vap, va_acl) ||
		    VATTR_IS_SUPPORTED(vap, va_uuuid) ||
		    VATTR_IS_SUPPORTED(vap, va_guuid))) {
		vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);

#if NAMEDSTREAMS
		/* The resource-fork shadow vnode caches rights too; flush it as well. */
		if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
			vnode_t svp;
			if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
				vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
				vnode_put(svp);
			}
		}
#endif /* NAMEDSTREAMS */
	}


	/* Tell kqueue watchers the attributes changed (only on success). */
	post_event_if_success(vp, _err, NOTE_ATTRIB);

	return _err;
}
3705 
3706 
3707 #if 0
3708 /*
3709 *#
3710 *#% read         vp      L L L
3711 *#
3712 */
3713 struct vnop_read_args {
3714 	struct vnodeop_desc *a_desc;
3715 	vnode_t a_vp;
3716 	struct uio *a_uio;
3717 	int a_ioflag;
3718 	vfs_context_t a_context;
3719 };
3720 #endif /* 0*/
3721 errno_t
VNOP_READ(vnode_t vp,struct uio * uio,int ioflag,vfs_context_t ctx)3722 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3723 {
3724 	int _err;
3725 	struct vnop_read_args a;
3726 #if CONFIG_DTRACE
3727 	user_ssize_t resid = uio_resid(uio);
3728 #endif
3729 
3730 	if (ctx == NULL) {
3731 		return EINVAL;
3732 	}
3733 
3734 	a.a_desc = &vnop_read_desc;
3735 	a.a_vp = vp;
3736 	a.a_uio = uio;
3737 	a.a_ioflag = ioflag;
3738 	a.a_context = ctx;
3739 
3740 	_err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3741 	DTRACE_FSINFO_IO(read,
3742 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3743 
3744 	return _err;
3745 }
3746 
3747 
3748 #if 0
3749 /*
3750 *#
3751 *#% write        vp      L L L
3752 *#
3753 */
3754 struct vnop_write_args {
3755 	struct vnodeop_desc *a_desc;
3756 	vnode_t a_vp;
3757 	struct uio *a_uio;
3758 	int a_ioflag;
3759 	vfs_context_t a_context;
3760 };
3761 #endif /* 0*/
3762 errno_t
VNOP_WRITE(vnode_t vp,struct uio * uio,int ioflag,vfs_context_t ctx)3763 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3764 {
3765 	struct vnop_write_args a;
3766 	int _err;
3767 #if CONFIG_DTRACE
3768 	user_ssize_t resid = uio_resid(uio);
3769 #endif
3770 
3771 	if (ctx == NULL) {
3772 		return EINVAL;
3773 	}
3774 
3775 	a.a_desc = &vnop_write_desc;
3776 	a.a_vp = vp;
3777 	a.a_uio = uio;
3778 	a.a_ioflag = ioflag;
3779 	a.a_context = ctx;
3780 
3781 	_err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3782 	DTRACE_FSINFO_IO(write,
3783 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3784 
3785 	post_event_if_success(vp, _err, NOTE_WRITE);
3786 
3787 	return _err;
3788 }
3789 
3790 
3791 #if 0
3792 /*
3793 *#
3794 *#% ioctl        vp      U U U
3795 *#
3796 */
3797 struct vnop_ioctl_args {
3798 	struct vnodeop_desc *a_desc;
3799 	vnode_t a_vp;
3800 	u_long a_command;
3801 	caddr_t a_data;
3802 	int a_fflag;
3803 	vfs_context_t a_context;
3804 };
3805 #endif /* 0*/
/*
 * Issue an ioctl against a vnode.  A NULL context defaults to the current
 * thread's.  Two cases are handled above the filesystem: a 64-bit-readiness
 * gate for non-device vnodes, and a direct answer for DKIOCISSOLIDSTATE on
 * the root device.
 */
errno_t
VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
{
	int _err;
	struct vnop_ioctl_args a;

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

	/*
	 * This check should probably have been put in the TTY code instead...
	 *
	 * We have to be careful about what we assume during startup and shutdown.
	 * We have to be able to use the root filesystem's device vnode even when
	 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
	 * structure.  If there is no data pointer, it doesn't matter whether
	 * the device is 64-bit ready.  Any command (like DKIOCSYNCHRONIZE)
	 * which passes NULL for its data pointer can therefore be used during
	 * mount or unmount of the root filesystem.
	 *
	 * Depending on what root filesystems need to do during mount/unmount, we
	 * may need to loosen this check again in the future.
	 */
	if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
		if (data != NULL && !vnode_vfs64bitready(vp)) {
			return ENOTTY;
		}
	}

	/*
	 * Answer DKIOCISSOLIDSTATE for the root device without calling down,
	 * using the SSD determination made at boot.
	 * NOTE(review): this stores a single byte through the caddr_t; it
	 * presumably relies on the caller's result buffer being zeroed and
	 * little-endian layout — confirm against DKIOCISSOLIDSTATE's uint32_t
	 * output convention.
	 */
	if ((command == DKIOCISSOLIDSTATE) && (vp == rootvp) && rootvp_is_ssd && data) {
		*data = 1;
		return 0;
	}

	a.a_desc = &vnop_ioctl_desc;
	a.a_vp = vp;
	a.a_command = command;
	a.a_data = data;
	a.a_fflag = fflag;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(ioctl, vnode_t, vp);

	return _err;
}
3853 
3854 
3855 #if 0
3856 /*
3857 *#
3858 *#% select       vp      U U U
3859 *#
3860 */
3861 struct vnop_select_args {
3862 	struct vnodeop_desc *a_desc;
3863 	vnode_t a_vp;
3864 	int a_which;
3865 	int a_fflags;
3866 	void *a_wql;
3867 	vfs_context_t a_context;
3868 };
3869 #endif /* 0*/
3870 errno_t
VNOP_SELECT(vnode_t vp,int which,int fflags,void * wql,vfs_context_t ctx)3871 VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t ctx)
3872 {
3873 	int _err;
3874 	struct vnop_select_args a;
3875 
3876 	if (ctx == NULL) {
3877 		ctx = vfs_context_current();
3878 	}
3879 	a.a_desc = &vnop_select_desc;
3880 	a.a_vp = vp;
3881 	a.a_which = which;
3882 	a.a_fflags = fflags;
3883 	a.a_context = ctx;
3884 	a.a_wql = wql;
3885 
3886 	_err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3887 	DTRACE_FSINFO(select, vnode_t, vp);
3888 
3889 	return _err;
3890 }
3891 
3892 
3893 #if 0
3894 /*
3895 *#
3896 *#% exchange fvp         L L L
3897 *#% exchange tvp         L L L
3898 *#
3899 */
3900 struct vnop_exchange_args {
3901 	struct vnodeop_desc *a_desc;
3902 	vnode_t a_fvp;
3903 	vnode_t a_tvp;
3904 	int a_options;
3905 	vfs_context_t a_context;
3906 };
3907 #endif /* 0*/
3908 errno_t
VNOP_EXCHANGE(vnode_t fvp,vnode_t tvp,int options,vfs_context_t ctx)3909 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
3910 {
3911 	int _err;
3912 	struct vnop_exchange_args a;
3913 
3914 	a.a_desc = &vnop_exchange_desc;
3915 	a.a_fvp = fvp;
3916 	a.a_tvp = tvp;
3917 	a.a_options = options;
3918 	a.a_context = ctx;
3919 
3920 	_err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
3921 	DTRACE_FSINFO(exchange, vnode_t, fvp);
3922 
3923 	/* Don't post NOTE_WRITE because file descriptors follow the data ... */
3924 	post_event_if_success(fvp, _err, NOTE_ATTRIB);
3925 	post_event_if_success(tvp, _err, NOTE_ATTRIB);
3926 
3927 	return _err;
3928 }
3929 
3930 
3931 #if 0
3932 /*
3933 *#
3934 *#% revoke       vp      U U U
3935 *#
3936 */
3937 struct vnop_revoke_args {
3938 	struct vnodeop_desc *a_desc;
3939 	vnode_t a_vp;
3940 	int a_flags;
3941 	vfs_context_t a_context;
3942 };
3943 #endif /* 0*/
3944 errno_t
VNOP_REVOKE(vnode_t vp,int flags,vfs_context_t ctx)3945 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
3946 {
3947 	struct vnop_revoke_args a;
3948 	int _err;
3949 
3950 	a.a_desc = &vnop_revoke_desc;
3951 	a.a_vp = vp;
3952 	a.a_flags = flags;
3953 	a.a_context = ctx;
3954 
3955 	_err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
3956 	DTRACE_FSINFO(revoke, vnode_t, vp);
3957 
3958 	return _err;
3959 }
3960 
3961 
3962 #if 0
3963 /*
3964 *#
3965 *# mmap_check - vp U U U
3966 *#
3967 */
3968 struct vnop_mmap_check_args {
3969 	struct vnodeop_desc *a_desc;
3970 	vnode_t a_vp;
3971 	int a_flags;
3972 	vfs_context_t a_context;
3973 };
3974 #endif /* 0 */
3975 errno_t
VNOP_MMAP_CHECK(vnode_t vp,int flags,vfs_context_t ctx)3976 VNOP_MMAP_CHECK(vnode_t vp, int flags, vfs_context_t ctx)
3977 {
3978 	int _err;
3979 	struct vnop_mmap_check_args a;
3980 
3981 	a.a_desc = &vnop_mmap_check_desc;
3982 	a.a_vp = vp;
3983 	a.a_flags = flags;
3984 	a.a_context = ctx;
3985 
3986 	_err = (*vp->v_op[vnop_mmap_check_desc.vdesc_offset])(&a);
3987 	if (_err == ENOTSUP) {
3988 		_err = 0;
3989 	}
3990 	DTRACE_FSINFO(mmap_check, vnode_t, vp);
3991 
3992 	return _err;
3993 }
3994 
3995 #if 0
3996 /*
3997 *#
3998 *# mmap - vp U U U
3999 *#
4000 */
4001 struct vnop_mmap_args {
4002 	struct vnodeop_desc *a_desc;
4003 	vnode_t a_vp;
4004 	int a_fflags;
4005 	vfs_context_t a_context;
4006 };
4007 #endif /* 0*/
4008 errno_t
VNOP_MMAP(vnode_t vp,int fflags,vfs_context_t ctx)4009 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
4010 {
4011 	int _err;
4012 	struct vnop_mmap_args a;
4013 
4014 	a.a_desc = &vnop_mmap_desc;
4015 	a.a_vp = vp;
4016 	a.a_fflags = fflags;
4017 	a.a_context = ctx;
4018 
4019 	_err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
4020 	DTRACE_FSINFO(mmap, vnode_t, vp);
4021 
4022 	return _err;
4023 }
4024 
4025 
4026 #if 0
4027 /*
4028 *#
4029 *# mnomap - vp U U U
4030 *#
4031 */
4032 struct vnop_mnomap_args {
4033 	struct vnodeop_desc *a_desc;
4034 	vnode_t a_vp;
4035 	vfs_context_t a_context;
4036 };
4037 #endif /* 0*/
4038 errno_t
VNOP_MNOMAP(vnode_t vp,vfs_context_t ctx)4039 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
4040 {
4041 	int _err;
4042 	struct vnop_mnomap_args a;
4043 
4044 	a.a_desc = &vnop_mnomap_desc;
4045 	a.a_vp = vp;
4046 	a.a_context = ctx;
4047 
4048 	_err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
4049 	DTRACE_FSINFO(mnomap, vnode_t, vp);
4050 
4051 	return _err;
4052 }
4053 
4054 
4055 #if 0
4056 /*
4057 *#
4058 *#% fsync        vp      L L L
4059 *#
4060 */
4061 struct vnop_fsync_args {
4062 	struct vnodeop_desc *a_desc;
4063 	vnode_t a_vp;
4064 	int a_waitfor;
4065 	vfs_context_t a_context;
4066 };
4067 #endif /* 0*/
4068 errno_t
VNOP_FSYNC(vnode_t vp,int waitfor,vfs_context_t ctx)4069 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
4070 {
4071 	struct vnop_fsync_args a;
4072 	int _err;
4073 
4074 	a.a_desc = &vnop_fsync_desc;
4075 	a.a_vp = vp;
4076 	a.a_waitfor = waitfor;
4077 	a.a_context = ctx;
4078 
4079 	_err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
4080 	DTRACE_FSINFO(fsync, vnode_t, vp);
4081 
4082 	return _err;
4083 }
4084 
4085 
4086 #if 0
4087 /*
4088 *#
4089 *#% remove       dvp     L U U
4090 *#% remove       vp      L U U
4091 *#
4092 */
4093 struct vnop_remove_args {
4094 	struct vnodeop_desc *a_desc;
4095 	vnode_t a_dvp;
4096 	vnode_t a_vp;
4097 	struct componentname *a_cnp;
4098 	int a_flags;
4099 	vfs_context_t a_context;
4100 };
4101 #endif /* 0*/
/*
 * Unlink the entry named by cnp (vnode vp) from directory dvp, then
 * perform KPI-level follow-ups: mark the vnode for inactivation, remove
 * any companion AppleDouble file, and post kqueue notifications.
 */
errno_t
VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
{
	int _err;
	struct vnop_remove_args a;

	a.a_desc = &vnop_remove_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_context = ctx;

	_err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(remove, vnode_t, vp);

	if (_err == 0) {
		/* Push the vnode toward VNOP_INACTIVE so the unlink is finalized. */
		vnode_setneedinactive(vp);
#if CONFIG_APPLEDOUBLE
		if (!(NATIVE_XATTR(dvp))) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
		}
#endif /* CONFIG_APPLEDOUBLE */
	}

	/* kqueue: the file was deleted/unlinked; the directory was written. */
	post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
	post_event_if_success(dvp, _err, NOTE_WRITE);

	return _err;
}
4135 
/*
 * Compound lookup + unlink for filesystems that support it.  On entry
 * *vpp may be NULLVP (lookup not yet performed); the filesystem may
 * return the looked-up vnode through *vpp.
 */
int
VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_remove_args a;
	/* Remember whether the caller supplied a vnode, for post-hook handling. */
	int no_vp = (*vpp == NULLVP);

	a.a_desc = &vnop_compound_remove_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = &ndp->ni_cnd;
	a.a_flags = flags;
	a.a_vap = vap;
	a.a_context = ctx;
	a.a_remove_authorizer = vn_authorize_unlink;

	_err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
	if (_err == 0 && *vpp) {
		DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
	} else {
		DTRACE_FSINFO(compound_remove, vnode_t, dvp);
	}
	if (_err == 0) {
		/* NOTE(review): assumes *vpp is non-NULL whenever the remove
		 * succeeded — confirm the compound-remove FS contract. */
		vnode_setneedinactive(*vpp);
#if CONFIG_APPLEDOUBLE
		if (!(NATIVE_XATTR(dvp))) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
		}
#endif /* CONFIG_APPLEDOUBLE */
	}

	/* kqueue: the file was deleted/unlinked; the directory was written. */
	post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
	post_event_if_success(dvp, _err, NOTE_WRITE);

	if (no_vp) {
		/* We performed the lookup here, so finish/undo it here as well. */
		lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
		if (*vpp && _err && _err != EKEEPLOOKING) {
			vnode_put(*vpp);
			*vpp = NULLVP;
		}
	}

	//printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);

	return _err;
}
4185 
4186 #if 0
4187 /*
4188 *#
4189 *#% link         vp      U U U
4190 *#% link         tdvp    L U U
4191 *#
4192 */
4193 struct vnop_link_args {
4194 	struct vnodeop_desc *a_desc;
4195 	vnode_t a_vp;
4196 	vnode_t a_tdvp;
4197 	struct componentname *a_cnp;
4198 	vfs_context_t a_context;
4199 };
4200 #endif /* 0*/
/*
 * Create a hard link to vp, named by cnp, in directory tdvp.  On
 * AppleDouble (non-native xattr) filesystems, refuse to link a regular
 * file whose own name begins with "._" so shadow files can't be aliased.
 */
errno_t
VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
{
	int _err;
	struct vnop_link_args a;

#if CONFIG_APPLEDOUBLE
	/*
	 * For file systems with non-native extended attributes,
	 * disallow linking to an existing "._" Apple Double file.
	 */
	if (!NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
		const char   *vname;

		vname = vnode_getname(vp);
		if (vname != NULL) {
			_err = 0;
			/* "._X" with a non-empty X is an AppleDouble shadow file name. */
			if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
				_err = EPERM;
			}
			vnode_putname(vname);
			if (_err) {
				return _err;
			}
		}
	}
#endif /* CONFIG_APPLEDOUBLE */

	a.a_desc = &vnop_link_desc;
	a.a_vp = vp;
	a.a_tdvp = tdvp;
	a.a_cnp = cnp;
	a.a_context = ctx;

	/* Dispatch via the target directory's op vector. */
	_err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(link, vnode_t, vp);

	/* kqueue: vp gained a link; the target directory was written. */
	post_event_if_success(vp, _err, NOTE_LINK);
	post_event_if_success(tdvp, _err, NOTE_WRITE);

	return _err;
}
4243 
4244 errno_t
vn_rename(struct vnode * fdvp,struct vnode ** fvpp,struct componentname * fcnp,struct vnode_attr * fvap,struct vnode * tdvp,struct vnode ** tvpp,struct componentname * tcnp,struct vnode_attr * tvap,vfs_rename_flags_t flags,vfs_context_t ctx)4245 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4246     struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4247     vfs_rename_flags_t flags, vfs_context_t ctx)
4248 {
4249 	int _err;
4250 	struct nameidata *fromnd = NULL;
4251 	struct nameidata *tond = NULL;
4252 #if CONFIG_APPLEDOUBLE
4253 	vnode_t src_attr_vp = NULLVP;
4254 	vnode_t dst_attr_vp = NULLVP;
4255 	char smallname1[48];
4256 	char smallname2[48];
4257 	char *xfromname = NULL;
4258 	char *xtoname = NULL;
4259 #endif /* CONFIG_APPLEDOUBLE */
4260 	int batched;
4261 	uint32_t tdfflags;      // Target directory file flags
4262 
4263 	batched = vnode_compound_rename_available(fdvp);
4264 
4265 	if (!batched) {
4266 		if (*fvpp == NULLVP) {
4267 			panic("Not batched, and no fvp?");
4268 		}
4269 	}
4270 
4271 #if CONFIG_APPLEDOUBLE
4272 	/*
4273 	 * We need to preflight any potential AppleDouble file for the source file
4274 	 * before doing the rename operation, since we could potentially be doing
4275 	 * this operation on a network filesystem, and would end up duplicating
4276 	 * the work.  Also, save the source and destination names.  Skip it if the
4277 	 * source has a "._" prefix.
4278 	 */
4279 
4280 	size_t xfromname_len = 0;
4281 	size_t xtoname_len = 0;
4282 	if (!NATIVE_XATTR(fdvp) &&
4283 	    !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
4284 		int error;
4285 
4286 		/* Get source attribute file name. */
4287 		xfromname_len = fcnp->cn_namelen + 3;
4288 		if (xfromname_len > sizeof(smallname1)) {
4289 			xfromname = kalloc_data(xfromname_len, Z_WAITOK);
4290 		} else {
4291 			xfromname = &smallname1[0];
4292 		}
4293 		strlcpy(xfromname, "._", xfromname_len);
4294 		strlcat(xfromname, fcnp->cn_nameptr, xfromname_len);
4295 
4296 		/* Get destination attribute file name. */
4297 		xtoname_len = tcnp->cn_namelen + 3;
4298 		if (xtoname_len > sizeof(smallname2)) {
4299 			xtoname = kalloc_data(xtoname_len, Z_WAITOK);
4300 		} else {
4301 			xtoname = &smallname2[0];
4302 		}
4303 		strlcpy(xtoname, "._", xtoname_len);
4304 		strlcat(xtoname, tcnp->cn_nameptr, xtoname_len);
4305 
4306 		/*
4307 		 * Look up source attribute file, keep reference on it if exists.
4308 		 * Note that we do the namei with the nameiop of RENAME, which is different than
4309 		 * in the rename syscall. It's OK if the source file does not exist, since this
4310 		 * is only for AppleDouble files.
4311 		 */
4312 		fromnd = kalloc_type(struct nameidata, Z_WAITOK);
4313 		NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
4314 		    UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
4315 		fromnd->ni_dvp = fdvp;
4316 		error = namei(fromnd);
4317 
4318 		/*
4319 		 * If there was an error looking up source attribute file,
4320 		 * we'll behave as if it didn't exist.
4321 		 */
4322 
4323 		if (error == 0) {
4324 			if (fromnd->ni_vp) {
4325 				/* src_attr_vp indicates need to call vnode_put / nameidone later */
4326 				src_attr_vp = fromnd->ni_vp;
4327 
4328 				if (fromnd->ni_vp->v_type != VREG) {
4329 					src_attr_vp = NULLVP;
4330 					vnode_put(fromnd->ni_vp);
4331 				}
4332 			}
4333 			/*
4334 			 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4335 			 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4336 			 * have a vnode here, so we drop our namei buffer for the source attribute file
4337 			 */
4338 			if (src_attr_vp == NULLVP) {
4339 				nameidone(fromnd);
4340 			}
4341 		}
4342 	}
4343 #endif /* CONFIG_APPLEDOUBLE */
4344 
4345 	if (batched) {
4346 		_err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
4347 		if (_err != 0) {
4348 			printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
4349 		}
4350 	} else {
4351 		if (flags) {
4352 			_err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
4353 			if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
4354 				// Legacy...
4355 				if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
4356 					fcnp->cn_flags |= CN_SECLUDE_RENAME;
4357 					_err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4358 				}
4359 			}
4360 		} else {
4361 			_err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4362 		}
4363 	}
4364 
4365 	/*
4366 	 * If moved to a new directory that is restricted,
4367 	 * set the restricted flag on the item moved.
4368 	 */
4369 	if (_err == 0) {
4370 		_err = vnode_flags(tdvp, &tdfflags, ctx);
4371 		if (_err == 0) {
4372 			uint32_t inherit_flags = tdfflags & (UF_DATAVAULT | SF_RESTRICTED);
4373 			if (inherit_flags) {
4374 				uint32_t fflags;
4375 				_err = vnode_flags(*fvpp, &fflags, ctx);
4376 				if (_err == 0 && fflags != (fflags | inherit_flags)) {
4377 					struct vnode_attr va;
4378 					VATTR_INIT(&va);
4379 					VATTR_SET(&va, va_flags, fflags | inherit_flags);
4380 					_err = vnode_setattr(*fvpp, &va, ctx);
4381 				}
4382 			}
4383 		}
4384 	}
4385 
4386 #if CONFIG_MACF
4387 	if (_err == 0) {
4388 		mac_vnode_notify_rename(
4389 			ctx,                        /* ctx */
4390 			*fvpp,                      /* fvp */
4391 			fdvp,                       /* fdvp */
4392 			fcnp,                       /* fcnp */
4393 			*tvpp,                      /* tvp */
4394 			tdvp,                       /* tdvp */
4395 			tcnp,                       /* tcnp */
4396 			(flags & VFS_RENAME_SWAP)   /* swap */
4397 			);
4398 	}
4399 #endif
4400 
4401 #if CONFIG_APPLEDOUBLE
4402 	/*
4403 	 * Rename any associated extended attribute file (._ AppleDouble file).
4404 	 */
4405 	if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
4406 		int error = 0;
4407 
4408 		/*
4409 		 * Get destination attribute file vnode.
4410 		 * Note that tdvp already has an iocount reference. Make sure to check that we
4411 		 * get a valid vnode from namei.
4412 		 */
4413 		tond = kalloc_type(struct nameidata, Z_WAITOK);
4414 		NDINIT(tond, RENAME, OP_RENAME,
4415 		    NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4416 		    CAST_USER_ADDR_T(xtoname), ctx);
4417 		tond->ni_dvp = tdvp;
4418 		error = namei(tond);
4419 
4420 		if (error) {
4421 			goto ad_error;
4422 		}
4423 
4424 		if (tond->ni_vp) {
4425 			dst_attr_vp = tond->ni_vp;
4426 		}
4427 
4428 		if (src_attr_vp) {
4429 			const char *old_name = src_attr_vp->v_name;
4430 			vnode_t old_parent = src_attr_vp->v_parent;
4431 
4432 			if (batched) {
4433 				error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
4434 				    tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
4435 				    0, ctx);
4436 			} else {
4437 				error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
4438 				    tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
4439 			}
4440 
4441 			if (error == 0 && old_name == src_attr_vp->v_name &&
4442 			    old_parent == src_attr_vp->v_parent) {
4443 				int update_flags = VNODE_UPDATE_NAME;
4444 
4445 				if (fdvp != tdvp) {
4446 					update_flags |= VNODE_UPDATE_PARENT;
4447 				}
4448 
4449 				if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
4450 					vnode_update_identity(src_attr_vp, tdvp,
4451 					    tond->ni_cnd.cn_nameptr,
4452 					    tond->ni_cnd.cn_namelen,
4453 					    tond->ni_cnd.cn_hash,
4454 					    update_flags);
4455 				}
4456 			}
4457 
4458 			/* kevent notifications for moving resource files
4459 			 * _err is zero if we're here, so no need to notify directories, code
4460 			 * below will do that.  only need to post the rename on the source and
4461 			 * possibly a delete on the dest
4462 			 */
4463 			post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4464 			if (dst_attr_vp) {
4465 				post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4466 			}
4467 		} else if (dst_attr_vp) {
4468 			/*
4469 			 * Just delete destination attribute file vnode if it exists, since
4470 			 * we didn't have a source attribute file.
4471 			 * Note that tdvp already has an iocount reference.
4472 			 */
4473 
4474 			struct vnop_remove_args args;
4475 
4476 			args.a_desc    = &vnop_remove_desc;
4477 			args.a_dvp     = tdvp;
4478 			args.a_vp      = dst_attr_vp;
4479 			args.a_cnp     = &tond->ni_cnd;
4480 			args.a_context = ctx;
4481 
4482 			if (error == 0) {
4483 				error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
4484 
4485 				if (error == 0) {
4486 					vnode_setneedinactive(dst_attr_vp);
4487 				}
4488 			}
4489 
4490 			/* kevent notification for deleting the destination's attribute file
4491 			 * if it existed.  Only need to post the delete on the destination, since
4492 			 * the code below will handle the directories.
4493 			 */
4494 			post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4495 		}
4496 	}
4497 ad_error:
4498 	if (src_attr_vp) {
4499 		vnode_put(src_attr_vp);
4500 		nameidone(fromnd);
4501 	}
4502 	if (dst_attr_vp) {
4503 		vnode_put(dst_attr_vp);
4504 		nameidone(tond);
4505 	}
4506 	if (xfromname && xfromname != &smallname1[0]) {
4507 		kfree_data(xfromname, xfromname_len);
4508 	}
4509 	if (xtoname && xtoname != &smallname2[0]) {
4510 		kfree_data(xtoname, xtoname_len);
4511 	}
4512 #endif /* CONFIG_APPLEDOUBLE */
4513 	kfree_type(struct nameidata, fromnd);
4514 	kfree_type(struct nameidata, tond);
4515 	return _err;
4516 }
4517 
4518 
4519 #if 0
4520 /*
4521 *#
4522 *#% rename       fdvp    U U U
4523 *#% rename       fvp     U U U
4524 *#% rename       tdvp    L U U
4525 *#% rename       tvp     X U U
4526 *#
4527 */
4528 struct vnop_rename_args {
4529 	struct vnodeop_desc *a_desc;
4530 	vnode_t a_fdvp;
4531 	vnode_t a_fvp;
4532 	struct componentname *a_fcnp;
4533 	vnode_t a_tdvp;
4534 	vnode_t a_tvp;
4535 	struct componentname *a_tcnp;
4536 	vfs_context_t a_context;
4537 };
4538 #endif /* 0*/
4539 errno_t
VNOP_RENAME(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_context_t ctx)4540 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4541     struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4542     vfs_context_t ctx)
4543 {
4544 	int _err = 0;
4545 	struct vnop_rename_args a;
4546 
4547 	a.a_desc = &vnop_rename_desc;
4548 	a.a_fdvp = fdvp;
4549 	a.a_fvp = fvp;
4550 	a.a_fcnp = fcnp;
4551 	a.a_tdvp = tdvp;
4552 	a.a_tvp = tvp;
4553 	a.a_tcnp = tcnp;
4554 	a.a_context = ctx;
4555 
4556 	/* do the rename of the main file. */
4557 	_err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4558 	DTRACE_FSINFO(rename, vnode_t, fdvp);
4559 
4560 	if (_err) {
4561 		return _err;
4562 	}
4563 
4564 	return post_rename(fdvp, fvp, tdvp, tvp);
4565 }
4566 
4567 static errno_t
post_rename(vnode_t fdvp,vnode_t fvp,vnode_t tdvp,vnode_t tvp)4568 post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
4569 {
4570 	if (tvp && tvp != fvp) {
4571 		vnode_setneedinactive(tvp);
4572 	}
4573 
4574 	/* Wrote at least one directory.  If transplanted a dir, also changed link counts */
4575 	int events = NOTE_WRITE;
4576 	if (vnode_isdir(fvp)) {
4577 		/* Link count on dir changed only if we are moving a dir and...
4578 		 *      --Moved to new dir, not overwriting there
4579 		 *      --Kept in same dir and DID overwrite
4580 		 */
4581 		if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4582 			events |= NOTE_LINK;
4583 		}
4584 	}
4585 
4586 	lock_vnode_and_post(fdvp, events);
4587 	if (fdvp != tdvp) {
4588 		lock_vnode_and_post(tdvp, events);
4589 	}
4590 
4591 	/* If you're replacing the target, post a deletion for it */
4592 	if (tvp) {
4593 		lock_vnode_and_post(tvp, NOTE_DELETE);
4594 	}
4595 
4596 	lock_vnode_and_post(fvp, NOTE_RENAME);
4597 
4598 	return 0;
4599 }
4600 
4601 #if 0
4602 /*
4603 *#
4604 *#% renamex      fdvp    U U U
4605 *#% renamex      fvp     U U U
4606 *#% renamex      tdvp    L U U
4607 *#% renamex      tvp     X U U
4608 *#
4609 */
4610 struct vnop_renamex_args {
4611 	struct vnodeop_desc *a_desc;
4612 	vnode_t a_fdvp;
4613 	vnode_t a_fvp;
4614 	struct componentname *a_fcnp;
4615 	vnode_t a_tdvp;
4616 	vnode_t a_tvp;
4617 	struct componentname *a_tcnp;
4618 	vfs_rename_flags_t a_flags;
4619 	vfs_context_t a_context;
4620 };
4621 #endif /* 0*/
4622 errno_t
VNOP_RENAMEX(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_rename_flags_t flags,vfs_context_t ctx)4623 VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4624     struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4625     vfs_rename_flags_t flags, vfs_context_t ctx)
4626 {
4627 	int _err = 0;
4628 	struct vnop_renamex_args a;
4629 
4630 	a.a_desc = &vnop_renamex_desc;
4631 	a.a_fdvp = fdvp;
4632 	a.a_fvp = fvp;
4633 	a.a_fcnp = fcnp;
4634 	a.a_tdvp = tdvp;
4635 	a.a_tvp = tvp;
4636 	a.a_tcnp = tcnp;
4637 	a.a_flags = flags;
4638 	a.a_context = ctx;
4639 
4640 	/* do the rename of the main file. */
4641 	_err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
4642 	DTRACE_FSINFO(renamex, vnode_t, fdvp);
4643 
4644 	if (_err) {
4645 		return _err;
4646 	}
4647 
4648 	return post_rename(fdvp, fvp, tdvp, tvp);
4649 }
4650 
4651 
/*
 * Compound rename: the filesystem may perform the lookup of fvp/tvp as
 * part of the operation.  On return, *fvpp/*tvpp hold iocounts that are
 * released here on failure (except EKEEPLOOKING, which means the lookup
 * must continue).
 */
int
VNOP_COMPOUND_RENAME(
	struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
	struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
	uint32_t flags, vfs_context_t ctx)
{
	int _err = 0;
	int events;
	struct vnop_compound_rename_args a;
	int no_fvp, no_tvp;

	/* Remember whether the FS (rather than the caller) resolved each vnode,
	 * so the lookup post-hooks below fire only for FS-resolved ones. */
	no_fvp = (*fvpp) == NULLVP;
	no_tvp = (*tvpp) == NULLVP;

	a.a_desc = &vnop_compound_rename_desc;

	a.a_fdvp = fdvp;
	a.a_fvpp = fvpp;
	a.a_fcnp = fcnp;
	a.a_fvap = fvap;

	a.a_tdvp = tdvp;
	a.a_tvpp = tvpp;
	a.a_tcnp = tcnp;
	a.a_tvap = tvap;

	a.a_flags = flags;
	a.a_context = ctx;
	a.a_rename_authorizer = vn_authorize_rename;
	a.a_reserved = NULL;

	/* do the rename of the main file. */
	_err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(compound_rename, vnode_t, fdvp);

	if (_err == 0) {
		/* A replaced target should be cleaned up on last iocount drop. */
		if (*tvpp && *tvpp != *fvpp) {
			vnode_setneedinactive(*tvpp);
		}
	}

	/* Wrote at least one directory.  If transplanted a dir, also changed link counts */
	if (_err == 0 && *fvpp != *tvpp) {
		if (!*fvpp) {
			panic("No fvpp after compound rename?");
		}

		events = NOTE_WRITE;
		if (vnode_isdir(*fvpp)) {
			/* Link count on dir changed only if we are moving a dir and...
			 *      --Moved to new dir, not overwriting there
			 *      --Kept in same dir and DID overwrite
			 */
			if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
				events |= NOTE_LINK;
			}
		}

		lock_vnode_and_post(fdvp, events);
		if (fdvp != tdvp) {
			lock_vnode_and_post(tdvp, events);
		}

		/* If you're replacing the target, post a deletion for it */
		if (*tvpp) {
			lock_vnode_and_post(*tvpp, NOTE_DELETE);
		}

		lock_vnode_and_post(*fvpp, NOTE_RENAME);
	}

	/* Run the compound-lookup post-hooks for vnodes the FS resolved. */
	if (no_fvp) {
		lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
	}
	if (no_tvp && *tvpp != NULLVP) {
		lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
	}

	/* On real failure, drop the iocounts the compound op handed back.
	 * EKEEPLOOKING keeps them: the lookup is still in progress. */
	if (_err && _err != EKEEPLOOKING) {
		if (*fvpp) {
			vnode_put(*fvpp);
			*fvpp = NULLVP;
		}
		if (*tvpp) {
			vnode_put(*tvpp);
			*tvpp = NULLVP;
		}
	}

	return _err;
}
4743 
4744 int
vn_mkdir(struct vnode * dvp,struct vnode ** vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)4745 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4746     struct vnode_attr *vap, vfs_context_t ctx)
4747 {
4748 	if (ndp->ni_cnd.cn_nameiop != CREATE) {
4749 		panic("Non-CREATE nameiop in vn_mkdir()?");
4750 	}
4751 
4752 	if (vnode_compound_mkdir_available(dvp)) {
4753 		return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4754 	} else {
4755 		return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4756 	}
4757 }
4758 
4759 #if 0
4760 /*
4761 *#
4762 *#% mkdir        dvp     L U U
4763 *#% mkdir        vpp     - L -
4764 *#
4765 */
4766 struct vnop_mkdir_args {
4767 	struct vnodeop_desc *a_desc;
4768 	vnode_t a_dvp;
4769 	vnode_t *a_vpp;
4770 	struct componentname *a_cnp;
4771 	struct vnode_attr *a_vap;
4772 	vfs_context_t a_context;
4773 };
4774 #endif /* 0*/
4775 errno_t
VNOP_MKDIR(struct vnode * dvp,struct vnode ** vpp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx)4776 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4777     struct vnode_attr *vap, vfs_context_t ctx)
4778 {
4779 	int _err;
4780 	struct vnop_mkdir_args a;
4781 
4782 	a.a_desc = &vnop_mkdir_desc;
4783 	a.a_dvp = dvp;
4784 	a.a_vpp = vpp;
4785 	a.a_cnp = cnp;
4786 	a.a_vap = vap;
4787 	a.a_context = ctx;
4788 
4789 	_err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4790 	if (_err == 0 && *vpp) {
4791 		DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4792 	}
4793 #if CONFIG_APPLEDOUBLE
4794 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
4795 		/*
4796 		 * Remove stale Apple Double file (if any).
4797 		 */
4798 		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4799 	}
4800 #endif /* CONFIG_APPLEDOUBLE */
4801 
4802 	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4803 
4804 	return _err;
4805 }
4806 
/*
 * Compound mkdir: lookup + create in one filesystem call.  On return,
 * *vpp (if set) carries an iocount; it is dropped here on real failure.
 */
int
VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
    struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_mkdir_args a;

	a.a_desc = &vnop_compound_mkdir_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = &ndp->ni_cnd;
	a.a_vap = vap;
	a.a_flags = 0;
	a.a_context = ctx;
#if 0
	a.a_mkdir_authorizer = vn_authorize_mkdir;
#endif /* 0 */
	a.a_reserved = NULL;

	_err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
	if (_err == 0 && *vpp) {
		DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
	}
#if CONFIG_APPLEDOUBLE
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
	}
#endif /* CONFIG_APPLEDOUBLE */

	/* New entry: the parent gains a link and was written. */
	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);

	/* Run the compound-lookup post-hook; the final arg notes whether the
	 * create succeeded. */
	lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
	/* Drop the iocount on real failure (EKEEPLOOKING keeps the lookup alive). */
	if (*vpp && _err && _err != EKEEPLOOKING) {
		vnode_put(*vpp);
		*vpp = NULLVP;
	}

	return _err;
}
4849 
4850 int
vn_rmdir(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)4851 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4852 {
4853 	if (vnode_compound_rmdir_available(dvp)) {
4854 		return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4855 	} else {
4856 		if (*vpp == NULLVP) {
4857 			panic("NULL vp, but not a compound VNOP?");
4858 		}
4859 		if (vap != NULL) {
4860 			panic("Non-NULL vap, but not a compound VNOP?");
4861 		}
4862 		return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4863 	}
4864 }
4865 
4866 #if 0
4867 /*
4868 *#
4869 *#% rmdir        dvp     L U U
4870 *#% rmdir        vp      L U U
4871 *#
4872 */
4873 struct vnop_rmdir_args {
4874 	struct vnodeop_desc *a_desc;
4875 	vnode_t a_dvp;
4876 	vnode_t a_vp;
4877 	struct componentname *a_cnp;
4878 	vfs_context_t a_context;
4879 };
4880 
4881 #endif /* 0*/
4882 errno_t
VNOP_RMDIR(struct vnode * dvp,struct vnode * vp,struct componentname * cnp,vfs_context_t ctx)4883 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4884 {
4885 	int _err;
4886 	struct vnop_rmdir_args a;
4887 
4888 	a.a_desc = &vnop_rmdir_desc;
4889 	a.a_dvp = dvp;
4890 	a.a_vp = vp;
4891 	a.a_cnp = cnp;
4892 	a.a_context = ctx;
4893 
4894 	_err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4895 	DTRACE_FSINFO(rmdir, vnode_t, vp);
4896 
4897 	if (_err == 0) {
4898 		vnode_setneedinactive(vp);
4899 #if CONFIG_APPLEDOUBLE
4900 		if (!(NATIVE_XATTR(dvp))) {
4901 			/*
4902 			 * Remove any associated extended attribute file (._ AppleDouble file).
4903 			 */
4904 			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4905 		}
4906 #endif
4907 	}
4908 
4909 	/* If you delete a dir, it loses its "." reference --> NOTE_LINK */
4910 	post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4911 	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4912 
4913 	return _err;
4914 }
4915 
4916 int
VNOP_COMPOUND_RMDIR(struct vnode * dvp,struct vnode ** vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)4917 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4918     struct vnode_attr *vap, vfs_context_t ctx)
4919 {
4920 	int _err;
4921 	struct vnop_compound_rmdir_args a;
4922 	int no_vp;
4923 
4924 	a.a_desc = &vnop_mkdir_desc;
4925 	a.a_dvp = dvp;
4926 	a.a_vpp = vpp;
4927 	a.a_cnp = &ndp->ni_cnd;
4928 	a.a_vap = vap;
4929 	a.a_flags = 0;
4930 	a.a_context = ctx;
4931 	a.a_rmdir_authorizer = vn_authorize_rmdir;
4932 	a.a_reserved = NULL;
4933 
4934 	no_vp = (*vpp == NULLVP);
4935 
4936 	_err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
4937 	if (_err == 0 && *vpp) {
4938 		DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
4939 	}
4940 #if CONFIG_APPLEDOUBLE
4941 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
4942 		/*
4943 		 * Remove stale Apple Double file (if any).
4944 		 */
4945 		xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4946 	}
4947 #endif
4948 
4949 	if (*vpp) {
4950 		post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4951 	}
4952 	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4953 
4954 	if (no_vp) {
4955 		lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4956 
4957 #if 0 /* Removing orphaned ._ files requires a vp.... */
4958 		if (*vpp && _err && _err != EKEEPLOOKING) {
4959 			vnode_put(*vpp);
4960 			*vpp = NULLVP;
4961 		}
4962 #endif  /* 0 */
4963 	}
4964 
4965 	return _err;
4966 }
4967 
4968 #if CONFIG_APPLEDOUBLE
4969 /*
4970  * Remove a ._ AppleDouble file
4971  */
4972 #define AD_STALE_SECS  (180)
/*
 * Remove the "._<basename>" AppleDouble sidecar file from directory dvp.
 * With force == 0 (create paths), only remove it if it looks stale
 * (non-empty and unmodified for more than AD_STALE_SECS); with force == 1
 * (delete/rmdir paths), remove it unconditionally.  Best-effort: all
 * failures are silently ignored.
 */
static void
xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
{
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t alloc_len;
	size_t copy_len;

	/* Nothing to do for empty names, or names already of "._" form
	 * (avoids recursing onto "._._foo"). */
	if ((basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>", spilling to a heap buffer if it doesn't fit
	 * in the stack buffer. */
	filename = &smallname[0];
	alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (alloc_len >= sizeof(smallname)) {
		alloc_len++;  /* snprintf result doesn't include '\0' */
		filename = kalloc_data(alloc_len, Z_WAITOK);
		copy_len = snprintf(filename, alloc_len, "._%s", basename);
	}
	/* WANTPARENT: on success, namei returns an extra iocount on dvp as
	 * well as on the leaf; both are dropped at out1. */
	NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(filename), ctx);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0) {
		goto out2;
	}

	xvp = nd.ni_vp;
	nameidone(&nd);
	/* Only regular files are plausible AppleDouble sidecars. */
	if (xvp->v_type != VREG) {
		goto out1;
	}

	/*
	 * When creating a new object and a "._" file already
	 * exists, check to see if its a stale "._" file.
	 *
	 */
	if (!force) {
		struct vnode_attr va;

		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_data_size);
		VATTR_WANTED(&va, va_modify_time);
		if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
		    VATTR_IS_SUPPORTED(&va, va_data_size) &&
		    VATTR_IS_SUPPORTED(&va, va_modify_time) &&
		    va.va_data_size != 0) {
			struct timeval tv;

			/* Stale if non-empty and not modified within AD_STALE_SECS. */
			microtime(&tv);
			if ((tv.tv_sec > va.va_modify_time.tv_sec) &&
			    (tv.tv_sec - va.va_modify_time.tv_sec) > AD_STALE_SECS) {
				force = 1;  /* must be stale */
			}
		}
	}
	if (force) {
		int  error;

		error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
		if (error == 0) {
			vnode_setneedinactive(xvp);
		}

		post_event_if_success(xvp, error, NOTE_DELETE);
		post_event_if_success(dvp, error, NOTE_WRITE);
	}

out1:
	/* Drop the iocounts namei took (WANTPARENT gave us one on dvp too). */
	vnode_put(dvp);
	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		kfree_data(filename, alloc_len);
	}
}
5051 
5052 /*
5053  * Shadow uid/gid/mod to a ._ AppleDouble file
5054  */
/*
 * Shadow uid/gid/mod to a ._ AppleDouble file
 *
 * Mirror an attribute change made on <basename> onto its "._<basename>"
 * sidecar in dvp, so ownership/permissions stay in sync.  Best-effort:
 * lookup or setattr failures are silently ignored.
 */
static void
xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
    vfs_context_t ctx)
{
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t alloc_len;
	size_t copy_len;

	/* Skip empty names and names already of "._" form. */
	if ((dvp == NULLVP) ||
	    (basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>", spilling to a heap buffer if needed. */
	filename = &smallname[0];
	alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (alloc_len >= sizeof(smallname)) {
		alloc_len++;  /* snprintf result doesn't include '\0' */
		filename = kalloc_data(alloc_len, Z_WAITOK);
		copy_len = snprintf(filename, alloc_len, "._%s", basename);
	}
	/* No WANTPARENT here: only the leaf gets an iocount, dropped below. */
	NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(filename), ctx);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0) {
		goto out2;
	}

	xvp = nd.ni_vp;
	nameidone(&nd);

	/* Only regular files are plausible sidecars; call the FS setattr
	 * entry point directly and ignore its result. */
	if (xvp->v_type == VREG) {
		struct vnop_setattr_args a;

		a.a_desc = &vnop_setattr_desc;
		a.a_vp = xvp;
		a.a_vap = vap;
		a.a_context = ctx;

		(void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
	}

	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		kfree_data(filename, alloc_len);
	}
}
5105 #endif /* CONFIG_APPLEDOUBLE */
5106 
5107  #if 0
5108 /*
5109 *#
5110 *#% symlink      dvp     L U U
5111 *#% symlink      vpp     - U -
5112 *#
5113 */
5114 struct vnop_symlink_args {
5115 	struct vnodeop_desc *a_desc;
5116 	vnode_t a_dvp;
5117 	vnode_t *a_vpp;
5118 	struct componentname *a_cnp;
5119 	struct vnode_attr *a_vap;
5120 	char *a_target;
5121 	vfs_context_t a_context;
5122 };
5123 
5124 #endif /* 0*/
5125 errno_t
VNOP_SYMLINK(struct vnode * dvp,struct vnode ** vpp,struct componentname * cnp,struct vnode_attr * vap,char * target,vfs_context_t ctx)5126 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5127     struct vnode_attr *vap, char *target, vfs_context_t ctx)
5128 {
5129 	int _err;
5130 	struct vnop_symlink_args a;
5131 
5132 	a.a_desc = &vnop_symlink_desc;
5133 	a.a_dvp = dvp;
5134 	a.a_vpp = vpp;
5135 	a.a_cnp = cnp;
5136 	a.a_vap = vap;
5137 	a.a_target = target;
5138 	a.a_context = ctx;
5139 
5140 	_err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
5141 	DTRACE_FSINFO(symlink, vnode_t, dvp);
5142 #if CONFIG_APPLEDOUBLE
5143 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
5144 		/*
5145 		 * Remove stale Apple Double file (if any).  Posts its own knotes
5146 		 */
5147 		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
5148 	}
5149 #endif /* CONFIG_APPLEDOUBLE */
5150 
5151 	post_event_if_success(dvp, _err, NOTE_WRITE);
5152 
5153 	return _err;
5154 }
5155 
5156 #if 0
5157 /*
5158 *#
5159 *#% readdir      vp      L L L
5160 *#
5161 */
5162 struct vnop_readdir_args {
5163 	struct vnodeop_desc *a_desc;
5164 	vnode_t a_vp;
5165 	struct uio *a_uio;
5166 	int a_flags;
5167 	int *a_eofflag;
5168 	int *a_numdirent;
5169 	vfs_context_t a_context;
5170 };
5171 
5172 #endif /* 0*/
5173 errno_t
VNOP_READDIR(struct vnode * vp,struct uio * uio,int flags,int * eofflag,int * numdirent,vfs_context_t ctx)5174 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
5175     int *numdirent, vfs_context_t ctx)
5176 {
5177 	int _err;
5178 	struct vnop_readdir_args a;
5179 #if CONFIG_DTRACE
5180 	user_ssize_t resid = uio_resid(uio);
5181 #endif
5182 
5183 	a.a_desc = &vnop_readdir_desc;
5184 	a.a_vp = vp;
5185 	a.a_uio = uio;
5186 	a.a_flags = flags;
5187 	a.a_eofflag = eofflag;
5188 	a.a_numdirent = numdirent;
5189 	a.a_context = ctx;
5190 
5191 	_err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
5192 	DTRACE_FSINFO_IO(readdir,
5193 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5194 
5195 	return _err;
5196 }
5197 
5198 #if 0
5199 /*
5200 *#
5201 *#% readdirattr  vp      L L L
5202 *#
5203 */
5204 struct vnop_readdirattr_args {
5205 	struct vnodeop_desc *a_desc;
5206 	vnode_t a_vp;
5207 	struct attrlist *a_alist;
5208 	struct uio *a_uio;
5209 	uint32_t a_maxcount;
5210 	uint32_t a_options;
5211 	uint32_t *a_newstate;
5212 	int *a_eofflag;
5213 	uint32_t *a_actualcount;
5214 	vfs_context_t a_context;
5215 };
5216 
5217 #endif /* 0*/
5218 errno_t
VNOP_READDIRATTR(struct vnode * vp,struct attrlist * alist,struct uio * uio,uint32_t maxcount,uint32_t options,uint32_t * newstate,int * eofflag,uint32_t * actualcount,vfs_context_t ctx)5219 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
5220     uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
5221 {
5222 	int _err;
5223 	struct vnop_readdirattr_args a;
5224 #if CONFIG_DTRACE
5225 	user_ssize_t resid = uio_resid(uio);
5226 #endif
5227 
5228 	a.a_desc = &vnop_readdirattr_desc;
5229 	a.a_vp = vp;
5230 	a.a_alist = alist;
5231 	a.a_uio = uio;
5232 	a.a_maxcount = maxcount;
5233 	a.a_options = options;
5234 	a.a_newstate = newstate;
5235 	a.a_eofflag = eofflag;
5236 	a.a_actualcount = actualcount;
5237 	a.a_context = ctx;
5238 
5239 	_err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
5240 	DTRACE_FSINFO_IO(readdirattr,
5241 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5242 
5243 	return _err;
5244 }
5245 
5246 #if 0
5247 struct vnop_getttrlistbulk_args {
5248 	struct vnodeop_desc *a_desc;
5249 	vnode_t a_vp;
5250 	struct attrlist *a_alist;
5251 	struct vnode_attr *a_vap;
5252 	struct uio *a_uio;
5253 	void *a_private
5254 	uint64_t a_options;
5255 	int *a_eofflag;
5256 	uint32_t *a_actualcount;
5257 	vfs_context_t a_context;
5258 };
5259 #endif /* 0*/
5260 errno_t
VNOP_GETATTRLISTBULK(struct vnode * vp,struct attrlist * alist,struct vnode_attr * vap,struct uio * uio,void * private,uint64_t options,int32_t * eofflag,int32_t * actualcount,vfs_context_t ctx)5261 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
5262     struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
5263     int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
5264 {
5265 	int _err;
5266 	struct vnop_getattrlistbulk_args a;
5267 #if CONFIG_DTRACE
5268 	user_ssize_t resid = uio_resid(uio);
5269 #endif
5270 
5271 	a.a_desc = &vnop_getattrlistbulk_desc;
5272 	a.a_vp = vp;
5273 	a.a_alist = alist;
5274 	a.a_vap = vap;
5275 	a.a_uio = uio;
5276 	a.a_private = private;
5277 	a.a_options = options;
5278 	a.a_eofflag = eofflag;
5279 	a.a_actualcount = actualcount;
5280 	a.a_context = ctx;
5281 
5282 	_err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
5283 	DTRACE_FSINFO_IO(getattrlistbulk,
5284 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5285 
5286 	return _err;
5287 }
5288 
5289 #if 0
5290 /*
5291 *#
5292 *#% readlink     vp      L L L
5293 *#
5294 */
5295 struct vnop_readlink_args {
5296 	struct vnodeop_desc *a_desc;
5297 	vnode_t a_vp;
5298 	struct uio *a_uio;
5299 	vfs_context_t a_context;
5300 };
5301 #endif /* 0 */
5302 
5303 /*
5304  * Returns:	0			Success
5305  *		lock_fsnode:ENOENT	No such file or directory [only for VFS
5306  *					 that is not thread safe & vnode is
5307  *					 currently being/has been terminated]
5308  *		<vfs_readlink>:EINVAL
5309  *		<vfs_readlink>:???
5310  *
5311  * Note:	The return codes from the underlying VFS's readlink routine
5312  *		can't be fully enumerated here, since third party VFS authors
5313  *		may not limit their error returns to the ones documented here,
5314  *		even though this may result in some programs functioning
5315  *		incorrectly.
5316  *
5317  *		The return codes documented above are those which may currently
5318  *		be returned by HFS from hfs_vnop_readlink, not including
5319  *		additional error code which may be propagated from underlying
5320  *		routines.
5321  */
5322 errno_t
VNOP_READLINK(struct vnode * vp,struct uio * uio,vfs_context_t ctx)5323 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
5324 {
5325 	int _err;
5326 	struct vnop_readlink_args a;
5327 #if CONFIG_DTRACE
5328 	user_ssize_t resid = uio_resid(uio);
5329 #endif
5330 	a.a_desc = &vnop_readlink_desc;
5331 	a.a_vp = vp;
5332 	a.a_uio = uio;
5333 	a.a_context = ctx;
5334 
5335 	_err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
5336 	DTRACE_FSINFO_IO(readlink,
5337 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5338 
5339 	return _err;
5340 }
5341 
5342 #if 0
5343 /*
5344 *#
5345 *#% inactive     vp      L U U
5346 *#
5347 */
5348 struct vnop_inactive_args {
5349 	struct vnodeop_desc *a_desc;
5350 	vnode_t a_vp;
5351 	vfs_context_t a_context;
5352 };
5353 #endif /* 0*/
5354 errno_t
VNOP_INACTIVE(struct vnode * vp,vfs_context_t ctx)5355 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
5356 {
5357 	int _err;
5358 	struct vnop_inactive_args a;
5359 
5360 	a.a_desc = &vnop_inactive_desc;
5361 	a.a_vp = vp;
5362 	a.a_context = ctx;
5363 
5364 	_err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
5365 	DTRACE_FSINFO(inactive, vnode_t, vp);
5366 
5367 #if NAMEDSTREAMS
5368 	/* For file systems that do not support namedstream natively, mark
5369 	 * the shadow stream file vnode to be recycled as soon as the last
5370 	 * reference goes away.  To avoid re-entering reclaim code, do not
5371 	 * call recycle on terminating namedstream vnodes.
5372 	 */
5373 	if (vnode_isnamedstream(vp) &&
5374 	    (vp->v_parent != NULLVP) &&
5375 	    vnode_isshadow(vp) &&
5376 	    ((vp->v_lflag & VL_TERMINATE) == 0)) {
5377 		vnode_recycle(vp);
5378 	}
5379 #endif
5380 
5381 	return _err;
5382 }
5383 
5384 
5385 #if 0
5386 /*
5387 *#
5388 *#% reclaim      vp      U U U
5389 *#
5390 */
5391 struct vnop_reclaim_args {
5392 	struct vnodeop_desc *a_desc;
5393 	vnode_t a_vp;
5394 	vfs_context_t a_context;
5395 };
5396 #endif /* 0*/
5397 errno_t
VNOP_RECLAIM(struct vnode * vp,vfs_context_t ctx)5398 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5399 {
5400 	int _err;
5401 	struct vnop_reclaim_args a;
5402 
5403 	a.a_desc = &vnop_reclaim_desc;
5404 	a.a_vp = vp;
5405 	a.a_context = ctx;
5406 
5407 	_err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5408 	DTRACE_FSINFO(reclaim, vnode_t, vp);
5409 
5410 	return _err;
5411 }
5412 
5413 
5414 /*
5415  * Returns:	0			Success
5416  *	lock_fsnode:ENOENT		No such file or directory [only for VFS
5417  *					 that is not thread safe & vnode is
5418  *					 currently being/has been terminated]
5419  *	<vnop_pathconf_desc>:???	[per FS implementation specific]
5420  */
5421 #if 0
5422 /*
5423 *#
5424 *#% pathconf     vp      L L L
5425 *#
5426 */
5427 struct vnop_pathconf_args {
5428 	struct vnodeop_desc *a_desc;
5429 	vnode_t a_vp;
5430 	int a_name;
5431 	int32_t *a_retval;
5432 	vfs_context_t a_context;
5433 };
5434 #endif /* 0*/
5435 errno_t
VNOP_PATHCONF(struct vnode * vp,int name,int32_t * retval,vfs_context_t ctx)5436 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5437 {
5438 	int _err;
5439 	struct vnop_pathconf_args a;
5440 
5441 	a.a_desc = &vnop_pathconf_desc;
5442 	a.a_vp = vp;
5443 	a.a_name = name;
5444 	a.a_retval = retval;
5445 	a.a_context = ctx;
5446 
5447 	_err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5448 	DTRACE_FSINFO(pathconf, vnode_t, vp);
5449 
5450 	return _err;
5451 }
5452 
5453 /*
5454  * Returns:	0			Success
5455  *	err_advlock:ENOTSUP
5456  *	lf_advlock:???
5457  *	<vnop_advlock_desc>:???
5458  *
5459  * Notes:	VFS implementations of advisory locking using calls through
5460  *		<vnop_advlock_desc> because lock enforcement does not occur
5461  *		locally should try to limit themselves to the return codes
5462  *		documented above for lf_advlock and err_advlock.
5463  */
5464 #if 0
5465 /*
5466 *#
5467 *#% advlock      vp      U U U
5468 *#
5469 */
5470 struct vnop_advlock_args {
5471 	struct vnodeop_desc *a_desc;
5472 	vnode_t a_vp;
5473 	caddr_t a_id;
5474 	int a_op;
5475 	struct flock *a_fl;
5476 	int a_flags;
5477 	vfs_context_t a_context;
5478 };
5479 #endif /* 0*/
/*
 * Apply or query an advisory (flock/OFD/POSIX) record lock on a vnode.
 * Depending on the vnode and mount configuration the request is either
 * rejected, serviced locally by lf_advlock(), or forwarded to the FS.
 */
errno_t
VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
{
	int _err;
	struct vnop_advlock_args a;

	a.a_desc = &vnop_advlock_desc;
	a.a_vp = vp;
	a.a_id = id;
	a.a_op = op;
	a.a_fl = fl;
	a.a_flags = flags;
	a.a_context = ctx;
	a.a_timeout = timeout;

	/* Disallow advisory locking on non-seekable vnodes */
	if (vnode_isfifo(vp)) {
		_err = err_advlock(&a);
	} else {
		if ((vp->v_flag & VLOCKLOCAL)) {
			/* Advisory locking done at this layer */
			_err = lf_advlock(&a);
		} else if (flags & F_OFD_LOCK) {
			/* Non-local locking doesn't work for OFD locks */
			_err = err_advlock(&a);
		} else {
			/* Advisory locking done by underlying filesystem */
			_err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
		}
		DTRACE_FSINFO(advlock, vnode_t, vp);
		/* On a successful flock/OFD unlock, wake knote waiters. */
		if (op == F_UNLCK &&
		    (flags & (F_FLOCK | F_OFD_LOCK)) != 0) {
			post_event_if_success(vp, _err, NOTE_FUNLOCK);
		}
	}

	return _err;
}
5518 
5519 
5520 
5521 #if 0
5522 /*
5523 *#
5524 *#% allocate     vp      L L L
5525 *#
5526 */
5527 struct vnop_allocate_args {
5528 	struct vnodeop_desc *a_desc;
5529 	vnode_t a_vp;
5530 	off_t a_length;
5531 	u_int32_t a_flags;
5532 	off_t *a_bytesallocated;
5533 	off_t a_offset;
5534 	vfs_context_t a_context;
5535 };
5536 
5537 #endif /* 0*/
5538 errno_t
VNOP_ALLOCATE(struct vnode * vp,off_t length,u_int32_t flags,off_t * bytesallocated,off_t offset,vfs_context_t ctx)5539 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5540 {
5541 	int _err;
5542 	struct vnop_allocate_args a;
5543 
5544 	a.a_desc = &vnop_allocate_desc;
5545 	a.a_vp = vp;
5546 	a.a_length = length;
5547 	a.a_flags = flags;
5548 	a.a_bytesallocated = bytesallocated;
5549 	a.a_offset = offset;
5550 	a.a_context = ctx;
5551 
5552 	_err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5553 	DTRACE_FSINFO(allocate, vnode_t, vp);
5554 #if CONFIG_FSE
5555 	if (_err == 0) {
5556 		add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5557 	}
5558 #endif
5559 
5560 	return _err;
5561 }
5562 
5563 #if 0
5564 /*
5565 *#
5566 *#% pagein       vp      = = =
5567 *#
5568 */
5569 struct vnop_pagein_args {
5570 	struct vnodeop_desc *a_desc;
5571 	vnode_t a_vp;
5572 	upl_t a_pl;
5573 	upl_offset_t a_pl_offset;
5574 	off_t a_f_offset;
5575 	size_t a_size;
5576 	int a_flags;
5577 	vfs_context_t a_context;
5578 };
5579 #endif /* 0*/
5580 errno_t
VNOP_PAGEIN(struct vnode * vp,upl_t pl,upl_offset_t pl_offset,off_t f_offset,size_t size,int flags,vfs_context_t ctx)5581 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5582 {
5583 	int _err;
5584 	struct vnop_pagein_args a;
5585 
5586 	a.a_desc = &vnop_pagein_desc;
5587 	a.a_vp = vp;
5588 	a.a_pl = pl;
5589 	a.a_pl_offset = pl_offset;
5590 	a.a_f_offset = f_offset;
5591 	a.a_size = size;
5592 	a.a_flags = flags;
5593 	a.a_context = ctx;
5594 
5595 	_err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5596 	DTRACE_FSINFO(pagein, vnode_t, vp);
5597 
5598 	return _err;
5599 }
5600 
5601 #if 0
5602 /*
5603 *#
5604 *#% pageout      vp      = = =
5605 *#
5606 */
5607 struct vnop_pageout_args {
5608 	struct vnodeop_desc *a_desc;
5609 	vnode_t a_vp;
5610 	upl_t a_pl;
5611 	upl_offset_t a_pl_offset;
5612 	off_t a_f_offset;
5613 	size_t a_size;
5614 	int a_flags;
5615 	vfs_context_t a_context;
5616 };
5617 
5618 #endif /* 0*/
5619 errno_t
VNOP_PAGEOUT(struct vnode * vp,upl_t pl,upl_offset_t pl_offset,off_t f_offset,size_t size,int flags,vfs_context_t ctx)5620 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5621 {
5622 	int _err;
5623 	struct vnop_pageout_args a;
5624 
5625 	a.a_desc = &vnop_pageout_desc;
5626 	a.a_vp = vp;
5627 	a.a_pl = pl;
5628 	a.a_pl_offset = pl_offset;
5629 	a.a_f_offset = f_offset;
5630 	a.a_size = size;
5631 	a.a_flags = flags;
5632 	a.a_context = ctx;
5633 
5634 	_err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5635 	DTRACE_FSINFO(pageout, vnode_t, vp);
5636 
5637 	post_event_if_success(vp, _err, NOTE_WRITE);
5638 
5639 	return _err;
5640 }
5641 
5642 int
vn_remove(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,int32_t flags,struct vnode_attr * vap,vfs_context_t ctx)5643 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5644 {
5645 	if (vnode_compound_remove_available(dvp)) {
5646 		return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5647 	} else {
5648 		return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5649 	}
5650 }
5651 
5652 #if CONFIG_SEARCHFS
5653 
5654 #if 0
5655 /*
5656 *#
5657 *#% searchfs     vp      L L L
5658 *#
5659 */
5660 struct vnop_searchfs_args {
5661 	struct vnodeop_desc *a_desc;
5662 	vnode_t a_vp;
5663 	void *a_searchparams1;
5664 	void *a_searchparams2;
5665 	struct attrlist *a_searchattrs;
5666 	uint32_t a_maxmatches;
5667 	struct timeval *a_timelimit;
5668 	struct attrlist *a_returnattrs;
5669 	uint32_t *a_nummatches;
5670 	uint32_t a_scriptcode;
5671 	uint32_t a_options;
5672 	struct uio *a_uio;
5673 	struct searchstate *a_searchstate;
5674 	vfs_context_t a_context;
5675 };
5676 
5677 #endif /* 0*/
5678 errno_t
VNOP_SEARCHFS(struct vnode * vp,void * searchparams1,void * searchparams2,struct attrlist * searchattrs,uint32_t maxmatches,struct timeval * timelimit,struct attrlist * returnattrs,uint32_t * nummatches,uint32_t scriptcode,uint32_t options,struct uio * uio,struct searchstate * searchstate,vfs_context_t ctx)5679 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5680 {
5681 	int _err;
5682 	struct vnop_searchfs_args a;
5683 
5684 	a.a_desc = &vnop_searchfs_desc;
5685 	a.a_vp = vp;
5686 	a.a_searchparams1 = searchparams1;
5687 	a.a_searchparams2 = searchparams2;
5688 	a.a_searchattrs = searchattrs;
5689 	a.a_maxmatches = maxmatches;
5690 	a.a_timelimit = timelimit;
5691 	a.a_returnattrs = returnattrs;
5692 	a.a_nummatches = nummatches;
5693 	a.a_scriptcode = scriptcode;
5694 	a.a_options = options;
5695 	a.a_uio = uio;
5696 	a.a_searchstate = searchstate;
5697 	a.a_context = ctx;
5698 
5699 	_err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5700 	DTRACE_FSINFO(searchfs, vnode_t, vp);
5701 
5702 	return _err;
5703 }
5704 #endif /* CONFIG_SEARCHFS */
5705 
5706 #if 0
5707 /*
5708 *#
5709 *#% copyfile fvp U U U
5710 *#% copyfile tdvp L U U
5711 *#% copyfile tvp X U U
5712 *#
5713 */
5714 struct vnop_copyfile_args {
5715 	struct vnodeop_desc *a_desc;
5716 	vnode_t a_fvp;
5717 	vnode_t a_tdvp;
5718 	vnode_t a_tvp;
5719 	struct componentname *a_tcnp;
5720 	int a_mode;
5721 	int a_flags;
5722 	vfs_context_t a_context;
5723 };
5724 #endif /* 0*/
5725 errno_t
VNOP_COPYFILE(struct vnode * fvp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,int mode,int flags,vfs_context_t ctx)5726 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5727     int mode, int flags, vfs_context_t ctx)
5728 {
5729 	int _err;
5730 	struct vnop_copyfile_args a;
5731 	a.a_desc = &vnop_copyfile_desc;
5732 	a.a_fvp = fvp;
5733 	a.a_tdvp = tdvp;
5734 	a.a_tvp = tvp;
5735 	a.a_tcnp = tcnp;
5736 	a.a_mode = mode;
5737 	a.a_flags = flags;
5738 	a.a_context = ctx;
5739 	_err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5740 	DTRACE_FSINFO(copyfile, vnode_t, fvp);
5741 	return _err;
5742 }
5743 
5744 #if 0
5745 struct vnop_clonefile_args {
5746 	struct vnodeop_desc *a_desc;
5747 	vnode_t a_fvp;
5748 	vnode_t a_dvp;
5749 	vnode_t *a_vpp;
5750 	struct componentname *a_cnp;
5751 	struct vnode_attr *a_vap;
5752 	uint32_t a_flags;
5753 	vfs_context_t a_context;
5754 	int (*a_dir_clone_authorizer)(  /* Authorization callback */
5755 		struct vnode_attr *vap,         /* attribute to be authorized */
5756 		kauth_action_t action,         /* action for which attribute is to be authorized */
5757 		struct vnode_attr *dvap,         /* target directory attributes */
5758 		vnode_t sdvp,         /* source directory vnode pointer (optional) */
5759 		mount_t mp,         /* mount point of filesystem */
5760 		dir_clone_authorizer_op_t vattr_op,         /* specific operation requested : setup, authorization or cleanup  */
5761 		uint32_t flags;         /* value passed in a_flags to the VNOP */
5762 		vfs_context_t ctx,                      /* As passed to VNOP */
5763 		void *reserved);                        /* Always NULL */
5764 	void *a_reserved;               /* Currently unused */
5765 };
5766 #endif /* 0 */
5767 
/*
 * Clone 'fvp' into directory 'dvp' under the name 'cnp', returning the
 * new vnode through 'vpp'.  For directory clones, the kernel supplies
 * an authorization callback the FS must invoke per cloned entry.
 * Posts NOTE_WRITE on the target directory on success.
 */
errno_t
VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
    struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
    vfs_context_t ctx)
{
	int _err;
	struct vnop_clonefile_args a;
	a.a_desc = &vnop_clonefile_desc;
	a.a_fvp = fvp;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_flags = flags;
	a.a_context = ctx;

	/* Only directory clones need per-entry authorization. */
	if (vnode_vtype(fvp) == VDIR) {
		a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
	} else {
		a.a_dir_clone_authorizer = NULL;
	}

	/* Dispatch through the target directory's operations vector. */
	_err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);

	if (_err == 0 && *vpp) {
		DTRACE_FSINFO(clonefile, vnode_t, *vpp);
		/* Record the new name for kdebug pathname tracing. */
		if (kdebug_enable) {
			kdebug_lookup(*vpp, cnp);
		}
	}

	post_event_if_success(dvp, _err, NOTE_WRITE);

	return _err;
}
5803 
5804 errno_t
VNOP_GETXATTR(vnode_t vp,const char * name,uio_t uio,size_t * size,int options,vfs_context_t ctx)5805 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5806 {
5807 	struct vnop_getxattr_args a;
5808 	int error;
5809 
5810 	a.a_desc = &vnop_getxattr_desc;
5811 	a.a_vp = vp;
5812 	a.a_name = name;
5813 	a.a_uio = uio;
5814 	a.a_size = size;
5815 	a.a_options = options;
5816 	a.a_context = ctx;
5817 
5818 	error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
5819 	DTRACE_FSINFO(getxattr, vnode_t, vp);
5820 
5821 	return error;
5822 }
5823 
5824 errno_t
VNOP_SETXATTR(vnode_t vp,const char * name,uio_t uio,int options,vfs_context_t ctx)5825 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
5826 {
5827 	struct vnop_setxattr_args a;
5828 	int error;
5829 
5830 	a.a_desc = &vnop_setxattr_desc;
5831 	a.a_vp = vp;
5832 	a.a_name = name;
5833 	a.a_uio = uio;
5834 	a.a_options = options;
5835 	a.a_context = ctx;
5836 
5837 	error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
5838 	DTRACE_FSINFO(setxattr, vnode_t, vp);
5839 
5840 	if (error == 0) {
5841 		vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
5842 	}
5843 
5844 	post_event_if_success(vp, error, NOTE_ATTRIB);
5845 
5846 	return error;
5847 }
5848 
5849 errno_t
VNOP_REMOVEXATTR(vnode_t vp,const char * name,int options,vfs_context_t ctx)5850 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
5851 {
5852 	struct vnop_removexattr_args a;
5853 	int error;
5854 
5855 	a.a_desc = &vnop_removexattr_desc;
5856 	a.a_vp = vp;
5857 	a.a_name = name;
5858 	a.a_options = options;
5859 	a.a_context = ctx;
5860 
5861 	error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
5862 	DTRACE_FSINFO(removexattr, vnode_t, vp);
5863 
5864 	post_event_if_success(vp, error, NOTE_ATTRIB);
5865 
5866 	return error;
5867 }
5868 
5869 errno_t
VNOP_LISTXATTR(vnode_t vp,uio_t uio,size_t * size,int options,vfs_context_t ctx)5870 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5871 {
5872 	struct vnop_listxattr_args a;
5873 	int error;
5874 
5875 	a.a_desc = &vnop_listxattr_desc;
5876 	a.a_vp = vp;
5877 	a.a_uio = uio;
5878 	a.a_size = size;
5879 	a.a_options = options;
5880 	a.a_context = ctx;
5881 
5882 	error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
5883 	DTRACE_FSINFO(listxattr, vnode_t, vp);
5884 
5885 	return error;
5886 }
5887 
5888 
5889 #if 0
5890 /*
5891 *#
5892 *#% blktooff vp = = =
5893 *#
5894 */
5895 struct vnop_blktooff_args {
5896 	struct vnodeop_desc *a_desc;
5897 	vnode_t a_vp;
5898 	daddr64_t a_lblkno;
5899 	off_t *a_offset;
5900 };
5901 #endif /* 0*/
5902 errno_t
VNOP_BLKTOOFF(struct vnode * vp,daddr64_t lblkno,off_t * offset)5903 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
5904 {
5905 	int _err;
5906 	struct vnop_blktooff_args a;
5907 
5908 	a.a_desc = &vnop_blktooff_desc;
5909 	a.a_vp = vp;
5910 	a.a_lblkno = lblkno;
5911 	a.a_offset = offset;
5912 
5913 	_err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
5914 	DTRACE_FSINFO(blktooff, vnode_t, vp);
5915 
5916 	return _err;
5917 }
5918 
5919 #if 0
5920 /*
5921 *#
5922 *#% offtoblk vp = = =
5923 *#
5924 */
5925 struct vnop_offtoblk_args {
5926 	struct vnodeop_desc *a_desc;
5927 	vnode_t a_vp;
5928 	off_t a_offset;
5929 	daddr64_t *a_lblkno;
5930 };
5931 #endif /* 0*/
5932 errno_t
VNOP_OFFTOBLK(struct vnode * vp,off_t offset,daddr64_t * lblkno)5933 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
5934 {
5935 	int _err;
5936 	struct vnop_offtoblk_args a;
5937 
5938 	a.a_desc = &vnop_offtoblk_desc;
5939 	a.a_vp = vp;
5940 	a.a_offset = offset;
5941 	a.a_lblkno = lblkno;
5942 
5943 	_err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
5944 	DTRACE_FSINFO(offtoblk, vnode_t, vp);
5945 
5946 	return _err;
5947 }
5948 
5949 #if 0
5950 /*
5951 *#
5952 *#% ap vp L L L
5953 *#
5954 */
5955 struct vnop_verify_args {
5956 	struct vnodeop_desc *a_desc;
5957 	vnode_t a_vp;
5958 	off_t a_foffset;
5959 	char *a_buf;
5960 	size_t a_bufsize;
5961 	size_t *a_verifyblksize;
5962 	void **a_verify_ctxp;
5963 	int a_flags;
5964 	vfs_context_t a_context;
5965 };
5966 #endif
5967 
/*
 * Ask the filesystem to verify file data integrity at 'foffset'.
 * With a NULL 'buf' this is a query (e.g. for the verify block size).
 * A filesystem that does not implement the VNOP is not an error:
 * ENOTSUP is translated to success with a zeroed block size.
 */
errno_t
VNOP_VERIFY(struct vnode *vp, off_t foffset, uint8_t *buf, size_t bufsize,
    size_t *verify_block_size, void **verify_ctxp, vnode_verify_flags_t flags,
    vfs_context_t ctx)
{
	int _err;
	struct vnop_verify_args a;

	/* May be called from contexts without a user thread; fall back
	 * to the kernel context. */
	if (ctx == NULL) {
		ctx = vfs_context_kernel();
	}
	a.a_desc = &vnop_verify_desc;
	a.a_vp = vp;
	a.a_foffset = foffset;
	a.a_buf = buf;
	a.a_bufsize = bufsize;
	a.a_verifyblksize = verify_block_size;
	a.a_flags = flags;
	a.a_verify_ctxp = verify_ctxp;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_verify_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(verify, vnode_t, vp);

	/* It is not an error for a filesystem to not support this VNOP */
	if (_err == ENOTSUP) {
		/* A zero block size tells the caller verification is
		 * unavailable for this vnode. */
		if (!buf && verify_block_size) {
			*verify_block_size = 0;
		}

		_err = 0;
	}

	return _err;
}
6003 
6004 #if 0
6005 /*
6006 *#
6007 *#% blockmap vp L L L
6008 *#
6009 */
6010 struct vnop_blockmap_args {
6011 	struct vnodeop_desc *a_desc;
6012 	vnode_t a_vp;
6013 	off_t a_foffset;
6014 	size_t a_size;
6015 	daddr64_t *a_bpn;
6016 	size_t *a_run;
6017 	void *a_poff;
6018 	int a_flags;
6019 	vfs_context_t a_context;
6020 };
6021 #endif /* 0*/
/*
 * Map a file byte range (foffset, size) to a physical/device block
 * number ('bpn') and report the length of the contiguous I/O run.
 * The run length reported by the FS is sanitized before being handed
 * back to the caller.
 */
errno_t
VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
{
	int _err;
	struct vnop_blockmap_args a;
	size_t localrun = 0;

	/* Callers may pass a NULL context; substitute the current one. */
	if (ctx == NULL) {
		ctx = vfs_context_current();
	}
	a.a_desc = &vnop_blockmap_desc;
	a.a_vp = vp;
	a.a_foffset = foffset;
	a.a_size = size;
	a.a_bpn = bpn;
	/* Point the FS at our local run variable, not the caller's. */
	a.a_run = &localrun;
	a.a_poff = poff;
	a.a_flags = flags;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(blockmap, vnode_t, vp);

	/*
	 * We used a local variable to request information from the underlying
	 * filesystem about the length of the I/O run in question.  If
	 * we get malformed output from the filesystem, we cap it to the length
	 * requested, at most.  Update 'run' on the way out.
	 */
	if (_err == 0) {
		if (localrun > size) {
			localrun = size;
		}

		if (run) {
			*run = localrun;
		}
	}

	return _err;
}
6063 
6064 #if 0
6065 struct vnop_strategy_args {
6066 	struct vnodeop_desc *a_desc;
6067 	struct buf *a_bp;
6068 };
6069 
6070 #endif /* 0*/
6071 errno_t
VNOP_STRATEGY(struct buf * bp)6072 VNOP_STRATEGY(struct buf *bp)
6073 {
6074 	int _err;
6075 	struct vnop_strategy_args a;
6076 	vnode_t vp = buf_vnode(bp);
6077 	a.a_desc = &vnop_strategy_desc;
6078 	a.a_bp = bp;
6079 	_err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
6080 	DTRACE_FSINFO(strategy, vnode_t, vp);
6081 	return _err;
6082 }
6083 
6084 #if 0
6085 struct vnop_bwrite_args {
6086 	struct vnodeop_desc *a_desc;
6087 	buf_t a_bp;
6088 };
6089 #endif /* 0*/
6090 errno_t
VNOP_BWRITE(struct buf * bp)6091 VNOP_BWRITE(struct buf *bp)
6092 {
6093 	int _err;
6094 	struct vnop_bwrite_args a;
6095 	vnode_t vp = buf_vnode(bp);
6096 	a.a_desc = &vnop_bwrite_desc;
6097 	a.a_bp = bp;
6098 	_err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
6099 	DTRACE_FSINFO(bwrite, vnode_t, vp);
6100 	return _err;
6101 }
6102 
6103 #if 0
6104 struct vnop_kqfilt_add_args {
6105 	struct vnodeop_desc *a_desc;
6106 	struct vnode *a_vp;
6107 	struct knote *a_kn;
6108 	vfs_context_t a_context;
6109 };
6110 #endif
6111 errno_t
VNOP_KQFILT_ADD(struct vnode * vp,struct knote * kn,vfs_context_t ctx)6112 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
6113 {
6114 	int _err;
6115 	struct vnop_kqfilt_add_args a;
6116 
6117 	a.a_desc = VDESC(vnop_kqfilt_add);
6118 	a.a_vp = vp;
6119 	a.a_kn = kn;
6120 	a.a_context = ctx;
6121 
6122 	_err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
6123 	DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
6124 
6125 	return _err;
6126 }
6127 
6128 #if 0
6129 struct vnop_kqfilt_remove_args {
6130 	struct vnodeop_desc *a_desc;
6131 	struct vnode *a_vp;
6132 	uintptr_t a_ident;
6133 	vfs_context_t a_context;
6134 };
6135 #endif
6136 errno_t
VNOP_KQFILT_REMOVE(struct vnode * vp,uintptr_t ident,vfs_context_t ctx)6137 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
6138 {
6139 	int _err;
6140 	struct vnop_kqfilt_remove_args a;
6141 
6142 	a.a_desc = VDESC(vnop_kqfilt_remove);
6143 	a.a_vp = vp;
6144 	a.a_ident = ident;
6145 	a.a_context = ctx;
6146 
6147 	_err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
6148 	DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
6149 
6150 	return _err;
6151 }
6152 
6153 errno_t
VNOP_MONITOR(vnode_t vp,uint32_t events,uint32_t flags,void * handle,vfs_context_t ctx)6154 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
6155 {
6156 	int _err;
6157 	struct vnop_monitor_args a;
6158 
6159 	a.a_desc = VDESC(vnop_monitor);
6160 	a.a_vp = vp;
6161 	a.a_events = events;
6162 	a.a_flags = flags;
6163 	a.a_handle = handle;
6164 	a.a_context = ctx;
6165 
6166 	_err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
6167 	DTRACE_FSINFO(monitor, vnode_t, vp);
6168 
6169 	return _err;
6170 }
6171 
6172 #if 0
6173 struct vnop_setlabel_args {
6174 	struct vnodeop_desc *a_desc;
6175 	struct vnode *a_vp;
6176 	struct label *a_vl;
6177 	vfs_context_t a_context;
6178 };
6179 #endif
6180 errno_t
VNOP_SETLABEL(struct vnode * vp,struct label * label,vfs_context_t ctx)6181 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
6182 {
6183 	int _err;
6184 	struct vnop_setlabel_args a;
6185 
6186 	a.a_desc = VDESC(vnop_setlabel);
6187 	a.a_vp = vp;
6188 	a.a_vl = label;
6189 	a.a_context = ctx;
6190 
6191 	_err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
6192 	DTRACE_FSINFO(setlabel, vnode_t, vp);
6193 
6194 	return _err;
6195 }
6196 
6197 
6198 #if NAMEDSTREAMS
6199 /*
6200  * Get a named streamed
6201  */
6202 errno_t
VNOP_GETNAMEDSTREAM(vnode_t vp,vnode_t * svpp,const char * name,enum nsoperation operation,int flags,vfs_context_t ctx)6203 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
6204 {
6205 	int _err;
6206 	struct vnop_getnamedstream_args a;
6207 
6208 	a.a_desc = &vnop_getnamedstream_desc;
6209 	a.a_vp = vp;
6210 	a.a_svpp = svpp;
6211 	a.a_name = name;
6212 	a.a_operation = operation;
6213 	a.a_flags = flags;
6214 	a.a_context = ctx;
6215 
6216 	_err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
6217 	DTRACE_FSINFO(getnamedstream, vnode_t, vp);
6218 	return _err;
6219 }
6220 
6221 /*
6222  * Create a named streamed
6223  */
6224 errno_t
VNOP_MAKENAMEDSTREAM(vnode_t vp,vnode_t * svpp,const char * name,int flags,vfs_context_t ctx)6225 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
6226 {
6227 	int _err;
6228 	struct vnop_makenamedstream_args a;
6229 
6230 	a.a_desc = &vnop_makenamedstream_desc;
6231 	a.a_vp = vp;
6232 	a.a_svpp = svpp;
6233 	a.a_name = name;
6234 	a.a_flags = flags;
6235 	a.a_context = ctx;
6236 
6237 	_err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
6238 	DTRACE_FSINFO(makenamedstream, vnode_t, vp);
6239 	return _err;
6240 }
6241 
6242 
6243 /*
6244  * Remove a named streamed
6245  */
6246 errno_t
VNOP_REMOVENAMEDSTREAM(vnode_t vp,vnode_t svp,const char * name,int flags,vfs_context_t ctx)6247 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6248 {
6249 	int _err;
6250 	struct vnop_removenamedstream_args a;
6251 
6252 	a.a_desc = &vnop_removenamedstream_desc;
6253 	a.a_vp = vp;
6254 	a.a_svp = svp;
6255 	a.a_name = name;
6256 	a.a_flags = flags;
6257 	a.a_context = ctx;
6258 
6259 	_err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6260 	DTRACE_FSINFO(removenamedstream, vnode_t, vp);
6261 	return _err;
6262 }
6263 #endif
6264