xref: /xnu-12377.1.9/bsd/vfs/kpi_vfs.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea) !
1 /*
2  * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1989, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kpi_vfs.c
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 /*
76  * External virtual filesystem routines
77  */
78 
79 
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/disk.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf.h>
93 #include <sys/errno.h>
94 #include <kern/kalloc.h>
95 #include <sys/domain.h>
96 #include <sys/mbuf.h>
97 #include <sys/syslog.h>
98 #include <sys/ubc.h>
99 #include <sys/ubc_internal.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/filedesc.h>
103 #include <sys/event.h>
104 #include <sys/fsevents.h>
105 #include <sys/user.h>
106 #include <sys/lockf.h>
107 #include <sys/xattr.h>
108 #include <sys/kdebug.h>
109 #include <vfs/vfs_disk_conditioner.h>
110 
111 #include <kern/assert.h>
112 #include <kern/zalloc.h>
113 #include <kern/task.h>
114 #include <kern/policy_internal.h>
115 
116 #include <libkern/OSByteOrder.h>
117 
118 #include <miscfs/specfs/specdev.h>
119 
120 #include <mach/mach_types.h>
121 #include <mach/memory_object_types.h>
122 #include <mach/task.h>
123 
124 #if CONFIG_MACF
125 #include <security/mac_framework.h>
126 #endif
127 
128 #if NULLFS
129 #include <miscfs/nullfs/nullfs.h>
130 #endif
131 
132 #include <sys/sdt.h>
133 
134 #define ESUCCESS 0
135 #undef mount_t
136 #undef vnode_t
137 
138 #define COMPAT_ONLY
139 
140 #define NATIVE_XATTR(VP)  \
141 	((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
142 
143 #if CONFIG_APPLEDOUBLE
144 static void xattrfile_remove(vnode_t dvp, const char *basename,
145     vfs_context_t ctx, int force);
146 static void xattrfile_setattr(vnode_t dvp, const char * basename,
147     struct vnode_attr * vap, vfs_context_t ctx);
148 #endif /* CONFIG_APPLEDOUBLE */
149 
150 extern lck_rw_t rootvnode_rw_lock;
151 
152 static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
153 
154 KALLOC_TYPE_DEFINE(KT_VFS_CONTEXT, struct vfs_context, KT_PRIV_ACCT);
155 
156 extern int fstypenumstart;
157 char vfs_typenum_arr[13];
158 
159 LCK_GRP_DECLARE(typenum_arr_grp, "typenum array group");
160 LCK_MTX_DECLARE(vfs_typenum_mtx, &typenum_arr_grp);
161 /*
162  * vnode_setneedinactive
163  *
164  * Description: Indicate that when the last iocount on this vnode goes away,
165  *              and the usecount is also zero, we should inform the filesystem
166  *              via VNOP_INACTIVE.
167  *
168  * Parameters:  vnode_t		vnode to mark
169  *
170  * Returns:     Nothing
171  *
172  * Notes:       Notably used when we're deleting a file--we need not have a
173  *              usecount, so VNOP_INACTIVE may not get called by anyone.  We
174  *              want it called when we drop our iocount.
175  */
176 void
vnode_setneedinactive(vnode_t vp)177 vnode_setneedinactive(vnode_t vp)
178 {
179 	cache_purge(vp);
180 
181 	vnode_lock_spin(vp);
182 	vp->v_lflag |= VL_NEEDINACTIVE;
183 	vnode_unlock(vp);
184 }
185 
186 
187 /* ====================================================================== */
188 /* ************  EXTERNAL KERNEL APIS  ********************************** */
189 /* ====================================================================== */
190 
191 /*
192  * implementations of exported VFS operations
193  */
194 int
VFS_MOUNT(mount_t mp,vnode_t devvp,user_addr_t data,vfs_context_t ctx)195 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
196 {
197 	int error;
198 
199 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) {
200 		return ENOTSUP;
201 	}
202 
203 	if (vfs_context_is64bit(ctx)) {
204 		if (vfs_64bitready(mp)) {
205 			error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
206 		} else {
207 			error = ENOTSUP;
208 		}
209 	} else {
210 		error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
211 	}
212 
213 	return error;
214 }
215 
216 int
VFS_START(mount_t mp,int flags,vfs_context_t ctx)217 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
218 {
219 	int error;
220 
221 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) {
222 		return ENOTSUP;
223 	}
224 
225 	error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
226 
227 	return error;
228 }
229 
230 int
VFS_UNMOUNT(mount_t mp,int flags,vfs_context_t ctx)231 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
232 {
233 	int error;
234 
235 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) {
236 		return ENOTSUP;
237 	}
238 
239 	error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
240 
241 	return error;
242 }
243 
244 /*
245  * Returns:	0			Success
246  *		ENOTSUP			Not supported
247  *		<vfs_root>:ENOENT
248  *		<vfs_root>:???
249  *
250  * Note:	The return codes from the underlying VFS's root routine can't
251  *		be fully enumerated here, since third party VFS authors may not
252  *		limit their error returns to the ones documented here, even
253  *		though this may result in some programs functioning incorrectly.
254  *
255  *		The return codes documented above are those which may currently
256  *		be returned by HFS from hfs_vfs_root, which is a simple wrapper
257  *		for a call to hfs_vget on the volume mount point, not including
258  *		additional error codes which may be propagated from underlying
259  *		routines called by hfs_vget.
260  */
261 int
VFS_ROOT(mount_t mp,struct vnode ** vpp,vfs_context_t ctx)262 VFS_ROOT(mount_t mp, struct vnode  ** vpp, vfs_context_t ctx)
263 {
264 	int error;
265 
266 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) {
267 		return ENOTSUP;
268 	}
269 
270 	if (ctx == NULL) {
271 		ctx = vfs_context_current();
272 	}
273 
274 	error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
275 
276 	return error;
277 }
278 
279 int
VFS_QUOTACTL(mount_t mp,int cmd,uid_t uid,caddr_t datap,vfs_context_t ctx)280 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
281 {
282 	int error;
283 
284 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) {
285 		return ENOTSUP;
286 	}
287 
288 	error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
289 
290 	return error;
291 }
292 
293 int
VFS_GETATTR(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)294 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
295 {
296 	int error;
297 
298 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) {
299 		return ENOTSUP;
300 	}
301 
302 	if (ctx == NULL) {
303 		ctx = vfs_context_current();
304 	}
305 
306 	error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
307 
308 	return error;
309 }
310 
311 int
VFS_SETATTR(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)312 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
313 {
314 	int error;
315 
316 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) {
317 		return ENOTSUP;
318 	}
319 
320 	if (ctx == NULL) {
321 		ctx = vfs_context_current();
322 	}
323 
324 	error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
325 
326 	return error;
327 }
328 
329 int
VFS_SYNC(mount_t mp,int flags,vfs_context_t ctx)330 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
331 {
332 	int error;
333 
334 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) {
335 		return ENOTSUP;
336 	}
337 
338 	if (ctx == NULL) {
339 		ctx = vfs_context_current();
340 	}
341 
342 	error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
343 
344 	return error;
345 }
346 
347 int
VFS_VGET(mount_t mp,ino64_t ino,struct vnode ** vpp,vfs_context_t ctx)348 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
349 {
350 	int error;
351 
352 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) {
353 		return ENOTSUP;
354 	}
355 
356 	if (ctx == NULL) {
357 		ctx = vfs_context_current();
358 	}
359 
360 	error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
361 
362 	return error;
363 }
364 
365 int
VFS_FHTOVP(mount_t mp,int fhlen,unsigned char * fhp,vnode_t * vpp,vfs_context_t ctx)366 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
367 {
368 	int error;
369 
370 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) {
371 		return ENOTSUP;
372 	}
373 
374 	if (ctx == NULL) {
375 		ctx = vfs_context_current();
376 	}
377 
378 	error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
379 
380 	return error;
381 }
382 
383 int
VFS_VPTOFH(struct vnode * vp,int * fhlenp,unsigned char * fhp,vfs_context_t ctx)384 VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
385 {
386 	int error;
387 
388 	if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) {
389 		return ENOTSUP;
390 	}
391 
392 	if (ctx == NULL) {
393 		ctx = vfs_context_current();
394 	}
395 
396 	error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
397 
398 	return error;
399 }
400 
401 int
VFS_IOCTL(struct mount * mp,u_long command,caddr_t data,int flags,vfs_context_t context)402 VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
403     int flags, vfs_context_t context)
404 {
405 	if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl) {
406 		return ENOTSUP;
407 	}
408 
409 	return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
410 	           context ?: vfs_context_current());
411 }
412 
413 int
VFS_VGET_SNAPDIR(mount_t mp,vnode_t * vpp,vfs_context_t ctx)414 VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
415 {
416 	int error;
417 
418 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0)) {
419 		return ENOTSUP;
420 	}
421 
422 	if (ctx == NULL) {
423 		ctx = vfs_context_current();
424 	}
425 
426 	error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
427 
428 	return error;
429 }
430 
431 /* returns the cached throttle mask for the mount_t */
432 uint64_t
vfs_throttle_mask(mount_t mp)433 vfs_throttle_mask(mount_t mp)
434 {
435 	return mp->mnt_throttle_mask;
436 }
437 
438 /* returns a  copy of vfs type name for the mount_t */
439 void
vfs_name(mount_t mp,char * buffer)440 vfs_name(mount_t mp, char *buffer)
441 {
442 	strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
443 }
444 
445 /* returns  vfs type number for the mount_t */
446 int
vfs_typenum(mount_t mp)447 vfs_typenum(mount_t mp)
448 {
449 	return mp->mnt_vtable->vfc_typenum;
450 }
451 
452 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers.  */
453 void*
vfs_mntlabel(mount_t mp)454 vfs_mntlabel(mount_t mp)
455 {
456 	return (void*)mac_mount_label(mp);
457 }
458 
459 uint64_t
vfs_mount_id(mount_t mp)460 vfs_mount_id(mount_t mp)
461 {
462 	return mp->mnt_mount_id;
463 }
464 
465 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
466 uint64_t
vfs_flags(mount_t mp)467 vfs_flags(mount_t mp)
468 {
469 	return (uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
470 }
471 
472 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
473 void
vfs_setflags(mount_t mp,uint64_t flags)474 vfs_setflags(mount_t mp, uint64_t flags)
475 {
476 	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
477 
478 	mount_lock(mp);
479 	mp->mnt_flag |= lflags;
480 	mount_unlock(mp);
481 }
482 
483 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
484 void
vfs_clearflags(mount_t mp,uint64_t flags)485 vfs_clearflags(mount_t mp, uint64_t flags)
486 {
487 	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
488 
489 	mount_lock(mp);
490 	mp->mnt_flag &= ~lflags;
491 	mount_unlock(mp);
492 }
493 
494 /* Is the mount_t ronly and upgrade read/write requested? */
495 int
vfs_iswriteupgrade(mount_t mp)496 vfs_iswriteupgrade(mount_t mp) /* ronly &&  MNTK_WANTRDWR */
497 {
498 	return (mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR);
499 }
500 
501 
502 /* Is the mount_t mounted ronly */
503 int
vfs_isrdonly(mount_t mp)504 vfs_isrdonly(mount_t mp)
505 {
506 	return mp->mnt_flag & MNT_RDONLY;
507 }
508 
509 /* Is the mount_t mounted for filesystem synchronous writes? */
510 int
vfs_issynchronous(mount_t mp)511 vfs_issynchronous(mount_t mp)
512 {
513 	return mp->mnt_flag & MNT_SYNCHRONOUS;
514 }
515 
516 /* Is the mount_t mounted read/write? */
517 int
vfs_isrdwr(mount_t mp)518 vfs_isrdwr(mount_t mp)
519 {
520 	return (mp->mnt_flag & MNT_RDONLY) == 0;
521 }
522 
523 
524 /* Is mount_t marked for update (ie MNT_UPDATE) */
525 int
vfs_isupdate(mount_t mp)526 vfs_isupdate(mount_t mp)
527 {
528 	return mp->mnt_flag & MNT_UPDATE;
529 }
530 
531 
532 /* Is mount_t marked for reload (ie MNT_RELOAD) */
533 int
vfs_isreload(mount_t mp)534 vfs_isreload(mount_t mp)
535 {
536 	return (mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD);
537 }
538 
539 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
540 int
vfs_isforce(mount_t mp)541 vfs_isforce(mount_t mp)
542 {
543 	if (mp->mnt_lflag & MNT_LFORCE) {
544 		return 1;
545 	} else {
546 		return 0;
547 	}
548 }
549 
550 int
vfs_isunmount(mount_t mp)551 vfs_isunmount(mount_t mp)
552 {
553 	if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
554 		return 1;
555 	} else {
556 		return 0;
557 	}
558 }
559 
560 int
vfs_64bitready(mount_t mp)561 vfs_64bitready(mount_t mp)
562 {
563 	if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
564 		return 1;
565 	} else {
566 		return 0;
567 	}
568 }
569 
570 
571 int
vfs_authcache_ttl(mount_t mp)572 vfs_authcache_ttl(mount_t mp)
573 {
574 	if ((mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
575 		return mp->mnt_authcache_ttl;
576 	} else {
577 		return CACHED_RIGHT_INFINITE_TTL;
578 	}
579 }
580 
581 void
vfs_setauthcache_ttl(mount_t mp,int ttl)582 vfs_setauthcache_ttl(mount_t mp, int ttl)
583 {
584 	mount_lock(mp);
585 	mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
586 	mp->mnt_authcache_ttl = ttl;
587 	mount_unlock(mp);
588 }
589 
590 void
vfs_clearauthcache_ttl(mount_t mp)591 vfs_clearauthcache_ttl(mount_t mp)
592 {
593 	mount_lock(mp);
594 	mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
595 	/*
596 	 * back to the default TTL value in case
597 	 * MNTK_AUTH_OPAQUE is set on this mount
598 	 */
599 	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
600 	mount_unlock(mp);
601 }
602 
603 int
vfs_authopaque(mount_t mp)604 vfs_authopaque(mount_t mp)
605 {
606 	if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE)) {
607 		return 1;
608 	} else {
609 		return 0;
610 	}
611 }
612 
613 int
vfs_authopaqueaccess(mount_t mp)614 vfs_authopaqueaccess(mount_t mp)
615 {
616 	if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS)) {
617 		return 1;
618 	} else {
619 		return 0;
620 	}
621 }
622 
623 void
vfs_setauthopaque(mount_t mp)624 vfs_setauthopaque(mount_t mp)
625 {
626 	mount_lock(mp);
627 	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
628 	mount_unlock(mp);
629 }
630 
631 void
vfs_setauthopaqueaccess(mount_t mp)632 vfs_setauthopaqueaccess(mount_t mp)
633 {
634 	mount_lock(mp);
635 	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
636 	mount_unlock(mp);
637 }
638 
639 void
vfs_clearauthopaque(mount_t mp)640 vfs_clearauthopaque(mount_t mp)
641 {
642 	mount_lock(mp);
643 	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
644 	mount_unlock(mp);
645 }
646 
647 void
vfs_clearauthopaqueaccess(mount_t mp)648 vfs_clearauthopaqueaccess(mount_t mp)
649 {
650 	mount_lock(mp);
651 	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
652 	mount_unlock(mp);
653 }
654 
655 void
vfs_setextendedsecurity(mount_t mp)656 vfs_setextendedsecurity(mount_t mp)
657 {
658 	mount_lock(mp);
659 	mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
660 	mount_unlock(mp);
661 }
662 
663 void
vfs_setmntsystem(mount_t mp)664 vfs_setmntsystem(mount_t mp)
665 {
666 	mount_lock(mp);
667 	mp->mnt_kern_flag |= MNTK_SYSTEM;
668 	mount_unlock(mp);
669 }
670 
671 void
vfs_setmntsystemdata(mount_t mp)672 vfs_setmntsystemdata(mount_t mp)
673 {
674 	mount_lock(mp);
675 	mp->mnt_kern_flag |= MNTK_SYSTEMDATA;
676 	mount_unlock(mp);
677 }
678 
679 void
vfs_setmntswap(mount_t mp)680 vfs_setmntswap(mount_t mp)
681 {
682 	mount_lock(mp);
683 	mp->mnt_kern_flag |= (MNTK_SYSTEM | MNTK_SWAP_MOUNT);
684 	mount_unlock(mp);
685 }
686 
687 void
vfs_clearextendedsecurity(mount_t mp)688 vfs_clearextendedsecurity(mount_t mp)
689 {
690 	mount_lock(mp);
691 	mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
692 	mount_unlock(mp);
693 }
694 
695 void
vfs_setnoswap(mount_t mp)696 vfs_setnoswap(mount_t mp)
697 {
698 	mount_lock(mp);
699 	mp->mnt_kern_flag |= MNTK_NOSWAP;
700 	mount_unlock(mp);
701 }
702 
703 void
vfs_clearnoswap(mount_t mp)704 vfs_clearnoswap(mount_t mp)
705 {
706 	mount_lock(mp);
707 	mp->mnt_kern_flag &= ~MNTK_NOSWAP;
708 	mount_unlock(mp);
709 }
710 
711 int
vfs_extendedsecurity(mount_t mp)712 vfs_extendedsecurity(mount_t mp)
713 {
714 	return mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY;
715 }
716 
717 /* returns the max size of short symlink in this mount_t */
718 uint32_t
vfs_maxsymlen(mount_t mp)719 vfs_maxsymlen(mount_t mp)
720 {
721 	return mp->mnt_maxsymlinklen;
722 }
723 
724 /* set  max size of short symlink on mount_t */
725 void
vfs_setmaxsymlen(mount_t mp,uint32_t symlen)726 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
727 {
728 	mp->mnt_maxsymlinklen = symlen;
729 }
730 
731 boolean_t
vfs_is_basesystem(mount_t mp)732 vfs_is_basesystem(mount_t mp)
733 {
734 	return ((mp->mnt_supl_kern_flag & MNTK_SUPL_BASESYSTEM) == 0) ? false : true;
735 }
736 
737 /* return a pointer to the RO vfs_statfs associated with mount_t */
738 struct vfsstatfs *
vfs_statfs(mount_t mp)739 vfs_statfs(mount_t mp)
740 {
741 	return &mp->mnt_vfsstat;
742 }
743 
744 int
vfs_getattr(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)745 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
746 {
747 	int             error;
748 
749 	if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) {
750 		return error;
751 	}
752 
753 	/*
754 	 * If we have a filesystem create time, use it to default some others.
755 	 */
756 	if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
757 		if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time)) {
758 			VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
759 		}
760 	}
761 
762 	return 0;
763 }
764 
765 int
vfs_setattr(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)766 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
767 {
768 	int error;
769 
770 	/*
771 	 * with a read-only system volume, we need to allow rename of the root volume
772 	 * even if it's read-only.  Don't return EROFS here if setattr changes only
773 	 * the volume name
774 	 */
775 	if (vfs_isrdonly(mp) &&
776 	    !((strcmp(mp->mnt_vfsstat.f_fstypename, "apfs") == 0) && (vfa->f_active == VFSATTR_f_vol_name))) {
777 		return EROFS;
778 	}
779 
780 	error = VFS_SETATTR(mp, vfa, ctx);
781 
782 	/*
783 	 * If we had alternate ways of setting vfs attributes, we'd
784 	 * fall back here.
785 	 */
786 
787 	return error;
788 }
789 
790 /* return the private data handle stored in mount_t */
791 void *
vfs_fsprivate(mount_t mp)792 vfs_fsprivate(mount_t mp)
793 {
794 	return mp->mnt_data;
795 }
796 
797 /* set the private data handle in mount_t */
798 void
vfs_setfsprivate(mount_t mp,void * mntdata)799 vfs_setfsprivate(mount_t mp, void *mntdata)
800 {
801 	mount_lock(mp);
802 	mp->mnt_data = mntdata;
803 	mount_unlock(mp);
804 }
805 
806 /* query whether the mount point supports native EAs */
807 int
vfs_nativexattrs(mount_t mp)808 vfs_nativexattrs(mount_t mp)
809 {
810 	return mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS;
811 }
812 
813 /*
814  * return the block size of the underlying
815  * device associated with mount_t
816  */
817 int
vfs_devblocksize(mount_t mp)818 vfs_devblocksize(mount_t mp)
819 {
820 	return mp->mnt_devblocksize;
821 }
822 
823 /*
824  * Returns vnode with an iocount that must be released with vnode_put()
825  */
826 vnode_t
vfs_vnodecovered(mount_t mp)827 vfs_vnodecovered(mount_t mp)
828 {
829 	vnode_t vp = mp->mnt_vnodecovered;
830 	if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
831 		return NULL;
832 	} else {
833 		return vp;
834 	}
835 }
836 /*
837  * Similar to vfs_vnodecovered() except this variant doesn't block and returns
838  * NULL if the covered vnode is being reclaimed.
839  * Returns vnode with an iocount that must be released with vnode_put().
840  */
841 vnode_t
vfs_vnodecovered_noblock(mount_t mp)842 vfs_vnodecovered_noblock(mount_t mp)
843 {
844 	vnode_t vp = mp->mnt_vnodecovered;
845 
846 	if ((vp == NULL) || (vnode_getwithref_noblock(vp) != 0)) {
847 		return NULL;
848 	} else {
849 		return vp;
850 	}
851 }
852 
853 int
vfs_setdevvp(mount_t mp,vnode_t devvp)854 vfs_setdevvp(mount_t mp, vnode_t devvp)
855 {
856 	if (mp == NULL) {
857 		return 0;
858 	}
859 
860 	if (devvp) {
861 		if (devvp->v_type != VBLK) {
862 			return EINVAL;
863 		}
864 
865 		if (major(devvp->v_rdev) >= nblkdev) {
866 			return ENXIO;
867 		}
868 	}
869 
870 	mp->mnt_devvp = devvp;
871 
872 	return 0;
873 }
874 
875 /*
876  * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
877  * The iocount must be released with vnode_put().  Note that this KPI is subtle
878  * with respect to the validity of using this device vnode for anything substantial
879  * (which is discouraged).  If commands are sent to the device driver without
880  * taking proper steps to ensure that the device is still open, chaos may ensue.
881  * Similarly, this routine should only be called if there is some guarantee that
882  * the mount itself is still valid.
883  */
884 vnode_t
vfs_devvp(mount_t mp)885 vfs_devvp(mount_t mp)
886 {
887 	vnode_t vp = mp->mnt_devvp;
888 
889 	if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
890 		return vp;
891 	}
892 
893 	return NULLVP;
894 }
895 
896 /*
897  * return the io attributes associated with mount_t
898  */
899 void
vfs_ioattr(mount_t mp,struct vfsioattr * ioattrp)900 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
901 {
902 	ioattrp->io_reserved[0] = NULL;
903 	ioattrp->io_reserved[1] = NULL;
904 	if (mp == NULL) {
905 		ioattrp->io_maxreadcnt  = MAXPHYS;
906 		ioattrp->io_maxwritecnt = MAXPHYS;
907 		ioattrp->io_segreadcnt  = 32;
908 		ioattrp->io_segwritecnt = 32;
909 		ioattrp->io_maxsegreadsize  = MAXPHYS;
910 		ioattrp->io_maxsegwritesize = MAXPHYS;
911 		ioattrp->io_devblocksize = DEV_BSIZE;
912 		ioattrp->io_flags = 0;
913 		ioattrp->io_max_swappin_available = 0;
914 	} else {
915 		ioattrp->io_maxreadcnt  = mp->mnt_maxreadcnt;
916 		ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
917 		ioattrp->io_segreadcnt  = mp->mnt_segreadcnt;
918 		ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
919 		ioattrp->io_maxsegreadsize  = mp->mnt_maxsegreadsize;
920 		ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
921 		ioattrp->io_devblocksize = mp->mnt_devblocksize;
922 		ioattrp->io_flags = mp->mnt_ioflags;
923 		ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
924 	}
925 }
926 
927 
928 /*
929  * set the IO attributes associated with mount_t
930  */
931 void
vfs_setioattr(mount_t mp,struct vfsioattr * ioattrp)932 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
933 {
934 	if (mp == NULL) {
935 		return;
936 	}
937 	mp->mnt_maxreadcnt  = ioattrp->io_maxreadcnt;
938 	mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
939 	mp->mnt_segreadcnt  = ioattrp->io_segreadcnt;
940 	mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
941 	mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
942 	mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
943 	mp->mnt_devblocksize = ioattrp->io_devblocksize;
944 	mp->mnt_ioflags = ioattrp->io_flags;
945 	mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
946 }
947 
948 /*
949  * Add a new filesystem into the kernel specified in passed in
950  * vfstable structure. It fills in the vnode
951  * dispatch vector that is to be passed to when vnodes are created.
952  * It returns a handle which is to be used to when the FS is to be removed
953  */
954 typedef int (*PFI)(void *);
955 extern int vfs_opv_numops;
956 errno_t
vfs_fsadd(struct vfs_fsentry * vfe,vfstable_t * handle)957 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
958 {
959 	struct vfstable *newvfstbl = NULL;
960 	int     i, j;
961 	int(***opv_desc_vector_p)(void *);
962 	int(**opv_desc_vector)(void *);
963 	const struct vnodeopv_entry_desc        *opve_descp;
964 	int desccount;
965 	int descsize;
966 	PFI *descptr;
967 
968 	/*
969 	 * This routine is responsible for all the initialization that would
970 	 * ordinarily be done as part of the system startup;
971 	 */
972 
973 	if (vfe == (struct vfs_fsentry *)0) {
974 		return EINVAL;
975 	}
976 
977 	desccount = vfe->vfe_vopcnt;
978 	if ((desccount <= 0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
979 	    || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) {
980 		return EINVAL;
981 	}
982 
983 	/* Non-threadsafe filesystems are not supported */
984 	if ((vfe->vfe_flags &  (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
985 		return EINVAL;
986 	}
987 
988 	newvfstbl = kalloc_type(struct vfstable, Z_WAITOK | Z_ZERO);
989 	newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
990 	strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
991 	if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) {
992 		int tmp;
993 		int found = 0;
994 		lck_mtx_lock(&vfs_typenum_mtx);
995 		for (tmp = fstypenumstart; tmp < OID_AUTO_START; tmp++) {
996 			if (isclr(vfs_typenum_arr, tmp)) {
997 				newvfstbl->vfc_typenum = tmp;
998 				setbit(vfs_typenum_arr, tmp);
999 				found = 1;
1000 				break;
1001 			}
1002 		}
1003 		if (!found) {
1004 			lck_mtx_unlock(&vfs_typenum_mtx);
1005 			return EINVAL;
1006 		}
1007 		if (maxvfstypenum < OID_AUTO_START) {
1008 			/* getvfsbyname checks up to but not including maxvfstypenum */
1009 			maxvfstypenum = newvfstbl->vfc_typenum + 1;
1010 		}
1011 		lck_mtx_unlock(&vfs_typenum_mtx);
1012 	} else {
1013 		newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
1014 		lck_mtx_lock(&vfs_typenum_mtx);
1015 		setbit(vfs_typenum_arr, newvfstbl->vfc_typenum);
1016 		if (newvfstbl->vfc_typenum >= maxvfstypenum) {
1017 			maxvfstypenum = newvfstbl->vfc_typenum + 1;
1018 		}
1019 		lck_mtx_unlock(&vfs_typenum_mtx);
1020 	}
1021 
1022 
1023 	newvfstbl->vfc_refcount = 0;
1024 	newvfstbl->vfc_flags = 0;
1025 	newvfstbl->vfc_mountroot = NULL;
1026 	newvfstbl->vfc_next = NULL;
1027 	newvfstbl->vfc_vfsflags = 0;
1028 	if (vfe->vfe_flags &  VFS_TBL64BITREADY) {
1029 		newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
1030 	}
1031 	if (vfe->vfe_flags &  VFS_TBLVNOP_PAGEINV2) {
1032 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
1033 	}
1034 	if (vfe->vfe_flags &  VFS_TBLVNOP_PAGEOUTV2) {
1035 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
1036 	}
1037 	if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) {
1038 		newvfstbl->vfc_flags |= MNT_LOCAL;
1039 	}
1040 	if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) {
1041 		newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
1042 	} else {
1043 		newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
1044 	}
1045 
1046 	if (vfe->vfe_flags &  VFS_TBLNATIVEXATTR) {
1047 		newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1048 	}
1049 	if (vfe->vfe_flags &  VFS_TBLUNMOUNT_PREFLIGHT) {
1050 		newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
1051 	}
1052 	if (vfe->vfe_flags &  VFS_TBLREADDIR_EXTENDED) {
1053 		newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
1054 	}
1055 	if (vfe->vfe_flags & VFS_TBLNOMACLABEL) {
1056 		newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
1057 	}
1058 	if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) {
1059 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
1060 	}
1061 	if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME) {
1062 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
1063 	}
1064 	if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT) {
1065 		newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
1066 	}
1067 
1068 	/*
1069 	 * Allocate and init the vectors.
1070 	 * Also handle backwards compatibility.
1071 	 *
1072 	 * We allocate one large block to hold all <desccount>
1073 	 * vnode operation vectors stored contiguously.
1074 	 */
1075 	/* XXX - shouldn't be M_TEMP */
1076 
1077 	descsize = desccount * vfs_opv_numops;
1078 	descptr = kalloc_type(PFI, descsize, Z_WAITOK | Z_ZERO);
1079 
1080 	newvfstbl->vfc_descptr = descptr;
1081 	newvfstbl->vfc_descsize = descsize;
1082 
1083 	newvfstbl->vfc_sysctl = NULL;
1084 
1085 	for (i = 0; i < desccount; i++) {
1086 		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1087 		/*
1088 		 * Fill in the caller's pointer to the start of the i'th vector.
1089 		 * They'll need to supply it when calling vnode_create.
1090 		 */
1091 		opv_desc_vector = descptr + i * vfs_opv_numops;
1092 		*opv_desc_vector_p = opv_desc_vector;
1093 
1094 		for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
1095 			opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
1096 
1097 			/* Silently skip known-disabled operations */
1098 			if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
1099 				printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
1100 				    vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
1101 				continue;
1102 			}
1103 
1104 			/*
1105 			 * Sanity check:  is this operation listed
1106 			 * in the list of operations?  We check this
1107 			 * by seeing if its offset is zero.  Since
1108 			 * the default routine should always be listed
1109 			 * first, it should be the only one with a zero
1110 			 * offset.  Any other operation with a zero
1111 			 * offset is probably not listed in
1112 			 * vfs_op_descs, and so is probably an error.
1113 			 *
1114 			 * A panic here means the layer programmer
1115 			 * has committed the all-too common bug
1116 			 * of adding a new operation to the layer's
1117 			 * list of vnode operations but
1118 			 * not adding the operation to the system-wide
1119 			 * list of supported operations.
1120 			 */
1121 			if (opve_descp->opve_op->vdesc_offset == 0 &&
1122 			    opve_descp->opve_op != VDESC(vnop_default)) {
1123 				printf("vfs_fsadd: operation %s not listed in %s.\n",
1124 				    opve_descp->opve_op->vdesc_name,
1125 				    "vfs_op_descs");
1126 				panic("vfs_fsadd: bad operation");
1127 			}
1128 			/*
1129 			 * Fill in this entry.
1130 			 */
1131 			opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1132 			    opve_descp->opve_impl;
1133 		}
1134 
1135 		/*
1136 		 * Finally, go back and replace unfilled routines
1137 		 * with their default.  (Sigh, an O(n^3) algorithm.  I
1138 		 * could make it better, but that'd be work, and n is small.)
1139 		 */
1140 		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1141 
1142 		/*
1143 		 * Force every operations vector to have a default routine.
1144 		 */
1145 		opv_desc_vector = *opv_desc_vector_p;
1146 		if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
1147 			panic("vfs_fsadd: operation vector without default routine.");
1148 		}
1149 		for (j = 0; j < vfs_opv_numops; j++) {
1150 			if (opv_desc_vector[j] == NULL) {
1151 				opv_desc_vector[j] =
1152 				    opv_desc_vector[VOFFSET(vnop_default)];
1153 			}
1154 		}
1155 	} /* end of each vnodeopv_desc parsing */
1156 
1157 	*handle = vfstable_add(newvfstbl);
1158 
1159 	if (newvfstbl->vfc_vfsops->vfs_init) {
1160 		struct vfsconf vfsc;
1161 		bzero(&vfsc, sizeof(struct vfsconf));
1162 		vfsc.vfc_reserved1 = 0;
1163 		bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1164 		vfsc.vfc_typenum = (*handle)->vfc_typenum;
1165 		vfsc.vfc_refcount = (*handle)->vfc_refcount;
1166 		vfsc.vfc_flags = (*handle)->vfc_flags;
1167 		vfsc.vfc_reserved2 = 0;
1168 		vfsc.vfc_reserved3 = 0;
1169 
1170 		(*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1171 	}
1172 
1173 	kfree_type(struct vfstable, newvfstbl);
1174 
1175 	return 0;
1176 }
1177 
/*
 * Removes the filesystem from kernel.
 * The argument passed in is the handle that was given when
 * file system was added
 *
 * Returns: 0 on success, EBUSY if the filesystem still has active
 *          mounts, or the error returned by vfstable_del().
 */
errno_t
vfs_fsremove(vfstable_t handle)
{
	struct vfstable * vfstbl =  (struct vfstable *)handle;
	void *old_desc = NULL;
	size_t descsize = 0;
	errno_t err;

	/*
	 * Preflight check for any mounts.  mount_list_lock is held
	 * across the vfstable_del() call below so the refcount cannot
	 * change under us.
	 */
	mount_list_lock();
	if (vfstbl->vfc_refcount != 0) {
		mount_list_unlock();
		return EBUSY;
	}

	/*
	 * Free the spot in vfs_typenum_arr.
	 * NOTE(review): the typenum bit is cleared before vfstable_del()
	 * runs; if vfstable_del() fails below the bit stays cleared --
	 * confirm that failure path cannot happen for a valid handle.
	 */
	lck_mtx_lock(&vfs_typenum_mtx);
	clrbit(vfs_typenum_arr, handle->vfc_typenum);
	if (maxvfstypenum == handle->vfc_typenum) {
		maxvfstypenum--;
	}
	lck_mtx_unlock(&vfs_typenum_mtx);

	/*
	 * save the old descriptor; the free cannot occur unconditionally,
	 * since vfstable_del() may fail.
	 */
	if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
		old_desc = vfstbl->vfc_descptr;
		descsize = vfstbl->vfc_descsize;
	}
	err = vfstable_del(vfstbl);

	mount_list_unlock();

	/* free the descriptor if the delete was successful */
	if (err == 0) {
		kfree_type(PFI, descsize, old_desc);
	}

	return err;
}
1225 
1226 void
vfs_setowner(mount_t mp,uid_t uid,gid_t gid)1227 vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
1228 {
1229 	mp->mnt_fsowner = uid;
1230 	mp->mnt_fsgroup = gid;
1231 }
1232 
1233 /*
1234  * Callers should be careful how they use this; accessing
1235  * mnt_last_write_completed_timestamp is not thread-safe.  Writing to
1236  * it isn't either.  Point is: be prepared to deal with strange values
1237  * being returned.
1238  */
1239 uint64_t
vfs_idle_time(mount_t mp)1240 vfs_idle_time(mount_t mp)
1241 {
1242 	if (mp->mnt_pending_write_size) {
1243 		return 0;
1244 	}
1245 
1246 	struct timeval now;
1247 
1248 	microuptime(&now);
1249 
1250 	return (now.tv_sec
1251 	       - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
1252 	       + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec;
1253 }
1254 
/*
 * vfs_context_create_with_proc() takes a reference on an arbitrary
 * thread in the process.  To distinguish this reference-counted thread
 * from the usual non-reference-counted thread, we set the least significant
 * bit of vc_thread (the tagging scheme relies on thread pointers being
 * aligned so that bit is otherwise always zero).
 */
/* True when vc_thread carries the "referenced thread" tag bit. */
#define VFS_CONTEXT_THREAD_IS_REFERENCED(ctx) \
	(!!(((uintptr_t)(ctx)->vc_thread) & 1UL))

/* Store a thread pointer with the "referenced" tag bit set. */
#define VFS_CONTEXT_SET_REFERENCED_THREAD(ctx, thr) \
	(ctx)->vc_thread = (thread_t)(((uintptr_t)(thr)) | 1UL)

/* Retrieve the thread pointer with the tag bit stripped off. */
#define VFS_CONTEXT_GET_THREAD(ctx) \
	((thread_t)(((uintptr_t)(ctx)->vc_thread) & ~1UL))
1269 
1270 int
vfs_context_pid(vfs_context_t ctx)1271 vfs_context_pid(vfs_context_t ctx)
1272 {
1273 	return proc_pid(vfs_context_proc(ctx));
1274 }
1275 
1276 int
vfs_context_copy_audit_token(vfs_context_t ctx,audit_token_t * token)1277 vfs_context_copy_audit_token(vfs_context_t ctx, audit_token_t *token)
1278 {
1279 	kern_return_t           err;
1280 	task_t                  task;
1281 	mach_msg_type_number_t  info_size = TASK_AUDIT_TOKEN_COUNT;
1282 
1283 	task = vfs_context_task(ctx);
1284 
1285 	if (task == NULL) {
1286 		// Not sure how this would happen; we are supposed to be
1287 		// in the middle of using the context. Regardless, don't
1288 		// wander off a NULL pointer.
1289 		return ESRCH;
1290 	}
1291 
1292 	err = task_info(task, TASK_AUDIT_TOKEN, (integer_t *)token, &info_size);
1293 	return (err) ? ESRCH : 0;
1294 }
1295 
1296 int
vfs_context_suser(vfs_context_t ctx)1297 vfs_context_suser(vfs_context_t ctx)
1298 {
1299 	return suser(ctx->vc_ucred, NULL);
1300 }
1301 
1302 /*
1303  * Return bit field of signals posted to all threads in the context's process.
1304  *
1305  * XXX Signals should be tied to threads, not processes, for most uses of this
1306  * XXX call.
1307  */
1308 int
vfs_context_issignal(vfs_context_t ctx,sigset_t mask)1309 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1310 {
1311 	proc_t p = vfs_context_proc(ctx);
1312 	if (p) {
1313 		return proc_pendingsignals(p, mask);
1314 	}
1315 	return 0;
1316 }
1317 
1318 int
vfs_context_is64bit(vfs_context_t ctx)1319 vfs_context_is64bit(vfs_context_t ctx)
1320 {
1321 	uthread_t uth;
1322 	thread_t t;
1323 
1324 	if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1325 		uth = get_bsdthread_info(t);
1326 	} else {
1327 		uth = current_uthread();
1328 	}
1329 	return uthread_is64bit(uth);
1330 }
1331 
1332 boolean_t
vfs_context_can_resolve_triggers(vfs_context_t ctx)1333 vfs_context_can_resolve_triggers(vfs_context_t ctx)
1334 {
1335 	proc_t proc = vfs_context_proc(ctx);
1336 
1337 	if (proc) {
1338 		if (proc->p_vfs_iopolicy &
1339 		    P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE) {
1340 			return false;
1341 		}
1342 		return true;
1343 	}
1344 	return false;
1345 }
1346 
1347 boolean_t
vfs_context_can_break_leases(vfs_context_t ctx)1348 vfs_context_can_break_leases(vfs_context_t ctx)
1349 {
1350 	proc_t proc = vfs_context_proc(ctx);
1351 
1352 	if (proc) {
1353 		/*
1354 		 * We do not have a separate I/O policy for this,
1355 		 * because the scenarios where we would not want
1356 		 * local file lease breaks are currently exactly
1357 		 * the same as where we would not want dataless
1358 		 * file materialization (mainly, system daemons
1359 		 * passively snooping file activity).
1360 		 */
1361 		if (proc->p_vfs_iopolicy &
1362 		    P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES) {
1363 			return true;
1364 		}
1365 		return false;
1366 	}
1367 	return true;
1368 }
1369 
1370 bool
vfs_context_allow_fs_blksize_nocache_write(vfs_context_t ctx)1371 vfs_context_allow_fs_blksize_nocache_write(vfs_context_t ctx)
1372 {
1373 	thread_t t;
1374 	proc_t p;
1375 
1376 	if ((ctx == NULL) || (t = VFS_CONTEXT_GET_THREAD(ctx)) == NULL) {
1377 		return false;
1378 	}
1379 
1380 	p = (proc_t)get_bsdthreadtask_info(t);
1381 	if (p && (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_NOCACHE_WRITE_FS_BLKSIZE)) {
1382 		return true;
1383 	}
1384 
1385 	return false;
1386 }
1387 
1388 boolean_t
vfs_context_skip_mtime_update(vfs_context_t ctx)1389 vfs_context_skip_mtime_update(vfs_context_t ctx)
1390 {
1391 	proc_t p = vfs_context_proc(ctx);
1392 	thread_t t = vfs_context_thread(ctx);
1393 	uthread_t ut = t ? get_bsdthread_info(t) : NULL;
1394 
1395 	if (ut && (os_atomic_load(&ut->uu_flag, relaxed) & UT_SKIP_MTIME_UPDATE)) {
1396 		return true;
1397 	}
1398 
1399 	/*
1400 	 * If the 'UT_SKIP_MTIME_UPDATE_IGNORE' policy is set for this thread then
1401 	 * we override the default behavior and ignore the process's mtime update
1402 	 * policy.
1403 	 */
1404 	if (ut && (os_atomic_load(&ut->uu_flag, relaxed) & UT_SKIP_MTIME_UPDATE_IGNORE)) {
1405 		return false;
1406 	}
1407 
1408 	if (p && (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_SKIP_MTIME_UPDATE)) {
1409 		return true;
1410 	}
1411 
1412 	return false;
1413 }
1414 
1415 boolean_t
vfs_context_allow_entitled_reserve_access(vfs_context_t ctx)1416 vfs_context_allow_entitled_reserve_access(vfs_context_t ctx)
1417 {
1418 	thread_t t;
1419 	uthread_t uth;
1420 	proc_t p;
1421 
1422 	if ((ctx == NULL) || (t = VFS_CONTEXT_GET_THREAD(ctx)) == NULL) {
1423 		return false;
1424 	}
1425 
1426 	uth = get_bsdthread_info(t);
1427 	if (uth && (os_atomic_load(&uth->uu_flag, relaxed) & UT_FS_ENTITLED_RESERVE_ACCESS)) {
1428 		return true;
1429 	}
1430 
1431 	p = (proc_t)get_bsdthreadtask_info(t);
1432 	if (p && (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_ENTITLED_RESERVE_ACCESS)) {
1433 		return true;
1434 	}
1435 
1436 	return false;
1437 }
1438 
1439 /*
1440  * vfs_context_proc
1441  *
1442  * Description:	Given a vfs_context_t, return the proc_t associated with it.
1443  *
1444  * Parameters:	vfs_context_t			The context to use
1445  *
1446  * Returns:	proc_t				The process for this context
1447  *
1448  * Notes:	This function will return the current_proc() if any of the
1449  *		following conditions are true:
1450  *
1451  *		o	The supplied context pointer is NULL
1452  *		o	There is no Mach thread associated with the context
1453  *		o	There is no Mach task associated with the Mach thread
1454  *		o	There is no proc_t associated with the Mach task
1455  *		o	The proc_t has no per process open file table
1456  *
1457  *		This causes this function to return a value matching as
1458  *		closely as possible the previous behaviour.
1459  */
1460 proc_t
vfs_context_proc(vfs_context_t ctx)1461 vfs_context_proc(vfs_context_t ctx)
1462 {
1463 	proc_t  proc = NULL;
1464 	thread_t t;
1465 
1466 	if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1467 		proc = (proc_t)get_bsdthreadtask_info(t);
1468 	}
1469 
1470 	return proc == NULL ? current_proc() : proc;
1471 }
1472 
1473 /*
1474  * vfs_context_get_special_port
1475  *
1476  * Description: Return the requested special port from the task associated
1477  *              with the given context.
1478  *
1479  * Parameters:	vfs_context_t			The context to use
1480  *              int				Index of special port
1481  *              ipc_port_t *			Pointer to returned port
1482  *
1483  * Returns:	kern_return_t			see task_get_special_port()
1484  */
1485 kern_return_t
vfs_context_get_special_port(vfs_context_t ctx,int which,ipc_port_t * portp)1486 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1487 {
1488 	return task_get_special_port(vfs_context_task(ctx), which, portp);
1489 }
1490 
1491 /*
1492  * vfs_context_set_special_port
1493  *
1494  * Description: Set the requested special port in the task associated
1495  *              with the given context.
1496  *
1497  * Parameters:	vfs_context_t			The context to use
1498  *              int				Index of special port
1499  *              ipc_port_t			New special port
1500  *
1501  * Returns:	kern_return_t			see task_set_special_port_internal()
1502  */
1503 kern_return_t
vfs_context_set_special_port(vfs_context_t ctx,int which,ipc_port_t port)1504 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1505 {
1506 	return task_set_special_port_internal(vfs_context_task(ctx),
1507 	           which, port);
1508 }
1509 
/*
 * vfs_context_thread
 *
 * Description:	Return the Mach thread associated with a vfs_context_t
 *
 * Parameters:	vfs_context_t			The context to use
 *
 * Returns:	thread_t			The thread for this context, or
 *						NULL, if there is not one.
 *
 * Notes:	NULL thread_t's are legal, but discouraged.  They occur only
 *		as a result of a static vfs_context_t declaration in a function
 *		and will result in this function returning NULL.
 *
 *		This is intentional; this function should NOT return the
 *		current_thread() in this case.
 */
thread_t
vfs_context_thread(vfs_context_t ctx)
{
	/* Strip the "referenced" tag bit before handing the pointer out. */
	return VFS_CONTEXT_GET_THREAD(ctx);
}
1532 
1533 /*
1534  * vfs_context_task
1535  *
1536  * Description:	Return the Mach task associated with a vfs_context_t
1537  *
1538  * Parameters:	vfs_context_t			The context to use
1539  *
1540  * Returns:	task_t				The task for this context, or
1541  *						NULL, if there is not one.
1542  *
1543  * Notes:	NULL task_t's are legal, but discouraged.  They occur only
1544  *		as a result of a static vfs_context_t declaration in a function
1545  *		and will result in this function returning NULL.
1546  *
1547  *		This is intentional; this function should NOT return the
1548  *		task associated with current_thread() in this case.
1549  */
1550 task_t
vfs_context_task(vfs_context_t ctx)1551 vfs_context_task(vfs_context_t ctx)
1552 {
1553 	task_t                  task = NULL;
1554 	thread_t                t;
1555 
1556 	if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1557 		task = get_threadtask(t);
1558 	}
1559 
1560 	return task;
1561 }
1562 
1563 /*
1564  * vfs_context_cwd
1565  *
1566  * Description:	Returns a reference on the vnode for the current working
1567  *		directory for the supplied context
1568  *
1569  * Parameters:	vfs_context_t			The context to use
1570  *
1571  * Returns:	vnode_t				The current working directory
1572  *						for this context
1573  *
1574  * Notes:	The function first attempts to obtain the current directory
1575  *		from the thread, and if it is not present there, falls back
1576  *		to obtaining it from the process instead.  If it can't be
1577  *		obtained from either place, we return NULLVP.
1578  */
1579 vnode_t
vfs_context_cwd(vfs_context_t ctx)1580 vfs_context_cwd(vfs_context_t ctx)
1581 {
1582 	vnode_t cwd = NULLVP;
1583 	thread_t t;
1584 
1585 	if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1586 		uthread_t uth = get_bsdthread_info(t);
1587 		proc_t proc;
1588 
1589 		/*
1590 		 * Get the cwd from the thread; if there isn't one, get it
1591 		 * from the process, instead.
1592 		 */
1593 		if ((cwd = uth->uu_cdir) == NULLVP &&
1594 		    (proc = (proc_t)get_bsdthreadtask_info(t)) != NULL) {
1595 			cwd = proc->p_fd.fd_cdir;
1596 		}
1597 	}
1598 
1599 	return cwd;
1600 }
1601 
1602 /*
1603  * vfs_context_create
1604  *
1605  * Description: Allocate and initialize a new context.
1606  *
1607  * Parameters:  vfs_context_t:                  Context to copy, or NULL for new
1608  *
1609  * Returns:     Pointer to new context
1610  *
1611  * Notes:       Copy cred and thread from argument, if available; else
1612  *              initialize with current thread and new cred.  Returns
1613  *              with a reference held on the credential.
1614  */
1615 vfs_context_t
vfs_context_create(vfs_context_t ctx)1616 vfs_context_create(vfs_context_t ctx)
1617 {
1618 	vfs_context_t newcontext;
1619 
1620 	newcontext = zalloc_flags(KT_VFS_CONTEXT, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1621 
1622 	if (ctx == NULL) {
1623 		ctx = vfs_context_current();
1624 	}
1625 	*newcontext = *ctx;
1626 	if (IS_VALID_CRED(ctx->vc_ucred)) {
1627 		kauth_cred_ref(ctx->vc_ucred);
1628 	}
1629 
1630 	return newcontext;
1631 }
1632 
/*
 * vfs_context_create_with_proc
 *
 * Description: Create a new context with credentials taken from
 *              the specified proc.
 *
 * Parameters:  proc_t: The process whose credentials to use.
 *
 * Returns:     Pointer to new context.
 *
 * Notes:       The context will also take a reference on an arbitrary
 *              thread in the process as well as the process's credentials.
 */
vfs_context_t
vfs_context_create_with_proc(proc_t p)
{
	vfs_context_t newcontext;
	thread_t thread;
	kauth_cred_t cred;

	/* For the current process, the plain create path suffices. */
	if (p == current_proc()) {
		return vfs_context_create(NULL);
	}

	newcontext = zalloc_flags(KT_VFS_CONTEXT, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	/* Pick an arbitrary thread under the proc lock and pin it. */
	proc_lock(p);
	thread = proc_thread(p);        /* XXX */
	if (thread != NULL) {
		thread_reference(thread);
	}
	proc_unlock(p);

	cred = kauth_cred_proc_ref(p);

	/*
	 * Tag vc_thread's low bit so vfs_context_rele() knows to drop
	 * the thread reference taken above.
	 */
	if (thread != NULL) {
		VFS_CONTEXT_SET_REFERENCED_THREAD(newcontext, thread);
	}
	newcontext->vc_ucred = cred;

	return newcontext;
}
1675 
vfs_context_t
vfs_context_current(void)
{
	/*
	 * The current thread's read-only structure doubles as a
	 * vfs_context; these asserts pin the field-layout overlap the
	 * cast below depends on.
	 */
	static_assert(offsetof(struct thread_ro, tro_owner) ==
	    offsetof(struct vfs_context, vc_thread));
	static_assert(offsetof(struct thread_ro, tro_cred) ==
	    offsetof(struct vfs_context, vc_ucred));

	return (vfs_context_t)current_thread_ro();
}
1686 
vfs_context_t
vfs_context_kernel(void)
{
	/* The global kernel context (see vfs_context_iskernel()). */
	return &vfs_context0;
}
1692 
1693 int
vfs_context_rele(vfs_context_t ctx)1694 vfs_context_rele(vfs_context_t ctx)
1695 {
1696 	if (ctx) {
1697 		if (IS_VALID_CRED(ctx->vc_ucred)) {
1698 			kauth_cred_unref(&ctx->vc_ucred);
1699 		}
1700 		if (VFS_CONTEXT_THREAD_IS_REFERENCED(ctx)) {
1701 			assert(VFS_CONTEXT_GET_THREAD(ctx) != NULL);
1702 			thread_deallocate(VFS_CONTEXT_GET_THREAD(ctx));
1703 		}
1704 		zfree(KT_VFS_CONTEXT, ctx);
1705 	}
1706 	return 0;
1707 }
1708 
1709 
kauth_cred_t
vfs_context_ucred(vfs_context_t ctx)
{
	/* Credential associated with this context; no reference is taken. */
	return ctx->vc_ucred;
}
1715 
/*
 * Return true if the context is owned by the superuser.
 */
int
vfs_context_issuser(vfs_context_t ctx)
{
	/* Delegates to the kauth superuser check on the context's cred. */
	return kauth_cred_issuser(vfs_context_ucred(ctx));
}
1724 
int
vfs_context_iskernel(vfs_context_t ctx)
{
	/* True only for the global kernel context, vfs_context0. */
	return ctx == &vfs_context0;
}
1730 
/*
 * Given a context, for all fields of vfs_context_t which
 * are not held with a reference, set those fields to the
 * values for the current execution context.
 *
 * Returns: 0 for success, nonzero for failure
 *
 * The intended use is:
 * 1. vfs_context_create()	gets the caller a context
 * 2. vfs_context_bind()        sets the unrefcounted data
 * 3. vfs_context_rele()        releases the context
 *
 */
int
vfs_context_bind(vfs_context_t ctx)
{
	/*
	 * Must not be used on a context holding a thread reference
	 * (from vfs_context_create_with_proc()): the plain assignment
	 * below would leak that reference.
	 */
	assert(!VFS_CONTEXT_THREAD_IS_REFERENCED(ctx));
	ctx->vc_thread = current_thread();
	return 0;
}
1751 
1752 int
vfs_set_thread_fs_private(uint8_t tag,uint64_t fs_private)1753 vfs_set_thread_fs_private(uint8_t tag, uint64_t fs_private)
1754 {
1755 	struct uthread *ut;
1756 
1757 	if (tag != FS_PRIVATE_TAG_APFS) {
1758 		return ENOTSUP;
1759 	}
1760 
1761 	ut = current_uthread();
1762 	ut->t_fs_private = fs_private;
1763 
1764 	return 0;
1765 }
1766 
1767 int
vfs_get_thread_fs_private(uint8_t tag,uint64_t * fs_private)1768 vfs_get_thread_fs_private(uint8_t tag, uint64_t *fs_private)
1769 {
1770 	struct uthread *ut;
1771 
1772 	if (tag != FS_PRIVATE_TAG_APFS) {
1773 		return ENOTSUP;
1774 	}
1775 
1776 	ut = current_uthread();
1777 	*fs_private = ut->t_fs_private;
1778 
1779 	return 0;
1780 }
1781 
1782 int
vfs_isswapmount(mount_t mnt)1783 vfs_isswapmount(mount_t mnt)
1784 {
1785 	return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
1786 }
1787 
1788 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1789 
1790 
/*
 * Convert between vnode types and inode formats (since POSIX.1
 * defines mode word of stat structure in terms of inode formats).
 */
enum vtype
vnode_iftovt(int mode)
{
	/* The S_IFMT file-type field sits in bits 12-15 of the mode. */
	return iftovt_tab[((mode) & S_IFMT) >> 12];
}
1800 
int
vnode_vttoif(enum vtype indx)
{
	/* Map a vnode type back to its S_IF* inode-format bits. */
	return vttoif_tab[(int)(indx)];
}
1806 
int
vnode_makeimode(int indx, int mode)
{
	/* Combine a vnode type (as S_IF* bits) with permission bits. */
	return (int)(VTTOIF(indx) | (mode));
}
1812 
1813 
1814 /*
1815  * vnode manipulation functions.
1816  */
1817 
/* returns system root vnode iocount; It should be released using vnode_put() */
vnode_t
vfs_rootvnode(void)
{
	vnode_t vp = NULLVP;

	/* Unlocked peek first; re-read under the rw lock before use. */
	if (rootvnode) {
		lck_rw_lock_shared(&rootvnode_rw_lock);
		vp = rootvnode;
		/* vnode_get() takes the iocount; failure means it's unusable. */
		if (vp && (vnode_get(vp) != 0)) {
			vp = NULLVP;
		}
		lck_rw_unlock_shared(&rootvnode_rw_lock);
	}

	return vp;
}
1835 
uint32_t
vnode_vid(vnode_t vp)
{
	/* The vnode's v_id value, truncated to 32 bits. */
	return (uint32_t)(vp->v_id);
}
1841 
mount_t
vnode_mount(vnode_t vp)
{
	/* Mount point this vnode belongs to. */
	return vp->v_mount;
}
1847 
#if CONFIG_IOSCHED
vnode_t
vnode_mountdevvp(vnode_t vp)
{
	/* Device vnode backing vp's mount, or 0 when there is no mount. */
	mount_t mp = vp->v_mount;

	return (mp != NULL) ? mp->mnt_devvp : (vnode_t)0;
}
#endif
1859 
1860 boolean_t
vnode_isonexternalstorage(vnode_t vp)1861 vnode_isonexternalstorage(vnode_t vp)
1862 {
1863 	if (vp) {
1864 		if (vp->v_mount) {
1865 			if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_PERIPHERAL_DRIVE) {
1866 				return TRUE;
1867 			}
1868 		}
1869 	}
1870 	return FALSE;
1871 }
1872 
1873 boolean_t
vnode_isonssd(vnode_t vp)1874 vnode_isonssd(vnode_t vp)
1875 {
1876 	if (vp) {
1877 		mount_t mp = vp->v_mount;
1878 		if (mp && disk_conditioner_mount_is_ssd(mp)) {
1879 			return TRUE;
1880 		}
1881 	}
1882 	return FALSE;
1883 }
1884 
1885 mount_t
vnode_mountedhere(vnode_t vp)1886 vnode_mountedhere(vnode_t vp)
1887 {
1888 	mount_t mp;
1889 
1890 	if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1891 	    (mp->mnt_vnodecovered == vp)) {
1892 		return mp;
1893 	} else {
1894 		return (mount_t)NULL;
1895 	}
1896 }
1897 
/* returns vnode type of vnode_t */
enum vtype
vnode_vtype(vnode_t vp)
{
	return vp->v_type;
}
1904 
/* returns FS specific node saved in vnode */
void *
vnode_fsnode(vnode_t vp)
{
	/* Opaque per-filesystem data attached via v_data. */
	return vp->v_data;
}
1911 
void
vnode_clearfsnode(vnode_t vp)
{
	/* Detach the filesystem-private data from the vnode. */
	vp->v_data = NULL;
}
1917 
dev_t
vnode_specrdev(vnode_t vp)
{
	/* Raw device number stored on the vnode (v_rdev). */
	return vp->v_rdev;
}
1923 
1924 
1925 /* Accessor functions */
1926 /* is vnode_t a root vnode */
1927 int
vnode_isvroot(vnode_t vp)1928 vnode_isvroot(vnode_t vp)
1929 {
1930 	return (vp->v_flag & VROOT)? 1 : 0;
1931 }
1932 
1933 /* is vnode_t a system vnode */
1934 int
vnode_issystem(vnode_t vp)1935 vnode_issystem(vnode_t vp)
1936 {
1937 	return (vp->v_flag & VSYSTEM)? 1 : 0;
1938 }
1939 
1940 /* is vnode_t a swap file vnode */
1941 int
vnode_isswap(vnode_t vp)1942 vnode_isswap(vnode_t vp)
1943 {
1944 	return (vp->v_flag & VSWAP)? 1 : 0;
1945 }
1946 
1947 /* is vnode_t a tty */
1948 int
vnode_istty(vnode_t vp)1949 vnode_istty(vnode_t vp)
1950 {
1951 	return (vp->v_flag & VISTTY) ? 1 : 0;
1952 }
1953 
1954 /* if vnode_t mount operation in progress */
1955 int
vnode_ismount(vnode_t vp)1956 vnode_ismount(vnode_t vp)
1957 {
1958 	return (vp->v_flag & VMOUNT)? 1 : 0;
1959 }
1960 
1961 /* is this vnode under recyle now */
1962 int
vnode_isrecycled(vnode_t vp)1963 vnode_isrecycled(vnode_t vp)
1964 {
1965 	int ret;
1966 
1967 	vnode_lock_spin(vp);
1968 	ret =  (vp->v_lflag & (VL_TERMINATE | VL_DEAD))? 1 : 0;
1969 	vnode_unlock(vp);
1970 	return ret;
1971 }
1972 
1973 /* is this vnode marked for termination */
1974 int
vnode_willberecycled(vnode_t vp)1975 vnode_willberecycled(vnode_t vp)
1976 {
1977 	return (vp->v_lflag & VL_MARKTERM) ? 1 : 0;
1978 }
1979 
1980 
1981 /* vnode was created by background task requesting rapid aging
1982  *  and has not since been referenced by a normal task */
1983 int
vnode_israge(vnode_t vp)1984 vnode_israge(vnode_t vp)
1985 {
1986 	return (vp->v_flag & VRAGE)? 1 : 0;
1987 }
1988 
int
vnode_needssnapshots(__unused vnode_t vp)
{
	/* Always 0: this interface reports no snapshot requirement. */
	return 0;
}
1994 
1995 
1996 /* Check the process/thread to see if we should skip atime updates */
1997 int
vfs_ctx_skipatime(vfs_context_t ctx)1998 vfs_ctx_skipatime(vfs_context_t ctx)
1999 {
2000 	struct uthread *ut;
2001 	proc_t proc;
2002 	thread_t thr;
2003 
2004 	proc = vfs_context_proc(ctx);
2005 	thr = vfs_context_thread(ctx);
2006 
2007 	/* Validate pointers in case we were invoked via a kernel context */
2008 	if (thr && proc) {
2009 		ut = get_bsdthread_info(thr);
2010 
2011 		if (proc->p_lflag & P_LRAGE_VNODES) {
2012 			return 1;
2013 		}
2014 
2015 		if (ut) {
2016 			if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) {
2017 				return 1;
2018 			}
2019 		}
2020 
2021 		if (proc->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) {
2022 			return 1;
2023 		}
2024 	}
2025 	return 0;
2026 }
2027 
2028 /* is vnode_t marked to not keep data cached once it's been consumed */
2029 int
vnode_isnocache(vnode_t vp)2030 vnode_isnocache(vnode_t vp)
2031 {
2032 	return (vp->v_flag & VNOCACHE_DATA)? 1 : 0;
2033 }
2034 
2035 /*
2036  * has sequential readahead been disabled on this vnode
2037  */
2038 int
vnode_isnoreadahead(vnode_t vp)2039 vnode_isnoreadahead(vnode_t vp)
2040 {
2041 	return (vp->v_flag & VRAOFF)? 1 : 0;
2042 }
2043 
2044 int
vnode_is_openevt(vnode_t vp)2045 vnode_is_openevt(vnode_t vp)
2046 {
2047 	return (vp->v_flag & VOPENEVT)? 1 : 0;
2048 }
2049 
2050 /* is vnode_t a standard one? */
2051 int
vnode_isstandard(vnode_t vp)2052 vnode_isstandard(vnode_t vp)
2053 {
2054 	return (vp->v_flag & VSTANDARD)? 1 : 0;
2055 }
2056 
2057 /* don't vflush() if SKIPSYSTEM */
2058 int
vnode_isnoflush(vnode_t vp)2059 vnode_isnoflush(vnode_t vp)
2060 {
2061 	return (vp->v_flag & VNOFLUSH)? 1 : 0;
2062 }
2063 
2064 /* is vnode_t a regular file */
2065 int
vnode_isreg(vnode_t vp)2066 vnode_isreg(vnode_t vp)
2067 {
2068 	return (vp->v_type == VREG)? 1 : 0;
2069 }
2070 
2071 /* is vnode_t a directory? */
2072 int
vnode_isdir(vnode_t vp)2073 vnode_isdir(vnode_t vp)
2074 {
2075 	return (vp->v_type == VDIR)? 1 : 0;
2076 }
2077 
2078 /* is vnode_t a symbolic link ? */
2079 int
vnode_islnk(vnode_t vp)2080 vnode_islnk(vnode_t vp)
2081 {
2082 	return (vp->v_type == VLNK)? 1 : 0;
2083 }
2084 
int
vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
{
	/*
	 * Decide whether a filesystem's in-FS lookup must hand control
	 * back to VFS to continue: at a covered mount point, at a
	 * trigger vnode, or at a symlink that must be followed.
	 * Returns EKEEPLOOKING (and sets NAMEI_CONTLOOKUP in ni_flag)
	 * when continuation is needed, 0 otherwise.
	 */
	struct nameidata *ndp = cnp->cn_ndp;

	if (ndp == NULL) {
		panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL");
	}

	if (vnode_isdir(vp)) {
		/* Crossing into a covering mount is handled by VFS. */
		if (vp->v_mountedhere != NULL) {
			goto yes;
		}

#if CONFIG_TRIGGERS
		/* Trigger vnodes may need resolving before descent. */
		if (vp->v_resolve) {
			goto yes;
		}
#endif /* CONFIG_TRIGGERS */
	}


	if (vnode_islnk(vp)) {
		/* From lookup():  || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
		if (cnp->cn_flags & FOLLOW) {
			goto yes;
		}
		if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
			goto yes;
		}
	}

	return 0;

yes:
	ndp->ni_flag |= NAMEI_CONTLOOKUP;
	return EKEEPLOOKING;
}
2123 
2124 /* is vnode_t a fifo ? */
2125 int
vnode_isfifo(vnode_t vp)2126 vnode_isfifo(vnode_t vp)
2127 {
2128 	return (vp->v_type == VFIFO)? 1 : 0;
2129 }
2130 
2131 /* is vnode_t a block device? */
2132 int
vnode_isblk(vnode_t vp)2133 vnode_isblk(vnode_t vp)
2134 {
2135 	return (vp->v_type == VBLK)? 1 : 0;
2136 }
2137 
2138 int
vnode_isspec(vnode_t vp)2139 vnode_isspec(vnode_t vp)
2140 {
2141 	return ((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0;
2142 }
2143 
2144 /* is vnode_t a char device? */
2145 int
vnode_ischr(vnode_t vp)2146 vnode_ischr(vnode_t vp)
2147 {
2148 	return (vp->v_type == VCHR)? 1 : 0;
2149 }
2150 
2151 /* is vnode_t a socket? */
2152 int
vnode_issock(vnode_t vp)2153 vnode_issock(vnode_t vp)
2154 {
2155 	return (vp->v_type == VSOCK)? 1 : 0;
2156 }
2157 
2158 /* is vnode_t a device with multiple active vnodes referring to it? */
2159 int
vnode_isaliased(vnode_t vp)2160 vnode_isaliased(vnode_t vp)
2161 {
2162 	enum vtype vt = vp->v_type;
2163 	if (!((vt == VCHR) || (vt == VBLK))) {
2164 		return 0;
2165 	} else {
2166 		return vp->v_specflags & SI_ALIASED;
2167 	}
2168 }
2169 
/* is vnode_t a named stream? */
int
vnode_isnamedstream(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	return (vp->v_flag & VISNAMEDSTREAM) ? 1 : 0;
#else
	/* Named-stream support compiled out: never a named stream. */
	return 0;
#endif
}
2186 
int
vnode_isshadow(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	/* Nonzero when this vnode is a shadow (backing) file. */
	return (vp->v_flag & VISSHADOW) ? 1 : 0;
#else
	/* Named-stream support compiled out: never a shadow vnode. */
	return 0;
#endif
}
2202 
2203 /* does vnode have associated named stream vnodes ? */
2204 int
vnode_hasnamedstreams(vnode_t vp)2205 vnode_hasnamedstreams(
2206 #if NAMEDSTREAMS
2207 	vnode_t vp
2208 #else
2209 	__unused vnode_t vp
2210 #endif
2211 	)
2212 {
2213 #if NAMEDSTREAMS
2214 	return (vp->v_lflag & VL_HASSTREAMS) ? 1 : 0;
2215 #else
2216 	return 0;
2217 #endif
2218 }
/* TBD:  set vnode_t to not cache data after it is consumed once; used for quota */
void
vnode_setnocache(vnode_t vp)
{
	/* spin variant is sufficient: only a flag bit is flipped under the lock */
	vnode_lock_spin(vp);
	vp->v_flag |= VNOCACHE_DATA;
	vnode_unlock(vp);
}
2227 
/* Clear the VNOCACHE_DATA flag set by vnode_setnocache(). */
void
vnode_clearnocache(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOCACHE_DATA;
	vnode_unlock(vp);
}
2235 
/* Mark the vnode as generating open events (VOPENEVT). */
void
vnode_set_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VOPENEVT;
	vnode_unlock(vp);
}
2243 
/* Clear the VOPENEVT flag set by vnode_set_openevt(). */
void
vnode_clear_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VOPENEVT;
	vnode_unlock(vp);
}
2251 
2252 
/* Disable read-ahead on this vnode (sets VRAOFF). */
void
vnode_setnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VRAOFF;
	vnode_unlock(vp);
}
2260 
/* Re-enable read-ahead on this vnode (clears VRAOFF). */
void
vnode_clearnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VRAOFF;
	vnode_unlock(vp);
}
2268 
2269 int
vnode_isfastdevicecandidate(vnode_t vp)2270 vnode_isfastdevicecandidate(vnode_t vp)
2271 {
2272 	return (vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0;
2273 }
2274 
/* Mark the vnode as a candidate for placement on a fast device (VFASTDEVCANDIDATE). */
void
vnode_setfastdevicecandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VFASTDEVCANDIDATE;
	vnode_unlock(vp);
}
2282 
/* Clear the VFASTDEVCANDIDATE flag set by vnode_setfastdevicecandidate(). */
void
vnode_clearfastdevicecandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VFASTDEVCANDIDATE;
	vnode_unlock(vp);
}
2290 
2291 int
vnode_isautocandidate(vnode_t vp)2292 vnode_isautocandidate(vnode_t vp)
2293 {
2294 	return (vp->v_flag & VAUTOCANDIDATE)? 1 : 0;
2295 }
2296 
/* Set the VAUTOCANDIDATE flag on the vnode. */
void
vnode_setautocandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VAUTOCANDIDATE;
	vnode_unlock(vp);
}
2304 
/* Clear the VAUTOCANDIDATE flag set by vnode_setautocandidate(). */
void
vnode_clearautocandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VAUTOCANDIDATE;
	vnode_unlock(vp);
}
2312 
2313 
2314 
2315 
/* mark vnode_t to skip vflush() is SKIPSYSTEM */
void
vnode_setnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VNOFLUSH;
	vnode_unlock(vp);
}
2324 
/* Clear the VNOFLUSH flag set by vnode_setnoflush(). */
void
vnode_clearnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOFLUSH;
	vnode_unlock(vp);
}
2332 
/* Get the memory object control associated with the vnode */
memory_object_control_t
vnode_memoryobject(vnode_t vp)
{
	/* thin wrapper over UBC; no flags requested */
	return ubc_getobject(vp, UBC_FLAGS_NONE);
}
2339 
2340 /* is vnode_t a blkdevice and has a FS mounted on it */
2341 int
vnode_ismountedon(vnode_t vp)2342 vnode_ismountedon(vnode_t vp)
2343 {
2344 	return (vp->v_specflags & SI_MOUNTEDON)? 1 : 0;
2345 }
2346 
/* Mark the device vnode as having a filesystem mounted on it (SI_MOUNTEDON). */
void
vnode_setmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags |= SI_MOUNTEDON;
	vnode_unlock(vp);
}
2354 
/* Clear the SI_MOUNTEDON flag set by vnode_setmountedon(). */
void
vnode_clearmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags &= ~SI_MOUNTEDON;
	vnode_unlock(vp);
}
2362 
2363 
/* Set the filesystem tag (VT_*) on the vnode; 'tag' must fit in 16 bits. */
void
vnode_settag(vnode_t vp, int tag)
{
	/*
	 * We only assign enum values to v_tag, but add an assert to make sure we
	 * catch it in dev/debug builds if this ever change.
	 */
	assert(tag >= SHRT_MIN && tag <= SHRT_MAX);
	vp->v_tag = (uint16_t)tag;
}
2374 
/* Return the filesystem tag (VT_*) previously stored with vnode_settag(). */
int
vnode_tag(vnode_t vp)
{
	return vp->v_tag;
}
2380 
/* Return the cached parent vnode; no reference is taken here — TODO confirm caller holds one. */
vnode_t
vnode_parent(vnode_t vp)
{
	return vp->v_parent;
}
2386 
/* Store dvp as the vnode's cached parent pointer (plain assignment; no reference taken here). */
void
vnode_setparent(vnode_t vp, vnode_t dvp)
{
	vp->v_parent = dvp;
}
2392 
/* Store 'name' as the vnode's name pointer (plain assignment; presumably a name-cache string — verify ownership at callers). */
void
vnode_setname(vnode_t vp, char * name)
{
	vp->v_name = name;
}
2398 
/* return the registered  FS name when adding the FS to kernel */
void
vnode_vfsname(vnode_t vp, char * buf)
{
	/* buf must be at least MFSNAMELEN bytes; strlcpy guarantees NUL-termination */
	strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
}
2405 
/* return the FS type number */
int
vnode_vfstypenum(vnode_t vp)
{
	return vp->v_mount->mnt_vtable->vfc_typenum;
}
2412 
2413 int
vnode_vfs64bitready(vnode_t vp)2414 vnode_vfs64bitready(vnode_t vp)
2415 {
2416 	/*
2417 	 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2418 	 */
2419 	if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
2420 		return 1;
2421 	} else {
2422 		return 0;
2423 	}
2424 }
2425 
2426 
2427 
/* return the visible flags on associated mount point of vnode_t */
uint32_t
vnode_vfsvisflags(vnode_t vp)
{
	return vp->v_mount->mnt_flag & MNT_VISFLAGMASK;
}
2434 
/* return the command modifier flags on associated mount point of vnode_t */
uint32_t
vnode_vfscmdflags(vnode_t vp)
{
	return vp->v_mount->mnt_flag & MNT_CMDFLAGS;
}
2441 
/* return the max symlink length of short links of vnode_t's mount */
uint32_t
vnode_vfsmaxsymlen(vnode_t vp)
{
	return vp->v_mount->mnt_maxsymlinklen;
}
2448 
/* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
struct vfsstatfs *
vnode_vfsstatfs(vnode_t vp)
{
	/* caller must treat the returned structure as read-only */
	return &vp->v_mount->mnt_vfsstat;
}
2455 
/* return a handle to the FSs specific private handle associated with vnode_t's mount point */
void *
vnode_vfsfsprivate(vnode_t vp)
{
	return vp->v_mount->mnt_data;
}
2462 
2463 /* is vnode_t in a rdonly mounted  FS */
2464 int
vnode_vfsisrdonly(vnode_t vp)2465 vnode_vfsisrdonly(vnode_t vp)
2466 {
2467 	return (vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0;
2468 }
2469 
/* Does the vnode's filesystem support the compound rename VNOP? */
int
vnode_compound_rename_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
}
/* Does the vnode's filesystem support the compound rmdir VNOP? */
int
vnode_compound_rmdir_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
}
/* Does the vnode's filesystem support the compound mkdir VNOP? */
int
vnode_compound_mkdir_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
}
/* Does the vnode's filesystem support the compound remove VNOP? */
int
vnode_compound_remove_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
}
/* Does the vnode's filesystem support the compound open VNOP? */
int
vnode_compound_open_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
}
2495 
2496 int
vnode_compound_op_available(vnode_t vp,compound_vnop_id_t opid)2497 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2498 {
2499 	return (vp->v_mount->mnt_compound_ops & opid) != 0;
2500 }
2501 
2502 /*
2503  * Returns vnode ref to current working directory; if a per-thread current
2504  * working directory is in effect, return that instead of the per process one.
2505  *
2506  * XXX Published, but not used.
2507  */
2508 vnode_t
current_workingdir(void)2509 current_workingdir(void)
2510 {
2511 	return vfs_context_cwd(vfs_context_current());
2512 }
2513 
2514 /*
2515  * Get a filesec and optional acl contents from an extended attribute.
 * Function will attempt to retrieve ACL, UUID, and GUID information using a
2517  * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2518  *
2519  * Parameters:	vp			The vnode on which to operate.
2520  *		fsecp			The filesec (and ACL, if any) being
2521  *					retrieved.
2522  *		ctx			The vnode context in which the
2523  *					operation is to be attempted.
2524  *
2525  * Returns:	0			Success
2526  *		!0			errno value
2527  *
2528  * Notes:	The kauth_filesec_t in '*fsecp', if retrieved, will be in
2529  *		host byte order, as will be the ACL contents, if any.
 *		Internally, we will canonicalize these values from network (PPC)
2531  *		byte order after we retrieve them so that the on-disk contents
2532  *		of the extended attribute are identical for both PPC and Intel
2533  *		(if we were not being required to provide this service via
2534  *		fallback, this would be the job of the filesystem
2535  *		'VNOP_GETATTR' call).
2536  *
2537  *		We use ntohl() because it has a transitive property on Intel
 *		machines and no effect on PPC machines.  This guarantees us
2539  *
 * XXX:		Deleting rather than ignoring a corrupt security structure is
 *		probably the only way to reset it without assistance from a
 *		file system integrity checking tool.  Right now we ignore it.
2543  *
 * XXX:		We should enumerate the possible errno values here, and where
2545  *		in the code they originated.
2546  */
static int
vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
{
	kauth_filesec_t fsec;           /* allocated here; ownership passes to caller on success */
	uio_t   fsec_uio;
	size_t  fsec_size;
	size_t  xsize, rsize;
	int     error;
	uint32_t        host_fsec_magic;
	uint32_t        host_acl_entrycount;

	fsec = NULL;
	fsec_uio = NULL;

	/* find out how big the EA is */
	error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
	if (error != 0) {
		/* no EA, no filesec */
		if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
			error = 0;
		}
		/* either way, we are done */
		goto out;
	}

	/*
	 * To be valid, a kauth_filesec_t must be large enough to hold a zero
	 * ACE entry ACL, and if it's larger than that, it must have the right
	 * number of bytes such that it contains an atomic number of ACEs,
	 * rather than partial entries.  Otherwise, we ignore it.
	 */
	if (!KAUTH_FILESEC_VALID(xsize)) {
		KAUTH_DEBUG("    ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
		error = 0;
		goto out;
	}

	/* how many entries would fit? */
	fsec_size = KAUTH_FILESEC_COUNT(xsize);
	if (fsec_size > KAUTH_ACL_MAX_ENTRIES) {
		KAUTH_DEBUG("    ERROR - Bogus (too large) kauth_fiilesec_t: %ld bytes", xsize);
		error = 0;
		goto out;
	}

	/* get buffer and uio */
	if (((fsec = kauth_filesec_alloc((int)fsec_size)) == NULL) ||
	    ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
	    uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
		KAUTH_DEBUG("    ERROR - could not allocate iov to read ACL");
		error = ENOMEM;
		goto out;
	}

	/* read security attribute */
	rsize = xsize;
	if ((error = vn_getxattr(vp,
	    KAUTH_FILESEC_XATTR,
	    fsec_uio,
	    &rsize,
	    XATTR_NOSECURITY,
	    ctx)) != 0) {
		/* no attribute - no security data */
		if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
			error = 0;
		}
		/* either way, we are done */
		goto out;
	}

	/*
	 * Validate security structure; the validation must take place in host
	 * byte order.  If it's corrupt, we will just ignore it.
	 *
	 * NOTE(review): on these "ignore corrupt data" paths we goto out with
	 * error == 0 and *fsecp untouched — callers must pre-initialize *fsecp
	 * to NULL (the callers in this file do).
	 */

	/* Validate the size before trying to convert it */
	if (rsize < KAUTH_FILESEC_SIZE(0)) {
		KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
		goto out;
	}

	/* Validate the magic number before trying to convert it */
	host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
	if (fsec->fsec_magic != host_fsec_magic) {
		KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
		goto out;
	}

	/* Validate the entry count before trying to convert it. */
	host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
	if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
		if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
			KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
			goto out;
		}
		if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
			KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
			goto out;
		}
	}

	/* convert from on-disk (network) byte order to host byte order */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);

	/* success: hand the filesec to the caller (caller frees) */
	*fsecp = fsec;
	fsec = NULL;
	error = 0;
out:
	if (fsec != NULL) {
		kauth_filesec_free(fsec);
	}
	if (fsec_uio != NULL) {
		uio_free(fsec_uio);
	}
	if (error) {
		*fsecp = NULL;
	}
	return error;
}
2665 
2666 /*
2667  * Set a filesec and optional acl contents into an extended attribute.
2668  * function will attempt to store ACL, UUID, and GUID information using a
2669  * write to a named extended attribute (KAUTH_FILESEC_XATTR).  The 'acl'
2670  * may or may not point to the `fsec->fsec_acl`, depending on whether the
2671  * original caller supplied an acl.
2672  *
2673  * Parameters:	vp			The vnode on which to operate.
2674  *		fsec			The filesec being set.
2675  *		acl			The acl to be associated with 'fsec'.
2676  *		ctx			The vnode context in which the
2677  *					operation is to be attempted.
2678  *
2679  * Returns:	0			Success
2680  *		!0			errno value
2681  *
2682  * Notes:	Both the fsec and the acl are always valid.
2683  *
2684  *		The kauth_filesec_t in 'fsec', if any, is in host byte order,
2685  *		as are the acl contents, if they are used.  Internally, we will
 *		canonicalize these values into network (PPC) byte order before we
2687  *		attempt to write them so that the on-disk contents of the
2688  *		extended attribute are identical for both PPC and Intel (if we
2689  *		were not being required to provide this service via fallback,
2690  *		this would be the job of the filesystem 'VNOP_SETATTR' call).
2691  *		We reverse this process on the way out, so we leave with the
2692  *		same byte order we started with.
2693  *
 * XXX:		We should enumerate the possible errno values here, and where
2695  *		in the code they originated.
2696  */
static int
vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
{
	uio_t           fsec_uio;
	int             error;
	uint32_t        saved_acl_copysize;

	fsec_uio = NULL;

	/* two iovecs: one for the filesec header, one for the ACL body */
	if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
		KAUTH_DEBUG("    ERROR - could not allocate iov to write ACL");
		error = ENOMEM;
		goto out;
	}
	/*
	 * Save the pre-converted ACL copysize, because it gets swapped too
	 * if we are running with the wrong endianness.
	 */
	saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);

	/* swap into on-disk (network) byte order before writing */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);

	uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
	uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
	error = vn_setxattr(vp,
	    KAUTH_FILESEC_XATTR,
	    fsec_uio,
	    XATTR_NOSECURITY,           /* we have auth'ed already */
	    ctx);
	VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);

	/* swap back to host byte order so the caller's data is unchanged */
	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);

out:
	if (fsec_uio != NULL) {
		uio_free(fsec_uio);
	}
	return error;
}
2736 
2737 /*
2738  * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2739  */
void
vnode_attr_handle_uid_and_gid(struct vnode_attr *vap, mount_t mp, vfs_context_t ctx)
{
	uid_t   nuid;
	gid_t   ngid;
	bool is_suser = vfs_context_issuser(ctx) ? true : false;

	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		if (is_suser && VATTR_IS_SUPPORTED(vap, va_uid)) {
			/* superuser always sees the filesystem's real owner */
			nuid = vap->va_uid;
		} else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			/* ownership-ignoring mounts report the mount-wide owner */
			nuid = mp->mnt_fsowner;
			if (nuid == KAUTH_UID_NONE) {
				nuid = 99;
			}
		} else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
			nuid = vap->va_uid;
		} else {
			/* this will always be something sensible */
			nuid = mp->mnt_fsowner;
		}
		/* uid 99 ("unknown") is presented to non-root callers as their own uid */
		if ((nuid == 99) && !is_suser) {
			nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
		}
		VATTR_RETURN(vap, va_uid, nuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		/* mirror of the uid logic above, using the mount-wide group */
		if (is_suser && VATTR_IS_SUPPORTED(vap, va_gid)) {
			ngid = vap->va_gid;
		} else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
			ngid = mp->mnt_fsgroup;
			if (ngid == KAUTH_GID_NONE) {
				ngid = 99;
			}
		} else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
			ngid = vap->va_gid;
		} else {
			/* this will always be something sensible */
			ngid = mp->mnt_fsgroup;
		}
		if ((ngid == 99) && !is_suser) {
			ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
		}
		VATTR_RETURN(vap, va_gid, ngid);
	}
}
2786 
2787 /*
2788  * Returns:	0			Success
2789  *		ENOMEM			Not enough space [only if has filesec]
2790  *		EINVAL			Requested unknown attributes
2791  *		VNOP_GETATTR:		???
2792  *		vnode_get_filesec:	???
2793  *		kauth_cred_guid2uid:	???
2794  *		kauth_cred_guid2gid:	???
2795  *		vfs_update_vfsstat:	???
2796  */
int
vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;
	kauth_acl_t facl;
	int     error;

	/*
	 * Reject attempts to fetch unknown attributes.
	 */
	if (vap->va_active & ~VNODE_ATTR_ALL) {
		return EINVAL;
	}

	/* don't ask for extended security data if the filesystem doesn't support it */
	if (!vfs_extendedsecurity(vnode_mount(vp))) {
		VATTR_CLEAR_ACTIVE(vap, va_acl);
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
		VATTR_CLEAR_ACTIVE(vap, va_guuid);
	}

	/*
	 * If the caller wants size values we might have to synthesise, give the
	 * filesystem the opportunity to supply better intermediate results.
	 */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		VATTR_SET_ACTIVE(vap, va_data_size);
		VATTR_SET_ACTIVE(vap, va_data_alloc);
		VATTR_SET_ACTIVE(vap, va_total_size);
		VATTR_SET_ACTIVE(vap, va_total_alloc);
	}

	/* cleared here and again at 'out:' so it never leaks back to the caller */
	vap->va_vaflags &= ~VA_USEFSID;

	error = VNOP_GETATTR(vp, vap, ctx);
	if (error) {
		KAUTH_DEBUG("ERROR - returning %d", error);
		goto out;
	}

	/*
	 * If extended security data was requested but not returned, try the fallback
	 * path (EA-based filesec via vnode_get_filesec()).
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
		fsec = NULL;

		if (XATTR_VNODE_SUPPORTED(vp)) {
			/* try to get the filesec */
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
				goto out;
			}
		}
		/* if no filesec, no attributes */
		if (fsec == NULL) {
			VATTR_RETURN(vap, va_acl, NULL);
			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
		} else {
			/* looks good, try to return what we were asked for */
			VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
			VATTR_RETURN(vap, va_guuid, fsec->fsec_group);

			/* only return the ACL if we were actually asked for it */
			if (VATTR_IS_ACTIVE(vap, va_acl)) {
				if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
					VATTR_RETURN(vap, va_acl, NULL);
				} else {
					/* copy the ACL out of the filesec; caller owns 'facl' */
					facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
					if (facl == NULL) {
						kauth_filesec_free(fsec);
						error = ENOMEM;
						goto out;
					}
					__nochk_bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
					VATTR_RETURN(vap, va_acl, facl);
				}
			}
			kauth_filesec_free(fsec);
		}
	}
	/*
	 * If someone gave us an unsolicited filesec, toss it.  We promise that
	 * we're OK with a filesystem giving us anything back, but our callers
	 * only expect what they asked for.
	 */
	if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
		if (vap->va_acl != NULL) {
			kauth_acl_free(vap->va_acl);
		}
		VATTR_CLEAR_SUPPORTED(vap, va_acl);
	}

#if 0   /* enable when we have a filesystem only supporting UUIDs */
	/*
	 * Handle the case where we need a UID/GID, but only have extended
	 * security information.
	 */
	if (VATTR_NOT_RETURNED(vap, va_uid) &&
	    VATTR_IS_SUPPORTED(vap, va_uuuid) &&
	    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0) {
			VATTR_RETURN(vap, va_uid, nuid);
		}
	}
	if (VATTR_NOT_RETURNED(vap, va_gid) &&
	    VATTR_IS_SUPPORTED(vap, va_guuid) &&
	    !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
		if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0) {
			VATTR_RETURN(vap, va_gid, ngid);
		}
	}
#endif

	/* apply uid/gid == 99 and MNT_IGNORE_OWNERSHIP policy */
	vnode_attr_handle_uid_and_gid(vap, vp->v_mount, ctx);

	/*
	 * Synthesise some values that can be reasonably guessed.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_iosize)) {
		assert(vp->v_mount->mnt_vfsstat.f_iosize <= UINT32_MAX);
		VATTR_RETURN(vap, va_iosize, (uint32_t)vp->v_mount->mnt_vfsstat.f_iosize);
	}

	if (!VATTR_IS_SUPPORTED(vap, va_flags)) {
		VATTR_RETURN(vap, va_flags, 0);
	}

	if (!VATTR_IS_SUPPORTED(vap, va_filerev)) {
		VATTR_RETURN(vap, va_filerev, 0);
	}

	if (!VATTR_IS_SUPPORTED(vap, va_gen)) {
		VATTR_RETURN(vap, va_gen, 0);
	}

	/*
	 * Default sizes.  Ordering here is important, as later defaults build on earlier ones.
	 */
	if (VATTR_IS_SUPPORTED(vap, va_data_size)) {
		/* va_data_size (uint64_t) is often assigned to off_t (int64_t), which can result in a negative size. */
		if (vap->va_data_size > INT64_MAX) {
			vap->va_data_size = INT64_MAX;
		}
	} else {
		VATTR_RETURN(vap, va_data_size, 0);
	}

	/* do we want any of the possibly-computed values? */
	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_size) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		/* make sure f_bsize is valid */
		if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
			if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) {
				goto out;
			}
		}

		/* default va_data_alloc from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_data_alloc)) {
			VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
		}

		/* default va_total_size from va_data_size */
		if (!VATTR_IS_SUPPORTED(vap, va_total_size)) {
			VATTR_RETURN(vap, va_total_size, vap->va_data_size);
		}

		/* default va_total_alloc from va_total_size which is guaranteed at this point */
		if (!VATTR_IS_SUPPORTED(vap, va_total_alloc)) {
			VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
		}
	}

	/*
	 * If we don't have a change time, pull it from the modtime.
	 */
	if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time)) {
		VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
	}

	/*
	 * This is really only supported for the creation VNOPs, but since the field is there
	 * we should populate it correctly.
	 */
	VATTR_RETURN(vap, va_type, vp->v_type);

	/*
	 * The fsid can be obtained from the mountpoint directly.
	 */
	if (VATTR_IS_ACTIVE(vap, va_fsid) &&
	    (!VATTR_IS_SUPPORTED(vap, va_fsid) ||
	    vap->va_vaflags & VA_REALFSID || !(vap->va_vaflags & VA_USEFSID))) {
		VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
	}

out:
	vap->va_vaflags &= ~VA_USEFSID;

	return error;
}
3001 
3002 /*
3003  * Choose 32 bit or 64 bit fsid
3004  */
3005 uint64_t
vnode_get_va_fsid(struct vnode_attr * vap)3006 vnode_get_va_fsid(struct vnode_attr *vap)
3007 {
3008 	if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
3009 		return (uint64_t)vap->va_fsid64.val[0] + ((uint64_t)vap->va_fsid64.val[1] << 32);
3010 	}
3011 	return vap->va_fsid;
3012 }
3013 
3014 /*
3015  * Set the attributes on a vnode in a vnode context.
3016  *
3017  * Parameters:	vp			The vnode whose attributes to set.
3018  *		vap			A pointer to the attributes to set.
3019  *		ctx			The vnode context in which the
3020  *					operation is to be attempted.
3021  *
3022  * Returns:	0			Success
3023  *		!0			errno value
3024  *
3025  * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order.
3026  *
3027  *		The contents of the data area pointed to by 'vap' may be
3028  *		modified if the vnode is on a filesystem which has been
 *		mounted with ignore ownership flags, or by the underlying
3030  *		VFS itself, or by the fallback code, if the underlying VFS
3031  *		does not support ACL, UUID, or GUUID attributes directly.
3032  *
 * XXX:		We should enumerate the possible errno values here, and where
3034  *		in the code they originated.
3035  */
int
vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int     error;
#if CONFIG_FSE
	uint64_t active;
	int     is_perm_change = 0;
	int     is_stat_change = 0;
#endif

	/*
	 * Reject attempts to set unknown attributes.
	 */
	if (vap->va_active & ~VNODE_ATTR_ALL) {
		return EINVAL;
	}

	/*
	 * Make sure the filesystem is mounted R/W.
	 * If not, return an error.
	 */
	if (vfs_isrdonly(vp->v_mount)) {
		error = EROFS;
		goto out;
	}

#if DEVELOPMENT || DEBUG
	/*
	 * XXX VSWAP: Check for entitlements or special flag here
	 * so we can restrict access appropriately.
	 */
#else /* DEVELOPMENT || DEBUG */

	/* on release builds, only the kernel context may modify swap files */
	if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
		error = EPERM;
		goto out;
	}
#endif /* DEVELOPMENT || DEBUG */

#if NAMEDSTREAMS
	/* For streams, va_data_size is the only settable attribute. */
	if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
		error = EPERM;
		goto out;
	}
#endif
	/* Check for truncation */
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		switch (vp->v_type) {
		case VREG:
			/* For regular files it's ok */
			break;
		case VDIR:
			/* Not allowed to truncate directories */
			error = EISDIR;
			goto out;
		default:
			/* For everything else we will clear the bit and let underlying FS decide on the rest */
			VATTR_CLEAR_ACTIVE(vap, va_data_size);
			if (vap->va_active) {
				break;
			}
			/* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
			return 0;
		}
	}

	/*
	 * If ownership is being ignored on this volume, we silently discard
	 * ownership changes.
	 */
	if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}

	/*
	 * Make sure that extended security is enabled if we're going to try
	 * to set any.
	 */
	if (!vfs_extendedsecurity(vnode_mount(vp)) &&
	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
		KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
		error = ENOTSUP;
		goto out;
	}

	/* Never allow the setting of any unsupported superuser flags. */
	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
	}

#if CONFIG_FSE
	/*
	 * Remember all of the active attributes that we're
	 * attempting to modify.
	 */
	active = vap->va_active & ~VNODE_ATTR_RDONLY;
#endif

	error = VNOP_SETATTR(vp, vap, ctx);

	/* if the FS could not handle everything, fall back to the EA-based filesec store */
	if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) {
		error = vnode_setattr_fallback(vp, vap, ctx);
	}

#if CONFIG_FSE
#define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \
	                 VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \
	                 VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl))

	/*
	 * Now that we've changed them, decide whether to send an
	 * FSevent.
	 */
	if ((active & PERMISSION_BITS) & vap->va_supported) {
		is_perm_change = 1;
	} else {
		/*
		 * We've already checked the permission bits, and we
		 * also want to filter out access time / backup time
		 * changes.
		 */
		active &= ~(PERMISSION_BITS |
		    VNODE_ATTR_BIT(va_access_time) |
		    VNODE_ATTR_BIT(va_backup_time));

		/* Anything left to notify about? */
		if (active & vap->va_supported) {
			is_stat_change = 1;
		}
	}

	if (error == 0) {
		if (is_perm_change) {
			if (need_fsevent(FSE_CHOWN, vp)) {
				add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
			}
		} else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) {
			add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
		}
	}
#undef PERMISSION_BITS
#endif

out:
	return error;
}
3184 
3185 /*
3186  * Fallback for setting the attributes on a vnode in a vnode context.  This
3187  * Function will attempt to store ACL, UUID, and GUID information utilizing
3188  * a read/modify/write operation against an EA used as a backing store for
3189  * the object.
3190  *
3191  * Parameters:	vp			The vnode whose attributes to set.
3192  *		vap			A pointer to the attributes to set.
3193  *		ctx			The vnode context in which the
3194  *					operation is to be attempted.
3195  *
3196  * Returns:	0			Success
3197  *		!0			errno value
3198  *
3199  * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order,
3200  *		as are the fsec and lfsec, if they are used.
3201  *
3202  *		The contents of the data area pointed to by 'vap' may be
3203  *		modified to indicate that the attribute is supported for
3204  *		any given requested attribute.
3205  *
3206  * XXX:		We should enummerate the possible errno values here, and where
3207  *		in the code they originated.
3208  */
int
vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
	kauth_filesec_t fsec;		/* filesec being built/updated; may alias lfsec */
	kauth_acl_t facl;		/* ACL to write; may alias fsec->fsec_acl or vap->va_acl */
	struct kauth_filesec lfsec;	/* stack-local filesec used when none is fetched */
	int     error;

	error = 0;

	/*
	 * Extended security fallback via extended attributes.
	 *
	 * Note that we do not free the filesec; the caller is expected to
	 * do this.
	 */
	if (VATTR_NOT_RETURNED(vap, va_acl) ||
	    VATTR_NOT_RETURNED(vap, va_uuuid) ||
	    VATTR_NOT_RETURNED(vap, va_guuid)) {
		VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");

		/*
		 * Fail for file types that we don't permit extended security
		 * to be set on.
		 */
		if (!XATTR_VNODE_SUPPORTED(vp)) {
			VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
			error = EINVAL;
			goto out;
		}

		/*
		 * If we don't have all the extended security items, we need
		 * to fetch the existing data to perform a read-modify-write
		 * operation.
		 */
		fsec = NULL;
		if (!VATTR_IS_ACTIVE(vap, va_acl) ||
		    !VATTR_IS_ACTIVE(vap, va_uuuid) ||
		    !VATTR_IS_ACTIVE(vap, va_guuid)) {
			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
				KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
				goto out;
			}
		}
		/* if we didn't get a filesec, use our local one */
		if (fsec == NULL) {
			KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
			fsec = &lfsec;
		} else {
			KAUTH_DEBUG("SETATTR - updating existing filesec");
		}
		/* find the ACL */
		facl = &fsec->fsec_acl;

		/* if we're using the local filesec, we need to initialise it */
		if (fsec == &lfsec) {
			fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
			fsec->fsec_owner = kauth_null_guid;
			fsec->fsec_group = kauth_null_guid;
			facl->acl_entrycount = KAUTH_FILESEC_NOACL;
			facl->acl_flags = 0;
		}

		/*
		 * Update with the supplied attributes.
		 */
		if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
			KAUTH_DEBUG("SETATTR - updating owner UUID");
			fsec->fsec_owner = vap->va_uuuid;
			VATTR_SET_SUPPORTED(vap, va_uuuid);
		}
		if (VATTR_IS_ACTIVE(vap, va_guuid)) {
			KAUTH_DEBUG("SETATTR - updating group UUID");
			fsec->fsec_group = vap->va_guuid;
			VATTR_SET_SUPPORTED(vap, va_guuid);
		}
		if (VATTR_IS_ACTIVE(vap, va_acl)) {
			if (vap->va_acl == NULL) {
				KAUTH_DEBUG("SETATTR - removing ACL");
				facl->acl_entrycount = KAUTH_FILESEC_NOACL;
			} else {
				KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
				/*
				 * facl now points at the caller-supplied ACL instead of
				 * the one embedded in fsec; vnode_set_filesec() below
				 * takes the filesec and ACL as separate arguments.
				 */
				facl = vap->va_acl;
			}
			VATTR_SET_SUPPORTED(vap, va_acl);
		}

		/*
		 * If the filesec data is all invalid, we can just remove
		 * the EA completely.
		 */
		if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
		    kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
		    kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
			error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
			/* no attribute is ok, nothing to delete */
			if (error == ENOATTR) {
				error = 0;
			}
			VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
		} else {
			/* write the EA */
			error = vnode_set_filesec(vp, fsec, facl, ctx);
			VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
		}

		/* if we fetched a filesec, dispose of the buffer */
		if (fsec != &lfsec) {
			kauth_filesec_free(fsec);
		}
	}
out:

	return error;
}
3325 
3326 /*
3327  * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
3328  * event on a vnode.
3329  */
3330 int
vnode_notify(vnode_t vp,uint32_t events,struct vnode_attr * vap)3331 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
3332 {
3333 	/* These are the same as the corresponding knotes, at least for now.  Cheating a little. */
3334 	uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
3335 	    | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
3336 	uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
3337 	    | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
3338 	uint32_t knote_events = (events & knote_mask);
3339 
3340 	/* Permissions are not explicitly part of the kqueue model */
3341 	if (events & VNODE_EVENT_PERMS) {
3342 		knote_events |= NOTE_ATTRIB;
3343 	}
3344 
3345 	/* Directory contents information just becomes NOTE_WRITE */
3346 	if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
3347 		knote_events |= NOTE_WRITE;
3348 	}
3349 
3350 	if (knote_events) {
3351 		lock_vnode_and_post(vp, knote_events);
3352 #if CONFIG_FSE
3353 		if (vap != NULL) {
3354 			create_fsevent_from_kevent(vp, events, vap);
3355 		}
3356 #else
3357 		(void)vap;
3358 #endif
3359 	}
3360 
3361 	return 0;
3362 }
3363 
3364 
3365 
3366 int
vnode_isdyldsharedcache(vnode_t vp)3367 vnode_isdyldsharedcache(vnode_t vp)
3368 {
3369 	return (vp->v_flag & VSHARED_DYLD) ? 1 : 0;
3370 }
3371 
3372 
3373 /*
3374  * For a filesystem that isn't tracking its own vnode watchers:
3375  * check whether a vnode is being monitored.
3376  */
3377 int
vnode_ismonitored(vnode_t vp)3378 vnode_ismonitored(vnode_t vp)
3379 {
3380 	return vp->v_knotes.slh_first != NULL;
3381 }
3382 
3383 int
vnode_getbackingvnode(vnode_t in_vp,vnode_t * out_vpp)3384 vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
3385 {
3386 	if (out_vpp) {
3387 		*out_vpp = NULLVP;
3388 	}
3389 #if NULLFS
3390 	return nullfs_getbackingvnode(in_vp, out_vpp);
3391 #else
3392 #pragma unused(in_vp)
3393 	return ENOENT;
3394 #endif
3395 }
3396 
3397 /*
3398  * Initialize a struct vnode_attr and activate the attributes required
3399  * by the vnode_notify() call.
3400  */
3401 int
vfs_get_notify_attributes(struct vnode_attr * vap)3402 vfs_get_notify_attributes(struct vnode_attr *vap)
3403 {
3404 	VATTR_INIT(vap);
3405 	vap->va_active = VNODE_NOTIFY_ATTRS;
3406 	return 0;
3407 }
3408 
3409 #if CONFIG_TRIGGERS
3410 int
vfs_settriggercallback(fsid_t * fsid,vfs_trigger_callback_t vtc,void * data,uint32_t flags __unused,vfs_context_t ctx)3411 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
3412 {
3413 	int error;
3414 	mount_t mp;
3415 
3416 	mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
3417 	if (mp == NULL) {
3418 		return ENOENT;
3419 	}
3420 
3421 	error = vfs_busy(mp, LK_NOWAIT);
3422 	mount_iterdrop(mp);
3423 
3424 	if (error != 0) {
3425 		return ENOENT;
3426 	}
3427 
3428 	mount_lock(mp);
3429 	if (mp->mnt_triggercallback != NULL) {
3430 		error = EBUSY;
3431 		mount_unlock(mp);
3432 		goto out;
3433 	}
3434 
3435 	mp->mnt_triggercallback = vtc;
3436 	mp->mnt_triggerdata = data;
3437 	mount_unlock(mp);
3438 
3439 	mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
3440 
3441 out:
3442 	vfs_unbusy(mp);
3443 	return 0;
3444 }
3445 #endif /* CONFIG_TRIGGERS */
3446 
3447 /*
3448  *  Definition of vnode operations.
3449  */
3450 
3451 #if 0
3452 /*
3453 *#
3454 *#% lookup       dvp     L ? ?
3455 *#% lookup       vpp     - L -
3456 */
3457 struct vnop_lookup_args {
3458 	struct vnodeop_desc *a_desc;
3459 	vnode_t a_dvp;
3460 	vnode_t *a_vpp;
3461 	struct componentname *a_cnp;
3462 	vfs_context_t a_context;
3463 };
3464 #endif /* 0*/
3465 
3466 /*
3467  * Returns:	0			Success
3468  *	lock_fsnode:ENOENT		No such file or directory [only for VFS
3469  *					 that is not thread safe & vnode is
3470  *					 currently being/has been terminated]
3471  *	<vfs_lookup>:ENAMETOOLONG
3472  *	<vfs_lookup>:ENOENT
3473  *	<vfs_lookup>:EJUSTRETURN
3474  *	<vfs_lookup>:EPERM
3475  *	<vfs_lookup>:EISDIR
3476  *	<vfs_lookup>:ENOTDIR
3477  *	<vfs_lookup>:???
3478  *
3479  * Note:	The return codes from the underlying VFS's lookup routine can't
3480  *		be fully enumerated here, since third party VFS authors may not
3481  *		limit their error returns to the ones documented here, even
3482  *		though this may result in some programs functioning incorrectly.
3483  *
3484  *		The return codes documented above are those which may currently
3485  *		be returned by HFS from hfs_lookup, not including additional
3486  *		error code which may be propagated from underlying routines.
3487  */
3488 errno_t
VNOP_LOOKUP(vnode_t dvp,vnode_t * vpp,struct componentname * cnp,vfs_context_t ctx)3489 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
3490 {
3491 	int _err;
3492 	struct vnop_lookup_args a;
3493 
3494 	a.a_desc = &vnop_lookup_desc;
3495 	a.a_dvp = dvp;
3496 	a.a_vpp = vpp;
3497 	a.a_cnp = cnp;
3498 	a.a_context = ctx;
3499 
3500 	_err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
3501 	if (_err == 0 && *vpp) {
3502 		DTRACE_FSINFO(lookup, vnode_t, *vpp);
3503 	}
3504 
3505 	return _err;
3506 }
3507 
3508 #if 0
3509 struct vnop_compound_open_args {
3510 	struct vnodeop_desc *a_desc;
3511 	vnode_t a_dvp;
3512 	vnode_t *a_vpp;
3513 	struct componentname *a_cnp;
3514 	int32_t a_flags;
3515 	int32_t a_fmode;
3516 	struct vnode_attr *a_vap;
3517 	vfs_context_t a_context;
3518 	void *a_reserved;
3519 };
3520 #endif /* 0 */
3521 
/*
 * Dispatch a compound open (lookup + optional create + open) to the
 * directory's filesystem, then run the post-lookup hook and kqueue
 * notification that a create implies.
 */
int
VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_open_args a;
	int did_create = 0;
	int want_create;
	uint32_t tmp_status = 0;	/* stand-in status word when the caller passed none */
	struct componentname *cnp = &ndp->ni_cnd;

	want_create = (flags & O_CREAT);

	a.a_desc = &vnop_compound_open_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp; /* Could be NULL */
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_fmode = fmode;
	/* Always give the filesystem somewhere to report status bits. */
	a.a_status = (statusp != NULL) ? statusp : &tmp_status;
	a.a_vap = vap;
	a.a_context = ctx;
	a.a_open_create_authorizer = vn_authorize_create;
	a.a_open_existing_authorizer = vn_authorize_open_existing;
	a.a_reserved = NULL;

	/* Invariants: a directory is required, and vap must match O_CREAT exactly. */
	if (dvp == NULLVP) {
		panic("No dvp?");
	}
	if (want_create && !vap) {
		panic("Want create, but no vap?");
	}
	if (!want_create && vap) {
		panic("Don't want create, but have a vap?");
	}

	_err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
	if (want_create) {
		if (_err == 0 && *vpp) {
			DTRACE_FSINFO(compound_open, vnode_t, *vpp);
		} else {
			DTRACE_FSINFO(compound_open, vnode_t, dvp);
		}
	} else {
		/*
		 * NOTE(review): *vpp is dereferenced here and in the post hook
		 * below even though a_vpp is documented above as "Could be
		 * NULL" — presumably non-create callers always pass a vpp;
		 * confirm against callers.
		 */
		DTRACE_FSINFO(compound_open, vnode_t, *vpp);
	}

	/* The filesystem reports via the status word whether it created the file. */
	did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);

	if (did_create && !want_create) {
		panic("Filesystem did a create, even though none was requested?");
	}

	if (did_create) {
#if CONFIG_APPLEDOUBLE
		if (!NATIVE_XATTR(dvp)) {
			/*
			 * Remove stale Apple Double file (if any).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
		}
#endif /* CONFIG_APPLEDOUBLE */
		/* On create, provide kqueue notification */
		post_event_if_success(dvp, _err, NOTE_WRITE);
	}

	lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
#if 0 /* FSEvents... */
	if (*vpp && _err && _err != EKEEPLOOKING) {
		vnode_put(*vpp);
		*vpp = NULLVP;
	}
#endif /* 0 */

	return _err;
}
3597 
3598 #if 0
3599 struct vnop_create_args {
3600 	struct vnodeop_desc *a_desc;
3601 	vnode_t a_dvp;
3602 	vnode_t *a_vpp;
3603 	struct componentname *a_cnp;
3604 	struct vnode_attr *a_vap;
3605 	vfs_context_t a_context;
3606 };
3607 #endif /* 0*/
3608 errno_t
VNOP_CREATE(vnode_t dvp,vnode_t * vpp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx)3609 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3610 {
3611 	int _err;
3612 	struct vnop_create_args a;
3613 
3614 	a.a_desc = &vnop_create_desc;
3615 	a.a_dvp = dvp;
3616 	a.a_vpp = vpp;
3617 	a.a_cnp = cnp;
3618 	a.a_vap = vap;
3619 	a.a_context = ctx;
3620 
3621 	_err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
3622 	if (_err == 0 && *vpp) {
3623 		DTRACE_FSINFO(create, vnode_t, *vpp);
3624 	}
3625 
3626 #if CONFIG_APPLEDOUBLE
3627 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
3628 		/*
3629 		 * Remove stale Apple Double file (if any).
3630 		 */
3631 		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3632 	}
3633 #endif /* CONFIG_APPLEDOUBLE */
3634 
3635 	post_event_if_success(dvp, _err, NOTE_WRITE);
3636 
3637 	return _err;
3638 }
3639 
3640 #if 0
3641 /*
3642 *#
3643 *#% whiteout     dvp     L L L
3644 *#% whiteout     cnp     - - -
3645 *#% whiteout     flag    - - -
3646 *#
3647 */
3648 struct vnop_whiteout_args {
3649 	struct vnodeop_desc *a_desc;
3650 	vnode_t a_dvp;
3651 	struct componentname *a_cnp;
3652 	int a_flags;
3653 	vfs_context_t a_context;
3654 };
3655 #endif /* 0*/
3656 errno_t
VNOP_WHITEOUT(__unused vnode_t dvp,__unused struct componentname * cnp,__unused int flags,__unused vfs_context_t ctx)3657 VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
3658     __unused int flags, __unused vfs_context_t ctx)
3659 {
3660 	return ENOTSUP;       // XXX OBSOLETE
3661 }
3662 
3663 #if 0
3664 /*
3665 *#
3666 *#% mknod        dvp     L U U
3667 *#% mknod        vpp     - X -
3668 *#
3669 */
3670 struct vnop_mknod_args {
3671 	struct vnodeop_desc *a_desc;
3672 	vnode_t a_dvp;
3673 	vnode_t *a_vpp;
3674 	struct componentname *a_cnp;
3675 	struct vnode_attr *a_vap;
3676 	vfs_context_t a_context;
3677 };
3678 #endif /* 0*/
3679 errno_t
VNOP_MKNOD(vnode_t dvp,vnode_t * vpp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx)3680 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3681 {
3682 	int _err;
3683 	struct vnop_mknod_args a;
3684 
3685 	a.a_desc = &vnop_mknod_desc;
3686 	a.a_dvp = dvp;
3687 	a.a_vpp = vpp;
3688 	a.a_cnp = cnp;
3689 	a.a_vap = vap;
3690 	a.a_context = ctx;
3691 
3692 	_err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3693 	if (_err == 0 && *vpp) {
3694 		DTRACE_FSINFO(mknod, vnode_t, *vpp);
3695 	}
3696 
3697 	post_event_if_success(dvp, _err, NOTE_WRITE);
3698 
3699 	return _err;
3700 }
3701 
3702 #if 0
3703 /*
3704 *#
3705 *#% open         vp      L L L
3706 *#
3707 */
3708 struct vnop_open_args {
3709 	struct vnodeop_desc *a_desc;
3710 	vnode_t a_vp;
3711 	int a_mode;
3712 	vfs_context_t a_context;
3713 };
3714 #endif /* 0*/
3715 errno_t
VNOP_OPEN(vnode_t vp,int mode,vfs_context_t ctx)3716 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3717 {
3718 	int _err;
3719 	struct vnop_open_args a;
3720 
3721 	if (ctx == NULL) {
3722 		ctx = vfs_context_current();
3723 	}
3724 	a.a_desc = &vnop_open_desc;
3725 	a.a_vp = vp;
3726 	a.a_mode = mode;
3727 	a.a_context = ctx;
3728 
3729 	_err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3730 	DTRACE_FSINFO(open, vnode_t, vp);
3731 
3732 	return _err;
3733 }
3734 
3735 #if 0
3736 /*
3737 *#
3738 *#% close        vp      U U U
3739 *#
3740 */
3741 struct vnop_close_args {
3742 	struct vnodeop_desc *a_desc;
3743 	vnode_t a_vp;
3744 	int a_fflag;
3745 	vfs_context_t a_context;
3746 };
3747 #endif /* 0*/
3748 errno_t
VNOP_CLOSE(vnode_t vp,int fflag,vfs_context_t ctx)3749 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3750 {
3751 	int _err;
3752 	struct vnop_close_args a;
3753 
3754 	if (ctx == NULL) {
3755 		ctx = vfs_context_current();
3756 	}
3757 	a.a_desc = &vnop_close_desc;
3758 	a.a_vp = vp;
3759 	a.a_fflag = fflag;
3760 	a.a_context = ctx;
3761 
3762 	_err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3763 	DTRACE_FSINFO(close, vnode_t, vp);
3764 
3765 	return _err;
3766 }
3767 
3768 #if 0
3769 /*
3770 *#
3771 *#% access       vp      L L L
3772 *#
3773 */
3774 struct vnop_access_args {
3775 	struct vnodeop_desc *a_desc;
3776 	vnode_t a_vp;
3777 	int a_action;
3778 	vfs_context_t a_context;
3779 };
3780 #endif /* 0*/
3781 errno_t
VNOP_ACCESS(vnode_t vp,int action,vfs_context_t ctx)3782 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3783 {
3784 	int _err;
3785 	struct vnop_access_args a;
3786 
3787 	if (ctx == NULL) {
3788 		ctx = vfs_context_current();
3789 	}
3790 	a.a_desc = &vnop_access_desc;
3791 	a.a_vp = vp;
3792 	a.a_action = action;
3793 	a.a_context = ctx;
3794 
3795 	_err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3796 	DTRACE_FSINFO(access, vnode_t, vp);
3797 
3798 	return _err;
3799 }
3800 
3801 #if 0
3802 /*
3803 *#
3804 *#% getattr      vp      = = =
3805 *#
3806 */
3807 struct vnop_getattr_args {
3808 	struct vnodeop_desc *a_desc;
3809 	vnode_t a_vp;
3810 	struct vnode_attr *a_vap;
3811 	vfs_context_t a_context;
3812 };
3813 #endif /* 0*/
3814 errno_t
VNOP_GETATTR(vnode_t vp,struct vnode_attr * vap,vfs_context_t ctx)3815 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3816 {
3817 	int _err;
3818 	struct vnop_getattr_args a;
3819 
3820 	a.a_desc = &vnop_getattr_desc;
3821 	a.a_vp = vp;
3822 	a.a_vap = vap;
3823 	a.a_context = ctx;
3824 
3825 	_err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3826 	DTRACE_FSINFO(getattr, vnode_t, vp);
3827 
3828 	return _err;
3829 }
3830 
3831 #if 0
3832 /*
3833 *#
3834 *#% setattr      vp      L L L
3835 *#
3836 */
3837 struct vnop_setattr_args {
3838 	struct vnodeop_desc *a_desc;
3839 	vnode_t a_vp;
3840 	struct vnode_attr *a_vap;
3841 	vfs_context_t a_context;
3842 };
3843 #endif /* 0*/
/*
 * Dispatch a setattr to the vnode's filesystem, then perform VFS-level
 * post-processing: shadow ownership/mode changes to the AppleDouble
 * sidecar (non-native-xattr filesystems), invalidate the kauth
 * authorization cache when security-relevant attributes changed, and
 * post NOTE_ATTRIB on success.
 */
errno_t
VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_setattr_args a;

	a.a_desc = &vnop_setattr_desc;
	a.a_vp = vp;
	a.a_vap = vap;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(setattr, vnode_t, vp);

#if CONFIG_APPLEDOUBLE
	/*
	 * Shadow uid/gid/mod change to extended attribute file.
	 */
	if (_err == 0 && !NATIVE_XATTR(vp)) {
		struct vnode_attr *va;
		int change = 0;

		/* Build a minimal vnode_attr holding only the shadowed fields. */
		va = kalloc_type(struct vnode_attr, Z_WAITOK);
		VATTR_INIT(va);
		if (VATTR_IS_ACTIVE(vap, va_uid)) {
			VATTR_SET(va, va_uid, vap->va_uid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_gid)) {
			VATTR_SET(va, va_gid, vap->va_gid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_mode)) {
			VATTR_SET(va, va_mode, vap->va_mode);
			change = 1;
		}
		if (change) {
			vnode_t dvp;
			const char   *vname;

			/* Parent and name locate the "._" sidecar; either may be unavailable. */
			dvp = vnode_getparent(vp);
			vname = vnode_getname(vp);

			xattrfile_setattr(dvp, vname, va, ctx);
			if (dvp != NULLVP) {
				vnode_put(dvp);
			}
			if (vname != NULL) {
				vnode_putname(vname);
			}
		}
		kfree_type(struct vnode_attr, va);
	}
#endif /* CONFIG_APPLEDOUBLE */

	/*
	 * If we have changed any of the things about the file that are likely
	 * to result in changes to authorization results, blow the vnode auth
	 * cache
	 */
	if (_err == 0 && (
		    VATTR_IS_SUPPORTED(vap, va_mode) ||
		    VATTR_IS_SUPPORTED(vap, va_uid) ||
		    VATTR_IS_SUPPORTED(vap, va_gid) ||
		    VATTR_IS_SUPPORTED(vap, va_flags) ||
		    VATTR_IS_SUPPORTED(vap, va_acl) ||
		    VATTR_IS_SUPPORTED(vap, va_uuuid) ||
		    VATTR_IS_SUPPORTED(vap, va_guuid))) {
		vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);

#if NAMEDSTREAMS
		/* Also invalidate the resource-fork shadow vnode's cached rights. */
		if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
			vnode_t svp;
			if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
				vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
				vnode_put(svp);
			}
		}
#endif /* NAMEDSTREAMS */
	}


	post_event_if_success(vp, _err, NOTE_ATTRIB);

	return _err;
}
3930 
3931 
3932 #if 0
3933 /*
3934 *#
3935 *#% read         vp      L L L
3936 *#
3937 */
3938 struct vnop_read_args {
3939 	struct vnodeop_desc *a_desc;
3940 	vnode_t a_vp;
3941 	struct uio *a_uio;
3942 	int a_ioflag;
3943 	vfs_context_t a_context;
3944 };
3945 #endif /* 0*/
3946 errno_t
VNOP_READ(vnode_t vp,struct uio * uio,int ioflag,vfs_context_t ctx)3947 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3948 {
3949 	int _err;
3950 	struct vnop_read_args a;
3951 #if CONFIG_DTRACE
3952 	user_ssize_t resid = uio_resid(uio);
3953 #endif
3954 
3955 	if (ctx == NULL) {
3956 		return EINVAL;
3957 	}
3958 
3959 	a.a_desc = &vnop_read_desc;
3960 	a.a_vp = vp;
3961 	a.a_uio = uio;
3962 	a.a_ioflag = ioflag;
3963 	a.a_context = ctx;
3964 
3965 	_err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3966 	DTRACE_FSINFO_IO(read,
3967 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3968 
3969 	return _err;
3970 }
3971 
3972 
3973 #if 0
3974 /*
3975 *#
3976 *#% write        vp      L L L
3977 *#
3978 */
3979 struct vnop_write_args {
3980 	struct vnodeop_desc *a_desc;
3981 	vnode_t a_vp;
3982 	struct uio *a_uio;
3983 	int a_ioflag;
3984 	vfs_context_t a_context;
3985 };
3986 #endif /* 0*/
3987 errno_t
VNOP_WRITE(vnode_t vp,struct uio * uio,int ioflag,vfs_context_t ctx)3988 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3989 {
3990 	struct vnop_write_args a;
3991 	int _err;
3992 #if CONFIG_DTRACE
3993 	user_ssize_t resid = uio_resid(uio);
3994 #endif
3995 
3996 	if (ctx == NULL) {
3997 		return EINVAL;
3998 	}
3999 
4000 	a.a_desc = &vnop_write_desc;
4001 	a.a_vp = vp;
4002 	a.a_uio = uio;
4003 	a.a_ioflag = ioflag;
4004 	a.a_context = ctx;
4005 
4006 	_err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
4007 	DTRACE_FSINFO_IO(write,
4008 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4009 
4010 	post_event_if_success(vp, _err, NOTE_WRITE);
4011 
4012 	return _err;
4013 }
4014 
4015 
4016 #if 0
4017 /*
4018 *#
4019 *#% ioctl        vp      U U U
4020 *#
4021 */
4022 struct vnop_ioctl_args {
4023 	struct vnodeop_desc *a_desc;
4024 	vnode_t a_vp;
4025 	u_long a_command;
4026 	caddr_t a_data;
4027 	int a_fflag;
4028 	vfs_context_t a_context;
4029 };
4030 #endif /* 0*/
4031 errno_t
VNOP_IOCTL(vnode_t vp,u_long command,caddr_t data,int fflag,vfs_context_t ctx)4032 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
4033 {
4034 	int _err;
4035 	struct vnop_ioctl_args a;
4036 
4037 	if (ctx == NULL) {
4038 		ctx = vfs_context_current();
4039 	}
4040 
4041 	/*
4042 	 * This check should probably have been put in the TTY code instead...
4043 	 *
4044 	 * We have to be careful about what we assume during startup and shutdown.
4045 	 * We have to be able to use the root filesystem's device vnode even when
4046 	 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
4047 	 * structure.  If there is no data pointer, it doesn't matter whether
4048 	 * the device is 64-bit ready.  Any command (like DKIOCSYNCHRONIZE)
4049 	 * which passes NULL for its data pointer can therefore be used during
4050 	 * mount or unmount of the root filesystem.
4051 	 *
4052 	 * Depending on what root filesystems need to do during mount/unmount, we
4053 	 * may need to loosen this check again in the future.
4054 	 */
4055 	if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
4056 		if (data != NULL && !vnode_vfs64bitready(vp)) {
4057 			return ENOTTY;
4058 		}
4059 	}
4060 
4061 	if ((command == DKIOCISSOLIDSTATE) && (vp == rootvp) && rootvp_is_ssd && data) {
4062 		*data = 1;
4063 		return 0;
4064 	}
4065 
4066 	a.a_desc = &vnop_ioctl_desc;
4067 	a.a_vp = vp;
4068 	a.a_command = command;
4069 	a.a_data = data;
4070 	a.a_fflag = fflag;
4071 	a.a_context = ctx;
4072 
4073 	_err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
4074 	DTRACE_FSINFO(ioctl, vnode_t, vp);
4075 
4076 	return _err;
4077 }
4078 
4079 
4080 #if 0
4081 /*
4082 *#
4083 *#% select       vp      U U U
4084 *#
4085 */
4086 struct vnop_select_args {
4087 	struct vnodeop_desc *a_desc;
4088 	vnode_t a_vp;
4089 	int a_which;
4090 	int a_fflags;
4091 	void *a_wql;
4092 	vfs_context_t a_context;
4093 };
4094 #endif /* 0*/
4095 errno_t
VNOP_SELECT(vnode_t vp,int which,int fflags,void * wql,vfs_context_t ctx)4096 VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t ctx)
4097 {
4098 	int _err;
4099 	struct vnop_select_args a;
4100 
4101 	if (ctx == NULL) {
4102 		ctx = vfs_context_current();
4103 	}
4104 	a.a_desc = &vnop_select_desc;
4105 	a.a_vp = vp;
4106 	a.a_which = which;
4107 	a.a_fflags = fflags;
4108 	a.a_context = ctx;
4109 	a.a_wql = wql;
4110 
4111 	_err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
4112 	DTRACE_FSINFO(select, vnode_t, vp);
4113 
4114 	return _err;
4115 }
4116 
4117 
4118 #if 0
4119 /*
4120 *#
4121 *#% exchange fvp         L L L
4122 *#% exchange tvp         L L L
4123 *#
4124 */
4125 struct vnop_exchange_args {
4126 	struct vnodeop_desc *a_desc;
4127 	vnode_t a_fvp;
4128 	vnode_t a_tvp;
4129 	int a_options;
4130 	vfs_context_t a_context;
4131 };
4132 #endif /* 0*/
4133 errno_t
VNOP_EXCHANGE(vnode_t fvp,vnode_t tvp,int options,vfs_context_t ctx)4134 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
4135 {
4136 	int _err;
4137 	struct vnop_exchange_args a;
4138 
4139 	a.a_desc = &vnop_exchange_desc;
4140 	a.a_fvp = fvp;
4141 	a.a_tvp = tvp;
4142 	a.a_options = options;
4143 	a.a_context = ctx;
4144 
4145 	_err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
4146 	DTRACE_FSINFO(exchange, vnode_t, fvp);
4147 
4148 	/* Don't post NOTE_WRITE because file descriptors follow the data ... */
4149 	post_event_if_success(fvp, _err, NOTE_ATTRIB);
4150 	post_event_if_success(tvp, _err, NOTE_ATTRIB);
4151 
4152 	return _err;
4153 }
4154 
4155 
4156 #if 0
4157 /*
4158 *#
4159 *#% revoke       vp      U U U
4160 *#
4161 */
4162 struct vnop_revoke_args {
4163 	struct vnodeop_desc *a_desc;
4164 	vnode_t a_vp;
4165 	int a_flags;
4166 	vfs_context_t a_context;
4167 };
4168 #endif /* 0*/
4169 errno_t
VNOP_REVOKE(vnode_t vp,int flags,vfs_context_t ctx)4170 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
4171 {
4172 	struct vnop_revoke_args a;
4173 	int _err;
4174 
4175 	a.a_desc = &vnop_revoke_desc;
4176 	a.a_vp = vp;
4177 	a.a_flags = flags;
4178 	a.a_context = ctx;
4179 
4180 	_err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
4181 	DTRACE_FSINFO(revoke, vnode_t, vp);
4182 
4183 	return _err;
4184 }
4185 
4186 
4187 #if 0
4188 /*
4189 *#
4190 *# mmap_check - vp U U U
4191 *#
4192 */
4193 struct vnop_mmap_check_args {
4194 	struct vnodeop_desc *a_desc;
4195 	vnode_t a_vp;
4196 	int a_flags;
4197 	vfs_context_t a_context;
4198 };
4199 #endif /* 0 */
4200 errno_t
VNOP_MMAP_CHECK(vnode_t vp,int flags,vfs_context_t ctx)4201 VNOP_MMAP_CHECK(vnode_t vp, int flags, vfs_context_t ctx)
4202 {
4203 	int _err;
4204 	struct vnop_mmap_check_args a;
4205 
4206 	a.a_desc = &vnop_mmap_check_desc;
4207 	a.a_vp = vp;
4208 	a.a_flags = flags;
4209 	a.a_context = ctx;
4210 
4211 	_err = (*vp->v_op[vnop_mmap_check_desc.vdesc_offset])(&a);
4212 	if (_err == ENOTSUP) {
4213 		_err = 0;
4214 	}
4215 	DTRACE_FSINFO(mmap_check, vnode_t, vp);
4216 
4217 	return _err;
4218 }
4219 
4220 #if 0
4221 /*
4222 *#
4223 *# mmap - vp U U U
4224 *#
4225 */
4226 struct vnop_mmap_args {
4227 	struct vnodeop_desc *a_desc;
4228 	vnode_t a_vp;
4229 	int a_fflags;
4230 	vfs_context_t a_context;
4231 };
4232 #endif /* 0*/
4233 errno_t
VNOP_MMAP(vnode_t vp,int fflags,vfs_context_t ctx)4234 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
4235 {
4236 	int _err;
4237 	struct vnop_mmap_args a;
4238 
4239 	a.a_desc = &vnop_mmap_desc;
4240 	a.a_vp = vp;
4241 	a.a_fflags = fflags;
4242 	a.a_context = ctx;
4243 
4244 	_err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
4245 	DTRACE_FSINFO(mmap, vnode_t, vp);
4246 
4247 	return _err;
4248 }
4249 
4250 
4251 #if 0
4252 /*
4253 *#
4254 *# mnomap - vp U U U
4255 *#
4256 */
4257 struct vnop_mnomap_args {
4258 	struct vnodeop_desc *a_desc;
4259 	vnode_t a_vp;
4260 	vfs_context_t a_context;
4261 };
4262 #endif /* 0*/
4263 errno_t
VNOP_MNOMAP(vnode_t vp,vfs_context_t ctx)4264 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
4265 {
4266 	int _err;
4267 	struct vnop_mnomap_args a;
4268 
4269 	a.a_desc = &vnop_mnomap_desc;
4270 	a.a_vp = vp;
4271 	a.a_context = ctx;
4272 
4273 	_err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
4274 	DTRACE_FSINFO(mnomap, vnode_t, vp);
4275 
4276 	return _err;
4277 }
4278 
4279 
4280 #if 0
4281 /*
4282 *#
4283 *#% fsync        vp      L L L
4284 *#
4285 */
4286 struct vnop_fsync_args {
4287 	struct vnodeop_desc *a_desc;
4288 	vnode_t a_vp;
4289 	int a_waitfor;
4290 	vfs_context_t a_context;
4291 };
4292 #endif /* 0*/
4293 errno_t
VNOP_FSYNC(vnode_t vp,int waitfor,vfs_context_t ctx)4294 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
4295 {
4296 	struct vnop_fsync_args a;
4297 	int _err;
4298 
4299 	a.a_desc = &vnop_fsync_desc;
4300 	a.a_vp = vp;
4301 	a.a_waitfor = waitfor;
4302 	a.a_context = ctx;
4303 
4304 	_err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
4305 	DTRACE_FSINFO(fsync, vnode_t, vp);
4306 
4307 	return _err;
4308 }
4309 
4310 
4311 #if 0
4312 /*
4313 *#
4314 *#% remove       dvp     L U U
4315 *#% remove       vp      L U U
4316 *#
4317 */
4318 struct vnop_remove_args {
4319 	struct vnodeop_desc *a_desc;
4320 	vnode_t a_dvp;
4321 	vnode_t a_vp;
4322 	struct componentname *a_cnp;
4323 	int a_flags;
4324 	vfs_context_t a_context;
4325 };
4326 #endif /* 0*/
4327 errno_t
VNOP_REMOVE(vnode_t dvp,vnode_t vp,struct componentname * cnp,int flags,vfs_context_t ctx)4328 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
4329 {
4330 	int _err;
4331 	struct vnop_remove_args a;
4332 
4333 	a.a_desc = &vnop_remove_desc;
4334 	a.a_dvp = dvp;
4335 	a.a_vp = vp;
4336 	a.a_cnp = cnp;
4337 	a.a_flags = flags;
4338 	a.a_context = ctx;
4339 
4340 	_err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
4341 	DTRACE_FSINFO(remove, vnode_t, vp);
4342 
4343 	if (_err == 0) {
4344 		vnode_setneedinactive(vp);
4345 #if CONFIG_APPLEDOUBLE
4346 		if (!(NATIVE_XATTR(dvp))) {
4347 			/*
4348 			 * Remove any associated extended attribute file (._ AppleDouble file).
4349 			 */
4350 			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4351 		}
4352 #endif /* CONFIG_APPLEDOUBLE */
4353 	}
4354 
4355 	post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4356 	post_event_if_success(dvp, _err, NOTE_WRITE);
4357 
4358 	return _err;
4359 }
4360 
/*
 * Dispatch a compound remove (lookup + unlink) to the directory's
 * filesystem, then perform the same post-processing as VNOP_REMOVE and,
 * when the caller did not supply a vnode, run the post-lookup hook.
 */
int
VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_remove_args a;
	/* Remember whether the caller passed a vnode in, or expects us to look it up. */
	int no_vp = (*vpp == NULLVP);

	a.a_desc = &vnop_compound_remove_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = &ndp->ni_cnd;
	a.a_flags = flags;
	a.a_vap = vap;
	a.a_context = ctx;
	a.a_remove_authorizer = vn_authorize_unlink;

	_err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
	if (_err == 0 && *vpp) {
		DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
	} else {
		DTRACE_FSINFO(compound_remove, vnode_t, dvp);
	}
	if (_err == 0) {
		/* Defer final cleanup of the unlinked vnode to VNOP_INACTIVE. */
		vnode_setneedinactive(*vpp);
#if CONFIG_APPLEDOUBLE
		if (!(NATIVE_XATTR(dvp))) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
		}
#endif /* CONFIG_APPLEDOUBLE */
	}

	post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
	post_event_if_success(dvp, _err, NOTE_WRITE);

	/* If we performed the lookup, finish it and drop the vnode on error. */
	if (no_vp) {
		lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
		if (*vpp && _err && _err != EKEEPLOOKING) {
			vnode_put(*vpp);
			*vpp = NULLVP;
		}
	}

	//printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);

	return _err;
}
4410 
4411 #if 0
4412 /*
4413 *#
4414 *#% link         vp      U U U
4415 *#% link         tdvp    L U U
4416 *#
4417 */
4418 struct vnop_link_args {
4419 	struct vnodeop_desc *a_desc;
4420 	vnode_t a_vp;
4421 	vnode_t a_tdvp;
4422 	struct componentname *a_cnp;
4423 	vfs_context_t a_context;
4424 };
4425 #endif /* 0*/
4426 errno_t
VNOP_LINK(vnode_t vp,vnode_t tdvp,struct componentname * cnp,vfs_context_t ctx)4427 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
4428 {
4429 	int _err;
4430 	struct vnop_link_args a;
4431 
4432 #if CONFIG_APPLEDOUBLE
4433 	/*
4434 	 * For file systems with non-native extended attributes,
4435 	 * disallow linking to an existing "._" Apple Double file.
4436 	 */
4437 	if (!NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
4438 		const char   *vname;
4439 
4440 		vname = vnode_getname(vp);
4441 		if (vname != NULL) {
4442 			_err = 0;
4443 			if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
4444 				_err = EPERM;
4445 			}
4446 			vnode_putname(vname);
4447 			if (_err) {
4448 				return _err;
4449 			}
4450 		}
4451 	}
4452 #endif /* CONFIG_APPLEDOUBLE */
4453 
4454 	a.a_desc = &vnop_link_desc;
4455 	a.a_vp = vp;
4456 	a.a_tdvp = tdvp;
4457 	a.a_cnp = cnp;
4458 	a.a_context = ctx;
4459 
4460 	_err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
4461 	DTRACE_FSINFO(link, vnode_t, vp);
4462 
4463 	post_event_if_success(vp, _err, NOTE_LINK);
4464 	post_event_if_success(tdvp, _err, NOTE_WRITE);
4465 
4466 	return _err;
4467 }
4468 
/*
 * Perform a rename, choosing between the compound and the classic VNOP
 * paths, and transparently renaming/removing any "._" AppleDouble
 * companion file on filesystems without native extended attributes.
 *
 * fvpp/tvpp may point to NULLVP when the compound path is available
 * (the filesystem resolves the names itself).  On the non-batched path
 * a resolved source vnode is mandatory.
 */
errno_t
vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
    struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
    vfs_rename_flags_t flags, vfs_context_t ctx)
{
	int _err;
	struct nameidata *fromnd = NULL;
	struct nameidata *tond = NULL;
#if CONFIG_APPLEDOUBLE
	vnode_t src_attr_vp = NULLVP;
	vnode_t dst_attr_vp = NULLVP;
	char smallname1[48];
	char smallname2[48];
	char *xfromname = NULL;
	char *xtoname = NULL;
#endif /* CONFIG_APPLEDOUBLE */
	int batched;
	uint32_t tdfflags;      // Target directory file flags

	batched = vnode_compound_rename_available(fdvp);

	if (!batched) {
		if (*fvpp == NULLVP) {
			panic("Not batched, and no fvp?");
		}
	}

#if CONFIG_APPLEDOUBLE
	/*
	 * We need to preflight any potential AppleDouble file for the source file
	 * before doing the rename operation, since we could potentially be doing
	 * this operation on a network filesystem, and would end up duplicating
	 * the work.  Also, save the source and destination names.  Skip it if the
	 * source has a "._" prefix.
	 */

	size_t xfromname_len = 0;
	size_t xtoname_len = 0;
	if (!NATIVE_XATTR(fdvp) &&
	    !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
		int error;

		/* Get source attribute file name ("._" + source name).
		 * +3 covers the "._" prefix plus the NUL terminator. */
		xfromname_len = fcnp->cn_namelen + 3;
		if (xfromname_len > sizeof(smallname1)) {
			xfromname = kalloc_data(xfromname_len, Z_WAITOK);
		} else {
			xfromname = &smallname1[0];
		}
		strlcpy(xfromname, "._", xfromname_len);
		strlcat(xfromname, fcnp->cn_nameptr, xfromname_len);

		/* Get destination attribute file name. */
		xtoname_len = tcnp->cn_namelen + 3;
		if (xtoname_len > sizeof(smallname2)) {
			xtoname = kalloc_data(xtoname_len, Z_WAITOK);
		} else {
			xtoname = &smallname2[0];
		}
		strlcpy(xtoname, "._", xtoname_len);
		strlcat(xtoname, tcnp->cn_nameptr, xtoname_len);

		/*
		 * Look up source attribute file, keep reference on it if exists.
		 * Note that we do the namei with the nameiop of RENAME, which is different than
		 * in the rename syscall. It's OK if the source file does not exist, since this
		 * is only for AppleDouble files.
		 */
		fromnd = kalloc_type(struct nameidata, Z_WAITOK);
		NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
		    UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
		fromnd->ni_dvp = fdvp;
		error = namei(fromnd);

		/*
		 * If there was an error looking up source attribute file,
		 * we'll behave as if it didn't exist.
		 */

		if (error == 0) {
			if (fromnd->ni_vp) {
				/* src_attr_vp indicates need to call vnode_put / nameidone later */
				src_attr_vp = fromnd->ni_vp;

				if (fromnd->ni_vp->v_type != VREG) {
					src_attr_vp = NULLVP;
					vnode_put(fromnd->ni_vp);
				}
			}
			/*
			 * Either we got an invalid vnode type (not a regular file) or the namei lookup
			 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
			 * have a vnode here, so we drop our namei buffer for the source attribute file
			 */
			if (src_attr_vp == NULLVP) {
				nameidone(fromnd);
			}
		}
	}
#endif /* CONFIG_APPLEDOUBLE */

	if (batched) {
		_err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
		if (_err != 0) {
			printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
		}
	} else {
		if (flags) {
			_err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
			if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
				// Legacy...
				if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
					fcnp->cn_flags |= CN_SECLUDE_RENAME;
					_err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
				}
			}
		} else {
			_err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
		}
	}

	/*
	 * If moved to a new directory that is restricted,
	 * set the restricted flag on the item moved.
	 */
	if (_err == 0) {
		_err = vnode_flags(tdvp, &tdfflags, ctx);
		if (_err == 0) {
			uint32_t inherit_flags = tdfflags & (UF_DATAVAULT | SF_RESTRICTED);
			if (inherit_flags) {
				uint32_t fflags;
				_err = vnode_flags(*fvpp, &fflags, ctx);
				/* Only issue a setattr if the flags are not already set. */
				if (_err == 0 && fflags != (fflags | inherit_flags)) {
					struct vnode_attr va;
					VATTR_INIT(&va);
					VATTR_SET(&va, va_flags, fflags | inherit_flags);
					_err = vnode_setattr(*fvpp, &va, ctx);
				}
			}
		}
	}

#if CONFIG_MACF
	/* Tell the MAC framework about the completed rename/swap. */
	if (_err == 0) {
		if (flags & VFS_RENAME_SWAP) {
			mac_vnode_notify_rename_swap(
				ctx,                        /* ctx */
				fdvp,                       /* fdvp */
				*fvpp,                      /* fvp */
				fcnp,                       /* fcnp */
				tdvp,                       /* tdvp */
				*tvpp,                      /* tvp */
				tcnp                        /* tcnp */
				);
		} else {
			mac_vnode_notify_rename(
				ctx,                        /* ctx */
				*fvpp,                      /* fvp */
				tdvp,                       /* tdvp */
				tcnp                        /* tcnp */
				);
		}
	}
#endif

#if CONFIG_APPLEDOUBLE
	/*
	 * Rename any associated extended attribute file (._ AppleDouble file).
	 */
	if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
		int error = 0;

		/*
		 * Get destination attribute file vnode.
		 * Note that tdvp already has an iocount reference. Make sure to check that we
		 * get a valid vnode from namei.
		 */
		tond = kalloc_type(struct nameidata, Z_WAITOK);
		NDINIT(tond, RENAME, OP_RENAME,
		    NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
		    CAST_USER_ADDR_T(xtoname), ctx);
		tond->ni_dvp = tdvp;
		error = namei(tond);

		if (error) {
			goto ad_error;
		}

		if (tond->ni_vp) {
			dst_attr_vp = tond->ni_vp;
		}

		if (src_attr_vp) {
			/* Snapshot identity so we can tell whether the filesystem
			 * already updated the vnode's name/parent during the rename. */
			const char *old_name = src_attr_vp->v_name;
			vnode_t old_parent = src_attr_vp->v_parent;

			if (batched) {
				error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
				    tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
				    0, ctx);
			} else {
				error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
				    tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
			}

			if (error == 0 && old_name == src_attr_vp->v_name &&
			    old_parent == src_attr_vp->v_parent) {
				int update_flags = VNODE_UPDATE_NAME;

				if (fdvp != tdvp) {
					update_flags |= VNODE_UPDATE_PARENT;
				}

				if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
					vnode_update_identity(src_attr_vp, tdvp,
					    tond->ni_cnd.cn_nameptr,
					    tond->ni_cnd.cn_namelen,
					    tond->ni_cnd.cn_hash,
					    update_flags);
				}
			}

			/* kevent notifications for moving resource files
			 * _err is zero if we're here, so no need to notify directories, code
			 * below will do that.  only need to post the rename on the source and
			 * possibly a delete on the dest
			 */
			post_event_if_success(src_attr_vp, error, NOTE_RENAME);
			if (dst_attr_vp) {
				post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
			}
		} else if (dst_attr_vp) {
			/*
			 * Just delete destination attribute file vnode if it exists, since
			 * we didn't have a source attribute file.
			 * Note that tdvp already has an iocount reference.
			 */

			struct vnop_remove_args args;

			args.a_desc    = &vnop_remove_desc;
			args.a_dvp     = tdvp;
			args.a_vp      = dst_attr_vp;
			args.a_cnp     = &tond->ni_cnd;
			args.a_context = ctx;

			if (error == 0) {
				error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);

				if (error == 0) {
					vnode_setneedinactive(dst_attr_vp);
				}
			}

			/* kevent notification for deleting the destination's attribute file
			 * if it existed.  Only need to post the delete on the destination, since
			 * the code below will handle the directories.
			 */
			post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
		}
	}
ad_error:
	/* Release the attribute-file references and namei buffers taken above. */
	if (src_attr_vp) {
		vnode_put(src_attr_vp);
		nameidone(fromnd);
	}
	if (dst_attr_vp) {
		vnode_put(dst_attr_vp);
		nameidone(tond);
	}
	if (xfromname && xfromname != &smallname1[0]) {
		kfree_data(xfromname, xfromname_len);
	}
	if (xtoname && xtoname != &smallname2[0]) {
		kfree_data(xtoname, xtoname_len);
	}
#endif /* CONFIG_APPLEDOUBLE */
	/* kfree_type tolerates NULL, so unconditionally free both. */
	kfree_type(struct nameidata, fromnd);
	kfree_type(struct nameidata, tond);
	return _err;
}
4750 
4751 
4752 #if 0
4753 /*
4754 *#
4755 *#% rename       fdvp    U U U
4756 *#% rename       fvp     U U U
4757 *#% rename       tdvp    L U U
4758 *#% rename       tvp     X U U
4759 *#
4760 */
4761 struct vnop_rename_args {
4762 	struct vnodeop_desc *a_desc;
4763 	vnode_t a_fdvp;
4764 	vnode_t a_fvp;
4765 	struct componentname *a_fcnp;
4766 	vnode_t a_tdvp;
4767 	vnode_t a_tvp;
4768 	struct componentname *a_tcnp;
4769 	vfs_context_t a_context;
4770 };
4771 #endif /* 0*/
4772 errno_t
VNOP_RENAME(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_context_t ctx)4773 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4774     struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4775     vfs_context_t ctx)
4776 {
4777 	int _err = 0;
4778 	struct vnop_rename_args a;
4779 
4780 	a.a_desc = &vnop_rename_desc;
4781 	a.a_fdvp = fdvp;
4782 	a.a_fvp = fvp;
4783 	a.a_fcnp = fcnp;
4784 	a.a_tdvp = tdvp;
4785 	a.a_tvp = tvp;
4786 	a.a_tcnp = tcnp;
4787 	a.a_context = ctx;
4788 
4789 	/* do the rename of the main file. */
4790 	_err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4791 	DTRACE_FSINFO(rename, vnode_t, fdvp);
4792 
4793 	if (_err) {
4794 		return _err;
4795 	}
4796 
4797 	return post_rename(fdvp, fvp, tdvp, tvp);
4798 }
4799 
4800 static errno_t
post_rename(vnode_t fdvp,vnode_t fvp,vnode_t tdvp,vnode_t tvp)4801 post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
4802 {
4803 	if (tvp && tvp != fvp) {
4804 		vnode_setneedinactive(tvp);
4805 	}
4806 
4807 	/* Wrote at least one directory.  If transplanted a dir, also changed link counts */
4808 	int events = NOTE_WRITE;
4809 	if (vnode_isdir(fvp)) {
4810 		/* Link count on dir changed only if we are moving a dir and...
4811 		 *      --Moved to new dir, not overwriting there
4812 		 *      --Kept in same dir and DID overwrite
4813 		 */
4814 		if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4815 			events |= NOTE_LINK;
4816 		}
4817 	}
4818 
4819 	lock_vnode_and_post(fdvp, events);
4820 	if (fdvp != tdvp) {
4821 		lock_vnode_and_post(tdvp, events);
4822 	}
4823 
4824 	/* If you're replacing the target, post a deletion for it */
4825 	if (tvp) {
4826 		lock_vnode_and_post(tvp, NOTE_DELETE);
4827 	}
4828 
4829 	lock_vnode_and_post(fvp, NOTE_RENAME);
4830 
4831 	return 0;
4832 }
4833 
4834 #if 0
4835 /*
4836 *#
4837 *#% renamex      fdvp    U U U
4838 *#% renamex      fvp     U U U
4839 *#% renamex      tdvp    L U U
4840 *#% renamex      tvp     X U U
4841 *#
4842 */
4843 struct vnop_renamex_args {
4844 	struct vnodeop_desc *a_desc;
4845 	vnode_t a_fdvp;
4846 	vnode_t a_fvp;
4847 	struct componentname *a_fcnp;
4848 	vnode_t a_tdvp;
4849 	vnode_t a_tvp;
4850 	struct componentname *a_tcnp;
4851 	vfs_rename_flags_t a_flags;
4852 	vfs_context_t a_context;
4853 };
4854 #endif /* 0*/
4855 errno_t
VNOP_RENAMEX(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_rename_flags_t flags,vfs_context_t ctx)4856 VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4857     struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4858     vfs_rename_flags_t flags, vfs_context_t ctx)
4859 {
4860 	int _err = 0;
4861 	struct vnop_renamex_args a;
4862 
4863 	a.a_desc = &vnop_renamex_desc;
4864 	a.a_fdvp = fdvp;
4865 	a.a_fvp = fvp;
4866 	a.a_fcnp = fcnp;
4867 	a.a_tdvp = tdvp;
4868 	a.a_tvp = tvp;
4869 	a.a_tcnp = tcnp;
4870 	a.a_flags = flags;
4871 	a.a_context = ctx;
4872 
4873 	/* do the rename of the main file. */
4874 	_err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
4875 	DTRACE_FSINFO(renamex, vnode_t, fdvp);
4876 
4877 	if (_err) {
4878 		return _err;
4879 	}
4880 
4881 	return post_rename(fdvp, fvp, tdvp, tvp);
4882 }
4883 
4884 
/*
 * Compound rename: the filesystem resolves the source and/or target
 * names itself when *fvpp / *tvpp come in as NULLVP, performing
 * lookup + authorization + rename in one call.
 */
int
VNOP_COMPOUND_RENAME(
	struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
	struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
	uint32_t flags, vfs_context_t ctx)
{
	int _err = 0;
	int events;
	struct vnop_compound_rename_args a;
	/* Track which vnodes the filesystem must look up, so we know which
	 * compound-lookup post hooks to run afterwards. */
	int no_fvp, no_tvp;

	no_fvp = (*fvpp) == NULLVP;
	no_tvp = (*tvpp) == NULLVP;

	a.a_desc = &vnop_compound_rename_desc;

	a.a_fdvp = fdvp;
	a.a_fvpp = fvpp;
	a.a_fcnp = fcnp;
	a.a_fvap = fvap;

	a.a_tdvp = tdvp;
	a.a_tvpp = tvpp;
	a.a_tcnp = tcnp;
	a.a_tvap = tvap;

	a.a_flags = flags;
	a.a_context = ctx;
	/* Filesystems call back through this to authorize the rename. */
	a.a_rename_authorizer = vn_authorize_rename;
	a.a_reserved = NULL;

	/* do the rename of the main file. */
	_err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(compound_rename, vnode_t, fdvp);

	if (_err == 0) {
		if (*tvpp && *tvpp != *fvpp) {
			vnode_setneedinactive(*tvpp);
		}
	}

	/* Wrote at least one directory.  If transplanted a dir, also changed link counts */
	if (_err == 0 && *fvpp != *tvpp) {
		if (!*fvpp) {
			panic("No fvpp after compound rename?");
		}

		events = NOTE_WRITE;
		if (vnode_isdir(*fvpp)) {
			/* Link count on dir changed only if we are moving a dir and...
			 *      --Moved to new dir, not overwriting there
			 *      --Kept in same dir and DID overwrite
			 */
			if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
				events |= NOTE_LINK;
			}
		}

		lock_vnode_and_post(fdvp, events);
		if (fdvp != tdvp) {
			lock_vnode_and_post(tdvp, events);
		}

		/* If you're replacing the target, post a deletion for it */
		if (*tvpp) {
			lock_vnode_and_post(*tvpp, NOTE_DELETE);
		}

		lock_vnode_and_post(*fvpp, NOTE_RENAME);
	}

	/* Finish any lookups the filesystem performed on our behalf. */
	if (no_fvp) {
		lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
	}
	if (no_tvp && *tvpp != NULLVP) {
		lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
	}

	/* On real failure (not EKEEPLOOKING, which continues the lookup),
	 * drop the iocounts on any vnodes the filesystem returned. */
	if (_err && _err != EKEEPLOOKING) {
		if (*fvpp) {
			vnode_put(*fvpp);
			*fvpp = NULLVP;
		}
		if (*tvpp) {
			vnode_put(*tvpp);
			*tvpp = NULLVP;
		}
	}

	return _err;
}
4976 
4977 int
vn_mkdir(struct vnode * dvp,struct vnode ** vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)4978 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4979     struct vnode_attr *vap, vfs_context_t ctx)
4980 {
4981 	if (ndp->ni_cnd.cn_nameiop != CREATE) {
4982 		panic("Non-CREATE nameiop in vn_mkdir()?");
4983 	}
4984 
4985 	if (vnode_compound_mkdir_available(dvp)) {
4986 		return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4987 	} else {
4988 		return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4989 	}
4990 }
4991 
4992 #if 0
4993 /*
4994 *#
4995 *#% mkdir        dvp     L U U
4996 *#% mkdir        vpp     - L -
4997 *#
4998 */
4999 struct vnop_mkdir_args {
5000 	struct vnodeop_desc *a_desc;
5001 	vnode_t a_dvp;
5002 	vnode_t *a_vpp;
5003 	struct componentname *a_cnp;
5004 	struct vnode_attr *a_vap;
5005 	vfs_context_t a_context;
5006 };
5007 #endif /* 0*/
5008 errno_t
VNOP_MKDIR(struct vnode * dvp,struct vnode ** vpp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx)5009 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5010     struct vnode_attr *vap, vfs_context_t ctx)
5011 {
5012 	int _err;
5013 	struct vnop_mkdir_args a;
5014 
5015 	a.a_desc = &vnop_mkdir_desc;
5016 	a.a_dvp = dvp;
5017 	a.a_vpp = vpp;
5018 	a.a_cnp = cnp;
5019 	a.a_vap = vap;
5020 	a.a_context = ctx;
5021 
5022 	_err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
5023 	if (_err == 0 && *vpp) {
5024 		DTRACE_FSINFO(mkdir, vnode_t, *vpp);
5025 	}
5026 #if CONFIG_APPLEDOUBLE
5027 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
5028 		/*
5029 		 * Remove stale Apple Double file (if any).
5030 		 */
5031 		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
5032 	}
5033 #endif /* CONFIG_APPLEDOUBLE */
5034 
5035 	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5036 
5037 	return _err;
5038 }
5039 
/*
 * Compound mkdir: lookup + create performed by the filesystem in one
 * call.  On success the compound-lookup post hook caches the result.
 */
int
VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
    struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_mkdir_args a;

	a.a_desc = &vnop_compound_mkdir_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = &ndp->ni_cnd;
	a.a_vap = vap;
	a.a_flags = 0;
	a.a_context = ctx;
#if 0
	a.a_mkdir_authorizer = vn_authorize_mkdir;
#endif /* 0 */
	a.a_reserved = NULL;

	_err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
	if (_err == 0 && *vpp) {
		DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
	}
#if CONFIG_APPLEDOUBLE
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
	}
#endif /* CONFIG_APPLEDOUBLE */

	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);

	/* On success, enter the new directory in the name cache (last arg). */
	lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
	/* EKEEPLOOKING keeps the vnode so the lookup can continue;
	 * any other error drops the iocount the filesystem returned. */
	if (*vpp && _err && _err != EKEEPLOOKING) {
		vnode_put(*vpp);
		*vpp = NULLVP;
	}

	return _err;
}
5082 
5083 int
vn_rmdir(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)5084 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
5085 {
5086 	if (vnode_compound_rmdir_available(dvp)) {
5087 		return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
5088 	} else {
5089 		if (*vpp == NULLVP) {
5090 			panic("NULL vp, but not a compound VNOP?");
5091 		}
5092 		if (vap != NULL) {
5093 			panic("Non-NULL vap, but not a compound VNOP?");
5094 		}
5095 		return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
5096 	}
5097 }
5098 
5099 #if 0
5100 /*
5101 *#
5102 *#% rmdir        dvp     L U U
5103 *#% rmdir        vp      L U U
5104 *#
5105 */
5106 struct vnop_rmdir_args {
5107 	struct vnodeop_desc *a_desc;
5108 	vnode_t a_dvp;
5109 	vnode_t a_vp;
5110 	struct componentname *a_cnp;
5111 	vfs_context_t a_context;
5112 };
5113 
5114 #endif /* 0*/
/*
 * Remove the directory vp, named by cnp, from dvp.  Note that unlike
 * VNOP_REMOVE (which dispatches via dvp), this dispatches through the
 * target vnode's own operations vector.
 */
errno_t
VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
{
	int _err;
	struct vnop_rmdir_args a;

	a.a_desc = &vnop_rmdir_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(rmdir, vnode_t, vp);

	if (_err == 0) {
		vnode_setneedinactive(vp);
#if CONFIG_APPLEDOUBLE
		if (!(NATIVE_XATTR(dvp))) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
		}
#endif
	}

	/* If you delete a dir, it loses its "." reference --> NOTE_LINK */
	post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);

	return _err;
}
5148 
5149 int
VNOP_COMPOUND_RMDIR(struct vnode * dvp,struct vnode ** vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)5150 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
5151     struct vnode_attr *vap, vfs_context_t ctx)
5152 {
5153 	int _err;
5154 	struct vnop_compound_rmdir_args a;
5155 	int no_vp;
5156 
5157 	a.a_desc = &vnop_mkdir_desc;
5158 	a.a_dvp = dvp;
5159 	a.a_vpp = vpp;
5160 	a.a_cnp = &ndp->ni_cnd;
5161 	a.a_vap = vap;
5162 	a.a_flags = 0;
5163 	a.a_context = ctx;
5164 	a.a_rmdir_authorizer = vn_authorize_rmdir;
5165 	a.a_reserved = NULL;
5166 
5167 	no_vp = (*vpp == NULLVP);
5168 
5169 	_err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
5170 	if (_err == 0 && *vpp) {
5171 		DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
5172 	}
5173 #if CONFIG_APPLEDOUBLE
5174 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
5175 		/*
5176 		 * Remove stale Apple Double file (if any).
5177 		 */
5178 		xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
5179 	}
5180 #endif
5181 
5182 	if (*vpp) {
5183 		post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
5184 	}
5185 	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5186 
5187 	if (no_vp) {
5188 		lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
5189 
5190 #if 0 /* Removing orphaned ._ files requires a vp.... */
5191 		if (*vpp && _err && _err != EKEEPLOOKING) {
5192 			vnode_put(*vpp);
5193 			*vpp = NULLVP;
5194 		}
5195 #endif  /* 0 */
5196 	}
5197 
5198 	return _err;
5199 }
5200 
5201 #if CONFIG_APPLEDOUBLE
/*
 * Remove a ._ AppleDouble file
 *
 * Looks up "._<basename>" in dvp and removes it.  When 'force' is 0
 * (object-creation cleanup), the file is only removed if it appears
 * stale — i.e. older than AD_STALE_SECS by mod/change time.
 */
#define AD_STALE_SECS  (180)
static void
xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
{
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t alloc_len;
	size_t copy_len;

	/* Nothing to do for empty names or names that are already "._" files. */
	if ((basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>", spilling to a heap buffer if it doesn't fit. */
	filename = &smallname[0];
	alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (alloc_len >= sizeof(smallname)) {
		alloc_len++;  /* snprintf result doesn't include '\0' */
		filename = kalloc_data(alloc_len, Z_WAITOK);
		copy_len = snprintf(filename, alloc_len, "._%s", basename);
	}
	/* WANTPARENT: namei returns iocounts on both dvp and the leaf. */
	NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(filename), ctx);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0) {
		goto out2;
	}

	xvp = nd.ni_vp;
	dvp = nd.ni_dvp;
	nameidone(&nd);
	/* Only regular files can be AppleDouble sidecars. */
	if (xvp->v_type != VREG) {
		goto out1;
	}

	/*
	 * When creating a new object and a "._" file already
	 * exists, check to see if it's a stale "._" file. These are
	 * typically AppleDouble (AD) files generated via XNU's
	 * VFS compatibility shims for storing XATTRs and streams
	 * on filesystems that do not support them natively.
	 */
	if (!force) {
		struct vnode_attr va;

		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_data_size);
		VATTR_WANTED(&va, va_modify_time);
		VATTR_WANTED(&va, va_change_time);

		if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
		    VATTR_IS_SUPPORTED(&va, va_data_size) &&
		    va.va_data_size != 0) {
			struct timeval tv_compare = {};
			struct timeval tv_now = {};

			/*
			 * If the file exists (and has non-zero size), then use the newer of
			 * chgtime / modtime to compare against present time. Note that setting XATTRs or updating
			 * streams through the compatibility interfaces may not trigger chgtime to be updated, so
			 * checking either modtime or chgtime is useful.
			 */
			if (VATTR_IS_SUPPORTED(&va, va_modify_time) && (va.va_modify_time.tv_sec)) {
				if (VATTR_IS_SUPPORTED(&va, va_change_time) && (va.va_change_time.tv_sec)) {
					tv_compare.tv_sec = va.va_change_time.tv_sec;
					if (tv_compare.tv_sec < va.va_modify_time.tv_sec) {
						tv_compare.tv_sec = va.va_modify_time.tv_sec;
					}
				} else {
					/* fall back to mod-time alone if chgtime not supported or set to 0 */
					tv_compare.tv_sec = va.va_modify_time.tv_sec;
				}
			}

			/* Now, we have a time to compare against, compare against AD_STALE_SEC */
			microtime(&tv_now);
			if ((tv_compare.tv_sec > 0) &&
			    (tv_now.tv_sec > tv_compare.tv_sec) &&
			    ((tv_now.tv_sec - tv_compare.tv_sec) > AD_STALE_SECS)) {
				force = 1;  /* must be stale */
			}
		}
	}

	if (force) {
		int error;

		error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
		if (error == 0) {
			vnode_setneedinactive(xvp);
		}

		post_event_if_success(xvp, error, NOTE_DELETE);
		post_event_if_success(dvp, error, NOTE_WRITE);
	}

out1:
	/* Drop the iocounts namei took on the parent and leaf. */
	vnode_put(dvp);
	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		kfree_data(filename, alloc_len);
	}
}
5309 }
5310 
/*
 * Shadow uid/gid/mod to a ._ AppleDouble file
 *
 * Best-effort: looks up "._<basename>" in dvp and, if it is a regular
 * file, applies the same attributes.  Lookup and setattr failures are
 * deliberately ignored.
 */
__attribute__((noinline))
static void
xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
    vfs_context_t ctx)
{
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t alloc_len;
	size_t copy_len;

	/* Nothing to do for empty names or names that are already "._" files. */
	if ((dvp == NULLVP) ||
	    (basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>", spilling to a heap buffer if it doesn't fit. */
	filename = &smallname[0];
	alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (alloc_len >= sizeof(smallname)) {
		alloc_len++;  /* snprintf result doesn't include '\0' */
		filename = kalloc_data(alloc_len, Z_WAITOK);
		copy_len = snprintf(filename, alloc_len, "._%s", basename);
	}
	NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(filename), ctx);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0) {
		goto out2;
	}

	xvp = nd.ni_vp;
	nameidone(&nd);

	if (xvp->v_type == VREG) {
		struct vnop_setattr_args a;

		a.a_desc = &vnop_setattr_desc;
		a.a_vp = xvp;
		a.a_vap = vap;
		a.a_context = ctx;

		/* Dispatch directly; errors are intentionally discarded. */
		(void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
	}

	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		kfree_data(filename, alloc_len);
	}
}
5365 #endif /* CONFIG_APPLEDOUBLE */
5366 
5367  #if 0
5368 /*
5369 *#
5370 *#% symlink      dvp     L U U
5371 *#% symlink      vpp     - U -
5372 *#
5373 */
5374 struct vnop_symlink_args {
5375 	struct vnodeop_desc *a_desc;
5376 	vnode_t a_dvp;
5377 	vnode_t *a_vpp;
5378 	struct componentname *a_cnp;
5379 	struct vnode_attr *a_vap;
5380 	char *a_target;
5381 	vfs_context_t a_context;
5382 };
5383 
5384 #endif /* 0*/
5385 errno_t
VNOP_SYMLINK(struct vnode * dvp,struct vnode ** vpp,struct componentname * cnp,struct vnode_attr * vap,char * target,vfs_context_t ctx)5386 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5387     struct vnode_attr *vap, char *target, vfs_context_t ctx)
5388 {
5389 	int _err;
5390 	struct vnop_symlink_args a;
5391 
5392 	a.a_desc = &vnop_symlink_desc;
5393 	a.a_dvp = dvp;
5394 	a.a_vpp = vpp;
5395 	a.a_cnp = cnp;
5396 	a.a_vap = vap;
5397 	a.a_target = target;
5398 	a.a_context = ctx;
5399 
5400 	_err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
5401 	DTRACE_FSINFO(symlink, vnode_t, dvp);
5402 #if CONFIG_APPLEDOUBLE
5403 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
5404 		/*
5405 		 * Remove stale Apple Double file (if any).  Posts its own knotes
5406 		 */
5407 		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
5408 	}
5409 #endif /* CONFIG_APPLEDOUBLE */
5410 
5411 	post_event_if_success(dvp, _err, NOTE_WRITE);
5412 
5413 	return _err;
5414 }
5415 
5416 #if 0
5417 /*
5418 *#
5419 *#% readdir      vp      L L L
5420 *#
5421 */
5422 struct vnop_readdir_args {
5423 	struct vnodeop_desc *a_desc;
5424 	vnode_t a_vp;
5425 	struct uio *a_uio;
5426 	int a_flags;
5427 	int *a_eofflag;
5428 	int *a_numdirent;
5429 	vfs_context_t a_context;
5430 };
5431 
5432 #endif /* 0*/
5433 errno_t
VNOP_READDIR(struct vnode * vp,struct uio * uio,int flags,int * eofflag,int * numdirent,vfs_context_t ctx)5434 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
5435     int *numdirent, vfs_context_t ctx)
5436 {
5437 	int _err;
5438 	struct vnop_readdir_args a;
5439 #if CONFIG_DTRACE
5440 	user_ssize_t resid = uio_resid(uio);
5441 #endif
5442 
5443 	a.a_desc = &vnop_readdir_desc;
5444 	a.a_vp = vp;
5445 	a.a_uio = uio;
5446 	a.a_flags = flags;
5447 	a.a_eofflag = eofflag;
5448 	a.a_numdirent = numdirent;
5449 	a.a_context = ctx;
5450 
5451 	_err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
5452 	DTRACE_FSINFO_IO(readdir,
5453 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5454 
5455 	return _err;
5456 }
5457 
5458 #if 0
5459 /*
5460 *#
5461 *#% readdirattr  vp      L L L
5462 *#
5463 */
5464 struct vnop_readdirattr_args {
5465 	struct vnodeop_desc *a_desc;
5466 	vnode_t a_vp;
5467 	struct attrlist *a_alist;
5468 	struct uio *a_uio;
5469 	uint32_t a_maxcount;
5470 	uint32_t a_options;
5471 	uint32_t *a_newstate;
5472 	int *a_eofflag;
5473 	uint32_t *a_actualcount;
5474 	vfs_context_t a_context;
5475 };
5476 
5477 #endif /* 0*/
5478 errno_t
VNOP_READDIRATTR(struct vnode * vp,struct attrlist * alist,struct uio * uio,uint32_t maxcount,uint32_t options,uint32_t * newstate,int * eofflag,uint32_t * actualcount,vfs_context_t ctx)5479 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
5480     uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
5481 {
5482 	int _err;
5483 	struct vnop_readdirattr_args a;
5484 #if CONFIG_DTRACE
5485 	user_ssize_t resid = uio_resid(uio);
5486 #endif
5487 
5488 	a.a_desc = &vnop_readdirattr_desc;
5489 	a.a_vp = vp;
5490 	a.a_alist = alist;
5491 	a.a_uio = uio;
5492 	a.a_maxcount = maxcount;
5493 	a.a_options = options;
5494 	a.a_newstate = newstate;
5495 	a.a_eofflag = eofflag;
5496 	a.a_actualcount = actualcount;
5497 	a.a_context = ctx;
5498 
5499 	_err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
5500 	DTRACE_FSINFO_IO(readdirattr,
5501 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5502 
5503 	return _err;
5504 }
5505 
5506 #if 0
struct vnop_getattrlistbulk_args {
5508 	struct vnodeop_desc *a_desc;
5509 	vnode_t a_vp;
5510 	struct attrlist *a_alist;
5511 	struct vnode_attr *a_vap;
5512 	struct uio *a_uio;
	void *a_private;
5514 	uint64_t a_options;
5515 	int *a_eofflag;
5516 	uint32_t *a_actualcount;
5517 	vfs_context_t a_context;
5518 };
5519 #endif /* 0*/
5520 errno_t
VNOP_GETATTRLISTBULK(struct vnode * vp,struct attrlist * alist,struct vnode_attr * vap,struct uio * uio,void * private,uint64_t options,int32_t * eofflag,int32_t * actualcount,vfs_context_t ctx)5521 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
5522     struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
5523     int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
5524 {
5525 	int _err;
5526 	struct vnop_getattrlistbulk_args a;
5527 #if CONFIG_DTRACE
5528 	user_ssize_t resid = uio_resid(uio);
5529 #endif
5530 
5531 	a.a_desc = &vnop_getattrlistbulk_desc;
5532 	a.a_vp = vp;
5533 	a.a_alist = alist;
5534 	a.a_vap = vap;
5535 	a.a_uio = uio;
5536 	a.a_private = private;
5537 	a.a_options = options;
5538 	a.a_eofflag = eofflag;
5539 	a.a_actualcount = actualcount;
5540 	a.a_context = ctx;
5541 
5542 	_err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
5543 	DTRACE_FSINFO_IO(getattrlistbulk,
5544 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5545 
5546 	return _err;
5547 }
5548 
5549 #if 0
5550 /*
5551 *#
5552 *#% readlink     vp      L L L
5553 *#
5554 */
5555 struct vnop_readlink_args {
5556 	struct vnodeop_desc *a_desc;
5557 	vnode_t a_vp;
5558 	struct uio *a_uio;
5559 	vfs_context_t a_context;
5560 };
5561 #endif /* 0 */
5562 
5563 /*
5564  * Returns:	0			Success
5565  *		lock_fsnode:ENOENT	No such file or directory [only for VFS
5566  *					 that is not thread safe & vnode is
5567  *					 currently being/has been terminated]
5568  *		<vfs_readlink>:EINVAL
5569  *		<vfs_readlink>:???
5570  *
5571  * Note:	The return codes from the underlying VFS's readlink routine
5572  *		can't be fully enumerated here, since third party VFS authors
5573  *		may not limit their error returns to the ones documented here,
5574  *		even though this may result in some programs functioning
5575  *		incorrectly.
5576  *
5577  *		The return codes documented above are those which may currently
5578  *		be returned by HFS from hfs_vnop_readlink, not including
5579  *		additional error code which may be propagated from underlying
5580  *		routines.
5581  */
5582 errno_t
VNOP_READLINK(struct vnode * vp,struct uio * uio,vfs_context_t ctx)5583 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
5584 {
5585 	int _err;
5586 	struct vnop_readlink_args a;
5587 #if CONFIG_DTRACE
5588 	user_ssize_t resid = uio_resid(uio);
5589 #endif
5590 	a.a_desc = &vnop_readlink_desc;
5591 	a.a_vp = vp;
5592 	a.a_uio = uio;
5593 	a.a_context = ctx;
5594 
5595 	_err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
5596 	DTRACE_FSINFO_IO(readlink,
5597 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5598 
5599 	return _err;
5600 }
5601 
5602 #if 0
5603 /*
5604 *#
5605 *#% inactive     vp      L U U
5606 *#
5607 */
5608 struct vnop_inactive_args {
5609 	struct vnodeop_desc *a_desc;
5610 	vnode_t a_vp;
5611 	vfs_context_t a_context;
5612 };
5613 #endif /* 0*/
/*
 * Notify the filesystem that the last usecount reference on vp has been
 * dropped; the FS may clean up or schedule the vnode for reuse.
 */
errno_t
VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
{
	int _err;
	struct vnop_inactive_args a;

	a.a_desc = &vnop_inactive_desc;
	a.a_vp = vp;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(inactive, vnode_t, vp);

#if NAMEDSTREAMS
	/* For file systems that do not support namedstream natively, mark
	 * the shadow stream file vnode to be recycled as soon as the last
	 * reference goes away.  To avoid re-entering reclaim code, do not
	 * call recycle on terminating namedstream vnodes.
	 */
	if (vnode_isnamedstream(vp) &&
	    (vp->v_parent != NULLVP) &&
	    vnode_isshadow(vp) &&
	    ((vp->v_lflag & VL_TERMINATE) == 0)) {
		vnode_recycle(vp);
	}
#endif

	return _err;
}
5643 
5644 
5645 #if 0
5646 /*
5647 *#
5648 *#% reclaim      vp      U U U
5649 *#
5650 */
5651 struct vnop_reclaim_args {
5652 	struct vnodeop_desc *a_desc;
5653 	vnode_t a_vp;
5654 	vfs_context_t a_context;
5655 };
5656 #endif /* 0*/
5657 errno_t
VNOP_RECLAIM(struct vnode * vp,vfs_context_t ctx)5658 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5659 {
5660 	int _err;
5661 	struct vnop_reclaim_args a;
5662 
5663 	a.a_desc = &vnop_reclaim_desc;
5664 	a.a_vp = vp;
5665 	a.a_context = ctx;
5666 
5667 	_err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5668 	DTRACE_FSINFO(reclaim, vnode_t, vp);
5669 
5670 	return _err;
5671 }
5672 
5673 
5674 /*
5675  * Returns:	0			Success
5676  *	lock_fsnode:ENOENT		No such file or directory [only for VFS
5677  *					 that is not thread safe & vnode is
5678  *					 currently being/has been terminated]
5679  *	<vnop_pathconf_desc>:???	[per FS implementation specific]
5680  */
5681 #if 0
5682 /*
5683 *#
5684 *#% pathconf     vp      L L L
5685 *#
5686 */
5687 struct vnop_pathconf_args {
5688 	struct vnodeop_desc *a_desc;
5689 	vnode_t a_vp;
5690 	int a_name;
5691 	int32_t *a_retval;
5692 	vfs_context_t a_context;
5693 };
5694 #endif /* 0*/
5695 errno_t
VNOP_PATHCONF(struct vnode * vp,int name,int32_t * retval,vfs_context_t ctx)5696 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5697 {
5698 	int _err;
5699 	struct vnop_pathconf_args a;
5700 
5701 	a.a_desc = &vnop_pathconf_desc;
5702 	a.a_vp = vp;
5703 	a.a_name = name;
5704 	a.a_retval = retval;
5705 	a.a_context = ctx;
5706 
5707 	_err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5708 	DTRACE_FSINFO(pathconf, vnode_t, vp);
5709 
5710 	return _err;
5711 }
5712 
5713 /*
5714  * Returns:	0			Success
5715  *	err_advlock:ENOTSUP
5716  *	lf_advlock:???
5717  *	<vnop_advlock_desc>:???
5718  *
5719  * Notes:	VFS implementations of advisory locking using calls through
5720  *		<vnop_advlock_desc> because lock enforcement does not occur
5721  *		locally should try to limit themselves to the return codes
5722  *		documented above for lf_advlock and err_advlock.
5723  */
5724 #if 0
5725 /*
5726 *#
5727 *#% advlock      vp      U U U
5728 *#
5729 */
5730 struct vnop_advlock_args {
5731 	struct vnodeop_desc *a_desc;
5732 	vnode_t a_vp;
5733 	caddr_t a_id;
5734 	int a_op;
5735 	struct flock *a_fl;
5736 	int a_flags;
5737 	vfs_context_t a_context;
5738 };
5739 #endif /* 0*/
/*
 * Apply or release an advisory lock on vp, routing the request either to
 * the local lock manager (lf_advlock) or to the filesystem, depending on
 * vnode flags and the lock type requested.
 */
errno_t
VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
{
	int _err;
	struct vnop_advlock_args a;

	a.a_desc = &vnop_advlock_desc;
	a.a_vp = vp;
	a.a_id = id;
	a.a_op = op;
	a.a_fl = fl;
	a.a_flags = flags;
	a.a_context = ctx;
	a.a_timeout = timeout;

	/* Disallow advisory locking on non-seekable vnodes */
	if (vnode_isfifo(vp)) {
		_err = err_advlock(&a);
	} else {
		if ((vp->v_flag & VLOCKLOCAL)) {
			/* Advisory locking done at this layer */
			_err = lf_advlock(&a);
		} else if (flags & F_OFD_LOCK) {
			/* Non-local locking doesn't work for OFD locks */
			_err = err_advlock(&a);
		} else if (op == F_TRANSFER) {
			/* Non-local locking doesn't have F_TRANSFER */
			_err = err_advlock(&a);
		} else {
			/* Advisory locking done by underlying filesystem */
			_err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
		}
		DTRACE_FSINFO(advlock, vnode_t, vp);
		/* Unlocking a flock()/OFD lock posts NOTE_FUNLOCK to watchers. */
		if (op == F_UNLCK &&
		    (flags & (F_FLOCK | F_OFD_LOCK)) != 0) {
			post_event_if_success(vp, _err, NOTE_FUNLOCK);
		}
	}

	return _err;
}
5781 
5782 
5783 
5784 #if 0
5785 /*
5786 *#
5787 *#% allocate     vp      L L L
5788 *#
5789 */
5790 struct vnop_allocate_args {
5791 	struct vnodeop_desc *a_desc;
5792 	vnode_t a_vp;
5793 	off_t a_length;
5794 	u_int32_t a_flags;
5795 	off_t *a_bytesallocated;
5796 	off_t a_offset;
5797 	vfs_context_t a_context;
5798 };
5799 
5800 #endif /* 0*/
5801 errno_t
VNOP_ALLOCATE(struct vnode * vp,off_t length,u_int32_t flags,off_t * bytesallocated,off_t offset,vfs_context_t ctx)5802 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5803 {
5804 	int _err;
5805 	struct vnop_allocate_args a;
5806 
5807 	a.a_desc = &vnop_allocate_desc;
5808 	a.a_vp = vp;
5809 	a.a_length = length;
5810 	a.a_flags = flags;
5811 	a.a_bytesallocated = bytesallocated;
5812 	a.a_offset = offset;
5813 	a.a_context = ctx;
5814 
5815 	_err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5816 	DTRACE_FSINFO(allocate, vnode_t, vp);
5817 #if CONFIG_FSE
5818 	if (_err == 0) {
5819 		add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5820 	}
5821 #endif
5822 
5823 	return _err;
5824 }
5825 
5826 #if 0
5827 /*
5828 *#
5829 *#% pagein       vp      = = =
5830 *#
5831 */
5832 struct vnop_pagein_args {
5833 	struct vnodeop_desc *a_desc;
5834 	vnode_t a_vp;
5835 	upl_t a_pl;
5836 	upl_offset_t a_pl_offset;
5837 	off_t a_f_offset;
5838 	size_t a_size;
5839 	int a_flags;
5840 	vfs_context_t a_context;
5841 };
5842 #endif /* 0*/
5843 errno_t
VNOP_PAGEIN(struct vnode * vp,upl_t pl,upl_offset_t pl_offset,off_t f_offset,size_t size,int flags,vfs_context_t ctx)5844 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5845 {
5846 	int _err;
5847 	struct vnop_pagein_args a;
5848 
5849 	a.a_desc = &vnop_pagein_desc;
5850 	a.a_vp = vp;
5851 	a.a_pl = pl;
5852 	a.a_pl_offset = pl_offset;
5853 	a.a_f_offset = f_offset;
5854 	a.a_size = size;
5855 	a.a_flags = flags;
5856 	a.a_context = ctx;
5857 
5858 	_err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5859 	DTRACE_FSINFO(pagein, vnode_t, vp);
5860 
5861 	return _err;
5862 }
5863 
5864 #if 0
5865 /*
5866 *#
5867 *#% pageout      vp      = = =
5868 *#
5869 */
5870 struct vnop_pageout_args {
5871 	struct vnodeop_desc *a_desc;
5872 	vnode_t a_vp;
5873 	upl_t a_pl;
5874 	upl_offset_t a_pl_offset;
5875 	off_t a_f_offset;
5876 	size_t a_size;
5877 	int a_flags;
5878 	vfs_context_t a_context;
5879 };
5880 
5881 #endif /* 0*/
5882 errno_t
VNOP_PAGEOUT(struct vnode * vp,upl_t pl,upl_offset_t pl_offset,off_t f_offset,size_t size,int flags,vfs_context_t ctx)5883 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5884 {
5885 	int _err;
5886 	struct vnop_pageout_args a;
5887 
5888 	a.a_desc = &vnop_pageout_desc;
5889 	a.a_vp = vp;
5890 	a.a_pl = pl;
5891 	a.a_pl_offset = pl_offset;
5892 	a.a_f_offset = f_offset;
5893 	a.a_size = size;
5894 	a.a_flags = flags;
5895 	a.a_context = ctx;
5896 
5897 	_err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5898 	DTRACE_FSINFO(pageout, vnode_t, vp);
5899 
5900 	post_event_if_success(vp, _err, NOTE_WRITE);
5901 
5902 	return _err;
5903 }
5904 
5905 int
vn_remove(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,int32_t flags,struct vnode_attr * vap,vfs_context_t ctx)5906 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5907 {
5908 	if (vnode_compound_remove_available(dvp)) {
5909 		return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5910 	} else {
5911 		return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5912 	}
5913 }
5914 
5915 #if CONFIG_SEARCHFS
5916 
5917 #if 0
5918 /*
5919 *#
5920 *#% searchfs     vp      L L L
5921 *#
5922 */
5923 struct vnop_searchfs_args {
5924 	struct vnodeop_desc *a_desc;
5925 	vnode_t a_vp;
5926 	void *a_searchparams1;
5927 	void *a_searchparams2;
5928 	struct attrlist *a_searchattrs;
5929 	uint32_t a_maxmatches;
5930 	struct timeval *a_timelimit;
5931 	struct attrlist *a_returnattrs;
5932 	uint32_t *a_nummatches;
5933 	uint32_t a_scriptcode;
5934 	uint32_t a_options;
5935 	struct uio *a_uio;
5936 	struct searchstate *a_searchstate;
5937 	vfs_context_t a_context;
5938 };
5939 
5940 #endif /* 0*/
5941 errno_t
VNOP_SEARCHFS(struct vnode * vp,void * searchparams1,void * searchparams2,struct attrlist * searchattrs,uint32_t maxmatches,struct timeval * timelimit,struct attrlist * returnattrs,uint32_t * nummatches,uint32_t scriptcode,uint32_t options,struct uio * uio,struct searchstate * searchstate,vfs_context_t ctx)5942 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5943 {
5944 	int _err;
5945 	struct vnop_searchfs_args a;
5946 
5947 	a.a_desc = &vnop_searchfs_desc;
5948 	a.a_vp = vp;
5949 	a.a_searchparams1 = searchparams1;
5950 	a.a_searchparams2 = searchparams2;
5951 	a.a_searchattrs = searchattrs;
5952 	a.a_maxmatches = maxmatches;
5953 	a.a_timelimit = timelimit;
5954 	a.a_returnattrs = returnattrs;
5955 	a.a_nummatches = nummatches;
5956 	a.a_scriptcode = scriptcode;
5957 	a.a_options = options;
5958 	a.a_uio = uio;
5959 	a.a_searchstate = searchstate;
5960 	a.a_context = ctx;
5961 
5962 	_err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5963 	DTRACE_FSINFO(searchfs, vnode_t, vp);
5964 
5965 	return _err;
5966 }
5967 #endif /* CONFIG_SEARCHFS */
5968 
5969 #if 0
5970 /*
5971 *#
5972 *#% copyfile fvp U U U
5973 *#% copyfile tdvp L U U
5974 *#% copyfile tvp X U U
5975 *#
5976 */
5977 struct vnop_copyfile_args {
5978 	struct vnodeop_desc *a_desc;
5979 	vnode_t a_fvp;
5980 	vnode_t a_tdvp;
5981 	vnode_t a_tvp;
5982 	struct componentname *a_tcnp;
5983 	int a_mode;
5984 	int a_flags;
5985 	vfs_context_t a_context;
5986 };
5987 #endif /* 0*/
5988 errno_t
VNOP_COPYFILE(struct vnode * fvp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,int mode,int flags,vfs_context_t ctx)5989 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5990     int mode, int flags, vfs_context_t ctx)
5991 {
5992 	int _err;
5993 	struct vnop_copyfile_args a;
5994 	a.a_desc = &vnop_copyfile_desc;
5995 	a.a_fvp = fvp;
5996 	a.a_tdvp = tdvp;
5997 	a.a_tvp = tvp;
5998 	a.a_tcnp = tcnp;
5999 	a.a_mode = mode;
6000 	a.a_flags = flags;
6001 	a.a_context = ctx;
6002 	_err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
6003 	DTRACE_FSINFO(copyfile, vnode_t, fvp);
6004 	return _err;
6005 }
6006 
6007 #if 0
6008 struct vnop_clonefile_args {
6009 	struct vnodeop_desc *a_desc;
6010 	vnode_t a_fvp;
6011 	vnode_t a_dvp;
6012 	vnode_t *a_vpp;
6013 	struct componentname *a_cnp;
6014 	struct vnode_attr *a_vap;
6015 	uint32_t a_flags;
6016 	vfs_context_t a_context;
6017 	int (*a_dir_clone_authorizer)(  /* Authorization callback */
6018 		struct vnode_attr *vap,         /* attribute to be authorized */
6019 		kauth_action_t action,         /* action for which attribute is to be authorized */
6020 		struct vnode_attr *dvap,         /* target directory attributes */
6021 		vnode_t sdvp,         /* source directory vnode pointer (optional) */
6022 		mount_t mp,         /* mount point of filesystem */
6023 		dir_clone_authorizer_op_t vattr_op,         /* specific operation requested : setup, authorization or cleanup  */
		uint32_t flags,         /* value passed in a_flags to the VNOP */
6025 		vfs_context_t ctx,                      /* As passed to VNOP */
6026 		void *reserved);                        /* Always NULL */
6027 	void *a_reserved;               /* Currently unused */
6028 };
6029 #endif /* 0 */
6030 
6031 errno_t
VNOP_CLONEFILE(vnode_t fvp,vnode_t dvp,vnode_t * vpp,struct componentname * cnp,struct vnode_attr * vap,uint32_t flags,vfs_context_t ctx)6032 VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
6033     struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
6034     vfs_context_t ctx)
6035 {
6036 	int _err;
6037 	struct vnop_clonefile_args a;
6038 	a.a_desc = &vnop_clonefile_desc;
6039 	a.a_fvp = fvp;
6040 	a.a_dvp = dvp;
6041 	a.a_vpp = vpp;
6042 	a.a_cnp = cnp;
6043 	a.a_vap = vap;
6044 	a.a_flags = flags;
6045 	a.a_context = ctx;
6046 
6047 	if (vnode_vtype(fvp) == VDIR) {
6048 		a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
6049 	} else {
6050 		a.a_dir_clone_authorizer = NULL;
6051 	}
6052 
6053 	_err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);
6054 
6055 	if (_err == 0 && *vpp) {
6056 		DTRACE_FSINFO(clonefile, vnode_t, *vpp);
6057 		if (kdebug_enable) {
6058 			kdebug_lookup(*vpp, cnp);
6059 		}
6060 	}
6061 
6062 	post_event_if_success(dvp, _err, NOTE_WRITE);
6063 
6064 	return _err;
6065 }
6066 
6067 errno_t
VNOP_GETXATTR(vnode_t vp,const char * name,uio_t uio,size_t * size,int options,vfs_context_t ctx)6068 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
6069 {
6070 	struct vnop_getxattr_args a;
6071 	int error;
6072 
6073 	a.a_desc = &vnop_getxattr_desc;
6074 	a.a_vp = vp;
6075 	a.a_name = name;
6076 	a.a_uio = uio;
6077 	a.a_size = size;
6078 	a.a_options = options;
6079 	a.a_context = ctx;
6080 
6081 	error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
6082 	DTRACE_FSINFO(getxattr, vnode_t, vp);
6083 
6084 	return error;
6085 }
6086 
6087 errno_t
VNOP_SETXATTR(vnode_t vp,const char * name,uio_t uio,int options,vfs_context_t ctx)6088 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
6089 {
6090 	struct vnop_setxattr_args a;
6091 	int error;
6092 
6093 	a.a_desc = &vnop_setxattr_desc;
6094 	a.a_vp = vp;
6095 	a.a_name = name;
6096 	a.a_uio = uio;
6097 	a.a_options = options;
6098 	a.a_context = ctx;
6099 
6100 	error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
6101 	DTRACE_FSINFO(setxattr, vnode_t, vp);
6102 
6103 	if (error == 0) {
6104 		vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
6105 	}
6106 
6107 	post_event_if_success(vp, error, NOTE_ATTRIB);
6108 
6109 	return error;
6110 }
6111 
6112 errno_t
VNOP_REMOVEXATTR(vnode_t vp,const char * name,int options,vfs_context_t ctx)6113 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
6114 {
6115 	struct vnop_removexattr_args a;
6116 	int error;
6117 
6118 	a.a_desc = &vnop_removexattr_desc;
6119 	a.a_vp = vp;
6120 	a.a_name = name;
6121 	a.a_options = options;
6122 	a.a_context = ctx;
6123 
6124 	error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
6125 	DTRACE_FSINFO(removexattr, vnode_t, vp);
6126 
6127 	post_event_if_success(vp, error, NOTE_ATTRIB);
6128 
6129 	return error;
6130 }
6131 
6132 errno_t
VNOP_LISTXATTR(vnode_t vp,uio_t uio,size_t * size,int options,vfs_context_t ctx)6133 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
6134 {
6135 	struct vnop_listxattr_args a;
6136 	int error;
6137 
6138 	a.a_desc = &vnop_listxattr_desc;
6139 	a.a_vp = vp;
6140 	a.a_uio = uio;
6141 	a.a_size = size;
6142 	a.a_options = options;
6143 	a.a_context = ctx;
6144 
6145 	error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
6146 	DTRACE_FSINFO(listxattr, vnode_t, vp);
6147 
6148 	return error;
6149 }
6150 
6151 
6152 #if 0
6153 /*
6154 *#
6155 *#% blktooff vp = = =
6156 *#
6157 */
6158 struct vnop_blktooff_args {
6159 	struct vnodeop_desc *a_desc;
6160 	vnode_t a_vp;
6161 	daddr64_t a_lblkno;
6162 	off_t *a_offset;
6163 };
6164 #endif /* 0*/
6165 errno_t
VNOP_BLKTOOFF(struct vnode * vp,daddr64_t lblkno,off_t * offset)6166 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
6167 {
6168 	int _err;
6169 	struct vnop_blktooff_args a;
6170 
6171 	a.a_desc = &vnop_blktooff_desc;
6172 	a.a_vp = vp;
6173 	a.a_lblkno = lblkno;
6174 	a.a_offset = offset;
6175 
6176 	_err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
6177 	DTRACE_FSINFO(blktooff, vnode_t, vp);
6178 
6179 	return _err;
6180 }
6181 
6182 #if 0
6183 /*
6184 *#
6185 *#% offtoblk vp = = =
6186 *#
6187 */
6188 struct vnop_offtoblk_args {
6189 	struct vnodeop_desc *a_desc;
6190 	vnode_t a_vp;
6191 	off_t a_offset;
6192 	daddr64_t *a_lblkno;
6193 };
6194 #endif /* 0*/
6195 errno_t
VNOP_OFFTOBLK(struct vnode * vp,off_t offset,daddr64_t * lblkno)6196 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
6197 {
6198 	int _err;
6199 	struct vnop_offtoblk_args a;
6200 
6201 	a.a_desc = &vnop_offtoblk_desc;
6202 	a.a_vp = vp;
6203 	a.a_offset = offset;
6204 	a.a_lblkno = lblkno;
6205 
6206 	_err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
6207 	DTRACE_FSINFO(offtoblk, vnode_t, vp);
6208 
6209 	return _err;
6210 }
6211 
6212 #if 0
6213 /*
6214 *#
*#% verify vp L L L
6216 *#
6217 */
6218 struct vnop_verify_args {
6219 	struct vnodeop_desc *a_desc;
6220 	vnode_t a_vp;
6221 	off_t a_foffset;
6222 	char *a_buf;
6223 	size_t a_bufsize;
6224 	size_t *a_verifyblksize;
6225 	void **a_verify_ctxp;
6226 	int a_flags;
6227 	vfs_context_t a_context;
	vnode_verify_kind_t *a_verifykind;
6229 };
6230 #endif
6231 
/*
 * Ask the filesystem to verify (or set up/tear down verification context
 * for) a range of file data.  ENOTSUP from the filesystem is treated as
 * success, since verification support is optional.
 */
errno_t
VNOP_VERIFY(struct vnode *vp, off_t foffset, uint8_t *buf, size_t bufsize,
    size_t *verify_block_size, void **verify_ctxp, vnode_verify_flags_t flags,
    vfs_context_t ctx, vnode_verify_kind_t *verify_kind)
{
	int _err;
	struct vnop_verify_args a;

	/* Sanity-check that each flag's required arguments are present. */
	assert(!(flags & VNODE_VERIFY_CONTEXT_ALLOC) || ((foffset >= 0) && bufsize));
	assert(!(flags & (VNODE_VERIFY_CONTEXT_FREE | VNODE_VERIFY_WITH_CONTEXT)) || verify_ctxp);
	assert(!(flags & (VNODE_VERIFY_PRECOMPUTED | VNODE_VERIFY_WITH_CONTEXT)) ||
	    ((foffset >= 0) && buf && bufsize));

	/* Callers may pass no context; use the kernel context then. */
	if (ctx == NULL) {
		ctx = vfs_context_kernel();
	}
	a.a_desc = &vnop_verify_desc;
	a.a_vp = vp;
	a.a_foffset = foffset;
	a.a_buf = buf;
	a.a_bufsize = bufsize;
	a.a_verifyblksize = verify_block_size;
	a.a_flags = flags;
	a.a_verify_ctxp = verify_ctxp;
	a.a_context = ctx;
	/* Default the out-parameter so it is defined even on ENOTSUP. */
	if (verify_kind != NULL) {
		*verify_kind = VK_HASH_NONE;
	}
	a.a_verifykind = verify_kind;

	_err = (*vp->v_op[vnop_verify_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(verify, vnode_t, vp);

	/* It is not an error for a filesystem to not support this VNOP */
	if (_err == ENOTSUP) {
		/* A zero verify block size signals "no verification" to callers. */
		if (!buf && verify_block_size) {
			*verify_block_size = 0;
		}

		_err = 0;
	}

	return _err;
}
6276 
6277 #if 0
6278 /*
6279 *#
6280 *#% blockmap vp L L L
6281 *#
6282 */
6283 struct vnop_blockmap_args {
6284 	struct vnodeop_desc *a_desc;
6285 	vnode_t a_vp;
6286 	off_t a_foffset;
6287 	size_t a_size;
6288 	daddr64_t *a_bpn;
6289 	size_t *a_run;
6290 	void *a_poff;
6291 	int a_flags;
6292 	vfs_context_t a_context;
6293 };
6294 #endif /* 0*/
6295 errno_t
VNOP_BLOCKMAP(struct vnode * vp,off_t foffset,size_t size,daddr64_t * bpn,size_t * run,void * poff,int flags,vfs_context_t ctx)6296 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
6297 {
6298 	int _err;
6299 	struct vnop_blockmap_args a;
6300 	size_t localrun = 0;
6301 
6302 	if (ctx == NULL) {
6303 		ctx = vfs_context_current();
6304 	}
6305 	a.a_desc = &vnop_blockmap_desc;
6306 	a.a_vp = vp;
6307 	a.a_foffset = foffset;
6308 	a.a_size = size;
6309 	a.a_bpn = bpn;
6310 	a.a_run = &localrun;
6311 	a.a_poff = poff;
6312 	a.a_flags = flags;
6313 	a.a_context = ctx;
6314 
6315 	_err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
6316 	DTRACE_FSINFO(blockmap, vnode_t, vp);
6317 
6318 	/*
6319 	 * We used a local variable to request information from the underlying
6320 	 * filesystem about the length of the I/O run in question.  If
6321 	 * we get malformed output from the filesystem, we cap it to the length
6322 	 * requested, at most.  Update 'run' on the way out.
6323 	 */
6324 	if (_err == 0) {
6325 		if (localrun > size) {
6326 			localrun = size;
6327 		}
6328 
6329 		if (run) {
6330 			*run = localrun;
6331 		}
6332 	}
6333 
6334 	return _err;
6335 }
6336 
6337 #if 0
6338 struct vnop_strategy_args {
6339 	struct vnodeop_desc *a_desc;
6340 	struct buf *a_bp;
6341 };
6342 
6343 #endif /* 0*/
6344 errno_t
VNOP_STRATEGY(struct buf * bp)6345 VNOP_STRATEGY(struct buf *bp)
6346 {
6347 	int _err;
6348 	struct vnop_strategy_args a;
6349 	vnode_t vp = buf_vnode(bp);
6350 	a.a_desc = &vnop_strategy_desc;
6351 	a.a_bp = bp;
6352 	_err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
6353 	DTRACE_FSINFO(strategy, vnode_t, vp);
6354 	return _err;
6355 }
6356 
6357 #if 0
6358 struct vnop_bwrite_args {
6359 	struct vnodeop_desc *a_desc;
6360 	buf_t a_bp;
6361 };
6362 #endif /* 0*/
6363 errno_t
VNOP_BWRITE(struct buf * bp)6364 VNOP_BWRITE(struct buf *bp)
6365 {
6366 	int _err;
6367 	struct vnop_bwrite_args a;
6368 	vnode_t vp = buf_vnode(bp);
6369 	a.a_desc = &vnop_bwrite_desc;
6370 	a.a_bp = bp;
6371 	_err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
6372 	DTRACE_FSINFO(bwrite, vnode_t, vp);
6373 	return _err;
6374 }
6375 
6376 #if 0
6377 struct vnop_kqfilt_add_args {
6378 	struct vnodeop_desc *a_desc;
6379 	struct vnode *a_vp;
6380 	struct knote *a_kn;
6381 	vfs_context_t a_context;
6382 };
6383 #endif
6384 errno_t
VNOP_KQFILT_ADD(struct vnode * vp,struct knote * kn,vfs_context_t ctx)6385 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
6386 {
6387 	int _err;
6388 	struct vnop_kqfilt_add_args a;
6389 
6390 	a.a_desc = VDESC(vnop_kqfilt_add);
6391 	a.a_vp = vp;
6392 	a.a_kn = kn;
6393 	a.a_context = ctx;
6394 
6395 	_err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
6396 	DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
6397 
6398 	return _err;
6399 }
6400 
6401 #if 0
6402 struct vnop_kqfilt_remove_args {
6403 	struct vnodeop_desc *a_desc;
6404 	struct vnode *a_vp;
6405 	uintptr_t a_ident;
6406 	vfs_context_t a_context;
6407 };
6408 #endif
6409 errno_t
VNOP_KQFILT_REMOVE(struct vnode * vp,uintptr_t ident,vfs_context_t ctx)6410 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
6411 {
6412 	int _err;
6413 	struct vnop_kqfilt_remove_args a;
6414 
6415 	a.a_desc = VDESC(vnop_kqfilt_remove);
6416 	a.a_vp = vp;
6417 	a.a_ident = ident;
6418 	a.a_context = ctx;
6419 
6420 	_err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
6421 	DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
6422 
6423 	return _err;
6424 }
6425 
6426 errno_t
VNOP_MONITOR(vnode_t vp,uint32_t events,uint32_t flags,void * handle,vfs_context_t ctx)6427 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
6428 {
6429 	int _err;
6430 	struct vnop_monitor_args a;
6431 
6432 	a.a_desc = VDESC(vnop_monitor);
6433 	a.a_vp = vp;
6434 	a.a_events = events;
6435 	a.a_flags = flags;
6436 	a.a_handle = handle;
6437 	a.a_context = ctx;
6438 
6439 	_err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
6440 	DTRACE_FSINFO(monitor, vnode_t, vp);
6441 
6442 	return _err;
6443 }
6444 
6445 #if 0
6446 struct vnop_setlabel_args {
6447 	struct vnodeop_desc *a_desc;
6448 	struct vnode *a_vp;
6449 	struct label *a_vl;
6450 	vfs_context_t a_context;
6451 };
6452 #endif
6453 errno_t
VNOP_SETLABEL(struct vnode * vp,struct label * label,vfs_context_t ctx)6454 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
6455 {
6456 	int _err;
6457 	struct vnop_setlabel_args a;
6458 
6459 	a.a_desc = VDESC(vnop_setlabel);
6460 	a.a_vp = vp;
6461 	a.a_vl = label;
6462 	a.a_context = ctx;
6463 
6464 	_err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
6465 	DTRACE_FSINFO(setlabel, vnode_t, vp);
6466 
6467 	return _err;
6468 }
6469 
6470 
6471 #if NAMEDSTREAMS
6472 /*
 * Get a named stream
6474  */
6475 errno_t
VNOP_GETNAMEDSTREAM(vnode_t vp,vnode_t * svpp,const char * name,enum nsoperation operation,int flags,vfs_context_t ctx)6476 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
6477 {
6478 	int _err;
6479 	struct vnop_getnamedstream_args a;
6480 
6481 	a.a_desc = &vnop_getnamedstream_desc;
6482 	a.a_vp = vp;
6483 	a.a_svpp = svpp;
6484 	a.a_name = name;
6485 	a.a_operation = operation;
6486 	a.a_flags = flags;
6487 	a.a_context = ctx;
6488 
6489 	_err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
6490 	DTRACE_FSINFO(getnamedstream, vnode_t, vp);
6491 	return _err;
6492 }
6493 
6494 /*
 * Create a named stream
6496  */
6497 errno_t
VNOP_MAKENAMEDSTREAM(vnode_t vp,vnode_t * svpp,const char * name,int flags,vfs_context_t ctx)6498 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
6499 {
6500 	int _err;
6501 	struct vnop_makenamedstream_args a;
6502 
6503 	a.a_desc = &vnop_makenamedstream_desc;
6504 	a.a_vp = vp;
6505 	a.a_svpp = svpp;
6506 	a.a_name = name;
6507 	a.a_flags = flags;
6508 	a.a_context = ctx;
6509 
6510 	_err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
6511 	DTRACE_FSINFO(makenamedstream, vnode_t, vp);
6512 	return _err;
6513 }
6514 
6515 
6516 /*
 * Remove a named stream
6518  */
6519 errno_t
VNOP_REMOVENAMEDSTREAM(vnode_t vp,vnode_t svp,const char * name,int flags,vfs_context_t ctx)6520 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6521 {
6522 	int _err;
6523 	struct vnop_removenamedstream_args a;
6524 
6525 	a.a_desc = &vnop_removenamedstream_desc;
6526 	a.a_vp = vp;
6527 	a.a_svp = svp;
6528 	a.a_name = name;
6529 	a.a_flags = flags;
6530 	a.a_context = ctx;
6531 
6532 	_err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6533 	DTRACE_FSINFO(removenamedstream, vnode_t, vp);
6534 	return _err;
6535 }
6536 #endif
6537