1 /*
2 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/disk.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf.h>
93 #include <sys/errno.h>
94 #include <kern/kalloc.h>
95 #include <sys/domain.h>
96 #include <sys/mbuf.h>
97 #include <sys/syslog.h>
98 #include <sys/ubc.h>
99 #include <sys/vm.h>
100 #include <sys/sysctl.h>
101 #include <sys/filedesc.h>
102 #include <sys/event.h>
103 #include <sys/fsevents.h>
104 #include <sys/user.h>
105 #include <sys/lockf.h>
106 #include <sys/xattr.h>
107 #include <sys/kdebug.h>
108
109 #include <kern/assert.h>
110 #include <kern/zalloc.h>
111 #include <kern/task.h>
112 #include <kern/policy_internal.h>
113
114 #include <libkern/OSByteOrder.h>
115
116 #include <miscfs/specfs/specdev.h>
117
118 #include <mach/mach_types.h>
119 #include <mach/memory_object_types.h>
120 #include <mach/task.h>
121
122 #if CONFIG_MACF
123 #include <security/mac_framework.h>
124 #endif
125
126 #if NULLFS
127 #include <miscfs/nullfs/nullfs.h>
128 #endif
129
130 #include <sys/sdt.h>
131
132 #define ESUCCESS 0
133 #undef mount_t
134 #undef vnode_t
135
136 #define COMPAT_ONLY
137
138 #define NATIVE_XATTR(VP) \
139 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
140
141 #if CONFIG_APPLEDOUBLE
142 static void xattrfile_remove(vnode_t dvp, const char *basename,
143 vfs_context_t ctx, int force);
144 static void xattrfile_setattr(vnode_t dvp, const char * basename,
145 struct vnode_attr * vap, vfs_context_t ctx);
146 #endif /* CONFIG_APPLEDOUBLE */
147
148 extern lck_rw_t rootvnode_rw_lock;
149
150 static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
151
152 KALLOC_TYPE_DEFINE(KT_VFS_CONTEXT, struct vfs_context, KT_PRIV_ACCT);
153
154 extern int fstypenumstart;
155 char vfs_typenum_arr[13];
156
157 LCK_GRP_DECLARE(typenum_arr_grp, "typenum array group");
158 LCK_MTX_DECLARE(vfs_typenum_mtx, &typenum_arr_grp);
159 /*
160 * vnode_setneedinactive
161 *
162 * Description: Indicate that when the last iocount on this vnode goes away,
163 * and the usecount is also zero, we should inform the filesystem
164 * via VNOP_INACTIVE.
165 *
166 * Parameters: vnode_t vnode to mark
167 *
168 * Returns: Nothing
169 *
170 * Notes: Notably used when we're deleting a file--we need not have a
171 * usecount, so VNOP_INACTIVE may not get called by anyone. We
172 * want it called when we drop our iocount.
173 */
void
vnode_setneedinactive(vnode_t vp)
{
	/* Drop any name-cache entries first; the vnode is headed for inactivation. */
	cache_purge(vp);

	/* Spin lock suffices: we only flip a flag bit under the vnode lock. */
	vnode_lock_spin(vp);
	vp->v_lflag |= VL_NEEDINACTIVE;
	vnode_unlock(vp);
}
183
184
185 /* ====================================================================== */
186 /* ************ EXTERNAL KERNEL APIS ********************************** */
187 /* ====================================================================== */
188
189 /*
190 * implementations of exported VFS operations
191 */
192 int
VFS_MOUNT(mount_t mp,vnode_t devvp,user_addr_t data,vfs_context_t ctx)193 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
194 {
195 int error;
196
197 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) {
198 return ENOTSUP;
199 }
200
201 if (vfs_context_is64bit(ctx)) {
202 if (vfs_64bitready(mp)) {
203 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
204 } else {
205 error = ENOTSUP;
206 }
207 } else {
208 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
209 }
210
211 return error;
212 }
213
214 int
VFS_START(mount_t mp,int flags,vfs_context_t ctx)215 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
216 {
217 int error;
218
219 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) {
220 return ENOTSUP;
221 }
222
223 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
224
225 return error;
226 }
227
228 int
VFS_UNMOUNT(mount_t mp,int flags,vfs_context_t ctx)229 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
230 {
231 int error;
232
233 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) {
234 return ENOTSUP;
235 }
236
237 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
238
239 return error;
240 }
241
242 /*
243 * Returns: 0 Success
244 * ENOTSUP Not supported
245 * <vfs_root>:ENOENT
246 * <vfs_root>:???
247 *
248 * Note: The return codes from the underlying VFS's root routine can't
249 * be fully enumerated here, since third party VFS authors may not
250 * limit their error returns to the ones documented here, even
251 * though this may result in some programs functioning incorrectly.
252 *
253 * The return codes documented above are those which may currently
254 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
255 * for a call to hfs_vget on the volume mount point, not including
256 * additional error codes which may be propagated from underlying
257 * routines called by hfs_vget.
258 */
259 int
VFS_ROOT(mount_t mp,struct vnode ** vpp,vfs_context_t ctx)260 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
261 {
262 int error;
263
264 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) {
265 return ENOTSUP;
266 }
267
268 if (ctx == NULL) {
269 ctx = vfs_context_current();
270 }
271
272 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
273
274 return error;
275 }
276
277 int
VFS_QUOTACTL(mount_t mp,int cmd,uid_t uid,caddr_t datap,vfs_context_t ctx)278 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
279 {
280 int error;
281
282 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) {
283 return ENOTSUP;
284 }
285
286 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
287
288 return error;
289 }
290
291 int
VFS_GETATTR(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)292 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
293 {
294 int error;
295
296 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) {
297 return ENOTSUP;
298 }
299
300 if (ctx == NULL) {
301 ctx = vfs_context_current();
302 }
303
304 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
305
306 return error;
307 }
308
309 int
VFS_SETATTR(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)310 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
311 {
312 int error;
313
314 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) {
315 return ENOTSUP;
316 }
317
318 if (ctx == NULL) {
319 ctx = vfs_context_current();
320 }
321
322 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
323
324 return error;
325 }
326
327 int
VFS_SYNC(mount_t mp,int flags,vfs_context_t ctx)328 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
329 {
330 int error;
331
332 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) {
333 return ENOTSUP;
334 }
335
336 if (ctx == NULL) {
337 ctx = vfs_context_current();
338 }
339
340 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
341
342 return error;
343 }
344
345 int
VFS_VGET(mount_t mp,ino64_t ino,struct vnode ** vpp,vfs_context_t ctx)346 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
347 {
348 int error;
349
350 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) {
351 return ENOTSUP;
352 }
353
354 if (ctx == NULL) {
355 ctx = vfs_context_current();
356 }
357
358 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
359
360 return error;
361 }
362
363 int
VFS_FHTOVP(mount_t mp,int fhlen,unsigned char * fhp,vnode_t * vpp,vfs_context_t ctx)364 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
365 {
366 int error;
367
368 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) {
369 return ENOTSUP;
370 }
371
372 if (ctx == NULL) {
373 ctx = vfs_context_current();
374 }
375
376 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
377
378 return error;
379 }
380
381 int
VFS_VPTOFH(struct vnode * vp,int * fhlenp,unsigned char * fhp,vfs_context_t ctx)382 VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
383 {
384 int error;
385
386 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) {
387 return ENOTSUP;
388 }
389
390 if (ctx == NULL) {
391 ctx = vfs_context_current();
392 }
393
394 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
395
396 return error;
397 }
398
399 int
VFS_IOCTL(struct mount * mp,u_long command,caddr_t data,int flags,vfs_context_t context)400 VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
401 int flags, vfs_context_t context)
402 {
403 if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl) {
404 return ENOTSUP;
405 }
406
407 return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
408 context ?: vfs_context_current());
409 }
410
411 int
VFS_VGET_SNAPDIR(mount_t mp,vnode_t * vpp,vfs_context_t ctx)412 VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
413 {
414 int error;
415
416 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0)) {
417 return ENOTSUP;
418 }
419
420 if (ctx == NULL) {
421 ctx = vfs_context_current();
422 }
423
424 error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
425
426 return error;
427 }
428
/* Returns the cached I/O throttle mask for the mount_t. */
uint64_t
vfs_throttle_mask(mount_t mp)
{
	return mp->mnt_throttle_mask;
}
435
/*
 * Copies the vfs type name of the mount_t into the caller's buffer.
 * The destination buffer must be at least MFSNAMELEN bytes.
 *
 * NOTE(review): strncpy does not NUL-terminate if vfc_name fills all
 * MFSNAMELEN bytes -- presumably vfc_name is always shorter; confirm.
 */
void
vfs_name(mount_t mp, char *buffer)
{
	strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
}
442
/* Returns the vfs type number registered for the mount_t's filesystem. */
int
vfs_typenum(mount_t mp)
{
	return mp->mnt_vtable->vfc_typenum;
}
449
/* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
void*
vfs_mntlabel(mount_t mp)
{
	/* Fetch the MAC label attached to this mount. */
	return (void*)mac_mount_label(mp);
}
456
/* Returns the unique identifier assigned to this mount. */
uint64_t
vfs_mount_id(mount_t mp)
{
	return mp->mnt_mount_id;
}
462
/*
 * Returns the externally visible mount flags: the command modifier flags
 * (MNT_CMDFLAGS) plus the user-visible flags (MNT_VISFLAGMASK).
 */
uint64_t
vfs_flags(mount_t mp)
{
	return (uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
}
469
470 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
471 void
vfs_setflags(mount_t mp,uint64_t flags)472 vfs_setflags(mount_t mp, uint64_t flags)
473 {
474 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
475
476 mount_lock(mp);
477 mp->mnt_flag |= lflags;
478 mount_unlock(mp);
479 }
480
481 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
482 void
vfs_clearflags(mount_t mp,uint64_t flags)483 vfs_clearflags(mount_t mp, uint64_t flags)
484 {
485 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
486
487 mount_lock(mp);
488 mp->mnt_flag &= ~lflags;
489 mount_unlock(mp);
490 }
491
/* Is the mount_t ronly and upgrade read/write requested? */
int
vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
{
	return (mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR);
}
498
499
/* Is the mount_t mounted read-only? Returns the raw MNT_RDONLY bit. */
int
vfs_isrdonly(mount_t mp)
{
	return mp->mnt_flag & MNT_RDONLY;
}
506
/* Is the mount_t mounted for filesystem synchronous writes? */
int
vfs_issynchronous(mount_t mp)
{
	return mp->mnt_flag & MNT_SYNCHRONOUS;
}
513
/* Is the mount_t mounted read/write? (i.e. MNT_RDONLY not set) */
int
vfs_isrdwr(mount_t mp)
{
	return (mp->mnt_flag & MNT_RDONLY) == 0;
}
520
521
/* Is mount_t marked for update (ie MNT_UPDATE)? Returns the raw bit. */
int
vfs_isupdate(mount_t mp)
{
	return mp->mnt_flag & MNT_UPDATE;
}
528
529
/* Is mount_t marked for reload? Requires both MNT_UPDATE and MNT_RELOAD. */
int
vfs_isreload(mount_t mp)
{
	return (mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD);
}
536
537 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
538 int
vfs_isforce(mount_t mp)539 vfs_isforce(mount_t mp)
540 {
541 if (mp->mnt_lflag & MNT_LFORCE) {
542 return 1;
543 } else {
544 return 0;
545 }
546 }
547
548 int
vfs_isunmount(mount_t mp)549 vfs_isunmount(mount_t mp)
550 {
551 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
552 return 1;
553 } else {
554 return 0;
555 }
556 }
557
558 int
vfs_64bitready(mount_t mp)559 vfs_64bitready(mount_t mp)
560 {
561 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
562 return 1;
563 } else {
564 return 0;
565 }
566 }
567
568
569 int
vfs_authcache_ttl(mount_t mp)570 vfs_authcache_ttl(mount_t mp)
571 {
572 if ((mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
573 return mp->mnt_authcache_ttl;
574 } else {
575 return CACHED_RIGHT_INFINITE_TTL;
576 }
577 }
578
/* Enable TTL-based caching of authorization results on this mount. */
void
vfs_setauthcache_ttl(mount_t mp, int ttl)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
	mp->mnt_authcache_ttl = ttl;
	mount_unlock(mp);
}
587
/* Disable TTL-based caching of authorization results on this mount. */
void
vfs_clearauthcache_ttl(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
	/*
	 * back to the default TTL value in case
	 * MNTK_AUTH_OPAQUE is set on this mount
	 */
	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
	mount_unlock(mp);
}
600
601 int
vfs_authopaque(mount_t mp)602 vfs_authopaque(mount_t mp)
603 {
604 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE)) {
605 return 1;
606 } else {
607 return 0;
608 }
609 }
610
611 int
vfs_authopaqueaccess(mount_t mp)612 vfs_authopaqueaccess(mount_t mp)
613 {
614 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS)) {
615 return 1;
616 } else {
617 return 0;
618 }
619 }
620
/* Mark the mount as performing its own (opaque) authorization. */
void
vfs_setauthopaque(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
	mount_unlock(mp);
}
628
/* Mark the mount as performing its own (opaque) access checks. */
void
vfs_setauthopaqueaccess(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
	mount_unlock(mp);
}
636
/* Clear the opaque-authorization marking on the mount. */
void
vfs_clearauthopaque(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
	mount_unlock(mp);
}
644
/* Clear the opaque-access-check marking on the mount. */
void
vfs_clearauthopaqueaccess(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
	mount_unlock(mp);
}
652
/* Mark the mount as supporting extended security (ACLs). */
void
vfs_setextendedsecurity(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
	mount_unlock(mp);
}
660
/* Mark the mount as a system mount (MNTK_SYSTEM). */
void
vfs_setmntsystem(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_SYSTEM;
	mount_unlock(mp);
}
668
/* Mark the mount as holding system data (MNTK_SYSTEMDATA). */
void
vfs_setmntsystemdata(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_SYSTEMDATA;
	mount_unlock(mp);
}
676
/* Mark the mount as a swap mount; swap mounts are also system mounts. */
void
vfs_setmntswap(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= (MNTK_SYSTEM | MNTK_SWAP_MOUNT);
	mount_unlock(mp);
}
684
/* Clear the extended-security (ACL) marking on the mount. */
void
vfs_clearextendedsecurity(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
	mount_unlock(mp);
}
692
/* Mark the mount as unsuitable for backing swap (MNTK_NOSWAP). */
void
vfs_setnoswap(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_NOSWAP;
	mount_unlock(mp);
}
700
/* Clear the no-swap marking on the mount. */
void
vfs_clearnoswap(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag &= ~MNTK_NOSWAP;
	mount_unlock(mp);
}
708
/* Does the mount support extended security (ACLs)? Returns the raw bit. */
int
vfs_extendedsecurity(mount_t mp)
{
	return mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY;
}
714
/* Returns the max size of short symlink in this mount_t. */
uint32_t
vfs_maxsymlen(mount_t mp)
{
	return mp->mnt_maxsymlinklen;
}
721
/* Sets the max size of short symlink on mount_t (no locking required). */
void
vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
{
	mp->mnt_maxsymlinklen = symlen;
}
728
729 boolean_t
vfs_is_basesystem(mount_t mp)730 vfs_is_basesystem(mount_t mp)
731 {
732 return ((mp->mnt_supl_kern_flag & MNTK_SUPL_BASESYSTEM) == 0) ? false : true;
733 }
734
/* Returns a pointer to the read-only vfs_statfs associated with mount_t. */
struct vfsstatfs *
vfs_statfs(mount_t mp)
{
	return &mp->mnt_vfsstat;
}
741
742 int
vfs_getattr(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)743 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
744 {
745 int error;
746
747 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) {
748 return error;
749 }
750
751 /*
752 * If we have a filesystem create time, use it to default some others.
753 */
754 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
755 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time)) {
756 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
757 }
758 }
759
760 return 0;
761 }
762
763 int
vfs_setattr(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)764 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
765 {
766 int error;
767
768 /*
769 * with a read-only system volume, we need to allow rename of the root volume
770 * even if it's read-only. Don't return EROFS here if setattr changes only
771 * the volume name
772 */
773 if (vfs_isrdonly(mp) &&
774 !((strcmp(mp->mnt_vfsstat.f_fstypename, "apfs") == 0) && (vfa->f_active == VFSATTR_f_vol_name))) {
775 return EROFS;
776 }
777
778 error = VFS_SETATTR(mp, vfa, ctx);
779
780 /*
781 * If we had alternate ways of setting vfs attributes, we'd
782 * fall back here.
783 */
784
785 return error;
786 }
787
/* Returns the filesystem-private data handle stored in mount_t. */
void *
vfs_fsprivate(mount_t mp)
{
	return mp->mnt_data;
}
794
/* Sets the filesystem-private data handle in mount_t (under the mount lock). */
void
vfs_setfsprivate(mount_t mp, void *mntdata)
{
	mount_lock(mp);
	mp->mnt_data = mntdata;
	mount_unlock(mp);
}
803
/* Query whether the mount point supports native extended attributes. */
int
vfs_nativexattrs(mount_t mp)
{
	return mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS;
}
810
/*
 * return the block size of the underlying
 * device associated with mount_t
 */
int
vfs_devblocksize(mount_t mp)
{
	return mp->mnt_devblocksize;
}
820
821 /*
822 * Returns vnode with an iocount that must be released with vnode_put()
823 */
824 vnode_t
vfs_vnodecovered(mount_t mp)825 vfs_vnodecovered(mount_t mp)
826 {
827 vnode_t vp = mp->mnt_vnodecovered;
828 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
829 return NULL;
830 } else {
831 return vp;
832 }
833 }
834
835 /*
836 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
837 * The iocount must be released with vnode_put(). Note that this KPI is subtle
838 * with respect to the validity of using this device vnode for anything substantial
839 * (which is discouraged). If commands are sent to the device driver without
840 * taking proper steps to ensure that the device is still open, chaos may ensue.
841 * Similarly, this routine should only be called if there is some guarantee that
842 * the mount itself is still valid.
843 */
844 vnode_t
vfs_devvp(mount_t mp)845 vfs_devvp(mount_t mp)
846 {
847 vnode_t vp = mp->mnt_devvp;
848
849 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
850 return vp;
851 }
852
853 return NULLVP;
854 }
855
/*
 * return the io attributes associated with mount_t
 *
 * A NULL mount yields conservative defaults (MAXPHYS transfer sizes,
 * 32 segments, DEV_BSIZE blocks); otherwise the cached per-mount values
 * are copied out.  The reserved fields are always cleared.
 */
void
vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
{
	ioattrp->io_reserved[0] = NULL;
	ioattrp->io_reserved[1] = NULL;
	if (mp == NULL) {
		/* No mount: report defaults. */
		ioattrp->io_maxreadcnt = MAXPHYS;
		ioattrp->io_maxwritecnt = MAXPHYS;
		ioattrp->io_segreadcnt = 32;
		ioattrp->io_segwritecnt = 32;
		ioattrp->io_maxsegreadsize = MAXPHYS;
		ioattrp->io_maxsegwritesize = MAXPHYS;
		ioattrp->io_devblocksize = DEV_BSIZE;
		ioattrp->io_flags = 0;
		ioattrp->io_max_swappin_available = 0;
	} else {
		/* Copy out the values cached on the mount. */
		ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
		ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
		ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
		ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
		ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
		ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
		ioattrp->io_devblocksize = mp->mnt_devblocksize;
		ioattrp->io_flags = mp->mnt_ioflags;
		ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
	}
}
886
887
/*
 * set the IO attributes associated with mount_t
 *
 * Copies every field of the supplied vfsioattr onto the mount; a NULL
 * mount is silently ignored.
 */
void
vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
{
	if (mp == NULL) {
		return;
	}
	mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
	mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
	mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
	mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
	mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
	mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
	mp->mnt_devblocksize = ioattrp->io_devblocksize;
	mp->mnt_ioflags = ioattrp->io_flags;
	mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
}
907
908 /*
909 * Add a new filesystem into the kernel specified in passed in
910 * vfstable structure. It fills in the vnode
911 * dispatch vector that is to be passed to when vnodes are created.
912 * It returns a handle which is to be used to when the FS is to be removed
913 */
914 typedef int (*PFI)(void *);
915 extern int vfs_opv_numops;
916 errno_t
vfs_fsadd(struct vfs_fsentry * vfe,vfstable_t * handle)917 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
918 {
919 struct vfstable *newvfstbl = NULL;
920 int i, j;
921 int(***opv_desc_vector_p)(void *);
922 int(**opv_desc_vector)(void *);
923 const struct vnodeopv_entry_desc *opve_descp;
924 int desccount;
925 int descsize;
926 PFI *descptr;
927
928 /*
929 * This routine is responsible for all the initialization that would
930 * ordinarily be done as part of the system startup;
931 */
932
933 if (vfe == (struct vfs_fsentry *)0) {
934 return EINVAL;
935 }
936
937 desccount = vfe->vfe_vopcnt;
938 if ((desccount <= 0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
939 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) {
940 return EINVAL;
941 }
942
943 /* Non-threadsafe filesystems are not supported */
944 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
945 return EINVAL;
946 }
947
948 newvfstbl = kalloc_type(struct vfstable, Z_WAITOK | Z_ZERO);
949 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
950 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
951 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) {
952 int tmp;
953 int found = 0;
954 lck_mtx_lock(&vfs_typenum_mtx);
955 for (tmp = fstypenumstart; tmp < OID_AUTO_START; tmp++) {
956 if (isclr(vfs_typenum_arr, tmp)) {
957 newvfstbl->vfc_typenum = tmp;
958 setbit(vfs_typenum_arr, tmp);
959 found = 1;
960 break;
961 }
962 }
963 if (!found) {
964 lck_mtx_unlock(&vfs_typenum_mtx);
965 return EINVAL;
966 }
967 if (maxvfstypenum < OID_AUTO_START) {
968 /* getvfsbyname checks up to but not including maxvfstypenum */
969 maxvfstypenum = newvfstbl->vfc_typenum + 1;
970 }
971 lck_mtx_unlock(&vfs_typenum_mtx);
972 } else {
973 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
974 lck_mtx_lock(&vfs_typenum_mtx);
975 setbit(vfs_typenum_arr, newvfstbl->vfc_typenum);
976 if (newvfstbl->vfc_typenum >= maxvfstypenum) {
977 maxvfstypenum = newvfstbl->vfc_typenum + 1;
978 }
979 lck_mtx_unlock(&vfs_typenum_mtx);
980 }
981
982
983 newvfstbl->vfc_refcount = 0;
984 newvfstbl->vfc_flags = 0;
985 newvfstbl->vfc_mountroot = NULL;
986 newvfstbl->vfc_next = NULL;
987 newvfstbl->vfc_vfsflags = 0;
988 if (vfe->vfe_flags & VFS_TBL64BITREADY) {
989 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
990 }
991 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2) {
992 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
993 }
994 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2) {
995 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
996 }
997 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) {
998 newvfstbl->vfc_flags |= MNT_LOCAL;
999 }
1000 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) {
1001 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
1002 } else {
1003 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
1004 }
1005
1006 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR) {
1007 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1008 }
1009 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT) {
1010 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
1011 }
1012 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED) {
1013 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
1014 }
1015 if (vfe->vfe_flags & VFS_TBLNOMACLABEL) {
1016 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
1017 }
1018 if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) {
1019 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
1020 }
1021 if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME) {
1022 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
1023 }
1024 if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT) {
1025 newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
1026 }
1027
1028 /*
1029 * Allocate and init the vectors.
1030 * Also handle backwards compatibility.
1031 *
1032 * We allocate one large block to hold all <desccount>
1033 * vnode operation vectors stored contiguously.
1034 */
1035 /* XXX - shouldn't be M_TEMP */
1036
1037 descsize = desccount * vfs_opv_numops;
1038 descptr = kalloc_type(PFI, descsize, Z_WAITOK | Z_ZERO);
1039
1040 newvfstbl->vfc_descptr = descptr;
1041 newvfstbl->vfc_descsize = descsize;
1042
1043 newvfstbl->vfc_sysctl = NULL;
1044
1045 for (i = 0; i < desccount; i++) {
1046 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1047 /*
1048 * Fill in the caller's pointer to the start of the i'th vector.
1049 * They'll need to supply it when calling vnode_create.
1050 */
1051 opv_desc_vector = descptr + i * vfs_opv_numops;
1052 *opv_desc_vector_p = opv_desc_vector;
1053
1054 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
1055 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
1056
1057 /* Silently skip known-disabled operations */
1058 if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
1059 printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
1060 vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
1061 continue;
1062 }
1063
1064 /*
1065 * Sanity check: is this operation listed
1066 * in the list of operations? We check this
1067 * by seeing if its offset is zero. Since
1068 * the default routine should always be listed
1069 * first, it should be the only one with a zero
1070 * offset. Any other operation with a zero
1071 * offset is probably not listed in
1072 * vfs_op_descs, and so is probably an error.
1073 *
1074 * A panic here means the layer programmer
1075 * has committed the all-too common bug
1076 * of adding a new operation to the layer's
1077 * list of vnode operations but
1078 * not adding the operation to the system-wide
1079 * list of supported operations.
1080 */
1081 if (opve_descp->opve_op->vdesc_offset == 0 &&
1082 opve_descp->opve_op != VDESC(vnop_default)) {
1083 printf("vfs_fsadd: operation %s not listed in %s.\n",
1084 opve_descp->opve_op->vdesc_name,
1085 "vfs_op_descs");
1086 panic("vfs_fsadd: bad operation");
1087 }
1088 /*
1089 * Fill in this entry.
1090 */
1091 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1092 opve_descp->opve_impl;
1093 }
1094
1095 /*
1096 * Finally, go back and replace unfilled routines
1097 * with their default. (Sigh, an O(n^3) algorithm. I
1098 * could make it better, but that'd be work, and n is small.)
1099 */
1100 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1101
1102 /*
1103 * Force every operations vector to have a default routine.
1104 */
1105 opv_desc_vector = *opv_desc_vector_p;
1106 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
1107 panic("vfs_fsadd: operation vector without default routine.");
1108 }
1109 for (j = 0; j < vfs_opv_numops; j++) {
1110 if (opv_desc_vector[j] == NULL) {
1111 opv_desc_vector[j] =
1112 opv_desc_vector[VOFFSET(vnop_default)];
1113 }
1114 }
1115 } /* end of each vnodeopv_desc parsing */
1116
1117 *handle = vfstable_add(newvfstbl);
1118
1119 if (newvfstbl->vfc_vfsops->vfs_init) {
1120 struct vfsconf vfsc;
1121 bzero(&vfsc, sizeof(struct vfsconf));
1122 vfsc.vfc_reserved1 = 0;
1123 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1124 vfsc.vfc_typenum = (*handle)->vfc_typenum;
1125 vfsc.vfc_refcount = (*handle)->vfc_refcount;
1126 vfsc.vfc_flags = (*handle)->vfc_flags;
1127 vfsc.vfc_reserved2 = 0;
1128 vfsc.vfc_reserved3 = 0;
1129
1130 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1131 }
1132
1133 kfree_type(struct vfstable, newvfstbl);
1134
1135 return 0;
1136 }
1137
1138 /*
1139 * Removes the filesystem from kernel.
1140 * The argument passed in is the handle that was given when
1141 * file system was added
1142 */
1143 errno_t
vfs_fsremove(vfstable_t handle)1144 vfs_fsremove(vfstable_t handle)
1145 {
1146 struct vfstable * vfstbl = (struct vfstable *)handle;
1147 void *old_desc = NULL;
1148 size_t descsize = 0;
1149 errno_t err;
1150
1151 /* Preflight check for any mounts */
1152 mount_list_lock();
1153 if (vfstbl->vfc_refcount != 0) {
1154 mount_list_unlock();
1155 return EBUSY;
1156 }
1157
1158 /* Free the spot in vfs_typenum_arr */
1159 lck_mtx_lock(&vfs_typenum_mtx);
1160 clrbit(vfs_typenum_arr, handle->vfc_typenum);
1161 if (maxvfstypenum == handle->vfc_typenum) {
1162 maxvfstypenum--;
1163 }
1164 lck_mtx_unlock(&vfs_typenum_mtx);
1165
1166 /*
1167 * save the old descriptor; the free cannot occur unconditionally,
1168 * since vfstable_del() may fail.
1169 */
1170 if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1171 old_desc = vfstbl->vfc_descptr;
1172 descsize = vfstbl->vfc_descsize;
1173 }
1174 err = vfstable_del(vfstbl);
1175
1176 mount_list_unlock();
1177
1178 /* free the descriptor if the delete was successful */
1179 if (err == 0) {
1180 kfree_type(PFI, descsize, old_desc);
1181 }
1182
1183 return err;
1184 }
1185
1186 void
vfs_setowner(mount_t mp,uid_t uid,gid_t gid)1187 vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
1188 {
1189 mp->mnt_fsowner = uid;
1190 mp->mnt_fsgroup = gid;
1191 }
1192
1193 /*
1194 * Callers should be careful how they use this; accessing
1195 * mnt_last_write_completed_timestamp is not thread-safe. Writing to
1196 * it isn't either. Point is: be prepared to deal with strange values
1197 * being returned.
1198 */
1199 uint64_t
vfs_idle_time(mount_t mp)1200 vfs_idle_time(mount_t mp)
1201 {
1202 if (mp->mnt_pending_write_size) {
1203 return 0;
1204 }
1205
1206 struct timeval now;
1207
1208 microuptime(&now);
1209
1210 return (now.tv_sec
1211 - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
1212 + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec;
1213 }
1214
1215 /*
1216 * vfs_context_create_with_proc() takes a reference on an arbitrary
1217 * thread in the process. To distinguish this reference-counted thread
1218 * from the usual non-reference-counted thread, we set the least significant
1219 * bit of of vc_thread.
1220 */
1221 #define VFS_CONTEXT_THREAD_IS_REFERENCED(ctx) \
1222 (!!(((uintptr_t)(ctx)->vc_thread) & 1UL))
1223
1224 #define VFS_CONTEXT_SET_REFERENCED_THREAD(ctx, thr) \
1225 (ctx)->vc_thread = (thread_t)(((uintptr_t)(thr)) | 1UL)
1226
1227 #define VFS_CONTEXT_GET_THREAD(ctx) \
1228 ((thread_t)(((uintptr_t)(ctx)->vc_thread) & ~1UL))
1229
1230 int
vfs_context_pid(vfs_context_t ctx)1231 vfs_context_pid(vfs_context_t ctx)
1232 {
1233 return proc_pid(vfs_context_proc(ctx));
1234 }
1235
1236 int
vfs_context_copy_audit_token(vfs_context_t ctx,audit_token_t * token)1237 vfs_context_copy_audit_token(vfs_context_t ctx, audit_token_t *token)
1238 {
1239 kern_return_t err;
1240 task_t task;
1241 mach_msg_type_number_t info_size = TASK_AUDIT_TOKEN_COUNT;
1242
1243 task = vfs_context_task(ctx);
1244
1245 if (task == NULL) {
1246 // Not sure how this would happen; we are supposed to be
1247 // in the middle of using the context. Regardless, don't
1248 // wander off a NULL pointer.
1249 return ESRCH;
1250 }
1251
1252 err = task_info(task, TASK_AUDIT_TOKEN, (integer_t *)token, &info_size);
1253 return (err) ? ESRCH : 0;
1254 }
1255
/* Delegate the superuser check for the context's credential to suser(). */
int
vfs_context_suser(vfs_context_t ctx)
{
	return suser(ctx->vc_ucred, NULL);
}
1261
1262 /*
1263 * Return bit field of signals posted to all threads in the context's process.
1264 *
1265 * XXX Signals should be tied to threads, not processes, for most uses of this
1266 * XXX call.
1267 */
1268 int
vfs_context_issignal(vfs_context_t ctx,sigset_t mask)1269 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1270 {
1271 proc_t p = vfs_context_proc(ctx);
1272 if (p) {
1273 return proc_pendingsignals(p, mask);
1274 }
1275 return 0;
1276 }
1277
1278 int
vfs_context_is64bit(vfs_context_t ctx)1279 vfs_context_is64bit(vfs_context_t ctx)
1280 {
1281 uthread_t uth;
1282 thread_t t;
1283
1284 if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1285 uth = get_bsdthread_info(t);
1286 } else {
1287 uth = current_uthread();
1288 }
1289 return uthread_is64bit(uth);
1290 }
1291
1292 boolean_t
vfs_context_can_resolve_triggers(vfs_context_t ctx)1293 vfs_context_can_resolve_triggers(vfs_context_t ctx)
1294 {
1295 proc_t proc = vfs_context_proc(ctx);
1296
1297 if (proc) {
1298 if (proc->p_vfs_iopolicy &
1299 P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE) {
1300 return false;
1301 }
1302 return true;
1303 }
1304 return false;
1305 }
1306
1307 boolean_t
vfs_context_can_break_leases(vfs_context_t ctx)1308 vfs_context_can_break_leases(vfs_context_t ctx)
1309 {
1310 proc_t proc = vfs_context_proc(ctx);
1311
1312 if (proc) {
1313 /*
1314 * We do not have a separate I/O policy for this,
1315 * because the scenarios where we would not want
1316 * local file lease breaks are currently exactly
1317 * the same as where we would not want dataless
1318 * file materialization (mainly, system daemons
1319 * passively snooping file activity).
1320 */
1321 if (proc->p_vfs_iopolicy &
1322 P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES) {
1323 return true;
1324 }
1325 return false;
1326 }
1327 return true;
1328 }
1329
1330 /*
1331 * vfs_context_proc
1332 *
1333 * Description: Given a vfs_context_t, return the proc_t associated with it.
1334 *
1335 * Parameters: vfs_context_t The context to use
1336 *
1337 * Returns: proc_t The process for this context
1338 *
1339 * Notes: This function will return the current_proc() if any of the
1340 * following conditions are true:
1341 *
1342 * o The supplied context pointer is NULL
1343 * o There is no Mach thread associated with the context
1344 * o There is no Mach task associated with the Mach thread
1345 * o There is no proc_t associated with the Mach task
1346 * o The proc_t has no per process open file table
1347 *
1348 * This causes this function to return a value matching as
1349 * closely as possible the previous behaviour.
1350 */
proc_t
vfs_context_proc(vfs_context_t ctx)
{
	proc_t proc = NULL;
	thread_t t;

	/* GET_THREAD strips the referenced-thread tag bit before dereference. */
	if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
		proc = (proc_t)get_bsdthreadtask_info(t);
	}

	/* Fall back to the caller's process when no proc can be derived. */
	return proc == NULL ? current_proc() : proc;
}
1363
1364 /*
1365 * vfs_context_get_special_port
1366 *
1367 * Description: Return the requested special port from the task associated
1368 * with the given context.
1369 *
1370 * Parameters: vfs_context_t The context to use
1371 * int Index of special port
1372 * ipc_port_t * Pointer to returned port
1373 *
1374 * Returns: kern_return_t see task_get_special_port()
1375 */
1376 kern_return_t
vfs_context_get_special_port(vfs_context_t ctx,int which,ipc_port_t * portp)1377 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1378 {
1379 return task_get_special_port(vfs_context_task(ctx), which, portp);
1380 }
1381
1382 /*
1383 * vfs_context_set_special_port
1384 *
1385 * Description: Set the requested special port in the task associated
1386 * with the given context.
1387 *
1388 * Parameters: vfs_context_t The context to use
1389 * int Index of special port
1390 * ipc_port_t New special port
1391 *
1392 * Returns: kern_return_t see task_set_special_port_internal()
1393 */
1394 kern_return_t
vfs_context_set_special_port(vfs_context_t ctx,int which,ipc_port_t port)1395 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1396 {
1397 return task_set_special_port_internal(vfs_context_task(ctx),
1398 which, port);
1399 }
1400
1401 /*
1402 * vfs_context_thread
1403 *
1404 * Description: Return the Mach thread associated with a vfs_context_t
1405 *
1406 * Parameters: vfs_context_t The context to use
1407 *
1408 * Returns: thread_t The thread for this context, or
1409 * NULL, if there is not one.
1410 *
1411 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1412 * as a result of a static vfs_context_t declaration in a function
1413 * and will result in this function returning NULL.
1414 *
1415 * This is intentional; this function should NOT return the
1416 * current_thread() in this case.
1417 */
thread_t
vfs_context_thread(vfs_context_t ctx)
{
	/* The macro masks off the low "referenced thread" tag bit. */
	return VFS_CONTEXT_GET_THREAD(ctx);
}
1423
1424 /*
1425 * vfs_context_task
1426 *
1427 * Description: Return the Mach task associated with a vfs_context_t
1428 *
1429 * Parameters: vfs_context_t The context to use
1430 *
1431 * Returns: task_t The task for this context, or
1432 * NULL, if there is not one.
1433 *
1434 * Notes: NULL task_t's are legal, but discouraged. They occur only
1435 * as a result of a static vfs_context_t declaration in a function
1436 * and will result in this function returning NULL.
1437 *
1438 * This is intentional; this function should NOT return the
1439 * task associated with current_thread() in this case.
1440 */
1441 task_t
vfs_context_task(vfs_context_t ctx)1442 vfs_context_task(vfs_context_t ctx)
1443 {
1444 task_t task = NULL;
1445 thread_t t;
1446
1447 if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1448 task = get_threadtask(t);
1449 }
1450
1451 return task;
1452 }
1453
1454 /*
1455 * vfs_context_cwd
1456 *
1457 * Description: Returns a reference on the vnode for the current working
1458 * directory for the supplied context
1459 *
1460 * Parameters: vfs_context_t The context to use
1461 *
1462 * Returns: vnode_t The current working directory
1463 * for this context
1464 *
1465 * Notes: The function first attempts to obtain the current directory
1466 * from the thread, and if it is not present there, falls back
1467 * to obtaining it from the process instead. If it can't be
1468 * obtained from either place, we return NULLVP.
1469 */
vnode_t
vfs_context_cwd(vfs_context_t ctx)
{
	vnode_t cwd = NULLVP;
	thread_t t;

	if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
		uthread_t uth = get_bsdthread_info(t);
		proc_t proc;

		/*
		 * Get the cwd from the thread; if there isn't one, get it
		 * from the process, instead.
		 */
		if ((cwd = uth->uu_cdir) == NULLVP &&
		    (proc = (proc_t)get_bsdthreadtask_info(t)) != NULL) {
			cwd = proc->p_fd.fd_cdir;
		}
	}

	/*
	 * NOTE(review): no iocount/usecount is taken here despite the header's
	 * "returns a reference" wording — confirm callers hold the context stable.
	 */
	return cwd;
}
1492
1493 /*
1494 * vfs_context_create
1495 *
1496 * Description: Allocate and initialize a new context.
1497 *
1498 * Parameters: vfs_context_t: Context to copy, or NULL for new
1499 *
1500 * Returns: Pointer to new context
1501 *
1502 * Notes: Copy cred and thread from argument, if available; else
1503 * initialize with current thread and new cred. Returns
1504 * with a reference held on the credential.
1505 */
vfs_context_t
vfs_context_create(vfs_context_t ctx)
{
	vfs_context_t newcontext;

	newcontext = zalloc_flags(KT_VFS_CONTEXT, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	/* Default to the caller's current context when none was supplied. */
	if (ctx == NULL) {
		ctx = vfs_context_current();
	}
	*newcontext = *ctx;
	/* The struct copy shares the credential; take our own reference on it. */
	if (IS_VALID_CRED(ctx->vc_ucred)) {
		kauth_cred_ref(ctx->vc_ucred);
	}

	return newcontext;
}
1523
1524 /*
1525 * vfs_context_create_with_proc
1526 *
1527 * Description: Create a new context with credentials taken from
1528 * the specified proc.
1529 *
1530 * Parameters: proc_t: The process whose crendials to use.
1531 *
1532 * Returns: Pointer to new context.
1533 *
1534 * Notes: The context will also take a reference on an arbitrary
1535 * thread in the process as well as the process's credentials.
1536 */
1537 vfs_context_t
vfs_context_create_with_proc(proc_t p)1538 vfs_context_create_with_proc(proc_t p)
1539 {
1540 vfs_context_t newcontext;
1541 thread_t thread;
1542 kauth_cred_t cred;
1543
1544 if (p == current_proc()) {
1545 return vfs_context_create(NULL);
1546 }
1547
1548 newcontext = zalloc_flags(KT_VFS_CONTEXT, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1549
1550 proc_lock(p);
1551 thread = proc_thread(p); /* XXX */
1552 if (thread != NULL) {
1553 thread_reference(thread);
1554 }
1555 proc_unlock(p);
1556
1557 cred = kauth_cred_proc_ref(p);
1558
1559 VFS_CONTEXT_SET_REFERENCED_THREAD(newcontext, thread);
1560 newcontext->vc_ucred = cred;
1561
1562 return newcontext;
1563 }
1564
vfs_context_t
vfs_context_current(void)
{
	/*
	 * The current thread's thread_ro is laid out so that (tro_owner,
	 * tro_cred) overlay (vc_thread, vc_ucred); the asserts pin that
	 * invariant, letting us reinterpret it as a vfs_context.
	 */
	static_assert(offsetof(struct thread_ro, tro_owner) ==
	    offsetof(struct vfs_context, vc_thread));
	static_assert(offsetof(struct thread_ro, tro_cred) ==
	    offsetof(struct vfs_context, vc_ucred));

	return (vfs_context_t)current_thread_ro();
}
1575
/* Return the statically-allocated kernel context, vfs_context0. */
vfs_context_t
vfs_context_kernel(void)
{
	return &vfs_context0;
}
1581
/* Release a context from vfs_context_create*(); NULL is a no-op. Always 0. */
int
vfs_context_rele(vfs_context_t ctx)
{
	if (ctx) {
		if (IS_VALID_CRED(ctx->vc_ucred)) {
			kauth_cred_unref(&ctx->vc_ucred);
		}
		/* Contexts from vfs_context_create_with_proc() hold a thread ref. */
		if (VFS_CONTEXT_THREAD_IS_REFERENCED(ctx)) {
			assert(VFS_CONTEXT_GET_THREAD(ctx) != NULL);
			thread_deallocate(VFS_CONTEXT_GET_THREAD(ctx));
		}
		zfree(KT_VFS_CONTEXT, ctx);
	}
	return 0;
}
1597
1598
/* Return the context's credential; no reference is taken. */
kauth_cred_t
vfs_context_ucred(vfs_context_t ctx)
{
	return ctx->vc_ucred;
}
1604
1605 /*
1606 * Return true if the context is owned by the superuser.
1607 */
1608 int
vfs_context_issuser(vfs_context_t ctx)1609 vfs_context_issuser(vfs_context_t ctx)
1610 {
1611 return kauth_cred_issuser(vfs_context_ucred(ctx));
1612 }
1613
/* True only for the static kernel context (vfs_context0). */
int
vfs_context_iskernel(vfs_context_t ctx)
{
	return ctx == &vfs_context0;
}
1619
1620 /*
1621 * Given a context, for all fields of vfs_context_t which
1622 * are not held with a reference, set those fields to the
1623 * values for the current execution context.
1624 *
1625 * Returns: 0 for success, nonzero for failure
1626 *
1627 * The intended use is:
1628 * 1. vfs_context_create() gets the caller a context
1629 * 2. vfs_context_bind() sets the unrefcounted data
1630 * 3. vfs_context_rele() releases the context
1631 *
1632 */
int
vfs_context_bind(vfs_context_t ctx)
{
	/* Must not clobber a referenced (tagged) thread from create_with_proc(). */
	assert(!VFS_CONTEXT_THREAD_IS_REFERENCED(ctx));
	ctx->vc_thread = current_thread();
	return 0;
}
1640
1641 int
vfs_set_thread_fs_private(uint8_t tag,uint64_t fs_private)1642 vfs_set_thread_fs_private(uint8_t tag, uint64_t fs_private)
1643 {
1644 struct uthread *ut;
1645
1646 if (tag != FS_PRIVATE_TAG_APFS) {
1647 return ENOTSUP;
1648 }
1649
1650 ut = current_uthread();
1651 ut->t_fs_private = fs_private;
1652
1653 return 0;
1654 }
1655
1656 int
vfs_get_thread_fs_private(uint8_t tag,uint64_t * fs_private)1657 vfs_get_thread_fs_private(uint8_t tag, uint64_t *fs_private)
1658 {
1659 struct uthread *ut;
1660
1661 if (tag != FS_PRIVATE_TAG_APFS) {
1662 return ENOTSUP;
1663 }
1664
1665 ut = current_uthread();
1666 *fs_private = ut->t_fs_private;
1667
1668 return 0;
1669 }
1670
1671 int
vfs_isswapmount(mount_t mnt)1672 vfs_isswapmount(mount_t mnt)
1673 {
1674 return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
1675 }
1676
1677 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1678
1679
1680 /*
1681 * Convert between vnode types and inode formats (since POSIX.1
1682 * defines mode word of stat structure in terms of inode formats).
1683 */
1684 enum vtype
vnode_iftovt(int mode)1685 vnode_iftovt(int mode)
1686 {
1687 return iftovt_tab[((mode) & S_IFMT) >> 12];
1688 }
1689
1690 int
vnode_vttoif(enum vtype indx)1691 vnode_vttoif(enum vtype indx)
1692 {
1693 return vttoif_tab[(int)(indx)];
1694 }
1695
/* Combine a vnode type and permission bits into an inode-format mode word. */
int
vnode_makeimode(int indx, int mode)
{
	int fmt = VTTOIF(indx);

	return (int)(fmt | mode);
}
1701
1702
1703 /*
1704 * vnode manipulation functions.
1705 */
1706
1707 /* returns system root vnode iocount; It should be released using vnode_put() */
1708 vnode_t
vfs_rootvnode(void)1709 vfs_rootvnode(void)
1710 {
1711 vnode_t vp = NULLVP;
1712
1713 if (rootvnode) {
1714 lck_rw_lock_shared(&rootvnode_rw_lock);
1715 vp = rootvnode;
1716 if (vp && (vnode_get(vp) != 0)) {
1717 vp = NULLVP;
1718 }
1719 lck_rw_unlock_shared(&rootvnode_rw_lock);
1720 }
1721
1722 return vp;
1723 }
1724
/* Return the vnode's id (generation), used to detect reuse. */
uint32_t
vnode_vid(vnode_t vp)
{
	return (uint32_t)(vp->v_id);
}
1730
/* Return the mount the vnode lives on. */
mount_t
vnode_mount(vnode_t vp)
{
	return vp->v_mount;
}
1736
#if CONFIG_IOSCHED
/* Return the device vnode backing vp's mount, or NULL if unmounted. */
vnode_t
vnode_mountdevvp(vnode_t vp)
{
	mount_t mp = vp->v_mount;

	return (mp != NULL) ? mp->mnt_devvp : (vnode_t)0;
}
#endif
1748
1749 boolean_t
vnode_isonexternalstorage(vnode_t vp)1750 vnode_isonexternalstorage(vnode_t vp)
1751 {
1752 if (vp) {
1753 if (vp->v_mount) {
1754 if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_PERIPHERAL_DRIVE) {
1755 return TRUE;
1756 }
1757 }
1758 }
1759 return FALSE;
1760 }
1761
1762 boolean_t
vnode_isonssd(vnode_t vp)1763 vnode_isonssd(vnode_t vp)
1764 {
1765 if (vp) {
1766 if (vp->v_mount) {
1767 if (vp->v_mount->mnt_kern_flag & MNTK_SSD) {
1768 return TRUE;
1769 }
1770 }
1771 }
1772 return FALSE;
1773 }
1774
1775 mount_t
vnode_mountedhere(vnode_t vp)1776 vnode_mountedhere(vnode_t vp)
1777 {
1778 mount_t mp;
1779
1780 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1781 (mp->mnt_vnodecovered == vp)) {
1782 return mp;
1783 } else {
1784 return (mount_t)NULL;
1785 }
1786 }
1787
/* returns the vnode type (VREG, VDIR, ...) of vnode_t */
enum vtype
vnode_vtype(vnode_t vp)
{
	return vp->v_type;
}
1794
/* returns the FS-specific private node (v_data) saved in the vnode */
void *
vnode_fsnode(vnode_t vp)
{
	return vp->v_data;
}
1801
/* Detach the FS-specific private node from the vnode. */
void
vnode_clearfsnode(vnode_t vp)
{
	vp->v_data = NULL;
}
1807
/* Return the raw device number (v_rdev) of a special vnode. */
dev_t
vnode_specrdev(vnode_t vp)
{
	return vp->v_rdev;
}
1813
1814
1815 /* Accessor functions */
1816 /* is vnode_t a root vnode */
1817 int
vnode_isvroot(vnode_t vp)1818 vnode_isvroot(vnode_t vp)
1819 {
1820 return (vp->v_flag & VROOT)? 1 : 0;
1821 }
1822
1823 /* is vnode_t a system vnode */
1824 int
vnode_issystem(vnode_t vp)1825 vnode_issystem(vnode_t vp)
1826 {
1827 return (vp->v_flag & VSYSTEM)? 1 : 0;
1828 }
1829
1830 /* is vnode_t a swap file vnode */
1831 int
vnode_isswap(vnode_t vp)1832 vnode_isswap(vnode_t vp)
1833 {
1834 return (vp->v_flag & VSWAP)? 1 : 0;
1835 }
1836
1837 /* is vnode_t a tty */
1838 int
vnode_istty(vnode_t vp)1839 vnode_istty(vnode_t vp)
1840 {
1841 return (vp->v_flag & VISTTY) ? 1 : 0;
1842 }
1843
1844 /* if vnode_t mount operation in progress */
1845 int
vnode_ismount(vnode_t vp)1846 vnode_ismount(vnode_t vp)
1847 {
1848 return (vp->v_flag & VMOUNT)? 1 : 0;
1849 }
1850
/* is this vnode under recycle now (terminating or dead)? */
int
vnode_isrecycled(vnode_t vp)
{
	int ret;

	/* v_lflag must be sampled under the vnode lock. */
	vnode_lock_spin(vp);
	ret = (vp->v_lflag & (VL_TERMINATE | VL_DEAD))? 1 : 0;
	vnode_unlock(vp);
	return ret;
}
1862
1863 /* is this vnode marked for termination */
1864 int
vnode_willberecycled(vnode_t vp)1865 vnode_willberecycled(vnode_t vp)
1866 {
1867 return (vp->v_lflag & VL_MARKTERM) ? 1 : 0;
1868 }
1869
1870
1871 /* vnode was created by background task requesting rapid aging
1872 * and has not since been referenced by a normal task */
1873 int
vnode_israge(vnode_t vp)1874 vnode_israge(vnode_t vp)
1875 {
1876 return (vp->v_flag & VRAGE)? 1 : 0;
1877 }
1878
/* Always 0: no vnode currently requires snapshots (kept for KPI compatibility). */
int
vnode_needssnapshots(__unused vnode_t vp)
{
	return 0;
}
1884
1885
/* Check the process/thread to see if we should skip atime updates */
int
vfs_ctx_skipatime(vfs_context_t ctx)
{
	struct uthread *ut;
	proc_t proc;
	thread_t thr;

	proc = vfs_context_proc(ctx);
	thr = vfs_context_thread(ctx);

	/* Validate pointers in case we were invoked via a kernel context */
	if (thr && proc) {
		ut = get_bsdthread_info(thr);

		/* Process-wide rapid-aging flag. */
		if (proc->p_lflag & P_LRAGE_VNODES) {
			return 1;
		}

		/* Per-thread rage / atime-update opt-outs. */
		if (ut) {
			if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) {
				return 1;
			}
		}

		/* Per-process atime I/O policy. */
		if (proc->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) {
			return 1;
		}
	}
	return 0;
}
1917
1918 /* is vnode_t marked to not keep data cached once it's been consumed */
1919 int
vnode_isnocache(vnode_t vp)1920 vnode_isnocache(vnode_t vp)
1921 {
1922 return (vp->v_flag & VNOCACHE_DATA)? 1 : 0;
1923 }
1924
1925 /*
1926 * has sequential readahead been disabled on this vnode
1927 */
1928 int
vnode_isnoreadahead(vnode_t vp)1929 vnode_isnoreadahead(vnode_t vp)
1930 {
1931 return (vp->v_flag & VRAOFF)? 1 : 0;
1932 }
1933
1934 int
vnode_is_openevt(vnode_t vp)1935 vnode_is_openevt(vnode_t vp)
1936 {
1937 return (vp->v_flag & VOPENEVT)? 1 : 0;
1938 }
1939
1940 /* is vnode_t a standard one? */
1941 int
vnode_isstandard(vnode_t vp)1942 vnode_isstandard(vnode_t vp)
1943 {
1944 return (vp->v_flag & VSTANDARD)? 1 : 0;
1945 }
1946
1947 /* don't vflush() if SKIPSYSTEM */
1948 int
vnode_isnoflush(vnode_t vp)1949 vnode_isnoflush(vnode_t vp)
1950 {
1951 return (vp->v_flag & VNOFLUSH)? 1 : 0;
1952 }
1953
1954 /* is vnode_t a regular file */
1955 int
vnode_isreg(vnode_t vp)1956 vnode_isreg(vnode_t vp)
1957 {
1958 return (vp->v_type == VREG)? 1 : 0;
1959 }
1960
1961 /* is vnode_t a directory? */
1962 int
vnode_isdir(vnode_t vp)1963 vnode_isdir(vnode_t vp)
1964 {
1965 return (vp->v_type == VDIR)? 1 : 0;
1966 }
1967
1968 /* is vnode_t a symbolic link ? */
1969 int
vnode_islnk(vnode_t vp)1970 vnode_islnk(vnode_t vp)
1971 {
1972 return (vp->v_type == VLNK)? 1 : 0;
1973 }
1974
/*
 * Decide whether a lookup must be continued by namei: the directory is a
 * mount point (or has a pending trigger resolve), or the symlink must be
 * followed.  If so, set NAMEI_CONTLOOKUP and return EKEEPLOOKING; else 0.
 */
int
vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
{
	struct nameidata *ndp = cnp->cn_ndp;

	if (ndp == NULL) {
		panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL");
	}

	if (vnode_isdir(vp)) {
		/* A mount covering this directory must be crossed by namei. */
		if (vp->v_mountedhere != NULL) {
			goto yes;
		}

#if CONFIG_TRIGGERS
		/* Trigger vnodes need resolution before the lookup can finish. */
		if (vp->v_resolve) {
			goto yes;
		}
#endif /* CONFIG_TRIGGERS */
	}


	if (vnode_islnk(vp)) {
		/* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
		if (cnp->cn_flags & FOLLOW) {
			goto yes;
		}
		if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
			goto yes;
		}
	}

	return 0;

yes:
	ndp->ni_flag |= NAMEI_CONTLOOKUP;
	return EKEEPLOOKING;
}
2013
2014 /* is vnode_t a fifo ? */
2015 int
vnode_isfifo(vnode_t vp)2016 vnode_isfifo(vnode_t vp)
2017 {
2018 return (vp->v_type == VFIFO)? 1 : 0;
2019 }
2020
2021 /* is vnode_t a block device? */
2022 int
vnode_isblk(vnode_t vp)2023 vnode_isblk(vnode_t vp)
2024 {
2025 return (vp->v_type == VBLK)? 1 : 0;
2026 }
2027
2028 int
vnode_isspec(vnode_t vp)2029 vnode_isspec(vnode_t vp)
2030 {
2031 return ((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0;
2032 }
2033
2034 /* is vnode_t a char device? */
2035 int
vnode_ischr(vnode_t vp)2036 vnode_ischr(vnode_t vp)
2037 {
2038 return (vp->v_type == VCHR)? 1 : 0;
2039 }
2040
2041 /* is vnode_t a socket? */
2042 int
vnode_issock(vnode_t vp)2043 vnode_issock(vnode_t vp)
2044 {
2045 return (vp->v_type == VSOCK)? 1 : 0;
2046 }
2047
2048 /* is vnode_t a device with multiple active vnodes referring to it? */
2049 int
vnode_isaliased(vnode_t vp)2050 vnode_isaliased(vnode_t vp)
2051 {
2052 enum vtype vt = vp->v_type;
2053 if (!((vt == VCHR) || (vt == VBLK))) {
2054 return 0;
2055 } else {
2056 return vp->v_specflags & SI_ALIASED;
2057 }
2058 }
2059
/* is vnode_t a named stream (VISNAMEDSTREAM)? Always 0 without NAMEDSTREAMS. */
int
vnode_isnamedstream(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	return (vp->v_flag & VISNAMEDSTREAM) ? 1 : 0;
#else
	return 0;
#endif
}
2076
/* is vnode_t a shadow file (VISSHADOW)? Always 0 without NAMEDSTREAMS. */
int
vnode_isshadow(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	return (vp->v_flag & VISSHADOW) ? 1 : 0;
#else
	return 0;
#endif
}
2092
/* does the vnode have associated named stream vnodes (VL_HASSTREAMS)? */
int
vnode_hasnamedstreams(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	return (vp->v_lflag & VL_HASSTREAMS) ? 1 : 0;
#else
	return 0;
#endif
}
/* Set vnode_t to not cache data after it is consumed once; used for quota. */
void
vnode_setnocache(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VNOCACHE_DATA;
	vnode_unlock(vp);
}
2117
/* Clear the VNOCACHE_DATA hint, re-enabling data caching on the vnode. */
void
vnode_clearnocache(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOCACHE_DATA;
	vnode_unlock(vp);
}
2125
/* Set the VOPENEVT flag on the vnode (under the vnode lock). */
void
vnode_set_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VOPENEVT;
	vnode_unlock(vp);
}
2133
/* Clear the VOPENEVT flag on the vnode (under the vnode lock). */
void
vnode_clear_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VOPENEVT;
	vnode_unlock(vp);
}
2141
2142
2143 void
vnode_setnoreadahead(vnode_t vp)2144 vnode_setnoreadahead(vnode_t vp)
2145 {
2146 vnode_lock_spin(vp);
2147 vp->v_flag |= VRAOFF;
2148 vnode_unlock(vp);
2149 }
2150
/* Re-enable sequential readahead on the vnode (clear VRAOFF). */
void
vnode_clearnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VRAOFF;
	vnode_unlock(vp);
}
2158
2159 int
vnode_isfastdevicecandidate(vnode_t vp)2160 vnode_isfastdevicecandidate(vnode_t vp)
2161 {
2162 return (vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0;
2163 }
2164
/* Mark the vnode as a fast-device candidate (set VFASTDEVCANDIDATE). */
void
vnode_setfastdevicecandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VFASTDEVCANDIDATE;
	vnode_unlock(vp);
}
2172
/* Clear the fast-device candidate mark (VFASTDEVCANDIDATE). */
void
vnode_clearfastdevicecandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VFASTDEVCANDIDATE;
	vnode_unlock(vp);
}
2180
2181 int
vnode_isautocandidate(vnode_t vp)2182 vnode_isautocandidate(vnode_t vp)
2183 {
2184 return (vp->v_flag & VAUTOCANDIDATE)? 1 : 0;
2185 }
2186
/* Set the VAUTOCANDIDATE flag on the vnode (under the vnode lock). */
void
vnode_setautocandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VAUTOCANDIDATE;
	vnode_unlock(vp);
}
2194
/* Clear the VAUTOCANDIDATE flag on the vnode (under the vnode lock). */
void
vnode_clearautocandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VAUTOCANDIDATE;
	vnode_unlock(vp);
}
2202
2203
2204
2205
/* mark vnode_t to be skipped by vflush() if SKIPSYSTEM (set VNOFLUSH) */
void
vnode_setnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VNOFLUSH;
	vnode_unlock(vp);
}
2214
/* Clear the VNOFLUSH flag so vflush() handles the vnode normally again. */
void
vnode_clearnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOFLUSH;
	vnode_unlock(vp);
}
2222
2223
2224 /* is vnode_t a blkdevice and has a FS mounted on it */
2225 int
vnode_ismountedon(vnode_t vp)2226 vnode_ismountedon(vnode_t vp)
2227 {
2228 return (vp->v_specflags & SI_MOUNTEDON)? 1 : 0;
2229 }
2230
/* Flag the special vnode as having a filesystem mounted on it. */
void
vnode_setmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags |= SI_MOUNTEDON;
	vnode_unlock(vp);
}
2238
/* Clear the mounted-on flag from the special vnode. */
void
vnode_clearmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags &= ~SI_MOUNTEDON;
	vnode_unlock(vp);
}
2246
2247
/* Set the filesystem tag (VT_*) identifying which FS owns this vnode. */
void
vnode_settag(vnode_t vp, int tag)
{
	/*
	 * We only assign enum values to v_tag, but add an assert to make sure we
	 * catch it in dev/debug builds if this ever changes.
	 */
	assert(tag >= SHRT_MIN && tag <= SHRT_MAX);
	vp->v_tag = (uint16_t)tag;
}
2258
/* Return the filesystem tag (VT_*) stored in the vnode. */
int
vnode_tag(vnode_t vp)
{
	return vp->v_tag;
}
2264
/* Return the cached parent vnode; no reference is taken. */
vnode_t
vnode_parent(vnode_t vp)
{
	return vp->v_parent;
}
2270
/* Set the cached parent vnode pointer (unsynchronized raw store). */
void
vnode_setparent(vnode_t vp, vnode_t dvp)
{
	vp->v_parent = dvp;
}
2276
/* Set the vnode's name pointer (unsynchronized raw store). */
void
vnode_setname(vnode_t vp, char * name)
{
	vp->v_name = name;
}
2282
/* return the FS name registered when the FS was added to the kernel */
void
vnode_vfsname(vnode_t vp, char * buf)
{
	/* buf must be at least MFSNAMELEN bytes; strlcpy NUL-terminates. */
	strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
}
2289
/* return the FS type number of the vnode's mounted filesystem */
int
vnode_vfstypenum(vnode_t vp)
{
	return vp->v_mount->mnt_vtable->vfc_typenum;
}
2296
2297 int
vnode_vfs64bitready(vnode_t vp)2298 vnode_vfs64bitready(vnode_t vp)
2299 {
2300 /*
2301 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2302 */
2303 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
2304 return 1;
2305 } else {
2306 return 0;
2307 }
2308 }
2309
2310
2311
/* return the visible (MNT_VISFLAGMASK) flags of the vnode's mount point */
uint32_t
vnode_vfsvisflags(vnode_t vp)
{
	return vp->v_mount->mnt_flag & MNT_VISFLAGMASK;
}
2318
/* return the command modifier (MNT_CMDFLAGS) flags of the vnode's mount point */
uint32_t
vnode_vfscmdflags(vnode_t vp)
{
	return vp->v_mount->mnt_flag & MNT_CMDFLAGS;
}
2325
2326 /* return the max symlink of short links of vnode_t */
2327 uint32_t
vnode_vfsmaxsymlen(vnode_t vp)2328 vnode_vfsmaxsymlen(vnode_t vp)
2329 {
2330 return vp->v_mount->mnt_maxsymlinklen;
2331 }
2332
2333 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2334 struct vfsstatfs *
vnode_vfsstatfs(vnode_t vp)2335 vnode_vfsstatfs(vnode_t vp)
2336 {
2337 return &vp->v_mount->mnt_vfsstat;
2338 }
2339
2340 /* return a handle to the FSs specific private handle associated with vnode_t's mount point */
2341 void *
vnode_vfsfsprivate(vnode_t vp)2342 vnode_vfsfsprivate(vnode_t vp)
2343 {
2344 return vp->v_mount->mnt_data;
2345 }
2346
2347 /* is vnode_t in a rdonly mounted FS */
2348 int
vnode_vfsisrdonly(vnode_t vp)2349 vnode_vfsisrdonly(vnode_t vp)
2350 {
2351 return (vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0;
2352 }
2353
2354 int
vnode_compound_rename_available(vnode_t vp)2355 vnode_compound_rename_available(vnode_t vp)
2356 {
2357 return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
2358 }
2359 int
vnode_compound_rmdir_available(vnode_t vp)2360 vnode_compound_rmdir_available(vnode_t vp)
2361 {
2362 return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
2363 }
2364 int
vnode_compound_mkdir_available(vnode_t vp)2365 vnode_compound_mkdir_available(vnode_t vp)
2366 {
2367 return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
2368 }
2369 int
vnode_compound_remove_available(vnode_t vp)2370 vnode_compound_remove_available(vnode_t vp)
2371 {
2372 return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
2373 }
2374 int
vnode_compound_open_available(vnode_t vp)2375 vnode_compound_open_available(vnode_t vp)
2376 {
2377 return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
2378 }
2379
2380 int
vnode_compound_op_available(vnode_t vp,compound_vnop_id_t opid)2381 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2382 {
2383 return (vp->v_mount->mnt_compound_ops & opid) != 0;
2384 }
2385
2386 /*
2387 * Returns vnode ref to current working directory; if a per-thread current
2388 * working directory is in effect, return that instead of the per process one.
2389 *
2390 * XXX Published, but not used.
2391 */
2392 vnode_t
current_workingdir(void)2393 current_workingdir(void)
2394 {
2395 return vfs_context_cwd(vfs_context_current());
2396 }
2397
2398 /*
2399 * Get a filesec and optional acl contents from an extended attribute.
 * Function will attempt to retrieve ACL, UUID, and GUID information using a
2401 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2402 *
2403 * Parameters: vp The vnode on which to operate.
2404 * fsecp The filesec (and ACL, if any) being
2405 * retrieved.
2406 * ctx The vnode context in which the
2407 * operation is to be attempted.
2408 *
2409 * Returns: 0 Success
2410 * !0 errno value
2411 *
2412 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2413 * host byte order, as will be the ACL contents, if any.
 * Internally, we will canonicalize these values from network (PPC)
2415 * byte order after we retrieve them so that the on-disk contents
2416 * of the extended attribute are identical for both PPC and Intel
2417 * (if we were not being required to provide this service via
2418 * fallback, this would be the job of the filesystem
2419 * 'VNOP_GETATTR' call).
2420 *
2421 * We use ntohl() because it has a transitive property on Intel
2422 * machines and no effect on PPC mancines. This guarantees us
2423 *
2424 * XXX: Deleting rather than ignoreing a corrupt security structure is
2425 * probably the only way to reset it without assistance from an
2426 * file system integrity checking tool. Right now we ignore it.
2427 *
 * XXX: We should enumerate the possible errno values here, and where
 * in the code they originated.
2430 */
static int
vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
{
    kauth_filesec_t fsec;           /* buffer the EA is read into; ownership passes to caller on success */
    uio_t fsec_uio;
    size_t fsec_size;               /* NOTE: despite the name, this is an ACE count (KAUTH_FILESEC_COUNT) */
    size_t xsize, rsize;            /* xsize: EA size reported; rsize: bytes actually read */
    int error;
    uint32_t host_fsec_magic;
    uint32_t host_acl_entrycount;

    fsec = NULL;
    fsec_uio = NULL;

    /* find out how big the EA is */
    error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
    if (error != 0) {
        /* no EA, no filesec; "missing" is not an error for this fallback */
        if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
            error = 0;
        }
        /* either way, we are done */
        goto out;
    }

    /*
     * To be valid, a kauth_filesec_t must be large enough to hold a zero
     * ACE entry ACL, and if it's larger than that, it must have the right
     * number of bytes such that it contains an atomic number of ACEs,
     * rather than partial entries.  Otherwise, we ignore it.
     */
    if (!KAUTH_FILESEC_VALID(xsize)) {
        KAUTH_DEBUG("  ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
        error = 0;
        goto out;
    }

    /* how many entries would fit? */
    fsec_size = KAUTH_FILESEC_COUNT(xsize);
    if (fsec_size > KAUTH_ACL_MAX_ENTRIES) {
        KAUTH_DEBUG("  ERROR - Bogus (too large) kauth_fiilesec_t: %ld bytes", xsize);
        error = 0;
        goto out;
    }

    /* get buffer and uio sized from the EA we just measured */
    if (((fsec = kauth_filesec_alloc((int)fsec_size)) == NULL) ||
        ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
        uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
        KAUTH_DEBUG("  ERROR - could not allocate iov to read ACL");
        error = ENOMEM;
        goto out;
    }

    /* read security attribute; rsize comes back as the byte count actually read */
    rsize = xsize;
    if ((error = vn_getxattr(vp,
        KAUTH_FILESEC_XATTR,
        fsec_uio,
        &rsize,
        XATTR_NOSECURITY,
        ctx)) != 0) {
        /* no attribute - no security data */
        if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
            error = 0;
        }
        /* either way, we are done */
        goto out;
    }

    /*
     * Validate security structure; the validation must take place in host
     * byte order.  If it's corrupt, we will just ignore it.
     */

    /* Validate the size before trying to convert it */
    if (rsize < KAUTH_FILESEC_SIZE(0)) {
        KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
        goto out;
    }

    /* Validate the magic number before trying to convert it */
    host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
    if (fsec->fsec_magic != host_fsec_magic) {
        KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
        goto out;
    }

    /* Validate the entry count before trying to convert it. */
    host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
    if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
        if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
            KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
            goto out;
        }
        /* the claimed entry count must fit inside what we actually read */
        if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
            KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
            goto out;
        }
    }

    /* swap from on-disk (network) byte order into host order in place */
    kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);

    /* hand the buffer to the caller; clearing fsec keeps out: from freeing it */
    *fsecp = fsec;
    fsec = NULL;
    error = 0;
out:
    if (fsec != NULL) {
        kauth_filesec_free(fsec);
    }
    if (fsec_uio != NULL) {
        uio_free(fsec_uio);
    }
    /*
     * NOTE(review): the "ignore corrupt/absent EA" paths above return 0
     * without storing to *fsecp, so the caller's pointer is untouched;
     * callers must pre-initialize it (vnode_getattr does) -- confirm for
     * any new caller.
     */
    if (error) {
        *fsecp = NULL;
    }
    return error;
}
2549
2550 /*
2551 * Set a filesec and optional acl contents into an extended attribute.
2552 * function will attempt to store ACL, UUID, and GUID information using a
2553 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2554 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2555 * original caller supplied an acl.
2556 *
2557 * Parameters: vp The vnode on which to operate.
2558 * fsec The filesec being set.
2559 * acl The acl to be associated with 'fsec'.
2560 * ctx The vnode context in which the
2561 * operation is to be attempted.
2562 *
2563 * Returns: 0 Success
2564 * !0 errno value
2565 *
2566 * Notes: Both the fsec and the acl are always valid.
2567 *
 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
 * as are the acl contents, if they are used.  Internally, we will
 * canonicalize these values into network (PPC) byte order before we
 * attempt to write them so that the on-disk contents of the
 * extended attribute are identical for both PPC and Intel (if we
 * were not being required to provide this service via fallback,
 * this would be the job of the filesystem 'VNOP_SETATTR' call).
 * We reverse this process on the way out, so we leave with the
 * same byte order we started with.
 *
 * XXX: We should enumerate the possible errno values here, and where
 * in the code they originated.
2580 */
static int
vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
{
    uio_t fsec_uio;
    int error;
    uint32_t saved_acl_copysize;

    fsec_uio = NULL;

    /* two iovecs: the filesec header, then the (possibly separate) ACL */
    if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
        KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
        error = ENOMEM;
        goto out;
    }
    /*
     * Save the pre-converted ACL copysize, because it gets swapped too
     * if we are running with the wrong endianness.
     */
    saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);

    /* swap fsec and acl into on-disk (network) byte order, in place */
    kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);

    /*
     * First iovec: the filesec header up to (but excluding) the embedded
     * ACL; second iovec: the caller-supplied ACL, which may or may not
     * be the embedded one.
     * NOTE(review): uio_addiov return values are not checked here; the
     * uio was created with exactly 2 iovecs so these should not fail --
     * confirm.
     */
    uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
    uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
    error = vn_setxattr(vp,
        KAUTH_FILESEC_XATTR,
        fsec_uio,
        XATTR_NOSECURITY,               /* we have auth'ed already */
        ctx);
    VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);

    /* restore host byte order so the caller sees its data unchanged */
    kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);

out:
    if (fsec_uio != NULL) {
        uio_free(fsec_uio);
    }
    return error;
}
2620
2621 /*
2622 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2623 */
2624 void
vnode_attr_handle_uid_and_gid(struct vnode_attr * vap,mount_t mp,vfs_context_t ctx)2625 vnode_attr_handle_uid_and_gid(struct vnode_attr *vap, mount_t mp, vfs_context_t ctx)
2626 {
2627 uid_t nuid;
2628 gid_t ngid;
2629 bool is_suser = vfs_context_issuser(ctx) ? true : false;
2630
2631 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2632 if (is_suser && VATTR_IS_SUPPORTED(vap, va_uid)) {
2633 nuid = vap->va_uid;
2634 } else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2635 nuid = mp->mnt_fsowner;
2636 if (nuid == KAUTH_UID_NONE) {
2637 nuid = 99;
2638 }
2639 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2640 nuid = vap->va_uid;
2641 } else {
2642 /* this will always be something sensible */
2643 nuid = mp->mnt_fsowner;
2644 }
2645 if ((nuid == 99) && !is_suser) {
2646 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2647 }
2648 VATTR_RETURN(vap, va_uid, nuid);
2649 }
2650 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2651 if (is_suser && VATTR_IS_SUPPORTED(vap, va_gid)) {
2652 ngid = vap->va_gid;
2653 } else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2654 ngid = mp->mnt_fsgroup;
2655 if (ngid == KAUTH_GID_NONE) {
2656 ngid = 99;
2657 }
2658 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2659 ngid = vap->va_gid;
2660 } else {
2661 /* this will always be something sensible */
2662 ngid = mp->mnt_fsgroup;
2663 }
2664 if ((ngid == 99) && !is_suser) {
2665 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2666 }
2667 VATTR_RETURN(vap, va_gid, ngid);
2668 }
2669 }
2670
2671 /*
2672 * Returns: 0 Success
2673 * ENOMEM Not enough space [only if has filesec]
2674 * EINVAL Requested unknown attributes
2675 * VNOP_GETATTR: ???
2676 * vnode_get_filesec: ???
2677 * kauth_cred_guid2uid: ???
2678 * kauth_cred_guid2gid: ???
2679 * vfs_update_vfsstat: ???
2680 */
int
vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
    kauth_filesec_t fsec;       /* filesec fetched via the xattr fallback, if any */
    kauth_acl_t facl;           /* heap copy of the fallback ACL returned to the caller */
    int error;

    /*
     * Reject attempts to fetch unknown attributes.
     */
    if (vap->va_active & ~VNODE_ATTR_ALL) {
        return EINVAL;
    }

    /* don't ask for extended security data if the filesystem doesn't support it */
    if (!vfs_extendedsecurity(vnode_mount(vp))) {
        VATTR_CLEAR_ACTIVE(vap, va_acl);
        VATTR_CLEAR_ACTIVE(vap, va_uuuid);
        VATTR_CLEAR_ACTIVE(vap, va_guuid);
    }

    /*
     * If the caller wants size values we might have to synthesise, give the
     * filesystem the opportunity to supply better intermediate results.
     */
    if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
        VATTR_IS_ACTIVE(vap, va_total_size) ||
        VATTR_IS_ACTIVE(vap, va_total_alloc)) {
        VATTR_SET_ACTIVE(vap, va_data_size);
        VATTR_SET_ACTIVE(vap, va_data_alloc);
        VATTR_SET_ACTIVE(vap, va_total_size);
        VATTR_SET_ACTIVE(vap, va_total_alloc);
    }

    /* cleared again at out:, so this flag never survives the call */
    vap->va_vaflags &= ~VA_USEFSID;

    error = VNOP_GETATTR(vp, vap, ctx);
    if (error) {
        KAUTH_DEBUG("ERROR - returning %d", error);
        goto out;
    }

    /*
     * If extended security data was requested but not returned, try the fallback
     * path.
     */
    if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
        fsec = NULL;

        if (XATTR_VNODE_SUPPORTED(vp)) {
            /* try to get the filesec from the backing extended attribute */
            if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
                goto out;
            }
        }
        /* if no filesec, no attributes */
        if (fsec == NULL) {
            VATTR_RETURN(vap, va_acl, NULL);
            VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
            VATTR_RETURN(vap, va_guuid, kauth_null_guid);
        } else {
            /* looks good, try to return what we were asked for */
            VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
            VATTR_RETURN(vap, va_guuid, fsec->fsec_group);

            /* only return the ACL if we were actually asked for it */
            if (VATTR_IS_ACTIVE(vap, va_acl)) {
                if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
                    VATTR_RETURN(vap, va_acl, NULL);
                } else {
                    /* caller takes ownership of facl and must kauth_acl_free() it */
                    facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
                    if (facl == NULL) {
                        kauth_filesec_free(fsec);
                        error = ENOMEM;
                        goto out;
                    }
                    __nochk_bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
                    VATTR_RETURN(vap, va_acl, facl);
                }
            }
            kauth_filesec_free(fsec);
        }
    }
    /*
     * If someone gave us an unsolicited filesec, toss it.  We promise that
     * we're OK with a filesystem giving us anything back, but our callers
     * only expect what they asked for.
     */
    if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
        if (vap->va_acl != NULL) {
            kauth_acl_free(vap->va_acl);
        }
        VATTR_CLEAR_SUPPORTED(vap, va_acl);
    }

#if 0   /* enable when we have a filesystem only supporting UUIDs */
    /*
     * Handle the case where we need a UID/GID, but only have extended
     * security information.
     */
    if (VATTR_NOT_RETURNED(vap, va_uid) &&
        VATTR_IS_SUPPORTED(vap, va_uuuid) &&
        !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
        if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0) {
            VATTR_RETURN(vap, va_uid, nuid);
        }
    }
    if (VATTR_NOT_RETURNED(vap, va_gid) &&
        VATTR_IS_SUPPORTED(vap, va_guuid) &&
        !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
        if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0) {
            VATTR_RETURN(vap, va_gid, ngid);
        }
    }
#endif

    /* apply uid/gid == 99 and MNT_IGNORE_OWNERSHIP policy */
    vnode_attr_handle_uid_and_gid(vap, vp->v_mount, ctx);

    /*
     * Synthesise some values that can be reasonably guessed.
     */
    if (!VATTR_IS_SUPPORTED(vap, va_iosize)) {
        assert(vp->v_mount->mnt_vfsstat.f_iosize <= UINT32_MAX);
        VATTR_RETURN(vap, va_iosize, (uint32_t)vp->v_mount->mnt_vfsstat.f_iosize);
    }

    if (!VATTR_IS_SUPPORTED(vap, va_flags)) {
        VATTR_RETURN(vap, va_flags, 0);
    }

    if (!VATTR_IS_SUPPORTED(vap, va_filerev)) {
        VATTR_RETURN(vap, va_filerev, 0);
    }

    if (!VATTR_IS_SUPPORTED(vap, va_gen)) {
        VATTR_RETURN(vap, va_gen, 0);
    }

    /*
     * Default sizes.  Ordering here is important, as later defaults build on earlier ones.
     */
    if (!VATTR_IS_SUPPORTED(vap, va_data_size)) {
        VATTR_RETURN(vap, va_data_size, 0);
    }

    /* do we want any of the possibly-computed values? */
    if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
        VATTR_IS_ACTIVE(vap, va_total_size) ||
        VATTR_IS_ACTIVE(vap, va_total_alloc)) {
        /* make sure f_bsize is valid before using it as a rounding unit */
        if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
            if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) {
                goto out;
            }
        }

        /* default va_data_alloc from va_data_size */
        if (!VATTR_IS_SUPPORTED(vap, va_data_alloc)) {
            VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
        }

        /* default va_total_size from va_data_size */
        if (!VATTR_IS_SUPPORTED(vap, va_total_size)) {
            VATTR_RETURN(vap, va_total_size, vap->va_data_size);
        }

        /* default va_total_alloc from va_total_size which is guaranteed at this point */
        if (!VATTR_IS_SUPPORTED(vap, va_total_alloc)) {
            VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
        }
    }

    /*
     * If we don't have a change time, pull it from the modtime.
     */
    if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time)) {
        VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
    }

    /*
     * This is really only supported for the creation VNOPs, but since the field is there
     * we should populate it correctly.
     */
    VATTR_RETURN(vap, va_type, vp->v_type);

    /*
     * The fsid can be obtained from the mountpoint directly.
     */
    if (VATTR_IS_ACTIVE(vap, va_fsid) &&
        (!VATTR_IS_SUPPORTED(vap, va_fsid) ||
        vap->va_vaflags & VA_REALFSID || !(vap->va_vaflags & VA_USEFSID))) {
        VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
    }

out:
    /* never let VA_USEFSID escape back to the caller */
    vap->va_vaflags &= ~VA_USEFSID;

    return error;
}
2880
2881 /*
2882 * Choose 32 bit or 64 bit fsid
2883 */
2884 uint64_t
vnode_get_va_fsid(struct vnode_attr * vap)2885 vnode_get_va_fsid(struct vnode_attr *vap)
2886 {
2887 if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
2888 return (uint64_t)vap->va_fsid64.val[0] + ((uint64_t)vap->va_fsid64.val[1] << 32);
2889 }
2890 return vap->va_fsid;
2891 }
2892
2893 /*
2894 * Set the attributes on a vnode in a vnode context.
2895 *
2896 * Parameters: vp The vnode whose attributes to set.
2897 * vap A pointer to the attributes to set.
2898 * ctx The vnode context in which the
2899 * operation is to be attempted.
2900 *
2901 * Returns: 0 Success
2902 * !0 errno value
2903 *
2904 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
2905 *
 * The contents of the data area pointed to by 'vap' may be
 * modified if the vnode is on a filesystem which has been
 * mounted with ignore-ownership flags, or by the underlying
 * VFS itself, or by the fallback code, if the underlying VFS
 * does not support ACL, UUID, or GUUID attributes directly.
 *
 * XXX: We should enumerate the possible errno values here, and where
 * in the code they originated.
2914 */
int
vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
    int error;
#if CONFIG_FSE
    uint64_t active;            /* snapshot of requested attributes, for FSevent decisions */
    int is_perm_change = 0;
    int is_stat_change = 0;
#endif

    /*
     * Reject attempts to set unknown attributes.
     */
    if (vap->va_active & ~VNODE_ATTR_ALL) {
        return EINVAL;
    }

    /*
     * Make sure the filesystem is mounted R/W.
     * If not, return an error.
     */
    if (vfs_isrdonly(vp->v_mount)) {
        error = EROFS;
        goto out;
    }

#if DEVELOPMENT || DEBUG
    /*
     * XXX VSWAP: Check for entitlements or special flag here
     * so we can restrict access appropriately.
     */
#else /* DEVELOPMENT || DEBUG */

    /* on release kernels, swap-backing files may only be modified from kernel context */
    if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
        error = EPERM;
        goto out;
    }
#endif /* DEVELOPMENT || DEBUG */

#if NAMEDSTREAMS
    /* For streams, va_data_size is the only setable attribute. */
    if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
        error = EPERM;
        goto out;
    }
#endif
    /* Check for truncation */
    if (VATTR_IS_ACTIVE(vap, va_data_size)) {
        switch (vp->v_type) {
        case VREG:
            /* For regular files it's ok */
            break;
        case VDIR:
            /* Not allowed to truncate directories */
            error = EISDIR;
            goto out;
        default:
            /* For everything else we will clear the bit and let underlying FS decide on the rest */
            VATTR_CLEAR_ACTIVE(vap, va_data_size);
            if (vap->va_active) {
                break;
            }
            /* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
            return 0;
        }
    }

    /*
     * If ownership is being ignored on this volume, we silently discard
     * ownership changes.
     */
    if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
        VATTR_CLEAR_ACTIVE(vap, va_uid);
        VATTR_CLEAR_ACTIVE(vap, va_gid);
    }

    /*
     * Make sure that extended security is enabled if we're going to try
     * to set any.
     */
    if (!vfs_extendedsecurity(vnode_mount(vp)) &&
        (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
        KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
        error = ENOTSUP;
        goto out;
    }

    /* Never allow the setting of any unsupported superuser flags. */
    if (VATTR_IS_ACTIVE(vap, va_flags)) {
        vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
    }

#if CONFIG_FSE
    /*
     * Remember all of the active attributes that we're
     * attempting to modify.
     */
    active = vap->va_active & ~VNODE_ATTR_RDONLY;
#endif

    error = VNOP_SETATTR(vp, vap, ctx);

    /* if the FS left anything unhandled, try the EA-backed fallback */
    if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) {
        error = vnode_setattr_fallback(vp, vap, ctx);
    }

#if CONFIG_FSE
#define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \
	                 VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \
	                 VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl))

    /*
     * Now that we've changed them, decide whether to send an
     * FSevent.
     */
    if ((active & PERMISSION_BITS) & vap->va_supported) {
        is_perm_change = 1;
    } else {
        /*
         * We've already checked the permission bits, and we
         * also want to filter out access time / backup time
         * changes.
         */
        active &= ~(PERMISSION_BITS |
            VNODE_ATTR_BIT(va_access_time) |
            VNODE_ATTR_BIT(va_backup_time));

        /* Anything left to notify about? */
        if (active & vap->va_supported) {
            is_stat_change = 1;
        }
    }

    if (error == 0) {
        /* ownership/mode/ACL changes map to FSE_CHOWN, everything else to FSE_STAT_CHANGED */
        if (is_perm_change) {
            if (need_fsevent(FSE_CHOWN, vp)) {
                add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
            }
        } else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) {
            add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
        }
    }
#undef PERMISSION_BITS
#endif

out:
    return error;
}
3063
3064 /*
 * Fallback for setting the attributes on a vnode in a vnode context.  This
 * function will attempt to store ACL, UUID, and GUID information utilizing
3067 * a read/modify/write operation against an EA used as a backing store for
3068 * the object.
3069 *
3070 * Parameters: vp The vnode whose attributes to set.
3071 * vap A pointer to the attributes to set.
3072 * ctx The vnode context in which the
3073 * operation is to be attempted.
3074 *
3075 * Returns: 0 Success
3076 * !0 errno value
3077 *
3078 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
3079 * as are the fsec and lfsec, if they are used.
3080 *
3081 * The contents of the data area pointed to by 'vap' may be
3082 * modified to indicate that the attribute is supported for
3083 * any given requested attribute.
3084 *
 * XXX: We should enumerate the possible errno values here, and where
 * in the code they originated.
3087 */
int
vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
    kauth_filesec_t fsec;       /* filesec being updated: heap (from EA) or &lfsec */
    kauth_acl_t facl;           /* ACL to store: embedded in fsec, or caller-supplied */
    struct kauth_filesec lfsec; /* stack filesec used when there is no existing EA */
    int error;

    error = 0;

    /*
     * Extended security fallback via extended attributes.
     *
     * Note that we do not free the filesec; the caller is expected to
     * do this.
     */
    if (VATTR_NOT_RETURNED(vap, va_acl) ||
        VATTR_NOT_RETURNED(vap, va_uuuid) ||
        VATTR_NOT_RETURNED(vap, va_guuid)) {
        VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");

        /*
         * Fail for file types that we don't permit extended security
         * to be set on.
         */
        if (!XATTR_VNODE_SUPPORTED(vp)) {
            VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
            error = EINVAL;
            goto out;
        }

        /*
         * If we don't have all the extended security items, we need
         * to fetch the existing data to perform a read-modify-write
         * operation.
         */
        fsec = NULL;
        if (!VATTR_IS_ACTIVE(vap, va_acl) ||
            !VATTR_IS_ACTIVE(vap, va_uuuid) ||
            !VATTR_IS_ACTIVE(vap, va_guuid)) {
            if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
                KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
                goto out;
            }
        }
        /* if we didn't get a filesec, use our local one */
        if (fsec == NULL) {
            KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
            fsec = &lfsec;
        } else {
            KAUTH_DEBUG("SETATTR - updating existing filesec");
        }
        /* find the ACL; may be replaced by the caller's ACL below */
        facl = &fsec->fsec_acl;

        /* if we're using the local filesec, we need to initialise it */
        if (fsec == &lfsec) {
            fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
            fsec->fsec_owner = kauth_null_guid;
            fsec->fsec_group = kauth_null_guid;
            facl->acl_entrycount = KAUTH_FILESEC_NOACL;
            facl->acl_flags = 0;
        }

        /*
         * Update with the supplied attributes.
         */
        if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
            KAUTH_DEBUG("SETATTR - updating owner UUID");
            fsec->fsec_owner = vap->va_uuuid;
            VATTR_SET_SUPPORTED(vap, va_uuuid);
        }
        if (VATTR_IS_ACTIVE(vap, va_guuid)) {
            KAUTH_DEBUG("SETATTR - updating group UUID");
            fsec->fsec_group = vap->va_guuid;
            VATTR_SET_SUPPORTED(vap, va_guuid);
        }
        if (VATTR_IS_ACTIVE(vap, va_acl)) {
            if (vap->va_acl == NULL) {
                KAUTH_DEBUG("SETATTR - removing ACL");
                facl->acl_entrycount = KAUTH_FILESEC_NOACL;
            } else {
                KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
                facl = vap->va_acl;
            }
            VATTR_SET_SUPPORTED(vap, va_acl);
        }

        /*
         * If the filesec data is all invalid, we can just remove
         * the EA completely.
         */
        if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
            kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
            kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
            error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
            /* no attribute is ok, nothing to delete */
            if (error == ENOATTR) {
                error = 0;
            }
            VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
        } else {
            /* write the EA */
            error = vnode_set_filesec(vp, fsec, facl, ctx);
            VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
        }

        /* if we fetched a filesec, dispose of the buffer */
        if (fsec != &lfsec) {
            kauth_filesec_free(fsec);
        }
    }
out:

    return error;
}
3204
3205 /*
3206 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
3207 * event on a vnode.
3208 */
3209 int
vnode_notify(vnode_t vp,uint32_t events,struct vnode_attr * vap)3210 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
3211 {
3212 /* These are the same as the corresponding knotes, at least for now. Cheating a little. */
3213 uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
3214 | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
3215 uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
3216 | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
3217 uint32_t knote_events = (events & knote_mask);
3218
3219 /* Permissions are not explicitly part of the kqueue model */
3220 if (events & VNODE_EVENT_PERMS) {
3221 knote_events |= NOTE_ATTRIB;
3222 }
3223
3224 /* Directory contents information just becomes NOTE_WRITE */
3225 if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
3226 knote_events |= NOTE_WRITE;
3227 }
3228
3229 if (knote_events) {
3230 lock_vnode_and_post(vp, knote_events);
3231 #if CONFIG_FSE
3232 if (vap != NULL) {
3233 create_fsevent_from_kevent(vp, events, vap);
3234 }
3235 #else
3236 (void)vap;
3237 #endif
3238 }
3239
3240 return 0;
3241 }
3242
3243
3244
3245 int
vnode_isdyldsharedcache(vnode_t vp)3246 vnode_isdyldsharedcache(vnode_t vp)
3247 {
3248 return (vp->v_flag & VSHARED_DYLD) ? 1 : 0;
3249 }
3250
3251
3252 /*
3253 * For a filesystem that isn't tracking its own vnode watchers:
3254 * check whether a vnode is being monitored.
3255 */
3256 int
vnode_ismonitored(vnode_t vp)3257 vnode_ismonitored(vnode_t vp)
3258 {
3259 return vp->v_knotes.slh_first != NULL;
3260 }
3261
/*
 * Resolve the vnode backing a nullfs vnode.
 *
 * *out_vpp is always initialized to NULLVP before the lookup.  When the
 * kernel is built without NULLFS this always returns ENOENT.
 */
int
vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
{
    if (out_vpp) {
        *out_vpp = NULLVP;
    }
#if NULLFS
    return nullfs_getbackingvnode(in_vp, out_vpp);
#else
#pragma unused(in_vp)
    return ENOENT;
#endif
}
3275
3276 /*
3277 * Initialize a struct vnode_attr and activate the attributes required
3278 * by the vnode_notify() call.
3279 */
3280 int
vfs_get_notify_attributes(struct vnode_attr * vap)3281 vfs_get_notify_attributes(struct vnode_attr *vap)
3282 {
3283 VATTR_INIT(vap);
3284 vap->va_active = VNODE_NOTIFY_ATTRS;
3285 return 0;
3286 }
3287
3288 #if CONFIG_TRIGGERS
3289 int
vfs_settriggercallback(fsid_t * fsid,vfs_trigger_callback_t vtc,void * data,uint32_t flags __unused,vfs_context_t ctx)3290 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
3291 {
3292 int error;
3293 mount_t mp;
3294
3295 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
3296 if (mp == NULL) {
3297 return ENOENT;
3298 }
3299
3300 error = vfs_busy(mp, LK_NOWAIT);
3301 mount_iterdrop(mp);
3302
3303 if (error != 0) {
3304 return ENOENT;
3305 }
3306
3307 mount_lock(mp);
3308 if (mp->mnt_triggercallback != NULL) {
3309 error = EBUSY;
3310 mount_unlock(mp);
3311 goto out;
3312 }
3313
3314 mp->mnt_triggercallback = vtc;
3315 mp->mnt_triggerdata = data;
3316 mount_unlock(mp);
3317
3318 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
3319
3320 out:
3321 vfs_unbusy(mp);
3322 return 0;
3323 }
3324 #endif /* CONFIG_TRIGGERS */
3325
3326 /*
3327 * Definition of vnode operations.
3328 */
3329
3330 #if 0
3331 /*
3332 *#
3333 *#% lookup dvp L ? ?
3334 *#% lookup vpp - L -
3335 */
3336 struct vnop_lookup_args {
3337 struct vnodeop_desc *a_desc;
3338 vnode_t a_dvp;
3339 vnode_t *a_vpp;
3340 struct componentname *a_cnp;
3341 vfs_context_t a_context;
3342 };
3343 #endif /* 0*/
3344
3345 /*
3346 * Returns: 0 Success
3347 * lock_fsnode:ENOENT No such file or directory [only for VFS
3348 * that is not thread safe & vnode is
3349 * currently being/has been terminated]
3350 * <vfs_lookup>:ENAMETOOLONG
3351 * <vfs_lookup>:ENOENT
3352 * <vfs_lookup>:EJUSTRETURN
3353 * <vfs_lookup>:EPERM
3354 * <vfs_lookup>:EISDIR
3355 * <vfs_lookup>:ENOTDIR
3356 * <vfs_lookup>:???
3357 *
3358 * Note: The return codes from the underlying VFS's lookup routine can't
3359 * be fully enumerated here, since third party VFS authors may not
3360 * limit their error returns to the ones documented here, even
3361 * though this may result in some programs functioning incorrectly.
3362 *
3363 * The return codes documented above are those which may currently
3364 * be returned by HFS from hfs_lookup, not including additional
3365 * error code which may be propagated from underlying routines.
3366 */
3367 errno_t
VNOP_LOOKUP(vnode_t dvp,vnode_t * vpp,struct componentname * cnp,vfs_context_t ctx)3368 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
3369 {
3370 int _err;
3371 struct vnop_lookup_args a;
3372
3373 a.a_desc = &vnop_lookup_desc;
3374 a.a_dvp = dvp;
3375 a.a_vpp = vpp;
3376 a.a_cnp = cnp;
3377 a.a_context = ctx;
3378
3379 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
3380 if (_err == 0 && *vpp) {
3381 DTRACE_FSINFO(lookup, vnode_t, *vpp);
3382 }
3383
3384 return _err;
3385 }
3386
3387 #if 0
3388 struct vnop_compound_open_args {
3389 struct vnodeop_desc *a_desc;
3390 vnode_t a_dvp;
3391 vnode_t *a_vpp;
3392 struct componentname *a_cnp;
3393 int32_t a_flags;
3394 int32_t a_fmode;
3395 struct vnode_attr *a_vap;
3396 vfs_context_t a_context;
3397 void *a_reserved;
3398 };
3399 #endif /* 0 */
3400
/*
 * Compound open: lookup, optional create (O_CREAT), authorization, and open
 * are performed by the filesystem in a single call.  dvp must be non-NULL;
 * a vap is required exactly when O_CREAT is requested (and forbidden
 * otherwise) — violations panic.
 */
int
VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_open_args a;
	int did_create = 0;
	int want_create;
	uint32_t tmp_status = 0;
	struct componentname *cnp = &ndp->ni_cnd;

	want_create = (flags & O_CREAT);

	a.a_desc = &vnop_compound_open_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp; /* Could be NULL */
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_fmode = fmode;
	/* Use a local status word if the caller did not supply one. */
	a.a_status = (statusp != NULL) ? statusp : &tmp_status;
	a.a_vap = vap;
	a.a_context = ctx;
	a.a_open_create_authorizer = vn_authorize_create;
	a.a_open_existing_authorizer = vn_authorize_open_existing;
	a.a_reserved = NULL;

	if (dvp == NULLVP) {
		panic("No dvp?");
	}
	if (want_create && !vap) {
		panic("Want create, but no vap?");
	}
	if (!want_create && vap) {
		panic("Don't want create, but have a vap?");
	}

	_err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
	if (want_create) {
		if (_err == 0 && *vpp) {
			DTRACE_FSINFO(compound_open, vnode_t, *vpp);
		} else {
			DTRACE_FSINFO(compound_open, vnode_t, dvp);
		}
	} else {
		/*
		 * NOTE(review): *vpp is dereferenced here even when _err != 0;
		 * assumes vpp is non-NULL on the non-create path — confirm
		 * against callers.
		 */
		DTRACE_FSINFO(compound_open, vnode_t, *vpp);
	}

	/* The FS reports via the status word whether it actually created. */
	did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);

	if (did_create && !want_create) {
		panic("Filesystem did a create, even though none was requested?");
	}

	if (did_create) {
#if CONFIG_APPLEDOUBLE
		if (!NATIVE_XATTR(dvp)) {
			/*
			 * Remove stale Apple Double file (if any).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
		}
#endif /* CONFIG_APPLEDOUBLE */
		/* On create, provide kqueue notification */
		post_event_if_success(dvp, _err, NOTE_WRITE);
	}

	/* Reconcile namei state with the outcome of the compound operation. */
	lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
#if 0 /* FSEvents... */
	if (*vpp && _err && _err != EKEEPLOOKING) {
		vnode_put(*vpp);
		*vpp = NULLVP;
	}
#endif /* 0 */

	return _err;
}
3476
3477 #if 0
3478 struct vnop_create_args {
3479 struct vnodeop_desc *a_desc;
3480 vnode_t a_dvp;
3481 vnode_t *a_vpp;
3482 struct componentname *a_cnp;
3483 struct vnode_attr *a_vap;
3484 vfs_context_t a_context;
3485 };
3486 #endif /* 0*/
/*
 * Dispatch VNOP_CREATE (file creation in directory dvp) to the filesystem,
 * then perform generic post-processing: AppleDouble cleanup and kqueue
 * notification of the parent directory.
 */
errno_t
VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_create_args a;

	a.a_desc = &vnop_create_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_context = ctx;

	_err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
	if (_err == 0 && *vpp) {
		DTRACE_FSINFO(create, vnode_t, *vpp);
	}

#if CONFIG_APPLEDOUBLE
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 * A leftover "._" file from a prior use of this name would
		 * otherwise be misattributed to the newly created file.
		 */
		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
	}
#endif /* CONFIG_APPLEDOUBLE */

	/* Notify watchers of the parent directory about the new entry. */
	post_event_if_success(dvp, _err, NOTE_WRITE);

	return _err;
}
3518
3519 #if 0
3520 /*
3521 *#
3522 *#% whiteout dvp L L L
3523 *#% whiteout cnp - - -
3524 *#% whiteout flag - - -
3525 *#
3526 */
3527 struct vnop_whiteout_args {
3528 struct vnodeop_desc *a_desc;
3529 vnode_t a_dvp;
3530 struct componentname *a_cnp;
3531 int a_flags;
3532 vfs_context_t a_context;
3533 };
3534 #endif /* 0*/
3535 errno_t
VNOP_WHITEOUT(__unused vnode_t dvp,__unused struct componentname * cnp,__unused int flags,__unused vfs_context_t ctx)3536 VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
3537 __unused int flags, __unused vfs_context_t ctx)
3538 {
3539 return ENOTSUP; // XXX OBSOLETE
3540 }
3541
3542 #if 0
3543 /*
3544 *#
3545 *#% mknod dvp L U U
3546 *#% mknod vpp - X -
3547 *#
3548 */
3549 struct vnop_mknod_args {
3550 struct vnodeop_desc *a_desc;
3551 vnode_t a_dvp;
3552 vnode_t *a_vpp;
3553 struct componentname *a_cnp;
3554 struct vnode_attr *a_vap;
3555 vfs_context_t a_context;
3556 };
3557 #endif /* 0*/
3558 errno_t
VNOP_MKNOD(vnode_t dvp,vnode_t * vpp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx)3559 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3560 {
3561 int _err;
3562 struct vnop_mknod_args a;
3563
3564 a.a_desc = &vnop_mknod_desc;
3565 a.a_dvp = dvp;
3566 a.a_vpp = vpp;
3567 a.a_cnp = cnp;
3568 a.a_vap = vap;
3569 a.a_context = ctx;
3570
3571 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3572 if (_err == 0 && *vpp) {
3573 DTRACE_FSINFO(mknod, vnode_t, *vpp);
3574 }
3575
3576 post_event_if_success(dvp, _err, NOTE_WRITE);
3577
3578 return _err;
3579 }
3580
3581 #if 0
3582 /*
3583 *#
3584 *#% open vp L L L
3585 *#
3586 */
3587 struct vnop_open_args {
3588 struct vnodeop_desc *a_desc;
3589 vnode_t a_vp;
3590 int a_mode;
3591 vfs_context_t a_context;
3592 };
3593 #endif /* 0*/
3594 errno_t
VNOP_OPEN(vnode_t vp,int mode,vfs_context_t ctx)3595 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3596 {
3597 int _err;
3598 struct vnop_open_args a;
3599
3600 if (ctx == NULL) {
3601 ctx = vfs_context_current();
3602 }
3603 a.a_desc = &vnop_open_desc;
3604 a.a_vp = vp;
3605 a.a_mode = mode;
3606 a.a_context = ctx;
3607
3608 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3609 DTRACE_FSINFO(open, vnode_t, vp);
3610
3611 return _err;
3612 }
3613
3614 #if 0
3615 /*
3616 *#
3617 *#% close vp U U U
3618 *#
3619 */
3620 struct vnop_close_args {
3621 struct vnodeop_desc *a_desc;
3622 vnode_t a_vp;
3623 int a_fflag;
3624 vfs_context_t a_context;
3625 };
3626 #endif /* 0*/
3627 errno_t
VNOP_CLOSE(vnode_t vp,int fflag,vfs_context_t ctx)3628 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3629 {
3630 int _err;
3631 struct vnop_close_args a;
3632
3633 if (ctx == NULL) {
3634 ctx = vfs_context_current();
3635 }
3636 a.a_desc = &vnop_close_desc;
3637 a.a_vp = vp;
3638 a.a_fflag = fflag;
3639 a.a_context = ctx;
3640
3641 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3642 DTRACE_FSINFO(close, vnode_t, vp);
3643
3644 return _err;
3645 }
3646
3647 #if 0
3648 /*
3649 *#
3650 *#% access vp L L L
3651 *#
3652 */
3653 struct vnop_access_args {
3654 struct vnodeop_desc *a_desc;
3655 vnode_t a_vp;
3656 int a_action;
3657 vfs_context_t a_context;
3658 };
3659 #endif /* 0*/
3660 errno_t
VNOP_ACCESS(vnode_t vp,int action,vfs_context_t ctx)3661 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3662 {
3663 int _err;
3664 struct vnop_access_args a;
3665
3666 if (ctx == NULL) {
3667 ctx = vfs_context_current();
3668 }
3669 a.a_desc = &vnop_access_desc;
3670 a.a_vp = vp;
3671 a.a_action = action;
3672 a.a_context = ctx;
3673
3674 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3675 DTRACE_FSINFO(access, vnode_t, vp);
3676
3677 return _err;
3678 }
3679
3680 #if 0
3681 /*
3682 *#
3683 *#% getattr vp = = =
3684 *#
3685 */
3686 struct vnop_getattr_args {
3687 struct vnodeop_desc *a_desc;
3688 vnode_t a_vp;
3689 struct vnode_attr *a_vap;
3690 vfs_context_t a_context;
3691 };
3692 #endif /* 0*/
3693 errno_t
VNOP_GETATTR(vnode_t vp,struct vnode_attr * vap,vfs_context_t ctx)3694 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3695 {
3696 int _err;
3697 struct vnop_getattr_args a;
3698
3699 a.a_desc = &vnop_getattr_desc;
3700 a.a_vp = vp;
3701 a.a_vap = vap;
3702 a.a_context = ctx;
3703
3704 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3705 DTRACE_FSINFO(getattr, vnode_t, vp);
3706
3707 return _err;
3708 }
3709
3710 #if 0
3711 /*
3712 *#
3713 *#% setattr vp L L L
3714 *#
3715 */
3716 struct vnop_setattr_args {
3717 struct vnodeop_desc *a_desc;
3718 vnode_t a_vp;
3719 struct vnode_attr *a_vap;
3720 vfs_context_t a_context;
3721 };
3722 #endif /* 0*/
/*
 * Dispatch VNOP_SETATTR to the filesystem, then perform generic
 * post-processing: shadow ownership/mode changes into the AppleDouble
 * "._" file on non-native-xattr volumes, invalidate the kauth
 * authorization cache when security-relevant attributes changed, and
 * post NOTE_ATTRIB to watchers.
 */
errno_t
VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_setattr_args a;

	a.a_desc = &vnop_setattr_desc;
	a.a_vp = vp;
	a.a_vap = vap;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(setattr, vnode_t, vp);

#if CONFIG_APPLEDOUBLE
	/*
	 * Shadow uid/gid/mod change to extended attribute file.
	 */
	if (_err == 0 && !NATIVE_XATTR(vp)) {
		struct vnode_attr va;
		int change = 0;

		/* Mirror only the ownership/permission attributes being set. */
		VATTR_INIT(&va);
		if (VATTR_IS_ACTIVE(vap, va_uid)) {
			VATTR_SET(&va, va_uid, vap->va_uid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_gid)) {
			VATTR_SET(&va, va_gid, vap->va_gid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_mode)) {
			VATTR_SET(&va, va_mode, vap->va_mode);
			change = 1;
		}
		if (change) {
			vnode_t dvp;
			const char *vname;

			dvp = vnode_getparent(vp);
			vname = vnode_getname(vp);

			/*
			 * NOTE(review): dvp/vname may be NULL here; assumes
			 * xattrfile_setattr tolerates that — confirm.
			 */
			xattrfile_setattr(dvp, vname, &va, ctx);
			if (dvp != NULLVP) {
				vnode_put(dvp);
			}
			if (vname != NULL) {
				vnode_putname(vname);
			}
		}
	}
#endif /* CONFIG_APPLEDOUBLE */

	/*
	 * If we have changed any of the things about the file that are likely
	 * to result in changes to authorization results, blow the vnode auth
	 * cache
	 */
	if (_err == 0 && (
	    VATTR_IS_SUPPORTED(vap, va_mode) ||
	    VATTR_IS_SUPPORTED(vap, va_uid) ||
	    VATTR_IS_SUPPORTED(vap, va_gid) ||
	    VATTR_IS_SUPPORTED(vap, va_flags) ||
	    VATTR_IS_SUPPORTED(vap, va_acl) ||
	    VATTR_IS_SUPPORTED(vap, va_uuuid) ||
	    VATTR_IS_SUPPORTED(vap, va_guuid))) {
		vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);

#if NAMEDSTREAMS
		/* Flush the resource fork's cached rights as well. */
		if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
			vnode_t svp;
			if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
				vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
				vnode_put(svp);
			}
		}
#endif /* NAMEDSTREAMS */
	}


	post_event_if_success(vp, _err, NOTE_ATTRIB);

	return _err;
}
3807
3808
3809 #if 0
3810 /*
3811 *#
3812 *#% read vp L L L
3813 *#
3814 */
3815 struct vnop_read_args {
3816 struct vnodeop_desc *a_desc;
3817 vnode_t a_vp;
3818 struct uio *a_uio;
3819 int a_ioflag;
3820 vfs_context_t a_context;
3821 };
3822 #endif /* 0*/
/*
 * Dispatch a read to the filesystem.  Unlike most VNOP wrappers, a NULL
 * context is rejected with EINVAL rather than defaulted.
 */
errno_t
VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
{
	int _err;
	struct vnop_read_args a;
#if CONFIG_DTRACE
	/* Snapshot the residual so DTrace can report bytes transferred. */
	user_ssize_t resid = uio_resid(uio);
#endif

	if (ctx == NULL) {
		return EINVAL;
	}

	a.a_desc = &vnop_read_desc;
	a.a_vp = vp;
	a.a_uio = uio;
	a.a_ioflag = ioflag;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
	DTRACE_FSINFO_IO(read,
	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));

	return _err;
}
3848
3849
3850 #if 0
3851 /*
3852 *#
3853 *#% write vp L L L
3854 *#
3855 */
3856 struct vnop_write_args {
3857 struct vnodeop_desc *a_desc;
3858 vnode_t a_vp;
3859 struct uio *a_uio;
3860 int a_ioflag;
3861 vfs_context_t a_context;
3862 };
3863 #endif /* 0*/
/*
 * Dispatch a write to the filesystem.  A NULL context is rejected with
 * EINVAL.  On success, NOTE_WRITE is posted to watchers of the vnode.
 */
errno_t
VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
{
	struct vnop_write_args a;
	int _err;
#if CONFIG_DTRACE
	/* Snapshot the residual so DTrace can report bytes transferred. */
	user_ssize_t resid = uio_resid(uio);
#endif

	if (ctx == NULL) {
		return EINVAL;
	}

	a.a_desc = &vnop_write_desc;
	a.a_vp = vp;
	a.a_uio = uio;
	a.a_ioflag = ioflag;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
	DTRACE_FSINFO_IO(write,
	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));

	/* Notify kqueue watchers that the file's data changed. */
	post_event_if_success(vp, _err, NOTE_WRITE);

	return _err;
}
3891
3892
3893 #if 0
3894 /*
3895 *#
3896 *#% ioctl vp U U U
3897 *#
3898 */
3899 struct vnop_ioctl_args {
3900 struct vnodeop_desc *a_desc;
3901 vnode_t a_vp;
3902 u_long a_command;
3903 caddr_t a_data;
3904 int a_fflag;
3905 vfs_context_t a_context;
3906 };
3907 #endif /* 0*/
/*
 * Dispatch an ioctl to the filesystem, after screening for 64-bit
 * readiness and short-circuiting DKIOCISSOLIDSTATE on the root device.
 */
errno_t
VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
{
	int _err;
	struct vnop_ioctl_args a;

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

	/*
	 * This check should probably have been put in the TTY code instead...
	 *
	 * We have to be careful about what we assume during startup and shutdown.
	 * We have to be able to use the root filesystem's device vnode even when
	 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
	 * structure. If there is no data pointer, it doesn't matter whether
	 * the device is 64-bit ready. Any command (like DKIOCSYNCHRONIZE)
	 * which passes NULL for its data pointer can therefore be used during
	 * mount or unmount of the root filesystem.
	 *
	 * Depending on what root filesystems need to do during mount/unmount, we
	 * may need to loosen this check again in the future.
	 */
	if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
		if (data != NULL && !vnode_vfs64bitready(vp)) {
			return ENOTTY;
		}
	}

	/*
	 * Fast path: answer DKIOCISSOLIDSTATE for the root device from the
	 * cached rootvp_is_ssd flag without calling into the driver.
	 * NOTE(review): data is caddr_t, so this stores a single byte;
	 * assumes that is compatible with how callers read the
	 * DKIOCISSOLIDSTATE result — confirm against the dk ioctl definition.
	 */
	if ((command == DKIOCISSOLIDSTATE) && (vp == rootvp) && rootvp_is_ssd && data) {
		*data = 1;
		return 0;
	}

	a.a_desc = &vnop_ioctl_desc;
	a.a_vp = vp;
	a.a_command = command;
	a.a_data = data;
	a.a_fflag = fflag;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(ioctl, vnode_t, vp);

	return _err;
}
3955
3956
3957 #if 0
3958 /*
3959 *#
3960 *#% select vp U U U
3961 *#
3962 */
3963 struct vnop_select_args {
3964 struct vnodeop_desc *a_desc;
3965 vnode_t a_vp;
3966 int a_which;
3967 int a_fflags;
3968 void *a_wql;
3969 vfs_context_t a_context;
3970 };
3971 #endif /* 0*/
3972 errno_t
VNOP_SELECT(vnode_t vp,int which,int fflags,void * wql,vfs_context_t ctx)3973 VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t ctx)
3974 {
3975 int _err;
3976 struct vnop_select_args a;
3977
3978 if (ctx == NULL) {
3979 ctx = vfs_context_current();
3980 }
3981 a.a_desc = &vnop_select_desc;
3982 a.a_vp = vp;
3983 a.a_which = which;
3984 a.a_fflags = fflags;
3985 a.a_context = ctx;
3986 a.a_wql = wql;
3987
3988 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
3989 DTRACE_FSINFO(select, vnode_t, vp);
3990
3991 return _err;
3992 }
3993
3994
3995 #if 0
3996 /*
3997 *#
3998 *#% exchange fvp L L L
3999 *#% exchange tvp L L L
4000 *#
4001 */
4002 struct vnop_exchange_args {
4003 struct vnodeop_desc *a_desc;
4004 vnode_t a_fvp;
4005 vnode_t a_tvp;
4006 int a_options;
4007 vfs_context_t a_context;
4008 };
4009 #endif /* 0*/
4010 errno_t
VNOP_EXCHANGE(vnode_t fvp,vnode_t tvp,int options,vfs_context_t ctx)4011 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
4012 {
4013 int _err;
4014 struct vnop_exchange_args a;
4015
4016 a.a_desc = &vnop_exchange_desc;
4017 a.a_fvp = fvp;
4018 a.a_tvp = tvp;
4019 a.a_options = options;
4020 a.a_context = ctx;
4021
4022 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
4023 DTRACE_FSINFO(exchange, vnode_t, fvp);
4024
4025 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
4026 post_event_if_success(fvp, _err, NOTE_ATTRIB);
4027 post_event_if_success(tvp, _err, NOTE_ATTRIB);
4028
4029 return _err;
4030 }
4031
4032
4033 #if 0
4034 /*
4035 *#
4036 *#% revoke vp U U U
4037 *#
4038 */
4039 struct vnop_revoke_args {
4040 struct vnodeop_desc *a_desc;
4041 vnode_t a_vp;
4042 int a_flags;
4043 vfs_context_t a_context;
4044 };
4045 #endif /* 0*/
4046 errno_t
VNOP_REVOKE(vnode_t vp,int flags,vfs_context_t ctx)4047 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
4048 {
4049 struct vnop_revoke_args a;
4050 int _err;
4051
4052 a.a_desc = &vnop_revoke_desc;
4053 a.a_vp = vp;
4054 a.a_flags = flags;
4055 a.a_context = ctx;
4056
4057 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
4058 DTRACE_FSINFO(revoke, vnode_t, vp);
4059
4060 return _err;
4061 }
4062
4063
4064 #if 0
4065 /*
4066 *#
4067 *# mmap_check - vp U U U
4068 *#
4069 */
4070 struct vnop_mmap_check_args {
4071 struct vnodeop_desc *a_desc;
4072 vnode_t a_vp;
4073 int a_flags;
4074 vfs_context_t a_context;
4075 };
4076 #endif /* 0 */
4077 errno_t
VNOP_MMAP_CHECK(vnode_t vp,int flags,vfs_context_t ctx)4078 VNOP_MMAP_CHECK(vnode_t vp, int flags, vfs_context_t ctx)
4079 {
4080 int _err;
4081 struct vnop_mmap_check_args a;
4082
4083 a.a_desc = &vnop_mmap_check_desc;
4084 a.a_vp = vp;
4085 a.a_flags = flags;
4086 a.a_context = ctx;
4087
4088 _err = (*vp->v_op[vnop_mmap_check_desc.vdesc_offset])(&a);
4089 if (_err == ENOTSUP) {
4090 _err = 0;
4091 }
4092 DTRACE_FSINFO(mmap_check, vnode_t, vp);
4093
4094 return _err;
4095 }
4096
4097 #if 0
4098 /*
4099 *#
4100 *# mmap - vp U U U
4101 *#
4102 */
4103 struct vnop_mmap_args {
4104 struct vnodeop_desc *a_desc;
4105 vnode_t a_vp;
4106 int a_fflags;
4107 vfs_context_t a_context;
4108 };
4109 #endif /* 0*/
4110 errno_t
VNOP_MMAP(vnode_t vp,int fflags,vfs_context_t ctx)4111 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
4112 {
4113 int _err;
4114 struct vnop_mmap_args a;
4115
4116 a.a_desc = &vnop_mmap_desc;
4117 a.a_vp = vp;
4118 a.a_fflags = fflags;
4119 a.a_context = ctx;
4120
4121 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
4122 DTRACE_FSINFO(mmap, vnode_t, vp);
4123
4124 return _err;
4125 }
4126
4127
4128 #if 0
4129 /*
4130 *#
4131 *# mnomap - vp U U U
4132 *#
4133 */
4134 struct vnop_mnomap_args {
4135 struct vnodeop_desc *a_desc;
4136 vnode_t a_vp;
4137 vfs_context_t a_context;
4138 };
4139 #endif /* 0*/
4140 errno_t
VNOP_MNOMAP(vnode_t vp,vfs_context_t ctx)4141 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
4142 {
4143 int _err;
4144 struct vnop_mnomap_args a;
4145
4146 a.a_desc = &vnop_mnomap_desc;
4147 a.a_vp = vp;
4148 a.a_context = ctx;
4149
4150 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
4151 DTRACE_FSINFO(mnomap, vnode_t, vp);
4152
4153 return _err;
4154 }
4155
4156
4157 #if 0
4158 /*
4159 *#
4160 *#% fsync vp L L L
4161 *#
4162 */
4163 struct vnop_fsync_args {
4164 struct vnodeop_desc *a_desc;
4165 vnode_t a_vp;
4166 int a_waitfor;
4167 vfs_context_t a_context;
4168 };
4169 #endif /* 0*/
4170 errno_t
VNOP_FSYNC(vnode_t vp,int waitfor,vfs_context_t ctx)4171 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
4172 {
4173 struct vnop_fsync_args a;
4174 int _err;
4175
4176 a.a_desc = &vnop_fsync_desc;
4177 a.a_vp = vp;
4178 a.a_waitfor = waitfor;
4179 a.a_context = ctx;
4180
4181 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
4182 DTRACE_FSINFO(fsync, vnode_t, vp);
4183
4184 return _err;
4185 }
4186
4187
4188 #if 0
4189 /*
4190 *#
4191 *#% remove dvp L U U
4192 *#% remove vp L U U
4193 *#
4194 */
4195 struct vnop_remove_args {
4196 struct vnodeop_desc *a_desc;
4197 vnode_t a_dvp;
4198 vnode_t a_vp;
4199 struct componentname *a_cnp;
4200 int a_flags;
4201 vfs_context_t a_context;
4202 };
4203 #endif /* 0*/
/*
 * Dispatch VNOP_REMOVE (unlink vp from directory dvp) to the filesystem,
 * then perform generic post-processing: mark the vnode for inactivation,
 * clean up any AppleDouble sidecar, and post kqueue events.
 */
errno_t
VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
{
	int _err;
	struct vnop_remove_args a;

	a.a_desc = &vnop_remove_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_context = ctx;

	_err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(remove, vnode_t, vp);

	if (_err == 0) {
		/* Ensure VNOP_INACTIVE runs when the last iocount drops. */
		vnode_setneedinactive(vp);
#if CONFIG_APPLEDOUBLE
		if (!(NATIVE_XATTR(dvp))) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
		}
#endif /* CONFIG_APPLEDOUBLE */
	}

	/* Notify watchers: the file was deleted, the directory changed. */
	post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
	post_event_if_success(dvp, _err, NOTE_WRITE);

	return _err;
}
4237
/*
 * Compound remove: lookup and unlink performed by the filesystem in a
 * single call.  *vpp may be NULLVP on entry (the FS then performs the
 * lookup); on return the namei state is reconciled via the post hook.
 */
int
VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_remove_args a;
	/* Remember whether the caller supplied a vnode or expects a lookup. */
	int no_vp = (*vpp == NULLVP);

	a.a_desc = &vnop_compound_remove_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = &ndp->ni_cnd;
	a.a_flags = flags;
	a.a_vap = vap;
	a.a_context = ctx;
	a.a_remove_authorizer = vn_authorize_unlink;

	_err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
	if (_err == 0 && *vpp) {
		DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
	} else {
		DTRACE_FSINFO(compound_remove, vnode_t, dvp);
	}
	if (_err == 0) {
		/*
		 * NOTE(review): *vpp is dereferenced without a NULL check;
		 * assumes the FS always returns the removed vnode on
		 * success — confirm.
		 */
		vnode_setneedinactive(*vpp);
#if CONFIG_APPLEDOUBLE
		if (!(NATIVE_XATTR(dvp))) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
		}
#endif /* CONFIG_APPLEDOUBLE */
	}

	/* Notify watchers: the file was deleted, the directory changed. */
	post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
	post_event_if_success(dvp, _err, NOTE_WRITE);

	if (no_vp) {
		/* We did the lookup; reconcile namei state and drop on failure. */
		lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
		if (*vpp && _err && _err != EKEEPLOOKING) {
			vnode_put(*vpp);
			*vpp = NULLVP;
		}
	}

	//printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);

	return _err;
}
4287
4288 #if 0
4289 /*
4290 *#
4291 *#% link vp U U U
4292 *#% link tdvp L U U
4293 *#
4294 */
4295 struct vnop_link_args {
4296 struct vnodeop_desc *a_desc;
4297 vnode_t a_vp;
4298 vnode_t a_tdvp;
4299 struct componentname *a_cnp;
4300 vfs_context_t a_context;
4301 };
4302 #endif /* 0*/
/*
 * Dispatch VNOP_LINK (create hard link to vp in directory tdvp) to the
 * filesystem.  On non-native-xattr volumes, refuses to link to an
 * AppleDouble "._" sidecar file.
 */
errno_t
VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
{
	int _err;
	struct vnop_link_args a;

#if CONFIG_APPLEDOUBLE
	/*
	 * For file systems with non-native extended attributes,
	 * disallow linking to an existing "._" Apple Double file.
	 */
	if (!NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
		const char *vname;

		vname = vnode_getname(vp);
		if (vname != NULL) {
			_err = 0;
			/* Reject names of the form "._<something>". */
			if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
				_err = EPERM;
			}
			vnode_putname(vname);
			if (_err) {
				return _err;
			}
		}
	}
#endif /* CONFIG_APPLEDOUBLE */

	a.a_desc = &vnop_link_desc;
	a.a_vp = vp;
	a.a_tdvp = tdvp;
	a.a_cnp = cnp;
	a.a_context = ctx;

	/* The target directory's filesystem performs the link. */
	_err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(link, vnode_t, vp);

	/* Notify watchers: link count changed, directory changed. */
	post_event_if_success(vp, _err, NOTE_LINK);
	post_event_if_success(tdvp, _err, NOTE_WRITE);

	return _err;
}
4345
4346 errno_t
vn_rename(struct vnode * fdvp,struct vnode ** fvpp,struct componentname * fcnp,struct vnode_attr * fvap,struct vnode * tdvp,struct vnode ** tvpp,struct componentname * tcnp,struct vnode_attr * tvap,vfs_rename_flags_t flags,vfs_context_t ctx)4347 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4348 struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4349 vfs_rename_flags_t flags, vfs_context_t ctx)
4350 {
4351 int _err;
4352 struct nameidata *fromnd = NULL;
4353 struct nameidata *tond = NULL;
4354 #if CONFIG_APPLEDOUBLE
4355 vnode_t src_attr_vp = NULLVP;
4356 vnode_t dst_attr_vp = NULLVP;
4357 char smallname1[48];
4358 char smallname2[48];
4359 char *xfromname = NULL;
4360 char *xtoname = NULL;
4361 #endif /* CONFIG_APPLEDOUBLE */
4362 int batched;
4363 uint32_t tdfflags; // Target directory file flags
4364
4365 batched = vnode_compound_rename_available(fdvp);
4366
4367 if (!batched) {
4368 if (*fvpp == NULLVP) {
4369 panic("Not batched, and no fvp?");
4370 }
4371 }
4372
4373 #if CONFIG_APPLEDOUBLE
4374 /*
4375 * We need to preflight any potential AppleDouble file for the source file
4376 * before doing the rename operation, since we could potentially be doing
4377 * this operation on a network filesystem, and would end up duplicating
4378 * the work. Also, save the source and destination names. Skip it if the
4379 * source has a "._" prefix.
4380 */
4381
4382 size_t xfromname_len = 0;
4383 size_t xtoname_len = 0;
4384 if (!NATIVE_XATTR(fdvp) &&
4385 !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
4386 int error;
4387
4388 /* Get source attribute file name. */
4389 xfromname_len = fcnp->cn_namelen + 3;
4390 if (xfromname_len > sizeof(smallname1)) {
4391 xfromname = kalloc_data(xfromname_len, Z_WAITOK);
4392 } else {
4393 xfromname = &smallname1[0];
4394 }
4395 strlcpy(xfromname, "._", xfromname_len);
4396 strlcat(xfromname, fcnp->cn_nameptr, xfromname_len);
4397
4398 /* Get destination attribute file name. */
4399 xtoname_len = tcnp->cn_namelen + 3;
4400 if (xtoname_len > sizeof(smallname2)) {
4401 xtoname = kalloc_data(xtoname_len, Z_WAITOK);
4402 } else {
4403 xtoname = &smallname2[0];
4404 }
4405 strlcpy(xtoname, "._", xtoname_len);
4406 strlcat(xtoname, tcnp->cn_nameptr, xtoname_len);
4407
4408 /*
4409 * Look up source attribute file, keep reference on it if exists.
4410 * Note that we do the namei with the nameiop of RENAME, which is different than
4411 * in the rename syscall. It's OK if the source file does not exist, since this
4412 * is only for AppleDouble files.
4413 */
4414 fromnd = kalloc_type(struct nameidata, Z_WAITOK);
4415 NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
4416 UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
4417 fromnd->ni_dvp = fdvp;
4418 error = namei(fromnd);
4419
4420 /*
4421 * If there was an error looking up source attribute file,
4422 * we'll behave as if it didn't exist.
4423 */
4424
4425 if (error == 0) {
4426 if (fromnd->ni_vp) {
4427 /* src_attr_vp indicates need to call vnode_put / nameidone later */
4428 src_attr_vp = fromnd->ni_vp;
4429
4430 if (fromnd->ni_vp->v_type != VREG) {
4431 src_attr_vp = NULLVP;
4432 vnode_put(fromnd->ni_vp);
4433 }
4434 }
4435 /*
4436 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4437 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4438 * have a vnode here, so we drop our namei buffer for the source attribute file
4439 */
4440 if (src_attr_vp == NULLVP) {
4441 nameidone(fromnd);
4442 }
4443 }
4444 }
4445 #endif /* CONFIG_APPLEDOUBLE */
4446
4447 if (batched) {
4448 _err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
4449 if (_err != 0) {
4450 printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
4451 }
4452 } else {
4453 if (flags) {
4454 _err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
4455 if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
4456 // Legacy...
4457 if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
4458 fcnp->cn_flags |= CN_SECLUDE_RENAME;
4459 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4460 }
4461 }
4462 } else {
4463 _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4464 }
4465 }
4466
4467 /*
4468 * If moved to a new directory that is restricted,
4469 * set the restricted flag on the item moved.
4470 */
4471 if (_err == 0) {
4472 _err = vnode_flags(tdvp, &tdfflags, ctx);
4473 if (_err == 0) {
4474 uint32_t inherit_flags = tdfflags & (UF_DATAVAULT | SF_RESTRICTED);
4475 if (inherit_flags) {
4476 uint32_t fflags;
4477 _err = vnode_flags(*fvpp, &fflags, ctx);
4478 if (_err == 0 && fflags != (fflags | inherit_flags)) {
4479 struct vnode_attr va;
4480 VATTR_INIT(&va);
4481 VATTR_SET(&va, va_flags, fflags | inherit_flags);
4482 _err = vnode_setattr(*fvpp, &va, ctx);
4483 }
4484 }
4485 }
4486 }
4487
4488 #if CONFIG_MACF
4489 if (_err == 0) {
4490 mac_vnode_notify_rename(
4491 ctx, /* ctx */
4492 *fvpp, /* fvp */
4493 fdvp, /* fdvp */
4494 fcnp, /* fcnp */
4495 *tvpp, /* tvp */
4496 tdvp, /* tdvp */
4497 tcnp, /* tcnp */
4498 (flags & VFS_RENAME_SWAP) /* swap */
4499 );
4500 }
4501 #endif
4502
4503 #if CONFIG_APPLEDOUBLE
4504 /*
4505 * Rename any associated extended attribute file (._ AppleDouble file).
4506 */
4507 if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
4508 int error = 0;
4509
4510 /*
4511 * Get destination attribute file vnode.
4512 * Note that tdvp already has an iocount reference. Make sure to check that we
4513 * get a valid vnode from namei.
4514 */
4515 tond = kalloc_type(struct nameidata, Z_WAITOK);
4516 NDINIT(tond, RENAME, OP_RENAME,
4517 NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4518 CAST_USER_ADDR_T(xtoname), ctx);
4519 tond->ni_dvp = tdvp;
4520 error = namei(tond);
4521
4522 if (error) {
4523 goto ad_error;
4524 }
4525
4526 if (tond->ni_vp) {
4527 dst_attr_vp = tond->ni_vp;
4528 }
4529
4530 if (src_attr_vp) {
4531 const char *old_name = src_attr_vp->v_name;
4532 vnode_t old_parent = src_attr_vp->v_parent;
4533
4534 if (batched) {
4535 error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
4536 tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
4537 0, ctx);
4538 } else {
4539 error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
4540 tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
4541 }
4542
4543 if (error == 0 && old_name == src_attr_vp->v_name &&
4544 old_parent == src_attr_vp->v_parent) {
4545 int update_flags = VNODE_UPDATE_NAME;
4546
4547 if (fdvp != tdvp) {
4548 update_flags |= VNODE_UPDATE_PARENT;
4549 }
4550
4551 if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
4552 vnode_update_identity(src_attr_vp, tdvp,
4553 tond->ni_cnd.cn_nameptr,
4554 tond->ni_cnd.cn_namelen,
4555 tond->ni_cnd.cn_hash,
4556 update_flags);
4557 }
4558 }
4559
4560 /* kevent notifications for moving resource files
4561 * _err is zero if we're here, so no need to notify directories, code
4562 * below will do that. only need to post the rename on the source and
4563 * possibly a delete on the dest
4564 */
4565 post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4566 if (dst_attr_vp) {
4567 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4568 }
4569 } else if (dst_attr_vp) {
4570 /*
4571 * Just delete destination attribute file vnode if it exists, since
4572 * we didn't have a source attribute file.
4573 * Note that tdvp already has an iocount reference.
4574 */
4575
4576 struct vnop_remove_args args;
4577
4578 args.a_desc = &vnop_remove_desc;
4579 args.a_dvp = tdvp;
4580 args.a_vp = dst_attr_vp;
4581 args.a_cnp = &tond->ni_cnd;
4582 args.a_context = ctx;
4583
4584 if (error == 0) {
4585 error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
4586
4587 if (error == 0) {
4588 vnode_setneedinactive(dst_attr_vp);
4589 }
4590 }
4591
4592 /* kevent notification for deleting the destination's attribute file
4593 * if it existed. Only need to post the delete on the destination, since
4594 * the code below will handle the directories.
4595 */
4596 post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4597 }
4598 }
4599 ad_error:
4600 if (src_attr_vp) {
4601 vnode_put(src_attr_vp);
4602 nameidone(fromnd);
4603 }
4604 if (dst_attr_vp) {
4605 vnode_put(dst_attr_vp);
4606 nameidone(tond);
4607 }
4608 if (xfromname && xfromname != &smallname1[0]) {
4609 kfree_data(xfromname, xfromname_len);
4610 }
4611 if (xtoname && xtoname != &smallname2[0]) {
4612 kfree_data(xtoname, xtoname_len);
4613 }
4614 #endif /* CONFIG_APPLEDOUBLE */
4615 kfree_type(struct nameidata, fromnd);
4616 kfree_type(struct nameidata, tond);
4617 return _err;
4618 }
4619
4620
4621 #if 0
4622 /*
4623 *#
4624 *#% rename fdvp U U U
4625 *#% rename fvp U U U
4626 *#% rename tdvp L U U
4627 *#% rename tvp X U U
4628 *#
4629 */
4630 struct vnop_rename_args {
4631 struct vnodeop_desc *a_desc;
4632 vnode_t a_fdvp;
4633 vnode_t a_fvp;
4634 struct componentname *a_fcnp;
4635 vnode_t a_tdvp;
4636 vnode_t a_tvp;
4637 struct componentname *a_tcnp;
4638 vfs_context_t a_context;
4639 };
4640 #endif /* 0*/
4641 errno_t
VNOP_RENAME(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_context_t ctx)4642 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4643 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4644 vfs_context_t ctx)
4645 {
4646 int _err = 0;
4647 struct vnop_rename_args a;
4648
4649 a.a_desc = &vnop_rename_desc;
4650 a.a_fdvp = fdvp;
4651 a.a_fvp = fvp;
4652 a.a_fcnp = fcnp;
4653 a.a_tdvp = tdvp;
4654 a.a_tvp = tvp;
4655 a.a_tcnp = tcnp;
4656 a.a_context = ctx;
4657
4658 /* do the rename of the main file. */
4659 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4660 DTRACE_FSINFO(rename, vnode_t, fdvp);
4661
4662 if (_err) {
4663 return _err;
4664 }
4665
4666 return post_rename(fdvp, fvp, tdvp, tvp);
4667 }
4668
4669 static errno_t
post_rename(vnode_t fdvp,vnode_t fvp,vnode_t tdvp,vnode_t tvp)4670 post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
4671 {
4672 if (tvp && tvp != fvp) {
4673 vnode_setneedinactive(tvp);
4674 }
4675
4676 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4677 int events = NOTE_WRITE;
4678 if (vnode_isdir(fvp)) {
4679 /* Link count on dir changed only if we are moving a dir and...
4680 * --Moved to new dir, not overwriting there
4681 * --Kept in same dir and DID overwrite
4682 */
4683 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4684 events |= NOTE_LINK;
4685 }
4686 }
4687
4688 lock_vnode_and_post(fdvp, events);
4689 if (fdvp != tdvp) {
4690 lock_vnode_and_post(tdvp, events);
4691 }
4692
4693 /* If you're replacing the target, post a deletion for it */
4694 if (tvp) {
4695 lock_vnode_and_post(tvp, NOTE_DELETE);
4696 }
4697
4698 lock_vnode_and_post(fvp, NOTE_RENAME);
4699
4700 return 0;
4701 }
4702
4703 #if 0
4704 /*
4705 *#
4706 *#% renamex fdvp U U U
4707 *#% renamex fvp U U U
4708 *#% renamex tdvp L U U
4709 *#% renamex tvp X U U
4710 *#
4711 */
4712 struct vnop_renamex_args {
4713 struct vnodeop_desc *a_desc;
4714 vnode_t a_fdvp;
4715 vnode_t a_fvp;
4716 struct componentname *a_fcnp;
4717 vnode_t a_tdvp;
4718 vnode_t a_tvp;
4719 struct componentname *a_tcnp;
4720 vfs_rename_flags_t a_flags;
4721 vfs_context_t a_context;
4722 };
4723 #endif /* 0*/
4724 errno_t
VNOP_RENAMEX(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_rename_flags_t flags,vfs_context_t ctx)4725 VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4726 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4727 vfs_rename_flags_t flags, vfs_context_t ctx)
4728 {
4729 int _err = 0;
4730 struct vnop_renamex_args a;
4731
4732 a.a_desc = &vnop_renamex_desc;
4733 a.a_fdvp = fdvp;
4734 a.a_fvp = fvp;
4735 a.a_fcnp = fcnp;
4736 a.a_tdvp = tdvp;
4737 a.a_tvp = tvp;
4738 a.a_tcnp = tcnp;
4739 a.a_flags = flags;
4740 a.a_context = ctx;
4741
4742 /* do the rename of the main file. */
4743 _err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
4744 DTRACE_FSINFO(renamex, vnode_t, fdvp);
4745
4746 if (_err) {
4747 return _err;
4748 }
4749
4750 return post_rename(fdvp, fvp, tdvp, tvp);
4751 }
4752
4753
int
VNOP_COMPOUND_RENAME(
	struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
	struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
	uint32_t flags, vfs_context_t ctx)
{
	int _err = 0;
	int events;
	struct vnop_compound_rename_args a;
	int no_fvp, no_tvp;

	/*
	 * Remember whether the caller supplied pre-resolved source/target
	 * vnodes.  If not, the filesystem resolves them as part of the
	 * compound operation and we must run the compound-lookup post
	 * hook on whatever it hands back.
	 */
	no_fvp = (*fvpp) == NULLVP;
	no_tvp = (*tvpp) == NULLVP;

	a.a_desc = &vnop_compound_rename_desc;

	a.a_fdvp = fdvp;
	a.a_fvpp = fvpp;
	a.a_fcnp = fcnp;
	a.a_fvap = fvap;

	a.a_tdvp = tdvp;
	a.a_tvpp = tvpp;
	a.a_tcnp = tcnp;
	a.a_tvap = tvap;

	a.a_flags = flags;
	a.a_context = ctx;
	/* Filesystem calls back into VFS for the authorization step. */
	a.a_rename_authorizer = vn_authorize_rename;
	a.a_reserved = NULL;

	/* do the rename of the main file. */
	_err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(compound_rename, vnode_t, fdvp);

	if (_err == 0) {
		/* A replaced (distinct) target goes inactive once released. */
		if (*tvpp && *tvpp != *fvpp) {
			vnode_setneedinactive(*tvpp);
		}
	}

	/* Wrote at least one directory. If transplanted a dir, also changed link counts */
	if (_err == 0 && *fvpp != *tvpp) {
		if (!*fvpp) {
			panic("No fvpp after compound rename?");
		}

		events = NOTE_WRITE;
		if (vnode_isdir(*fvpp)) {
			/* Link count on dir changed only if we are moving a dir and...
			 * --Moved to new dir, not overwriting there
			 * --Kept in same dir and DID overwrite
			 */
			if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
				events |= NOTE_LINK;
			}
		}

		lock_vnode_and_post(fdvp, events);
		if (fdvp != tdvp) {
			lock_vnode_and_post(tdvp, events);
		}

		/* If you're replacing the target, post a deletion for it */
		if (*tvpp) {
			lock_vnode_and_post(*tvpp, NOTE_DELETE);
		}

		lock_vnode_and_post(*fvpp, NOTE_RENAME);
	}

	/*
	 * Feed vnodes instantiated by the in-filesystem lookup back to the
	 * lookup machinery (only for vnodes we did not start with).
	 */
	if (no_fvp) {
		lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
	}
	if (no_tvp && *tvpp != NULLVP) {
		lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
	}

	/*
	 * On hard failure (EKEEPLOOKING means the lookup should continue,
	 * so vnodes stay), drop iocounts on anything we would have returned.
	 */
	if (_err && _err != EKEEPLOOKING) {
		if (*fvpp) {
			vnode_put(*fvpp);
			*fvpp = NULLVP;
		}
		if (*tvpp) {
			vnode_put(*tvpp);
			*tvpp = NULLVP;
		}
	}

	return _err;
}
4845
4846 int
vn_mkdir(struct vnode * dvp,struct vnode ** vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)4847 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4848 struct vnode_attr *vap, vfs_context_t ctx)
4849 {
4850 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4851 panic("Non-CREATE nameiop in vn_mkdir()?");
4852 }
4853
4854 if (vnode_compound_mkdir_available(dvp)) {
4855 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4856 } else {
4857 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4858 }
4859 }
4860
4861 #if 0
4862 /*
4863 *#
4864 *#% mkdir dvp L U U
4865 *#% mkdir vpp - L -
4866 *#
4867 */
4868 struct vnop_mkdir_args {
4869 struct vnodeop_desc *a_desc;
4870 vnode_t a_dvp;
4871 vnode_t *a_vpp;
4872 struct componentname *a_cnp;
4873 struct vnode_attr *a_vap;
4874 vfs_context_t a_context;
4875 };
4876 #endif /* 0*/
4877 errno_t
VNOP_MKDIR(struct vnode * dvp,struct vnode ** vpp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx)4878 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4879 struct vnode_attr *vap, vfs_context_t ctx)
4880 {
4881 int _err;
4882 struct vnop_mkdir_args a;
4883
4884 a.a_desc = &vnop_mkdir_desc;
4885 a.a_dvp = dvp;
4886 a.a_vpp = vpp;
4887 a.a_cnp = cnp;
4888 a.a_vap = vap;
4889 a.a_context = ctx;
4890
4891 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4892 if (_err == 0 && *vpp) {
4893 DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4894 }
4895 #if CONFIG_APPLEDOUBLE
4896 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4897 /*
4898 * Remove stale Apple Double file (if any).
4899 */
4900 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4901 }
4902 #endif /* CONFIG_APPLEDOUBLE */
4903
4904 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4905
4906 return _err;
4907 }
4908
4909 int
VNOP_COMPOUND_MKDIR(struct vnode * dvp,struct vnode ** vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)4910 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4911 struct vnode_attr *vap, vfs_context_t ctx)
4912 {
4913 int _err;
4914 struct vnop_compound_mkdir_args a;
4915
4916 a.a_desc = &vnop_compound_mkdir_desc;
4917 a.a_dvp = dvp;
4918 a.a_vpp = vpp;
4919 a.a_cnp = &ndp->ni_cnd;
4920 a.a_vap = vap;
4921 a.a_flags = 0;
4922 a.a_context = ctx;
4923 #if 0
4924 a.a_mkdir_authorizer = vn_authorize_mkdir;
4925 #endif /* 0 */
4926 a.a_reserved = NULL;
4927
4928 _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
4929 if (_err == 0 && *vpp) {
4930 DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
4931 }
4932 #if CONFIG_APPLEDOUBLE
4933 if (_err == 0 && !NATIVE_XATTR(dvp)) {
4934 /*
4935 * Remove stale Apple Double file (if any).
4936 */
4937 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
4938 }
4939 #endif /* CONFIG_APPLEDOUBLE */
4940
4941 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4942
4943 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
4944 if (*vpp && _err && _err != EKEEPLOOKING) {
4945 vnode_put(*vpp);
4946 *vpp = NULLVP;
4947 }
4948
4949 return _err;
4950 }
4951
4952 int
vn_rmdir(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)4953 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
4954 {
4955 if (vnode_compound_rmdir_available(dvp)) {
4956 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
4957 } else {
4958 if (*vpp == NULLVP) {
4959 panic("NULL vp, but not a compound VNOP?");
4960 }
4961 if (vap != NULL) {
4962 panic("Non-NULL vap, but not a compound VNOP?");
4963 }
4964 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
4965 }
4966 }
4967
4968 #if 0
4969 /*
4970 *#
4971 *#% rmdir dvp L U U
4972 *#% rmdir vp L U U
4973 *#
4974 */
4975 struct vnop_rmdir_args {
4976 struct vnodeop_desc *a_desc;
4977 vnode_t a_dvp;
4978 vnode_t a_vp;
4979 struct componentname *a_cnp;
4980 vfs_context_t a_context;
4981 };
4982
4983 #endif /* 0*/
4984 errno_t
VNOP_RMDIR(struct vnode * dvp,struct vnode * vp,struct componentname * cnp,vfs_context_t ctx)4985 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
4986 {
4987 int _err;
4988 struct vnop_rmdir_args a;
4989
4990 a.a_desc = &vnop_rmdir_desc;
4991 a.a_dvp = dvp;
4992 a.a_vp = vp;
4993 a.a_cnp = cnp;
4994 a.a_context = ctx;
4995
4996 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
4997 DTRACE_FSINFO(rmdir, vnode_t, vp);
4998
4999 if (_err == 0) {
5000 vnode_setneedinactive(vp);
5001 #if CONFIG_APPLEDOUBLE
5002 if (!(NATIVE_XATTR(dvp))) {
5003 /*
5004 * Remove any associated extended attribute file (._ AppleDouble file).
5005 */
5006 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
5007 }
5008 #endif
5009 }
5010
5011 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
5012 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
5013 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5014
5015 return _err;
5016 }
5017
5018 int
VNOP_COMPOUND_RMDIR(struct vnode * dvp,struct vnode ** vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)5019 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
5020 struct vnode_attr *vap, vfs_context_t ctx)
5021 {
5022 int _err;
5023 struct vnop_compound_rmdir_args a;
5024 int no_vp;
5025
5026 a.a_desc = &vnop_mkdir_desc;
5027 a.a_dvp = dvp;
5028 a.a_vpp = vpp;
5029 a.a_cnp = &ndp->ni_cnd;
5030 a.a_vap = vap;
5031 a.a_flags = 0;
5032 a.a_context = ctx;
5033 a.a_rmdir_authorizer = vn_authorize_rmdir;
5034 a.a_reserved = NULL;
5035
5036 no_vp = (*vpp == NULLVP);
5037
5038 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
5039 if (_err == 0 && *vpp) {
5040 DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
5041 }
5042 #if CONFIG_APPLEDOUBLE
5043 if (_err == 0 && !NATIVE_XATTR(dvp)) {
5044 /*
5045 * Remove stale Apple Double file (if any).
5046 */
5047 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
5048 }
5049 #endif
5050
5051 if (*vpp) {
5052 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
5053 }
5054 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5055
5056 if (no_vp) {
5057 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
5058
5059 #if 0 /* Removing orphaned ._ files requires a vp.... */
5060 if (*vpp && _err && _err != EKEEPLOOKING) {
5061 vnode_put(*vpp);
5062 *vpp = NULLVP;
5063 }
5064 #endif /* 0 */
5065 }
5066
5067 return _err;
5068 }
5069
5070 #if CONFIG_APPLEDOUBLE
5071 /*
5072 * Remove a ._ AppleDouble file
5073 */
5074 #define AD_STALE_SECS (180)
/*
 * Remove the "._<basename>" AppleDouble sidecar in dvp, if one exists.
 * When force is 0, only remove it if it looks stale (untouched for more
 * than AD_STALE_SECS); when force is non-zero, remove it unconditionally.
 */
static void
xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
{
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t alloc_len;
	size_t copy_len;

	/* Nothing to do for empty names; never recurse onto "._" names themselves. */
	if ((basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>", spilling to a heap buffer if it doesn't fit. */
	filename = &smallname[0];
	alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (alloc_len >= sizeof(smallname)) {
		alloc_len++; /* snprintf result doesn't include '\0' */
		filename = kalloc_data(alloc_len, Z_WAITOK);
		copy_len = snprintf(filename, alloc_len, "._%s", basename);
	}
	/*
	 * WANTPARENT: on success both xvp and dvp come back holding
	 * iocounts, which out1 below releases.
	 */
	NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(filename), ctx);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0) {
		goto out2;
	}

	xvp = nd.ni_vp;
	nameidone(&nd);
	/* Only regular files can be AppleDouble sidecars. */
	if (xvp->v_type != VREG) {
		goto out1;
	}

	/*
	 * When creating a new object and a "._" file already
	 * exists, check to see if it's a stale "._" file. These are
	 * typically AppleDouble (AD) files generated via XNU's
	 * VFS compatibility shims for storing XATTRs and streams
	 * on filesystems that do not support them natively.
	 */
	if (!force) {
		struct vnode_attr va;

		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_data_size);
		VATTR_WANTED(&va, va_modify_time);
		VATTR_WANTED(&va, va_change_time);

		if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
		    VATTR_IS_SUPPORTED(&va, va_data_size) &&
		    va.va_data_size != 0) {
			struct timeval tv_compare = {};
			struct timeval tv_now = {};

			/*
			 * If the file exists (and has non-zero size), then use the newer of
			 * chgtime / modtime to compare against present time. Note that setting XATTRs or updating
			 * streams through the compatibility interfaces may not trigger chgtime to be updated, so
			 * checking either modtime or chgtime is useful.
			 */
			if (VATTR_IS_SUPPORTED(&va, va_modify_time) && (va.va_modify_time.tv_sec)) {
				if (VATTR_IS_SUPPORTED(&va, va_change_time) && (va.va_change_time.tv_sec)) {
					tv_compare.tv_sec = va.va_change_time.tv_sec;
					if (tv_compare.tv_sec < va.va_modify_time.tv_sec) {
						tv_compare.tv_sec = va.va_modify_time.tv_sec;
					}
				} else {
					/* fall back to mod-time alone if chgtime not supported or set to 0 */
					tv_compare.tv_sec = va.va_modify_time.tv_sec;
				}
			}

			/* Now, we have a time to compare against, compare against AD_STALE_SEC */
			microtime(&tv_now);
			if ((tv_compare.tv_sec > 0) &&
			    (tv_now.tv_sec > tv_compare.tv_sec) &&
			    ((tv_now.tv_sec - tv_compare.tv_sec) > AD_STALE_SECS)) {
				force = 1; /* must be stale */
			}
		}
	}

	if (force) {
		int error;

		error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
		if (error == 0) {
			vnode_setneedinactive(xvp);
		}

		/* Post knotes for the removed sidecar and the written parent. */
		post_event_if_success(xvp, error, NOTE_DELETE);
		post_event_if_success(dvp, error, NOTE_WRITE);
	}

out1:
	/* Release the iocounts namei took on parent and sidecar. */
	vnode_put(dvp);
	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		kfree_data(filename, alloc_len);
	}
}
5178
5179 /*
5180 * Shadow uid/gid/mod to a ._ AppleDouble file
5181 */
/*
 * Shadow attribute changes (e.g. uid/gid/mode) onto the "._<basename>"
 * AppleDouble sidecar in dvp, if one exists.  Best-effort: lookup and
 * setattr failures are silently ignored.
 */
static void
xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
    vfs_context_t ctx)
{
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t alloc_len;
	size_t copy_len;

	/* Nothing to do for empty names; never recurse onto "._" names themselves. */
	if ((dvp == NULLVP) ||
	    (basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	/* Build "._<basename>", spilling to a heap buffer if it doesn't fit. */
	filename = &smallname[0];
	alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (alloc_len >= sizeof(smallname)) {
		alloc_len++; /* snprintf result doesn't include '\0' */
		filename = kalloc_data(alloc_len, Z_WAITOK);
		copy_len = snprintf(filename, alloc_len, "._%s", basename);
	}
	/* Plain LOOKUP (no WANTPARENT): only xvp carries an iocount on success. */
	NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(filename), ctx);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0) {
		goto out2;
	}

	xvp = nd.ni_vp;
	nameidone(&nd);

	/* Only regular files can be AppleDouble sidecars. */
	if (xvp->v_type == VREG) {
		struct vnop_setattr_args a;

		a.a_desc = &vnop_setattr_desc;
		a.a_vp = xvp;
		a.a_vap = vap;
		a.a_context = ctx;

		/* Best-effort shadow; errors intentionally ignored. */
		(void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
	}

	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		kfree_data(filename, alloc_len);
	}
}
5232 #endif /* CONFIG_APPLEDOUBLE */
5233
5234 #if 0
5235 /*
5236 *#
5237 *#% symlink dvp L U U
5238 *#% symlink vpp - U -
5239 *#
5240 */
5241 struct vnop_symlink_args {
5242 struct vnodeop_desc *a_desc;
5243 vnode_t a_dvp;
5244 vnode_t *a_vpp;
5245 struct componentname *a_cnp;
5246 struct vnode_attr *a_vap;
5247 char *a_target;
5248 vfs_context_t a_context;
5249 };
5250
5251 #endif /* 0*/
5252 errno_t
VNOP_SYMLINK(struct vnode * dvp,struct vnode ** vpp,struct componentname * cnp,struct vnode_attr * vap,char * target,vfs_context_t ctx)5253 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5254 struct vnode_attr *vap, char *target, vfs_context_t ctx)
5255 {
5256 int _err;
5257 struct vnop_symlink_args a;
5258
5259 a.a_desc = &vnop_symlink_desc;
5260 a.a_dvp = dvp;
5261 a.a_vpp = vpp;
5262 a.a_cnp = cnp;
5263 a.a_vap = vap;
5264 a.a_target = target;
5265 a.a_context = ctx;
5266
5267 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
5268 DTRACE_FSINFO(symlink, vnode_t, dvp);
5269 #if CONFIG_APPLEDOUBLE
5270 if (_err == 0 && !NATIVE_XATTR(dvp)) {
5271 /*
5272 * Remove stale Apple Double file (if any). Posts its own knotes
5273 */
5274 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
5275 }
5276 #endif /* CONFIG_APPLEDOUBLE */
5277
5278 post_event_if_success(dvp, _err, NOTE_WRITE);
5279
5280 return _err;
5281 }
5282
5283 #if 0
5284 /*
5285 *#
5286 *#% readdir vp L L L
5287 *#
5288 */
5289 struct vnop_readdir_args {
5290 struct vnodeop_desc *a_desc;
5291 vnode_t a_vp;
5292 struct uio *a_uio;
5293 int a_flags;
5294 int *a_eofflag;
5295 int *a_numdirent;
5296 vfs_context_t a_context;
5297 };
5298
5299 #endif /* 0*/
5300 errno_t
VNOP_READDIR(struct vnode * vp,struct uio * uio,int flags,int * eofflag,int * numdirent,vfs_context_t ctx)5301 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
5302 int *numdirent, vfs_context_t ctx)
5303 {
5304 int _err;
5305 struct vnop_readdir_args a;
5306 #if CONFIG_DTRACE
5307 user_ssize_t resid = uio_resid(uio);
5308 #endif
5309
5310 a.a_desc = &vnop_readdir_desc;
5311 a.a_vp = vp;
5312 a.a_uio = uio;
5313 a.a_flags = flags;
5314 a.a_eofflag = eofflag;
5315 a.a_numdirent = numdirent;
5316 a.a_context = ctx;
5317
5318 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
5319 DTRACE_FSINFO_IO(readdir,
5320 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5321
5322 return _err;
5323 }
5324
5325 #if 0
5326 /*
5327 *#
5328 *#% readdirattr vp L L L
5329 *#
5330 */
5331 struct vnop_readdirattr_args {
5332 struct vnodeop_desc *a_desc;
5333 vnode_t a_vp;
5334 struct attrlist *a_alist;
5335 struct uio *a_uio;
5336 uint32_t a_maxcount;
5337 uint32_t a_options;
5338 uint32_t *a_newstate;
5339 int *a_eofflag;
5340 uint32_t *a_actualcount;
5341 vfs_context_t a_context;
5342 };
5343
5344 #endif /* 0*/
5345 errno_t
VNOP_READDIRATTR(struct vnode * vp,struct attrlist * alist,struct uio * uio,uint32_t maxcount,uint32_t options,uint32_t * newstate,int * eofflag,uint32_t * actualcount,vfs_context_t ctx)5346 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
5347 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
5348 {
5349 int _err;
5350 struct vnop_readdirattr_args a;
5351 #if CONFIG_DTRACE
5352 user_ssize_t resid = uio_resid(uio);
5353 #endif
5354
5355 a.a_desc = &vnop_readdirattr_desc;
5356 a.a_vp = vp;
5357 a.a_alist = alist;
5358 a.a_uio = uio;
5359 a.a_maxcount = maxcount;
5360 a.a_options = options;
5361 a.a_newstate = newstate;
5362 a.a_eofflag = eofflag;
5363 a.a_actualcount = actualcount;
5364 a.a_context = ctx;
5365
5366 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
5367 DTRACE_FSINFO_IO(readdirattr,
5368 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5369
5370 return _err;
5371 }
5372
5373 #if 0
5374 struct vnop_getttrlistbulk_args {
5375 struct vnodeop_desc *a_desc;
5376 vnode_t a_vp;
5377 struct attrlist *a_alist;
5378 struct vnode_attr *a_vap;
5379 struct uio *a_uio;
5380 void *a_private
5381 uint64_t a_options;
5382 int *a_eofflag;
5383 uint32_t *a_actualcount;
5384 vfs_context_t a_context;
5385 };
5386 #endif /* 0*/
5387 errno_t
VNOP_GETATTRLISTBULK(struct vnode * vp,struct attrlist * alist,struct vnode_attr * vap,struct uio * uio,void * private,uint64_t options,int32_t * eofflag,int32_t * actualcount,vfs_context_t ctx)5388 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
5389 struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
5390 int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
5391 {
5392 int _err;
5393 struct vnop_getattrlistbulk_args a;
5394 #if CONFIG_DTRACE
5395 user_ssize_t resid = uio_resid(uio);
5396 #endif
5397
5398 a.a_desc = &vnop_getattrlistbulk_desc;
5399 a.a_vp = vp;
5400 a.a_alist = alist;
5401 a.a_vap = vap;
5402 a.a_uio = uio;
5403 a.a_private = private;
5404 a.a_options = options;
5405 a.a_eofflag = eofflag;
5406 a.a_actualcount = actualcount;
5407 a.a_context = ctx;
5408
5409 _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
5410 DTRACE_FSINFO_IO(getattrlistbulk,
5411 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5412
5413 return _err;
5414 }
5415
5416 #if 0
5417 /*
5418 *#
5419 *#% readlink vp L L L
5420 *#
5421 */
5422 struct vnop_readlink_args {
5423 struct vnodeop_desc *a_desc;
5424 vnode_t a_vp;
5425 struct uio *a_uio;
5426 vfs_context_t a_context;
5427 };
5428 #endif /* 0 */
5429
5430 /*
5431 * Returns: 0 Success
5432 * lock_fsnode:ENOENT No such file or directory [only for VFS
5433 * that is not thread safe & vnode is
5434 * currently being/has been terminated]
5435 * <vfs_readlink>:EINVAL
5436 * <vfs_readlink>:???
5437 *
5438 * Note: The return codes from the underlying VFS's readlink routine
5439 * can't be fully enumerated here, since third party VFS authors
5440 * may not limit their error returns to the ones documented here,
5441 * even though this may result in some programs functioning
5442 * incorrectly.
5443 *
5444 * The return codes documented above are those which may currently
5445 * be returned by HFS from hfs_vnop_readlink, not including
5446 * additional error code which may be propagated from underlying
5447 * routines.
5448 */
5449 errno_t
VNOP_READLINK(struct vnode * vp,struct uio * uio,vfs_context_t ctx)5450 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
5451 {
5452 int _err;
5453 struct vnop_readlink_args a;
5454 #if CONFIG_DTRACE
5455 user_ssize_t resid = uio_resid(uio);
5456 #endif
5457 a.a_desc = &vnop_readlink_desc;
5458 a.a_vp = vp;
5459 a.a_uio = uio;
5460 a.a_context = ctx;
5461
5462 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
5463 DTRACE_FSINFO_IO(readlink,
5464 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5465
5466 return _err;
5467 }
5468
5469 #if 0
5470 /*
5471 *#
5472 *#% inactive vp L U U
5473 *#
5474 */
5475 struct vnop_inactive_args {
5476 struct vnodeop_desc *a_desc;
5477 vnode_t a_vp;
5478 vfs_context_t a_context;
5479 };
5480 #endif /* 0*/
5481 errno_t
VNOP_INACTIVE(struct vnode * vp,vfs_context_t ctx)5482 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
5483 {
5484 int _err;
5485 struct vnop_inactive_args a;
5486
5487 a.a_desc = &vnop_inactive_desc;
5488 a.a_vp = vp;
5489 a.a_context = ctx;
5490
5491 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
5492 DTRACE_FSINFO(inactive, vnode_t, vp);
5493
5494 #if NAMEDSTREAMS
5495 /* For file systems that do not support namedstream natively, mark
5496 * the shadow stream file vnode to be recycled as soon as the last
5497 * reference goes away. To avoid re-entering reclaim code, do not
5498 * call recycle on terminating namedstream vnodes.
5499 */
5500 if (vnode_isnamedstream(vp) &&
5501 (vp->v_parent != NULLVP) &&
5502 vnode_isshadow(vp) &&
5503 ((vp->v_lflag & VL_TERMINATE) == 0)) {
5504 vnode_recycle(vp);
5505 }
5506 #endif
5507
5508 return _err;
5509 }
5510
5511
5512 #if 0
5513 /*
5514 *#
5515 *#% reclaim vp U U U
5516 *#
5517 */
5518 struct vnop_reclaim_args {
5519 struct vnodeop_desc *a_desc;
5520 vnode_t a_vp;
5521 vfs_context_t a_context;
5522 };
5523 #endif /* 0*/
5524 errno_t
VNOP_RECLAIM(struct vnode * vp,vfs_context_t ctx)5525 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5526 {
5527 int _err;
5528 struct vnop_reclaim_args a;
5529
5530 a.a_desc = &vnop_reclaim_desc;
5531 a.a_vp = vp;
5532 a.a_context = ctx;
5533
5534 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5535 DTRACE_FSINFO(reclaim, vnode_t, vp);
5536
5537 return _err;
5538 }
5539
5540
5541 /*
5542 * Returns: 0 Success
5543 * lock_fsnode:ENOENT No such file or directory [only for VFS
5544 * that is not thread safe & vnode is
5545 * currently being/has been terminated]
5546 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5547 */
5548 #if 0
5549 /*
5550 *#
5551 *#% pathconf vp L L L
5552 *#
5553 */
5554 struct vnop_pathconf_args {
5555 struct vnodeop_desc *a_desc;
5556 vnode_t a_vp;
5557 int a_name;
5558 int32_t *a_retval;
5559 vfs_context_t a_context;
5560 };
5561 #endif /* 0*/
5562 errno_t
VNOP_PATHCONF(struct vnode * vp,int name,int32_t * retval,vfs_context_t ctx)5563 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5564 {
5565 int _err;
5566 struct vnop_pathconf_args a;
5567
5568 a.a_desc = &vnop_pathconf_desc;
5569 a.a_vp = vp;
5570 a.a_name = name;
5571 a.a_retval = retval;
5572 a.a_context = ctx;
5573
5574 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5575 DTRACE_FSINFO(pathconf, vnode_t, vp);
5576
5577 return _err;
5578 }
5579
5580 /*
5581 * Returns: 0 Success
5582 * err_advlock:ENOTSUP
5583 * lf_advlock:???
5584 * <vnop_advlock_desc>:???
5585 *
5586 * Notes: VFS implementations of advisory locking using calls through
5587 * <vnop_advlock_desc> because lock enforcement does not occur
5588 * locally should try to limit themselves to the return codes
5589 * documented above for lf_advlock and err_advlock.
5590 */
5591 #if 0
5592 /*
5593 *#
5594 *#% advlock vp U U U
5595 *#
5596 */
5597 struct vnop_advlock_args {
5598 struct vnodeop_desc *a_desc;
5599 vnode_t a_vp;
5600 caddr_t a_id;
5601 int a_op;
5602 struct flock *a_fl;
5603 int a_flags;
5604 vfs_context_t a_context;
5605 };
5606 #endif /* 0*/
errno_t
VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
{
	int _err;
	struct vnop_advlock_args a;

	a.a_desc = &vnop_advlock_desc;
	a.a_vp = vp;
	a.a_id = id;
	a.a_op = op;
	a.a_fl = fl;
	a.a_flags = flags;
	a.a_context = ctx;
	a.a_timeout = timeout;

	/* Disallow advisory locking on non-seekable vnodes */
	if (vnode_isfifo(vp)) {
		_err = err_advlock(&a);
	} else {
		if ((vp->v_flag & VLOCKLOCAL)) {
			/* Advisory locking done at this layer */
			_err = lf_advlock(&a);
		} else if (flags & F_OFD_LOCK) {
			/* Non-local locking doesn't work for OFD locks */
			_err = err_advlock(&a);
		} else if (op == F_TRANSFER) {
			/* Non-local locking doesn't have F_TRANSFER */
			_err = err_advlock(&a);
		} else {
			/* Advisory locking done by underlying filesystem */
			_err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
		}
		DTRACE_FSINFO(advlock, vnode_t, vp);
		/* Unlocking a flock/OFD lock is observable via NOTE_FUNLOCK. */
		if (op == F_UNLCK &&
		    (flags & (F_FLOCK | F_OFD_LOCK)) != 0) {
			post_event_if_success(vp, _err, NOTE_FUNLOCK);
		}
	}

	return _err;
}
5648
5649
5650
5651 #if 0
5652 /*
5653 *#
5654 *#% allocate vp L L L
5655 *#
5656 */
5657 struct vnop_allocate_args {
5658 struct vnodeop_desc *a_desc;
5659 vnode_t a_vp;
5660 off_t a_length;
5661 u_int32_t a_flags;
5662 off_t *a_bytesallocated;
5663 off_t a_offset;
5664 vfs_context_t a_context;
5665 };
5666
5667 #endif /* 0*/
5668 errno_t
VNOP_ALLOCATE(struct vnode * vp,off_t length,u_int32_t flags,off_t * bytesallocated,off_t offset,vfs_context_t ctx)5669 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5670 {
5671 int _err;
5672 struct vnop_allocate_args a;
5673
5674 a.a_desc = &vnop_allocate_desc;
5675 a.a_vp = vp;
5676 a.a_length = length;
5677 a.a_flags = flags;
5678 a.a_bytesallocated = bytesallocated;
5679 a.a_offset = offset;
5680 a.a_context = ctx;
5681
5682 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5683 DTRACE_FSINFO(allocate, vnode_t, vp);
5684 #if CONFIG_FSE
5685 if (_err == 0) {
5686 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5687 }
5688 #endif
5689
5690 return _err;
5691 }
5692
5693 #if 0
5694 /*
5695 *#
5696 *#% pagein vp = = =
5697 *#
5698 */
5699 struct vnop_pagein_args {
5700 struct vnodeop_desc *a_desc;
5701 vnode_t a_vp;
5702 upl_t a_pl;
5703 upl_offset_t a_pl_offset;
5704 off_t a_f_offset;
5705 size_t a_size;
5706 int a_flags;
5707 vfs_context_t a_context;
5708 };
5709 #endif /* 0*/
5710 errno_t
VNOP_PAGEIN(struct vnode * vp,upl_t pl,upl_offset_t pl_offset,off_t f_offset,size_t size,int flags,vfs_context_t ctx)5711 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5712 {
5713 int _err;
5714 struct vnop_pagein_args a;
5715
5716 a.a_desc = &vnop_pagein_desc;
5717 a.a_vp = vp;
5718 a.a_pl = pl;
5719 a.a_pl_offset = pl_offset;
5720 a.a_f_offset = f_offset;
5721 a.a_size = size;
5722 a.a_flags = flags;
5723 a.a_context = ctx;
5724
5725 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5726 DTRACE_FSINFO(pagein, vnode_t, vp);
5727
5728 return _err;
5729 }
5730
5731 #if 0
5732 /*
5733 *#
5734 *#% pageout vp = = =
5735 *#
5736 */
5737 struct vnop_pageout_args {
5738 struct vnodeop_desc *a_desc;
5739 vnode_t a_vp;
5740 upl_t a_pl;
5741 upl_offset_t a_pl_offset;
5742 off_t a_f_offset;
5743 size_t a_size;
5744 int a_flags;
5745 vfs_context_t a_context;
5746 };
5747
5748 #endif /* 0*/
5749 errno_t
VNOP_PAGEOUT(struct vnode * vp,upl_t pl,upl_offset_t pl_offset,off_t f_offset,size_t size,int flags,vfs_context_t ctx)5750 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5751 {
5752 int _err;
5753 struct vnop_pageout_args a;
5754
5755 a.a_desc = &vnop_pageout_desc;
5756 a.a_vp = vp;
5757 a.a_pl = pl;
5758 a.a_pl_offset = pl_offset;
5759 a.a_f_offset = f_offset;
5760 a.a_size = size;
5761 a.a_flags = flags;
5762 a.a_context = ctx;
5763
5764 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5765 DTRACE_FSINFO(pageout, vnode_t, vp);
5766
5767 post_event_if_success(vp, _err, NOTE_WRITE);
5768
5769 return _err;
5770 }
5771
5772 int
vn_remove(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,int32_t flags,struct vnode_attr * vap,vfs_context_t ctx)5773 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5774 {
5775 if (vnode_compound_remove_available(dvp)) {
5776 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5777 } else {
5778 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5779 }
5780 }
5781
5782 #if CONFIG_SEARCHFS
5783
5784 #if 0
5785 /*
5786 *#
5787 *#% searchfs vp L L L
5788 *#
5789 */
5790 struct vnop_searchfs_args {
5791 struct vnodeop_desc *a_desc;
5792 vnode_t a_vp;
5793 void *a_searchparams1;
5794 void *a_searchparams2;
5795 struct attrlist *a_searchattrs;
5796 uint32_t a_maxmatches;
5797 struct timeval *a_timelimit;
5798 struct attrlist *a_returnattrs;
5799 uint32_t *a_nummatches;
5800 uint32_t a_scriptcode;
5801 uint32_t a_options;
5802 struct uio *a_uio;
5803 struct searchstate *a_searchstate;
5804 vfs_context_t a_context;
5805 };
5806
5807 #endif /* 0*/
5808 errno_t
VNOP_SEARCHFS(struct vnode * vp,void * searchparams1,void * searchparams2,struct attrlist * searchattrs,uint32_t maxmatches,struct timeval * timelimit,struct attrlist * returnattrs,uint32_t * nummatches,uint32_t scriptcode,uint32_t options,struct uio * uio,struct searchstate * searchstate,vfs_context_t ctx)5809 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5810 {
5811 int _err;
5812 struct vnop_searchfs_args a;
5813
5814 a.a_desc = &vnop_searchfs_desc;
5815 a.a_vp = vp;
5816 a.a_searchparams1 = searchparams1;
5817 a.a_searchparams2 = searchparams2;
5818 a.a_searchattrs = searchattrs;
5819 a.a_maxmatches = maxmatches;
5820 a.a_timelimit = timelimit;
5821 a.a_returnattrs = returnattrs;
5822 a.a_nummatches = nummatches;
5823 a.a_scriptcode = scriptcode;
5824 a.a_options = options;
5825 a.a_uio = uio;
5826 a.a_searchstate = searchstate;
5827 a.a_context = ctx;
5828
5829 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5830 DTRACE_FSINFO(searchfs, vnode_t, vp);
5831
5832 return _err;
5833 }
5834 #endif /* CONFIG_SEARCHFS */
5835
5836 #if 0
5837 /*
5838 *#
5839 *#% copyfile fvp U U U
5840 *#% copyfile tdvp L U U
5841 *#% copyfile tvp X U U
5842 *#
5843 */
5844 struct vnop_copyfile_args {
5845 struct vnodeop_desc *a_desc;
5846 vnode_t a_fvp;
5847 vnode_t a_tdvp;
5848 vnode_t a_tvp;
5849 struct componentname *a_tcnp;
5850 int a_mode;
5851 int a_flags;
5852 vfs_context_t a_context;
5853 };
5854 #endif /* 0*/
5855 errno_t
VNOP_COPYFILE(struct vnode * fvp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,int mode,int flags,vfs_context_t ctx)5856 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5857 int mode, int flags, vfs_context_t ctx)
5858 {
5859 int _err;
5860 struct vnop_copyfile_args a;
5861 a.a_desc = &vnop_copyfile_desc;
5862 a.a_fvp = fvp;
5863 a.a_tdvp = tdvp;
5864 a.a_tvp = tvp;
5865 a.a_tcnp = tcnp;
5866 a.a_mode = mode;
5867 a.a_flags = flags;
5868 a.a_context = ctx;
5869 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5870 DTRACE_FSINFO(copyfile, vnode_t, fvp);
5871 return _err;
5872 }
5873
5874 #if 0
5875 struct vnop_clonefile_args {
5876 struct vnodeop_desc *a_desc;
5877 vnode_t a_fvp;
5878 vnode_t a_dvp;
5879 vnode_t *a_vpp;
5880 struct componentname *a_cnp;
5881 struct vnode_attr *a_vap;
5882 uint32_t a_flags;
5883 vfs_context_t a_context;
5884 int (*a_dir_clone_authorizer)( /* Authorization callback */
5885 struct vnode_attr *vap, /* attribute to be authorized */
5886 kauth_action_t action, /* action for which attribute is to be authorized */
5887 struct vnode_attr *dvap, /* target directory attributes */
5888 vnode_t sdvp, /* source directory vnode pointer (optional) */
5889 mount_t mp, /* mount point of filesystem */
5890 dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */
5891 uint32_t flags; /* value passed in a_flags to the VNOP */
5892 vfs_context_t ctx, /* As passed to VNOP */
5893 void *reserved); /* Always NULL */
5894 void *a_reserved; /* Currently unused */
5895 };
5896 #endif /* 0 */
5897
5898 errno_t
VNOP_CLONEFILE(vnode_t fvp,vnode_t dvp,vnode_t * vpp,struct componentname * cnp,struct vnode_attr * vap,uint32_t flags,vfs_context_t ctx)5899 VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
5900 struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
5901 vfs_context_t ctx)
5902 {
5903 int _err;
5904 struct vnop_clonefile_args a;
5905 a.a_desc = &vnop_clonefile_desc;
5906 a.a_fvp = fvp;
5907 a.a_dvp = dvp;
5908 a.a_vpp = vpp;
5909 a.a_cnp = cnp;
5910 a.a_vap = vap;
5911 a.a_flags = flags;
5912 a.a_context = ctx;
5913
5914 if (vnode_vtype(fvp) == VDIR) {
5915 a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
5916 } else {
5917 a.a_dir_clone_authorizer = NULL;
5918 }
5919
5920 _err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);
5921
5922 if (_err == 0 && *vpp) {
5923 DTRACE_FSINFO(clonefile, vnode_t, *vpp);
5924 if (kdebug_enable) {
5925 kdebug_lookup(*vpp, cnp);
5926 }
5927 }
5928
5929 post_event_if_success(dvp, _err, NOTE_WRITE);
5930
5931 return _err;
5932 }
5933
5934 errno_t
VNOP_GETXATTR(vnode_t vp,const char * name,uio_t uio,size_t * size,int options,vfs_context_t ctx)5935 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
5936 {
5937 struct vnop_getxattr_args a;
5938 int error;
5939
5940 a.a_desc = &vnop_getxattr_desc;
5941 a.a_vp = vp;
5942 a.a_name = name;
5943 a.a_uio = uio;
5944 a.a_size = size;
5945 a.a_options = options;
5946 a.a_context = ctx;
5947
5948 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
5949 DTRACE_FSINFO(getxattr, vnode_t, vp);
5950
5951 return error;
5952 }
5953
5954 errno_t
VNOP_SETXATTR(vnode_t vp,const char * name,uio_t uio,int options,vfs_context_t ctx)5955 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
5956 {
5957 struct vnop_setxattr_args a;
5958 int error;
5959
5960 a.a_desc = &vnop_setxattr_desc;
5961 a.a_vp = vp;
5962 a.a_name = name;
5963 a.a_uio = uio;
5964 a.a_options = options;
5965 a.a_context = ctx;
5966
5967 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
5968 DTRACE_FSINFO(setxattr, vnode_t, vp);
5969
5970 if (error == 0) {
5971 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
5972 }
5973
5974 post_event_if_success(vp, error, NOTE_ATTRIB);
5975
5976 return error;
5977 }
5978
5979 errno_t
VNOP_REMOVEXATTR(vnode_t vp,const char * name,int options,vfs_context_t ctx)5980 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
5981 {
5982 struct vnop_removexattr_args a;
5983 int error;
5984
5985 a.a_desc = &vnop_removexattr_desc;
5986 a.a_vp = vp;
5987 a.a_name = name;
5988 a.a_options = options;
5989 a.a_context = ctx;
5990
5991 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
5992 DTRACE_FSINFO(removexattr, vnode_t, vp);
5993
5994 post_event_if_success(vp, error, NOTE_ATTRIB);
5995
5996 return error;
5997 }
5998
5999 errno_t
VNOP_LISTXATTR(vnode_t vp,uio_t uio,size_t * size,int options,vfs_context_t ctx)6000 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
6001 {
6002 struct vnop_listxattr_args a;
6003 int error;
6004
6005 a.a_desc = &vnop_listxattr_desc;
6006 a.a_vp = vp;
6007 a.a_uio = uio;
6008 a.a_size = size;
6009 a.a_options = options;
6010 a.a_context = ctx;
6011
6012 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
6013 DTRACE_FSINFO(listxattr, vnode_t, vp);
6014
6015 return error;
6016 }
6017
6018
6019 #if 0
6020 /*
6021 *#
6022 *#% blktooff vp = = =
6023 *#
6024 */
6025 struct vnop_blktooff_args {
6026 struct vnodeop_desc *a_desc;
6027 vnode_t a_vp;
6028 daddr64_t a_lblkno;
6029 off_t *a_offset;
6030 };
6031 #endif /* 0*/
6032 errno_t
VNOP_BLKTOOFF(struct vnode * vp,daddr64_t lblkno,off_t * offset)6033 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
6034 {
6035 int _err;
6036 struct vnop_blktooff_args a;
6037
6038 a.a_desc = &vnop_blktooff_desc;
6039 a.a_vp = vp;
6040 a.a_lblkno = lblkno;
6041 a.a_offset = offset;
6042
6043 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
6044 DTRACE_FSINFO(blktooff, vnode_t, vp);
6045
6046 return _err;
6047 }
6048
6049 #if 0
6050 /*
6051 *#
6052 *#% offtoblk vp = = =
6053 *#
6054 */
6055 struct vnop_offtoblk_args {
6056 struct vnodeop_desc *a_desc;
6057 vnode_t a_vp;
6058 off_t a_offset;
6059 daddr64_t *a_lblkno;
6060 };
6061 #endif /* 0*/
6062 errno_t
VNOP_OFFTOBLK(struct vnode * vp,off_t offset,daddr64_t * lblkno)6063 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
6064 {
6065 int _err;
6066 struct vnop_offtoblk_args a;
6067
6068 a.a_desc = &vnop_offtoblk_desc;
6069 a.a_vp = vp;
6070 a.a_offset = offset;
6071 a.a_lblkno = lblkno;
6072
6073 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
6074 DTRACE_FSINFO(offtoblk, vnode_t, vp);
6075
6076 return _err;
6077 }
6078
6079 #if 0
6080 /*
6081 *#
6082 *#% ap vp L L L
6083 *#
6084 */
6085 struct vnop_verify_args {
6086 struct vnodeop_desc *a_desc;
6087 vnode_t a_vp;
6088 off_t a_foffset;
6089 char *a_buf;
6090 size_t a_bufsize;
6091 size_t *a_verifyblksize;
6092 void **a_verify_ctxp;
6093 int a_flags;
6094 vfs_context_t a_context;
6095 };
6096 #endif
6097
6098 errno_t
VNOP_VERIFY(struct vnode * vp,off_t foffset,uint8_t * buf,size_t bufsize,size_t * verify_block_size,void ** verify_ctxp,vnode_verify_flags_t flags,vfs_context_t ctx)6099 VNOP_VERIFY(struct vnode *vp, off_t foffset, uint8_t *buf, size_t bufsize,
6100 size_t *verify_block_size, void **verify_ctxp, vnode_verify_flags_t flags,
6101 vfs_context_t ctx)
6102 {
6103 int _err;
6104 struct vnop_verify_args a;
6105
6106 if (ctx == NULL) {
6107 ctx = vfs_context_kernel();
6108 }
6109 a.a_desc = &vnop_verify_desc;
6110 a.a_vp = vp;
6111 a.a_foffset = foffset;
6112 a.a_buf = buf;
6113 a.a_bufsize = bufsize;
6114 a.a_verifyblksize = verify_block_size;
6115 a.a_flags = flags;
6116 a.a_verify_ctxp = verify_ctxp;
6117 a.a_context = ctx;
6118
6119 _err = (*vp->v_op[vnop_verify_desc.vdesc_offset])(&a);
6120 DTRACE_FSINFO(verify, vnode_t, vp);
6121
6122 /* It is not an error for a filesystem to not support this VNOP */
6123 if (_err == ENOTSUP) {
6124 if (!buf && verify_block_size) {
6125 *verify_block_size = 0;
6126 }
6127
6128 _err = 0;
6129 }
6130
6131 return _err;
6132 }
6133
6134 #if 0
6135 /*
6136 *#
6137 *#% blockmap vp L L L
6138 *#
6139 */
6140 struct vnop_blockmap_args {
6141 struct vnodeop_desc *a_desc;
6142 vnode_t a_vp;
6143 off_t a_foffset;
6144 size_t a_size;
6145 daddr64_t *a_bpn;
6146 size_t *a_run;
6147 void *a_poff;
6148 int a_flags;
6149 vfs_context_t a_context;
6150 };
6151 #endif /* 0*/
6152 errno_t
VNOP_BLOCKMAP(struct vnode * vp,off_t foffset,size_t size,daddr64_t * bpn,size_t * run,void * poff,int flags,vfs_context_t ctx)6153 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
6154 {
6155 int _err;
6156 struct vnop_blockmap_args a;
6157 size_t localrun = 0;
6158
6159 if (ctx == NULL) {
6160 ctx = vfs_context_current();
6161 }
6162 a.a_desc = &vnop_blockmap_desc;
6163 a.a_vp = vp;
6164 a.a_foffset = foffset;
6165 a.a_size = size;
6166 a.a_bpn = bpn;
6167 a.a_run = &localrun;
6168 a.a_poff = poff;
6169 a.a_flags = flags;
6170 a.a_context = ctx;
6171
6172 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
6173 DTRACE_FSINFO(blockmap, vnode_t, vp);
6174
6175 /*
6176 * We used a local variable to request information from the underlying
6177 * filesystem about the length of the I/O run in question. If
6178 * we get malformed output from the filesystem, we cap it to the length
6179 * requested, at most. Update 'run' on the way out.
6180 */
6181 if (_err == 0) {
6182 if (localrun > size) {
6183 localrun = size;
6184 }
6185
6186 if (run) {
6187 *run = localrun;
6188 }
6189 }
6190
6191 return _err;
6192 }
6193
6194 #if 0
6195 struct vnop_strategy_args {
6196 struct vnodeop_desc *a_desc;
6197 struct buf *a_bp;
6198 };
6199
6200 #endif /* 0*/
6201 errno_t
VNOP_STRATEGY(struct buf * bp)6202 VNOP_STRATEGY(struct buf *bp)
6203 {
6204 int _err;
6205 struct vnop_strategy_args a;
6206 vnode_t vp = buf_vnode(bp);
6207 a.a_desc = &vnop_strategy_desc;
6208 a.a_bp = bp;
6209 _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
6210 DTRACE_FSINFO(strategy, vnode_t, vp);
6211 return _err;
6212 }
6213
6214 #if 0
6215 struct vnop_bwrite_args {
6216 struct vnodeop_desc *a_desc;
6217 buf_t a_bp;
6218 };
6219 #endif /* 0*/
6220 errno_t
VNOP_BWRITE(struct buf * bp)6221 VNOP_BWRITE(struct buf *bp)
6222 {
6223 int _err;
6224 struct vnop_bwrite_args a;
6225 vnode_t vp = buf_vnode(bp);
6226 a.a_desc = &vnop_bwrite_desc;
6227 a.a_bp = bp;
6228 _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
6229 DTRACE_FSINFO(bwrite, vnode_t, vp);
6230 return _err;
6231 }
6232
6233 #if 0
6234 struct vnop_kqfilt_add_args {
6235 struct vnodeop_desc *a_desc;
6236 struct vnode *a_vp;
6237 struct knote *a_kn;
6238 vfs_context_t a_context;
6239 };
6240 #endif
6241 errno_t
VNOP_KQFILT_ADD(struct vnode * vp,struct knote * kn,vfs_context_t ctx)6242 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
6243 {
6244 int _err;
6245 struct vnop_kqfilt_add_args a;
6246
6247 a.a_desc = VDESC(vnop_kqfilt_add);
6248 a.a_vp = vp;
6249 a.a_kn = kn;
6250 a.a_context = ctx;
6251
6252 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
6253 DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
6254
6255 return _err;
6256 }
6257
6258 #if 0
6259 struct vnop_kqfilt_remove_args {
6260 struct vnodeop_desc *a_desc;
6261 struct vnode *a_vp;
6262 uintptr_t a_ident;
6263 vfs_context_t a_context;
6264 };
6265 #endif
6266 errno_t
VNOP_KQFILT_REMOVE(struct vnode * vp,uintptr_t ident,vfs_context_t ctx)6267 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
6268 {
6269 int _err;
6270 struct vnop_kqfilt_remove_args a;
6271
6272 a.a_desc = VDESC(vnop_kqfilt_remove);
6273 a.a_vp = vp;
6274 a.a_ident = ident;
6275 a.a_context = ctx;
6276
6277 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
6278 DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
6279
6280 return _err;
6281 }
6282
6283 errno_t
VNOP_MONITOR(vnode_t vp,uint32_t events,uint32_t flags,void * handle,vfs_context_t ctx)6284 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
6285 {
6286 int _err;
6287 struct vnop_monitor_args a;
6288
6289 a.a_desc = VDESC(vnop_monitor);
6290 a.a_vp = vp;
6291 a.a_events = events;
6292 a.a_flags = flags;
6293 a.a_handle = handle;
6294 a.a_context = ctx;
6295
6296 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
6297 DTRACE_FSINFO(monitor, vnode_t, vp);
6298
6299 return _err;
6300 }
6301
6302 #if 0
6303 struct vnop_setlabel_args {
6304 struct vnodeop_desc *a_desc;
6305 struct vnode *a_vp;
6306 struct label *a_vl;
6307 vfs_context_t a_context;
6308 };
6309 #endif
6310 errno_t
VNOP_SETLABEL(struct vnode * vp,struct label * label,vfs_context_t ctx)6311 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
6312 {
6313 int _err;
6314 struct vnop_setlabel_args a;
6315
6316 a.a_desc = VDESC(vnop_setlabel);
6317 a.a_vp = vp;
6318 a.a_vl = label;
6319 a.a_context = ctx;
6320
6321 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
6322 DTRACE_FSINFO(setlabel, vnode_t, vp);
6323
6324 return _err;
6325 }
6326
6327
6328 #if NAMEDSTREAMS
6329 /*
 * Get a named stream
6331 */
6332 errno_t
VNOP_GETNAMEDSTREAM(vnode_t vp,vnode_t * svpp,const char * name,enum nsoperation operation,int flags,vfs_context_t ctx)6333 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
6334 {
6335 int _err;
6336 struct vnop_getnamedstream_args a;
6337
6338 a.a_desc = &vnop_getnamedstream_desc;
6339 a.a_vp = vp;
6340 a.a_svpp = svpp;
6341 a.a_name = name;
6342 a.a_operation = operation;
6343 a.a_flags = flags;
6344 a.a_context = ctx;
6345
6346 _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
6347 DTRACE_FSINFO(getnamedstream, vnode_t, vp);
6348 return _err;
6349 }
6350
6351 /*
 * Create a named stream
6353 */
6354 errno_t
VNOP_MAKENAMEDSTREAM(vnode_t vp,vnode_t * svpp,const char * name,int flags,vfs_context_t ctx)6355 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
6356 {
6357 int _err;
6358 struct vnop_makenamedstream_args a;
6359
6360 a.a_desc = &vnop_makenamedstream_desc;
6361 a.a_vp = vp;
6362 a.a_svpp = svpp;
6363 a.a_name = name;
6364 a.a_flags = flags;
6365 a.a_context = ctx;
6366
6367 _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
6368 DTRACE_FSINFO(makenamedstream, vnode_t, vp);
6369 return _err;
6370 }
6371
6372
6373 /*
 * Remove a named stream
6375 */
6376 errno_t
VNOP_REMOVENAMEDSTREAM(vnode_t vp,vnode_t svp,const char * name,int flags,vfs_context_t ctx)6377 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6378 {
6379 int _err;
6380 struct vnop_removenamedstream_args a;
6381
6382 a.a_desc = &vnop_removenamedstream_desc;
6383 a.a_vp = vp;
6384 a.a_svp = svp;
6385 a.a_name = name;
6386 a.a_flags = flags;
6387 a.a_context = ctx;
6388
6389 _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6390 DTRACE_FSINFO(removenamedstream, vnode_t, vp);
6391 return _err;
6392 }
6393 #endif
6394