1 /*
2 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kpi_vfs.c
67 */
68 /*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75 /*
76 * External virtual filesystem routines
77 */
78
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/disk.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf.h>
93 #include <sys/errno.h>
94 #include <kern/kalloc.h>
95 #include <sys/domain.h>
96 #include <sys/mbuf.h>
97 #include <sys/syslog.h>
98 #include <sys/ubc.h>
99 #include <sys/ubc_internal.h>
100 #include <sys/vm.h>
101 #include <sys/sysctl.h>
102 #include <sys/filedesc.h>
103 #include <sys/event.h>
104 #include <sys/fsevents.h>
105 #include <sys/user.h>
106 #include <sys/lockf.h>
107 #include <sys/xattr.h>
108 #include <sys/kdebug.h>
109 #include <vfs/vfs_disk_conditioner.h>
110
111 #include <kern/assert.h>
112 #include <kern/zalloc.h>
113 #include <kern/task.h>
114 #include <kern/policy_internal.h>
115
116 #include <libkern/OSByteOrder.h>
117
118 #include <miscfs/specfs/specdev.h>
119
120 #include <mach/mach_types.h>
121 #include <mach/memory_object_types.h>
122 #include <mach/task.h>
123
124 #if CONFIG_MACF
125 #include <security/mac_framework.h>
126 #endif
127
128 #if NULLFS
129 #include <miscfs/nullfs/nullfs.h>
130 #endif
131
132 #include <sys/sdt.h>
133
134 #define ESUCCESS 0
135 #undef mount_t
136 #undef vnode_t
137
138 #define COMPAT_ONLY
139
140 #define NATIVE_XATTR(VP) \
141 ((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
142
143 #if CONFIG_APPLEDOUBLE
144 static void xattrfile_remove(vnode_t dvp, const char *basename,
145 vfs_context_t ctx, int force);
146 static void xattrfile_setattr(vnode_t dvp, const char * basename,
147 struct vnode_attr * vap, vfs_context_t ctx);
148 #endif /* CONFIG_APPLEDOUBLE */
149
150 extern lck_rw_t rootvnode_rw_lock;
151
152 static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
153
154 KALLOC_TYPE_DEFINE(KT_VFS_CONTEXT, struct vfs_context, KT_PRIV_ACCT);
155
156 extern int fstypenumstart;
157 char vfs_typenum_arr[13];
158
159 LCK_GRP_DECLARE(typenum_arr_grp, "typenum array group");
160 LCK_MTX_DECLARE(vfs_typenum_mtx, &typenum_arr_grp);
161 /*
162 * vnode_setneedinactive
163 *
164 * Description: Indicate that when the last iocount on this vnode goes away,
165 * and the usecount is also zero, we should inform the filesystem
166 * via VNOP_INACTIVE.
167 *
168 * Parameters: vnode_t vnode to mark
169 *
170 * Returns: Nothing
171 *
172 * Notes: Notably used when we're deleting a file--we need not have a
173 * usecount, so VNOP_INACTIVE may not get called by anyone. We
174 * want it called when we drop our iocount.
175 */
void
vnode_setneedinactive(vnode_t vp)
{
	/* Drop any name-cache entries first so no new lookups hand out refs. */
	cache_purge(vp);

	/* Spin variant is sufficient: we only flip a flag under the lock. */
	vnode_lock_spin(vp);
	vp->v_lflag |= VL_NEEDINACTIVE;
	vnode_unlock(vp);
}
185
186
187 /* ====================================================================== */
188 /* ************ EXTERNAL KERNEL APIS ********************************** */
189 /* ====================================================================== */
190
191 /*
192 * implementations of exported VFS operations
193 */
194 int
VFS_MOUNT(mount_t mp,vnode_t devvp,user_addr_t data,vfs_context_t ctx)195 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
196 {
197 int error;
198
199 if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) {
200 return ENOTSUP;
201 }
202
203 if (vfs_context_is64bit(ctx)) {
204 if (vfs_64bitready(mp)) {
205 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
206 } else {
207 error = ENOTSUP;
208 }
209 } else {
210 error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
211 }
212
213 return error;
214 }
215
216 int
VFS_START(mount_t mp,int flags,vfs_context_t ctx)217 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
218 {
219 int error;
220
221 if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) {
222 return ENOTSUP;
223 }
224
225 error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
226
227 return error;
228 }
229
230 int
VFS_UNMOUNT(mount_t mp,int flags,vfs_context_t ctx)231 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
232 {
233 int error;
234
235 if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) {
236 return ENOTSUP;
237 }
238
239 error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
240
241 return error;
242 }
243
244 /*
245 * Returns: 0 Success
246 * ENOTSUP Not supported
247 * <vfs_root>:ENOENT
248 * <vfs_root>:???
249 *
250 * Note: The return codes from the underlying VFS's root routine can't
251 * be fully enumerated here, since third party VFS authors may not
252 * limit their error returns to the ones documented here, even
253 * though this may result in some programs functioning incorrectly.
254 *
255 * The return codes documented above are those which may currently
256 * be returned by HFS from hfs_vfs_root, which is a simple wrapper
257 * for a call to hfs_vget on the volume mount point, not including
258 * additional error codes which may be propagated from underlying
259 * routines called by hfs_vget.
260 */
261 int
VFS_ROOT(mount_t mp,struct vnode ** vpp,vfs_context_t ctx)262 VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx)
263 {
264 int error;
265
266 if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) {
267 return ENOTSUP;
268 }
269
270 if (ctx == NULL) {
271 ctx = vfs_context_current();
272 }
273
274 error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
275
276 return error;
277 }
278
279 int
VFS_QUOTACTL(mount_t mp,int cmd,uid_t uid,caddr_t datap,vfs_context_t ctx)280 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
281 {
282 int error;
283
284 if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) {
285 return ENOTSUP;
286 }
287
288 error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
289
290 return error;
291 }
292
293 int
VFS_GETATTR(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)294 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
295 {
296 int error;
297
298 if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) {
299 return ENOTSUP;
300 }
301
302 if (ctx == NULL) {
303 ctx = vfs_context_current();
304 }
305
306 error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
307
308 return error;
309 }
310
311 int
VFS_SETATTR(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)312 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
313 {
314 int error;
315
316 if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) {
317 return ENOTSUP;
318 }
319
320 if (ctx == NULL) {
321 ctx = vfs_context_current();
322 }
323
324 error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
325
326 return error;
327 }
328
329 int
VFS_SYNC(mount_t mp,int flags,vfs_context_t ctx)330 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
331 {
332 int error;
333
334 if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) {
335 return ENOTSUP;
336 }
337
338 if (ctx == NULL) {
339 ctx = vfs_context_current();
340 }
341
342 error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
343
344 return error;
345 }
346
347 int
VFS_VGET(mount_t mp,ino64_t ino,struct vnode ** vpp,vfs_context_t ctx)348 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
349 {
350 int error;
351
352 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) {
353 return ENOTSUP;
354 }
355
356 if (ctx == NULL) {
357 ctx = vfs_context_current();
358 }
359
360 error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
361
362 return error;
363 }
364
365 int
VFS_FHTOVP(mount_t mp,int fhlen,unsigned char * fhp,vnode_t * vpp,vfs_context_t ctx)366 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
367 {
368 int error;
369
370 if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) {
371 return ENOTSUP;
372 }
373
374 if (ctx == NULL) {
375 ctx = vfs_context_current();
376 }
377
378 error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
379
380 return error;
381 }
382
383 int
VFS_VPTOFH(struct vnode * vp,int * fhlenp,unsigned char * fhp,vfs_context_t ctx)384 VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
385 {
386 int error;
387
388 if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) {
389 return ENOTSUP;
390 }
391
392 if (ctx == NULL) {
393 ctx = vfs_context_current();
394 }
395
396 error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
397
398 return error;
399 }
400
401 int
VFS_IOCTL(struct mount * mp,u_long command,caddr_t data,int flags,vfs_context_t context)402 VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
403 int flags, vfs_context_t context)
404 {
405 if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl) {
406 return ENOTSUP;
407 }
408
409 return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
410 context ?: vfs_context_current());
411 }
412
413 int
VFS_VGET_SNAPDIR(mount_t mp,vnode_t * vpp,vfs_context_t ctx)414 VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
415 {
416 int error;
417
418 if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0)) {
419 return ENOTSUP;
420 }
421
422 if (ctx == NULL) {
423 ctx = vfs_context_current();
424 }
425
426 error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
427
428 return error;
429 }
430
431 /* returns the cached throttle mask for the mount_t */
432 uint64_t
vfs_throttle_mask(mount_t mp)433 vfs_throttle_mask(mount_t mp)
434 {
435 return mp->mnt_throttle_mask;
436 }
437
438 /* returns a copy of vfs type name for the mount_t */
439 void
vfs_name(mount_t mp,char * buffer)440 vfs_name(mount_t mp, char *buffer)
441 {
442 strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
443 }
444
445 /* returns vfs type number for the mount_t */
446 int
vfs_typenum(mount_t mp)447 vfs_typenum(mount_t mp)
448 {
449 return mp->mnt_vtable->vfc_typenum;
450 }
451
452 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */
453 void*
vfs_mntlabel(mount_t mp)454 vfs_mntlabel(mount_t mp)
455 {
456 return (void*)mac_mount_label(mp);
457 }
458
459 uint64_t
vfs_mount_id(mount_t mp)460 vfs_mount_id(mount_t mp)
461 {
462 return mp->mnt_mount_id;
463 }
464
465 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
466 uint64_t
vfs_flags(mount_t mp)467 vfs_flags(mount_t mp)
468 {
469 return (uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
470 }
471
472 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
473 void
vfs_setflags(mount_t mp,uint64_t flags)474 vfs_setflags(mount_t mp, uint64_t flags)
475 {
476 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
477
478 mount_lock(mp);
479 mp->mnt_flag |= lflags;
480 mount_unlock(mp);
481 }
482
483 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
484 void
vfs_clearflags(mount_t mp,uint64_t flags)485 vfs_clearflags(mount_t mp, uint64_t flags)
486 {
487 uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
488
489 mount_lock(mp);
490 mp->mnt_flag &= ~lflags;
491 mount_unlock(mp);
492 }
493
494 /* Is the mount_t ronly and upgrade read/write requested? */
495 int
vfs_iswriteupgrade(mount_t mp)496 vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */
497 {
498 return (mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR);
499 }
500
501
502 /* Is the mount_t mounted ronly */
503 int
vfs_isrdonly(mount_t mp)504 vfs_isrdonly(mount_t mp)
505 {
506 return mp->mnt_flag & MNT_RDONLY;
507 }
508
509 /* Is the mount_t mounted for filesystem synchronous writes? */
510 int
vfs_issynchronous(mount_t mp)511 vfs_issynchronous(mount_t mp)
512 {
513 return mp->mnt_flag & MNT_SYNCHRONOUS;
514 }
515
516 /* Is the mount_t mounted read/write? */
517 int
vfs_isrdwr(mount_t mp)518 vfs_isrdwr(mount_t mp)
519 {
520 return (mp->mnt_flag & MNT_RDONLY) == 0;
521 }
522
523
524 /* Is mount_t marked for update (ie MNT_UPDATE) */
525 int
vfs_isupdate(mount_t mp)526 vfs_isupdate(mount_t mp)
527 {
528 return mp->mnt_flag & MNT_UPDATE;
529 }
530
531
532 /* Is mount_t marked for reload (ie MNT_RELOAD) */
533 int
vfs_isreload(mount_t mp)534 vfs_isreload(mount_t mp)
535 {
536 return (mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD);
537 }
538
539 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
540 int
vfs_isforce(mount_t mp)541 vfs_isforce(mount_t mp)
542 {
543 if (mp->mnt_lflag & MNT_LFORCE) {
544 return 1;
545 } else {
546 return 0;
547 }
548 }
549
550 int
vfs_isunmount(mount_t mp)551 vfs_isunmount(mount_t mp)
552 {
553 if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
554 return 1;
555 } else {
556 return 0;
557 }
558 }
559
560 int
vfs_64bitready(mount_t mp)561 vfs_64bitready(mount_t mp)
562 {
563 if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
564 return 1;
565 } else {
566 return 0;
567 }
568 }
569
570
571 int
vfs_authcache_ttl(mount_t mp)572 vfs_authcache_ttl(mount_t mp)
573 {
574 if ((mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
575 return mp->mnt_authcache_ttl;
576 } else {
577 return CACHED_RIGHT_INFINITE_TTL;
578 }
579 }
580
581 void
vfs_setauthcache_ttl(mount_t mp,int ttl)582 vfs_setauthcache_ttl(mount_t mp, int ttl)
583 {
584 mount_lock(mp);
585 mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
586 mp->mnt_authcache_ttl = ttl;
587 mount_unlock(mp);
588 }
589
590 void
vfs_clearauthcache_ttl(mount_t mp)591 vfs_clearauthcache_ttl(mount_t mp)
592 {
593 mount_lock(mp);
594 mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
595 /*
596 * back to the default TTL value in case
597 * MNTK_AUTH_OPAQUE is set on this mount
598 */
599 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
600 mount_unlock(mp);
601 }
602
603 int
vfs_authopaque(mount_t mp)604 vfs_authopaque(mount_t mp)
605 {
606 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE)) {
607 return 1;
608 } else {
609 return 0;
610 }
611 }
612
613 int
vfs_authopaqueaccess(mount_t mp)614 vfs_authopaqueaccess(mount_t mp)
615 {
616 if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS)) {
617 return 1;
618 } else {
619 return 0;
620 }
621 }
622
623 void
vfs_setauthopaque(mount_t mp)624 vfs_setauthopaque(mount_t mp)
625 {
626 mount_lock(mp);
627 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
628 mount_unlock(mp);
629 }
630
631 void
vfs_setauthopaqueaccess(mount_t mp)632 vfs_setauthopaqueaccess(mount_t mp)
633 {
634 mount_lock(mp);
635 mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
636 mount_unlock(mp);
637 }
638
639 void
vfs_clearauthopaque(mount_t mp)640 vfs_clearauthopaque(mount_t mp)
641 {
642 mount_lock(mp);
643 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
644 mount_unlock(mp);
645 }
646
647 void
vfs_clearauthopaqueaccess(mount_t mp)648 vfs_clearauthopaqueaccess(mount_t mp)
649 {
650 mount_lock(mp);
651 mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
652 mount_unlock(mp);
653 }
654
655 void
vfs_setextendedsecurity(mount_t mp)656 vfs_setextendedsecurity(mount_t mp)
657 {
658 mount_lock(mp);
659 mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
660 mount_unlock(mp);
661 }
662
663 void
vfs_setmntsystem(mount_t mp)664 vfs_setmntsystem(mount_t mp)
665 {
666 mount_lock(mp);
667 mp->mnt_kern_flag |= MNTK_SYSTEM;
668 mount_unlock(mp);
669 }
670
671 void
vfs_setmntsystemdata(mount_t mp)672 vfs_setmntsystemdata(mount_t mp)
673 {
674 mount_lock(mp);
675 mp->mnt_kern_flag |= MNTK_SYSTEMDATA;
676 mount_unlock(mp);
677 }
678
679 void
vfs_setmntswap(mount_t mp)680 vfs_setmntswap(mount_t mp)
681 {
682 mount_lock(mp);
683 mp->mnt_kern_flag |= (MNTK_SYSTEM | MNTK_SWAP_MOUNT);
684 mount_unlock(mp);
685 }
686
687 void
vfs_clearextendedsecurity(mount_t mp)688 vfs_clearextendedsecurity(mount_t mp)
689 {
690 mount_lock(mp);
691 mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
692 mount_unlock(mp);
693 }
694
695 void
vfs_setnoswap(mount_t mp)696 vfs_setnoswap(mount_t mp)
697 {
698 mount_lock(mp);
699 mp->mnt_kern_flag |= MNTK_NOSWAP;
700 mount_unlock(mp);
701 }
702
703 void
vfs_clearnoswap(mount_t mp)704 vfs_clearnoswap(mount_t mp)
705 {
706 mount_lock(mp);
707 mp->mnt_kern_flag &= ~MNTK_NOSWAP;
708 mount_unlock(mp);
709 }
710
711 int
vfs_extendedsecurity(mount_t mp)712 vfs_extendedsecurity(mount_t mp)
713 {
714 return mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY;
715 }
716
717 /* returns the max size of short symlink in this mount_t */
718 uint32_t
vfs_maxsymlen(mount_t mp)719 vfs_maxsymlen(mount_t mp)
720 {
721 return mp->mnt_maxsymlinklen;
722 }
723
724 /* set max size of short symlink on mount_t */
725 void
vfs_setmaxsymlen(mount_t mp,uint32_t symlen)726 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
727 {
728 mp->mnt_maxsymlinklen = symlen;
729 }
730
731 boolean_t
vfs_is_basesystem(mount_t mp)732 vfs_is_basesystem(mount_t mp)
733 {
734 return ((mp->mnt_supl_kern_flag & MNTK_SUPL_BASESYSTEM) == 0) ? false : true;
735 }
736
737 /* return a pointer to the RO vfs_statfs associated with mount_t */
738 struct vfsstatfs *
vfs_statfs(mount_t mp)739 vfs_statfs(mount_t mp)
740 {
741 return &mp->mnt_vfsstat;
742 }
743
744 int
vfs_getattr(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)745 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
746 {
747 int error;
748
749 if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) {
750 return error;
751 }
752
753 /*
754 * If we have a filesystem create time, use it to default some others.
755 */
756 if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
757 if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time)) {
758 VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
759 }
760 }
761
762 return 0;
763 }
764
765 int
vfs_setattr(mount_t mp,struct vfs_attr * vfa,vfs_context_t ctx)766 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
767 {
768 int error;
769
770 /*
771 * with a read-only system volume, we need to allow rename of the root volume
772 * even if it's read-only. Don't return EROFS here if setattr changes only
773 * the volume name
774 */
775 if (vfs_isrdonly(mp) &&
776 !((strcmp(mp->mnt_vfsstat.f_fstypename, "apfs") == 0) && (vfa->f_active == VFSATTR_f_vol_name))) {
777 return EROFS;
778 }
779
780 error = VFS_SETATTR(mp, vfa, ctx);
781
782 /*
783 * If we had alternate ways of setting vfs attributes, we'd
784 * fall back here.
785 */
786
787 return error;
788 }
789
790 /* return the private data handle stored in mount_t */
791 void *
vfs_fsprivate(mount_t mp)792 vfs_fsprivate(mount_t mp)
793 {
794 return mp->mnt_data;
795 }
796
797 /* set the private data handle in mount_t */
798 void
vfs_setfsprivate(mount_t mp,void * mntdata)799 vfs_setfsprivate(mount_t mp, void *mntdata)
800 {
801 mount_lock(mp);
802 mp->mnt_data = mntdata;
803 mount_unlock(mp);
804 }
805
806 /* query whether the mount point supports native EAs */
807 int
vfs_nativexattrs(mount_t mp)808 vfs_nativexattrs(mount_t mp)
809 {
810 return mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS;
811 }
812
813 /*
814 * return the block size of the underlying
815 * device associated with mount_t
816 */
817 int
vfs_devblocksize(mount_t mp)818 vfs_devblocksize(mount_t mp)
819 {
820 return mp->mnt_devblocksize;
821 }
822
823 /*
824 * Returns vnode with an iocount that must be released with vnode_put()
825 */
826 vnode_t
vfs_vnodecovered(mount_t mp)827 vfs_vnodecovered(mount_t mp)
828 {
829 vnode_t vp = mp->mnt_vnodecovered;
830 if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
831 return NULL;
832 } else {
833 return vp;
834 }
835 }
836 /*
837 * Similar to vfs_vnodecovered() except this variant doesn't block and returns
838 * NULL if the covered vnode is being reclaimed.
839 * Returns vnode with an iocount that must be released with vnode_put().
840 */
841 vnode_t
vfs_vnodecovered_noblock(mount_t mp)842 vfs_vnodecovered_noblock(mount_t mp)
843 {
844 vnode_t vp = mp->mnt_vnodecovered;
845
846 if ((vp == NULL) || (vnode_getwithref_noblock(vp) != 0)) {
847 return NULL;
848 } else {
849 return vp;
850 }
851 }
852
853 int
vfs_setdevvp(mount_t mp,vnode_t devvp)854 vfs_setdevvp(mount_t mp, vnode_t devvp)
855 {
856 if (mp == NULL) {
857 return 0;
858 }
859
860 if (devvp) {
861 if (devvp->v_type != VBLK) {
862 return EINVAL;
863 }
864
865 if (major(devvp->v_rdev) >= nblkdev) {
866 return ENXIO;
867 }
868 }
869
870 mp->mnt_devvp = devvp;
871
872 return 0;
873 }
874
875 /*
876 * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
877 * The iocount must be released with vnode_put(). Note that this KPI is subtle
878 * with respect to the validity of using this device vnode for anything substantial
879 * (which is discouraged). If commands are sent to the device driver without
880 * taking proper steps to ensure that the device is still open, chaos may ensue.
881 * Similarly, this routine should only be called if there is some guarantee that
882 * the mount itself is still valid.
883 */
884 vnode_t
vfs_devvp(mount_t mp)885 vfs_devvp(mount_t mp)
886 {
887 vnode_t vp = mp->mnt_devvp;
888
889 if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
890 return vp;
891 }
892
893 return NULLVP;
894 }
895
/*
 * Return the I/O attributes associated with mount_t.
 *
 * Copies the mount's cached device I/O limits into *ioattrp.  A NULL mp
 * yields conservative defaults (MAXPHYS transfer sizes, DEV_BSIZE blocks).
 * The reserved slots are always cleared for the caller.
 */
void
vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
{
	ioattrp->io_reserved[0] = NULL;
	ioattrp->io_reserved[1] = NULL;
	if (mp == NULL) {
		/* no mount: hand back safe defaults */
		ioattrp->io_maxreadcnt = MAXPHYS;
		ioattrp->io_maxwritecnt = MAXPHYS;
		ioattrp->io_segreadcnt = 32;
		ioattrp->io_segwritecnt = 32;
		ioattrp->io_maxsegreadsize = MAXPHYS;
		ioattrp->io_maxsegwritesize = MAXPHYS;
		ioattrp->io_devblocksize = DEV_BSIZE;
		ioattrp->io_flags = 0;
		ioattrp->io_max_swappin_available = 0;
	} else {
		ioattrp->io_maxreadcnt = mp->mnt_maxreadcnt;
		ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
		ioattrp->io_segreadcnt = mp->mnt_segreadcnt;
		ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
		ioattrp->io_maxsegreadsize = mp->mnt_maxsegreadsize;
		ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
		ioattrp->io_devblocksize = mp->mnt_devblocksize;
		ioattrp->io_flags = mp->mnt_ioflags;
		ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
	}
}


/*
 * Set the I/O attributes associated with mount_t.
 *
 * Copies *ioattrp into the mount's cached I/O limits.  A NULL mp is a
 * no-op.  NOTE(review): fields are copied without taking the mount lock,
 * matching vfs_ioattr() above — callers presumably serialize externally.
 */
void
vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
{
	if (mp == NULL) {
		return;
	}
	mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt;
	mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
	mp->mnt_segreadcnt = ioattrp->io_segreadcnt;
	mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
	mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
	mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
	mp->mnt_devblocksize = ioattrp->io_devblocksize;
	mp->mnt_ioflags = ioattrp->io_flags;
	mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
}
947
948 /*
949 * Add a new filesystem into the kernel specified in passed in
950 * vfstable structure. It fills in the vnode
951 * dispatch vector that is to be passed to when vnodes are created.
952 * It returns a handle which is to be used to when the FS is to be removed
953 */
954 typedef int (*PFI)(void *);
955 extern int vfs_opv_numops;
956 errno_t
vfs_fsadd(struct vfs_fsentry * vfe,vfstable_t * handle)957 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
958 {
959 struct vfstable *newvfstbl = NULL;
960 int i, j;
961 int(***opv_desc_vector_p)(void *);
962 int(**opv_desc_vector)(void *);
963 const struct vnodeopv_entry_desc *opve_descp;
964 int desccount;
965 int descsize;
966 PFI *descptr;
967
968 /*
969 * This routine is responsible for all the initialization that would
970 * ordinarily be done as part of the system startup;
971 */
972
973 if (vfe == (struct vfs_fsentry *)0) {
974 return EINVAL;
975 }
976
977 desccount = vfe->vfe_vopcnt;
978 if ((desccount <= 0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
979 || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) {
980 return EINVAL;
981 }
982
983 /* Non-threadsafe filesystems are not supported */
984 if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
985 return EINVAL;
986 }
987
988 newvfstbl = kalloc_type(struct vfstable, Z_WAITOK | Z_ZERO);
989 newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
990 strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
991 if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) {
992 int tmp;
993 int found = 0;
994 lck_mtx_lock(&vfs_typenum_mtx);
995 for (tmp = fstypenumstart; tmp < OID_AUTO_START; tmp++) {
996 if (isclr(vfs_typenum_arr, tmp)) {
997 newvfstbl->vfc_typenum = tmp;
998 setbit(vfs_typenum_arr, tmp);
999 found = 1;
1000 break;
1001 }
1002 }
1003 if (!found) {
1004 lck_mtx_unlock(&vfs_typenum_mtx);
1005 return EINVAL;
1006 }
1007 if (maxvfstypenum < OID_AUTO_START) {
1008 /* getvfsbyname checks up to but not including maxvfstypenum */
1009 maxvfstypenum = newvfstbl->vfc_typenum + 1;
1010 }
1011 lck_mtx_unlock(&vfs_typenum_mtx);
1012 } else {
1013 newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
1014 lck_mtx_lock(&vfs_typenum_mtx);
1015 setbit(vfs_typenum_arr, newvfstbl->vfc_typenum);
1016 if (newvfstbl->vfc_typenum >= maxvfstypenum) {
1017 maxvfstypenum = newvfstbl->vfc_typenum + 1;
1018 }
1019 lck_mtx_unlock(&vfs_typenum_mtx);
1020 }
1021
1022
1023 newvfstbl->vfc_refcount = 0;
1024 newvfstbl->vfc_flags = 0;
1025 newvfstbl->vfc_mountroot = NULL;
1026 newvfstbl->vfc_next = NULL;
1027 newvfstbl->vfc_vfsflags = 0;
1028 if (vfe->vfe_flags & VFS_TBL64BITREADY) {
1029 newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
1030 }
1031 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2) {
1032 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
1033 }
1034 if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2) {
1035 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
1036 }
1037 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) {
1038 newvfstbl->vfc_flags |= MNT_LOCAL;
1039 }
1040 if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) {
1041 newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
1042 } else {
1043 newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
1044 }
1045
1046 if (vfe->vfe_flags & VFS_TBLNATIVEXATTR) {
1047 newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1048 }
1049 if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT) {
1050 newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
1051 }
1052 if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED) {
1053 newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
1054 }
1055 if (vfe->vfe_flags & VFS_TBLNOMACLABEL) {
1056 newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
1057 }
1058 if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) {
1059 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
1060 }
1061 if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME) {
1062 newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
1063 }
1064 if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT) {
1065 newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
1066 }
1067
1068 /*
1069 * Allocate and init the vectors.
1070 * Also handle backwards compatibility.
1071 *
1072 * We allocate one large block to hold all <desccount>
1073 * vnode operation vectors stored contiguously.
1074 */
1075 /* XXX - shouldn't be M_TEMP */
1076
1077 descsize = desccount * vfs_opv_numops;
1078 descptr = kalloc_type(PFI, descsize, Z_WAITOK | Z_ZERO);
1079
1080 newvfstbl->vfc_descptr = descptr;
1081 newvfstbl->vfc_descsize = descsize;
1082
1083 newvfstbl->vfc_sysctl = NULL;
1084
1085 for (i = 0; i < desccount; i++) {
1086 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1087 /*
1088 * Fill in the caller's pointer to the start of the i'th vector.
1089 * They'll need to supply it when calling vnode_create.
1090 */
1091 opv_desc_vector = descptr + i * vfs_opv_numops;
1092 *opv_desc_vector_p = opv_desc_vector;
1093
1094 for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
1095 opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
1096
1097 /* Silently skip known-disabled operations */
1098 if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
1099 printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
1100 vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
1101 continue;
1102 }
1103
1104 /*
1105 * Sanity check: is this operation listed
1106 * in the list of operations? We check this
1107 * by seeing if its offset is zero. Since
1108 * the default routine should always be listed
1109 * first, it should be the only one with a zero
1110 * offset. Any other operation with a zero
1111 * offset is probably not listed in
1112 * vfs_op_descs, and so is probably an error.
1113 *
1114 * A panic here means the layer programmer
1115 * has committed the all-too common bug
1116 * of adding a new operation to the layer's
1117 * list of vnode operations but
1118 * not adding the operation to the system-wide
1119 * list of supported operations.
1120 */
1121 if (opve_descp->opve_op->vdesc_offset == 0 &&
1122 opve_descp->opve_op != VDESC(vnop_default)) {
1123 printf("vfs_fsadd: operation %s not listed in %s.\n",
1124 opve_descp->opve_op->vdesc_name,
1125 "vfs_op_descs");
1126 panic("vfs_fsadd: bad operation");
1127 }
1128 /*
1129 * Fill in this entry.
1130 */
1131 opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1132 opve_descp->opve_impl;
1133 }
1134
1135 /*
1136 * Finally, go back and replace unfilled routines
1137 * with their default. (Sigh, an O(n^3) algorithm. I
1138 * could make it better, but that'd be work, and n is small.)
1139 */
1140 opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1141
1142 /*
1143 * Force every operations vector to have a default routine.
1144 */
1145 opv_desc_vector = *opv_desc_vector_p;
1146 if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
1147 panic("vfs_fsadd: operation vector without default routine.");
1148 }
1149 for (j = 0; j < vfs_opv_numops; j++) {
1150 if (opv_desc_vector[j] == NULL) {
1151 opv_desc_vector[j] =
1152 opv_desc_vector[VOFFSET(vnop_default)];
1153 }
1154 }
1155 } /* end of each vnodeopv_desc parsing */
1156
1157 *handle = vfstable_add(newvfstbl);
1158
1159 if (newvfstbl->vfc_vfsops->vfs_init) {
1160 struct vfsconf vfsc;
1161 bzero(&vfsc, sizeof(struct vfsconf));
1162 vfsc.vfc_reserved1 = 0;
1163 bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1164 vfsc.vfc_typenum = (*handle)->vfc_typenum;
1165 vfsc.vfc_refcount = (*handle)->vfc_refcount;
1166 vfsc.vfc_flags = (*handle)->vfc_flags;
1167 vfsc.vfc_reserved2 = 0;
1168 vfsc.vfc_reserved3 = 0;
1169
1170 (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1171 }
1172
1173 kfree_type(struct vfstable, newvfstbl);
1174
1175 return 0;
1176 }
1177
1178 /*
1179 * Removes the filesystem from kernel.
1180 * The argument passed in is the handle that was given when
1181 * file system was added
1182 */
errno_t
vfs_fsremove(vfstable_t handle)
{
	struct vfstable * vfstbl = (struct vfstable *)handle;
	void *old_desc = NULL;
	size_t descsize = 0;
	errno_t err;

	/* Preflight check for any mounts */
	mount_list_lock();
	if (vfstbl->vfc_refcount != 0) {
		/* Filesystem is still mounted somewhere; refuse removal. */
		mount_list_unlock();
		return EBUSY;
	}

	/* Free the spot in vfs_typenum_arr */
	lck_mtx_lock(&vfs_typenum_mtx);
	clrbit(vfs_typenum_arr, handle->vfc_typenum);
	if (maxvfstypenum == handle->vfc_typenum) {
		/* This was the highest allocated typenum; lower the mark. */
		maxvfstypenum--;
	}
	lck_mtx_unlock(&vfs_typenum_mtx);

	/*
	 * save the old descriptor; the free cannot occur unconditionally,
	 * since vfstable_del() may fail.
	 */
	if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
		old_desc = vfstbl->vfc_descptr;
		descsize = vfstbl->vfc_descsize;
	}
	err = vfstable_del(vfstbl);

	mount_list_unlock();

	/* free the descriptor if the delete was successful */
	if (err == 0) {
		kfree_type(PFI, descsize, old_desc);
	}

	return err;
}
1225
/* Record the owning uid/gid on the mount structure. */
void
vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
{
	mp->mnt_fsowner = uid;
	mp->mnt_fsgroup = gid;
}
1232
1233 /*
1234 * Callers should be careful how they use this; accessing
1235 * mnt_last_write_completed_timestamp is not thread-safe. Writing to
1236 * it isn't either. Point is: be prepared to deal with strange values
1237 * being returned.
1238 */
1239 uint64_t
vfs_idle_time(mount_t mp)1240 vfs_idle_time(mount_t mp)
1241 {
1242 if (mp->mnt_pending_write_size) {
1243 return 0;
1244 }
1245
1246 struct timeval now;
1247
1248 microuptime(&now);
1249
1250 return (now.tv_sec
1251 - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
1252 + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec;
1253 }
1254
1255 /*
1256 * vfs_context_create_with_proc() takes a reference on an arbitrary
1257 * thread in the process. To distinguish this reference-counted thread
1258 * from the usual non-reference-counted thread, we set the least significant
1259 * bit of of vc_thread.
1260 */
/* True if vc_thread carries a thread reference (low tag bit set). */
#define VFS_CONTEXT_THREAD_IS_REFERENCED(ctx) \
	(!!(((uintptr_t)(ctx)->vc_thread) & 1UL))

/* Store a referenced thread in vc_thread, tagging it via the low bit. */
#define VFS_CONTEXT_SET_REFERENCED_THREAD(ctx, thr) \
	(ctx)->vc_thread = (thread_t)(((uintptr_t)(thr)) | 1UL)

/* Strip the tag bit to recover the actual thread_t (may be NULL). */
#define VFS_CONTEXT_GET_THREAD(ctx) \
	((thread_t)(((uintptr_t)(ctx)->vc_thread) & ~1UL))
1269
1270 int
vfs_context_pid(vfs_context_t ctx)1271 vfs_context_pid(vfs_context_t ctx)
1272 {
1273 return proc_pid(vfs_context_proc(ctx));
1274 }
1275
1276 int
vfs_context_copy_audit_token(vfs_context_t ctx,audit_token_t * token)1277 vfs_context_copy_audit_token(vfs_context_t ctx, audit_token_t *token)
1278 {
1279 kern_return_t err;
1280 task_t task;
1281 mach_msg_type_number_t info_size = TASK_AUDIT_TOKEN_COUNT;
1282
1283 task = vfs_context_task(ctx);
1284
1285 if (task == NULL) {
1286 // Not sure how this would happen; we are supposed to be
1287 // in the middle of using the context. Regardless, don't
1288 // wander off a NULL pointer.
1289 return ESRCH;
1290 }
1291
1292 err = task_info(task, TASK_AUDIT_TOKEN, (integer_t *)token, &info_size);
1293 return (err) ? ESRCH : 0;
1294 }
1295
1296 int
vfs_context_suser(vfs_context_t ctx)1297 vfs_context_suser(vfs_context_t ctx)
1298 {
1299 return suser(ctx->vc_ucred, NULL);
1300 }
1301
1302 /*
1303 * Return bit field of signals posted to all threads in the context's process.
1304 *
1305 * XXX Signals should be tied to threads, not processes, for most uses of this
1306 * XXX call.
1307 */
1308 int
vfs_context_issignal(vfs_context_t ctx,sigset_t mask)1309 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1310 {
1311 proc_t p = vfs_context_proc(ctx);
1312 if (p) {
1313 return proc_pendingsignals(p, mask);
1314 }
1315 return 0;
1316 }
1317
1318 int
vfs_context_is64bit(vfs_context_t ctx)1319 vfs_context_is64bit(vfs_context_t ctx)
1320 {
1321 uthread_t uth;
1322 thread_t t;
1323
1324 if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1325 uth = get_bsdthread_info(t);
1326 } else {
1327 uth = current_uthread();
1328 }
1329 return uthread_is64bit(uth);
1330 }
1331
1332 boolean_t
vfs_context_can_resolve_triggers(vfs_context_t ctx)1333 vfs_context_can_resolve_triggers(vfs_context_t ctx)
1334 {
1335 proc_t proc = vfs_context_proc(ctx);
1336
1337 if (proc) {
1338 if (proc->p_vfs_iopolicy &
1339 P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE) {
1340 return false;
1341 }
1342 return true;
1343 }
1344 return false;
1345 }
1346
1347 boolean_t
vfs_context_can_break_leases(vfs_context_t ctx)1348 vfs_context_can_break_leases(vfs_context_t ctx)
1349 {
1350 proc_t proc = vfs_context_proc(ctx);
1351
1352 if (proc) {
1353 /*
1354 * We do not have a separate I/O policy for this,
1355 * because the scenarios where we would not want
1356 * local file lease breaks are currently exactly
1357 * the same as where we would not want dataless
1358 * file materialization (mainly, system daemons
1359 * passively snooping file activity).
1360 */
1361 if (proc->p_vfs_iopolicy &
1362 P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES) {
1363 return true;
1364 }
1365 return false;
1366 }
1367 return true;
1368 }
1369
1370 bool
vfs_context_allow_fs_blksize_nocache_write(vfs_context_t ctx)1371 vfs_context_allow_fs_blksize_nocache_write(vfs_context_t ctx)
1372 {
1373 thread_t t;
1374 proc_t p;
1375
1376 if ((ctx == NULL) || (t = VFS_CONTEXT_GET_THREAD(ctx)) == NULL) {
1377 return false;
1378 }
1379
1380 p = (proc_t)get_bsdthreadtask_info(t);
1381 if (p && (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_NOCACHE_WRITE_FS_BLKSIZE)) {
1382 return true;
1383 }
1384
1385 return false;
1386 }
1387
1388 boolean_t
vfs_context_skip_mtime_update(vfs_context_t ctx)1389 vfs_context_skip_mtime_update(vfs_context_t ctx)
1390 {
1391 proc_t p = vfs_context_proc(ctx);
1392 thread_t t = vfs_context_thread(ctx);
1393 uthread_t ut = t ? get_bsdthread_info(t) : NULL;
1394
1395 if (ut && (os_atomic_load(&ut->uu_flag, relaxed) & UT_SKIP_MTIME_UPDATE)) {
1396 return true;
1397 }
1398
1399 /*
1400 * If the 'UT_SKIP_MTIME_UPDATE_IGNORE' policy is set for this thread then
1401 * we override the default behavior and ignore the process's mtime update
1402 * policy.
1403 */
1404 if (ut && (os_atomic_load(&ut->uu_flag, relaxed) & UT_SKIP_MTIME_UPDATE_IGNORE)) {
1405 return false;
1406 }
1407
1408 if (p && (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_SKIP_MTIME_UPDATE)) {
1409 return true;
1410 }
1411
1412 return false;
1413 }
1414
1415 boolean_t
vfs_context_allow_entitled_reserve_access(vfs_context_t ctx)1416 vfs_context_allow_entitled_reserve_access(vfs_context_t ctx)
1417 {
1418 thread_t t;
1419 uthread_t uth;
1420 proc_t p;
1421
1422 if ((ctx == NULL) || (t = VFS_CONTEXT_GET_THREAD(ctx)) == NULL) {
1423 return false;
1424 }
1425
1426 uth = get_bsdthread_info(t);
1427 if (uth && (os_atomic_load(&uth->uu_flag, relaxed) & UT_FS_ENTITLED_RESERVE_ACCESS)) {
1428 return true;
1429 }
1430
1431 p = (proc_t)get_bsdthreadtask_info(t);
1432 if (p && (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_ENTITLED_RESERVE_ACCESS)) {
1433 return true;
1434 }
1435
1436 return false;
1437 }
1438
1439 /*
1440 * vfs_context_proc
1441 *
1442 * Description: Given a vfs_context_t, return the proc_t associated with it.
1443 *
1444 * Parameters: vfs_context_t The context to use
1445 *
1446 * Returns: proc_t The process for this context
1447 *
1448 * Notes: This function will return the current_proc() if any of the
1449 * following conditions are true:
1450 *
1451 * o The supplied context pointer is NULL
1452 * o There is no Mach thread associated with the context
1453 * o There is no Mach task associated with the Mach thread
1454 * o There is no proc_t associated with the Mach task
1455 * o The proc_t has no per process open file table
1456 *
1457 * This causes this function to return a value matching as
1458 * closely as possible the previous behaviour.
1459 */
1460 proc_t
vfs_context_proc(vfs_context_t ctx)1461 vfs_context_proc(vfs_context_t ctx)
1462 {
1463 proc_t proc = NULL;
1464 thread_t t;
1465
1466 if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1467 proc = (proc_t)get_bsdthreadtask_info(t);
1468 }
1469
1470 return proc == NULL ? current_proc() : proc;
1471 }
1472
1473 /*
1474 * vfs_context_get_special_port
1475 *
1476 * Description: Return the requested special port from the task associated
1477 * with the given context.
1478 *
1479 * Parameters: vfs_context_t The context to use
1480 * int Index of special port
1481 * ipc_port_t * Pointer to returned port
1482 *
1483 * Returns: kern_return_t see task_get_special_port()
1484 */
1485 kern_return_t
vfs_context_get_special_port(vfs_context_t ctx,int which,ipc_port_t * portp)1486 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1487 {
1488 return task_get_special_port(vfs_context_task(ctx), which, portp);
1489 }
1490
1491 /*
1492 * vfs_context_set_special_port
1493 *
1494 * Description: Set the requested special port in the task associated
1495 * with the given context.
1496 *
1497 * Parameters: vfs_context_t The context to use
1498 * int Index of special port
1499 * ipc_port_t New special port
1500 *
1501 * Returns: kern_return_t see task_set_special_port_internal()
1502 */
1503 kern_return_t
vfs_context_set_special_port(vfs_context_t ctx,int which,ipc_port_t port)1504 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1505 {
1506 return task_set_special_port_internal(vfs_context_task(ctx),
1507 which, port);
1508 }
1509
1510 /*
1511 * vfs_context_thread
1512 *
1513 * Description: Return the Mach thread associated with a vfs_context_t
1514 *
1515 * Parameters: vfs_context_t The context to use
1516 *
1517 * Returns: thread_t The thread for this context, or
1518 * NULL, if there is not one.
1519 *
1520 * Notes: NULL thread_t's are legal, but discouraged. They occur only
1521 * as a result of a static vfs_context_t declaration in a function
1522 * and will result in this function returning NULL.
1523 *
1524 * This is intentional; this function should NOT return the
1525 * current_thread() in this case.
1526 */
thread_t
vfs_context_thread(vfs_context_t ctx)
{
	/* Mask off the reference tag bit; result may legitimately be NULL. */
	return VFS_CONTEXT_GET_THREAD(ctx);
}
1532
1533 /*
1534 * vfs_context_task
1535 *
1536 * Description: Return the Mach task associated with a vfs_context_t
1537 *
1538 * Parameters: vfs_context_t The context to use
1539 *
1540 * Returns: task_t The task for this context, or
1541 * NULL, if there is not one.
1542 *
1543 * Notes: NULL task_t's are legal, but discouraged. They occur only
1544 * as a result of a static vfs_context_t declaration in a function
1545 * and will result in this function returning NULL.
1546 *
1547 * This is intentional; this function should NOT return the
1548 * task associated with current_thread() in this case.
1549 */
1550 task_t
vfs_context_task(vfs_context_t ctx)1551 vfs_context_task(vfs_context_t ctx)
1552 {
1553 task_t task = NULL;
1554 thread_t t;
1555
1556 if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1557 task = get_threadtask(t);
1558 }
1559
1560 return task;
1561 }
1562
1563 /*
1564 * vfs_context_cwd
1565 *
 * Description: Returns the vnode for the current working
 *		directory for the supplied context (no reference is taken).
1568 *
1569 * Parameters: vfs_context_t The context to use
1570 *
1571 * Returns: vnode_t The current working directory
1572 * for this context
1573 *
1574 * Notes: The function first attempts to obtain the current directory
1575 * from the thread, and if it is not present there, falls back
1576 * to obtaining it from the process instead. If it can't be
1577 * obtained from either place, we return NULLVP.
1578 */
1579 vnode_t
vfs_context_cwd(vfs_context_t ctx)1580 vfs_context_cwd(vfs_context_t ctx)
1581 {
1582 vnode_t cwd = NULLVP;
1583 thread_t t;
1584
1585 if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1586 uthread_t uth = get_bsdthread_info(t);
1587 proc_t proc;
1588
1589 /*
1590 * Get the cwd from the thread; if there isn't one, get it
1591 * from the process, instead.
1592 */
1593 if ((cwd = uth->uu_cdir) == NULLVP &&
1594 (proc = (proc_t)get_bsdthreadtask_info(t)) != NULL) {
1595 cwd = proc->p_fd.fd_cdir;
1596 }
1597 }
1598
1599 return cwd;
1600 }
1601
1602 /*
1603 * vfs_context_create
1604 *
1605 * Description: Allocate and initialize a new context.
1606 *
1607 * Parameters: vfs_context_t: Context to copy, or NULL for new
1608 *
1609 * Returns: Pointer to new context
1610 *
1611 * Notes: Copy cred and thread from argument, if available; else
1612 * initialize with current thread and new cred. Returns
1613 * with a reference held on the credential.
1614 */
1615 vfs_context_t
vfs_context_create(vfs_context_t ctx)1616 vfs_context_create(vfs_context_t ctx)
1617 {
1618 vfs_context_t newcontext;
1619
1620 newcontext = zalloc_flags(KT_VFS_CONTEXT, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1621
1622 if (ctx == NULL) {
1623 ctx = vfs_context_current();
1624 }
1625 *newcontext = *ctx;
1626 if (IS_VALID_CRED(ctx->vc_ucred)) {
1627 kauth_cred_ref(ctx->vc_ucred);
1628 }
1629
1630 return newcontext;
1631 }
1632
1633 /*
1634 * vfs_context_create_with_proc
1635 *
1636 * Description: Create a new context with credentials taken from
1637 * the specified proc.
1638 *
1639 * Parameters: proc_t: The process whose crendials to use.
1640 *
1641 * Returns: Pointer to new context.
1642 *
1643 * Notes: The context will also take a reference on an arbitrary
1644 * thread in the process as well as the process's credentials.
1645 */
vfs_context_t
vfs_context_create_with_proc(proc_t p)
{
	vfs_context_t newcontext;
	thread_t thread;
	kauth_cred_t cred;

	/* For the current process this degenerates to a plain create. */
	if (p == current_proc()) {
		return vfs_context_create(NULL);
	}

	newcontext = zalloc_flags(KT_VFS_CONTEXT, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	/* Pick an arbitrary thread of p and take a reference on it. */
	proc_lock(p);
	thread = proc_thread(p); /* XXX */
	if (thread != NULL) {
		thread_reference(thread);
	}
	proc_unlock(p);

	/* Also take a reference on the process's credentials. */
	cred = kauth_cred_proc_ref(p);

	if (thread != NULL) {
		/* Tag vc_thread so vfs_context_rele() drops the reference. */
		VFS_CONTEXT_SET_REFERENCED_THREAD(newcontext, thread);
	}
	newcontext->vc_ucred = cred;

	return newcontext;
}
1675
/*
 * Return the implicit context of the calling thread.  The thread_ro
 * structure is laid out so that (tro_owner, tro_cred) line up with
 * (vc_thread, vc_ucred); the static asserts keep the cast honest.
 */
vfs_context_t
vfs_context_current(void)
{
	static_assert(offsetof(struct thread_ro, tro_owner) ==
	    offsetof(struct vfs_context, vc_thread));
	static_assert(offsetof(struct thread_ro, tro_cred) ==
	    offsetof(struct vfs_context, vc_ucred));

	return (vfs_context_t)current_thread_ro();
}
1686
/* Return the global kernel context (vfs_context0). */
vfs_context_t
vfs_context_kernel(void)
{
	return &vfs_context0;
}
1692
1693 int
vfs_context_rele(vfs_context_t ctx)1694 vfs_context_rele(vfs_context_t ctx)
1695 {
1696 if (ctx) {
1697 if (IS_VALID_CRED(ctx->vc_ucred)) {
1698 kauth_cred_unref(&ctx->vc_ucred);
1699 }
1700 if (VFS_CONTEXT_THREAD_IS_REFERENCED(ctx)) {
1701 assert(VFS_CONTEXT_GET_THREAD(ctx) != NULL);
1702 thread_deallocate(VFS_CONTEXT_GET_THREAD(ctx));
1703 }
1704 zfree(KT_VFS_CONTEXT, ctx);
1705 }
1706 return 0;
1707 }
1708
1709
/* Return the credential stored in the context (no reference taken). */
kauth_cred_t
vfs_context_ucred(vfs_context_t ctx)
{
	return ctx->vc_ucred;
}
1715
1716 /*
1717 * Return true if the context is owned by the superuser.
1718 */
1719 int
vfs_context_issuser(vfs_context_t ctx)1720 vfs_context_issuser(vfs_context_t ctx)
1721 {
1722 return kauth_cred_issuser(vfs_context_ucred(ctx));
1723 }
1724
1725 int
vfs_context_iskernel(vfs_context_t ctx)1726 vfs_context_iskernel(vfs_context_t ctx)
1727 {
1728 return ctx == &vfs_context0;
1729 }
1730
1731 /*
1732 * Given a context, for all fields of vfs_context_t which
1733 * are not held with a reference, set those fields to the
1734 * values for the current execution context.
1735 *
1736 * Returns: 0 for success, nonzero for failure
1737 *
1738 * The intended use is:
1739 * 1. vfs_context_create() gets the caller a context
1740 * 2. vfs_context_bind() sets the unrefcounted data
1741 * 3. vfs_context_rele() releases the context
1742 *
1743 */
int
vfs_context_bind(vfs_context_t ctx)
{
	/* Unreferenced binding: plain assignment, no thread_reference(). */
	assert(!VFS_CONTEXT_THREAD_IS_REFERENCED(ctx));
	ctx->vc_thread = current_thread();
	return 0;
}
1751
1752 int
vfs_set_thread_fs_private(uint8_t tag,uint64_t fs_private)1753 vfs_set_thread_fs_private(uint8_t tag, uint64_t fs_private)
1754 {
1755 struct uthread *ut;
1756
1757 if (tag != FS_PRIVATE_TAG_APFS) {
1758 return ENOTSUP;
1759 }
1760
1761 ut = current_uthread();
1762 ut->t_fs_private = fs_private;
1763
1764 return 0;
1765 }
1766
1767 int
vfs_get_thread_fs_private(uint8_t tag,uint64_t * fs_private)1768 vfs_get_thread_fs_private(uint8_t tag, uint64_t *fs_private)
1769 {
1770 struct uthread *ut;
1771
1772 if (tag != FS_PRIVATE_TAG_APFS) {
1773 return ENOTSUP;
1774 }
1775
1776 ut = current_uthread();
1777 *fs_private = ut->t_fs_private;
1778
1779 return 0;
1780 }
1781
1782 int
vfs_isswapmount(mount_t mnt)1783 vfs_isswapmount(mount_t mnt)
1784 {
1785 return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
1786 }
1787
1788 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1789
1790
1791 /*
1792 * Convert between vnode types and inode formats (since POSIX.1
1793 * defines mode word of stat structure in terms of inode formats).
1794 */
1795 enum vtype
vnode_iftovt(int mode)1796 vnode_iftovt(int mode)
1797 {
1798 return iftovt_tab[((mode) & S_IFMT) >> 12];
1799 }
1800
1801 int
vnode_vttoif(enum vtype indx)1802 vnode_vttoif(enum vtype indx)
1803 {
1804 return vttoif_tab[(int)(indx)];
1805 }
1806
/* Combine a vnode type and permission bits into an inode mode word. */
int
vnode_makeimode(int indx, int mode)
{
	return (int)(VTTOIF(indx) | mode);
}
1812
1813
1814 /*
1815 * vnode manipulation functions.
1816 */
1817
1818 /* returns system root vnode iocount; It should be released using vnode_put() */
vnode_t
vfs_rootvnode(void)
{
	vnode_t vp = NULLVP;

	/* Cheap unlocked pre-check; re-read under the rw lock below. */
	if (rootvnode) {
		lck_rw_lock_shared(&rootvnode_rw_lock);
		vp = rootvnode;
		/* vnode_get() takes the iocount; on failure return NULLVP. */
		if (vp && (vnode_get(vp) != 0)) {
			vp = NULLVP;
		}
		lck_rw_unlock_shared(&rootvnode_rw_lock);
	}

	return vp;
}
1835
1836 uint32_t
vnode_vid(vnode_t vp)1837 vnode_vid(vnode_t vp)
1838 {
1839 return (uint32_t)(vp->v_id);
1840 }
1841
/* Return the mount point this vnode belongs to (may be NULL). */
mount_t
vnode_mount(vnode_t vp)
{
	return vp->v_mount;
}
1847
1848 #if CONFIG_IOSCHED
1849 vnode_t
vnode_mountdevvp(vnode_t vp)1850 vnode_mountdevvp(vnode_t vp)
1851 {
1852 if (vp->v_mount) {
1853 return vp->v_mount->mnt_devvp;
1854 } else {
1855 return (vnode_t)0;
1856 }
1857 }
1858 #endif
1859
1860 boolean_t
vnode_isonexternalstorage(vnode_t vp)1861 vnode_isonexternalstorage(vnode_t vp)
1862 {
1863 if (vp) {
1864 if (vp->v_mount) {
1865 if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_PERIPHERAL_DRIVE) {
1866 return TRUE;
1867 }
1868 }
1869 }
1870 return FALSE;
1871 }
1872
1873 boolean_t
vnode_isonssd(vnode_t vp)1874 vnode_isonssd(vnode_t vp)
1875 {
1876 if (vp) {
1877 mount_t mp = vp->v_mount;
1878 if (mp && disk_conditioner_mount_is_ssd(mp)) {
1879 return TRUE;
1880 }
1881 }
1882 return FALSE;
1883 }
1884
1885 mount_t
vnode_mountedhere(vnode_t vp)1886 vnode_mountedhere(vnode_t vp)
1887 {
1888 mount_t mp;
1889
1890 if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1891 (mp->mnt_vnodecovered == vp)) {
1892 return mp;
1893 } else {
1894 return (mount_t)NULL;
1895 }
1896 }
1897
1898 /* returns vnode type of vnode_t */
/* Return the vnode's type (VREG, VDIR, VLNK, ...). */
enum vtype
vnode_vtype(vnode_t vp)
{
	return vp->v_type;
}
1904
1905 /* returns FS specific node saved in vnode */
/* Return the file system's private data pointer (v_data). */
void *
vnode_fsnode(vnode_t vp)
{
	return vp->v_data;
}
1911
/* Detach the file system's private data from the vnode. */
void
vnode_clearfsnode(vnode_t vp)
{
	vp->v_data = NULL;
}
1917
/* Return the raw device number stored in the vnode (v_rdev). */
dev_t
vnode_specrdev(vnode_t vp)
{
	return vp->v_rdev;
}
1923
1924
1925 /* Accessor functions */
1926 /* is vnode_t a root vnode */
1927 int
vnode_isvroot(vnode_t vp)1928 vnode_isvroot(vnode_t vp)
1929 {
1930 return (vp->v_flag & VROOT)? 1 : 0;
1931 }
1932
1933 /* is vnode_t a system vnode */
1934 int
vnode_issystem(vnode_t vp)1935 vnode_issystem(vnode_t vp)
1936 {
1937 return (vp->v_flag & VSYSTEM)? 1 : 0;
1938 }
1939
1940 /* is vnode_t a swap file vnode */
1941 int
vnode_isswap(vnode_t vp)1942 vnode_isswap(vnode_t vp)
1943 {
1944 return (vp->v_flag & VSWAP)? 1 : 0;
1945 }
1946
1947 /* is vnode_t a tty */
1948 int
vnode_istty(vnode_t vp)1949 vnode_istty(vnode_t vp)
1950 {
1951 return (vp->v_flag & VISTTY) ? 1 : 0;
1952 }
1953
/* is a mount operation in progress on this vnode_t */
1955 int
vnode_ismount(vnode_t vp)1956 vnode_ismount(vnode_t vp)
1957 {
1958 return (vp->v_flag & VMOUNT)? 1 : 0;
1959 }
1960
/* is this vnode under recycle now */
1962 int
vnode_isrecycled(vnode_t vp)1963 vnode_isrecycled(vnode_t vp)
1964 {
1965 int ret;
1966
1967 vnode_lock_spin(vp);
1968 ret = (vp->v_lflag & (VL_TERMINATE | VL_DEAD))? 1 : 0;
1969 vnode_unlock(vp);
1970 return ret;
1971 }
1972
1973 /* is this vnode marked for termination */
1974 int
vnode_willberecycled(vnode_t vp)1975 vnode_willberecycled(vnode_t vp)
1976 {
1977 return (vp->v_lflag & VL_MARKTERM) ? 1 : 0;
1978 }
1979
1980
1981 /* vnode was created by background task requesting rapid aging
1982 * and has not since been referenced by a normal task */
1983 int
vnode_israge(vnode_t vp)1984 vnode_israge(vnode_t vp)
1985 {
1986 return (vp->v_flag & VRAGE)? 1 : 0;
1987 }
1988
/* Always returns 0: no vnode currently requires snapshots. */
int
vnode_needssnapshots(__unused vnode_t vp)
{
	return 0;
}
1994
1995
1996 /* Check the process/thread to see if we should skip atime updates */
1997 int
vfs_ctx_skipatime(vfs_context_t ctx)1998 vfs_ctx_skipatime(vfs_context_t ctx)
1999 {
2000 struct uthread *ut;
2001 proc_t proc;
2002 thread_t thr;
2003
2004 proc = vfs_context_proc(ctx);
2005 thr = vfs_context_thread(ctx);
2006
2007 /* Validate pointers in case we were invoked via a kernel context */
2008 if (thr && proc) {
2009 ut = get_bsdthread_info(thr);
2010
2011 if (proc->p_lflag & P_LRAGE_VNODES) {
2012 return 1;
2013 }
2014
2015 if (ut) {
2016 if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) {
2017 return 1;
2018 }
2019 }
2020
2021 if (proc->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) {
2022 return 1;
2023 }
2024 }
2025 return 0;
2026 }
2027
2028 /* is vnode_t marked to not keep data cached once it's been consumed */
2029 int
vnode_isnocache(vnode_t vp)2030 vnode_isnocache(vnode_t vp)
2031 {
2032 return (vp->v_flag & VNOCACHE_DATA)? 1 : 0;
2033 }
2034
2035 /*
2036 * has sequential readahead been disabled on this vnode
2037 */
2038 int
vnode_isnoreadahead(vnode_t vp)2039 vnode_isnoreadahead(vnode_t vp)
2040 {
2041 return (vp->v_flag & VRAOFF)? 1 : 0;
2042 }
2043
2044 int
vnode_is_openevt(vnode_t vp)2045 vnode_is_openevt(vnode_t vp)
2046 {
2047 return (vp->v_flag & VOPENEVT)? 1 : 0;
2048 }
2049
2050 /* is vnode_t a standard one? */
2051 int
vnode_isstandard(vnode_t vp)2052 vnode_isstandard(vnode_t vp)
2053 {
2054 return (vp->v_flag & VSTANDARD)? 1 : 0;
2055 }
2056
2057 /* don't vflush() if SKIPSYSTEM */
2058 int
vnode_isnoflush(vnode_t vp)2059 vnode_isnoflush(vnode_t vp)
2060 {
2061 return (vp->v_flag & VNOFLUSH)? 1 : 0;
2062 }
2063
2064 /* is vnode_t a regular file */
2065 int
vnode_isreg(vnode_t vp)2066 vnode_isreg(vnode_t vp)
2067 {
2068 return (vp->v_type == VREG)? 1 : 0;
2069 }
2070
2071 /* is vnode_t a directory? */
2072 int
vnode_isdir(vnode_t vp)2073 vnode_isdir(vnode_t vp)
2074 {
2075 return (vp->v_type == VDIR)? 1 : 0;
2076 }
2077
2078 /* is vnode_t a symbolic link ? */
2079 int
vnode_islnk(vnode_t vp)2080 vnode_islnk(vnode_t vp)
2081 {
2082 return (vp->v_type == VLNK)? 1 : 0;
2083 }
2084
2085 int
vnode_lookup_continue_needed(vnode_t vp,struct componentname * cnp)2086 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
2087 {
2088 struct nameidata *ndp = cnp->cn_ndp;
2089
2090 if (ndp == NULL) {
2091 panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL");
2092 }
2093
2094 if (vnode_isdir(vp)) {
2095 if (vp->v_mountedhere != NULL) {
2096 goto yes;
2097 }
2098
2099 #if CONFIG_TRIGGERS
2100 if (vp->v_resolve) {
2101 goto yes;
2102 }
2103 #endif /* CONFIG_TRIGGERS */
2104 }
2105
2106
2107 if (vnode_islnk(vp)) {
2108 /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
2109 if (cnp->cn_flags & FOLLOW) {
2110 goto yes;
2111 }
2112 if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
2113 goto yes;
2114 }
2115 }
2116
2117 return 0;
2118
2119 yes:
2120 ndp->ni_flag |= NAMEI_CONTLOOKUP;
2121 return EKEEPLOOKING;
2122 }
2123
2124 /* is vnode_t a fifo ? */
2125 int
vnode_isfifo(vnode_t vp)2126 vnode_isfifo(vnode_t vp)
2127 {
2128 return (vp->v_type == VFIFO)? 1 : 0;
2129 }
2130
2131 /* is vnode_t a block device? */
2132 int
vnode_isblk(vnode_t vp)2133 vnode_isblk(vnode_t vp)
2134 {
2135 return (vp->v_type == VBLK)? 1 : 0;
2136 }
2137
2138 int
vnode_isspec(vnode_t vp)2139 vnode_isspec(vnode_t vp)
2140 {
2141 return ((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0;
2142 }
2143
2144 /* is vnode_t a char device? */
2145 int
vnode_ischr(vnode_t vp)2146 vnode_ischr(vnode_t vp)
2147 {
2148 return (vp->v_type == VCHR)? 1 : 0;
2149 }
2150
2151 /* is vnode_t a socket? */
2152 int
vnode_issock(vnode_t vp)2153 vnode_issock(vnode_t vp)
2154 {
2155 return (vp->v_type == VSOCK)? 1 : 0;
2156 }
2157
2158 /* is vnode_t a device with multiple active vnodes referring to it? */
2159 int
vnode_isaliased(vnode_t vp)2160 vnode_isaliased(vnode_t vp)
2161 {
2162 enum vtype vt = vp->v_type;
2163 if (!((vt == VCHR) || (vt == VBLK))) {
2164 return 0;
2165 } else {
2166 return vp->v_specflags & SI_ALIASED;
2167 }
2168 }
2169
2170 /* is vnode_t a named stream? */
/* 1 if vp is a named-stream vnode; always 0 when NAMEDSTREAMS is off. */
int
vnode_isnamedstream(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	return (vp->v_flag & VISNAMEDSTREAM) ? 1 : 0;
#else
	return 0;
#endif
}
2186
/* 1 if vp is a shadow vnode; always 0 when NAMEDSTREAMS is off. */
int
vnode_isshadow(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	return (vp->v_flag & VISSHADOW) ? 1 : 0;
#else
	return 0;
#endif
}
2202
2203 /* does vnode have associated named stream vnodes ? */
/* 1 if vp has associated named-stream vnodes; 0 when NAMEDSTREAMS is off. */
int
vnode_hasnamedstreams(
#if NAMEDSTREAMS
	vnode_t vp
#else
	__unused vnode_t vp
#endif
	)
{
#if NAMEDSTREAMS
	return (vp->v_lflag & VL_HASSTREAMS) ? 1 : 0;
#else
	return 0;
#endif
}
2219 /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */
/* Mark vp so its data is not kept cached once consumed. */
void
vnode_setnocache(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VNOCACHE_DATA;
	vnode_unlock(vp);
}
2227
/* Clear the no-cache marking set by vnode_setnocache(). */
void
vnode_clearnocache(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOCACHE_DATA;
	vnode_unlock(vp);
}
2235
/* Set the open-event flag on vp. */
void
vnode_set_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VOPENEVT;
	vnode_unlock(vp);
}
2243
/* Clear the open-event flag on vp. */
void
vnode_clear_openevt(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VOPENEVT;
	vnode_unlock(vp);
}
2251
2252
/* Disable sequential read-ahead on vp. */
void
vnode_setnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VRAOFF;
	vnode_unlock(vp);
}
2260
/* Re-enable sequential read-ahead on vp. */
void
vnode_clearnoreadahead(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VRAOFF;
	vnode_unlock(vp);
}
2268
2269 int
vnode_isfastdevicecandidate(vnode_t vp)2270 vnode_isfastdevicecandidate(vnode_t vp)
2271 {
2272 return (vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0;
2273 }
2274
/* Mark vp as a candidate for placement on the fast device. */
void
vnode_setfastdevicecandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VFASTDEVCANDIDATE;
	vnode_unlock(vp);
}
2282
/* Clear the fast-device candidacy marking on vp. */
void
vnode_clearfastdevicecandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VFASTDEVCANDIDATE;
	vnode_unlock(vp);
}
2290
2291 int
vnode_isautocandidate(vnode_t vp)2292 vnode_isautocandidate(vnode_t vp)
2293 {
2294 return (vp->v_flag & VAUTOCANDIDATE)? 1 : 0;
2295 }
2296
/* Set the auto-candidate flag on vp. */
void
vnode_setautocandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VAUTOCANDIDATE;
	vnode_unlock(vp);
}
2304
/* Clear the auto-candidate flag on vp. */
void
vnode_clearautocandidate(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VAUTOCANDIDATE;
	vnode_unlock(vp);
}
2312
2313
2314
2315
/* Mark vnode_t so vflush() skips it unless SKIPSYSTEM-style forcing applies (sets VNOFLUSH). */
void
vnode_setnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag |= VNOFLUSH;
	vnode_unlock(vp);
}
2324
/* Clear the VNOFLUSH flag so vflush() will consider this vnode again. */
void
vnode_clearnoflush(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_flag &= ~VNOFLUSH;
	vnode_unlock(vp);
}
2332
/* Get the memory object control associated with the vnode (via UBC). */
memory_object_control_t
vnode_memoryobject(vnode_t vp)
{
	return ubc_getobject(vp, UBC_FLAGS_NONE);
}
2339
2340 /* is vnode_t a blkdevice and has a FS mounted on it */
2341 int
vnode_ismountedon(vnode_t vp)2342 vnode_ismountedon(vnode_t vp)
2343 {
2344 return (vp->v_specflags & SI_MOUNTEDON)? 1 : 0;
2345 }
2346
/* Record that a filesystem is mounted on this (device) vnode. */
void
vnode_setmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags |= SI_MOUNTEDON;
	vnode_unlock(vp);
}
2354
/* Record that no filesystem is mounted on this (device) vnode any more. */
void
vnode_clearmountedon(vnode_t vp)
{
	vnode_lock_spin(vp);
	vp->v_specflags &= ~SI_MOUNTEDON;
	vnode_unlock(vp);
}
2362
2363
/* Set the vnode's filesystem tag (stored narrowed to 16 bits in v_tag). */
void
vnode_settag(vnode_t vp, int tag)
{
	/*
	 * We only assign enum values to v_tag, but add an assert to make sure we
	 * catch it in dev/debug builds if this ever changes.
	 */
	assert(tag >= SHRT_MIN && tag <= SHRT_MAX);
	vp->v_tag = (uint16_t)tag;
}
2374
/* Return the vnode's filesystem tag (v_tag). */
int
vnode_tag(vnode_t vp)
{
	return vp->v_tag;
}
2380
/* Return the cached parent vnode pointer (no reference is taken). */
vnode_t
vnode_parent(vnode_t vp)
{
	return vp->v_parent;
}
2386
/* Set the cached parent vnode pointer (unlocked raw store). */
void
vnode_setparent(vnode_t vp, vnode_t dvp)
{
	vp->v_parent = dvp;
}
2392
/* Set the vnode's cached name pointer (unlocked raw store; caller owns the string). */
void
vnode_setname(vnode_t vp, char * name)
{
	vp->v_name = name;
}
2398
/*
 * Copy the FS name registered when the filesystem was added to the kernel
 * into 'buf'; the caller's buffer must be at least MFSNAMELEN bytes.
 */
void
vnode_vfsname(vnode_t vp, char * buf)
{
	strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
}
2405
/* Return the FS type number of the vnode's mounted filesystem. */
int
vnode_vfstypenum(vnode_t vp)
{
	return vp->v_mount->mnt_vtable->vfc_typenum;
}
2412
2413 int
vnode_vfs64bitready(vnode_t vp)2414 vnode_vfs64bitready(vnode_t vp)
2415 {
2416 /*
2417 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2418 */
2419 if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
2420 return 1;
2421 } else {
2422 return 0;
2423 }
2424 }
2425
2426
2427
/* return the visible flags on associated mount point of vnode_t */
uint32_t
vnode_vfsvisflags(vnode_t vp)
{
	return vp->v_mount->mnt_flag & MNT_VISFLAGMASK;
}
2434
/* return the command modifier flags on associated mount point of vnode_t */
uint32_t
vnode_vfscmdflags(vnode_t vp)
{
	return vp->v_mount->mnt_flag & MNT_CMDFLAGS;
}
2441
/* return the maximum length of a "short" (inline-storable) symlink on vnode_t's FS */
uint32_t
vnode_vfsmaxsymlen(vnode_t vp)
{
	return vp->v_mount->mnt_maxsymlinklen;
}
2448
/* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
struct vfsstatfs *
vnode_vfsstatfs(vnode_t vp)
{
	return &vp->v_mount->mnt_vfsstat;
}
2455
/* return a handle to the FS's specific private data associated with vnode_t's mount point */
void *
vnode_vfsfsprivate(vnode_t vp)
{
	return vp->v_mount->mnt_data;
}
2462
2463 /* is vnode_t in a rdonly mounted FS */
2464 int
vnode_vfsisrdonly(vnode_t vp)2465 vnode_vfsisrdonly(vnode_t vp)
2466 {
2467 return (vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0;
2468 }
2469
/* Does this vnode's FS support the compound rename VNOP? */
int
vnode_compound_rename_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
}
/* Does this vnode's FS support the compound rmdir VNOP? */
int
vnode_compound_rmdir_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
}
/* Does this vnode's FS support the compound mkdir VNOP? */
int
vnode_compound_mkdir_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
}
/* Does this vnode's FS support the compound remove VNOP? */
int
vnode_compound_remove_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
}
/* Does this vnode's FS support the compound open VNOP? */
int
vnode_compound_open_available(vnode_t vp)
{
	return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
}
2495
2496 int
vnode_compound_op_available(vnode_t vp,compound_vnop_id_t opid)2497 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2498 {
2499 return (vp->v_mount->mnt_compound_ops & opid) != 0;
2500 }
2501
2502 /*
2503 * Returns vnode ref to current working directory; if a per-thread current
2504 * working directory is in effect, return that instead of the per process one.
2505 *
2506 * XXX Published, but not used.
2507 */
2508 vnode_t
current_workingdir(void)2509 current_workingdir(void)
2510 {
2511 return vfs_context_cwd(vfs_context_current());
2512 }
2513
2514 /*
2515 * Get a filesec and optional acl contents from an extended attribute.
2516 * Function will attempt to retrive ACL, UUID, and GUID information using a
2517 * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2518 *
2519 * Parameters: vp The vnode on which to operate.
2520 * fsecp The filesec (and ACL, if any) being
2521 * retrieved.
2522 * ctx The vnode context in which the
2523 * operation is to be attempted.
2524 *
2525 * Returns: 0 Success
2526 * !0 errno value
2527 *
2528 * Notes: The kauth_filesec_t in '*fsecp', if retrieved, will be in
2529 * host byte order, as will be the ACL contents, if any.
2530 * Internally, we will cannonize these values from network (PPC)
2531 * byte order after we retrieve them so that the on-disk contents
2532 * of the extended attribute are identical for both PPC and Intel
2533 * (if we were not being required to provide this service via
2534 * fallback, this would be the job of the filesystem
2535 * 'VNOP_GETATTR' call).
2536 *
2537 * We use ntohl() because it has a transitive property on Intel
2538 * machines and no effect on PPC mancines. This guarantees us
2539 *
2540 * XXX: Deleting rather than ignoreing a corrupt security structure is
2541 * probably the only way to reset it without assistance from an
2542 * file system integrity checking tool. Right now we ignore it.
2543 *
2544 * XXX: We should enummerate the possible errno values here, and where
2545 * in the code they originated.
2546 */
2547 static int
vnode_get_filesec(vnode_t vp,kauth_filesec_t * fsecp,vfs_context_t ctx)2548 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2549 {
2550 kauth_filesec_t fsec;
2551 uio_t fsec_uio;
2552 size_t fsec_size;
2553 size_t xsize, rsize;
2554 int error;
2555 uint32_t host_fsec_magic;
2556 uint32_t host_acl_entrycount;
2557
2558 fsec = NULL;
2559 fsec_uio = NULL;
2560
2561 /* find out how big the EA is */
2562 error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
2563 if (error != 0) {
2564 /* no EA, no filesec */
2565 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2566 error = 0;
2567 }
2568 /* either way, we are done */
2569 goto out;
2570 }
2571
2572 /*
2573 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2574 * ACE entrly ACL, and if it's larger than that, it must have the right
2575 * number of bytes such that it contains an atomic number of ACEs,
2576 * rather than partial entries. Otherwise, we ignore it.
2577 */
2578 if (!KAUTH_FILESEC_VALID(xsize)) {
2579 KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
2580 error = 0;
2581 goto out;
2582 }
2583
2584 /* how many entries would fit? */
2585 fsec_size = KAUTH_FILESEC_COUNT(xsize);
2586 if (fsec_size > KAUTH_ACL_MAX_ENTRIES) {
2587 KAUTH_DEBUG(" ERROR - Bogus (too large) kauth_fiilesec_t: %ld bytes", xsize);
2588 error = 0;
2589 goto out;
2590 }
2591
2592 /* get buffer and uio */
2593 if (((fsec = kauth_filesec_alloc((int)fsec_size)) == NULL) ||
2594 ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2595 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2596 KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL");
2597 error = ENOMEM;
2598 goto out;
2599 }
2600
2601 /* read security attribute */
2602 rsize = xsize;
2603 if ((error = vn_getxattr(vp,
2604 KAUTH_FILESEC_XATTR,
2605 fsec_uio,
2606 &rsize,
2607 XATTR_NOSECURITY,
2608 ctx)) != 0) {
2609 /* no attribute - no security data */
2610 if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2611 error = 0;
2612 }
2613 /* either way, we are done */
2614 goto out;
2615 }
2616
2617 /*
2618 * Validate security structure; the validation must take place in host
2619 * byte order. If it's corrupt, we will just ignore it.
2620 */
2621
2622 /* Validate the size before trying to convert it */
2623 if (rsize < KAUTH_FILESEC_SIZE(0)) {
2624 KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2625 goto out;
2626 }
2627
2628 /* Validate the magic number before trying to convert it */
2629 host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2630 if (fsec->fsec_magic != host_fsec_magic) {
2631 KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2632 goto out;
2633 }
2634
2635 /* Validate the entry count before trying to convert it. */
2636 host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2637 if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2638 if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2639 KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2640 goto out;
2641 }
2642 if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2643 KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2644 goto out;
2645 }
2646 }
2647
2648 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2649
2650 *fsecp = fsec;
2651 fsec = NULL;
2652 error = 0;
2653 out:
2654 if (fsec != NULL) {
2655 kauth_filesec_free(fsec);
2656 }
2657 if (fsec_uio != NULL) {
2658 uio_free(fsec_uio);
2659 }
2660 if (error) {
2661 *fsecp = NULL;
2662 }
2663 return error;
2664 }
2665
2666 /*
2667 * Set a filesec and optional acl contents into an extended attribute.
2668 * function will attempt to store ACL, UUID, and GUID information using a
2669 * write to a named extended attribute (KAUTH_FILESEC_XATTR). The 'acl'
2670 * may or may not point to the `fsec->fsec_acl`, depending on whether the
2671 * original caller supplied an acl.
2672 *
2673 * Parameters: vp The vnode on which to operate.
2674 * fsec The filesec being set.
2675 * acl The acl to be associated with 'fsec'.
2676 * ctx The vnode context in which the
2677 * operation is to be attempted.
2678 *
2679 * Returns: 0 Success
2680 * !0 errno value
2681 *
2682 * Notes: Both the fsec and the acl are always valid.
2683 *
2684 * The kauth_filesec_t in 'fsec', if any, is in host byte order,
2685 * as are the acl contents, if they are used. Internally, we will
2686 * cannonize these values into network (PPC) byte order before we
2687 * attempt to write them so that the on-disk contents of the
2688 * extended attribute are identical for both PPC and Intel (if we
2689 * were not being required to provide this service via fallback,
2690 * this would be the job of the filesystem 'VNOP_SETATTR' call).
2691 * We reverse this process on the way out, so we leave with the
2692 * same byte order we started with.
2693 *
2694 * XXX: We should enummerate the possible errno values here, and where
2695 * in the code they originated.
2696 */
2697 static int
vnode_set_filesec(vnode_t vp,kauth_filesec_t fsec,kauth_acl_t acl,vfs_context_t ctx)2698 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2699 {
2700 uio_t fsec_uio;
2701 int error;
2702 uint32_t saved_acl_copysize;
2703
2704 fsec_uio = NULL;
2705
2706 if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2707 KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL");
2708 error = ENOMEM;
2709 goto out;
2710 }
2711 /*
2712 * Save the pre-converted ACL copysize, because it gets swapped too
2713 * if we are running with the wrong endianness.
2714 */
2715 saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2716
2717 kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2718
2719 uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2720 uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2721 error = vn_setxattr(vp,
2722 KAUTH_FILESEC_XATTR,
2723 fsec_uio,
2724 XATTR_NOSECURITY, /* we have auth'ed already */
2725 ctx);
2726 VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2727
2728 kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2729
2730 out:
2731 if (fsec_uio != NULL) {
2732 uio_free(fsec_uio);
2733 }
2734 return error;
2735 }
2736
2737 /*
2738 * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2739 */
2740 void
vnode_attr_handle_uid_and_gid(struct vnode_attr * vap,mount_t mp,vfs_context_t ctx)2741 vnode_attr_handle_uid_and_gid(struct vnode_attr *vap, mount_t mp, vfs_context_t ctx)
2742 {
2743 uid_t nuid;
2744 gid_t ngid;
2745 bool is_suser = vfs_context_issuser(ctx) ? true : false;
2746
2747 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2748 if (is_suser && VATTR_IS_SUPPORTED(vap, va_uid)) {
2749 nuid = vap->va_uid;
2750 } else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2751 nuid = mp->mnt_fsowner;
2752 if (nuid == KAUTH_UID_NONE) {
2753 nuid = 99;
2754 }
2755 } else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2756 nuid = vap->va_uid;
2757 } else {
2758 /* this will always be something sensible */
2759 nuid = mp->mnt_fsowner;
2760 }
2761 if ((nuid == 99) && !is_suser) {
2762 nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2763 }
2764 VATTR_RETURN(vap, va_uid, nuid);
2765 }
2766 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2767 if (is_suser && VATTR_IS_SUPPORTED(vap, va_gid)) {
2768 ngid = vap->va_gid;
2769 } else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2770 ngid = mp->mnt_fsgroup;
2771 if (ngid == KAUTH_GID_NONE) {
2772 ngid = 99;
2773 }
2774 } else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2775 ngid = vap->va_gid;
2776 } else {
2777 /* this will always be something sensible */
2778 ngid = mp->mnt_fsgroup;
2779 }
2780 if ((ngid == 99) && !is_suser) {
2781 ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2782 }
2783 VATTR_RETURN(vap, va_gid, ngid);
2784 }
2785 }
2786
2787 /*
2788 * Returns: 0 Success
2789 * ENOMEM Not enough space [only if has filesec]
2790 * EINVAL Requested unknown attributes
2791 * VNOP_GETATTR: ???
2792 * vnode_get_filesec: ???
2793 * kauth_cred_guid2uid: ???
2794 * kauth_cred_guid2gid: ???
2795 * vfs_update_vfsstat: ???
2796 */
2797 int
vnode_getattr(vnode_t vp,struct vnode_attr * vap,vfs_context_t ctx)2798 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2799 {
2800 kauth_filesec_t fsec;
2801 kauth_acl_t facl;
2802 bool is_appendonly;
2803 int error;
2804
2805 /*
2806 * Reject attempts to fetch unknown attributes.
2807 */
2808 if (vap->va_active & ~VNODE_ATTR_ALL) {
2809 return EINVAL;
2810 }
2811
2812 /* don't ask for extended security data if the filesystem doesn't support it */
2813 if (!vfs_extendedsecurity(vnode_mount(vp))) {
2814 VATTR_CLEAR_ACTIVE(vap, va_acl);
2815 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2816 VATTR_CLEAR_ACTIVE(vap, va_guuid);
2817 }
2818
2819 /*
2820 * If the caller wants size values we might have to synthesise, give the
2821 * filesystem the opportunity to supply better intermediate results.
2822 */
2823 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2824 VATTR_IS_ACTIVE(vap, va_total_size) ||
2825 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2826 VATTR_SET_ACTIVE(vap, va_data_size);
2827 VATTR_SET_ACTIVE(vap, va_data_alloc);
2828 VATTR_SET_ACTIVE(vap, va_total_size);
2829 VATTR_SET_ACTIVE(vap, va_total_alloc);
2830 }
2831
2832 vap->va_vaflags &= ~VA_USEFSID;
2833
2834 is_appendonly = vnode_isappendonly(vp);
2835
2836 error = VNOP_GETATTR(vp, vap, ctx);
2837 if (error) {
2838 KAUTH_DEBUG("ERROR - returning %d", error);
2839 goto out;
2840 }
2841
2842 /*
2843 * If extended security data was requested but not returned, try the fallback
2844 * path.
2845 */
2846 if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2847 fsec = NULL;
2848
2849 if (XATTR_VNODE_SUPPORTED(vp)) {
2850 /* try to get the filesec */
2851 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2852 goto out;
2853 }
2854 }
2855 /* if no filesec, no attributes */
2856 if (fsec == NULL) {
2857 VATTR_RETURN(vap, va_acl, NULL);
2858 VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2859 VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2860 } else {
2861 /* looks good, try to return what we were asked for */
2862 VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2863 VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2864
2865 /* only return the ACL if we were actually asked for it */
2866 if (VATTR_IS_ACTIVE(vap, va_acl)) {
2867 if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2868 VATTR_RETURN(vap, va_acl, NULL);
2869 } else {
2870 facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2871 if (facl == NULL) {
2872 kauth_filesec_free(fsec);
2873 error = ENOMEM;
2874 goto out;
2875 }
2876 __nochk_bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2877 VATTR_RETURN(vap, va_acl, facl);
2878 }
2879 }
2880 kauth_filesec_free(fsec);
2881 }
2882 }
2883 /*
2884 * If someone gave us an unsolicited filesec, toss it. We promise that
2885 * we're OK with a filesystem giving us anything back, but our callers
2886 * only expect what they asked for.
2887 */
2888 if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2889 if (vap->va_acl != NULL) {
2890 kauth_acl_free(vap->va_acl);
2891 }
2892 VATTR_CLEAR_SUPPORTED(vap, va_acl);
2893 }
2894
2895 #if 0 /* enable when we have a filesystem only supporting UUIDs */
2896 /*
2897 * Handle the case where we need a UID/GID, but only have extended
2898 * security information.
2899 */
2900 if (VATTR_NOT_RETURNED(vap, va_uid) &&
2901 VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2902 !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2903 if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0) {
2904 VATTR_RETURN(vap, va_uid, nuid);
2905 }
2906 }
2907 if (VATTR_NOT_RETURNED(vap, va_gid) &&
2908 VATTR_IS_SUPPORTED(vap, va_guuid) &&
2909 !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2910 if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0) {
2911 VATTR_RETURN(vap, va_gid, ngid);
2912 }
2913 }
2914 #endif
2915
2916 vnode_attr_handle_uid_and_gid(vap, vp->v_mount, ctx);
2917
2918 /*
2919 * Synthesise some values that can be reasonably guessed.
2920 */
2921 if (!VATTR_IS_SUPPORTED(vap, va_iosize)) {
2922 assert(vp->v_mount->mnt_vfsstat.f_iosize <= UINT32_MAX);
2923 VATTR_RETURN(vap, va_iosize, (uint32_t)vp->v_mount->mnt_vfsstat.f_iosize);
2924 }
2925
2926 if (!VATTR_IS_SUPPORTED(vap, va_flags)) {
2927 VATTR_RETURN(vap, va_flags, 0);
2928 } else if (VATTR_IS_ACTIVE(vap, va_flags)) {
2929 if ((vap->va_flags & APPEND) && !is_appendonly) {
2930 os_atomic_or(&vp->v_ext_flag, VE_APPENDONLY, relaxed);
2931 } else if (!(vap->va_flags & APPEND) && is_appendonly) {
2932 os_atomic_andnot(&vp->v_ext_flag, VE_APPENDONLY, relaxed);
2933 }
2934 }
2935
2936 if (!VATTR_IS_SUPPORTED(vap, va_filerev)) {
2937 VATTR_RETURN(vap, va_filerev, 0);
2938 }
2939
2940 if (!VATTR_IS_SUPPORTED(vap, va_gen)) {
2941 VATTR_RETURN(vap, va_gen, 0);
2942 }
2943
2944 /*
2945 * Default sizes. Ordering here is important, as later defaults build on earlier ones.
2946 */
2947 if (VATTR_IS_SUPPORTED(vap, va_data_size)) {
2948 /* va_data_size (uint64_t) is often assigned to off_t (int64_t), which can result in a negative size. */
2949 if (vap->va_data_size > INT64_MAX) {
2950 vap->va_data_size = INT64_MAX;
2951 }
2952 } else {
2953 VATTR_RETURN(vap, va_data_size, 0);
2954 }
2955
2956 /* do we want any of the possibly-computed values? */
2957 if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2958 VATTR_IS_ACTIVE(vap, va_total_size) ||
2959 VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2960 /* make sure f_bsize is valid */
2961 if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2962 if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) {
2963 goto out;
2964 }
2965 }
2966
2967 /* default va_data_alloc from va_data_size */
2968 if (!VATTR_IS_SUPPORTED(vap, va_data_alloc)) {
2969 VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2970 }
2971
2972 /* default va_total_size from va_data_size */
2973 if (!VATTR_IS_SUPPORTED(vap, va_total_size)) {
2974 VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2975 }
2976
2977 /* default va_total_alloc from va_total_size which is guaranteed at this point */
2978 if (!VATTR_IS_SUPPORTED(vap, va_total_alloc)) {
2979 VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2980 }
2981 }
2982
2983 /*
2984 * If we don't have a change time, pull it from the modtime.
2985 */
2986 if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time)) {
2987 VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2988 }
2989
2990 /*
2991 * This is really only supported for the creation VNOPs, but since the field is there
2992 * we should populate it correctly.
2993 */
2994 VATTR_RETURN(vap, va_type, vp->v_type);
2995
2996 /*
2997 * The fsid can be obtained from the mountpoint directly.
2998 */
2999 if (VATTR_IS_ACTIVE(vap, va_fsid) &&
3000 (!VATTR_IS_SUPPORTED(vap, va_fsid) ||
3001 vap->va_vaflags & VA_REALFSID || !(vap->va_vaflags & VA_USEFSID))) {
3002 VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
3003 }
3004
3005 out:
3006 vap->va_vaflags &= ~VA_USEFSID;
3007
3008 return error;
3009 }
3010
3011 /*
3012 * Choose 32 bit or 64 bit fsid
3013 */
3014 uint64_t
vnode_get_va_fsid(struct vnode_attr * vap)3015 vnode_get_va_fsid(struct vnode_attr *vap)
3016 {
3017 if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
3018 return (uint64_t)vap->va_fsid64.val[0] + ((uint64_t)vap->va_fsid64.val[1] << 32);
3019 }
3020 return vap->va_fsid;
3021 }
3022
3023 /*
3024 * Set the attributes on a vnode in a vnode context.
3025 *
3026 * Parameters: vp The vnode whose attributes to set.
3027 * vap A pointer to the attributes to set.
3028 * ctx The vnode context in which the
3029 * operation is to be attempted.
3030 *
3031 * Returns: 0 Success
3032 * !0 errno value
3033 *
3034 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
3035 *
3036 * The contents of the data area pointed to by 'vap' may be
3037 * modified if the vnode is on a filesystem which has been
3038 * mounted with ingore ownership flags, or by the underlyng
3039 * VFS itself, or by the fallback code, if the underlying VFS
3040 * does not support ACL, UUID, or GUUID attributes directly.
3041 *
3042 * XXX: We should enummerate the possible errno values here, and where
3043 * in the code they originated.
3044 */
3045 int
vnode_setattr(vnode_t vp,struct vnode_attr * vap,vfs_context_t ctx)3046 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
3047 {
3048 int error;
3049 #if CONFIG_FSE
3050 uint64_t active;
3051 int is_perm_change = 0;
3052 int is_stat_change = 0;
3053 #endif
3054
3055 /*
3056 * Reject attempts to set unknown attributes.
3057 */
3058 if (vap->va_active & ~VNODE_ATTR_ALL) {
3059 return EINVAL;
3060 }
3061
3062 /*
3063 * Make sure the filesystem is mounted R/W.
3064 * If not, return an error.
3065 */
3066 if (vfs_isrdonly(vp->v_mount)) {
3067 error = EROFS;
3068 goto out;
3069 }
3070
3071 #if DEVELOPMENT || DEBUG
3072 /*
3073 * XXX VSWAP: Check for entitlements or special flag here
3074 * so we can restrict access appropriately.
3075 */
3076 #else /* DEVELOPMENT || DEBUG */
3077
3078 if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
3079 error = EPERM;
3080 goto out;
3081 }
3082 #endif /* DEVELOPMENT || DEBUG */
3083
3084 #if NAMEDSTREAMS
3085 /* For streams, va_data_size is the only setable attribute. */
3086 if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
3087 error = EPERM;
3088 goto out;
3089 }
3090 #endif
3091 /* Check for truncation */
3092 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
3093 switch (vp->v_type) {
3094 case VREG:
3095 /* For regular files it's ok */
3096 break;
3097 case VDIR:
3098 /* Not allowed to truncate directories */
3099 error = EISDIR;
3100 goto out;
3101 default:
3102 /* For everything else we will clear the bit and let underlying FS decide on the rest */
3103 VATTR_CLEAR_ACTIVE(vap, va_data_size);
3104 if (vap->va_active) {
3105 break;
3106 }
3107 /* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
3108 return 0;
3109 }
3110 }
3111
3112 /*
3113 * If ownership is being ignored on this volume, we silently discard
3114 * ownership changes.
3115 */
3116 if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
3117 VATTR_CLEAR_ACTIVE(vap, va_uid);
3118 VATTR_CLEAR_ACTIVE(vap, va_gid);
3119 }
3120
3121 /*
3122 * Make sure that extended security is enabled if we're going to try
3123 * to set any.
3124 */
3125 if (!vfs_extendedsecurity(vnode_mount(vp)) &&
3126 (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
3127 KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
3128 error = ENOTSUP;
3129 goto out;
3130 }
3131
3132 /* Never allow the setting of any unsupported superuser flags. */
3133 if (VATTR_IS_ACTIVE(vap, va_flags)) {
3134 vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
3135 }
3136
3137 #if CONFIG_FSE
3138 /*
3139 * Remember all of the active attributes that we're
3140 * attempting to modify.
3141 */
3142 active = vap->va_active & ~VNODE_ATTR_RDONLY;
3143 #endif
3144
3145 error = VNOP_SETATTR(vp, vap, ctx);
3146
3147 if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) {
3148 error = vnode_setattr_fallback(vp, vap, ctx);
3149 }
3150
3151 #if CONFIG_FSE
3152 #define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \
3153 VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \
3154 VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl))
3155
3156 /*
3157 * Now that we've changed them, decide whether to send an
3158 * FSevent.
3159 */
3160 if ((active & PERMISSION_BITS) & vap->va_supported) {
3161 is_perm_change = 1;
3162 } else {
3163 /*
3164 * We've already checked the permission bits, and we
3165 * also want to filter out access time / backup time
3166 * changes.
3167 */
3168 active &= ~(PERMISSION_BITS |
3169 VNODE_ATTR_BIT(va_access_time) |
3170 VNODE_ATTR_BIT(va_backup_time));
3171
3172 /* Anything left to notify about? */
3173 if (active & vap->va_supported) {
3174 is_stat_change = 1;
3175 }
3176 }
3177
3178 if (error == 0) {
3179 if (is_perm_change) {
3180 if (need_fsevent(FSE_CHOWN, vp)) {
3181 add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
3182 }
3183 } else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) {
3184 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
3185 }
3186 }
3187 #undef PERMISSION_BITS
3188 #endif
3189
3190 out:
3191 return error;
3192 }
3193
3194 /*
3195 * Fallback for setting the attributes on a vnode in a vnode context. This
3196 * Function will attempt to store ACL, UUID, and GUID information utilizing
3197 * a read/modify/write operation against an EA used as a backing store for
3198 * the object.
3199 *
3200 * Parameters: vp The vnode whose attributes to set.
3201 * vap A pointer to the attributes to set.
3202 * ctx The vnode context in which the
3203 * operation is to be attempted.
3204 *
3205 * Returns: 0 Success
3206 * !0 errno value
3207 *
3208 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order,
3209 * as are the fsec and lfsec, if they are used.
3210 *
3211 * The contents of the data area pointed to by 'vap' may be
3212 * modified to indicate that the attribute is supported for
3213 * any given requested attribute.
3214 *
3215 * XXX: We should enummerate the possible errno values here, and where
3216 * in the code they originated.
3217 */
3218 int
vnode_setattr_fallback(vnode_t vp,struct vnode_attr * vap,vfs_context_t ctx)3219 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
3220 {
3221 kauth_filesec_t fsec;
3222 kauth_acl_t facl;
3223 struct kauth_filesec lfsec;
3224 int error;
3225
3226 error = 0;
3227
3228 /*
3229 * Extended security fallback via extended attributes.
3230 *
3231 * Note that we do not free the filesec; the caller is expected to
3232 * do this.
3233 */
3234 if (VATTR_NOT_RETURNED(vap, va_acl) ||
3235 VATTR_NOT_RETURNED(vap, va_uuuid) ||
3236 VATTR_NOT_RETURNED(vap, va_guuid)) {
3237 VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
3238
3239 /*
3240 * Fail for file types that we don't permit extended security
3241 * to be set on.
3242 */
3243 if (!XATTR_VNODE_SUPPORTED(vp)) {
3244 VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
3245 error = EINVAL;
3246 goto out;
3247 }
3248
3249 /*
3250 * If we don't have all the extended security items, we need
3251 * to fetch the existing data to perform a read-modify-write
3252 * operation.
3253 */
3254 fsec = NULL;
3255 if (!VATTR_IS_ACTIVE(vap, va_acl) ||
3256 !VATTR_IS_ACTIVE(vap, va_uuuid) ||
3257 !VATTR_IS_ACTIVE(vap, va_guuid)) {
3258 if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
3259 KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
3260 goto out;
3261 }
3262 }
3263 /* if we didn't get a filesec, use our local one */
3264 if (fsec == NULL) {
3265 KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
3266 fsec = &lfsec;
3267 } else {
3268 KAUTH_DEBUG("SETATTR - updating existing filesec");
3269 }
3270 /* find the ACL */
3271 facl = &fsec->fsec_acl;
3272
3273 /* if we're using the local filesec, we need to initialise it */
3274 if (fsec == &lfsec) {
3275 fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
3276 fsec->fsec_owner = kauth_null_guid;
3277 fsec->fsec_group = kauth_null_guid;
3278 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
3279 facl->acl_flags = 0;
3280 }
3281
3282 /*
3283 * Update with the supplied attributes.
3284 */
3285 if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
3286 KAUTH_DEBUG("SETATTR - updating owner UUID");
3287 fsec->fsec_owner = vap->va_uuuid;
3288 VATTR_SET_SUPPORTED(vap, va_uuuid);
3289 }
3290 if (VATTR_IS_ACTIVE(vap, va_guuid)) {
3291 KAUTH_DEBUG("SETATTR - updating group UUID");
3292 fsec->fsec_group = vap->va_guuid;
3293 VATTR_SET_SUPPORTED(vap, va_guuid);
3294 }
3295 if (VATTR_IS_ACTIVE(vap, va_acl)) {
3296 if (vap->va_acl == NULL) {
3297 KAUTH_DEBUG("SETATTR - removing ACL");
3298 facl->acl_entrycount = KAUTH_FILESEC_NOACL;
3299 } else {
3300 KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
3301 facl = vap->va_acl;
3302 }
3303 VATTR_SET_SUPPORTED(vap, va_acl);
3304 }
3305
3306 /*
3307 * If the filesec data is all invalid, we can just remove
3308 * the EA completely.
3309 */
3310 if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
3311 kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
3312 kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
3313 error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
3314 /* no attribute is ok, nothing to delete */
3315 if (error == ENOATTR) {
3316 error = 0;
3317 }
3318 VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
3319 } else {
3320 /* write the EA */
3321 error = vnode_set_filesec(vp, fsec, facl, ctx);
3322 VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
3323 }
3324
3325 /* if we fetched a filesec, dispose of the buffer */
3326 if (fsec != &lfsec) {
3327 kauth_filesec_free(fsec);
3328 }
3329 }
3330 out:
3331
3332 return error;
3333 }
3334
3335 /*
3336 * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
3337 * event on a vnode.
3338 */
int
vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
{
	/* These are the same as the corresponding knotes, at least for now. Cheating a little. */
	uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
	    | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
	uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
	    | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
	/* Start with the events that map 1:1 onto knote flags. */
	uint32_t knote_events = (events & knote_mask);

	/* Permissions are not explicitly part of the kqueue model */
	if (events & VNODE_EVENT_PERMS) {
		knote_events |= NOTE_ATTRIB;
	}

	/* Directory contents information just becomes NOTE_WRITE */
	if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
		knote_events |= NOTE_WRITE;
	}

	if (knote_events) {
		/* Deliver the translated events to any attached knotes. */
		lock_vnode_and_post(vp, knote_events);
#if CONFIG_FSE
		/* vap (if supplied) is only consumed for fsevents generation. */
		if (vap != NULL) {
			create_fsevent_from_kevent(vp, events, vap);
		}
#else
		(void)vap;
#endif
	}

	/* Always succeeds. */
	return 0;
}
3372
3373
3374
3375 int
vnode_isdyldsharedcache(vnode_t vp)3376 vnode_isdyldsharedcache(vnode_t vp)
3377 {
3378 return (vp->v_flag & VSHARED_DYLD) ? 1 : 0;
3379 }
3380
3381
3382 /*
3383 * For a filesystem that isn't tracking its own vnode watchers:
3384 * check whether a vnode is being monitored.
3385 */
3386 int
vnode_ismonitored(vnode_t vp)3387 vnode_ismonitored(vnode_t vp)
3388 {
3389 return vp->v_knotes.slh_first != NULL;
3390 }
3391
int
vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
{
	/* Pre-set the out parameter so callers see NULLVP on any failure path. */
	if (out_vpp) {
		*out_vpp = NULLVP;
	}
#if NULLFS
	/* Only nullfs stacks vnodes here; ask it for the vnode in_vp shadows. */
	return nullfs_getbackingvnode(in_vp, out_vpp);
#else
#pragma unused(in_vp)
	/* No stacking filesystem configured: there is never a backing vnode. */
	return ENOENT;
#endif
}
3405
3406 /*
3407 * Initialize a struct vnode_attr and activate the attributes required
3408 * by the vnode_notify() call.
3409 */
int
vfs_get_notify_attributes(struct vnode_attr *vap)
{
	VATTR_INIT(vap);
	/* Request exactly the attribute set that vnode_notify() consumes. */
	vap->va_active = VNODE_NOTIFY_ATTRS;
	return 0;
}
3417
3418 #if CONFIG_TRIGGERS
3419 int
vfs_settriggercallback(fsid_t * fsid,vfs_trigger_callback_t vtc,void * data,uint32_t flags __unused,vfs_context_t ctx)3420 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
3421 {
3422 int error;
3423 mount_t mp;
3424
3425 mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
3426 if (mp == NULL) {
3427 return ENOENT;
3428 }
3429
3430 error = vfs_busy(mp, LK_NOWAIT);
3431 mount_iterdrop(mp);
3432
3433 if (error != 0) {
3434 return ENOENT;
3435 }
3436
3437 mount_lock(mp);
3438 if (mp->mnt_triggercallback != NULL) {
3439 error = EBUSY;
3440 mount_unlock(mp);
3441 goto out;
3442 }
3443
3444 mp->mnt_triggercallback = vtc;
3445 mp->mnt_triggerdata = data;
3446 mount_unlock(mp);
3447
3448 mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
3449
3450 out:
3451 vfs_unbusy(mp);
3452 return 0;
3453 }
3454 #endif /* CONFIG_TRIGGERS */
3455
3456 /*
3457 * Definition of vnode operations.
3458 */
3459
3460 #if 0
3461 /*
3462 *#
3463 *#% lookup dvp L ? ?
3464 *#% lookup vpp - L -
3465 */
3466 struct vnop_lookup_args {
3467 struct vnodeop_desc *a_desc;
3468 vnode_t a_dvp;
3469 vnode_t *a_vpp;
3470 struct componentname *a_cnp;
3471 vfs_context_t a_context;
3472 };
3473 #endif /* 0*/
3474
3475 /*
3476 * Returns: 0 Success
3477 * lock_fsnode:ENOENT No such file or directory [only for VFS
3478 * that is not thread safe & vnode is
3479 * currently being/has been terminated]
3480 * <vfs_lookup>:ENAMETOOLONG
3481 * <vfs_lookup>:ENOENT
3482 * <vfs_lookup>:EJUSTRETURN
3483 * <vfs_lookup>:EPERM
3484 * <vfs_lookup>:EISDIR
3485 * <vfs_lookup>:ENOTDIR
3486 * <vfs_lookup>:???
3487 *
3488 * Note: The return codes from the underlying VFS's lookup routine can't
3489 * be fully enumerated here, since third party VFS authors may not
3490 * limit their error returns to the ones documented here, even
3491 * though this may result in some programs functioning incorrectly.
3492 *
3493 * The return codes documented above are those which may currently
3494 * be returned by HFS from hfs_lookup, not including additional
3495 * error code which may be propagated from underlying routines.
3496 */
3497 errno_t
VNOP_LOOKUP(vnode_t dvp,vnode_t * vpp,struct componentname * cnp,vfs_context_t ctx)3498 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
3499 {
3500 int _err;
3501 struct vnop_lookup_args a;
3502
3503 a.a_desc = &vnop_lookup_desc;
3504 a.a_dvp = dvp;
3505 a.a_vpp = vpp;
3506 a.a_cnp = cnp;
3507 a.a_context = ctx;
3508
3509 _err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
3510 if (_err == 0 && *vpp) {
3511 DTRACE_FSINFO(lookup, vnode_t, *vpp);
3512 }
3513
3514 return _err;
3515 }
3516
3517 #if 0
3518 struct vnop_compound_open_args {
3519 struct vnodeop_desc *a_desc;
3520 vnode_t a_dvp;
3521 vnode_t *a_vpp;
3522 struct componentname *a_cnp;
3523 int32_t a_flags;
3524 int32_t a_fmode;
3525 struct vnode_attr *a_vap;
3526 vfs_context_t a_context;
3527 void *a_reserved;
3528 };
3529 #endif /* 0 */
3530
int
VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_open_args a;
	int did_create = 0;
	int want_create;
	uint32_t tmp_status = 0;
	struct componentname *cnp = &ndp->ni_cnd;

	want_create = (flags & O_CREAT);

	a.a_desc = &vnop_compound_open_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp; /* Could be NULL */
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_fmode = fmode;
	/* Use a local status word when the caller didn't supply one. */
	a.a_status = (statusp != NULL) ? statusp : &tmp_status;
	a.a_vap = vap;
	a.a_context = ctx;
	a.a_open_create_authorizer = vn_authorize_create;
	a.a_open_existing_authorizer = vn_authorize_open_existing;
	a.a_reserved = NULL;

	/* Contract checks: a vap must be supplied exactly when O_CREAT is set. */
	if (dvp == NULLVP) {
		panic("No dvp?");
	}
	if (want_create && !vap) {
		panic("Want create, but no vap?");
	}
	if (!want_create && vap) {
		panic("Don't want create, but have a vap?");
	}

	_err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
	/*
	 * NOTE(review): the non-create DTRACE path dereferences *vpp
	 * unconditionally — it appears a non-create compound open is
	 * expected to always have a vpp; confirm against callers.
	 */
	if (want_create) {
		if (_err == 0 && *vpp) {
			DTRACE_FSINFO(compound_open, vnode_t, *vpp);
		} else {
			DTRACE_FSINFO(compound_open, vnode_t, dvp);
		}
	} else {
		DTRACE_FSINFO(compound_open, vnode_t, *vpp);
	}

	/* The filesystem reports via the status word whether it created. */
	did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);

	if (did_create && !want_create) {
		panic("Filesystem did a create, even though none was requested?");
	}

	if (did_create) {
#if CONFIG_APPLEDOUBLE
		if (!NATIVE_XATTR(dvp)) {
			/*
			 * Remove stale Apple Double file (if any).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
		}
#endif /* CONFIG_APPLEDOUBLE */
		/* On create, provide kqueue notification */
		post_event_if_success(dvp, _err, NOTE_WRITE);
	}

	/* Finish the compound lookup bookkeeping (name cache, ni state). */
	lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
#if 0 /* FSEvents... */
	if (*vpp && _err && _err != EKEEPLOOKING) {
		vnode_put(*vpp);
		*vpp = NULLVP;
	}
#endif /* 0 */

	return _err;
}
3606
3607 #if 0
3608 struct vnop_create_args {
3609 struct vnodeop_desc *a_desc;
3610 vnode_t a_dvp;
3611 vnode_t *a_vpp;
3612 struct componentname *a_cnp;
3613 struct vnode_attr *a_vap;
3614 vfs_context_t a_context;
3615 };
3616 #endif /* 0*/
3617 errno_t
VNOP_CREATE(vnode_t dvp,vnode_t * vpp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx)3618 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3619 {
3620 int _err;
3621 struct vnop_create_args a;
3622
3623 a.a_desc = &vnop_create_desc;
3624 a.a_dvp = dvp;
3625 a.a_vpp = vpp;
3626 a.a_cnp = cnp;
3627 a.a_vap = vap;
3628 a.a_context = ctx;
3629
3630 _err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
3631 if (_err == 0 && *vpp) {
3632 DTRACE_FSINFO(create, vnode_t, *vpp);
3633 }
3634
3635 #if CONFIG_APPLEDOUBLE
3636 if (_err == 0 && !NATIVE_XATTR(dvp)) {
3637 /*
3638 * Remove stale Apple Double file (if any).
3639 */
3640 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3641 }
3642 #endif /* CONFIG_APPLEDOUBLE */
3643
3644 post_event_if_success(dvp, _err, NOTE_WRITE);
3645
3646 return _err;
3647 }
3648
3649 #if 0
3650 /*
3651 *#
3652 *#% whiteout dvp L L L
3653 *#% whiteout cnp - - -
3654 *#% whiteout flag - - -
3655 *#
3656 */
3657 struct vnop_whiteout_args {
3658 struct vnodeop_desc *a_desc;
3659 vnode_t a_dvp;
3660 struct componentname *a_cnp;
3661 int a_flags;
3662 vfs_context_t a_context;
3663 };
3664 #endif /* 0*/
errno_t
VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
    __unused int flags, __unused vfs_context_t ctx)
{
	/* Whiteouts are no longer supported; this always fails. */
	return ENOTSUP; // XXX OBSOLETE
}
3671
3672 #if 0
3673 /*
3674 *#
3675 *#% mknod dvp L U U
3676 *#% mknod vpp - X -
3677 *#
3678 */
3679 struct vnop_mknod_args {
3680 struct vnodeop_desc *a_desc;
3681 vnode_t a_dvp;
3682 vnode_t *a_vpp;
3683 struct componentname *a_cnp;
3684 struct vnode_attr *a_vap;
3685 vfs_context_t a_context;
3686 };
3687 #endif /* 0*/
3688 errno_t
VNOP_MKNOD(vnode_t dvp,vnode_t * vpp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx)3689 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3690 {
3691 int _err;
3692 struct vnop_mknod_args a;
3693
3694 a.a_desc = &vnop_mknod_desc;
3695 a.a_dvp = dvp;
3696 a.a_vpp = vpp;
3697 a.a_cnp = cnp;
3698 a.a_vap = vap;
3699 a.a_context = ctx;
3700
3701 _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3702 if (_err == 0 && *vpp) {
3703 DTRACE_FSINFO(mknod, vnode_t, *vpp);
3704 }
3705
3706 post_event_if_success(dvp, _err, NOTE_WRITE);
3707
3708 return _err;
3709 }
3710
3711 #if 0
3712 /*
3713 *#
3714 *#% open vp L L L
3715 *#
3716 */
3717 struct vnop_open_args {
3718 struct vnodeop_desc *a_desc;
3719 vnode_t a_vp;
3720 int a_mode;
3721 vfs_context_t a_context;
3722 };
3723 #endif /* 0*/
3724 errno_t
VNOP_OPEN(vnode_t vp,int mode,vfs_context_t ctx)3725 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3726 {
3727 int _err;
3728 struct vnop_open_args a;
3729
3730 if (ctx == NULL) {
3731 ctx = vfs_context_current();
3732 }
3733 a.a_desc = &vnop_open_desc;
3734 a.a_vp = vp;
3735 a.a_mode = mode;
3736 a.a_context = ctx;
3737
3738 _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3739 DTRACE_FSINFO(open, vnode_t, vp);
3740
3741 return _err;
3742 }
3743
3744 #if 0
3745 /*
3746 *#
3747 *#% close vp U U U
3748 *#
3749 */
3750 struct vnop_close_args {
3751 struct vnodeop_desc *a_desc;
3752 vnode_t a_vp;
3753 int a_fflag;
3754 vfs_context_t a_context;
3755 };
3756 #endif /* 0*/
3757 errno_t
VNOP_CLOSE(vnode_t vp,int fflag,vfs_context_t ctx)3758 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3759 {
3760 int _err;
3761 struct vnop_close_args a;
3762
3763 if (ctx == NULL) {
3764 ctx = vfs_context_current();
3765 }
3766 a.a_desc = &vnop_close_desc;
3767 a.a_vp = vp;
3768 a.a_fflag = fflag;
3769 a.a_context = ctx;
3770
3771 _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3772 DTRACE_FSINFO(close, vnode_t, vp);
3773
3774 return _err;
3775 }
3776
3777 #if 0
3778 /*
3779 *#
3780 *#% access vp L L L
3781 *#
3782 */
3783 struct vnop_access_args {
3784 struct vnodeop_desc *a_desc;
3785 vnode_t a_vp;
3786 int a_action;
3787 vfs_context_t a_context;
3788 };
3789 #endif /* 0*/
3790 errno_t
VNOP_ACCESS(vnode_t vp,int action,vfs_context_t ctx)3791 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3792 {
3793 int _err;
3794 struct vnop_access_args a;
3795
3796 if (ctx == NULL) {
3797 ctx = vfs_context_current();
3798 }
3799 a.a_desc = &vnop_access_desc;
3800 a.a_vp = vp;
3801 a.a_action = action;
3802 a.a_context = ctx;
3803
3804 _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3805 DTRACE_FSINFO(access, vnode_t, vp);
3806
3807 return _err;
3808 }
3809
3810 #if 0
3811 /*
3812 *#
3813 *#% getattr vp = = =
3814 *#
3815 */
3816 struct vnop_getattr_args {
3817 struct vnodeop_desc *a_desc;
3818 vnode_t a_vp;
3819 struct vnode_attr *a_vap;
3820 vfs_context_t a_context;
3821 };
3822 #endif /* 0*/
3823 errno_t
VNOP_GETATTR(vnode_t vp,struct vnode_attr * vap,vfs_context_t ctx)3824 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3825 {
3826 int _err;
3827 struct vnop_getattr_args a;
3828
3829 a.a_desc = &vnop_getattr_desc;
3830 a.a_vp = vp;
3831 a.a_vap = vap;
3832 a.a_context = ctx;
3833
3834 _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3835 DTRACE_FSINFO(getattr, vnode_t, vp);
3836
3837 return _err;
3838 }
3839
3840 #if 0
3841 /*
3842 *#
3843 *#% setattr vp L L L
3844 *#
3845 */
3846 struct vnop_setattr_args {
3847 struct vnodeop_desc *a_desc;
3848 vnode_t a_vp;
3849 struct vnode_attr *a_vap;
3850 vfs_context_t a_context;
3851 };
3852 #endif /* 0*/
errno_t
VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_setattr_args a;

	a.a_desc = &vnop_setattr_desc;
	a.a_vp = vp;
	a.a_vap = vap;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(setattr, vnode_t, vp);

#if CONFIG_APPLEDOUBLE
	/*
	 * Shadow uid/gid/mod change to extended attribute file.
	 */
	if (_err == 0 && !NATIVE_XATTR(vp)) {
		struct vnode_attr *va;
		int change = 0;

		/* Build a fresh attr set holding only uid/gid/mode changes. */
		va = kalloc_type(struct vnode_attr, Z_WAITOK);
		VATTR_INIT(va);
		if (VATTR_IS_ACTIVE(vap, va_uid)) {
			VATTR_SET(va, va_uid, vap->va_uid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_gid)) {
			VATTR_SET(va, va_gid, vap->va_gid);
			change = 1;
		}
		if (VATTR_IS_ACTIVE(vap, va_mode)) {
			VATTR_SET(va, va_mode, vap->va_mode);
			change = 1;
		}
		if (change) {
			vnode_t dvp;
			const char *vname;

			dvp = vnode_getparent(vp);
			vname = vnode_getname(vp);

			/* Apply the same change to the "._" sidecar file. */
			xattrfile_setattr(dvp, vname, va, ctx);
			if (dvp != NULLVP) {
				vnode_put(dvp);
			}
			if (vname != NULL) {
				vnode_putname(vname);
			}
		}
		kfree_type(struct vnode_attr, va);
	}
#endif /* CONFIG_APPLEDOUBLE */

	/*
	 * If we have changed any of the things about the file that are likely
	 * to result in changes to authorization results, blow the vnode auth
	 * cache
	 */
	if (_err == 0 && (
		    VATTR_IS_SUPPORTED(vap, va_mode) ||
		    VATTR_IS_SUPPORTED(vap, va_uid) ||
		    VATTR_IS_SUPPORTED(vap, va_gid) ||
		    VATTR_IS_SUPPORTED(vap, va_flags) ||
		    VATTR_IS_SUPPORTED(vap, va_acl) ||
		    VATTR_IS_SUPPORTED(vap, va_uuuid) ||
		    VATTR_IS_SUPPORTED(vap, va_guuid))) {
		vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);

#if NAMEDSTREAMS
		/* The resource-fork stream shares auth state; invalidate it too. */
		if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
			vnode_t svp;
			if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
				vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
				vnode_put(svp);
			}
		}
#endif /* NAMEDSTREAMS */
	}


	post_event_if_success(vp, _err, NOTE_ATTRIB);

	return _err;
}
3939
3940
3941 #if 0
3942 /*
3943 *#
3944 *#% read vp L L L
3945 *#
3946 */
3947 struct vnop_read_args {
3948 struct vnodeop_desc *a_desc;
3949 vnode_t a_vp;
3950 struct uio *a_uio;
3951 int a_ioflag;
3952 vfs_context_t a_context;
3953 };
3954 #endif /* 0*/
3955 errno_t
VNOP_READ(vnode_t vp,struct uio * uio,int ioflag,vfs_context_t ctx)3956 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3957 {
3958 int _err;
3959 struct vnop_read_args a;
3960 #if CONFIG_DTRACE
3961 user_ssize_t resid = uio_resid(uio);
3962 #endif
3963
3964 if (ctx == NULL) {
3965 return EINVAL;
3966 }
3967
3968 a.a_desc = &vnop_read_desc;
3969 a.a_vp = vp;
3970 a.a_uio = uio;
3971 a.a_ioflag = ioflag;
3972 a.a_context = ctx;
3973
3974 _err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3975 DTRACE_FSINFO_IO(read,
3976 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3977
3978 return _err;
3979 }
3980
3981
3982 #if 0
3983 /*
3984 *#
3985 *#% write vp L L L
3986 *#
3987 */
3988 struct vnop_write_args {
3989 struct vnodeop_desc *a_desc;
3990 vnode_t a_vp;
3991 struct uio *a_uio;
3992 int a_ioflag;
3993 vfs_context_t a_context;
3994 };
3995 #endif /* 0*/
3996 errno_t
VNOP_WRITE(vnode_t vp,struct uio * uio,int ioflag,vfs_context_t ctx)3997 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3998 {
3999 struct vnop_write_args a;
4000 int _err;
4001 #if CONFIG_DTRACE
4002 user_ssize_t resid = uio_resid(uio);
4003 #endif
4004
4005 if (ctx == NULL) {
4006 return EINVAL;
4007 }
4008
4009 a.a_desc = &vnop_write_desc;
4010 a.a_vp = vp;
4011 a.a_uio = uio;
4012 a.a_ioflag = ioflag;
4013 a.a_context = ctx;
4014
4015 _err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
4016 DTRACE_FSINFO_IO(write,
4017 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
4018
4019 post_event_if_success(vp, _err, NOTE_WRITE);
4020
4021 return _err;
4022 }
4023
4024
4025 #if 0
4026 /*
4027 *#
4028 *#% ioctl vp U U U
4029 *#
4030 */
4031 struct vnop_ioctl_args {
4032 struct vnodeop_desc *a_desc;
4033 vnode_t a_vp;
4034 u_long a_command;
4035 caddr_t a_data;
4036 int a_fflag;
4037 vfs_context_t a_context;
4038 };
4039 #endif /* 0*/
errno_t
VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
{
	int _err;
	struct vnop_ioctl_args a;

	if (ctx == NULL) {
		ctx = vfs_context_current();
	}

	/*
	 * This check should probably have been put in the TTY code instead...
	 *
	 * We have to be careful about what we assume during startup and shutdown.
	 * We have to be able to use the root filesystem's device vnode even when
	 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
	 * structure.  If there is no data pointer, it doesn't matter whether
	 * the device is 64-bit ready.  Any command (like DKIOCSYNCHRONIZE)
	 * which passes NULL for its data pointer can therefore be used during
	 * mount or unmount of the root filesystem.
	 *
	 * Depending on what root filesystems need to do during mount/unmount, we
	 * may need to loosen this check again in the future.
	 */
	if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
		if (data != NULL && !vnode_vfs64bitready(vp)) {
			return ENOTTY;
		}
	}

	/*
	 * Answer the SSD query for the root device directly from the cached
	 * rootvp_is_ssd flag, without dispatching to the filesystem.
	 */
	if ((command == DKIOCISSOLIDSTATE) && (vp == rootvp) && rootvp_is_ssd && data) {
		*data = 1;
		return 0;
	}

	a.a_desc = &vnop_ioctl_desc;
	a.a_vp = vp;
	a.a_command = command;
	a.a_data = data;
	a.a_fflag = fflag;
	a.a_context = ctx;

	_err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(ioctl, vnode_t, vp);

	return _err;
}
4087
4088
4089 #if 0
4090 /*
4091 *#
4092 *#% select vp U U U
4093 *#
4094 */
4095 struct vnop_select_args {
4096 struct vnodeop_desc *a_desc;
4097 vnode_t a_vp;
4098 int a_which;
4099 int a_fflags;
4100 void *a_wql;
4101 vfs_context_t a_context;
4102 };
4103 #endif /* 0*/
4104 errno_t
VNOP_SELECT(vnode_t vp,int which,int fflags,void * wql,vfs_context_t ctx)4105 VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t ctx)
4106 {
4107 int _err;
4108 struct vnop_select_args a;
4109
4110 if (ctx == NULL) {
4111 ctx = vfs_context_current();
4112 }
4113 a.a_desc = &vnop_select_desc;
4114 a.a_vp = vp;
4115 a.a_which = which;
4116 a.a_fflags = fflags;
4117 a.a_context = ctx;
4118 a.a_wql = wql;
4119
4120 _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
4121 DTRACE_FSINFO(select, vnode_t, vp);
4122
4123 return _err;
4124 }
4125
4126
4127 #if 0
4128 /*
4129 *#
4130 *#% exchange fvp L L L
4131 *#% exchange tvp L L L
4132 *#
4133 */
4134 struct vnop_exchange_args {
4135 struct vnodeop_desc *a_desc;
4136 vnode_t a_fvp;
4137 vnode_t a_tvp;
4138 int a_options;
4139 vfs_context_t a_context;
4140 };
4141 #endif /* 0*/
4142 errno_t
VNOP_EXCHANGE(vnode_t fvp,vnode_t tvp,int options,vfs_context_t ctx)4143 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
4144 {
4145 int _err;
4146 struct vnop_exchange_args a;
4147
4148 a.a_desc = &vnop_exchange_desc;
4149 a.a_fvp = fvp;
4150 a.a_tvp = tvp;
4151 a.a_options = options;
4152 a.a_context = ctx;
4153
4154 _err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
4155 DTRACE_FSINFO(exchange, vnode_t, fvp);
4156
4157 /* Don't post NOTE_WRITE because file descriptors follow the data ... */
4158 post_event_if_success(fvp, _err, NOTE_ATTRIB);
4159 post_event_if_success(tvp, _err, NOTE_ATTRIB);
4160
4161 return _err;
4162 }
4163
4164
4165 #if 0
4166 /*
4167 *#
4168 *#% revoke vp U U U
4169 *#
4170 */
4171 struct vnop_revoke_args {
4172 struct vnodeop_desc *a_desc;
4173 vnode_t a_vp;
4174 int a_flags;
4175 vfs_context_t a_context;
4176 };
4177 #endif /* 0*/
4178 errno_t
VNOP_REVOKE(vnode_t vp,int flags,vfs_context_t ctx)4179 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
4180 {
4181 struct vnop_revoke_args a;
4182 int _err;
4183
4184 a.a_desc = &vnop_revoke_desc;
4185 a.a_vp = vp;
4186 a.a_flags = flags;
4187 a.a_context = ctx;
4188
4189 _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
4190 DTRACE_FSINFO(revoke, vnode_t, vp);
4191
4192 return _err;
4193 }
4194
4195
4196 #if 0
4197 /*
4198 *#
4199 *# mmap_check - vp U U U
4200 *#
4201 */
4202 struct vnop_mmap_check_args {
4203 struct vnodeop_desc *a_desc;
4204 vnode_t a_vp;
4205 int a_flags;
4206 vfs_context_t a_context;
4207 };
4208 #endif /* 0 */
4209 errno_t
VNOP_MMAP_CHECK(vnode_t vp,int flags,vfs_context_t ctx)4210 VNOP_MMAP_CHECK(vnode_t vp, int flags, vfs_context_t ctx)
4211 {
4212 int _err;
4213 struct vnop_mmap_check_args a;
4214
4215 a.a_desc = &vnop_mmap_check_desc;
4216 a.a_vp = vp;
4217 a.a_flags = flags;
4218 a.a_context = ctx;
4219
4220 _err = (*vp->v_op[vnop_mmap_check_desc.vdesc_offset])(&a);
4221 if (_err == ENOTSUP) {
4222 _err = 0;
4223 }
4224 DTRACE_FSINFO(mmap_check, vnode_t, vp);
4225
4226 return _err;
4227 }
4228
4229 #if 0
4230 /*
4231 *#
4232 *# mmap - vp U U U
4233 *#
4234 */
4235 struct vnop_mmap_args {
4236 struct vnodeop_desc *a_desc;
4237 vnode_t a_vp;
4238 int a_fflags;
4239 vfs_context_t a_context;
4240 };
4241 #endif /* 0*/
4242 errno_t
VNOP_MMAP(vnode_t vp,int fflags,vfs_context_t ctx)4243 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
4244 {
4245 int _err;
4246 struct vnop_mmap_args a;
4247
4248 a.a_desc = &vnop_mmap_desc;
4249 a.a_vp = vp;
4250 a.a_fflags = fflags;
4251 a.a_context = ctx;
4252
4253 _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
4254 DTRACE_FSINFO(mmap, vnode_t, vp);
4255
4256 return _err;
4257 }
4258
4259
4260 #if 0
4261 /*
4262 *#
4263 *# mnomap - vp U U U
4264 *#
4265 */
4266 struct vnop_mnomap_args {
4267 struct vnodeop_desc *a_desc;
4268 vnode_t a_vp;
4269 vfs_context_t a_context;
4270 };
4271 #endif /* 0*/
4272 errno_t
VNOP_MNOMAP(vnode_t vp,vfs_context_t ctx)4273 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
4274 {
4275 int _err;
4276 struct vnop_mnomap_args a;
4277
4278 a.a_desc = &vnop_mnomap_desc;
4279 a.a_vp = vp;
4280 a.a_context = ctx;
4281
4282 _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
4283 DTRACE_FSINFO(mnomap, vnode_t, vp);
4284
4285 return _err;
4286 }
4287
4288
4289 #if 0
4290 /*
4291 *#
4292 *#% fsync vp L L L
4293 *#
4294 */
4295 struct vnop_fsync_args {
4296 struct vnodeop_desc *a_desc;
4297 vnode_t a_vp;
4298 int a_waitfor;
4299 vfs_context_t a_context;
4300 };
4301 #endif /* 0*/
4302 errno_t
VNOP_FSYNC(vnode_t vp,int waitfor,vfs_context_t ctx)4303 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
4304 {
4305 struct vnop_fsync_args a;
4306 int _err;
4307
4308 a.a_desc = &vnop_fsync_desc;
4309 a.a_vp = vp;
4310 a.a_waitfor = waitfor;
4311 a.a_context = ctx;
4312
4313 _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
4314 DTRACE_FSINFO(fsync, vnode_t, vp);
4315
4316 return _err;
4317 }
4318
4319
4320 #if 0
4321 /*
4322 *#
4323 *#% remove dvp L U U
4324 *#% remove vp L U U
4325 *#
4326 */
4327 struct vnop_remove_args {
4328 struct vnodeop_desc *a_desc;
4329 vnode_t a_dvp;
4330 vnode_t a_vp;
4331 struct componentname *a_cnp;
4332 int a_flags;
4333 vfs_context_t a_context;
4334 };
4335 #endif /* 0*/
errno_t
VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
{
	int _err;
	struct vnop_remove_args a;

	a.a_desc = &vnop_remove_desc;
	a.a_dvp = dvp;
	a.a_vp = vp;
	a.a_cnp = cnp;
	a.a_flags = flags;
	a.a_context = ctx;

	_err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(remove, vnode_t, vp);

	if (_err == 0) {
		/* Mark the vnode for inactive processing once references drop. */
		vnode_setneedinactive(vp);
#if CONFIG_APPLEDOUBLE
		if (!(NATIVE_XATTR(dvp))) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
		}
#endif /* CONFIG_APPLEDOUBLE */
	}

	/* Notify watchers: the file lost a link, and the directory changed. */
	post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
	post_event_if_success(dvp, _err, NOTE_WRITE);

	return _err;
}
4369
int
VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
{
	int _err;
	struct vnop_compound_remove_args a;
	/* Remember whether the caller came in without a vnode for the target. */
	int no_vp = (*vpp == NULLVP);

	a.a_desc = &vnop_compound_remove_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = &ndp->ni_cnd;
	a.a_flags = flags;
	a.a_vap = vap;
	a.a_context = ctx;
	a.a_remove_authorizer = vn_authorize_unlink;

	_err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
	if (_err == 0 && *vpp) {
		DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
	} else {
		DTRACE_FSINFO(compound_remove, vnode_t, dvp);
	}
	/*
	 * NOTE(review): on success *vpp is assumed non-NULL below
	 * (vnode_setneedinactive / post_event_if_success) — this appears
	 * to be part of the compound-remove filesystem contract; confirm.
	 */
	if (_err == 0) {
		vnode_setneedinactive(*vpp);
#if CONFIG_APPLEDOUBLE
		if (!(NATIVE_XATTR(dvp))) {
			/*
			 * Remove any associated extended attribute file (._ AppleDouble file).
			 */
			xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
		}
#endif /* CONFIG_APPLEDOUBLE */
	}

	/* Notify watchers: the file lost a link, and the directory changed. */
	post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
	post_event_if_success(dvp, _err, NOTE_WRITE);

	if (no_vp) {
		/* We looked up the vnode here; finish lookup bookkeeping and
		 * drop it on failure (except when the FS wants another pass). */
		lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
		if (*vpp && _err && _err != EKEEPLOOKING) {
			vnode_put(*vpp);
			*vpp = NULLVP;
		}
	}

	//printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);

	return _err;
}
4419
4420 #if 0
4421 /*
4422 *#
4423 *#% link vp U U U
4424 *#% link tdvp L U U
4425 *#
4426 */
4427 struct vnop_link_args {
4428 struct vnodeop_desc *a_desc;
4429 vnode_t a_vp;
4430 vnode_t a_tdvp;
4431 struct componentname *a_cnp;
4432 vfs_context_t a_context;
4433 };
4434 #endif /* 0*/
4435 errno_t
VNOP_LINK(vnode_t vp,vnode_t tdvp,struct componentname * cnp,vfs_context_t ctx)4436 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
4437 {
4438 int _err;
4439 struct vnop_link_args a;
4440
4441 #if CONFIG_APPLEDOUBLE
4442 /*
4443 * For file systems with non-native extended attributes,
4444 * disallow linking to an existing "._" Apple Double file.
4445 */
4446 if (!NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
4447 const char *vname;
4448
4449 vname = vnode_getname(vp);
4450 if (vname != NULL) {
4451 _err = 0;
4452 if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
4453 _err = EPERM;
4454 }
4455 vnode_putname(vname);
4456 if (_err) {
4457 return _err;
4458 }
4459 }
4460 }
4461 #endif /* CONFIG_APPLEDOUBLE */
4462
4463 a.a_desc = &vnop_link_desc;
4464 a.a_vp = vp;
4465 a.a_tdvp = tdvp;
4466 a.a_cnp = cnp;
4467 a.a_context = ctx;
4468
4469 _err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
4470 DTRACE_FSINFO(link, vnode_t, vp);
4471
4472 post_event_if_success(vp, _err, NOTE_LINK);
4473 post_event_if_success(tdvp, _err, NOTE_WRITE);
4474
4475 return _err;
4476 }
4477
4478 errno_t
vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
    struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
    vfs_rename_flags_t flags, vfs_context_t ctx)
{
	/*
	 * Rename the object named by fcnp in fdvp to tcnp in tdvp.
	 *
	 * When the source filesystem supports compound rename ("batched"),
	 * *fvpp / *tvpp may be NULLVP on entry and are resolved by the
	 * filesystem itself; otherwise the caller must supply *fvpp.  On
	 * success this also:
	 *   - propagates UF_DATAVAULT / SF_RESTRICTED from the target
	 *     directory onto the moved item,
	 *   - issues MAC rename notifications,
	 *   - renames (or deletes) the associated "._" AppleDouble file on
	 *     filesystems without native xattr support.
	 */
	int _err;
	struct nameidata *fromnd = NULL;
	struct nameidata *tond = NULL;
#if CONFIG_APPLEDOUBLE
	vnode_t src_attr_vp = NULLVP;
	vnode_t dst_attr_vp = NULLVP;
	char smallname1[48];
	char smallname2[48];
	char *xfromname = NULL;
	char *xtoname = NULL;
#endif /* CONFIG_APPLEDOUBLE */
	int batched;
	uint32_t tdfflags;      // Target directory file flags

	batched = vnode_compound_rename_available(fdvp);

	/* Non-compound filesystems require the caller to have resolved fvp. */
	if (!batched) {
		if (*fvpp == NULLVP) {
			panic("Not batched, and no fvp?");
		}
	}

#if CONFIG_APPLEDOUBLE
	/*
	 * We need to preflight any potential AppleDouble file for the source file
	 * before doing the rename operation, since we could potentially be doing
	 * this operation on a network filesystem, and would end up duplicating
	 * the work. Also, save the source and destination names. Skip it if the
	 * source has a "._" prefix.
	 */

	size_t xfromname_len = 0;
	size_t xtoname_len = 0;
	if (!NATIVE_XATTR(fdvp) &&
	    !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
		int error;

		/* Get source attribute file name: "._" + name + NUL = namelen + 3. */
		xfromname_len = fcnp->cn_namelen + 3;
		if (xfromname_len > sizeof(smallname1)) {
			xfromname = kalloc_data(xfromname_len, Z_WAITOK);
		} else {
			xfromname = &smallname1[0];
		}
		strlcpy(xfromname, "._", xfromname_len);
		strlcat(xfromname, fcnp->cn_nameptr, xfromname_len);

		/* Get destination attribute file name. */
		xtoname_len = tcnp->cn_namelen + 3;
		if (xtoname_len > sizeof(smallname2)) {
			xtoname = kalloc_data(xtoname_len, Z_WAITOK);
		} else {
			xtoname = &smallname2[0];
		}
		strlcpy(xtoname, "._", xtoname_len);
		strlcat(xtoname, tcnp->cn_nameptr, xtoname_len);

		/*
		 * Look up source attribute file, keep reference on it if exists.
		 * Note that we do the namei with the nameiop of RENAME, which is different than
		 * in the rename syscall. It's OK if the source file does not exist, since this
		 * is only for AppleDouble files.
		 */
		fromnd = kalloc_type(struct nameidata, Z_WAITOK);
		NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
		    UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
		fromnd->ni_dvp = fdvp;
		error = namei(fromnd);

		/*
		 * If there was an error looking up source attribute file,
		 * we'll behave as if it didn't exist.
		 */

		if (error == 0) {
			if (fromnd->ni_vp) {
				/* src_attr_vp indicates need to call vnode_put / nameidone later */
				src_attr_vp = fromnd->ni_vp;

				/* Only regular files can be AppleDouble sidecars. */
				if (fromnd->ni_vp->v_type != VREG) {
					src_attr_vp = NULLVP;
					vnode_put(fromnd->ni_vp);
				}
			}
			/*
			 * Either we got an invalid vnode type (not a regular file) or the namei lookup
			 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
			 * have a vnode here, so we drop our namei buffer for the source attribute file
			 */
			if (src_attr_vp == NULLVP) {
				nameidone(fromnd);
			}
		}
	}
#endif /* CONFIG_APPLEDOUBLE */

	/* Perform the rename of the main file. */
	if (batched) {
		_err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
		if (_err != 0) {
			printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
		}
	} else {
		if (flags) {
			_err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
			if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
				// Legacy...
				/* Fall back to VNOP_RENAME with CN_SECLUDE_RENAME for old filesystems. */
				if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
					fcnp->cn_flags |= CN_SECLUDE_RENAME;
					_err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
				}
			}
		} else {
			_err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
		}
	}

	/*
	 * If moved to a new directory that is restricted,
	 * set the restricted flag on the item moved.
	 */
	if (_err == 0) {
		_err = vnode_flags(tdvp, &tdfflags, ctx);
		if (_err == 0) {
			uint32_t inherit_flags = tdfflags & (UF_DATAVAULT | SF_RESTRICTED);
			if (inherit_flags) {
				uint32_t fflags;
				_err = vnode_flags(*fvpp, &fflags, ctx);
				/* Only write back if some inherited bit is actually missing. */
				if (_err == 0 && fflags != (fflags | inherit_flags)) {
					struct vnode_attr va;
					VATTR_INIT(&va);
					VATTR_SET(&va, va_flags, fflags | inherit_flags);
					_err = vnode_setattr(*fvpp, &va, ctx);
				}
			}
		}
	}

#if CONFIG_MACF
	if (_err == 0) {
		if (flags & VFS_RENAME_SWAP) {
			mac_vnode_notify_rename_swap(
				ctx,                    /* ctx */
				fdvp,                   /* fdvp */
				*fvpp,                  /* fvp */
				fcnp,                   /* fcnp */
				tdvp,                   /* tdvp */
				*tvpp,                  /* tvp */
				tcnp                    /* tcnp */
				);
		} else {
			mac_vnode_notify_rename(
				ctx,                    /* ctx */
				*fvpp,                  /* fvp */
				tdvp,                   /* tdvp */
				tcnp                    /* tcnp */
				);
		}
	}
#endif

#if CONFIG_APPLEDOUBLE
	/*
	 * Rename any associated extended attribute file (._ AppleDouble file).
	 */
	if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
		int error = 0;

		/*
		 * Get destination attribute file vnode.
		 * Note that tdvp already has an iocount reference. Make sure to check that we
		 * get a valid vnode from namei.
		 */
		tond = kalloc_type(struct nameidata, Z_WAITOK);
		NDINIT(tond, RENAME, OP_RENAME,
		    NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
		    CAST_USER_ADDR_T(xtoname), ctx);
		tond->ni_dvp = tdvp;
		error = namei(tond);

		if (error) {
			goto ad_error;
		}

		if (tond->ni_vp) {
			dst_attr_vp = tond->ni_vp;
		}

		if (src_attr_vp) {
			/* Snapshot identity so we can tell if the FS updated it for us. */
			const char *old_name = src_attr_vp->v_name;
			vnode_t old_parent = src_attr_vp->v_parent;

			if (batched) {
				error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
				    tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
				    0, ctx);
			} else {
				error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
				    tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
			}

			/* Update the name-cache identity unless the FS already did. */
			if (error == 0 && old_name == src_attr_vp->v_name &&
			    old_parent == src_attr_vp->v_parent) {
				int update_flags = VNODE_UPDATE_NAME;

				if (fdvp != tdvp) {
					update_flags |= VNODE_UPDATE_PARENT;
				}

				if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
					vnode_update_identity(src_attr_vp, tdvp,
					    tond->ni_cnd.cn_nameptr,
					    tond->ni_cnd.cn_namelen,
					    tond->ni_cnd.cn_hash,
					    update_flags);
				}
			}

			/* kevent notifications for moving resource files
			 * _err is zero if we're here, so no need to notify directories, code
			 * below will do that.  only need to post the rename on the source and
			 * possibly a delete on the dest
			 */
			post_event_if_success(src_attr_vp, error, NOTE_RENAME);
			if (dst_attr_vp) {
				post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
			}
		} else if (dst_attr_vp) {
			/*
			 * Just delete destination attribute file vnode if it exists, since
			 * we didn't have a source attribute file.
			 * Note that tdvp already has an iocount reference.
			 */

			struct vnop_remove_args args;

			args.a_desc = &vnop_remove_desc;
			args.a_dvp = tdvp;
			args.a_vp = dst_attr_vp;
			args.a_cnp = &tond->ni_cnd;
			args.a_context = ctx;

			if (error == 0) {
				/* Direct v_op dispatch; bypasses the VNOP_REMOVE wrapper. */
				error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);

				if (error == 0) {
					vnode_setneedinactive(dst_attr_vp);
				}
			}

			/* kevent notification for deleting the destination's attribute file
			 * if it existed.  Only need to post the delete on the destination, since
			 * the code below will handle the directories.
			 */
			post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
		}
	}
ad_error:
	/* Release iocounts and namei buffers taken for the sidecar lookups. */
	if (src_attr_vp) {
		vnode_put(src_attr_vp);
		nameidone(fromnd);
	}
	if (dst_attr_vp) {
		vnode_put(dst_attr_vp);
		nameidone(tond);
	}
	if (xfromname && xfromname != &smallname1[0]) {
		kfree_data(xfromname, xfromname_len);
	}
	if (xtoname && xtoname != &smallname2[0]) {
		kfree_data(xtoname, xtoname_len);
	}
#endif /* CONFIG_APPLEDOUBLE */
	/* kfree_type is NULL-safe for pointers never allocated above. */
	kfree_type(struct nameidata, fromnd);
	kfree_type(struct nameidata, tond);
	return _err;
}
4759
4760
4761 #if 0
4762 /*
4763 *#
4764 *#% rename fdvp U U U
4765 *#% rename fvp U U U
4766 *#% rename tdvp L U U
4767 *#% rename tvp X U U
4768 *#
4769 */
4770 struct vnop_rename_args {
4771 struct vnodeop_desc *a_desc;
4772 vnode_t a_fdvp;
4773 vnode_t a_fvp;
4774 struct componentname *a_fcnp;
4775 vnode_t a_tdvp;
4776 vnode_t a_tvp;
4777 struct componentname *a_tcnp;
4778 vfs_context_t a_context;
4779 };
4780 #endif /* 0*/
4781 errno_t
VNOP_RENAME(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_context_t ctx)4782 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4783 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4784 vfs_context_t ctx)
4785 {
4786 int _err = 0;
4787 struct vnop_rename_args a;
4788
4789 a.a_desc = &vnop_rename_desc;
4790 a.a_fdvp = fdvp;
4791 a.a_fvp = fvp;
4792 a.a_fcnp = fcnp;
4793 a.a_tdvp = tdvp;
4794 a.a_tvp = tvp;
4795 a.a_tcnp = tcnp;
4796 a.a_context = ctx;
4797
4798 /* do the rename of the main file. */
4799 _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4800 DTRACE_FSINFO(rename, vnode_t, fdvp);
4801
4802 if (_err) {
4803 return _err;
4804 }
4805
4806 return post_rename(fdvp, fvp, tdvp, tvp);
4807 }
4808
4809 static errno_t
post_rename(vnode_t fdvp,vnode_t fvp,vnode_t tdvp,vnode_t tvp)4810 post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
4811 {
4812 if (tvp && tvp != fvp) {
4813 vnode_setneedinactive(tvp);
4814 }
4815
4816 /* Wrote at least one directory. If transplanted a dir, also changed link counts */
4817 int events = NOTE_WRITE;
4818 if (vnode_isdir(fvp)) {
4819 /* Link count on dir changed only if we are moving a dir and...
4820 * --Moved to new dir, not overwriting there
4821 * --Kept in same dir and DID overwrite
4822 */
4823 if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4824 events |= NOTE_LINK;
4825 }
4826 }
4827
4828 lock_vnode_and_post(fdvp, events);
4829 if (fdvp != tdvp) {
4830 lock_vnode_and_post(tdvp, events);
4831 }
4832
4833 /* If you're replacing the target, post a deletion for it */
4834 if (tvp) {
4835 lock_vnode_and_post(tvp, NOTE_DELETE);
4836 }
4837
4838 lock_vnode_and_post(fvp, NOTE_RENAME);
4839
4840 return 0;
4841 }
4842
4843 #if 0
4844 /*
4845 *#
4846 *#% renamex fdvp U U U
4847 *#% renamex fvp U U U
4848 *#% renamex tdvp L U U
4849 *#% renamex tvp X U U
4850 *#
4851 */
4852 struct vnop_renamex_args {
4853 struct vnodeop_desc *a_desc;
4854 vnode_t a_fdvp;
4855 vnode_t a_fvp;
4856 struct componentname *a_fcnp;
4857 vnode_t a_tdvp;
4858 vnode_t a_tvp;
4859 struct componentname *a_tcnp;
4860 vfs_rename_flags_t a_flags;
4861 vfs_context_t a_context;
4862 };
4863 #endif /* 0*/
4864 errno_t
VNOP_RENAMEX(struct vnode * fdvp,struct vnode * fvp,struct componentname * fcnp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,vfs_rename_flags_t flags,vfs_context_t ctx)4865 VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4866 struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4867 vfs_rename_flags_t flags, vfs_context_t ctx)
4868 {
4869 int _err = 0;
4870 struct vnop_renamex_args a;
4871
4872 a.a_desc = &vnop_renamex_desc;
4873 a.a_fdvp = fdvp;
4874 a.a_fvp = fvp;
4875 a.a_fcnp = fcnp;
4876 a.a_tdvp = tdvp;
4877 a.a_tvp = tvp;
4878 a.a_tcnp = tcnp;
4879 a.a_flags = flags;
4880 a.a_context = ctx;
4881
4882 /* do the rename of the main file. */
4883 _err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
4884 DTRACE_FSINFO(renamex, vnode_t, fdvp);
4885
4886 if (_err) {
4887 return _err;
4888 }
4889
4890 return post_rename(fdvp, fvp, tdvp, tvp);
4891 }
4892
4893
int
VNOP_COMPOUND_RENAME(
	struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
	struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
	uint32_t flags, vfs_context_t ctx)
{
	/*
	 * Combined lookup+rename: *fvpp and/or *tvpp may be NULLVP on entry,
	 * in which case the filesystem resolves them as part of the
	 * operation and returns them with iocounts held.  On success this
	 * posts the appropriate kevents; on failure (other than
	 * EKEEPLOOKING) any vnodes the filesystem returned are released.
	 */
	int _err = 0;
	int events;
	struct vnop_compound_rename_args a;
	int no_fvp, no_tvp;

	/* Remember whether the caller supplied vnodes before the VNOP runs. */
	no_fvp = (*fvpp) == NULLVP;
	no_tvp = (*tvpp) == NULLVP;

	a.a_desc = &vnop_compound_rename_desc;

	a.a_fdvp = fdvp;
	a.a_fvpp = fvpp;
	a.a_fcnp = fcnp;
	a.a_fvap = fvap;

	a.a_tdvp = tdvp;
	a.a_tvpp = tvpp;
	a.a_tcnp = tcnp;
	a.a_tvap = tvap;

	a.a_flags = flags;
	a.a_context = ctx;
	a.a_rename_authorizer = vn_authorize_rename;
	a.a_reserved = NULL;

	/* do the rename of the main file. */
	_err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(compound_rename, vnode_t, fdvp);

	if (_err == 0) {
		/* A distinct overwritten target should go inactive promptly. */
		if (*tvpp && *tvpp != *fvpp) {
			vnode_setneedinactive(*tvpp);
		}
	}

	/* Wrote at least one directory. If transplanted a dir, also changed link counts */
	if (_err == 0 && *fvpp != *tvpp) {
		if (!*fvpp) {
			panic("No fvpp after compound rename?");
		}

		events = NOTE_WRITE;
		if (vnode_isdir(*fvpp)) {
			/* Link count on dir changed only if we are moving a dir and...
			 * --Moved to new dir, not overwriting there
			 * --Kept in same dir and DID overwrite
			 */
			if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
				events |= NOTE_LINK;
			}
		}

		lock_vnode_and_post(fdvp, events);
		if (fdvp != tdvp) {
			lock_vnode_and_post(tdvp, events);
		}

		/* If you're replacing the target, post a deletion for it */
		if (*tvpp) {
			lock_vnode_and_post(*tvpp, NOTE_DELETE);
		}

		lock_vnode_and_post(*fvpp, NOTE_RENAME);
	}

	/* Let the lookup machinery consume vnodes the filesystem resolved. */
	if (no_fvp) {
		lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
	}
	if (no_tvp && *tvpp != NULLVP) {
		lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
	}

	/* On real failure, drop the iocounts the filesystem handed back. */
	if (_err && _err != EKEEPLOOKING) {
		if (*fvpp) {
			vnode_put(*fvpp);
			*fvpp = NULLVP;
		}
		if (*tvpp) {
			vnode_put(*tvpp);
			*tvpp = NULLVP;
		}
	}

	return _err;
}
4985
4986 int
vn_mkdir(struct vnode * dvp,struct vnode ** vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)4987 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4988 struct vnode_attr *vap, vfs_context_t ctx)
4989 {
4990 if (ndp->ni_cnd.cn_nameiop != CREATE) {
4991 panic("Non-CREATE nameiop in vn_mkdir()?");
4992 }
4993
4994 if (vnode_compound_mkdir_available(dvp)) {
4995 return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4996 } else {
4997 return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4998 }
4999 }
5000
5001 #if 0
5002 /*
5003 *#
5004 *#% mkdir dvp L U U
5005 *#% mkdir vpp - L -
5006 *#
5007 */
5008 struct vnop_mkdir_args {
5009 struct vnodeop_desc *a_desc;
5010 vnode_t a_dvp;
5011 vnode_t *a_vpp;
5012 struct componentname *a_cnp;
5013 struct vnode_attr *a_vap;
5014 vfs_context_t a_context;
5015 };
5016 #endif /* 0*/
5017 errno_t
VNOP_MKDIR(struct vnode * dvp,struct vnode ** vpp,struct componentname * cnp,struct vnode_attr * vap,vfs_context_t ctx)5018 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5019 struct vnode_attr *vap, vfs_context_t ctx)
5020 {
5021 int _err;
5022 struct vnop_mkdir_args a;
5023
5024 a.a_desc = &vnop_mkdir_desc;
5025 a.a_dvp = dvp;
5026 a.a_vpp = vpp;
5027 a.a_cnp = cnp;
5028 a.a_vap = vap;
5029 a.a_context = ctx;
5030
5031 _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
5032 if (_err == 0 && *vpp) {
5033 DTRACE_FSINFO(mkdir, vnode_t, *vpp);
5034 }
5035 #if CONFIG_APPLEDOUBLE
5036 if (_err == 0 && !NATIVE_XATTR(dvp)) {
5037 /*
5038 * Remove stale Apple Double file (if any).
5039 */
5040 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
5041 }
5042 #endif /* CONFIG_APPLEDOUBLE */
5043
5044 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5045
5046 return _err;
5047 }
5048
int
VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
    struct vnode_attr *vap, vfs_context_t ctx)
{
	/*
	 * Combined lookup+mkdir: the filesystem both creates the directory
	 * and returns its vnode in *vpp (iocount held).  On failure other
	 * than EKEEPLOOKING, any returned vnode is released before return.
	 */
	int _err;
	struct vnop_compound_mkdir_args a;

	a.a_desc = &vnop_compound_mkdir_desc;
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = &ndp->ni_cnd;
	a.a_vap = vap;
	a.a_flags = 0;
	a.a_context = ctx;
#if 0
	a.a_mkdir_authorizer = vn_authorize_mkdir;
#endif /* 0 */
	a.a_reserved = NULL;

	_err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
	if (_err == 0 && *vpp) {
		DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
	}
#if CONFIG_APPLEDOUBLE
	if (_err == 0 && !NATIVE_XATTR(dvp)) {
		/*
		 * Remove stale Apple Double file (if any).
		 */
		xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
	}
#endif /* CONFIG_APPLEDOUBLE */

	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);

	/* Let the lookup machinery consume/cache the compound result. */
	lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
	if (*vpp && _err && _err != EKEEPLOOKING) {
		vnode_put(*vpp);
		*vpp = NULLVP;
	}

	return _err;
}
5091
5092 int
vn_rmdir(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)5093 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
5094 {
5095 if (vnode_compound_rmdir_available(dvp)) {
5096 return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
5097 } else {
5098 if (*vpp == NULLVP) {
5099 panic("NULL vp, but not a compound VNOP?");
5100 }
5101 if (vap != NULL) {
5102 panic("Non-NULL vap, but not a compound VNOP?");
5103 }
5104 return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
5105 }
5106 }
5107
5108 #if 0
5109 /*
5110 *#
5111 *#% rmdir dvp L U U
5112 *#% rmdir vp L U U
5113 *#
5114 */
5115 struct vnop_rmdir_args {
5116 struct vnodeop_desc *a_desc;
5117 vnode_t a_dvp;
5118 vnode_t a_vp;
5119 struct componentname *a_cnp;
5120 vfs_context_t a_context;
5121 };
5122
5123 #endif /* 0*/
5124 errno_t
VNOP_RMDIR(struct vnode * dvp,struct vnode * vp,struct componentname * cnp,vfs_context_t ctx)5125 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
5126 {
5127 int _err;
5128 struct vnop_rmdir_args a;
5129
5130 a.a_desc = &vnop_rmdir_desc;
5131 a.a_dvp = dvp;
5132 a.a_vp = vp;
5133 a.a_cnp = cnp;
5134 a.a_context = ctx;
5135
5136 _err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
5137 DTRACE_FSINFO(rmdir, vnode_t, vp);
5138
5139 if (_err == 0) {
5140 vnode_setneedinactive(vp);
5141 #if CONFIG_APPLEDOUBLE
5142 if (!(NATIVE_XATTR(dvp))) {
5143 /*
5144 * Remove any associated extended attribute file (._ AppleDouble file).
5145 */
5146 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
5147 }
5148 #endif
5149 }
5150
5151 /* If you delete a dir, it loses its "." reference --> NOTE_LINK */
5152 post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
5153 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5154
5155 return _err;
5156 }
5157
5158 int
VNOP_COMPOUND_RMDIR(struct vnode * dvp,struct vnode ** vpp,struct nameidata * ndp,struct vnode_attr * vap,vfs_context_t ctx)5159 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
5160 struct vnode_attr *vap, vfs_context_t ctx)
5161 {
5162 int _err;
5163 struct vnop_compound_rmdir_args a;
5164 int no_vp;
5165
5166 a.a_desc = &vnop_mkdir_desc;
5167 a.a_dvp = dvp;
5168 a.a_vpp = vpp;
5169 a.a_cnp = &ndp->ni_cnd;
5170 a.a_vap = vap;
5171 a.a_flags = 0;
5172 a.a_context = ctx;
5173 a.a_rmdir_authorizer = vn_authorize_rmdir;
5174 a.a_reserved = NULL;
5175
5176 no_vp = (*vpp == NULLVP);
5177
5178 _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
5179 if (_err == 0 && *vpp) {
5180 DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
5181 }
5182 #if CONFIG_APPLEDOUBLE
5183 if (_err == 0 && !NATIVE_XATTR(dvp)) {
5184 /*
5185 * Remove stale Apple Double file (if any).
5186 */
5187 xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
5188 }
5189 #endif
5190
5191 if (*vpp) {
5192 post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
5193 }
5194 post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5195
5196 if (no_vp) {
5197 lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
5198
5199 #if 0 /* Removing orphaned ._ files requires a vp.... */
5200 if (*vpp && _err && _err != EKEEPLOOKING) {
5201 vnode_put(*vpp);
5202 *vpp = NULLVP;
5203 }
5204 #endif /* 0 */
5205 }
5206
5207 return _err;
5208 }
5209
5210 #if CONFIG_APPLEDOUBLE
5211 /*
5212 * Remove a ._ AppleDouble file
5213 */
5214 #define AD_STALE_SECS (180)
static void
xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
{
	/*
	 * Remove the "._<basename>" AppleDouble sidecar file in dvp, if any.
	 * When 'force' is zero, the sidecar is removed only if it appears
	 * stale: non-empty and not modified/changed for AD_STALE_SECS.
	 * Errors are deliberately ignored; removal is best-effort.
	 */
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t alloc_len;
	size_t copy_len;

	/* Nothing to do for empty names or names already carrying "._". */
	if ((basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	filename = &smallname[0];
	alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (alloc_len >= sizeof(smallname)) {
		alloc_len++;  /* snprintf result doesn't include '\0' */
		filename = kalloc_data(alloc_len, Z_WAITOK);
		copy_len = snprintf(filename, alloc_len, "._%s", basename);
	}
	/* WANTPARENT: on success namei returns iocounts on both parent and leaf. */
	NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(filename), ctx);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0) {
		goto out2;
	}

	xvp = nd.ni_vp;
	/* dvp now carries the parent iocount from namei; put in out1. */
	dvp = nd.ni_dvp;
	nameidone(&nd);
	/* Only regular files can be AppleDouble sidecars. */
	if (xvp->v_type != VREG) {
		goto out1;
	}

	/*
	 * When creating a new object and a "._" file already
	 * exists, check to see if it's a stale "._" file.  These are
	 * typically AppleDouble (AD) files generated via XNU's
	 * VFS compatibility shims for storing XATTRs and streams
	 * on filesystems that do not support them natively.
	 */
	if (!force) {
		struct vnode_attr va;

		VATTR_INIT(&va);
		VATTR_WANTED(&va, va_data_size);
		VATTR_WANTED(&va, va_modify_time);
		VATTR_WANTED(&va, va_change_time);

		if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
		    VATTR_IS_SUPPORTED(&va, va_data_size) &&
		    va.va_data_size != 0) {
			struct timeval tv_compare = {};
			struct timeval tv_now = {};

			/*
			 * If the file exists (and has non-zero size), then use the newer of
			 * chgtime / modtime to compare against present time. Note that setting XATTRs or updating
			 * streams through the compatibility interfaces may not trigger chgtime to be updated, so
			 * checking either modtime or chgtime is useful.
			 */
			if (VATTR_IS_SUPPORTED(&va, va_modify_time) && (va.va_modify_time.tv_sec)) {
				if (VATTR_IS_SUPPORTED(&va, va_change_time) && (va.va_change_time.tv_sec)) {
					/* take the later of chgtime and modtime */
					tv_compare.tv_sec = va.va_change_time.tv_sec;
					if (tv_compare.tv_sec < va.va_modify_time.tv_sec) {
						tv_compare.tv_sec = va.va_modify_time.tv_sec;
					}
				} else {
					/* fall back to mod-time alone if chgtime not supported or set to 0 */
					tv_compare.tv_sec = va.va_modify_time.tv_sec;
				}
			}

			/* Now, we have a time to compare against, compare against AD_STALE_SEC */
			microtime(&tv_now);
			if ((tv_compare.tv_sec > 0) &&
			    (tv_now.tv_sec > tv_compare.tv_sec) &&
			    ((tv_now.tv_sec - tv_compare.tv_sec) > AD_STALE_SECS)) {
				force = 1;  /* must be stale */
			}
		}
	}

	if (force) {
		int error;

		error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
		if (error == 0) {
			vnode_setneedinactive(xvp);
		}

		post_event_if_success(xvp, error, NOTE_DELETE);
		post_event_if_success(dvp, error, NOTE_WRITE);
	}

out1:
	/* Release the parent and leaf iocounts obtained via namei. */
	vnode_put(dvp);
	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		kfree_data(filename, alloc_len);
	}
}
5319
5320 /*
5321 * Shadow uid/gid/mod to a ._ AppleDouble file
5322 */
__attribute__((noinline))
static void
xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
    vfs_context_t ctx)
{
	/*
	 * Mirror an attribute change (the attributes in vap) onto the
	 * "._<basename>" AppleDouble sidecar file in dvp, if one exists.
	 * Lookup and setattr failures are deliberately ignored; the shadow
	 * file update is best-effort.
	 */
	vnode_t xvp;
	struct nameidata nd;
	char smallname[64];
	char *filename = NULL;
	size_t alloc_len;
	size_t copy_len;

	/* Nothing to do without a parent, or for "._"-prefixed/empty names. */
	if ((dvp == NULLVP) ||
	    (basename == NULL) || (basename[0] == '\0') ||
	    (basename[0] == '.' && basename[1] == '_')) {
		return;
	}
	filename = &smallname[0];
	alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
	if (alloc_len >= sizeof(smallname)) {
		alloc_len++;  /* snprintf result doesn't include '\0' */
		filename = kalloc_data(alloc_len, Z_WAITOK);
		copy_len = snprintf(filename, alloc_len, "._%s", basename);
	}
	NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(filename), ctx);
	nd.ni_dvp = dvp;
	if (namei(&nd) != 0) {
		goto out2;
	}

	xvp = nd.ni_vp;
	nameidone(&nd);

	/* Only regular files can be AppleDouble sidecars. */
	if (xvp->v_type == VREG) {
		struct vnop_setattr_args a;

		a.a_desc = &vnop_setattr_desc;
		a.a_vp = xvp;
		a.a_vap = vap;
		a.a_context = ctx;

		/* Direct v_op dispatch; result intentionally discarded. */
		(void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
	}

	vnode_put(xvp);
out2:
	if (filename && filename != &smallname[0]) {
		kfree_data(filename, alloc_len);
	}
}
5374 #endif /* CONFIG_APPLEDOUBLE */
5375
5376 #if 0
5377 /*
5378 *#
5379 *#% symlink dvp L U U
5380 *#% symlink vpp - U -
5381 *#
5382 */
5383 struct vnop_symlink_args {
5384 struct vnodeop_desc *a_desc;
5385 vnode_t a_dvp;
5386 vnode_t *a_vpp;
5387 struct componentname *a_cnp;
5388 struct vnode_attr *a_vap;
5389 char *a_target;
5390 vfs_context_t a_context;
5391 };
5392
5393 #endif /* 0*/
5394 errno_t
VNOP_SYMLINK(struct vnode * dvp,struct vnode ** vpp,struct componentname * cnp,struct vnode_attr * vap,char * target,vfs_context_t ctx)5395 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5396 struct vnode_attr *vap, char *target, vfs_context_t ctx)
5397 {
5398 int _err;
5399 struct vnop_symlink_args a;
5400
5401 a.a_desc = &vnop_symlink_desc;
5402 a.a_dvp = dvp;
5403 a.a_vpp = vpp;
5404 a.a_cnp = cnp;
5405 a.a_vap = vap;
5406 a.a_target = target;
5407 a.a_context = ctx;
5408
5409 _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
5410 DTRACE_FSINFO(symlink, vnode_t, dvp);
5411 #if CONFIG_APPLEDOUBLE
5412 if (_err == 0 && !NATIVE_XATTR(dvp)) {
5413 /*
5414 * Remove stale Apple Double file (if any). Posts its own knotes
5415 */
5416 xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
5417 }
5418 #endif /* CONFIG_APPLEDOUBLE */
5419
5420 post_event_if_success(dvp, _err, NOTE_WRITE);
5421
5422 return _err;
5423 }
5424
5425 #if 0
5426 /*
5427 *#
5428 *#% readdir vp L L L
5429 *#
5430 */
5431 struct vnop_readdir_args {
5432 struct vnodeop_desc *a_desc;
5433 vnode_t a_vp;
5434 struct uio *a_uio;
5435 int a_flags;
5436 int *a_eofflag;
5437 int *a_numdirent;
5438 vfs_context_t a_context;
5439 };
5440
5441 #endif /* 0*/
5442 errno_t
VNOP_READDIR(struct vnode * vp,struct uio * uio,int flags,int * eofflag,int * numdirent,vfs_context_t ctx)5443 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
5444 int *numdirent, vfs_context_t ctx)
5445 {
5446 int _err;
5447 struct vnop_readdir_args a;
5448 #if CONFIG_DTRACE
5449 user_ssize_t resid = uio_resid(uio);
5450 #endif
5451
5452 a.a_desc = &vnop_readdir_desc;
5453 a.a_vp = vp;
5454 a.a_uio = uio;
5455 a.a_flags = flags;
5456 a.a_eofflag = eofflag;
5457 a.a_numdirent = numdirent;
5458 a.a_context = ctx;
5459
5460 _err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
5461 DTRACE_FSINFO_IO(readdir,
5462 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5463
5464 return _err;
5465 }
5466
5467 #if 0
5468 /*
5469 *#
5470 *#% readdirattr vp L L L
5471 *#
5472 */
5473 struct vnop_readdirattr_args {
5474 struct vnodeop_desc *a_desc;
5475 vnode_t a_vp;
5476 struct attrlist *a_alist;
5477 struct uio *a_uio;
5478 uint32_t a_maxcount;
5479 uint32_t a_options;
5480 uint32_t *a_newstate;
5481 int *a_eofflag;
5482 uint32_t *a_actualcount;
5483 vfs_context_t a_context;
5484 };
5485
5486 #endif /* 0*/
5487 errno_t
VNOP_READDIRATTR(struct vnode * vp,struct attrlist * alist,struct uio * uio,uint32_t maxcount,uint32_t options,uint32_t * newstate,int * eofflag,uint32_t * actualcount,vfs_context_t ctx)5488 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
5489 uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
5490 {
5491 int _err;
5492 struct vnop_readdirattr_args a;
5493 #if CONFIG_DTRACE
5494 user_ssize_t resid = uio_resid(uio);
5495 #endif
5496
5497 a.a_desc = &vnop_readdirattr_desc;
5498 a.a_vp = vp;
5499 a.a_alist = alist;
5500 a.a_uio = uio;
5501 a.a_maxcount = maxcount;
5502 a.a_options = options;
5503 a.a_newstate = newstate;
5504 a.a_eofflag = eofflag;
5505 a.a_actualcount = actualcount;
5506 a.a_context = ctx;
5507
5508 _err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
5509 DTRACE_FSINFO_IO(readdirattr,
5510 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5511
5512 return _err;
5513 }
5514
5515 #if 0
5516 struct vnop_getttrlistbulk_args {
5517 struct vnodeop_desc *a_desc;
5518 vnode_t a_vp;
5519 struct attrlist *a_alist;
5520 struct vnode_attr *a_vap;
5521 struct uio *a_uio;
5522 void *a_private
5523 uint64_t a_options;
5524 int *a_eofflag;
5525 uint32_t *a_actualcount;
5526 vfs_context_t a_context;
5527 };
5528 #endif /* 0*/
5529 errno_t
VNOP_GETATTRLISTBULK(struct vnode * vp,struct attrlist * alist,struct vnode_attr * vap,struct uio * uio,void * private,uint64_t options,int32_t * eofflag,int32_t * actualcount,vfs_context_t ctx)5530 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
5531 struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
5532 int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
5533 {
5534 int _err;
5535 struct vnop_getattrlistbulk_args a;
5536 #if CONFIG_DTRACE
5537 user_ssize_t resid = uio_resid(uio);
5538 #endif
5539
5540 a.a_desc = &vnop_getattrlistbulk_desc;
5541 a.a_vp = vp;
5542 a.a_alist = alist;
5543 a.a_vap = vap;
5544 a.a_uio = uio;
5545 a.a_private = private;
5546 a.a_options = options;
5547 a.a_eofflag = eofflag;
5548 a.a_actualcount = actualcount;
5549 a.a_context = ctx;
5550
5551 _err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
5552 DTRACE_FSINFO_IO(getattrlistbulk,
5553 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5554
5555 return _err;
5556 }
5557
5558 #if 0
5559 /*
5560 *#
5561 *#% readlink vp L L L
5562 *#
5563 */
5564 struct vnop_readlink_args {
5565 struct vnodeop_desc *a_desc;
5566 vnode_t a_vp;
5567 struct uio *a_uio;
5568 vfs_context_t a_context;
5569 };
5570 #endif /* 0 */
5571
5572 /*
5573 * Returns: 0 Success
5574 * lock_fsnode:ENOENT No such file or directory [only for VFS
5575 * that is not thread safe & vnode is
5576 * currently being/has been terminated]
5577 * <vfs_readlink>:EINVAL
5578 * <vfs_readlink>:???
5579 *
5580 * Note: The return codes from the underlying VFS's readlink routine
5581 * can't be fully enumerated here, since third party VFS authors
5582 * may not limit their error returns to the ones documented here,
5583 * even though this may result in some programs functioning
5584 * incorrectly.
5585 *
5586 * The return codes documented above are those which may currently
5587 * be returned by HFS from hfs_vnop_readlink, not including
5588 * additional error code which may be propagated from underlying
5589 * routines.
5590 */
5591 errno_t
VNOP_READLINK(struct vnode * vp,struct uio * uio,vfs_context_t ctx)5592 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
5593 {
5594 int _err;
5595 struct vnop_readlink_args a;
5596 #if CONFIG_DTRACE
5597 user_ssize_t resid = uio_resid(uio);
5598 #endif
5599 a.a_desc = &vnop_readlink_desc;
5600 a.a_vp = vp;
5601 a.a_uio = uio;
5602 a.a_context = ctx;
5603
5604 _err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
5605 DTRACE_FSINFO_IO(readlink,
5606 vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5607
5608 return _err;
5609 }
5610
5611 #if 0
5612 /*
5613 *#
5614 *#% inactive vp L U U
5615 *#
5616 */
5617 struct vnop_inactive_args {
5618 struct vnodeop_desc *a_desc;
5619 vnode_t a_vp;
5620 vfs_context_t a_context;
5621 };
5622 #endif /* 0*/
5623 errno_t
VNOP_INACTIVE(struct vnode * vp,vfs_context_t ctx)5624 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
5625 {
5626 int _err;
5627 struct vnop_inactive_args a;
5628
5629 a.a_desc = &vnop_inactive_desc;
5630 a.a_vp = vp;
5631 a.a_context = ctx;
5632
5633 _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
5634 DTRACE_FSINFO(inactive, vnode_t, vp);
5635
5636 #if NAMEDSTREAMS
5637 /* For file systems that do not support namedstream natively, mark
5638 * the shadow stream file vnode to be recycled as soon as the last
5639 * reference goes away. To avoid re-entering reclaim code, do not
5640 * call recycle on terminating namedstream vnodes.
5641 */
5642 if (vnode_isnamedstream(vp) &&
5643 (vp->v_parent != NULLVP) &&
5644 vnode_isshadow(vp) &&
5645 ((vp->v_lflag & VL_TERMINATE) == 0)) {
5646 vnode_recycle(vp);
5647 }
5648 #endif
5649
5650 return _err;
5651 }
5652
5653
5654 #if 0
5655 /*
5656 *#
5657 *#% reclaim vp U U U
5658 *#
5659 */
5660 struct vnop_reclaim_args {
5661 struct vnodeop_desc *a_desc;
5662 vnode_t a_vp;
5663 vfs_context_t a_context;
5664 };
5665 #endif /* 0*/
5666 errno_t
VNOP_RECLAIM(struct vnode * vp,vfs_context_t ctx)5667 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5668 {
5669 int _err;
5670 struct vnop_reclaim_args a;
5671
5672 a.a_desc = &vnop_reclaim_desc;
5673 a.a_vp = vp;
5674 a.a_context = ctx;
5675
5676 _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5677 DTRACE_FSINFO(reclaim, vnode_t, vp);
5678
5679 return _err;
5680 }
5681
5682
5683 /*
5684 * Returns: 0 Success
5685 * lock_fsnode:ENOENT No such file or directory [only for VFS
5686 * that is not thread safe & vnode is
5687 * currently being/has been terminated]
5688 * <vnop_pathconf_desc>:??? [per FS implementation specific]
5689 */
5690 #if 0
5691 /*
5692 *#
5693 *#% pathconf vp L L L
5694 *#
5695 */
5696 struct vnop_pathconf_args {
5697 struct vnodeop_desc *a_desc;
5698 vnode_t a_vp;
5699 int a_name;
5700 int32_t *a_retval;
5701 vfs_context_t a_context;
5702 };
5703 #endif /* 0*/
5704 errno_t
VNOP_PATHCONF(struct vnode * vp,int name,int32_t * retval,vfs_context_t ctx)5705 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5706 {
5707 int _err;
5708 struct vnop_pathconf_args a;
5709
5710 a.a_desc = &vnop_pathconf_desc;
5711 a.a_vp = vp;
5712 a.a_name = name;
5713 a.a_retval = retval;
5714 a.a_context = ctx;
5715
5716 _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5717 DTRACE_FSINFO(pathconf, vnode_t, vp);
5718
5719 return _err;
5720 }
5721
5722 /*
5723 * Returns: 0 Success
5724 * err_advlock:ENOTSUP
5725 * lf_advlock:???
5726 * <vnop_advlock_desc>:???
5727 *
5728 * Notes: VFS implementations of advisory locking using calls through
5729 * <vnop_advlock_desc> because lock enforcement does not occur
5730 * locally should try to limit themselves to the return codes
5731 * documented above for lf_advlock and err_advlock.
5732 */
5733 #if 0
5734 /*
5735 *#
5736 *#% advlock vp U U U
5737 *#
5738 */
5739 struct vnop_advlock_args {
5740 struct vnodeop_desc *a_desc;
5741 vnode_t a_vp;
5742 caddr_t a_id;
5743 int a_op;
5744 struct flock *a_fl;
5745 int a_flags;
5746 vfs_context_t a_context;
5747 };
5748 #endif /* 0*/
errno_t
VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
{
	int _err;
	struct vnop_advlock_args a;

	a.a_desc = &vnop_advlock_desc;
	a.a_vp = vp;
	a.a_id = id;
	a.a_op = op;
	a.a_fl = fl;
	a.a_flags = flags;
	a.a_context = ctx;
	a.a_timeout = timeout;

	/* Disallow advisory locking on non-seekable vnodes */
	if (vnode_isfifo(vp)) {
		_err = err_advlock(&a);
	} else {
		/*
		 * Route the request: VLOCKLOCAL takes precedence (locking is
		 * enforced by the VFS layer itself); otherwise OFD locks and
		 * F_TRANSFER are rejected for filesystems doing their own
		 * (possibly remote) lock enforcement.
		 */
		if ((vp->v_flag & VLOCKLOCAL)) {
			/* Advisory locking done at this layer */
			_err = lf_advlock(&a);
		} else if (flags & F_OFD_LOCK) {
			/* Non-local locking doesn't work for OFD locks */
			_err = err_advlock(&a);
		} else if (op == F_TRANSFER) {
			/* Non-local locking doesn't have F_TRANSFER */
			_err = err_advlock(&a);
		} else {
			/* Advisory locking done by underlying filesystem */
			_err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
		}
		DTRACE_FSINFO(advlock, vnode_t, vp);
		/* On successful unlock of an flock/OFD-style lock, post NOTE_FUNLOCK. */
		if (op == F_UNLCK &&
		    (flags & (F_FLOCK | F_OFD_LOCK)) != 0) {
			post_event_if_success(vp, _err, NOTE_FUNLOCK);
		}
	}

	return _err;
}
5790
5791
5792
5793 #if 0
5794 /*
5795 *#
5796 *#% allocate vp L L L
5797 *#
5798 */
5799 struct vnop_allocate_args {
5800 struct vnodeop_desc *a_desc;
5801 vnode_t a_vp;
5802 off_t a_length;
5803 u_int32_t a_flags;
5804 off_t *a_bytesallocated;
5805 off_t a_offset;
5806 vfs_context_t a_context;
5807 };
5808
5809 #endif /* 0*/
5810 errno_t
VNOP_ALLOCATE(struct vnode * vp,off_t length,u_int32_t flags,off_t * bytesallocated,off_t offset,vfs_context_t ctx)5811 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5812 {
5813 int _err;
5814 struct vnop_allocate_args a;
5815
5816 a.a_desc = &vnop_allocate_desc;
5817 a.a_vp = vp;
5818 a.a_length = length;
5819 a.a_flags = flags;
5820 a.a_bytesallocated = bytesallocated;
5821 a.a_offset = offset;
5822 a.a_context = ctx;
5823
5824 _err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5825 DTRACE_FSINFO(allocate, vnode_t, vp);
5826 #if CONFIG_FSE
5827 if (_err == 0) {
5828 add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5829 }
5830 #endif
5831
5832 return _err;
5833 }
5834
5835 #if 0
5836 /*
5837 *#
5838 *#% pagein vp = = =
5839 *#
5840 */
5841 struct vnop_pagein_args {
5842 struct vnodeop_desc *a_desc;
5843 vnode_t a_vp;
5844 upl_t a_pl;
5845 upl_offset_t a_pl_offset;
5846 off_t a_f_offset;
5847 size_t a_size;
5848 int a_flags;
5849 vfs_context_t a_context;
5850 };
5851 #endif /* 0*/
5852 errno_t
VNOP_PAGEIN(struct vnode * vp,upl_t pl,upl_offset_t pl_offset,off_t f_offset,size_t size,int flags,vfs_context_t ctx)5853 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5854 {
5855 int _err;
5856 struct vnop_pagein_args a;
5857
5858 a.a_desc = &vnop_pagein_desc;
5859 a.a_vp = vp;
5860 a.a_pl = pl;
5861 a.a_pl_offset = pl_offset;
5862 a.a_f_offset = f_offset;
5863 a.a_size = size;
5864 a.a_flags = flags;
5865 a.a_context = ctx;
5866
5867 _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5868 DTRACE_FSINFO(pagein, vnode_t, vp);
5869
5870 return _err;
5871 }
5872
5873 #if 0
5874 /*
5875 *#
5876 *#% pageout vp = = =
5877 *#
5878 */
5879 struct vnop_pageout_args {
5880 struct vnodeop_desc *a_desc;
5881 vnode_t a_vp;
5882 upl_t a_pl;
5883 upl_offset_t a_pl_offset;
5884 off_t a_f_offset;
5885 size_t a_size;
5886 int a_flags;
5887 vfs_context_t a_context;
5888 };
5889
5890 #endif /* 0*/
5891 errno_t
VNOP_PAGEOUT(struct vnode * vp,upl_t pl,upl_offset_t pl_offset,off_t f_offset,size_t size,int flags,vfs_context_t ctx)5892 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5893 {
5894 int _err;
5895 struct vnop_pageout_args a;
5896
5897 a.a_desc = &vnop_pageout_desc;
5898 a.a_vp = vp;
5899 a.a_pl = pl;
5900 a.a_pl_offset = pl_offset;
5901 a.a_f_offset = f_offset;
5902 a.a_size = size;
5903 a.a_flags = flags;
5904 a.a_context = ctx;
5905
5906 _err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5907 DTRACE_FSINFO(pageout, vnode_t, vp);
5908
5909 post_event_if_success(vp, _err, NOTE_WRITE);
5910
5911 return _err;
5912 }
5913
5914 int
vn_remove(vnode_t dvp,vnode_t * vpp,struct nameidata * ndp,int32_t flags,struct vnode_attr * vap,vfs_context_t ctx)5915 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5916 {
5917 if (vnode_compound_remove_available(dvp)) {
5918 return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5919 } else {
5920 return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5921 }
5922 }
5923
5924 #if CONFIG_SEARCHFS
5925
5926 #if 0
5927 /*
5928 *#
5929 *#% searchfs vp L L L
5930 *#
5931 */
5932 struct vnop_searchfs_args {
5933 struct vnodeop_desc *a_desc;
5934 vnode_t a_vp;
5935 void *a_searchparams1;
5936 void *a_searchparams2;
5937 struct attrlist *a_searchattrs;
5938 uint32_t a_maxmatches;
5939 struct timeval *a_timelimit;
5940 struct attrlist *a_returnattrs;
5941 uint32_t *a_nummatches;
5942 uint32_t a_scriptcode;
5943 uint32_t a_options;
5944 struct uio *a_uio;
5945 struct searchstate *a_searchstate;
5946 vfs_context_t a_context;
5947 };
5948
5949 #endif /* 0*/
5950 errno_t
VNOP_SEARCHFS(struct vnode * vp,void * searchparams1,void * searchparams2,struct attrlist * searchattrs,uint32_t maxmatches,struct timeval * timelimit,struct attrlist * returnattrs,uint32_t * nummatches,uint32_t scriptcode,uint32_t options,struct uio * uio,struct searchstate * searchstate,vfs_context_t ctx)5951 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5952 {
5953 int _err;
5954 struct vnop_searchfs_args a;
5955
5956 a.a_desc = &vnop_searchfs_desc;
5957 a.a_vp = vp;
5958 a.a_searchparams1 = searchparams1;
5959 a.a_searchparams2 = searchparams2;
5960 a.a_searchattrs = searchattrs;
5961 a.a_maxmatches = maxmatches;
5962 a.a_timelimit = timelimit;
5963 a.a_returnattrs = returnattrs;
5964 a.a_nummatches = nummatches;
5965 a.a_scriptcode = scriptcode;
5966 a.a_options = options;
5967 a.a_uio = uio;
5968 a.a_searchstate = searchstate;
5969 a.a_context = ctx;
5970
5971 _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5972 DTRACE_FSINFO(searchfs, vnode_t, vp);
5973
5974 return _err;
5975 }
5976 #endif /* CONFIG_SEARCHFS */
5977
5978 #if 0
5979 /*
5980 *#
5981 *#% copyfile fvp U U U
5982 *#% copyfile tdvp L U U
5983 *#% copyfile tvp X U U
5984 *#
5985 */
5986 struct vnop_copyfile_args {
5987 struct vnodeop_desc *a_desc;
5988 vnode_t a_fvp;
5989 vnode_t a_tdvp;
5990 vnode_t a_tvp;
5991 struct componentname *a_tcnp;
5992 int a_mode;
5993 int a_flags;
5994 vfs_context_t a_context;
5995 };
5996 #endif /* 0*/
5997 errno_t
VNOP_COPYFILE(struct vnode * fvp,struct vnode * tdvp,struct vnode * tvp,struct componentname * tcnp,int mode,int flags,vfs_context_t ctx)5998 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5999 int mode, int flags, vfs_context_t ctx)
6000 {
6001 int _err;
6002 struct vnop_copyfile_args a;
6003 a.a_desc = &vnop_copyfile_desc;
6004 a.a_fvp = fvp;
6005 a.a_tdvp = tdvp;
6006 a.a_tvp = tvp;
6007 a.a_tcnp = tcnp;
6008 a.a_mode = mode;
6009 a.a_flags = flags;
6010 a.a_context = ctx;
6011 _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
6012 DTRACE_FSINFO(copyfile, vnode_t, fvp);
6013 return _err;
6014 }
6015
6016 #if 0
6017 struct vnop_clonefile_args {
6018 struct vnodeop_desc *a_desc;
6019 vnode_t a_fvp;
6020 vnode_t a_dvp;
6021 vnode_t *a_vpp;
6022 struct componentname *a_cnp;
6023 struct vnode_attr *a_vap;
6024 uint32_t a_flags;
6025 vfs_context_t a_context;
6026 int (*a_dir_clone_authorizer)( /* Authorization callback */
6027 struct vnode_attr *vap, /* attribute to be authorized */
6028 kauth_action_t action, /* action for which attribute is to be authorized */
6029 struct vnode_attr *dvap, /* target directory attributes */
6030 vnode_t sdvp, /* source directory vnode pointer (optional) */
6031 mount_t mp, /* mount point of filesystem */
6032 dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */
6033 uint32_t flags; /* value passed in a_flags to the VNOP */
6034 vfs_context_t ctx, /* As passed to VNOP */
6035 void *reserved); /* Always NULL */
6036 void *a_reserved; /* Currently unused */
6037 };
6038 #endif /* 0 */
6039
6040 errno_t
VNOP_CLONEFILE(vnode_t fvp,vnode_t dvp,vnode_t * vpp,struct componentname * cnp,struct vnode_attr * vap,uint32_t flags,vfs_context_t ctx)6041 VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
6042 struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
6043 vfs_context_t ctx)
6044 {
6045 int _err;
6046 struct vnop_clonefile_args a;
6047 a.a_desc = &vnop_clonefile_desc;
6048 a.a_fvp = fvp;
6049 a.a_dvp = dvp;
6050 a.a_vpp = vpp;
6051 a.a_cnp = cnp;
6052 a.a_vap = vap;
6053 a.a_flags = flags;
6054 a.a_context = ctx;
6055
6056 if (vnode_vtype(fvp) == VDIR) {
6057 a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
6058 } else {
6059 a.a_dir_clone_authorizer = NULL;
6060 }
6061
6062 _err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);
6063
6064 if (_err == 0 && *vpp) {
6065 DTRACE_FSINFO(clonefile, vnode_t, *vpp);
6066 if (kdebug_enable) {
6067 kdebug_lookup(*vpp, cnp);
6068 }
6069 }
6070
6071 post_event_if_success(dvp, _err, NOTE_WRITE);
6072
6073 return _err;
6074 }
6075
6076 errno_t
VNOP_GETXATTR(vnode_t vp,const char * name,uio_t uio,size_t * size,int options,vfs_context_t ctx)6077 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
6078 {
6079 struct vnop_getxattr_args a;
6080 int error;
6081
6082 a.a_desc = &vnop_getxattr_desc;
6083 a.a_vp = vp;
6084 a.a_name = name;
6085 a.a_uio = uio;
6086 a.a_size = size;
6087 a.a_options = options;
6088 a.a_context = ctx;
6089
6090 error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
6091 DTRACE_FSINFO(getxattr, vnode_t, vp);
6092
6093 return error;
6094 }
6095
6096 errno_t
VNOP_SETXATTR(vnode_t vp,const char * name,uio_t uio,int options,vfs_context_t ctx)6097 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
6098 {
6099 struct vnop_setxattr_args a;
6100 int error;
6101
6102 a.a_desc = &vnop_setxattr_desc;
6103 a.a_vp = vp;
6104 a.a_name = name;
6105 a.a_uio = uio;
6106 a.a_options = options;
6107 a.a_context = ctx;
6108
6109 error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
6110 DTRACE_FSINFO(setxattr, vnode_t, vp);
6111
6112 if (error == 0) {
6113 vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
6114 }
6115
6116 post_event_if_success(vp, error, NOTE_ATTRIB);
6117
6118 return error;
6119 }
6120
6121 errno_t
VNOP_REMOVEXATTR(vnode_t vp,const char * name,int options,vfs_context_t ctx)6122 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
6123 {
6124 struct vnop_removexattr_args a;
6125 int error;
6126
6127 a.a_desc = &vnop_removexattr_desc;
6128 a.a_vp = vp;
6129 a.a_name = name;
6130 a.a_options = options;
6131 a.a_context = ctx;
6132
6133 error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
6134 DTRACE_FSINFO(removexattr, vnode_t, vp);
6135
6136 post_event_if_success(vp, error, NOTE_ATTRIB);
6137
6138 return error;
6139 }
6140
6141 errno_t
VNOP_LISTXATTR(vnode_t vp,uio_t uio,size_t * size,int options,vfs_context_t ctx)6142 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
6143 {
6144 struct vnop_listxattr_args a;
6145 int error;
6146
6147 a.a_desc = &vnop_listxattr_desc;
6148 a.a_vp = vp;
6149 a.a_uio = uio;
6150 a.a_size = size;
6151 a.a_options = options;
6152 a.a_context = ctx;
6153
6154 error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
6155 DTRACE_FSINFO(listxattr, vnode_t, vp);
6156
6157 return error;
6158 }
6159
6160
6161 #if 0
6162 /*
6163 *#
6164 *#% blktooff vp = = =
6165 *#
6166 */
6167 struct vnop_blktooff_args {
6168 struct vnodeop_desc *a_desc;
6169 vnode_t a_vp;
6170 daddr64_t a_lblkno;
6171 off_t *a_offset;
6172 };
6173 #endif /* 0*/
6174 errno_t
VNOP_BLKTOOFF(struct vnode * vp,daddr64_t lblkno,off_t * offset)6175 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
6176 {
6177 int _err;
6178 struct vnop_blktooff_args a;
6179
6180 a.a_desc = &vnop_blktooff_desc;
6181 a.a_vp = vp;
6182 a.a_lblkno = lblkno;
6183 a.a_offset = offset;
6184
6185 _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
6186 DTRACE_FSINFO(blktooff, vnode_t, vp);
6187
6188 return _err;
6189 }
6190
6191 #if 0
6192 /*
6193 *#
6194 *#% offtoblk vp = = =
6195 *#
6196 */
6197 struct vnop_offtoblk_args {
6198 struct vnodeop_desc *a_desc;
6199 vnode_t a_vp;
6200 off_t a_offset;
6201 daddr64_t *a_lblkno;
6202 };
6203 #endif /* 0*/
6204 errno_t
VNOP_OFFTOBLK(struct vnode * vp,off_t offset,daddr64_t * lblkno)6205 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
6206 {
6207 int _err;
6208 struct vnop_offtoblk_args a;
6209
6210 a.a_desc = &vnop_offtoblk_desc;
6211 a.a_vp = vp;
6212 a.a_offset = offset;
6213 a.a_lblkno = lblkno;
6214
6215 _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
6216 DTRACE_FSINFO(offtoblk, vnode_t, vp);
6217
6218 return _err;
6219 }
6220
6221 #if 0
6222 /*
6223 *#
 *#% verify       vp      L L L
6225 *#
6226 */
6227 struct vnop_verify_args {
6228 struct vnodeop_desc *a_desc;
6229 vnode_t a_vp;
6230 off_t a_foffset;
6231 char *a_buf;
6232 size_t a_bufsize;
6233 size_t *a_verifyblksize;
6234 void **a_verify_ctxp;
6235 int a_flags;
6236 vfs_context_t a_context;
	vnode_verify_kind_t *a_verifykind;
6238 };
6239 #endif
6240
errno_t
VNOP_VERIFY(struct vnode *vp, off_t foffset, uint8_t *buf, size_t bufsize,
    size_t *verify_block_size, void **verify_ctxp, vnode_verify_flags_t flags,
    vfs_context_t ctx, vnode_verify_kind_t *verify_kind)
{
	int _err;
	struct vnop_verify_args a;

	/* Sanity-check that the arguments each flag requires were supplied. */
	assert(!(flags & VNODE_VERIFY_CONTEXT_ALLOC) || ((foffset >= 0) && bufsize));
	assert(!(flags & (VNODE_VERIFY_CONTEXT_FREE | VNODE_VERIFY_WITH_CONTEXT)) || verify_ctxp);
	assert(!(flags & (VNODE_VERIFY_PRECOMPUTED | VNODE_VERIFY_WITH_CONTEXT)) ||
	    ((foffset >= 0) && buf && bufsize));

	/* Verification without a caller-supplied context runs under the kernel context. */
	if (ctx == NULL) {
		ctx = vfs_context_kernel();
	}
	a.a_desc = &vnop_verify_desc;
	a.a_vp = vp;
	a.a_foffset = foffset;
	a.a_buf = buf;
	a.a_bufsize = bufsize;
	a.a_verifyblksize = verify_block_size;
	a.a_flags = flags;
	a.a_verify_ctxp = verify_ctxp;
	a.a_context = ctx;
	/* Default the out-parameter before the VNOP so a filesystem that
	 * ignores it still leaves a well-defined value behind. */
	if (verify_kind != NULL) {
		*verify_kind = VK_HASH_NONE;
	}
	a.a_verifykind = verify_kind;

	_err = (*vp->v_op[vnop_verify_desc.vdesc_offset])(&a);
	DTRACE_FSINFO(verify, vnode_t, vp);

	/* It is not an error for a filesystem to not support this VNOP */
	if (_err == ENOTSUP) {
		/* A size-only query reports "no verification" via block size 0. */
		if (!buf && verify_block_size) {
			*verify_block_size = 0;
		}

		_err = 0;
	}

	return _err;
}
6285
6286 #if 0
6287 /*
6288 *#
6289 *#% blockmap vp L L L
6290 *#
6291 */
6292 struct vnop_blockmap_args {
6293 struct vnodeop_desc *a_desc;
6294 vnode_t a_vp;
6295 off_t a_foffset;
6296 size_t a_size;
6297 daddr64_t *a_bpn;
6298 size_t *a_run;
6299 void *a_poff;
6300 int a_flags;
6301 vfs_context_t a_context;
6302 };
6303 #endif /* 0*/
6304 errno_t
VNOP_BLOCKMAP(struct vnode * vp,off_t foffset,size_t size,daddr64_t * bpn,size_t * run,void * poff,int flags,vfs_context_t ctx)6305 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
6306 {
6307 int _err;
6308 struct vnop_blockmap_args a;
6309 size_t localrun = 0;
6310
6311 if (ctx == NULL) {
6312 ctx = vfs_context_current();
6313 }
6314 a.a_desc = &vnop_blockmap_desc;
6315 a.a_vp = vp;
6316 a.a_foffset = foffset;
6317 a.a_size = size;
6318 a.a_bpn = bpn;
6319 a.a_run = &localrun;
6320 a.a_poff = poff;
6321 a.a_flags = flags;
6322 a.a_context = ctx;
6323
6324 _err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
6325 DTRACE_FSINFO(blockmap, vnode_t, vp);
6326
6327 /*
6328 * We used a local variable to request information from the underlying
6329 * filesystem about the length of the I/O run in question. If
6330 * we get malformed output from the filesystem, we cap it to the length
6331 * requested, at most. Update 'run' on the way out.
6332 */
6333 if (_err == 0) {
6334 if (localrun > size) {
6335 localrun = size;
6336 }
6337
6338 if (run) {
6339 *run = localrun;
6340 }
6341 }
6342
6343 return _err;
6344 }
6345
6346 #if 0
6347 struct vnop_strategy_args {
6348 struct vnodeop_desc *a_desc;
6349 struct buf *a_bp;
6350 };
6351
6352 #endif /* 0*/
6353 errno_t
VNOP_STRATEGY(struct buf * bp)6354 VNOP_STRATEGY(struct buf *bp)
6355 {
6356 int _err;
6357 struct vnop_strategy_args a;
6358 vnode_t vp = buf_vnode(bp);
6359 a.a_desc = &vnop_strategy_desc;
6360 a.a_bp = bp;
6361 _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
6362 DTRACE_FSINFO(strategy, vnode_t, vp);
6363 return _err;
6364 }
6365
6366 #if 0
6367 struct vnop_bwrite_args {
6368 struct vnodeop_desc *a_desc;
6369 buf_t a_bp;
6370 };
6371 #endif /* 0*/
6372 errno_t
VNOP_BWRITE(struct buf * bp)6373 VNOP_BWRITE(struct buf *bp)
6374 {
6375 int _err;
6376 struct vnop_bwrite_args a;
6377 vnode_t vp = buf_vnode(bp);
6378 a.a_desc = &vnop_bwrite_desc;
6379 a.a_bp = bp;
6380 _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
6381 DTRACE_FSINFO(bwrite, vnode_t, vp);
6382 return _err;
6383 }
6384
6385 #if 0
6386 struct vnop_kqfilt_add_args {
6387 struct vnodeop_desc *a_desc;
6388 struct vnode *a_vp;
6389 struct knote *a_kn;
6390 vfs_context_t a_context;
6391 };
6392 #endif
6393 errno_t
VNOP_KQFILT_ADD(struct vnode * vp,struct knote * kn,vfs_context_t ctx)6394 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
6395 {
6396 int _err;
6397 struct vnop_kqfilt_add_args a;
6398
6399 a.a_desc = VDESC(vnop_kqfilt_add);
6400 a.a_vp = vp;
6401 a.a_kn = kn;
6402 a.a_context = ctx;
6403
6404 _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
6405 DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
6406
6407 return _err;
6408 }
6409
6410 #if 0
6411 struct vnop_kqfilt_remove_args {
6412 struct vnodeop_desc *a_desc;
6413 struct vnode *a_vp;
6414 uintptr_t a_ident;
6415 vfs_context_t a_context;
6416 };
6417 #endif
6418 errno_t
VNOP_KQFILT_REMOVE(struct vnode * vp,uintptr_t ident,vfs_context_t ctx)6419 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
6420 {
6421 int _err;
6422 struct vnop_kqfilt_remove_args a;
6423
6424 a.a_desc = VDESC(vnop_kqfilt_remove);
6425 a.a_vp = vp;
6426 a.a_ident = ident;
6427 a.a_context = ctx;
6428
6429 _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
6430 DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
6431
6432 return _err;
6433 }
6434
6435 errno_t
VNOP_MONITOR(vnode_t vp,uint32_t events,uint32_t flags,void * handle,vfs_context_t ctx)6436 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
6437 {
6438 int _err;
6439 struct vnop_monitor_args a;
6440
6441 a.a_desc = VDESC(vnop_monitor);
6442 a.a_vp = vp;
6443 a.a_events = events;
6444 a.a_flags = flags;
6445 a.a_handle = handle;
6446 a.a_context = ctx;
6447
6448 _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
6449 DTRACE_FSINFO(monitor, vnode_t, vp);
6450
6451 return _err;
6452 }
6453
6454 #if 0
6455 struct vnop_setlabel_args {
6456 struct vnodeop_desc *a_desc;
6457 struct vnode *a_vp;
6458 struct label *a_vl;
6459 vfs_context_t a_context;
6460 };
6461 #endif
6462 errno_t
VNOP_SETLABEL(struct vnode * vp,struct label * label,vfs_context_t ctx)6463 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
6464 {
6465 int _err;
6466 struct vnop_setlabel_args a;
6467
6468 a.a_desc = VDESC(vnop_setlabel);
6469 a.a_vp = vp;
6470 a.a_vl = label;
6471 a.a_context = ctx;
6472
6473 _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
6474 DTRACE_FSINFO(setlabel, vnode_t, vp);
6475
6476 return _err;
6477 }
6478
6479
6480 #if NAMEDSTREAMS
6481 /*
 * Get a named stream
6483 */
6484 errno_t
VNOP_GETNAMEDSTREAM(vnode_t vp,vnode_t * svpp,const char * name,enum nsoperation operation,int flags,vfs_context_t ctx)6485 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
6486 {
6487 int _err;
6488 struct vnop_getnamedstream_args a;
6489
6490 a.a_desc = &vnop_getnamedstream_desc;
6491 a.a_vp = vp;
6492 a.a_svpp = svpp;
6493 a.a_name = name;
6494 a.a_operation = operation;
6495 a.a_flags = flags;
6496 a.a_context = ctx;
6497
6498 _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
6499 DTRACE_FSINFO(getnamedstream, vnode_t, vp);
6500 return _err;
6501 }
6502
6503 /*
 * Create a named stream
6505 */
6506 errno_t
VNOP_MAKENAMEDSTREAM(vnode_t vp,vnode_t * svpp,const char * name,int flags,vfs_context_t ctx)6507 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
6508 {
6509 int _err;
6510 struct vnop_makenamedstream_args a;
6511
6512 a.a_desc = &vnop_makenamedstream_desc;
6513 a.a_vp = vp;
6514 a.a_svpp = svpp;
6515 a.a_name = name;
6516 a.a_flags = flags;
6517 a.a_context = ctx;
6518
6519 _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
6520 DTRACE_FSINFO(makenamedstream, vnode_t, vp);
6521 return _err;
6522 }
6523
6524
6525 /*
 * Remove a named stream
6527 */
6528 errno_t
VNOP_REMOVENAMEDSTREAM(vnode_t vp,vnode_t svp,const char * name,int flags,vfs_context_t ctx)6529 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6530 {
6531 int _err;
6532 struct vnop_removenamedstream_args a;
6533
6534 a.a_desc = &vnop_removenamedstream_desc;
6535 a.a_vp = vp;
6536 a.a_svp = svp;
6537 a.a_name = name;
6538 a.a_flags = flags;
6539 a.a_context = ctx;
6540
6541 _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6542 DTRACE_FSINFO(removenamedstream, vnode_t, vp);
6543 return _err;
6544 }
6545 #endif
6546