1 /*
2 * Copyright (c) 2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 /*-
25 * Portions Copyright (c) 1992, 1993
26 * The Regents of the University of California. All rights reserved.
27 *
28 * This code is derived from software contributed to Berkeley by
29 * John Heidemann of the UCLA Ficus project.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 4. Neither the name of the University nor the names of its contributors
40 * may be used to endorse or promote products derived from this software
41 * without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 *
55 * @(#)null_vnops.c 8.6 (Berkeley) 5/27/95
56 *
57 * Ancestors:
58 * @(#)lofs_vnops.c 1.2 (Berkeley) 6/18/92
59 * ...and...
60 * @(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
61 *
62 * $FreeBSD$
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/conf.h>
68 #include <sys/kernel.h>
69 #include <sys/lock.h>
70 #include <sys/malloc.h>
71 #include <sys/mount.h>
72 #include <sys/mount_internal.h>
73 #include <sys/namei.h>
74 #include <sys/sysctl.h>
75 #include <sys/vnode.h>
76 #include <sys/vnode_internal.h>
77 #include <sys/xattr.h>
78 #include <sys/ubc.h>
79 #include <sys/types.h>
80 #include <sys/dirent.h>
81
82 #include "bindfs.h"
83
84 #define BIND_ROOT_INO 2
85
86 vop_t * bindfs_vnodeop_p = NULL;
87
static int
bindfs_default(__unused struct vnop_generic_args * args)
{
	/*
	 * Catch-all entry for any vnop bindfs does not explicitly implement.
	 * bindfs is a read-only pass-through layer, so unhandled operations
	 * are simply unsupported.
	 */
	return ENOTSUP;
}
93
static int
bindfs_getattr(struct vnop_getattr_args * args)
{
	/*
	 * Forward the getattr to the lower (bound) vnode, then patch the
	 * answers where bindfs must present its own identity:
	 *  - fsid/fsid64 must name this mount, not the underlying filesystem;
	 *  - the mount root reports the fixed inode number BIND_ROOT_INO for
	 *    fileid/linkid/parentid, and immediate children of the root
	 *    report BIND_ROOT_INO as their parentid.
	 */
	int error;
	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);

	struct vnode * lowervp = BINDVPTOLOWERVP(args->a_vp);

	/* take an iocount on the lower vnode for the duration of the call */
	error = vnode_getwithref(lowervp);
	if (error == 0) {
		error = VNOP_GETATTR(lowervp, args->a_vap, args->a_context);
		vnode_put(lowervp);

		if (error == 0) {
			if (VATTR_IS_ACTIVE(args->a_vap, va_fsid)) {
				/* fix up fsid so it doesn't say the underlying fs*/
				VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(vnode_mount(args->a_vp))->f_fsid.val[0]);
			}
			if (VATTR_IS_ACTIVE(args->a_vap, va_fsid64)) {
				/* fix up fsid so it doesn't say the underlying fs*/
				VATTR_RETURN(args->a_vap, va_fsid64, vfs_statfs(vnode_mount(args->a_vp))->f_fsid);
			}
			struct vnode * parent = vnode_parent(args->a_vp);
			if (vnode_isvroot(args->a_vp)) {
				// We can use the lower answers for most questions about the root vnode but need to fix up a few things
				if (VATTR_IS_ACTIVE(args->a_vap, va_fileid)) {
					VATTR_RETURN(args->a_vap, va_fileid, BIND_ROOT_INO);
				}
				if (VATTR_IS_ACTIVE(args->a_vap, va_linkid)) {
					VATTR_RETURN(args->a_vap, va_linkid, BIND_ROOT_INO);
				}
				if (VATTR_IS_ACTIVE(args->a_vap, va_parentid)) {
					// The parent of the root is itself
					VATTR_RETURN(args->a_vap, va_parentid, BIND_ROOT_INO);
				}
			} else if (parent != NULL && vnode_isvroot(parent)) {
				if (VATTR_IS_ACTIVE(args->a_vap, va_parentid)) {
					// This vnode's parent is the root.
					VATTR_RETURN(args->a_vap, va_parentid, BIND_ROOT_INO);
				}
			}
		}
	}

	return error;
}
140
141 static int
bindfs_open(struct vnop_open_args * args)142 bindfs_open(struct vnop_open_args * args)
143 {
144 int error;
145 struct vnode *vp, *lvp;
146
147 BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
148
149 vp = args->a_vp;
150 lvp = BINDVPTOLOWERVP(vp);
151 error = vnode_getwithref(lvp);
152 if (error == 0) {
153 error = VNOP_OPEN(lvp, args->a_mode, args->a_context);
154 vnode_put(lvp);
155 }
156
157 return error;
158 }
159
160 static int
bindfs_close(struct vnop_close_args * args)161 bindfs_close(struct vnop_close_args * args)
162 {
163 int error;
164 struct vnode *vp, *lvp;
165
166 BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
167
168 vp = args->a_vp;
169 lvp = BINDVPTOLOWERVP(vp);
170
171 error = vnode_getwithref(lvp);
172 if (error == 0) {
173 error = VNOP_CLOSE(lvp, args->a_fflag, args->a_context);
174 vnode_put(lvp);
175 }
176 return error;
177 }
178
179 /*
180 * We have to carry on the locking protocol on the bind layer vnodes
181 * as we progress through the tree. We also have to enforce read-only
182 * if this layer is mounted read-only.
183 */
static int
bind_lookup(struct vnop_lookup_args * ap)
{
	/*
	 * Look up a name in a bindfs directory.  "." and ".." are answered
	 * entirely in the bindfs layer; all other names are forwarded to the
	 * lower directory vnode and the result is wrapped in a bind node.
	 * Modifying nameiops are rejected because bindfs is read-only.
	 */
	struct componentname * cnp = ap->a_cnp;
	struct vnode * dvp = ap->a_dvp;
	struct vnode *vp, *ldvp, *lvp;
	struct mount * mp;
	struct bind_mount * bind_mp;
	int error;

	BINDFSDEBUG("%s parent: %p component: %.*s\n", __FUNCTION__, ap->a_dvp, cnp->cn_namelen, cnp->cn_nameptr);

	mp = vnode_mount(dvp);
	/* rename and delete are not allowed. this is a read only file system */
	if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME || cnp->cn_nameiop == CREATE) {
		return EROFS;
	}
	bind_mp = MOUNTTOBINDMOUNT(mp);

	// . and .. handling
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			/* "." resolves to the directory itself */
			vp = dvp;
		} else if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			/* ".." of the mount root is the root itself */
			vp = (vnode_isvroot(dvp)) ? dvp : vnode_parent(dvp);
		} else {
			/* some other name starting with '.' -- normal lookup */
			goto notdot;
		}

		/* vnode_parent() may return NULL; report ENOENT in that case */
		error = vp ? vnode_get(vp) : ENOENT;

		if (error == 0) {
			*ap->a_vpp = vp;
		}

		return error;
	}

notdot:
	ldvp = BINDVPTOLOWERVP(dvp);
	vp = lvp = NULL;

	/*
	 * Hold ldvp. The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation.
	 */
	error = vnode_getwithref(ldvp);
	if (error) {
		return error;
	}

	error = VNOP_LOOKUP(ldvp, &lvp, cnp, ap->a_context);

	vnode_put(ldvp);

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			/* lower lookup returned the directory itself */
			vp = dvp;
			error = vnode_get(vp);
		} else {
			/* wrap the lower vnode in a bindfs node (hashed/shared) */
			error = bind_nodeget(mp, lvp, dvp, &vp, cnp, 0);
		}
		if (error == 0) {
			*ap->a_vpp = vp;
		}
		/* if we got lvp, drop the iocount from VNOP_LOOKUP */
		if (lvp != NULL) {
			vnode_put(lvp);
		}
	}

	return error;
}
257
258 /*
259 * Don't think this needs to do anything
260 */
static int
bind_inactive(__unused struct vnop_inactive_args * ap)
{
	/* No per-vnode state to tear down at inactive time; everything is
	 * released in bind_reclaim(). */
	BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	return 0;
}
268
static int
bind_reclaim(struct vnop_reclaim_args * ap)
{
	/*
	 * Final teardown of a bindfs vnode: detach it from the node hash,
	 * drop the usecount it held on the lower vnode, sever the fsnode
	 * link, and free the bind_node.  Order matters: the hash entry is
	 * removed before the lower reference is released so no one can find
	 * a node whose lower vnode is going away.
	 */
	struct vnode * vp;
	struct bind_node * xp;
	struct vnode * lowervp;

	BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp = ap->a_vp;

	xp = VTOBIND(vp);
	lowervp = xp->bind_lowervp;

	vnode_removefsref(vp);

	bind_hashrem(xp);
	/* drop the reference taken on the lower vnode at node creation */
	vnode_rele(lowervp);

	cache_purge(vp);
	vnode_clearfsnode(vp);

	kfree_type(struct bind_node, xp);

	return 0;
}
295
296 /* Get dirent length padded to 4 byte alignment */
297 #define DIRENT_LEN(namelen) \
298 ((sizeof(struct dirent) + (namelen + 1) - (__DARWIN_MAXNAMLEN + 1) + 3) & ~3)
299
300 /* Get the end of this dirent */
301 #define DIRENT_END(dep) \
302 (((char *)(dep)) + (dep)->d_reclen - 1)
303
304 static int
bindfs_readdir(struct vnop_readdir_args * ap)305 bindfs_readdir(struct vnop_readdir_args * ap)
306 {
307 struct vnode *vp, *lvp, *dvp;
308 int error;
309 uio_t uio = ap->a_uio;
310
311 BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
312 /* assumption is that any vp that comes through here had to go through lookup
313 */
314
315 if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) {
316 return EINVAL;
317 }
318
319 vp = ap->a_vp;
320 dvp = vnode_parent(vp);
321 lvp = BINDVPTOLOWERVP(vp);
322 error = vnode_getwithref(lvp);
323 if (error != 0) {
324 goto lb_end;
325 }
326
327 if (vnode_isvroot(vp) || (dvp != NULL && vnode_isvroot(dvp))) {
328 size_t bufsize;
329 void * bufptr;
330 uio_t auio;
331 struct dirent *dep;
332 size_t bytesread;
333 bufsize = 3 * MIN((user_size_t)uio_resid(uio), 87371u) / 8;
334 bufptr = kalloc_data(bufsize, Z_WAITOK);
335 if (bufptr == NULL) {
336 return ENOMEM;
337 }
338 auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
339 uio_addiov(auio, (uintptr_t)bufptr, bufsize);
340 uio_setoffset(auio, uio_offset(uio));
341 error = VNOP_READDIR(lvp, auio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ap->a_context);
342 vnode_put(lvp);
343 if (error != 0) {
344 goto lb_end;
345 }
346
347 dep = (struct dirent *)bufptr;
348 bytesread = bufsize - uio_resid(auio);
349 while (error == 0 && (char *)dep < ((char *)bufptr + bytesread)) {
350 if (DIRENT_END(dep) > ((char *)bufptr + bytesread) ||
351 DIRENT_LEN(dep->d_namlen) > dep->d_reclen) {
352 printf("%s: %s: Bad dirent received from directory %s\n", __func__,
353 vfs_statfs(vnode_mount(vp))->f_mntonname,
354 vp->v_name ? vp->v_name : "<unknown>");
355 error = EIO;
356 break;
357 }
358 if (dep->d_name[0] == '.') {
359 /* re-write the inode number for the mount root */
360 /* if vp is the mount root then . = 2 and .. = 2 */
361 /* if the parent of vp is the mount root then .. = 2 */
362 if ((vnode_isvroot(vp) && dep->d_namlen == 1) ||
363 (dep->d_namlen == 2 && dep->d_name[1] == '.')) {
364 dep->d_ino = BIND_ROOT_INO;
365 }
366 }
367 /* Copy entry64 to user's buffer. */
368 error = uiomove((caddr_t)dep, dep->d_reclen, uio);
369 /* Move to next entry. */
370 dep = (struct dirent *)((char *)dep + dep->d_reclen);
371 }
372 /* Update the real offset using the offset we got from VNOP_READDIR. */
373 if (error == 0) {
374 uio_setoffset(uio, uio_offset(auio));
375 }
376 uio_free(auio);
377 kfree_data(bufptr, bufsize);
378 } else {
379 error = VNOP_READDIR(lvp, ap->a_uio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ap->a_context);
380 vnode_put(lvp);
381 }
382
383 lb_end:
384 return error;
385 }
386
387 static int
bindfs_readlink(struct vnop_readlink_args * ap)388 bindfs_readlink(struct vnop_readlink_args * ap)
389 {
390 BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
391 int error;
392 struct vnode *vp, *lvp;
393
394 vp = ap->a_vp;
395 lvp = BINDVPTOLOWERVP(vp);
396
397 error = vnode_getwithref(lvp);
398 if (error == 0) {
399 error = VNOP_READLINK(lvp, ap->a_uio, ap->a_context);
400 vnode_put(lvp);
401
402 if (error) {
403 printf("bindfs: readlink failed: %d\n", error);
404 }
405 }
406
407 return error;
408 }
409
static int
bindfs_pathconf(__unused struct vnop_pathconf_args * args)
{
	/* No pathconf variables are supported on this layer. */
	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
	return EINVAL;
}
416
static int
bindfs_fsync(__unused struct vnop_fsync_args * args)
{
	/* bindfs is read-only, so there is never anything to flush. */
	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
	return 0;
}
423
424 static int
bindfs_mmap(struct vnop_mmap_args * args)425 bindfs_mmap(struct vnop_mmap_args * args)
426 {
427 int error;
428 struct vnode *vp, *lvp;
429
430 BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
431
432 vp = args->a_vp;
433 lvp = BINDVPTOLOWERVP(vp);
434 error = vnode_getwithref(lvp);
435 if (error == 0) {
436 error = VNOP_MMAP(lvp, args->a_fflags, args->a_context);
437 vnode_put(lvp);
438 }
439
440 return error;
441 }
442
443 static int
bindfs_mnomap(struct vnop_mnomap_args * args)444 bindfs_mnomap(struct vnop_mnomap_args * args)
445 {
446 int error;
447 struct vnode *vp, *lvp;
448
449 BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
450
451 vp = args->a_vp;
452 lvp = BINDVPTOLOWERVP(vp);
453 error = vnode_getwithref(lvp);
454 if (error == 0) {
455 error = VNOP_MNOMAP(lvp, args->a_context);
456 vnode_put(lvp);
457 }
458
459 return error;
460 }
461
462 static int
bindfs_getxattr(struct vnop_getxattr_args * args)463 bindfs_getxattr(struct vnop_getxattr_args * args)
464 {
465 int error;
466 struct vnode *vp, *lvp;
467
468 BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
469
470 vp = args->a_vp;
471 lvp = BINDVPTOLOWERVP(vp);
472 error = vnode_getwithref(lvp);
473 if (error == 0) {
474 error = VNOP_GETXATTR(lvp, args->a_name, args->a_uio, args->a_size, args->a_options, args->a_context);
475 vnode_put(lvp);
476 }
477
478 return error;
479 }
480
481 static int
bindfs_listxattr(struct vnop_listxattr_args * args)482 bindfs_listxattr(struct vnop_listxattr_args * args)
483 {
484 int error;
485 struct vnode *vp, *lvp;
486
487 BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
488
489 vp = args->a_vp;
490 lvp = BINDVPTOLOWERVP(vp);
491 error = vnode_getwithref(lvp);
492 if (error == 0) {
493 error = VNOP_LISTXATTR(lvp, args->a_uio, args->a_size, args->a_options, args->a_context);
494 vnode_put(lvp);
495 }
496
497 return error;
498 }
499
500 /* relies on v1 paging */
static int
bindfs_pagein(struct vnop_pagein_args * ap)
{
	/*
	 * Service a page-in against the bindfs vnode by reading the requested
	 * range from the lower vnode directly into the caller's UPL, zeroing
	 * any tail that could not be read, then committing the filled pages
	 * and aborting the remainder.  Only regular files are supported.
	 */
	int error = EIO;
	struct vnode *vp, *lvp;

	BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp = ap->a_vp;
	lvp = BINDVPTOLOWERVP(vp);

	if (vnode_vtype(vp) != VREG) {
		return ENOTSUP;
	}

	/*
	 * Ask VM/UBC/VFS to do our bidding
	 */
	if (vnode_getwithvid(lvp, BINDVPTOLOWERVID(vp)) == 0) {
		vm_offset_t ioaddr;
		uio_t auio;
		kern_return_t kret;
		off_t bytes_to_commit;
		off_t lowersize;
		upl_t upl = ap->a_pl;
		user_ssize_t bytes_remaining = 0;

		auio = uio_create(1, ap->a_f_offset, UIO_SYSSPACE, UIO_READ);
		if (auio == NULL) {
			error = EIO;
			goto exit_no_unmap;
		}

		/* map the UPL so the read can land directly in its pages */
		kret = ubc_upl_map(upl, &ioaddr);
		if (KERN_SUCCESS != kret) {
			panic("bindfs_pagein: ubc_upl_map() failed with (%d)", kret);
		}

		ioaddr += ap->a_pl_offset;

		error = uio_addiov(auio, (user_addr_t)ioaddr, ap->a_size);
		if (error) {
			goto exit;
		}

		/* keep the upper ubc size in sync with the lower file */
		lowersize = ubc_getsize(lvp);
		if (lowersize != ubc_getsize(vp)) {
			(void)ubc_setsize(vp, lowersize); /* ignore failures, nothing can be done */
		}

		error = VNOP_READ(lvp, auio, ((ap->a_flags & UPL_IOSYNC) ? IO_SYNC : 0), ap->a_context);

		bytes_remaining = uio_resid(auio);
		if (bytes_remaining > 0 && bytes_remaining <= (user_ssize_t)ap->a_size) {
			/* zero bytes that weren't read in to the upl */
			bzero((void*)((uintptr_t)(ioaddr + ap->a_size - bytes_remaining)), (size_t) bytes_remaining);
		}

	exit:
		kret = ubc_upl_unmap(upl);
		if (KERN_SUCCESS != kret) {
			panic("bindfs_pagein: ubc_upl_unmap() failed with (%d)", kret);
		}

		if (auio != NULL) {
			uio_free(auio);
		}

	exit_no_unmap:
		if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
			if (!error && (bytes_remaining >= 0) && (bytes_remaining <= (user_ssize_t)ap->a_size)) {
				/* only commit what was read in (page aligned)*/
				bytes_to_commit = ap->a_size - bytes_remaining;
				if (bytes_to_commit) {
					/* need to make sure bytes_to_commit and byte_remaining are page aligned before calling ubc_upl_commit_range*/
					if (bytes_to_commit & PAGE_MASK) {
						/* round the commit up to the next page boundary */
						bytes_to_commit = (bytes_to_commit & (~PAGE_MASK)) + (PAGE_MASK + 1);
						assert(bytes_to_commit <= (off_t)ap->a_size);

						bytes_remaining = ap->a_size - bytes_to_commit;
					}
					ubc_upl_commit_range(upl, ap->a_pl_offset, (upl_size_t)bytes_to_commit, UPL_COMMIT_FREE_ON_EMPTY);
				}

				/* abort anything thats left */
				if (bytes_remaining) {
					ubc_upl_abort_range(upl, ap->a_pl_offset + (upl_offset_t)bytes_to_commit, (upl_size_t)bytes_remaining, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
				}
			} else {
				ubc_upl_abort_range(upl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
			}
		}
		vnode_put(lvp);
	} else if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
		/* couldn't get the lower vnode; abort the whole UPL so the
		 * pages aren't left busy */
		ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
	}
	return error;
}
599
600 static int
bindfs_read(struct vnop_read_args * ap)601 bindfs_read(struct vnop_read_args * ap)
602 {
603 int error = EIO;
604
605 struct vnode *vp, *lvp;
606
607 BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
608
609 vp = ap->a_vp;
610 lvp = BINDVPTOLOWERVP(vp);
611
612 /*
613 * First some house keeping
614 */
615 if (vnode_getwithvid(lvp, BINDVPTOLOWERVID(vp)) == 0) {
616 if (!vnode_isreg(lvp) && !vnode_islnk(lvp)) {
617 error = EPERM;
618 goto end;
619 }
620
621 if (uio_resid(ap->a_uio) == 0) {
622 error = 0;
623 goto end;
624 }
625
626 /*
627 * Now ask VM/UBC/VFS to do our bidding
628 */
629
630 error = VNOP_READ(lvp, ap->a_uio, ap->a_ioflag, ap->a_context);
631 if (error) {
632 printf("bindfs: VNOP_READ failed: %d\n", error);
633 }
634 end:
635 vnode_put(lvp);
636 }
637 return error;
638 }
639
640 /*
641 * Global vfs data structures
642 */
643
/*
 * Vnode operation vector for bindfs.  Every op not listed here falls
 * through to bindfs_default (ENOTSUP).  The table is registered via
 * bindfs_vnodeop_opv_desc, which fills in bindfs_vnodeop_p.
 */
static const struct vnodeopv_entry_desc bindfs_vnodeop_entries[] = {
	{.opve_op = &vnop_default_desc, .opve_impl = (vop_t)bindfs_default}, /* default */
	{.opve_op = &vnop_getattr_desc, .opve_impl = (vop_t)bindfs_getattr}, /* getattr */
	{.opve_op = &vnop_open_desc, .opve_impl = (vop_t)bindfs_open}, /* open */
	{.opve_op = &vnop_close_desc, .opve_impl = (vop_t)bindfs_close}, /* close */
	{.opve_op = &vnop_inactive_desc, .opve_impl = (vop_t)bind_inactive}, /* inactive */
	{.opve_op = &vnop_reclaim_desc, .opve_impl = (vop_t)bind_reclaim}, /* reclaim */
	{.opve_op = &vnop_lookup_desc, .opve_impl = (vop_t)bind_lookup}, /* lookup */
	{.opve_op = &vnop_readdir_desc, .opve_impl = (vop_t)bindfs_readdir}, /* readdir */
	{.opve_op = &vnop_readlink_desc, .opve_impl = (vop_t)bindfs_readlink}, /* readlink */
	{.opve_op = &vnop_pathconf_desc, .opve_impl = (vop_t)bindfs_pathconf}, /* pathconf */
	{.opve_op = &vnop_fsync_desc, .opve_impl = (vop_t)bindfs_fsync}, /* fsync */
	{.opve_op = &vnop_mmap_desc, .opve_impl = (vop_t)bindfs_mmap}, /* mmap */
	{.opve_op = &vnop_mnomap_desc, .opve_impl = (vop_t)bindfs_mnomap}, /* mnomap */
	{.opve_op = &vnop_getxattr_desc, .opve_impl = (vop_t)bindfs_getxattr}, /* getxattr */
	{.opve_op = &vnop_pagein_desc, .opve_impl = (vop_t)bindfs_pagein}, /* pagein */
	{.opve_op = &vnop_read_desc, .opve_impl = (vop_t)bindfs_read}, /* read */
	{.opve_op = &vnop_listxattr_desc, .opve_impl = (vop_t)bindfs_listxattr}, /* listxattr */
	{.opve_op = NULL, .opve_impl = NULL}, /* sentinel */
};

const struct vnodeopv_desc bindfs_vnodeop_opv_desc = {.opv_desc_vector_p = &bindfs_vnodeop_p, .opv_desc_ops = bindfs_vnodeop_entries};
666
667 //BINDFS Specific helper function
668
669 int
bindfs_getbackingvnode(vnode_t in_vp,vnode_t * out_vpp)670 bindfs_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
671 {
672 int result = EINVAL;
673
674 if (out_vpp == NULL || in_vp == NULL) {
675 goto end;
676 }
677
678 struct vfsstatfs * sp = NULL;
679 mount_t mp = vnode_mount(in_vp);
680
681 sp = vfs_statfs(mp);
682 //If this isn't a bindfs vnode or it is but it's a special vnode
683 if (strcmp(sp->f_fstypename, "bindfs") != 0) {
684 *out_vpp = NULLVP;
685 result = ENOENT;
686 goto end;
687 }
688
689 vnode_t lvp = BINDVPTOLOWERVP(in_vp);
690 if ((result = vnode_getwithvid(lvp, BINDVPTOLOWERVID(in_vp)))) {
691 goto end;
692 }
693
694 *out_vpp = lvp;
695
696 end:
697 return result;
698 }
699