xref: /xnu-11215.1.10/bsd/miscfs/bindfs/bind_vnops.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. Please obtain a copy of the License at
10  * http://www.opensource.apple.com/apsl/ and read it before using this
11  * file.
12  *
13  * The Original Code and all software distributed under the License are
14  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18  * Please see the License for the specific language governing rights and
19  * limitations under the License.
20  *
21  * @APPLE_LICENSE_HEADER_END@
22  */
23 
24 /*-
25  * Portions Copyright (c) 1992, 1993
26  *  The Regents of the University of California.  All rights reserved.
27  *
28  * This code is derived from software contributed to Berkeley by
29  * John Heidemann of the UCLA Ficus project.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  * 4. Neither the name of the University nor the names of its contributors
40  *    may be used to endorse or promote products derived from this software
41  *    without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53  * SUCH DAMAGE.
54  *
55  *  @(#)null_vnops.c    8.6 (Berkeley) 5/27/95
56  *
57  * Ancestors:
58  *  @(#)lofs_vnops.c    1.2 (Berkeley) 6/18/92
59  *  ...and...
60  *  @(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
61  *
62  * $FreeBSD$
63  */
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/conf.h>
68 #include <sys/kernel.h>
69 #include <sys/lock.h>
70 #include <sys/malloc.h>
71 #include <sys/mount.h>
72 #include <sys/mount_internal.h>
73 #include <sys/namei.h>
74 #include <sys/sysctl.h>
75 #include <sys/vnode.h>
76 #include <sys/vnode_internal.h>
77 #include <sys/xattr.h>
78 #include <sys/ubc.h>
79 #include <sys/types.h>
80 #include <sys/dirent.h>
81 
82 #include "bindfs.h"
83 
/* Fixed inode number reported for the bindfs mount root (the conventional
 * "root directory" inode number used by most filesystems). */
#define BIND_ROOT_INO 2

/* Operation vector for bindfs vnodes; populated at VFS registration time
 * through bindfs_vnodeop_opv_desc (defined at the bottom of this file). */
vop_t * bindfs_vnodeop_p = NULL;
87 
88 static int
bindfs_default(__unused struct vnop_generic_args * args)89 bindfs_default(__unused struct vnop_generic_args * args)
90 {
91 	return ENOTSUP;
92 }
93 
94 static int
bindfs_getattr(struct vnop_getattr_args * args)95 bindfs_getattr(struct vnop_getattr_args * args)
96 {
97 	int error;
98 	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
99 
100 	struct vnode * lowervp = BINDVPTOLOWERVP(args->a_vp);
101 
102 	error = vnode_getwithref(lowervp);
103 	if (error == 0) {
104 		error = VNOP_GETATTR(lowervp, args->a_vap, args->a_context);
105 		vnode_put(lowervp);
106 
107 		if (error == 0) {
108 			if (VATTR_IS_ACTIVE(args->a_vap, va_fsid)) {
109 				/* fix up fsid so it doesn't say the underlying fs*/
110 				VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(vnode_mount(args->a_vp))->f_fsid.val[0]);
111 			}
112 			if (VATTR_IS_ACTIVE(args->a_vap, va_fsid64)) {
113 				/* fix up fsid so it doesn't say the underlying fs*/
114 				VATTR_RETURN(args->a_vap, va_fsid64, vfs_statfs(vnode_mount(args->a_vp))->f_fsid);
115 			}
116 			struct vnode * parent = vnode_parent(args->a_vp);
117 			if (vnode_isvroot(args->a_vp)) {
118 				// We can use the lower answers for most questions about the root vnode but need to fix up a few things
119 				if (VATTR_IS_ACTIVE(args->a_vap, va_fileid)) {
120 					VATTR_RETURN(args->a_vap, va_fileid, BIND_ROOT_INO);
121 				}
122 				if (VATTR_IS_ACTIVE(args->a_vap, va_linkid)) {
123 					VATTR_RETURN(args->a_vap, va_linkid, BIND_ROOT_INO);
124 				}
125 				if (VATTR_IS_ACTIVE(args->a_vap, va_parentid)) {
126 					// The parent of the root is itself
127 					VATTR_RETURN(args->a_vap, va_parentid, BIND_ROOT_INO);
128 				}
129 			} else if (parent != NULL && vnode_isvroot(parent)) {
130 				if (VATTR_IS_ACTIVE(args->a_vap, va_parentid)) {
131 					// This vnode's parent is the root.
132 					VATTR_RETURN(args->a_vap, va_parentid, BIND_ROOT_INO);
133 				}
134 			}
135 		}
136 	}
137 
138 	return error;
139 }
140 
141 static int
bindfs_open(struct vnop_open_args * args)142 bindfs_open(struct vnop_open_args * args)
143 {
144 	int error;
145 	struct vnode *vp, *lvp;
146 
147 	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
148 
149 	vp    = args->a_vp;
150 	lvp   = BINDVPTOLOWERVP(vp);
151 	error = vnode_getwithref(lvp);
152 	if (error == 0) {
153 		error = VNOP_OPEN(lvp, args->a_mode, args->a_context);
154 		vnode_put(lvp);
155 	}
156 
157 	return error;
158 }
159 
160 static int
bindfs_close(struct vnop_close_args * args)161 bindfs_close(struct vnop_close_args * args)
162 {
163 	int error;
164 	struct vnode *vp, *lvp;
165 
166 	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
167 
168 	vp  = args->a_vp;
169 	lvp = BINDVPTOLOWERVP(vp);
170 
171 	error = vnode_getwithref(lvp);
172 	if (error == 0) {
173 		error = VNOP_CLOSE(lvp, args->a_fflag, args->a_context);
174 		vnode_put(lvp);
175 	}
176 	return error;
177 }
178 
179 /*
180  * We have to carry on the locking protocol on the bind layer vnodes
181  * as we progress through the tree. We also have to enforce read-only
182  * if this layer is mounted read-only.
183  */
static int
bind_lookup(struct vnop_lookup_args * ap)
{
	struct componentname * cnp = ap->a_cnp;
	struct vnode * dvp         = ap->a_dvp;
	struct vnode *vp, *ldvp, *lvp;
	struct mount * mp;
	struct bind_mount * bind_mp;
	int error;

	BINDFSDEBUG("%s parent: %p component: %.*s\n", __FUNCTION__, ap->a_dvp, cnp->cn_namelen, cnp->cn_nameptr);

	mp = vnode_mount(dvp);
	/* rename and delete are not allowed. this is a read only file system */
	if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME || cnp->cn_nameiop == CREATE) {
		return EROFS;
	}
	/* NOTE(review): bind_mp is computed here but never used below. */
	bind_mp = MOUNTTOBINDMOUNT(mp);

	/*
	 * "." and ".." are answered from the bindfs layer itself, without
	 * consulting the lower filesystem: "." is dvp, ".." is dvp's parent
	 * (or dvp itself when dvp is the mount root).
	 */
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			vp = dvp;
		} else if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			vp = (vnode_isvroot(dvp)) ? dvp : vnode_parent(dvp);
		} else {
			/* A name that merely starts with '.' — do a normal lookup. */
			goto notdot;
		}

		/* vnode_parent() can return NULL; report ENOENT in that case. */
		error = vp ? vnode_get(vp) : ENOENT;

		if (error == 0) {
			*ap->a_vpp = vp;
		}

		return error;
	}

notdot:
	ldvp = BINDVPTOLOWERVP(dvp);
	vp = lvp = NULL;

	/*
	 * Hold ldvp.  The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation.
	 */
	error = vnode_getwithref(ldvp);
	if (error) {
		return error;
	}

	/* Forward the component lookup to the lower directory vnode. */
	error = VNOP_LOOKUP(ldvp, &lvp, cnp, ap->a_context);

	vnode_put(ldvp);

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			/* Lower lookup resolved to the directory itself ("." on the
			 * lower fs); map that back to our own dvp. */
			vp    = dvp;
			error = vnode_get(vp);
		} else {
			/* Wrap (or find the existing wrapper for) the lower vnode. */
			error = bind_nodeget(mp, lvp, dvp, &vp, cnp, 0);
		}
		if (error == 0) {
			*ap->a_vpp = vp;
		}
		/* if we got lvp, drop the iocount from VNOP_LOOKUP */
		if (lvp != NULL) {
			vnode_put(lvp);
		}
	}

	return error;
}
257 
258 /*
259  * Don't think this needs to do anything
260  */
static int
bind_inactive(__unused struct vnop_inactive_args * ap)
{
	/* Nothing to do at inactivation; all teardown happens in bind_reclaim. */
	BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	return 0;
}
268 
/*
 * Tear down a bindfs vnode: detach it from the VFS, remove it from the
 * bindfs hash, release the usecount held on the lower vnode, and free
 * the bind_node.  Order matters: the hash entry is removed before the
 * lower reference is dropped so no new lookup can find a node whose
 * lower vnode is being released.
 */
static int
bind_reclaim(struct vnop_reclaim_args * ap)
{
	struct vnode * vp;
	struct bind_node * xp;
	struct vnode * lowervp;

	BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp = ap->a_vp;

	xp      = VTOBIND(vp);
	lowervp = xp->bind_lowervp;

	vnode_removefsref(vp);

	bind_hashrem(xp);
	/* Drop the usecount taken when the node was created. */
	vnode_rele(lowervp);

	cache_purge(vp);
	/* Disconnect the fs-private data before freeing it. */
	vnode_clearfsnode(vp);

	kfree_type(struct bind_node, xp);

	return 0;
}
295 
/* Get dirent length padded to 4 byte alignment.
 * sizeof(struct dirent) includes a (__DARWIN_MAXNAMLEN + 1)-byte name
 * buffer; subtract that back out and charge only the actual name plus
 * its NUL, then round up to a multiple of 4. */
#define DIRENT_LEN(namelen) \
	((sizeof(struct dirent) + (namelen + 1) - (__DARWIN_MAXNAMLEN + 1) + 3) & ~3)

/* Get the address of the last byte of this dirent (per its d_reclen) */
#define DIRENT_END(dep) \
	(((char *)(dep)) + (dep)->d_reclen - 1)
303 
304 static int
bindfs_readdir(struct vnop_readdir_args * ap)305 bindfs_readdir(struct vnop_readdir_args * ap)
306 {
307 	struct vnode *vp, *lvp, *dvp;
308 	int error;
309 	uio_t uio = ap->a_uio;
310 
311 	BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
312 	/* assumption is that any vp that comes through here had to go through lookup
313 	 */
314 
315 	if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) {
316 		return EINVAL;
317 	}
318 
319 	vp    = ap->a_vp;
320 	dvp = vnode_parent(vp);
321 	lvp   = BINDVPTOLOWERVP(vp);
322 	error = vnode_getwithref(lvp);
323 	if (error != 0) {
324 		goto lb_end;
325 	}
326 
327 	if (vnode_isvroot(vp) || (dvp != NULL && vnode_isvroot(dvp))) {
328 		size_t bufsize;
329 		void * bufptr;
330 		uio_t auio;
331 		struct dirent *dep;
332 		size_t bytesread;
333 		bufsize = 3 * MIN((user_size_t)uio_resid(uio), 87371u) / 8;
334 		bufptr = kalloc_data(bufsize, Z_WAITOK);
335 		if (bufptr == NULL) {
336 			vnode_put(lvp);
337 			return ENOMEM;
338 		}
339 		auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
340 		uio_addiov(auio, (uintptr_t)bufptr, bufsize);
341 		uio_setoffset(auio, uio_offset(uio));
342 		error = VNOP_READDIR(lvp, auio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ap->a_context);
343 		vnode_put(lvp);
344 		if (error != 0) {
345 			goto lb_end;
346 		}
347 
348 		dep = (struct dirent *)bufptr;
349 		bytesread = bufsize - uio_resid(auio);
350 		while (error == 0 && (char *)dep < ((char *)bufptr + bytesread)) {
351 			if (DIRENT_END(dep) > ((char *)bufptr + bytesread) ||
352 			    DIRENT_LEN(dep->d_namlen) > dep->d_reclen) {
353 				printf("%s: %s: Bad dirent received from directory %s\n", __func__,
354 				    vfs_statfs(vnode_mount(vp))->f_mntonname,
355 				    vp->v_name ? vp->v_name : "<unknown>");
356 				error = EIO;
357 				break;
358 			}
359 			if (dep->d_name[0] == '.') {
360 				/* re-write the inode number for the mount root */
361 				/* if vp is the mount root then . = 2 and .. = 2 */
362 				/* if the parent of vp is the mount root then .. = 2 */
363 				if ((vnode_isvroot(vp) && dep->d_namlen == 1) ||
364 				    (dep->d_namlen == 2 && dep->d_name[1] == '.')) {
365 					dep->d_ino = BIND_ROOT_INO;
366 				}
367 			}
368 			/* Copy entry64 to user's buffer. */
369 			error = uiomove((caddr_t)dep, dep->d_reclen, uio);
370 			/* Move to next entry. */
371 			dep = (struct dirent *)((char *)dep + dep->d_reclen);
372 		}
373 		/* Update the real offset using the offset we got from VNOP_READDIR. */
374 		if (error == 0) {
375 			uio_setoffset(uio, uio_offset(auio));
376 		}
377 		uio_free(auio);
378 		kfree_data(bufptr, bufsize);
379 	} else {
380 		error = VNOP_READDIR(lvp, ap->a_uio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ap->a_context);
381 		vnode_put(lvp);
382 	}
383 
384 lb_end:
385 	return error;
386 }
387 
388 static int
bindfs_readlink(struct vnop_readlink_args * ap)389 bindfs_readlink(struct vnop_readlink_args * ap)
390 {
391 	BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
392 	int error;
393 	struct vnode *vp, *lvp;
394 
395 	vp  = ap->a_vp;
396 	lvp = BINDVPTOLOWERVP(vp);
397 
398 	error = vnode_getwithref(lvp);
399 	if (error == 0) {
400 		error = VNOP_READLINK(lvp, ap->a_uio, ap->a_context);
401 		vnode_put(lvp);
402 
403 		if (error) {
404 			printf("bindfs: readlink failed: %d\n", error);
405 		}
406 	}
407 
408 	return error;
409 }
410 
static int
bindfs_pathconf(__unused struct vnop_pathconf_args * args)
{
	/* No pathconf variables are supported on the bindfs layer. */
	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
	return EINVAL;
}
417 
static int
bindfs_fsync(__unused struct vnop_fsync_args * args)
{
	/* Read-only layer: nothing of ours to flush; report success. */
	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
	return 0;
}
424 
425 static int
bindfs_mmap(struct vnop_mmap_args * args)426 bindfs_mmap(struct vnop_mmap_args * args)
427 {
428 	int error;
429 	struct vnode *vp, *lvp;
430 
431 	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
432 
433 	vp    = args->a_vp;
434 	lvp   = BINDVPTOLOWERVP(vp);
435 	error = vnode_getwithref(lvp);
436 	if (error == 0) {
437 		error = VNOP_MMAP(lvp, args->a_fflags, args->a_context);
438 		vnode_put(lvp);
439 	}
440 
441 	return error;
442 }
443 
444 static int
bindfs_mnomap(struct vnop_mnomap_args * args)445 bindfs_mnomap(struct vnop_mnomap_args * args)
446 {
447 	int error;
448 	struct vnode *vp, *lvp;
449 
450 	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
451 
452 	vp    = args->a_vp;
453 	lvp   = BINDVPTOLOWERVP(vp);
454 	error = vnode_getwithref(lvp);
455 	if (error == 0) {
456 		error = VNOP_MNOMAP(lvp, args->a_context);
457 		vnode_put(lvp);
458 	}
459 
460 	return error;
461 }
462 
463 static int
bindfs_getxattr(struct vnop_getxattr_args * args)464 bindfs_getxattr(struct vnop_getxattr_args * args)
465 {
466 	int error;
467 	struct vnode *vp, *lvp;
468 
469 	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
470 
471 	vp    = args->a_vp;
472 	lvp   = BINDVPTOLOWERVP(vp);
473 	error = vnode_getwithref(lvp);
474 	if (error == 0) {
475 		error = VNOP_GETXATTR(lvp, args->a_name, args->a_uio, args->a_size, args->a_options, args->a_context);
476 		vnode_put(lvp);
477 	}
478 
479 	return error;
480 }
481 
482 static int
bindfs_listxattr(struct vnop_listxattr_args * args)483 bindfs_listxattr(struct vnop_listxattr_args * args)
484 {
485 	int error;
486 	struct vnode *vp, *lvp;
487 
488 	BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
489 
490 	vp    = args->a_vp;
491 	lvp   = BINDVPTOLOWERVP(vp);
492 	error = vnode_getwithref(lvp);
493 	if (error == 0) {
494 		error = VNOP_LISTXATTR(lvp, args->a_uio, args->a_size, args->a_options, args->a_context);
495 		vnode_put(lvp);
496 	}
497 
498 	return error;
499 }
500 
/* relies on v1 paging */
/*
 * Service a pagein against a bindfs vnode by reading the corresponding
 * range from the lower vnode directly into the caller's UPL, then
 * committing exactly the page-aligned portion that was filled and
 * aborting the rest.  Only regular files are supported.
 */
static int
bindfs_pagein(struct vnop_pagein_args * ap)
{
	int error = EIO;
	struct vnode *vp, *lvp;

	BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp  = ap->a_vp;
	lvp = BINDVPTOLOWERVP(vp);

	if (vnode_vtype(vp) != VREG) {
		return ENOTSUP;
	}

	/*
	 * Ask VM/UBC/VFS to do our bidding
	 */
	/* getwithvid guards against the lower vnode having been recycled. */
	if (vnode_getwithvid(lvp, BINDVPTOLOWERVID(vp)) == 0) {
		vm_offset_t ioaddr;
		uio_t auio;
		kern_return_t kret;
		off_t bytes_to_commit;
		off_t lowersize;
		upl_t upl      = ap->a_pl;
		user_ssize_t bytes_remaining = 0;

		auio = uio_create(1, ap->a_f_offset, UIO_SYSSPACE, UIO_READ);
		if (auio == NULL) {
			error = EIO;
			goto exit_no_unmap;
		}

		/* Map the UPL so VNOP_READ can target it as kernel memory. */
		kret = ubc_upl_map(upl, &ioaddr);
		if (KERN_SUCCESS != kret) {
			panic("bindfs_pagein: ubc_upl_map() failed with (%d)", kret);
		}

		ioaddr += ap->a_pl_offset;

		error = uio_addiov(auio, (user_addr_t)ioaddr, ap->a_size);
		if (error) {
			goto exit;
		}

		/* Keep our ubc size in sync with the lower file's. */
		lowersize = ubc_getsize(lvp);
		if (lowersize != ubc_getsize(vp)) {
			(void)ubc_setsize(vp, lowersize); /* ignore failures, nothing can be done */
		}

		error = VNOP_READ(lvp, auio, ((ap->a_flags & UPL_IOSYNC) ? IO_SYNC : 0), ap->a_context);

		bytes_remaining = uio_resid(auio);
		if (bytes_remaining > 0 && bytes_remaining <= (user_ssize_t)ap->a_size) {
			/* zero bytes that weren't read in to the upl */
			bzero((void*)((uintptr_t)(ioaddr + ap->a_size - bytes_remaining)), (size_t) bytes_remaining);
		}

exit:
		kret = ubc_upl_unmap(upl);
		if (KERN_SUCCESS != kret) {
			panic("bindfs_pagein: ubc_upl_unmap() failed with (%d)", kret);
		}

		if (auio != NULL) {
			uio_free(auio);
		}

exit_no_unmap:
		/* Unless the caller asked for no commit, resolve every page of
		 * the UPL: commit what was read, abort what wasn't. */
		if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
			if (!error && (bytes_remaining >= 0) && (bytes_remaining <= (user_ssize_t)ap->a_size)) {
				/* only commit what was read in (page aligned)*/
				bytes_to_commit = ap->a_size - bytes_remaining;
				if (bytes_to_commit) {
					/* need to make sure bytes_to_commit and byte_remaining are page aligned before calling ubc_upl_commit_range*/
					if (bytes_to_commit & PAGE_MASK) {
						/* Round the commit up to a whole page (the partial
						 * page's tail was zero-filled above). */
						bytes_to_commit = (bytes_to_commit & (~PAGE_MASK)) + (PAGE_MASK + 1);
						assert(bytes_to_commit <= (off_t)ap->a_size);

						bytes_remaining = ap->a_size - bytes_to_commit;
					}
					ubc_upl_commit_range(upl, ap->a_pl_offset, (upl_size_t)bytes_to_commit, UPL_COMMIT_FREE_ON_EMPTY);
				}

				/* abort anything thats left */
				if (bytes_remaining) {
					ubc_upl_abort_range(upl, ap->a_pl_offset + (upl_offset_t)bytes_to_commit, (upl_size_t)bytes_remaining, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
				}
			} else {
				ubc_upl_abort_range(upl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
			}
		}
		vnode_put(lvp);
	} else if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
		/* Couldn't get the lower vnode at all: abort the whole UPL. */
		ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
	}
	return error;
}
600 
/*
 * Read from a bindfs vnode by forwarding to the lower vnode.  Only
 * regular files and symlinks may be read through this path; a zero
 * resid is an immediate success.
 */
static int
bindfs_read(struct vnop_read_args * ap)
{
	int error = EIO;

	struct vnode *vp, *lvp;

	BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp  = ap->a_vp;
	lvp = BINDVPTOLOWERVP(vp);

	/*
	 * First some house keeping
	 */
	/* getwithvid guards against the lower vnode having been recycled;
	 * on failure the initial EIO is returned. */
	if (vnode_getwithvid(lvp, BINDVPTOLOWERVID(vp)) == 0) {
		if (!vnode_isreg(lvp) && !vnode_islnk(lvp)) {
			error = EPERM;
			goto end;
		}

		if (uio_resid(ap->a_uio) == 0) {
			error = 0;
			goto end;
		}

		/*
		 * Now ask VM/UBC/VFS to do our bidding
		 */

		error = VNOP_READ(lvp, ap->a_uio, ap->a_ioflag, ap->a_context);
		if (error) {
			printf("bindfs: VNOP_READ failed: %d\n", error);
		}
end:
		/* Drop the iocount taken by vnode_getwithvid on every path. */
		vnode_put(lvp);
	}
	return error;
}
640 
641 /*
642  * Global vfs data structures
643  */
644 
/* Table mapping each supported vnop to its bindfs implementation; every
 * operation not listed here falls through to bindfs_default (ENOTSUP).
 * The list is NULL-terminated as required by the vnodeopv machinery. */
static const struct vnodeopv_entry_desc bindfs_vnodeop_entries[] = {
	{.opve_op = &vnop_default_desc, .opve_impl = (vop_t)bindfs_default},      /* default */
	{.opve_op = &vnop_getattr_desc, .opve_impl = (vop_t)bindfs_getattr},      /* getattr */
	{.opve_op = &vnop_open_desc, .opve_impl = (vop_t)bindfs_open},            /* open */
	{.opve_op = &vnop_close_desc, .opve_impl = (vop_t)bindfs_close},          /* close */
	{.opve_op = &vnop_inactive_desc, .opve_impl = (vop_t)bind_inactive},      /* inactive */
	{.opve_op = &vnop_reclaim_desc, .opve_impl = (vop_t)bind_reclaim},        /* reclaim */
	{.opve_op = &vnop_lookup_desc, .opve_impl = (vop_t)bind_lookup},          /* lookup */
	{.opve_op = &vnop_readdir_desc, .opve_impl = (vop_t)bindfs_readdir},      /* readdir */
	{.opve_op = &vnop_readlink_desc, .opve_impl = (vop_t)bindfs_readlink},    /* readlink */
	{.opve_op = &vnop_pathconf_desc, .opve_impl = (vop_t)bindfs_pathconf},    /* pathconf */
	{.opve_op = &vnop_fsync_desc, .opve_impl = (vop_t)bindfs_fsync},          /* fsync */
	{.opve_op = &vnop_mmap_desc, .opve_impl = (vop_t)bindfs_mmap},            /* mmap */
	{.opve_op = &vnop_mnomap_desc, .opve_impl = (vop_t)bindfs_mnomap},        /* mnomap */
	{.opve_op = &vnop_getxattr_desc, .opve_impl = (vop_t)bindfs_getxattr},    /* getxattr */
	{.opve_op = &vnop_pagein_desc, .opve_impl = (vop_t)bindfs_pagein},        /* pagein */
	{.opve_op = &vnop_read_desc, .opve_impl = (vop_t)bindfs_read},            /* read */
	{.opve_op = &vnop_listxattr_desc, .opve_impl = (vop_t)bindfs_listxattr},  /* listxattr */
	{.opve_op = NULL, .opve_impl = NULL},
};

/* Descriptor handed to the VFS layer; it fills in bindfs_vnodeop_p from
 * the entry table above at registration time. */
const struct vnodeopv_desc bindfs_vnodeop_opv_desc = {.opv_desc_vector_p = &bindfs_vnodeop_p, .opv_desc_ops = bindfs_vnodeop_entries};
667 
668 //BINDFS Specific helper function
669 
670 int
bindfs_getbackingvnode(vnode_t in_vp,vnode_t * out_vpp)671 bindfs_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
672 {
673 	int result = EINVAL;
674 
675 	if (out_vpp == NULL || in_vp == NULL) {
676 		goto end;
677 	}
678 
679 	struct vfsstatfs * sp   = NULL;
680 	mount_t mp = vnode_mount(in_vp);
681 
682 	sp = vfs_statfs(mp);
683 	//If this isn't a bindfs vnode or it is but it's a special vnode
684 	if (strcmp(sp->f_fstypename, "bindfs") != 0) {
685 		*out_vpp = NULLVP;
686 		result = ENOENT;
687 		goto end;
688 	}
689 
690 	vnode_t lvp = BINDVPTOLOWERVP(in_vp);
691 	if ((result = vnode_getwithvid(lvp, BINDVPTOLOWERVID(in_vp)))) {
692 		goto end;
693 	}
694 
695 	*out_vpp = lvp;
696 
697 end:
698 	return result;
699 }
700