xref: /xnu-8792.61.2/bsd/miscfs/nullfs/null_vnops.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. Please obtain a copy of the License at
10  * http://www.opensource.apple.com/apsl/ and read it before using this
11  * file.
12  *
13  * The Original Code and all software distributed under the License are
14  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18  * Please see the License for the specific language governing rights and
19  * limitations under the License.
20  *
21  * @APPLE_LICENSE_HEADER_END@
22  */
23 
24 /*-
25  * Portions Copyright (c) 1992, 1993
26  *  The Regents of the University of California.  All rights reserved.
27  *
28  * This code is derived from software contributed to Berkeley by
29  * John Heidemann of the UCLA Ficus project.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  * 4. Neither the name of the University nor the names of its contributors
40  *    may be used to endorse or promote products derived from this software
41  *    without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
44  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
47  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53  * SUCH DAMAGE.
54  *
55  *  @(#)null_vnops.c    8.6 (Berkeley) 5/27/95
56  *
57  * Ancestors:
58  *  @(#)lofs_vnops.c    1.2 (Berkeley) 6/18/92
59  *  ...and...
60  *  @(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project
61  *
62  * $FreeBSD$
63  */
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/conf.h>
68 #include <sys/kernel.h>
69 #include <sys/lock.h>
70 #include <sys/malloc.h>
71 #include <sys/mount.h>
72 #include <sys/mount_internal.h>
73 #include <sys/namei.h>
74 #include <sys/sysctl.h>
75 #include <sys/vnode.h>
76 #include <sys/xattr.h>
77 #include <sys/ubc.h>
78 #include <sys/types.h>
79 #include <sys/dirent.h>
80 #include <sys/kauth.h>
81 
82 #include "nullfs.h"
83 
84 #define NULL_ROOT_INO 2
85 #define NULL_SECOND_INO 3
86 #define NULL_THIRD_INO 4
87 
88 vop_t * nullfs_vnodeop_p = NULL;
89 
90 /* the mountpoint lock should be held going into this function */
91 static int
nullfs_isspecialvp(struct vnode * vp)92 nullfs_isspecialvp(struct vnode * vp)
93 {
94 	struct null_mount * null_mp;
95 
96 	null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));
97 
98 	/* only check for root and second here, third is special in a different way,
99 	 * related only to lookup and readdir */
100 	if (vp && (vp == null_mp->nullm_rootvp || vp == null_mp->nullm_secondvp)) {
101 		return 1;
102 	}
103 	return 0;
104 }
105 
106 /* helper function to handle locking where possible */
107 static int
nullfs_checkspecialvp(struct vnode * vp)108 nullfs_checkspecialvp(struct vnode* vp)
109 {
110 	int result = 0;
111 	struct null_mount * null_mp;
112 
113 	null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));
114 
115 	lck_mtx_lock(&null_mp->nullm_lock);
116 	result = (nullfs_isspecialvp(vp));
117 	lck_mtx_unlock(&null_mp->nullm_lock);
118 
119 	return result;
120 }
121 
122 vfs_context_t
nullfs_get_patched_context(struct null_mount * null_mp,vfs_context_t ctx)123 nullfs_get_patched_context(struct null_mount * null_mp, vfs_context_t ctx)
124 {
125 	struct vfs_context* ectx = ctx;
126 	if ((null_mp->nullm_flags & NULLM_UNVEIL) == NULLM_UNVEIL) {
127 		ectx = vfs_context_create(ctx);
128 		ectx->vc_ucred = kauth_cred_setuidgid(ectx->vc_ucred, null_mp->uid, null_mp->gid);
129 	}
130 	return ectx;
131 }
132 
133 void
nullfs_cleanup_patched_context(struct null_mount * null_mp,vfs_context_t ctx)134 nullfs_cleanup_patched_context(struct null_mount * null_mp, vfs_context_t ctx)
135 {
136 	if ((null_mp->nullm_flags & NULLM_UNVEIL) == NULLM_UNVEIL) {
137 		vfs_context_rele(ctx);
138 	}
139 }
140 
/* Catch-all vnop handler: logs the operation name and reports it as
 * unsupported.  Installed for every vnop nullfs does not implement. */
static int
nullfs_default(__unused struct vnop_generic_args * args)
{
	NULLFSDEBUG("%s (default)\n", ((struct vnodeop_desc_fake *)args->a_desc)->vdesc_name);
	return ENOTSUP;
}
147 
148 static int
nullfs_special_getattr(struct vnop_getattr_args * args)149 nullfs_special_getattr(struct vnop_getattr_args * args)
150 {
151 	mount_t mp                  = vnode_mount(args->a_vp);
152 	struct null_mount * null_mp = MOUNTTONULLMOUNT(mp);
153 
154 	ino_t ino = NULL_ROOT_INO;
155 	struct vnode_attr covered_rootattr;
156 	vnode_t checkvp = null_mp->nullm_lowerrootvp;
157 	vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context);
158 
159 	VATTR_INIT(&covered_rootattr);
160 	VATTR_WANTED(&covered_rootattr, va_uid);
161 	VATTR_WANTED(&covered_rootattr, va_gid);
162 	VATTR_WANTED(&covered_rootattr, va_create_time);
163 	VATTR_WANTED(&covered_rootattr, va_modify_time);
164 	VATTR_WANTED(&covered_rootattr, va_access_time);
165 
166 	/* prefer to get this from the lower root vp, but if not (i.e. forced unmount
167 	 * of lower fs) try the mount point covered vnode */
168 	if (vnode_getwithvid(checkvp, null_mp->nullm_lowerrootvid)) {
169 		checkvp = vfs_vnodecovered(mp);
170 		if (checkvp == NULL) {
171 			nullfs_cleanup_patched_context(null_mp, ectx);
172 			return EIO;
173 		}
174 	}
175 
176 	int error = vnode_getattr(checkvp, &covered_rootattr, ectx);
177 
178 	vnode_put(checkvp);
179 	if (error) {
180 		/* we should have been able to get attributes fore one of the two choices so
181 		 * fail if we didn't */
182 		nullfs_cleanup_patched_context(null_mp, ectx);
183 		return error;
184 	}
185 
186 	/* we got the attributes of the vnode we cover so plow ahead */
187 	if (args->a_vp == null_mp->nullm_secondvp) {
188 		ino = NULL_SECOND_INO;
189 	}
190 
191 	VATTR_RETURN(args->a_vap, va_type, vnode_vtype(args->a_vp));
192 	VATTR_RETURN(args->a_vap, va_rdev, 0);
193 	VATTR_RETURN(args->a_vap, va_nlink, 3);      /* always just ., .., and the child */
194 	VATTR_RETURN(args->a_vap, va_total_size, 0); // hoping this is ok
195 
196 	VATTR_RETURN(args->a_vap, va_data_size, 0); // hoping this is ok
197 	VATTR_RETURN(args->a_vap, va_data_alloc, 0);
198 	VATTR_RETURN(args->a_vap, va_iosize, vfs_statfs(mp)->f_iosize);
199 	VATTR_RETURN(args->a_vap, va_fileid, ino);
200 	VATTR_RETURN(args->a_vap, va_linkid, ino);
201 	if (VATTR_IS_ACTIVE(args->a_vap, va_fsid)) {
202 		VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(mp)->f_fsid.val[0]); // return the fsid of the mount point
203 	}
204 	if (VATTR_IS_ACTIVE(args->a_vap, va_fsid64)) {
205 		VATTR_RETURN(args->a_vap, va_fsid64, vfs_statfs(mp)->f_fsid);
206 	}
207 	VATTR_RETURN(args->a_vap, va_filerev, 0);
208 	VATTR_RETURN(args->a_vap, va_gen, 0);
209 	VATTR_RETURN(args->a_vap, va_flags, UF_HIDDEN); /* mark our fake directories as hidden. People
210 	                                                 *  shouldn't be enocouraged to poke around in them */
211 
212 	if (ino == NULL_SECOND_INO) {
213 		VATTR_RETURN(args->a_vap, va_parentid, NULL_ROOT_INO); /* no parent at the root, so
214 		                                                        *  the only other vnode that
215 		                                                        *  goes through this path is
216 		                                                        *  second and its parent is
217 		                                                        *  1.*/
218 	}
219 
220 	if (VATTR_IS_ACTIVE(args->a_vap, va_mode)) {
221 		/* force dr_xr_xr_x */
222 		VATTR_RETURN(args->a_vap, va_mode, S_IFDIR | S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
223 	}
224 	if (VATTR_IS_ACTIVE(args->a_vap, va_uid)) {
225 		VATTR_RETURN(args->a_vap, va_uid, covered_rootattr.va_uid);
226 	}
227 	if (VATTR_IS_ACTIVE(args->a_vap, va_gid)) {
228 		VATTR_RETURN(args->a_vap, va_gid, covered_rootattr.va_gid);
229 	}
230 
231 	if (VATTR_IS_ACTIVE(args->a_vap, va_create_time)) {
232 		VATTR_SET_SUPPORTED(args->a_vap, va_create_time);
233 		args->a_vap->va_create_time.tv_sec  = covered_rootattr.va_create_time.tv_sec;
234 		args->a_vap->va_create_time.tv_nsec = covered_rootattr.va_create_time.tv_nsec;
235 	}
236 	if (VATTR_IS_ACTIVE(args->a_vap, va_modify_time)) {
237 		VATTR_SET_SUPPORTED(args->a_vap, va_modify_time);
238 		args->a_vap->va_modify_time.tv_sec  = covered_rootattr.va_modify_time.tv_sec;
239 		args->a_vap->va_modify_time.tv_nsec = covered_rootattr.va_modify_time.tv_nsec;
240 	}
241 	if (VATTR_IS_ACTIVE(args->a_vap, va_access_time)) {
242 		VATTR_SET_SUPPORTED(args->a_vap, va_access_time);
243 		args->a_vap->va_modify_time.tv_sec  = covered_rootattr.va_access_time.tv_sec;
244 		args->a_vap->va_modify_time.tv_nsec = covered_rootattr.va_access_time.tv_nsec;
245 	}
246 
247 	nullfs_cleanup_patched_context(null_mp, ectx);
248 	return 0;
249 }
250 
/*
 * Getattr: the special (synthetic) vnodes are handled by
 * nullfs_special_getattr(); everything else is forwarded to the lower
 * vnode, after which the fsid is rewritten to this mount's fsid and, for
 * NULLM_UNVEIL mounts, ownership/mode are conjured from the caller's
 * credential and vnode_authorize() checks.
 */
static int
nullfs_getattr(struct vnop_getattr_args * args)
{
	int error;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
	kauth_cred_t cred = vfs_context_ucred(args->a_context);
	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);

	if (nullfs_checkspecialvp(args->a_vp)) {
		error = nullfs_special_getattr(args);
		return error;
	}

	/* this will return a different inode for third than read dir will */
	struct vnode * lowervp = NULLVPTOLOWERVP(args->a_vp);
	vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context);
	error = vnode_getwithref(lowervp);

	if (error == 0) {
		error = VNOP_GETATTR(lowervp, args->a_vap, ectx);
		/* NOTE(review): the iocount on lowervp is dropped here, yet
		 * vnode_authorize(lowervp, ...) is called below in the unveil
		 * branch.  Presumably the usecount held by the null node keeps
		 * lowervp alive — confirm this is intentional. */
		vnode_put(lowervp);

		if (error == 0) {
			/* fix up fsid so it doesn't say the underlying fs*/
			if (VATTR_IS_ACTIVE(args->a_vap, va_fsid)) {
				VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(vnode_mount(args->a_vp))->f_fsid.val[0]);
			}
			if (VATTR_IS_ACTIVE(args->a_vap, va_fsid64)) {
				VATTR_RETURN(args->a_vap, va_fsid64, vfs_statfs(vnode_mount(args->a_vp))->f_fsid);
			}

			/* Conjure up permissions */
			if ((null_mp->nullm_flags & NULLM_UNVEIL) == NULLM_UNVEIL) {
				if (VATTR_IS_ACTIVE(args->a_vap, va_mode)) {
					mode_t mode = args->a_vap->va_mode; // We will take away permisions if we don't have them

					// Check for authorizations
					// If we can read:
					if (vnode_authorize(lowervp, NULL, KAUTH_VNODE_GENERIC_READ_BITS, ectx) == 0) {
						mode |= S_IRUSR;
					} else {
						mode &= ~S_IRUSR;
					}

					// Or execute
					// Directories need an execute bit...
					if (vnode_authorize(lowervp, NULL, KAUTH_VNODE_GENERIC_EXECUTE_BITS, ectx) == 0) {
						mode |= S_IXUSR;
					} else {
						mode &= ~S_IXUSR;
					}

					NULLFSDEBUG("Settings bits to %d\n", mode);
					VATTR_RETURN(args->a_vap, va_mode, mode);
				}
				/* report the caller as the owner so the unveil view looks accessible */
				if (VATTR_IS_ACTIVE(args->a_vap, va_uid)) {
					VATTR_RETURN(args->a_vap, va_uid, kauth_cred_getuid(cred));
				}
				if (VATTR_IS_ACTIVE(args->a_vap, va_gid)) {
					VATTR_RETURN(args->a_vap, va_gid, kauth_cred_getgid(cred));
				}
			}
		}
	}

	nullfs_cleanup_patched_context(null_mp, ectx);
	return error;
}
319 
320 static int
nullfs_open(struct vnop_open_args * args)321 nullfs_open(struct vnop_open_args * args)
322 {
323 	int error;
324 	struct vnode *vp, *lvp;
325 	mount_t mp                  = vnode_mount(args->a_vp);
326 	struct null_mount * null_mp = MOUNTTONULLMOUNT(mp);
327 	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
328 
329 	if (nullfs_checkspecialvp(args->a_vp)) {
330 		return 0; /* nothing extra needed */
331 	}
332 
333 	vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context);
334 	vp    = args->a_vp;
335 	lvp   = NULLVPTOLOWERVP(vp);
336 	error = vnode_getwithref(lvp);
337 	if (error == 0) {
338 		error = VNOP_OPEN(lvp, args->a_mode, ectx);
339 		vnode_put(lvp);
340 	}
341 
342 	nullfs_cleanup_patched_context(null_mp, ectx);
343 	return error;
344 }
345 
346 static int
nullfs_close(struct vnop_close_args * args)347 nullfs_close(struct vnop_close_args * args)
348 {
349 	int error;
350 	struct vnode *vp, *lvp;
351 	mount_t mp                  = vnode_mount(args->a_vp);
352 	struct null_mount * null_mp = MOUNTTONULLMOUNT(mp);
353 
354 	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
355 
356 	if (nullfs_checkspecialvp(args->a_vp)) {
357 		return 0; /* nothing extra needed */
358 	}
359 
360 	vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context);
361 	vp  = args->a_vp;
362 	lvp = NULLVPTOLOWERVP(vp);
363 
364 	error = vnode_getwithref(lvp);
365 	if (error == 0) {
366 		error = VNOP_CLOSE(lvp, args->a_fflag, ectx);
367 		vnode_put(lvp);
368 	}
369 
370 	nullfs_cleanup_patched_context(null_mp, ectx);
371 	return error;
372 }
373 
/* get lvp's parent, if possible, even if it isn't set.
 *
 *  lvp is expected to have an iocount before and after this call.
 *
 *  if a dvpp is populated the returned vnode has an iocount. */
static int
null_get_lowerparent(vnode_t lvp, vnode_t * dvpp, vfs_context_t ctx)
{
	int error = 0;
	struct vnode_attr va;
	mount_t mp  = vnode_mount(lvp);
	vnode_t dvp = vnode_parent(lvp);

	/* fast path: lvp already has a cached parent pointer */
	if (dvp) {
		error = vnode_get(dvp);
		goto end;
	}

	/* no cached parent — we can only recover it if the lower fs supports
	 * looking vnodes up by file id (VFS_VGET) */
	error = ENOENT;
	if (!(mp->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
		goto end;
	}

	/* ask the lower fs for the parent's file id... */
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_parentid);

	error = vnode_getattr(lvp, &va, ctx);

	if (error || !VATTR_IS_SUPPORTED(&va, va_parentid)) {
		if (!error) {
			/* getattr succeeded but va_parentid wasn't filled in */
			error = ENOTSUP;
		}
		goto end;
	}

	/* ...and turn that id back into a vnode (comes back with an iocount) */
	error = VFS_VGET(mp, (ino64_t)va.va_parentid, &dvp, ctx);

end:
	if (error == 0) {
		*dvpp = dvp;
	}
	return error;
}
417 
/* the mountpoint lock should be held going into this function.
 *
 * NOTE(review): this function is reached only when dvp is the root or
 * second vnode (see null_lookup); every path through those two branches
 * drops nullm_lock before returning, so the caller must NOT unlock it
 * again.  If dvp matched neither branch the lock would leak — confirm the
 * caller invariant holds. */
static int
null_special_lookup(struct vnop_lookup_args * ap)
{
	struct componentname * cnp  = ap->a_cnp;
	struct vnode * dvp          = ap->a_dvp;
	struct vnode * ldvp         = NULL;
	struct vnode * lvp          = NULL;
	struct vnode * vp           = NULL;
	struct vnode * tempvp       = NULL;
	struct mount * mp           = vnode_mount(dvp);
	struct null_mount * null_mp = MOUNTTONULLMOUNT(mp);
	int error                   = ENOENT;
	vfs_context_t ectx     = nullfs_get_patched_context(null_mp, ap->a_context);

	// null_mp->nullm_lock is locked
	if (dvp == null_mp->nullm_rootvp) {
		/* handle . and .. */
		if (cnp->cn_nameptr[0] == '.') {
			if (cnp->cn_namelen == 1 || (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.')) {
				/* this is the root so both . and .. give back the root */
				vp = dvp;
				lck_mtx_unlock(&null_mp->nullm_lock);
				error = vnode_get(vp);
				goto end;
			}
		}

		/* our virtual wrapper directory should be d but D is acceptable if the
		 * lower file system is case insensitive */
		if (cnp->cn_namelen == 1 &&
		    (cnp->cn_nameptr[0] == 'd' || (null_mp->nullm_flags & NULLM_CASEINSENSITIVE ? cnp->cn_nameptr[0] == 'D' : 0))) {
			error = 0;
			if (null_mp->nullm_secondvp == NULL) {
				// drop the lock before making a new vnode
				lck_mtx_unlock(&null_mp->nullm_lock);
				error = null_getnewvnode(mp, NULL, dvp, &vp, cnp, 0);
				if (error) {
					goto end;
				}
				// Get the lock before modifying nullm_secondvp
				lck_mtx_lock(&null_mp->nullm_lock);
				if (null_mp->nullm_secondvp == NULL) {
					/* we won the race: publish the new vnode */
					null_mp->nullm_secondvp = vp;
					lck_mtx_unlock(&null_mp->nullm_lock);
				} else {
					/* Another thread already set null_mp->nullm_secondvp while the
					 * lock was dropped so recycle the vnode we just made */
					tempvp = vp;
					vp = null_mp->nullm_secondvp;
					lck_mtx_unlock(&null_mp->nullm_lock);
					/* recycle will call reclaim which will get rid of the internals */
					vnode_recycle(tempvp);
					vnode_put(tempvp);

					error = vnode_get(vp);
				}
			} else {
				vp = null_mp->nullm_secondvp;
				lck_mtx_unlock(&null_mp->nullm_lock);
				error = vnode_get(vp);
			}
		} else {
			/* not ., .., or d: fall through with error == ENOENT */
			lck_mtx_unlock(&null_mp->nullm_lock);
		}
	} else if (dvp == null_mp->nullm_secondvp) {
		/* handle . and .. */
		if (cnp->cn_nameptr[0] == '.') {
			if (cnp->cn_namelen == 1) {
				vp = dvp;
				lck_mtx_unlock(&null_mp->nullm_lock);
				error = vnode_get(vp);
				goto end;
			} else if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
				/* parent here is the root vp */
				vp    = null_mp->nullm_rootvp;
				lck_mtx_unlock(&null_mp->nullm_lock);
				error = vnode_get(vp);
				goto end;
			}
		}
		/* nullmp->nullm_lowerrootvp was set at mount time so don't need to lock to
		 * access it */
		/* Drop the global lock since we aren't accessing rootvp or secondvp any more */
		lck_mtx_unlock(&null_mp->nullm_lock);
		error = vnode_getwithvid(null_mp->nullm_lowerrootvp, null_mp->nullm_lowerrootvid);
		if (error) {
			goto end;
		}

		/* We don't want to mess with case insensitivity and unicode, so the plan to
		 *  check here is
		 *   1. try to get the lower root's parent
		 *   2. If we get a parent, then perform a lookup on the lower file system
		 *  using the parent and the passed in cnp
		 *   3. If that worked and we got a vp, then see if the vp is lowerrootvp. If
		 *  so we got a match
		 *   4. Anything else results in ENOENT.
		 */
		error = null_get_lowerparent(null_mp->nullm_lowerrootvp, &ldvp, ectx);

		if (error == 0) {
			error = VNOP_LOOKUP(ldvp, &lvp, cnp, ectx);
			vnode_put(ldvp);

			if (error == 0) {
				// nullm_lowerrootvp is only touched during mount and unmount so we don't need the lock to check it.
				if (lvp == null_mp->nullm_lowerrootvp) {
					/* always check the hashmap for a vnode for this, the root of the
					 * mirrored system */
					error = null_nodeget(mp, lvp, dvp, &vp, cnp, 0);
				} else {
					error = ENOENT;
				}
				vnode_put(lvp);
			}
		}
		vnode_put(null_mp->nullm_lowerrootvp);
	}

end:
	nullfs_cleanup_patched_context(null_mp, ectx);
	if (error == 0) {
		*ap->a_vpp = vp;
	}
	return error;
}
545 
/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vnop_lookup_args * ap)
{
	struct componentname * cnp = ap->a_cnp;
	struct vnode * dvp         = ap->a_dvp;
	struct vnode *vp, *ldvp, *lvp;
	struct mount * mp;
	struct null_mount * null_mp;
	int error;
	vfs_context_t ectx;

	NULLFSDEBUG("%s parent: %p component: %.*s\n", __FUNCTION__, ap->a_dvp, cnp->cn_namelen, cnp->cn_nameptr);

	mp = vnode_mount(dvp);
	/* rename and delete are not allowed. this is a read only file system */
	if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME || cnp->cn_nameiop == CREATE) {
		return EROFS;
	}
	null_mp = MOUNTTONULLMOUNT(mp);


	lck_mtx_lock(&null_mp->nullm_lock);
	if (nullfs_isspecialvp(dvp)) {
		error = null_special_lookup(ap);
		// null_special_lookup drops the lock
		return error;
	}
	lck_mtx_unlock(&null_mp->nullm_lock);

	// . and .. handling
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			/* "." resolves to the directory itself */
			vp = dvp;
		} else if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			/* mount point crossing is handled in null_special_lookup */
			vp = vnode_parent(dvp);
		} else {
			/* a name that merely starts with '.' — do a normal lookup */
			goto notdot;
		}

		/* vnode_parent() may return NULL if no parent is cached */
		error = vp ? vnode_get(vp) : ENOENT;

		if (error == 0) {
			*ap->a_vpp = vp;
		}

		return error;
	}

notdot:
	ectx = nullfs_get_patched_context(null_mp, ap->a_context);
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;

	/*
	 * Hold ldvp.  The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation.
	 */
	error = vnode_getwithref(ldvp);
	if (error) {
		nullfs_cleanup_patched_context(null_mp, ectx);
		return error;
	}

	error = VNOP_LOOKUP(ldvp, &lvp, cnp, ectx);

	vnode_put(ldvp);

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			/* the lower fs returned the directory itself ("." equivalent) */
			vp    = dvp;
			error = vnode_get(vp);
		} else {
			/* wrap (or find the existing wrapper for) the lower vnode */
			error = null_nodeget(mp, lvp, dvp, &vp, cnp, 0);
		}
		if (error == 0) {
			*ap->a_vpp = vp;
		}
		/* if we got lvp, drop the iocount from VNOP_LOOKUP */
		if (lvp != NULL) {
			vnode_put(lvp);
		}
	}

	nullfs_cleanup_patched_context(null_mp, ectx);
	return error;
}
638 
/*
 * Don't think this needs to do anything
 */
static int
null_inactive(__unused struct vnop_inactive_args * ap)
{
	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	return 0;
}
649 
/*
 * Reclaim: tear down a nullfs vnode.  Unhashes the node (if it made it into
 * the hash), drops the usecount held on the lower vnode, clears the special
 * root/second pointers if this vnode was one of them, and frees the
 * null_node.
 */
static int
null_reclaim(struct vnop_reclaim_args * ap)
{
	struct vnode * vp;
	struct null_node * xp;
	struct vnode * lowervp;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));

	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp = ap->a_vp;

	xp      = VTONULL(vp);
	lowervp = xp->null_lowervp;

	/* hold the mount lock while unhashing and while clearing the special
	 * vnode pointers so lookups can't see a half-torn-down vnode */
	lck_mtx_lock(&null_mp->nullm_lock);

	vnode_removefsref(vp);

	if (lowervp != NULL) {
		/* root and second don't have a lowervp, so nothing to release and nothing
		 * got hashed */
		if (xp->null_flags & NULL_FLAG_HASHED) {
			/* only call this if we actually made it into the hash list. reclaim gets
			 *  called also to
			 *  clean up a vnode that got created when it didn't need to under race
			 *  conditions */
			null_hashrem(xp);
		}
		vnode_rele(lowervp);
	}

	if (vp == null_mp->nullm_rootvp) {
		null_mp->nullm_rootvp = NULL;
	} else if (vp == null_mp->nullm_secondvp) {
		null_mp->nullm_secondvp = NULL;
	}

	lck_mtx_unlock(&null_mp->nullm_lock);

	cache_purge(vp);
	vnode_clearfsnode(vp);

	kfree_type(struct null_node, xp);

	return 0;
}
697 
/* on-disk record length for a dirent holding (dp)->d_namlen name bytes plus
 * a NUL, rounded up to a 4-byte boundary */
#define DIRENT_SZ(dp) ((sizeof(struct dirent) - NAME_MAX) + (((dp)->d_namlen + 1 + 3) & ~3))

/*
 * Copy one synthetic directory entry (always typed DT_DIR) into uio.
 * Returns 0 on success, EMSGSIZE if the remaining uio space can't hold the
 * record, or EINVAL if the name is too long for d_name.
 */
static int
store_entry_special(ino_t ino, const char * name, struct uio * uio)
{
	struct dirent e;
	size_t namelen = strlen(name);
	int error      = EINVAL;

	if (namelen + 1 <= NAME_MAX) {
		memset(&e, 0, sizeof(e));

		e.d_ino  = ino;
		e.d_type = DT_DIR;

		e.d_namlen = namelen; /* don't include NUL */
		e.d_reclen = DIRENT_SZ(&e);
		if (uio_resid(uio) >= e.d_reclen) {
			/* only e.d_reclen bytes are copied out, so the zero padding
			 * after the name stays bounded */
			strlcpy(e.d_name, name, NAME_MAX);
			error = uiomove((caddr_t)&e, e.d_reclen, uio);
		} else {
			error = EMSGSIZE;
		}
	}
	return error;
}
724 
/*
 * Readdir for the synthetic root/second directories.  Entries are emitted
 * by logical offset: 0 = ".", 1 = "..", 2 = the single child ("d" under the
 * root, or the mirrored lower root's name under second).
 *
 * Locking protocol: the caller (nullfs_readdir) enters with nullm_lock
 * held; this function ALWAYS drops it before returning.
 */
static int
nullfs_special_readdir(struct vnop_readdir_args * ap)
{
	struct vnode * vp           = ap->a_vp;
	struct uio * uio            = ap->a_uio;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));
	off_t offset                = uio_offset(uio);
	int error                   = ERANGE;
	int items                   = 0;
	ino_t ino                   = 0;
	const char * name           = NULL;
	boolean_t locked = TRUE; /* tracks whether nullm_lock is still held */

	if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) {
		lck_mtx_unlock(&null_mp->nullm_lock);
		return EINVAL;
	}

	if (offset == 0) {
		/* . case */
		if (vp == null_mp->nullm_rootvp) {
			ino = NULL_ROOT_INO;
		} else { /* only get here if vp matches nullm_rootvp or nullm_secondvp */
			ino = NULL_SECOND_INO;
		}
		error = store_entry_special(ino, ".", uio);
		if (error) {
			goto out;
		}
		offset++;
		items++;
	}
	if (offset == 1) {
		/* .. case */
		/* only get here if vp matches nullm_rootvp or nullm_secondvp */
		ino = NULL_ROOT_INO;

		error = store_entry_special(ino, "..", uio);
		if (error) {
			goto out;
		}
		offset++;
		items++;
	}
	if (offset == 2) {
		/* the directory case */
		if (vp == null_mp->nullm_rootvp) {
			ino  = NULL_SECOND_INO;
			name = "d";
		} else { /* only get here if vp matches nullm_rootvp or nullm_secondvp */
			// drop the lock before performing operations on nullm_lowerrootvp
			lck_mtx_unlock(&null_mp->nullm_lock);
			locked = FALSE;
			ino = NULL_THIRD_INO;
			if (vnode_getwithvid(null_mp->nullm_lowerrootvp, null_mp->nullm_lowerrootvid)) {
				/* In this case the lower file system has been ripped out from under us,
				 *  but we don't want to error out
				 *  Instead we just want d to look empty. */
				error = 0;
				goto out;
			}
			name = vnode_getname_printable(null_mp->nullm_lowerrootvp);
		}
		error = store_entry_special(ino, name, uio);

		/* release the name and iocount taken in the NULL_THIRD_INO path */
		if (ino == NULL_THIRD_INO) {
			vnode_putname_printable(name);
			vnode_put(null_mp->nullm_lowerrootvp);
		}

		if (error) {
			goto out;
		}
		offset++;
		items++;
	}

out:
	if (locked) {
		lck_mtx_unlock(&null_mp->nullm_lock);
	}
	if (error == EMSGSIZE) {
		error = 0; /* return success if we ran out of space, but we wanted to make
		            *  sure that we didn't update offset and items incorrectly */
	}
	uio_setoffset(uio, offset);
	if (ap->a_numdirent) {
		*ap->a_numdirent = items;
	}
	return error;
}
816 
817 static int
nullfs_readdir(struct vnop_readdir_args * ap)818 nullfs_readdir(struct vnop_readdir_args * ap)
819 {
820 	struct vnode *vp, *lvp;
821 	int error;
822 	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));
823 
824 	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
825 	/* assumption is that any vp that comes through here had to go through lookup
826 	 */
827 
828 	lck_mtx_lock(&null_mp->nullm_lock);
829 	if (nullfs_isspecialvp(ap->a_vp)) {
830 		error = nullfs_special_readdir(ap);
831 		// nullfs_special_readdir drops the lock
832 		return error;
833 	}
834 	lck_mtx_unlock(&null_mp->nullm_lock);
835 
836 	vfs_context_t ectx = nullfs_get_patched_context(null_mp, ap->a_context);
837 	vp    = ap->a_vp;
838 	lvp   = NULLVPTOLOWERVP(vp);
839 	error = vnode_getwithref(lvp);
840 	if (error == 0) {
841 		error = VNOP_READDIR(lvp, ap->a_uio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ectx);
842 		vnode_put(lvp);
843 	}
844 
845 	nullfs_cleanup_patched_context(null_mp, ectx);
846 	return error;
847 }
848 
849 static int
nullfs_readlink(struct vnop_readlink_args * ap)850 nullfs_readlink(struct vnop_readlink_args * ap)
851 {
852 	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
853 	int error;
854 	struct vnode *vp, *lvp;
855 	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));
856 
857 	if (nullfs_checkspecialvp(ap->a_vp)) {
858 		return ENOTSUP; /* the special vnodes aren't links */
859 	}
860 
861 	vfs_context_t ectx = nullfs_get_patched_context(null_mp, ap->a_context);
862 	vp  = ap->a_vp;
863 	lvp = NULLVPTOLOWERVP(vp);
864 
865 	error = vnode_getwithref(lvp);
866 	if (error == 0) {
867 		error = VNOP_READLINK(lvp, ap->a_uio, ectx);
868 		vnode_put(lvp);
869 
870 		if (error) {
871 			NULLFSDEBUG("readlink failed: %d\n", error);
872 		}
873 	}
874 
875 	nullfs_cleanup_patched_context(null_mp, ectx);
876 	return error;
877 }
878 
/* Pathconf is not supported by this stacked layer; EINVAL tells the caller
 * the selector can't be answered here. */
static int
nullfs_pathconf(__unused struct vnop_pathconf_args * args)
{
	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
	return EINVAL;
}
885 
/* Fsync is a successful no-op: nullfs is read-only, so there is never
 * dirty data of its own to flush. */
static int
nullfs_fsync(__unused struct vnop_fsync_args * args)
{
	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
	return 0;
}
892 
893 static int
nullfs_mmap(struct vnop_mmap_args * args)894 nullfs_mmap(struct vnop_mmap_args * args)
895 {
896 	int error;
897 	struct vnode *vp, *lvp;
898 	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
899 
900 	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
901 
902 	if (nullfs_checkspecialvp(args->a_vp)) {
903 		return 0; /* nothing extra needed */
904 	}
905 
906 	vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context);
907 	vp    = args->a_vp;
908 	lvp   = NULLVPTOLOWERVP(vp);
909 	error = vnode_getwithref(lvp);
910 	if (error == 0) {
911 		error = VNOP_MMAP(lvp, args->a_fflags, ectx);
912 		vnode_put(lvp);
913 	}
914 
915 	nullfs_cleanup_patched_context(null_mp, ectx);
916 	return error;
917 }
918 
919 static int
nullfs_mnomap(struct vnop_mnomap_args * args)920 nullfs_mnomap(struct vnop_mnomap_args * args)
921 {
922 	int error;
923 	struct vnode *vp, *lvp;
924 	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
925 
926 	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
927 
928 	if (nullfs_checkspecialvp(args->a_vp)) {
929 		return 0; /* nothing extra needed */
930 	}
931 
932 	vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context);
933 	vp    = args->a_vp;
934 	lvp   = NULLVPTOLOWERVP(vp);
935 	error = vnode_getwithref(lvp);
936 	if (error == 0) {
937 		error = VNOP_MNOMAP(lvp, ectx);
938 		vnode_put(lvp);
939 	}
940 
941 	nullfs_cleanup_patched_context(null_mp, ectx);
942 	return error;
943 }
944 
945 static int
nullfs_getxattr(struct vnop_getxattr_args * args)946 nullfs_getxattr(struct vnop_getxattr_args * args)
947 {
948 	int error;
949 	struct vnode *vp, *lvp;
950 	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
951 
952 	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
953 
954 	if (nullfs_checkspecialvp(args->a_vp)) {
955 		return ENOATTR; /* no xattrs on the special vnodes */
956 	}
957 
958 	vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context);
959 	vp    = args->a_vp;
960 	lvp   = NULLVPTOLOWERVP(vp);
961 	error = vnode_getwithref(lvp);
962 	if (error == 0) {
963 		error = VNOP_GETXATTR(lvp, args->a_name, args->a_uio, args->a_size, args->a_options, ectx);
964 		vnode_put(lvp);
965 	}
966 
967 	nullfs_cleanup_patched_context(null_mp, ectx);
968 	return error;
969 }
970 
971 static int
nullfs_listxattr(struct vnop_listxattr_args * args)972 nullfs_listxattr(struct vnop_listxattr_args * args)
973 {
974 	int error;
975 	struct vnode *vp, *lvp;
976 	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
977 
978 	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);
979 
980 	if (nullfs_checkspecialvp(args->a_vp)) {
981 		return 0; /* no xattrs on the special vnodes */
982 	}
983 
984 	vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context);
985 	vp    = args->a_vp;
986 	lvp   = NULLVPTOLOWERVP(vp);
987 	error = vnode_getwithref(lvp);
988 	if (error == 0) {
989 		error = VNOP_LISTXATTR(lvp, args->a_uio, args->a_size, args->a_options, ectx);
990 		vnode_put(lvp);
991 	}
992 
993 	nullfs_cleanup_patched_context(null_mp, ectx);
994 	return error;
995 }
996 
/* relies on v1 paging */
/*
 * Satisfy a pagein by reading the requested range from the lower vnode
 * into the caller-supplied UPL, then committing the pages that were
 * filled and aborting the remainder (unless UPL_NOCOMMIT was requested).
 * Returns 0 on success, ENOTSUP for non-regular files, or an errno.
 */
static int
nullfs_pagein(struct vnop_pagein_args * ap)
{
	int error = EIO;
	struct vnode *vp, *lvp;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));
	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp  = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);

	/* Paging is only meaningful for regular files. */
	if (vnode_vtype(vp) != VREG) {
		return ENOTSUP;
	}

	vfs_context_t ectx = nullfs_get_patched_context(null_mp, ap->a_context);
	/*
	 * Ask VM/UBC/VFS to do our bidding
	 */
	if (vnode_getwithvid(lvp, NULLVPTOLOWERVID(vp)) == 0) {
		vm_offset_t ioaddr;
		uio_t auio;
		kern_return_t kret;
		off_t bytes_to_commit;
		off_t lowersize;
		upl_t upl      = ap->a_pl;
		user_ssize_t bytes_remaining = 0;

		/* Build a single-iovec uio targeting the mapped UPL window. */
		auio = uio_create(1, ap->a_f_offset, UIO_SYSSPACE, UIO_READ);
		if (auio == NULL) {
			error = EIO;
			goto exit_no_unmap;
		}

		kret = ubc_upl_map(upl, &ioaddr);
		if (KERN_SUCCESS != kret) {
			panic("nullfs_pagein: ubc_upl_map() failed with (%d)", kret);
		}

		ioaddr += ap->a_pl_offset;

		error = uio_addiov(auio, (user_addr_t)ioaddr, ap->a_size);
		if (error) {
			goto exit;
		}

		/* Keep the upper UBC size in sync with the lower file's size. */
		lowersize = ubc_getsize(lvp);
		if (lowersize != ubc_getsize(vp)) {
			(void)ubc_setsize(vp, lowersize); /* ignore failures, nothing can be done */
		}

		error = VNOP_READ(lvp, auio, ((ap->a_flags & UPL_IOSYNC) ? IO_SYNC : 0), ectx);

		/* Short read: zero-fill the tail so no stale data reaches the caller. */
		bytes_remaining = uio_resid(auio);
		if (bytes_remaining > 0 && bytes_remaining <= (user_ssize_t)ap->a_size) {
			/* zero bytes that weren't read in to the upl */
			bzero((void*)((uintptr_t)(ioaddr + ap->a_size - bytes_remaining)), (size_t) bytes_remaining);
		}

exit:
		kret = ubc_upl_unmap(upl);
		if (KERN_SUCCESS != kret) {
			panic("nullfs_pagein: ubc_upl_unmap() failed with (%d)", kret);
		}

		if (auio != NULL) {
			uio_free(auio);
		}

exit_no_unmap:
		/* Unless the caller asked us not to, resolve every page in the UPL:
		 * commit what was read, abort (with error) everything else. */
		if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
			if (!error && (bytes_remaining >= 0) && (bytes_remaining <= (user_ssize_t)ap->a_size)) {
				/* only commit what was read in (page aligned)*/
				bytes_to_commit = ap->a_size - bytes_remaining;
				if (bytes_to_commit) {
					/* need to make sure bytes_to_commit and byte_remaining are page aligned before calling ubc_upl_commit_range*/
					if (bytes_to_commit & PAGE_MASK) {
						/* Round the commit up to a page boundary and shrink
						 * the abort range to match. */
						bytes_to_commit = (bytes_to_commit & (~PAGE_MASK)) + (PAGE_MASK + 1);
						assert(bytes_to_commit <= (off_t)ap->a_size);

						bytes_remaining = ap->a_size - bytes_to_commit;
					}
					ubc_upl_commit_range(upl, ap->a_pl_offset, (upl_size_t)bytes_to_commit, UPL_COMMIT_FREE_ON_EMPTY);
				}

				/* abort anything thats left */
				if (bytes_remaining) {
					ubc_upl_abort_range(upl, ap->a_pl_offset + bytes_to_commit, (upl_size_t)bytes_remaining, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
				}
			} else {
				ubc_upl_abort_range(upl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
			}
		}
		vnode_put(lvp);
	} else if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
		/* Lower vnode is gone (vid mismatch): fail the whole range. */
		ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
	}

	nullfs_cleanup_patched_context(null_mp, ectx);
	return error;
}
1099 
1100 static int
nullfs_read(struct vnop_read_args * ap)1101 nullfs_read(struct vnop_read_args * ap)
1102 {
1103 	int error = EIO;
1104 
1105 	struct vnode *vp, *lvp;
1106 	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));
1107 	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
1108 
1109 	if (nullfs_checkspecialvp(ap->a_vp)) {
1110 		return ENOTSUP; /* the special vnodes can't be read */
1111 	}
1112 
1113 	vfs_context_t ectx = nullfs_get_patched_context(null_mp, ap->a_context);
1114 	vp  = ap->a_vp;
1115 	lvp = NULLVPTOLOWERVP(vp);
1116 
1117 	/*
1118 	 * First some house keeping
1119 	 */
1120 	if (vnode_getwithvid(lvp, NULLVPTOLOWERVID(vp)) == 0) {
1121 		if (!vnode_isreg(lvp) && !vnode_islnk(lvp)) {
1122 			error = EPERM;
1123 			goto end;
1124 		}
1125 
1126 		if (uio_resid(ap->a_uio) == 0) {
1127 			error = 0;
1128 			goto end;
1129 		}
1130 
1131 		/*
1132 		 * Now ask VM/UBC/VFS to do our bidding
1133 		 */
1134 
1135 		error = VNOP_READ(lvp, ap->a_uio, ap->a_ioflag, ectx);
1136 		if (error) {
1137 			NULLFSDEBUG("VNOP_READ failed: %d\n", error);
1138 		}
1139 end:
1140 		vnode_put(lvp);
1141 	}
1142 
1143 	nullfs_cleanup_patched_context(null_mp, ectx);
1144 	return error;
1145 }
1146 
1147 /*
1148  * Global vfs data structures
1149  */
1150 
1151 static const struct vnodeopv_entry_desc nullfs_vnodeop_entries[] = {
1152 	{.opve_op = &vnop_default_desc, .opve_impl = (vop_t)nullfs_default}, {.opve_op = &vnop_getattr_desc, .opve_impl = (vop_t)nullfs_getattr},
1153 	{.opve_op = &vnop_open_desc, .opve_impl = (vop_t)nullfs_open}, {.opve_op = &vnop_close_desc, .opve_impl = (vop_t)nullfs_close},
1154 	{.opve_op = &vnop_inactive_desc, .opve_impl = (vop_t)null_inactive}, {.opve_op = &vnop_reclaim_desc, .opve_impl = (vop_t)null_reclaim},
1155 	{.opve_op = &vnop_lookup_desc, .opve_impl = (vop_t)null_lookup}, {.opve_op = &vnop_readdir_desc, .opve_impl = (vop_t)nullfs_readdir},
1156 	{.opve_op = &vnop_readlink_desc, .opve_impl = (vop_t)nullfs_readlink}, {.opve_op = &vnop_pathconf_desc, .opve_impl = (vop_t)nullfs_pathconf},
1157 	{.opve_op = &vnop_fsync_desc, .opve_impl = (vop_t)nullfs_fsync}, {.opve_op = &vnop_mmap_desc, .opve_impl = (vop_t)nullfs_mmap},
1158 	{.opve_op = &vnop_mnomap_desc, .opve_impl = (vop_t)nullfs_mnomap}, {.opve_op = &vnop_getxattr_desc, .opve_impl = (vop_t)nullfs_getxattr},
1159 	{.opve_op = &vnop_pagein_desc, .opve_impl = (vop_t)nullfs_pagein}, {.opve_op = &vnop_read_desc, .opve_impl = (vop_t)nullfs_read},
1160 	{.opve_op = &vnop_listxattr_desc, .opve_impl = (vop_t)nullfs_listxattr}, {.opve_op = NULL, .opve_impl = NULL},
1161 };
1162 
/* Operation-vector descriptor registered with VFS; ties the entry table to
 * the nullfs_vnodeop_p vector pointer. */
const struct vnodeopv_desc nullfs_vnodeop_opv_desc = {.opv_desc_vector_p = &nullfs_vnodeop_p, .opv_desc_ops = nullfs_vnodeop_entries};
1164 
// NULLFS-specific helper functions
1166 
1167 int
nullfs_getbackingvnode(vnode_t in_vp,vnode_t * out_vpp)1168 nullfs_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
1169 {
1170 	int result = EINVAL;
1171 
1172 	if (out_vpp == NULL || in_vp == NULL) {
1173 		goto end;
1174 	}
1175 
1176 	struct vfsstatfs * sp   = NULL;
1177 	mount_t mp = vnode_mount(in_vp);
1178 
1179 	sp = vfs_statfs(mp);
1180 	//If this isn't a nullfs vnode or it is but it's a special vnode
1181 	if (strcmp(sp->f_fstypename, "nullfs") != 0 || nullfs_checkspecialvp(in_vp)) {
1182 		*out_vpp = NULLVP;
1183 		result = ENOENT;
1184 		goto end;
1185 	}
1186 
1187 	vnode_t lvp = NULLVPTOLOWERVP(in_vp);
1188 	if ((result = vnode_getwithvid(lvp, NULLVPTOLOWERVID(in_vp)))) {
1189 		goto end;
1190 	}
1191 
1192 	*out_vpp = lvp;
1193 
1194 end:
1195 	return result;
1196 }
1197