/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <miscfs/specfs/specdev.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <kern/kalloc.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/paths.h>
#include <os/overflow.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
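
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * machinery below sits behind the cache_lookup() KPI.  A filesystem
 * typically consults the cache before doing a real directory scan; by
 * convention a return of -1 is a positive hit, ENOENT is a negative hit,
 * and 0 means the cache has no answer:
 *
 *	vnode_t vp = NULLVP;
 *	switch (cache_lookup(dvp, &vp, cnp)) {
 *	case -1:      // positive hit, vp returned
 *	case ENOENT:  // negative hit, name known not to exist
 *	case 0:       // miss, fall through to a real directory scan
 *	}
 */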

/*
 * Structures associated with name caching.
 */

ZONE_DEFINE_TYPE(namecache_zone, "namecache", struct namecache, ZC_NONE);

struct smrq_list_head *nchashtbl;       /* Hash Table */
u_long  nchashmask;
u_long  nchash;                         /* size of hash table - 1 */
long    numcache;                       /* number of cache entries allocated */
int     desiredNodes;
int     desiredNegNodes;
int     ncs_negtotal;
TUNABLE_WRITEABLE(int, nc_disabled, "-novfscache", 0);
__options_decl(nc_smr_level_t, uint32_t, {
	NC_SMR_DISABLED = 0,
	NC_SMR_LOOKUP = 1
});
TUNABLE(nc_smr_level_t, nc_smr_enabled, "ncsmr", NC_SMR_LOOKUP);
TAILQ_HEAD(, namecache) nchead;         /* chain of all name cache entries */
TAILQ_HEAD(, namecache) neghead;        /* chain of only negative cache entries */


#if COLLECT_STATS

struct  nchstats nchstats;              /* cache effectiveness statistics */

#define NCHSTAT(v) {            \
	nchstats.v++;           \
}
#define NAME_CACHE_LOCK_SHARED()        name_cache_lock()
#define NAME_CACHE_LOCK_SHARED_TO_EXCLUSIVE() TRUE

#else

#define NCHSTAT(v)
#define NAME_CACHE_LOCK_SHARED()        name_cache_lock_shared()
#define NAME_CACHE_LOCK_SHARED_TO_EXCLUSIVE()             name_cache_lock_shared_to_exclusive()

#endif

#define NAME_CACHE_LOCK()               name_cache_lock()
#define NAME_CACHE_UNLOCK()             name_cache_unlock()

/* vars for name cache list lock */
static LCK_GRP_DECLARE(namecache_lck_grp, "Name Cache");
static LCK_RW_DECLARE(namecache_rw_lock, &namecache_lck_grp);

typedef struct string_t {
	LIST_ENTRY(string_t)  hash_chain;
	char                  *str;
	uint32_t              strbuflen;
	uint32_t              refcount;
} string_t;

ZONE_DEFINE_TYPE(stringcache_zone, "vfsstringcache", string_t, ZC_NONE);

static LCK_GRP_DECLARE(strcache_lck_grp, "String Cache");
static LCK_ATTR_DECLARE(strcache_lck_attr, 0, 0);
LCK_RW_DECLARE_ATTR(strtable_rw_lock, &strcache_lck_grp, &strcache_lck_attr);

static LCK_GRP_DECLARE(rootvnode_lck_grp, "rootvnode");
LCK_RW_DECLARE(rootvnode_rw_lock, &rootvnode_lck_grp);

#define NUM_STRCACHE_LOCKS 1024

lck_mtx_t strcache_mtx_locks[NUM_STRCACHE_LOCKS];

SYSCTL_NODE(_vfs, OID_AUTO, ncstats, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "vfs name cache stats");

SYSCTL_COMPAT_INT(_vfs_ncstats, OID_AUTO, nc_smr_enabled,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &nc_smr_enabled, 0, "");

#if COLLECT_NC_SMR_STATS
struct ncstats {
	uint64_t cl_smr_hits;
	uint64_t cl_smr_miss;
	uint64_t cl_smr_negative_hits;
	uint64_t cl_smr_fallback;
	uint64_t cl_lock_hits;
	uint64_t clp_next;
	uint64_t clp_next_fail;
	uint64_t clp_smr_next;
	uint64_t clp_smr_next_fail;
	uint64_t clp_smr_fallback;
	uint64_t nc_lock_shared;
	uint64_t nc_lock;
} ncstats = {0};

SYSCTL_LONG(_vfs_ncstats, OID_AUTO, cl_smr_hits,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ncstats.cl_smr_hits, "");
SYSCTL_LONG(_vfs_ncstats, OID_AUTO, cl_smr_misses,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ncstats.cl_smr_miss, "");
SYSCTL_LONG(_vfs_ncstats, OID_AUTO, cl_smr_negative_hits,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ncstats.cl_smr_negative_hits, "");
SYSCTL_LONG(_vfs_ncstats, OID_AUTO, cl_smr_fallback,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ncstats.cl_smr_fallback, "");
SYSCTL_LONG(_vfs_ncstats, OID_AUTO, cl_lock_hits,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ncstats.cl_lock_hits, "");
SYSCTL_LONG(_vfs_ncstats, OID_AUTO, clp_next,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ncstats.clp_next, "");
SYSCTL_LONG(_vfs_ncstats, OID_AUTO, clp_next_fail,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ncstats.clp_next_fail, "");
SYSCTL_LONG(_vfs_ncstats, OID_AUTO, clp_smr_next,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ncstats.clp_smr_next, "");
SYSCTL_LONG(_vfs_ncstats, OID_AUTO, clp_smr_next_fail,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ncstats.clp_smr_next_fail, "");
SYSCTL_LONG(_vfs_ncstats, OID_AUTO, nc_lock_shared,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ncstats.nc_lock_shared, "");
SYSCTL_LONG(_vfs_ncstats, OID_AUTO, nc_lock,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &ncstats.nc_lock, "");

#define NC_SMR_STATS(v)  os_atomic_inc(&ncstats.v, relaxed)
#else
#define NC_SMR_STATS(v)
#endif /* COLLECT_NC_SMR_STATS */

static vnode_t cache_lookup_locked(vnode_t dvp, struct componentname *cnp, uint32_t *vidp);
static vnode_t cache_lookup_smr(vnode_t dvp, struct componentname *cnp, uint32_t *vidp);
static const char *add_name_internal(const char *, uint32_t, u_int, boolean_t, u_int);
static void init_string_table(void);
static void cache_delete(struct namecache *, int);
static void cache_enter_locked(vnode_t dvp, vnode_t vp, struct componentname *cnp, const char *strname);
static void cache_purge_locked(vnode_t vp, kauth_cred_t *credp);
static void namecache_smr_free(void *, size_t);
static void string_smr_free(void *, size_t);


#ifdef DUMP_STRING_TABLE
/*
 * Internal dump function used for debugging
 */
void dump_string_table(void);
#endif  /* DUMP_STRING_TABLE */

static void init_crc32(void);
static unsigned int crc32tab[256];


#define NCHHASH(dvp, hash_val) \
	(&nchashtbl[(dvp->v_id ^ (hash_val)) & nchashmask])
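
/*
 * Worked example (values are made up for illustration): NCHHASH folds the
 * parent directory's v_id into the component-name hash so that identical
 * names under different directories land in different chains.  With
 * nchashmask == 0x3fff, a name hash of 0x5eed and a parent v_id of 42:
 *
 *	bucket = (42 ^ 0x5eed) & 0x3fff;	// 0x5ec7 & 0x3fff == 0x1ec7
 *	head   = &nchashtbl[bucket];		// chain to search
 *
 * Only the fold-and-mask shape comes from the macro itself.
 */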

/*
 * This function tries to check if a directory vp is a subdirectory of dvp
 * only from valid v_parent pointers. It is called with the name cache lock
 * held and does not drop the lock anytime inside the function.
 *
 * It returns a boolean that indicates whether or not it was able to
 * successfully infer the parent/descendant relationship via the v_parent
 * pointers, or if it could not infer such relationship and that the decision
 * must be delegated to the owning filesystem.
 *
 * If it does not defer the decision, i.e. it was successfully able to
 * determine the parent/descendant relationship, *is_subdir tells the caller
 * if vp is a subdirectory of dvp.
 *
 * If the decision is deferred, *next_vp is where it stopped i.e. *next_vp
 * is the vnode whose parent is to be determined from the filesystem.
 * *is_subdir, in this case, is not indicative of anything and should be
 * ignored.
 *
 * The return value and output args should be used as follows :
 *
 * defer = cache_check_vnode_issubdir(vp, dvp, is_subdir, next_vp);
 * if (!defer) {
 *      if (*is_subdir)
 *              vp is subdirectory;
 *      else
 *              vp is not a subdirectory;
 * } else {
 *      if (*next_vp)
 *              check this vnode's parent from the filesystem
 *      else
 *              error (likely because of forced unmount).
 * }
 *
 */
static boolean_t
cache_check_vnode_issubdir(vnode_t vp, vnode_t dvp, boolean_t *is_subdir,
    vnode_t *next_vp)
{
	vnode_t tvp = vp;
	int defer = FALSE;

	*is_subdir = FALSE;
	*next_vp = NULLVP;
	while (1) {
		mount_t tmp;

		if (tvp == dvp) {
			*is_subdir = TRUE;
			break;
		} else if (tvp == rootvnode) {
			/* *is_subdir = FALSE */
			break;
		}

		tmp = tvp->v_mount;
		while ((tvp->v_flag & VROOT) && tmp && tmp->mnt_vnodecovered &&
		    tvp != dvp && tvp != rootvnode) {
			tvp = tmp->mnt_vnodecovered;
			tmp = tvp->v_mount;
		}

		/*
		 * If dvp is not at the top of a mount "stack" then
		 * vp is not a subdirectory of dvp either.
		 */
		if (tvp == dvp || tvp == rootvnode) {
			/* *is_subdir = FALSE */
			break;
		}

		if (!tmp) {
			defer = TRUE;
			*next_vp = NULLVP;
			break;
		}

		if ((tvp->v_flag & VISHARDLINK) || !(tvp->v_parent)) {
			defer = TRUE;
			*next_vp = tvp;
			break;
		}

		tvp = tvp->v_parent;
	}

	return defer;
}

/* maximum times to retry from potentially transient errors in vnode_issubdir */
#define MAX_ERROR_RETRY 3

/*
 * This function checks if a given directory (vp) is a subdirectory of dvp.
 * It walks backwards from vp and if it hits dvp in its parent chain,
 * it is a subdirectory. If it encounters the root directory, it is not
 * a subdirectory.
 *
 * This function returns an error if it is unsuccessful and 0 on success.
 *
 * On entry (and exit) vp has an iocount and if this function has to take
 * any iocounts on other vnodes in the parent chain traversal, it releases them.
 */
int
vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx)
{
	vnode_t start_vp, tvp;
	vnode_t vp_with_iocount;
	int error = 0;
	char dotdotbuf[] = "..";
	int error_retry_count = 0; /* retry count for potentially transient
	                            *  errors */

	*is_subdir = FALSE;
	tvp = start_vp = vp;
	/*
	 * Anytime we acquire an iocount in this function, we save the vnode
	 * in this variable and release it before exiting.
	 */
	vp_with_iocount = NULLVP;

	while (1) {
		boolean_t defer;
		vnode_t pvp;
		uint32_t vid = 0;
		struct componentname cn;
		boolean_t is_subdir_locked = FALSE;

		if (tvp == dvp) {
			*is_subdir = TRUE;
			break;
		} else if (tvp == rootvnode) {
			/* *is_subdir = FALSE */
			break;
		}

		NAME_CACHE_LOCK_SHARED();

		defer = cache_check_vnode_issubdir(tvp, dvp, &is_subdir_locked,
		    &tvp);

		if (defer && tvp) {
			vid = vnode_vid(tvp);
			vnode_hold(tvp);
		}

		NAME_CACHE_UNLOCK();

		if (!defer) {
			*is_subdir = is_subdir_locked;
			break;
		}

		if (!tvp) {
			if (error_retry_count++ < MAX_ERROR_RETRY) {
				tvp = vp;
				continue;
			}
			error = ENOENT;
			break;
		}

		if (tvp != start_vp) {
			if (vp_with_iocount) {
				vnode_put(vp_with_iocount);
				vp_with_iocount = NULLVP;
			}

			error = vnode_getwithvid(tvp, vid);
			vnode_drop(tvp);
			if (error) {
				if (error_retry_count++ < MAX_ERROR_RETRY) {
					tvp = vp;
					error = 0;
					continue;
				}
				break;
			}
			vp_with_iocount = tvp;
		} else {
			tvp = vnode_drop(tvp);
		}

		bzero(&cn, sizeof(cn));
		cn.cn_nameiop = LOOKUP;
		cn.cn_flags = ISLASTCN | ISDOTDOT;
		cn.cn_context = ctx;
		cn.cn_pnbuf = &dotdotbuf[0];
		cn.cn_pnlen = sizeof(dotdotbuf);
		cn.cn_nameptr = cn.cn_pnbuf;
		cn.cn_namelen = 2;

		pvp = NULLVP;
		if ((error = VNOP_LOOKUP(tvp, &pvp, &cn, ctx))) {
			break;
		}

		if (!(tvp->v_flag & VISHARDLINK) && tvp->v_parent != pvp) {
			(void)vnode_update_identity(tvp, pvp, NULL, 0, 0,
			    VNODE_UPDATE_PARENT);
		}

		if (vp_with_iocount) {
			vnode_put(vp_with_iocount);
		}

		vp_with_iocount = tvp = pvp;
	}

	if (vp_with_iocount) {
		vnode_put(vp_with_iocount);
	}

	return error;
}
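
/*
 * Usage sketch (hypothetical caller, not part of this file): the caller
 * supplies vp and dvp with iocounts already held and keeps owning them
 * across the call; vnode_issubdir() only manages iocounts it takes
 * internally during the parent-chain walk:
 *
 *	int is_subdir = 0;
 *	int error = vnode_issubdir(vp, dvp, &is_subdir, ctx);
 *	if (error == 0 && is_subdir) {
 *		// dvp appears in vp's parent chain, e.g. refuse to
 *		// rename a directory into one of its own descendants
 *	}
 */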

/*
 * This function builds the path in "buff" from the supplied vnode.
 * The length of the buffer *INCLUDING* the trailing zero byte is
 * returned in outlen.  NOTE: the length includes the trailing zero
 * byte and thus the length is one greater than what strlen would
 * return.  This is important and lots of code elsewhere in the kernel
 * assumes this behavior.
 *
 * This function can call a vnop in the file system if the parent vnode
 * does not exist or when called for hardlinks via a volfs path.
 * If BUILDPATH_NO_FS_ENTER is set in flags, it only uses values present
 * in the name cache and does not enter the file system.
 *
 * If BUILDPATH_CHECK_MOVED is set in flags, we return EAGAIN when
 * we encounter ENOENT during path reconstruction.  ENOENT means that
 * one of the parents moved while we were building the path.  The
 * caller can special-case this by calling build_path again.
 *
 * If BUILDPATH_VOLUME_RELATIVE is set in flags, we return a path
 * that is relative to the nearest mount point, i.e. we do not
 * cross over mount points while building the path.
 *
 * The passed-in vp must have a valid io_count reference.
 *
 * If the parent vnode is non-NULL it also must have an io count.  This
 * allows build_path_with_parent to be safely called for operations
 * unlink, rmdir and rename that already have io counts on the target
 * and the directory. In this way build_path_with_parent does not have
 * to try and obtain an additional io count on the parent.  Taking an
 * io count on the parent can lead to deadlock if a forced unmount
 * occurs at the right moment. For a fuller explanation of how this
 * can occur, see the comment for vn_getpath_with_parent.
 *
 */
int
build_path_with_parent(vnode_t first_vp, vnode_t parent_vp, char *buff, int buflen,
    int *outlen, size_t *mntpt_outlen, int flags, vfs_context_t ctx)
{
	vnode_t vp, tvp;
	vnode_t vp_with_iocount;
	vnode_t proc_root_dir_vp;
	char *end;
	char *mntpt_end;
	const char *str;
	unsigned int  len;
	int  ret = 0;
	int  fixhardlink;

	if (first_vp == NULLVP) {
		return EINVAL;
	}

	if (buflen <= 1) {
		return ENOSPC;
	}

	/*
	 * Grab the process fd so we can evaluate fd_rdir.
	 */
	if (!(flags & BUILDPATH_NO_PROCROOT)) {
		proc_root_dir_vp = vfs_context_proc(ctx)->p_fd.fd_rdir;
	} else {
		proc_root_dir_vp = NULL;
	}

	vp_with_iocount = NULLVP;
again:
	vp = first_vp;

	end = &buff[buflen - 1];
	*end = '\0';
	mntpt_end = NULL;

	/*
	 * Catch a special corner case here: chroot to /full/path/to/dir, chdir to
	 * it, then open it. Without this check, the path to it will be
	 * /full/path/to/dir instead of "/".
	 */
	if (proc_root_dir_vp == first_vp) {
		*--end = '/';
		goto out;
	}

	/*
	 * holding the NAME_CACHE_LOCK in shared mode is
	 * sufficient to stabilize both the vp->v_parent chain
	 * and the 'vp->v_mount->mnt_vnodecovered' chain
	 *
	 * if we need to drop this lock, we must first grab the v_id
	 * from the vnode we're currently working with... if that
	 * vnode doesn't already have an io_count reference (the vp
	 * passed in comes with one), we must grab a reference
	 * after we drop the NAME_CACHE_LOCK via vnode_getwithvid...
	 * deadlocks may result if you call vnode_get while holding
	 * the NAME_CACHE_LOCK... we lazily release the reference
	 * we pick up the next time we encounter a need to drop
	 * the NAME_CACHE_LOCK or before we return from this routine
	 */
	NAME_CACHE_LOCK_SHARED();

#if CONFIG_FIRMLINKS
	if (!(flags & BUILDPATH_NO_FIRMLINK) &&
	    (vp->v_flag & VFMLINKTARGET) && vp->v_fmlink && (vp->v_fmlink->v_type == VDIR)) {
		vp = vp->v_fmlink;
	}
#endif

	/*
	 * Check if this is the root of a file system.
	 */
	while (vp && vp->v_flag & VROOT) {
		if (vp->v_mount == NULL) {
			ret = EINVAL;
			goto out_unlock;
		}
		if ((vp->v_mount->mnt_flag & MNT_ROOTFS) || (vp == proc_root_dir_vp)) {
			/*
			 * It's the root of the root file system, so it's
			 * just "/".
			 */
			*--end = '/';

			goto out_unlock;
		} else {
			/*
			 * This is the root of the volume and the caller does not
			 * want to cross mount points.  Therefore just return
			 * '/' as the relative path.
			 */
#if CONFIG_FIRMLINKS
			if (!(flags & BUILDPATH_NO_FIRMLINK) &&
			    (vp->v_flag & VFMLINKTARGET) && vp->v_fmlink && (vp->v_fmlink->v_type == VDIR)) {
				vp = vp->v_fmlink;
			} else
#endif
			if (flags & BUILDPATH_VOLUME_RELATIVE) {
				*--end = '/';
				goto out_unlock;
			} else {
				vp = vp->v_mount->mnt_vnodecovered;
				if (!mntpt_end && vp) {
					mntpt_end = end;
				}
			}
		}
	}

	while ((vp != NULLVP) && (vp->v_parent != vp)) {
		int  vid;

		/*
		 * For hardlinks the v_name may be stale, so if it's OK
		 * to enter a file system, ask the file system for the
		 * name and parent (below).
		 */
		fixhardlink = (vp->v_flag & VISHARDLINK) &&
		    (vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
		    !(flags & BUILDPATH_NO_FS_ENTER);

		if (!fixhardlink) {
			str = vp->v_name;

			if (str == NULL || *str == '\0') {
				if (vp->v_parent != NULL) {
					ret = EINVAL;
				} else {
					ret = ENOENT;
				}
				goto out_unlock;
			}
			len = (unsigned int)strlen(str);
			/*
			 * Check that there's enough space (including space for the '/')
			 */
			if ((unsigned int)(end - buff) < (len + 1)) {
				ret = ENOSPC;
				goto out_unlock;
			}
			/*
			 * Copy the name backwards.
			 */
			str += len;

			for (; len > 0; len--) {
				*--end = *--str;
			}
			/*
			 * Add a path separator.
			 */
			*--end = '/';
		}

		/*
		 * Walk up the parent chain.
		 */
		if (((vp->v_parent != NULLVP) && !fixhardlink) ||
		    (flags & BUILDPATH_NO_FS_ENTER)) {
			/*
			 * In this if () block we are not allowed to enter the filesystem
			 * to conclusively get the most accurate parent identifier.
			 * As a result, if 'vp' does not identify '/' and it
			 * does not have a valid v_parent, then error out
			 * and disallow further path construction
			 */
			if ((vp->v_parent == NULLVP) && (rootvnode != vp)) {
				/*
				 * Only '/' is allowed to have a NULL parent
				 * pointer. Upper level callers should ideally
				 * re-drive name lookup on receiving a ENOENT.
				 */
				ret = ENOENT;

				/* The code below will exit early if 'tvp = vp' == NULL */
			}
			vp = vp->v_parent;

			/*
			 * if the vnode we have in hand isn't a directory and it
			 * has a v_parent, then we started with the resource fork
			 * so skip up to avoid getting a duplicate copy of the
			 * file name in the path.
			 */
			if (vp && !vnode_isdir(vp) && vp->v_parent) {
				vp = vp->v_parent;
			}
		} else {
			/*
			 * No parent, go get it if supported.
			 */
			struct vnode_attr  va;
			vnode_t  dvp;

			/*
			 * Make sure the file system supports obtaining a path from id.
			 */
			if (!(vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
				ret = ENOENT;
				goto out_unlock;
			}
			vid = vp->v_id;

			vnode_hold(vp);
			NAME_CACHE_UNLOCK();

			if (vp != first_vp && vp != parent_vp && vp != vp_with_iocount) {
				if (vp_with_iocount) {
					vnode_put(vp_with_iocount);
					vp_with_iocount = NULLVP;
				}
				if (vnode_getwithvid(vp, vid)) {
					vnode_drop(vp);
					goto again;
				}
				vp_with_iocount = vp;
			}

			vnode_drop(vp);

			VATTR_INIT(&va);
			VATTR_WANTED(&va, va_parentid);

			if (fixhardlink) {
				VATTR_WANTED(&va, va_name);
				va.va_name = zalloc(ZV_NAMEI);
			} else {
				va.va_name = NULL;
			}
			/*
			 * Ask the file system for its parent id and for its name (optional).
			 */
			ret = vnode_getattr(vp, &va, ctx);

			if (fixhardlink) {
				if ((ret == 0) && (VATTR_IS_SUPPORTED(&va, va_name))) {
					str = va.va_name;
					vnode_update_identity(vp, NULL, str, (unsigned int)strlen(str), 0, VNODE_UPDATE_NAME);
				} else if (vp->v_name) {
					str = vp->v_name;
					ret = 0;
				} else {
					ret = ENOENT;
					goto bad_news;
				}
				len = (unsigned int)strlen(str);

				/*
				 * Check that there's enough space.
				 */
				if ((unsigned int)(end - buff) < (len + 1)) {
					ret = ENOSPC;
				} else {
					/* Copy the name backwards. */
					str += len;

					for (; len > 0; len--) {
						*--end = *--str;
					}
					/*
					 * Add a path separator.
					 */
					*--end = '/';
				}
bad_news:
				zfree(ZV_NAMEI, va.va_name);
			}
			if (ret || !VATTR_IS_SUPPORTED(&va, va_parentid)) {
				ret = ENOENT;
				goto out;
			}
			/*
			 * Ask the file system for the parent vnode.
			 */
			if ((ret = VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx))) {
				goto out;
			}

			if (!fixhardlink && (vp->v_parent != dvp)) {
				vnode_update_identity(vp, dvp, NULL, 0, 0, VNODE_UPDATE_PARENT);
			}

			if (vp_with_iocount) {
				vnode_put(vp_with_iocount);
			}
			vp = dvp;
			vp_with_iocount = vp;

			NAME_CACHE_LOCK_SHARED();

			/*
			 * if the vnode we have in hand isn't a directory and it
			 * has a v_parent, then we started with the resource fork
			 * so skip up to avoid getting a duplicate copy of the
			 * file name in the path.
			 */
			if (vp && !vnode_isdir(vp) && vp->v_parent) {
				vp = vp->v_parent;
			}
		}

		if (vp && (flags & BUILDPATH_CHECKACCESS)) {
			vid = vp->v_id;

			vnode_hold(vp);
			NAME_CACHE_UNLOCK();

			if (vp != first_vp && vp != parent_vp && vp != vp_with_iocount) {
				if (vp_with_iocount) {
					vnode_put(vp_with_iocount);
					vp_with_iocount = NULLVP;
				}
				if (vnode_getwithvid(vp, vid)) {
					vnode_drop(vp);
					goto again;
				}
				vp_with_iocount = vp;
			}
			vnode_drop(vp);

			if ((ret = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx))) {
				goto out;       /* no peeking */
			}
			NAME_CACHE_LOCK_SHARED();
		}

		/*
		 * When a mount point is crossed switch the vp.
		 * Continue until we find the root or we find
		 * a vnode that's not the root of a mounted
		 * file system.
		 */
		tvp = vp;

		while (tvp) {
			if (tvp == proc_root_dir_vp) {
				goto out_unlock;        /* encountered the root */
			}

#if CONFIG_FIRMLINKS
			if (!(flags & BUILDPATH_NO_FIRMLINK) &&
			    (tvp->v_flag & VFMLINKTARGET) && tvp->v_fmlink && (tvp->v_fmlink->v_type == VDIR)) {
				tvp = tvp->v_fmlink;
				break;
			}
#endif

			if (!(tvp->v_flag & VROOT) || !tvp->v_mount) {
				break;                  /* not the root of a mounted FS */
			}
			if (flags & BUILDPATH_VOLUME_RELATIVE) {
				/* Do not cross over mount points */
				tvp = NULL;
			} else {
				tvp = tvp->v_mount->mnt_vnodecovered;
				if (!mntpt_end && tvp) {
					mntpt_end = end;
				}
			}
		}
		if (tvp == NULLVP) {
			goto out_unlock;
		}
		vp = tvp;
	}
out_unlock:
	NAME_CACHE_UNLOCK();
out:
	if (vp_with_iocount) {
		vnode_put(vp_with_iocount);
	}
	/*
	 * Slide the name down to the beginning of the buffer.
	 */
	memmove(buff, end, &buff[buflen] - end);

	/*
	 * length includes the trailing zero byte
	 */
	*outlen = (int)(&buff[buflen] - end);
	if (mntpt_outlen && mntpt_end) {
		*mntpt_outlen = (size_t)*outlen - (size_t)(&buff[buflen] - mntpt_end);
	}

	/* One of the parents was moved during path reconstruction.
	 * The caller is interested in knowing whether any of the
	 * parents moved via BUILDPATH_CHECK_MOVED, so return EAGAIN.
	 */
	if ((ret == ENOENT) && (flags & BUILDPATH_CHECK_MOVED)) {
		ret = EAGAIN;
	}

	return ret;
}

int
build_path(vnode_t first_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx)
{
	return build_path_with_parent(first_vp, NULL, buff, buflen, outlen, NULL, flags, ctx);
}
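
/*
 * Usage sketch (hypothetical caller, not part of this file).  Note that
 * *outlen counts the trailing NUL, so on success it equals strlen(buff) + 1:
 *
 *	char buff[MAXPATHLEN];
 *	int  outlen = 0;
 *	int  error = build_path(vp, buff, sizeof(buff), &outlen, 0, ctx);
 *	if (error == 0) {
 *		// outlen == (int)strlen(buff) + 1
 *	}
 */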

/*
 * return NULLVP if vp's parent doesn't
 * exist, or we can't get a valid iocount
 * else return the parent of vp
 */
vnode_t
vnode_getparent(vnode_t vp)
{
	vnode_t pvp = NULLVP;
	int     pvid;

	NAME_CACHE_LOCK_SHARED();

	pvp = vp->v_parent;

	/*
	 * v_parent is stable behind the name_cache lock
	 * however, the only thing we can really guarantee
	 * is that we've grabbed a valid iocount on the
	 * parent of 'vp' at the time we took the name_cache lock...
	 * once we drop the lock, vp could get re-parented
	 */
	if (pvp != NULLVP) {
		pvid = pvp->v_id;

		vnode_hold(pvp);
		NAME_CACHE_UNLOCK();

		if (vnode_getwithvid(pvp, pvid) != 0) {
			vnode_drop(pvp);
			pvp = NULL;
		} else {
			vnode_drop(pvp);
		}
	} else {
		NAME_CACHE_UNLOCK();
	}
	return pvp;
}
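
/*
 * Usage sketch (hypothetical caller, not part of this file): when a parent
 * is returned it carries an iocount that the caller must drop:
 *
 *	vnode_t pvp = vnode_getparent(vp);
 *	if (pvp != NULLVP) {
 *		// ... use pvp ...
 *		vnode_put(pvp);
 *	}
 */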

/*
 * Similar to vnode_getparent(), but only returns the parent vnode (with an
 * iocount held) if the actual parent vnode is different from the given 'pvp'.
 */
__private_extern__ vnode_t
vnode_getparent_if_different(vnode_t vp, vnode_t pvp)
{
	vnode_t real_pvp = NULLVP;
	int     pvid;

	if (vp->v_parent == pvp) {
		goto out;
	}

	NAME_CACHE_LOCK_SHARED();

	real_pvp = vp->v_parent;
	if (real_pvp == NULLVP) {
		NAME_CACHE_UNLOCK();
		goto out;
	}

	/*
	 * Do the check again after the namecache lock is acquired as the
	 * parent vnode could have changed.
	 */
	if (real_pvp != pvp) {
		pvid = real_pvp->v_id;

		vnode_hold(real_pvp);
		NAME_CACHE_UNLOCK();

		if (vnode_getwithvid(real_pvp, pvid) != 0) {
			vnode_drop(real_pvp);
			real_pvp = NULLVP;
		} else {
			vnode_drop(real_pvp);
		}
	} else {
		real_pvp = NULLVP;
		NAME_CACHE_UNLOCK();
	}

out:
	return real_pvp;
}

const char *
vnode_getname(vnode_t vp)
{
	const char *name = NULL;

	NAME_CACHE_LOCK_SHARED();

	if (vp->v_name) {
		name = vfs_addname(vp->v_name, (unsigned int)strlen(vp->v_name), 0, 0);
	}
	NAME_CACHE_UNLOCK();

	return name;
}

void
vnode_putname(const char *name)
{
	vfs_removename(name);
}
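
/*
 * Usage sketch (hypothetical caller, not part of this file): vnode_getname()
 * takes a reference on the string-cache entry, so every successful call must
 * be balanced by vnode_putname():
 *
 *	const char *name = vnode_getname(vp);
 *	if (name != NULL) {
 *		printf("working on %s\n", name);
 *		vnode_putname(name);
 *	}
 */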

static const char unknown_vnodename[] = "(unknown vnode name)";

const char *
vnode_getname_printable(vnode_t vp)
{
	const char *name = vnode_getname(vp);
	if (name != NULL) {
		return name;
	}

	switch (vp->v_type) {
	case VCHR:
	case VBLK:
	{
		/*
		 * Create an artificial dev name from
		 * the major and minor device numbers
		 */
		char dev_name[64];
		(void) snprintf(dev_name, sizeof(dev_name),
		    "%c(%u, %u)", VCHR == vp->v_type ? 'c':'b',
		    major(vp->v_rdev), minor(vp->v_rdev));
		/*
		 * Add the newly created dev name to the name
		 * cache to allow easier cleanup. Also,
		 * vfs_addname allocates memory for the new name
		 * and returns it.
		 */
		NAME_CACHE_LOCK_SHARED();
		name = vfs_addname(dev_name, (unsigned int)strlen(dev_name), 0, 0);
		NAME_CACHE_UNLOCK();
		return name;
	}
	default:
		return unknown_vnodename;
	}
}

void
vnode_putname_printable(const char *name)
{
	if (name == unknown_vnodename) {
		return;
	}
	vnode_putname(name);
}


/*
 * if VNODE_UPDATE_PARENT, and we can take
 * a reference on dvp, then update vp with
 * its new parent... if vp already has a parent,
 * then drop the reference vp held on it
 *
 * if VNODE_UPDATE_NAME,
 * then drop the string ref on v_name if it exists, and if name is non-NULL
 * then pick up a string reference on name and record it in v_name...
 * optionally pass in the length and hashval of name if known
 *
 * if VNODE_UPDATE_CACHE, flush the name cache entries associated with vp
 */
void
vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags)
{
	struct  namecache *ncp;
	vnode_t old_parentvp = NULLVP;
	int isstream = (vp->v_flag & VISNAMEDSTREAM);
	int kusecountbumped = 0;
	kauth_cred_t tcred = NULL;
	const char *vname = NULL;
	const char *tname = NULL;

	if (name_len < 0) {
		return;
	}

	if (flags & VNODE_UPDATE_PARENT) {
		if (dvp && vnode_ref(dvp) != 0) {
			dvp = NULLVP;
		}
		/* Don't count a stream's parent ref during unmounts */
		if (isstream && dvp && (dvp != vp) && (dvp != vp->v_parent) && (dvp->v_type == VREG)) {
			vnode_lock_spin(dvp);
			++dvp->v_kusecount;
			kusecountbumped = 1;
			vnode_unlock(dvp);
		}
	} else {
		dvp = NULLVP;
	}
	if ((flags & VNODE_UPDATE_NAME)) {
		if (name != vp->v_name) {
			if (name && *name) {
				if (name_len == 0) {
					name_len = (int)strlen(name);
				}
				tname = vfs_addname(name, name_len, name_hashval, 0);
			}
		} else {
			flags &= ~VNODE_UPDATE_NAME;
		}
	}
	if ((flags & (VNODE_UPDATE_PURGE | VNODE_UPDATE_PARENT | VNODE_UPDATE_CACHE | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGEFIRMLINK))) {
		NAME_CACHE_LOCK();

#if CONFIG_FIRMLINKS
		if (flags & VNODE_UPDATE_PURGEFIRMLINK) {
			vnode_t old_fvp = vp->v_fmlink;
			if (old_fvp) {
				vnode_lock_spin(vp);
				vp->v_flag &= ~VFMLINKTARGET;
				vp->v_fmlink = NULLVP;
				vnode_unlock(vp);
				NAME_CACHE_UNLOCK();

				/*
				 * vnode_rele can result in a cascading series of
				 * usecount releases. The combination of calling
				 * vnode_recycle and dont_reenter (3rd arg to
				 * vnode_rele_internal) ensures we don't have
				 * that issue.
				 */
				vnode_recycle(old_fvp);
				vnode_rele_internal(old_fvp, O_EVTONLY, 1, 0);

				NAME_CACHE_LOCK();
			}
		}
#endif

		if ((flags & VNODE_UPDATE_PURGE)) {
			if (vp->v_parent) {
				vp->v_parent->v_nc_generation++;
			}

			while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
				cache_delete(ncp, 1);
			}

			while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) {
				cache_delete(ncp, 1);
			}

			/*
			 * Use a temp variable to avoid kauth_cred_drop() while NAME_CACHE_LOCK is held
			 */
			tcred = vnode_cred(vp);
			vp->v_cred = NOCRED;
			vp->v_authorized_actions = 0;
			vp->v_cred_timestamp = 0;
		}
		if ((flags & VNODE_UPDATE_NAME)) {
			vname = vp->v_name;
			vp->v_name = tname;
		}
		if (flags & VNODE_UPDATE_PARENT) {
			if (dvp != vp && dvp != vp->v_parent) {
				old_parentvp = vp->v_parent;
				vp->v_parent = dvp;
				dvp = NULLVP;

				if (old_parentvp) {
					flags |= VNODE_UPDATE_CACHE;
				}
			}
		}
		if (flags & VNODE_UPDATE_CACHE) {
			while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
				cache_delete(ncp, 1);
			}
		}
		NAME_CACHE_UNLOCK();

		if (vname != NULL) {
			vfs_removename(vname);
		}

		kauth_cred_set(&tcred, NOCRED);
	}
	if (dvp != NULLVP) {
		/* Back out the ref we took if we lost a race for vp->v_parent. */
		if (kusecountbumped) {
			vnode_lock_spin(dvp);
			if (dvp->v_kusecount > 0) {
				--dvp->v_kusecount;
			}
			vnode_unlock(dvp);
		}
		vnode_rele(dvp);
	}
	if (old_parentvp) {
		struct  uthread *ut;
		vnode_t vreclaims = NULLVP;

		if (isstream) {
			vnode_lock_spin(old_parentvp);
			if ((old_parentvp->v_type != VDIR) && (old_parentvp->v_kusecount > 0)) {
				--old_parentvp->v_kusecount;
			}
			vnode_unlock(old_parentvp);
		}
		ut = current_uthread();

		/*
		 * indicate to vnode_rele that it shouldn't do a
		 * vnode_reclaim at this time... instead it will
		 * chain the vnode to the uu_vreclaims list...
		 * we'll be responsible for calling vnode_reclaim
		 * on each of the vnodes in this list...
		 */
		ut->uu_defer_reclaims = 1;
		ut->uu_vreclaims = NULLVP;

		while ((vp = old_parentvp) != NULLVP) {
			vnode_hold(vp);
			vnode_lock_spin(vp);
			vnode_rele_internal(vp, 0, 0, 1);

			/*
			 * check to see if the vnode is now in the state
			 * that would have triggered a vnode_reclaim in vnode_rele
			 * if it is, we save its parent pointer and then NULL
			 * out the v_parent field... we'll drop the reference
			 * that was held on the next iteration of this loop...
			 * this short circuits a potential deep recursion if we
			 * have a long chain of parents in this state...
			 * we'll sit in this loop until we run into
			 * a parent in this chain that is not in this state
			 *
			 * make our check and the vnode_rele atomic
			 * with respect to the current vnode we're working on
			 * by holding the vnode lock
			 * if vnode_rele deferred the vnode_reclaim and has put
			 * this vnode on the list to be reaped by us, then
			 * it has left this vnode with an iocount == 1
			 */
			if (ut->uu_vreclaims == vp) {
				/*
				 * This vnode is on the head of the uu_vreclaims chain
				 * which means vnode_rele wanted to do a vnode_reclaim
				 * on this vnode. Pull the parent pointer now so that when we do the
				 * vnode_reclaim for each of the vnodes in the uu_vreclaims
				 * list, we won't recurse back through here
				 *
				 * need to do a convert here in case vnode_rele_internal
				 * returns with the lock held in the spin mode... it
				 * can drop and retake the lock under certain circumstances
				 */
				vnode_lock_convert(vp);

				NAME_CACHE_LOCK();
				old_parentvp = vp->v_parent;
				vp->v_parent = NULLVP;
				NAME_CACHE_UNLOCK();
			} else {
				/*
				 * we're done... we ran into a vnode that isn't
				 * being terminated
				 */
				old_parentvp = NULLVP;
			}
			vnode_drop_and_unlock(vp);
		}
		vreclaims = ut->uu_vreclaims;
		ut->uu_vreclaims = NULLVP;
		ut->uu_defer_reclaims = 0;

		while ((vp = vreclaims) != NULLVP) {
			vreclaims = vp->v_defer_reclaimlist;

			/*
			 * vnode_put will drive the vnode_reclaim if
			 * we are still the only reference on this vnode
			 */
			vnode_put(vp);
		}
	}
}
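
/*
 * Usage sketch (hypothetical caller, not part of this file): after a rename,
 * a filesystem might refresh both the cached name and the parent link in one
 * call; passing 0 for the length and hash lets the routine compute them
 * itself:
 *
 *	vnode_update_identity(vp, new_dvp, new_name, 0, 0,
 *	    VNODE_UPDATE_NAME | VNODE_UPDATE_PARENT);
 */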

#if CONFIG_FIRMLINKS
errno_t
vnode_setasfirmlink(vnode_t vp, vnode_t target_vp)
{
	int error = 0;
	vnode_t old_target_vp = NULLVP;
	vnode_t old_target_vp_v_fmlink = NULLVP;
	kauth_cred_t target_vp_cred = NULL;
	kauth_cred_t old_target_vp_cred = NULL;

	if (!vp) {
		return EINVAL;
	}

	if (target_vp) {
		if (vp->v_fmlink == target_vp) { /* Will be checked again under the name cache lock */
			return 0;
		}

		/*
		 * Firmlink source and target will take both a usecount
		 * and kusecount on each other.
		 */
		if ((error = vnode_ref_ext(target_vp, O_EVTONLY, VNODE_REF_FORCE))) {
			return error;
		}

		if ((error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE))) {
			vnode_rele_ext(target_vp, O_EVTONLY, 1);
			return error;
		}
	}

	NAME_CACHE_LOCK();

	old_target_vp = vp->v_fmlink;
	if (target_vp && (target_vp == old_target_vp)) {
		NAME_CACHE_UNLOCK();
		return 0;
	}
	vp->v_fmlink = target_vp;

	vnode_lock_spin(vp);
	vp->v_flag &= ~VFMLINKTARGET;
	vnode_unlock(vp);

	if (target_vp) {
		target_vp->v_fmlink = vp;
		vnode_lock_spin(target_vp);
		target_vp->v_flag |= VFMLINKTARGET;
		vnode_unlock(target_vp);
		cache_purge_locked(vp, &target_vp_cred);
	}

	if (old_target_vp) {
		old_target_vp_v_fmlink = old_target_vp->v_fmlink;
		old_target_vp->v_fmlink = NULLVP;
		vnode_lock_spin(old_target_vp);
		old_target_vp->v_flag &= ~VFMLINKTARGET;
		vnode_unlock(old_target_vp);
		cache_purge_locked(vp, &old_target_vp_cred);
	}

	NAME_CACHE_UNLOCK();

	kauth_cred_set(&target_vp_cred, NOCRED);

	if (old_target_vp) {
		kauth_cred_set(&old_target_vp_cred, NOCRED);

		vnode_rele_ext(old_target_vp, O_EVTONLY, 1);
		if (old_target_vp_v_fmlink) {
			vnode_rele_ext(old_target_vp_v_fmlink, O_EVTONLY, 1);
		}
	}

	return 0;
}

errno_t
vnode_getfirmlink(vnode_t vp, vnode_t *target_vp)
{
	int error;

	if (!vp->v_fmlink) {
		return ENODEV;
	}

	NAME_CACHE_LOCK_SHARED();
	if (vp->v_fmlink && !(vp->v_flag & VFMLINKTARGET) &&
	    (vnode_get(vp->v_fmlink) == 0)) {
		vnode_t tvp = vp->v_fmlink;

		vnode_lock_spin(tvp);
		if (tvp->v_lflag & (VL_TERMINATE | VL_DEAD)) {
			vnode_unlock(tvp);
			NAME_CACHE_UNLOCK();
			vnode_put(tvp);
			return ENOENT;
		}
		if (!(tvp->v_flag & VFMLINKTARGET)) {
			panic("firmlink target for vnode %p does not have flag set", vp);
		}
		vnode_unlock(tvp);
		*target_vp = tvp;
		error = 0;
	} else {
		*target_vp = NULLVP;
		error = ENODEV;
	}
	NAME_CACHE_UNLOCK();
	return error;
}

#else /* CONFIG_FIRMLINKS */

errno_t
vnode_setasfirmlink(__unused vnode_t vp, __unused vnode_t src_vp)
{
	return ENOTSUP;
}

errno_t
vnode_getfirmlink(__unused vnode_t vp, __unused vnode_t *target_vp)
{
	return ENOTSUP;
}

#endif

/*
 * Mark a vnode as having multiple hard links.  HFS makes use of this
 * because it keeps track of each link separately, and wants to know
 * which link was actually used.
 *
 * This will cause the name cache to force a VNOP_LOOKUP on the vnode
 * so that HFS can post-process the lookup.  Also, volfs will call
 * VNOP_GETATTR2 to determine the parent, instead of using v_parent.
 */
void
vnode_setmultipath(vnode_t vp)
{
	vnode_lock_spin(vp);

	/*
	 * In theory, we're changing the vnode's identity as far as the
	 * name cache is concerned, so we ought to grab the name cache lock
	 * here.  However, there is already a race, and grabbing the name
	 * cache lock only makes the race window slightly smaller.
	 *
	 * The race happens because the vnode already exists in the name
	 * cache, and could be found by one thread before another thread
	 * can set the hard link flag.
	 */

	vp->v_flag |= VISHARDLINK;

	vnode_unlock(vp);
}



/*
 * backwards compatibility
 */
void
vnode_uncache_credentials(vnode_t vp)
{
	vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
}


/*
 * use the exclusive form of NAME_CACHE_LOCK to protect the update of the
 * following fields in the vnode: v_cred_timestamp, v_cred, v_authorized_actions.
 * we use this lock so that we can look at the v_cred and v_authorized_actions
 * atomically while behind the NAME_CACHE_LOCK in shared mode in 'cache_lookup_path',
 * which is the super-hot path... if we are updating the authorized actions for this
 * vnode, we are already in the super-slow and far less frequented path so it's not
 * that bad that we take the lock exclusive for this case... of course we strive
 * to hold it for the minimum amount of time possible
 */

void
vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action)
{
	kauth_cred_t tcred = NOCRED;

	NAME_CACHE_LOCK();

	vp->v_authorized_actions &= ~action;

	if (action == KAUTH_INVALIDATE_CACHED_RIGHTS &&
	    IS_VALID_CRED(vp->v_cred)) {
		/*
		 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
		 */
		tcred = vnode_cred(vp);
		vp->v_cred = NOCRED;
	}
	NAME_CACHE_UNLOCK();

	kauth_cred_set(&tcred, NOCRED);
}


/* disable vnode_cache_is_authorized() by setting vnode_cache_defeat */
static TUNABLE(int, bootarg_vnode_cache_defeat, "-vnode_cache_defeat", 0);

boolean_t
vnode_cache_is_authorized(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
{
	kauth_cred_t    ucred;
	boolean_t       retval = FALSE;

	/* Boot argument to defeat rights caching */
	if (bootarg_vnode_cache_defeat) {
		return FALSE;
	}

	if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
		/*
		 * a TTL is enabled on the rights cache... handle it here
		 * a TTL of 0 indicates that no rights should be cached
		 */
		if (vp->v_mount->mnt_authcache_ttl) {
			if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) {
				/*
				 * For filesystems marked only MNTK_AUTH_OPAQUE (generally network ones),
				 * we will only allow a SEARCH right on a directory to be cached...
				 * that cached right always has a default TTL associated with it
				 */
				if (action != KAUTH_VNODE_SEARCH || vp->v_type != VDIR) {
					vp = NULLVP;
				}
			}
			if (vp != NULLVP && vnode_cache_is_stale(vp) == TRUE) {
				vnode_uncache_authorized_action(vp, vp->v_authorized_actions);
				vp = NULLVP;
			}
		} else {
			vp = NULLVP;
		}
	}
	if (vp != NULLVP) {
		ucred = vfs_context_ucred(ctx);

		NAME_CACHE_LOCK_SHARED();

		if (vnode_cred(vp) == ucred && (vp->v_authorized_actions & action) == action) {
			retval = TRUE;
		}

		NAME_CACHE_UNLOCK();
	}
	return retval;
}


void
vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
{
	kauth_cred_t tcred = NOCRED;
	kauth_cred_t ucred;
	struct timeval tv;
	boolean_t ttl_active = FALSE;

	ucred = vfs_context_ucred(ctx);

	if (!IS_VALID_CRED(ucred) || action == 0) {
		return;
	}

	if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
		/*
		 * a TTL is enabled on the rights cache... handle it here
		 * a TTL of 0 indicates that no rights should be cached
		 */
		if (vp->v_mount->mnt_authcache_ttl == 0) {
			return;
		}

		if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) {
			/*
			 * only cache SEARCH action for filesystems marked
			 * MNTK_AUTH_OPAQUE on VDIRs...
			 * the lookup_path code will time these out
			 */
			if ((action & ~KAUTH_VNODE_SEARCH) || vp->v_type != VDIR) {
				return;
			}
		}
		ttl_active = TRUE;

		microuptime(&tv);
	}
	NAME_CACHE_LOCK();

	if (vnode_cred(vp) != ucred) {
		/*
		 * Use a temp variable to avoid kauth_cred_drop() while NAME_CACHE_LOCK is held
		 */
		tcred = vnode_cred(vp);
		vp->v_cred = NOCRED;
		kauth_cred_set(&vp->v_cred, ucred);
		vp->v_authorized_actions = 0;
	}
	if (ttl_active == TRUE && vp->v_authorized_actions == 0) {
		/*
		 * only reset the timestamp on the
		 * first authorization cached after the previous
		 * timer has expired or we're switching creds...
		 * 'vnode_cache_is_authorized' will clear the
		 * authorized actions if the TTL is active and
		 * it has expired
		 */
		vp->v_cred_timestamp = (int)tv.tv_sec;
	}
	vp->v_authorized_actions |= action;

	NAME_CACHE_UNLOCK();

	kauth_cred_set(&tcred, NOCRED);
}


boolean_t
vnode_cache_is_stale(vnode_t vp)
{
	struct timeval  tv;
	boolean_t       retval;

	microuptime(&tv);

	if ((tv.tv_sec - vp->v_cred_timestamp) > vp->v_mount->mnt_authcache_ttl) {
		retval = TRUE;
	} else {
		retval = FALSE;
	}

	return retval;
}

1647 VFS_SMR_DECLARE;
1648 
1649 /*
1650  * Components of nameidata (or objects it can point to) which may
1651  * need restoring in case fast path lookup fails.
1652  */
1653 struct nameidata_state {
1654 	u_long  ni_loopcnt;
1655 	char *ni_next;
1656 	u_int ni_pathlen;
1657 	int32_t ni_flag;
1658 	char *cn_nameptr;
1659 	int cn_namelen;
1660 	int cn_flags;
1661 	uint32_t cn_hash;
1662 };
1663 
1664 static void
save_ndp_state(struct nameidata * ndp,struct componentname * cnp,struct nameidata_state * saved_statep)1665 save_ndp_state(struct nameidata *ndp, struct componentname *cnp, struct nameidata_state *saved_statep)
1666 {
1667 	saved_statep->ni_loopcnt = ndp->ni_loopcnt;
1668 	saved_statep->ni_next = ndp->ni_next;
1669 	saved_statep->ni_pathlen = ndp->ni_pathlen;
1670 	saved_statep->ni_flag = ndp->ni_flag;
1671 	saved_statep->cn_nameptr = cnp->cn_nameptr;
1672 	saved_statep->cn_namelen = cnp->cn_namelen;
1673 	saved_statep->cn_flags = cnp->cn_flags;
1674 	saved_statep->cn_hash = cnp->cn_hash;
1675 }
1676 
1677 static void
restore_ndp_state(struct nameidata * ndp,struct componentname * cnp,struct nameidata_state * saved_statep)1678 restore_ndp_state(struct nameidata *ndp, struct componentname *cnp, struct nameidata_state *saved_statep)
1679 {
1680 	ndp->ni_loopcnt = saved_statep->ni_loopcnt;
1681 	ndp->ni_next = saved_statep->ni_next;
1682 	ndp->ni_pathlen = saved_statep->ni_pathlen;
1683 	ndp->ni_flag = saved_statep->ni_flag;
1684 	cnp->cn_nameptr = saved_statep->cn_nameptr;
1685 	cnp->cn_namelen = saved_statep->cn_namelen;
1686 	cnp->cn_flags = saved_statep->cn_flags;
1687 	cnp->cn_hash = saved_statep->cn_hash;
1688 }
1689 
1690 static inline bool
vid_is_same(vnode_t vp,uint32_t vid)1691 vid_is_same(vnode_t vp, uint32_t vid)
1692 {
1693 	return !(os_atomic_load(&vp->v_lflag, relaxed) & (VL_DRAIN | VL_TERMINATE | VL_DEAD)) && (vnode_vid(vp) == vid);
1694 }
1695 
1696 static inline bool
can_check_v_mountedhere(vnode_t vp)1697 can_check_v_mountedhere(vnode_t vp)
1698 {
1699 	return (os_atomic_load(&vp->v_usecount, relaxed) > 0) &&
1700 	       (os_atomic_load(&vp->v_flag, relaxed) & VMOUNTEDHERE) &&
1701 	       !(os_atomic_load(&vp->v_lflag, relaxed) & (VL_TERMINATE | VL_DEAD) &&
1702 	       (vp->v_type == VDIR));
1703 }
1704 
1705 /*
1706  * Returns:	0			Success
1707  *		ERECYCLE		vnode was recycled from underneath us.  Force lookup to be re-driven from namei.
1708  *                                              This errno value should not be seen by anyone outside of the kernel.
1709  */
1710 int
cache_lookup_path(struct nameidata * ndp,struct componentname * cnp,vnode_t dp,vfs_context_t ctx,int * dp_authorized,vnode_t last_dp)1711 cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp,
1712     vfs_context_t ctx, int *dp_authorized, vnode_t last_dp)
1713 {
1714 	struct nameidata_state saved_state;
1715 	char            *cp;            /* pointer into pathname argument */
1716 	uint32_t        vid;
1717 	uint32_t        vvid = 0;       /* protected by vp != NULLVP */
1718 	vnode_t         vp = NULLVP;
1719 	vnode_t         tdp = NULLVP;
1720 	vnode_t         start_dp = dp;
1721 	kauth_cred_t    ucred;
1722 	boolean_t       ttl_enabled = FALSE;
1723 	struct timeval  tv;
1724 	mount_t         mp;
1725 	mount_t         dmp;
1726 	unsigned int    hash;
1727 	int             error = 0;
1728 	boolean_t       dotdotchecked = FALSE;
1729 	bool            locked = false;
1730 	bool            needs_lock = false;
1731 	bool            dp_iocount_taken = false;
1732 
1733 #if CONFIG_TRIGGERS
1734 	vnode_t         trigger_vp;
1735 #endif /* CONFIG_TRIGGERS */
1736 
1737 	ucred = vfs_context_ucred(ctx);
1738 retry:
1739 	if (nc_smr_enabled && !needs_lock) {
1740 		save_ndp_state(ndp, cnp, &saved_state);
1741 		vfs_smr_enter();
1742 	} else {
1743 		NAME_CACHE_LOCK_SHARED();
1744 		locked = true;
1745 	}
1746 	ndp->ni_flag &= ~(NAMEI_TRAILINGSLASH);
1747 
1748 	dmp = dp->v_mount;
1749 	vid = dp->v_id;
1750 	if (dmp && (dmp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
1751 		ttl_enabled = TRUE;
1752 		microuptime(&tv);
1753 	}
1754 	for (;;) {
1755 		/*
1756 		 * Search a directory.
1757 		 *
1758 		 * The cn_hash value is for use by cache_lookup
1759 		 * The last component of the filename is left accessible via
1760 		 * cnp->cn_nameptr for callers that need the name.
1761 		 */
1762 		hash = 0;
1763 		cp = cnp->cn_nameptr;
1764 
1765 		while (*cp && (*cp != '/')) {
1766 			hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
1767 		}
1768 		/*
1769 		 * the crc generator can legitimately generate
1770 		 * a 0... however, 0 for us means that we
1771 		 * haven't computed a hash, so use 1 instead
1772 		 */
1773 		if (hash == 0) {
1774 			hash = 1;
1775 		}
1776 		cnp->cn_hash = hash;
1777 		cnp->cn_namelen = (int)(cp - cnp->cn_nameptr);
1778 
1779 		ndp->ni_pathlen -= cnp->cn_namelen;
1780 		ndp->ni_next = cp;
1781 
1782 		/*
1783 		 * Replace multiple slashes by a single slash and trailing slashes
1784 		 * by a null.  This must be done before VNOP_LOOKUP() because some
1785 		 * fs's don't know about trailing slashes.  Remember if there were
1786 		 * trailing slashes to handle symlinks, existing non-directories
1787 		 * and non-existing files that won't be directories specially later.
1788 		 */
1789 		while (*cp == '/' && (cp[1] == '/' || cp[1] == '\0')) {
1790 			cp++;
1791 			ndp->ni_pathlen--;
1792 
1793 			if (*cp == '\0') {
1794 				ndp->ni_flag |= NAMEI_TRAILINGSLASH;
1795 				*ndp->ni_next = '\0';
1796 			}
1797 		}
1798 		ndp->ni_next = cp;
1799 
1800 		cnp->cn_flags &= ~(MAKEENTRY | ISLASTCN | ISDOTDOT);
1801 
1802 		if (*cp == '\0') {
1803 			cnp->cn_flags |= ISLASTCN;
1804 		}
1805 
1806 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.') {
1807 			cnp->cn_flags |= ISDOTDOT;
1808 		}
1809 
1810 #if NAMEDRSRCFORK
1811 		/*
1812 		 * Process a request for a file's resource fork.
1813 		 *
1814 		 * Consume the _PATH_RSRCFORKSPEC suffix and tag the path.
1815 		 */
1816 		if ((ndp->ni_pathlen == sizeof(_PATH_RSRCFORKSPEC)) &&
1817 		    (cp[1] == '.' && cp[2] == '.') &&
1818 		    bcmp(cp, _PATH_RSRCFORKSPEC, sizeof(_PATH_RSRCFORKSPEC)) == 0) {
1819 			/* Skip volfs file systems that don't support native streams. */
1820 			if ((dmp != NULL) &&
1821 			    (dmp->mnt_flag & MNT_DOVOLFS) &&
1822 			    (dmp->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) {
1823 				goto skiprsrcfork;
1824 			}
1825 			cnp->cn_flags |= CN_WANTSRSRCFORK;
1826 			cnp->cn_flags |= ISLASTCN;
1827 			ndp->ni_next[0] = '\0';
1828 			ndp->ni_pathlen = 1;
1829 		}
1830 skiprsrcfork:
1831 #endif
1832 
1833 		*dp_authorized = 0;
1834 
1835 #if CONFIG_FIRMLINKS
1836 		if ((cnp->cn_flags & ISDOTDOT) && (dp->v_flag & VFMLINKTARGET) && dp->v_fmlink) {
1837 			/*
1838 			 * If this is a firmlink target then dp has to be switched to the
1839 			 * firmlink "source" before exiting this loop.
1840 			 *
1841 			 * For a firmlink "target", the policy is to pick the parent of the
1842 			 * firmlink "source" as the parent. This means that you can never
1843 			 * get to the "real" parent of firmlink target via a dotdot lookup.
1844 			 */
1845 			vnode_t v_fmlink = dp->v_fmlink;
1846 			uint32_t old_vid = vid;
1847 			mp = dmp;
1848 			if (v_fmlink) {
1849 				vid = v_fmlink->v_id;
1850 				dmp = v_fmlink->v_mount;
1851 				if ((dp->v_fmlink == v_fmlink) && dmp) {
1852 					dp = v_fmlink;
1853 				} else {
1854 					vid = old_vid;
1855 					dmp = mp;
1856 				}
1857 			}
1858 		}
1859 #endif
1860 
1861 
1862 		if (ttl_enabled &&
1863 		    (dmp->mnt_authcache_ttl == 0 ||
1864 		    ((tv.tv_sec - dp->v_cred_timestamp) > dmp->mnt_authcache_ttl))) {
1865 			break;
1866 		}
1867 
1868 		/*
1869 		 * NAME_CACHE_LOCK holds these fields stable
1870 		 *
1871 		 * We can't cache KAUTH_VNODE_SEARCHBYANYONE for root correctly
1872 		 * so we make an ugly check for root here. root is always
1873  * allowed and breaking out of here only to find out that it is
1874  * authorized by virtue of being root is very, very expensive.
1875 		 * However, the check for not root is valid only for filesystems
1876 		 * which use local authorization.
1877 		 *
1878 		 * XXX: Remove the check for root when we can reliably set
1879 		 * KAUTH_VNODE_SEARCHBYANYONE as root.
1880 		 */
1881 		int v_authorized_actions = os_atomic_load(&dp->v_authorized_actions, relaxed);
1882 		if ((vnode_cred(dp) != ucred || !(v_authorized_actions & KAUTH_VNODE_SEARCH)) &&
1883 		    !(v_authorized_actions & KAUTH_VNODE_SEARCHBYANYONE) &&
1884 		    (ttl_enabled || !vfs_context_issuser(ctx))) {
1885 			break;
1886 		}
1887 
1888 		/*
1889 		 * indicate that we're allowed to traverse this directory...
1890 		 * even if we fail the cache lookup or decide to bail for
1891 		 * some other reason, this information is valid and is used
1892 		 * to avoid doing a vnode_authorize before the call to VNOP_LOOKUP
1893 		 */
1894 		*dp_authorized = 1;
1895 
1896 		if ((cnp->cn_flags & (ISLASTCN | ISDOTDOT))) {
1897 			if (cnp->cn_nameiop != LOOKUP) {
1898 				break;
1899 			}
1900 			if (cnp->cn_flags & LOCKPARENT) {
1901 				break;
1902 			}
1903 			if (cnp->cn_flags & NOCACHE) {
1904 				break;
1905 			}
1906 
1907 			if (cnp->cn_flags & ISDOTDOT) {
1908 				/*
1909 				 * Force directory hardlinks to go to
1910 				 * file system for ".." requests.
1911 				 */
1912 				if ((dp->v_flag & VISHARDLINK)) {
1913 					break;
1914 				}
1915 				/*
1916 				 * Quit here only if we can't use
1917 				 * the parent directory pointer or
1918 				 * don't have one.  Otherwise, we'll
1919 				 * use it below.
1920 				 */
1921 				if ((dp->v_flag & VROOT) ||
1922 				    dp == ndp->ni_rootdir ||
1923 				    dp->v_parent == NULLVP) {
1924 					break;
1925 				}
1926 			}
1927 		}
1928 
1929 		if ((cnp->cn_flags & CN_SKIPNAMECACHE)) {
1930 			/*
1931 			 * Force lookup to go to the filesystem with
1932 			 * all cnp fields set up.
1933 			 */
1934 			break;
1935 		}
1936 
1937 		/*
1938 		 * "." and ".." aren't supposed to be cached, so check
1939 		 * for them before checking the cache.
1940 		 */
1941 		if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
1942 			vp = dp;
1943 			vvid = vid;
1944 		} else if ((cnp->cn_flags & ISDOTDOT)) {
1945 			/*
1946 			 * If this is a chrooted process, we need to check if
1947 			 * the process is trying to break out of its chrooted
1948 			 * jail. We do that by trying to determine if dp is
1949 			 * a subdirectory of ndp->ni_rootdir. If we aren't
1950 			 * able to determine that by the v_parent pointers, we
1951 			 * will leave the fast path.
1952 			 *
1953 			 * Since this function may see dotdot components
1954 			 * many times and it has the name cache lock held for
1955 			 * the entire duration, we optimise this by doing this
1956 			 * check only once per cache_lookup_path call.
1957 			 * If dotdotchecked is set, it means we've done this
1958 			 * check once already and don't need to do it again.
1959 			 */
1960 			if (!locked && (ndp->ni_rootdir != rootvnode)) {
1961 				vfs_smr_leave();
1962 				needs_lock = true;
1963 				goto prep_lock_retry;
1964 			} else if (locked && !dotdotchecked && (ndp->ni_rootdir != rootvnode)) {
1965 				vnode_t tvp = dp;
1966 				boolean_t defer = FALSE;
1967 				boolean_t is_subdir = FALSE;
1968 
1969 				defer = cache_check_vnode_issubdir(tvp,
1970 				    ndp->ni_rootdir, &is_subdir, &tvp);
1971 
1972 				if (defer) {
1973 					/* defer to Filesystem */
1974 					break;
1975 				} else if (!is_subdir) {
1976 					/*
1977 					 * This process is trying to break out
1978 					 * of its chrooted jail, so all its
1979 					 * dotdot accesses will be translated to
1980 					 * its root directory.
1981 					 */
1982 					vp = ndp->ni_rootdir;
1983 				} else {
1984 					/*
1985 					 * All good, let this dotdot access
1986 					 * proceed normally
1987 					 */
1988 					vp = dp->v_parent;
1989 				}
1990 				dotdotchecked = TRUE;
1991 			} else {
1992 				vp = dp->v_parent;
1993 			}
1994 			if (!vp) {
1995 				break;
1996 			}
1997 			vvid = vp->v_id;
1998 		} else {
1999 			if (!locked) {
2000 				vp = cache_lookup_smr(dp, cnp, &vvid);
2001 				if (!vid_is_same(dp, vid)) {
2002 					vp = NULLVP;
2003 					needs_lock = true;
2004 					vfs_smr_leave();
2005 					goto prep_lock_retry;
2006 				}
2007 			} else {
2008 				vp = cache_lookup_locked(dp, cnp, &vvid);
2009 			}
2010 
2011 
2012 			if (!vp) {
2013 				break;
2014 			}
2015 
2016 			if ((vp->v_flag & VISHARDLINK)) {
2017 				/*
2018 				 * The file system wants a VNOP_LOOKUP on this vnode
2019 				 */
2020 				vp = NULL;
2021 				break;
2022 			}
2023 
2024 #if CONFIG_FIRMLINKS
2025 			vnode_t v_fmlink = vp->v_fmlink;
2026 			if (v_fmlink && !(vp->v_flag & VFMLINKTARGET)) {
2027 				if (cnp->cn_flags & CN_FIRMLINK_NOFOLLOW ||
2028 				    ((vp->v_type != VDIR) && (vp->v_type != VLNK))) {
2029 					/* Leave it to the filesystem */
2030 					vp = NULLVP;
2031 					break;
2032 				}
2033 
2034 				/*
2035 				 * Always switch to the target unless it is a VLNK
2036 				 * and it is the last component and we have NOFOLLOW
2037 				 * semantics
2038 				 */
2039 				if (vp->v_type == VDIR) {
2040 					vp = v_fmlink;
2041 					vvid = vnode_vid(vp);
2042 				} else if ((cnp->cn_flags & FOLLOW) ||
2043 				    (ndp->ni_flag & NAMEI_TRAILINGSLASH) || *ndp->ni_next == '/') {
2044 					if (ndp->ni_loopcnt >= MAXSYMLINKS - 1) {
2045 						vp = NULLVP;
2046 						break;
2047 					}
2048 					ndp->ni_loopcnt++;
2049 					vp = v_fmlink;
2050 					vvid = vnode_vid(vp);
2051 				}
2052 			}
2053 #endif
2054 		}
2055 		if ((cnp->cn_flags & ISLASTCN)) {
2056 			break;
2057 		}
2058 
2059 		if (vp->v_type != VDIR) {
2060 			if (vp->v_type != VLNK) {
2061 				vp = NULL;
2062 			}
2063 			break;
2064 		}
2065 
2066 		/*
2067 		 * v_mountedhere is PAC protected which means vp has to be a VDIR
2068 		 * to access that pointer as v_mountedhere. However, if we don't
2069 		 * have the name cache lock or an iocount (which we won't in the
2070 		 * !locked case) we can't guarantee that. So we try to detect it
2071 		 * via other fields to avoid having to dereference v_mountedhere
2072 		 * when we don't need to. Note that, in theory, if an entire
2073 		 * reclaim happens between the time we check
2074 		 * can_check_v_mountedhere() and the subsequent access, this can
2075 		 * still fail, but the fields we check make that exceedingly
2076 		 * unlikely, leaving the chances of it happening practically zero
2077 		 * (but not zero).
2078 		 */
2079 		if ((locked || can_check_v_mountedhere(vp)) &&
2080 		    (mp = vp->v_mountedhere) && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) {
2081 			vnode_t tmp_vp;
2082 			int tmp_vid;
2083 
2084 			if (!(locked || vid_is_same(vp, vvid))) {
2085 				vp = NULL;
2086 				break;
2087 			}
2088 			tmp_vp = mp->mnt_realrootvp;
2089 			tmp_vid = mp->mnt_realrootvp_vid;
2090 			if (tmp_vp == NULLVP || mp->mnt_generation != mount_generation ||
2091 			    tmp_vid != tmp_vp->v_id) {
2092 				break;
2093 			}
2094 
2095 			if ((mp = tmp_vp->v_mount) == NULL) {
2096 				break;
2097 			}
2098 
2099 			vp = tmp_vp;
2100 			vvid = tmp_vid;
2101 			dmp = mp;
2102 			if (dmp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) {
2103 				ttl_enabled = TRUE;
2104 				microuptime(&tv);
2105 			} else {
2106 				ttl_enabled = FALSE;
2107 			}
2108 		}
2109 
2110 #if CONFIG_TRIGGERS
2111 		/*
2112 		 * After traversing all mountpoints stacked here, if we have a
2113 		 * trigger in hand, resolve it.  Note that we don't need to
2114 		 * leave the fast path if the mount has already happened.
2115 		 */
2116 		if (vp->v_resolve) {
2117 			break;
2118 		}
2119 #endif /* CONFIG_TRIGGERS */
2120 
2121 		if (!(locked || vid_is_same(vp, vvid))) {
2122 			vp = NULL;
2123 			break;
2124 		}
2125 
2126 		dp = vp;
2127 		vid = vvid;
2128 		vp = NULLVP;
2129 		vvid = 0;
2130 
2131 		cnp->cn_nameptr = ndp->ni_next + 1;
2132 		ndp->ni_pathlen--;
2133 		while (*cnp->cn_nameptr == '/') {
2134 			cnp->cn_nameptr++;
2135 			ndp->ni_pathlen--;
2136 		}
2137 	}
2138 	if (!locked) {
2139 		if (vp && !vnode_hold_smr(vp)) {
2140 			vp = NULLVP;
2141 			vvid = 0;
2142 		}
2143 		if (!vnode_hold_smr(dp)) {
2144 			vfs_smr_leave();
2145 			if (vp) {
2146 				vnode_drop(vp);
2147 				vp = NULLVP;
2148 				vvid = 0;
2149 			}
2150 			goto prep_lock_retry;
2151 		}
2152 		vfs_smr_leave();
2153 	} else {
2154 		if (vp != NULLVP) {
2155 			vvid = vp->v_id;
2156 			vnode_hold(vp);
2157 		}
2158 		vid = dp->v_id;
2159 
2160 		vnode_hold(dp);
2161 		NAME_CACHE_UNLOCK();
2162 	}
2163 
2164 	tdp = NULLVP;
2165 	if (!(cnp->cn_flags & DONOTAUTH) &&
2166 	    (vp != NULLVP) && (vp->v_type != VLNK) &&
2167 	    ((cnp->cn_flags & (ISLASTCN | LOCKPARENT | WANTPARENT | SAVESTART)) == ISLASTCN)) {
2168 		/*
2169 		 * if we've got a child and it's the last component, and
2170 		 * the lookup doesn't need to return the parent then we
2171 		 * can skip grabbing an iocount on the parent, since all
2172 		 * we're going to do with it is a vnode_put just before
2173 		 * we return from 'lookup'.  If it's a symbolic link,
2174 		 * we need the parent in case the link happens to be
2175 		 * a relative pathname.
2176 		 *
2177 		 * However, we can't make this optimisation if we have to call
2178 		 * a MAC hook.
2179 		 */
2180 		tdp = dp;
2181 		dp = NULLVP;
2182 	} else {
2183 need_dp:
2184 		/*
2185 		 * return the last directory we looked at
2186 		 * with an io reference held. If it was the one passed
2187 		 * in as a result of the last iteration of VNOP_LOOKUP,
2188 		 * it should already hold an io ref. No need to increase ref.
2189 		 */
2190 		if (last_dp != dp) {
2191 			if (dp == ndp->ni_usedvp) {
2192 				/*
2193 				 * if this vnode matches the one passed in via USEDVP
2194 				 * than this context already holds an io_count... just
2195 				 * use vnode_get to get an extra ref for lookup to play
2196 				 * with... can't use the getwithvid variant here because
2197 				 * it will block behind a vnode_drain which would result
2198 				 * in a deadlock (since we already own an io_count that the
2199 				 * vnode_drain is waiting on)... vnode_get grabs the io_count
2200 				 * immediately w/o waiting... it always succeeds
2201 				 */
2202 				vnode_get(dp);
2203 			} else if ((error = vnode_getwithvid_drainok(dp, vid))) {
2204 				/*
2205 				 * failure indicates the vnode
2206 				 * changed identity or is being
2207 				 * TERMINATED... in either case
2208 				 * punt this lookup.
2209 				 *
2210 				 * don't necessarily return ENOENT, though, because
2211 				 * we really want to go back to disk and make sure it's
2212 				 * there or not if someone else is changing this
2213 				 * vnode. That being said, the one case where we do want
2214 				 * to return ENOENT is when the vnode's mount point is
2215 				 * in the process of unmounting and we might cause a deadlock
2216 				 * in our attempt to take an iocount. An ENODEV error return
2217 				 * from vnode_get* is an indication of this, but we change it
2218 				 * to ENOENT for upper layers.
2219 				 */
2220 				if (error == ENODEV) {
2221 					error = ENOENT;
2222 				} else {
2223 					error = ERECYCLE;
2224 				}
2225 				vnode_drop(dp);
2226 				if (vp) {
2227 					vnode_drop(vp);
2228 				}
2229 				goto errorout;
2230 			}
2231 			dp_iocount_taken = true;
2232 		}
2233 		vnode_drop(dp);
2234 	}
2235 
2236 #if CONFIG_MACF
2237 	/*
2238 	 * Name cache provides authorization caching (see below)
2239 	 * that will short circuit MAC checks in lookup().
2240 	 * We must perform MAC check here.  On denial
2241 	 * We must perform the MAC check here.  On denial,
2242 	 * dp_authorized will remain 0 and a second check will
2243 	 * be performed in lookup().
2244 	if (!(cnp->cn_flags & DONOTAUTH)) {
2245 		error = mac_vnode_check_lookup(ctx, dp, cnp);
2246 		if (error) {
2247 			*dp_authorized = 0;
2248 			if (dp_iocount_taken) {
2249 				vnode_put(dp);
2250 			}
2251 			if (vp) {
2252 				vnode_drop(vp);
2253 				vp = NULLVP;
2254 			}
2255 			goto errorout;
2256 		}
2257 	}
2258 #endif /* MAC */
2259 
2260 	if (vp != NULLVP) {
2261 		if ((vnode_getwithvid_drainok(vp, vvid))) {
2262 			vnode_drop(vp);
2263 			vp = NULLVP;
2264 
2265 			/*
2266 			 * can't get reference on the vp we'd like
2267 			 * to return... if we didn't grab a reference
2268 			 * on the directory (due to fast path bypass),
2269 			 * then we need to do it now... we can't return
2270 			 * with both ni_dvp and ni_vp NULL, and no
2271 			 * error condition
2272 			 */
2273 			if (dp == NULLVP) {
2274 				dp = tdp;
2275 				tdp = NULLVP;
2276 				goto need_dp;
2277 			}
2278 		} else {
2279 			vnode_drop(vp);
2280 		}
2281 		if (dp_iocount_taken && vp && (vp->v_type != VLNK) &&
2282 		    ((cnp->cn_flags & (ISLASTCN | LOCKPARENT | WANTPARENT | SAVESTART)) == ISLASTCN)) {
2283 			vnode_put(dp);
2284 			dp = NULLVP;
2285 		}
2286 	}
2287 
2288 	if (tdp) {
2289 		vnode_drop(tdp);
2290 		tdp = NULLVP;
2291 	}
2292 
2293 	ndp->ni_dvp = dp;
2294 	ndp->ni_vp  = vp;
2295 
2296 #if CONFIG_TRIGGERS
2297 	trigger_vp = vp ? vp : dp;
2298 	if ((error == 0) && (trigger_vp != NULLVP) && vnode_isdir(trigger_vp)) {
2299 		error = vnode_trigger_resolve(trigger_vp, ndp, ctx);
2300 		if (error) {
2301 			if (vp) {
2302 				vnode_put(vp);
2303 			}
2304 			if (dp) {
2305 				vnode_put(dp);
2306 			}
2307 			goto errorout;
2308 		}
2309 	}
2310 #endif /* CONFIG_TRIGGERS */
2311 
2312 errorout:
2313 	/*
2314 	 * If we came into cache_lookup_path after an iteration of the lookup loop that
2315 	 * resulted in a call to VNOP_LOOKUP, then VNOP_LOOKUP returned a vnode with a io ref
2316 	 * on it.  It is now the job of cache_lookup_path to drop the ref on this vnode
2317 	 * when it is no longer needed.  If we get to this point, and last_dp is not NULL
2318 	 * and it is ALSO not the dvp we want to return to caller of this function, it MUST be
2319 	 * the case that we got to a subsequent path component and this previous vnode is
2320 	 * no longer needed.  We can then drop the io ref on it.
2321 	 */
2322 	if ((last_dp != NULLVP) && (last_dp != ndp->ni_dvp)) {
2323 		vnode_put(last_dp);
2324 	}
2325 
2326 	// error was initialized to 0 and remains 0 if no error case occurred.
2327 	return error;
2328 
2329 prep_lock_retry:
2330 	restore_ndp_state(ndp, cnp, &saved_state);
2331 	dp = start_dp;
2332 	goto retry;
2333 }
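/*
 * Illustrative sketch (not compiled): ERECYCLE never escapes the kernel.
 * A caller such as lookup() is expected to unwind and re-drive resolution
 * from the top of namei(); restart_from_namei is a hypothetical label.
 */
#if 0
	error = cache_lookup_path(ndp, cnp, dp, ctx, &dp_authorized, last_dp);
	if (error == ERECYCLE) {
		goto restart_from_namei;
	}
#endif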
2334 
2335 
2336 static vnode_t
2337 cache_lookup_locked(vnode_t dvp, struct componentname *cnp, uint32_t *vidp)
2338 {
2339 	struct namecache *ncp;
2340 	long namelen = cnp->cn_namelen;
2341 	unsigned int hashval = cnp->cn_hash;
2342 
2343 	if (nc_disabled) {
2344 		return NULL;
2345 	}
2346 
2347 	smrq_serialized_foreach(ncp, NCHHASH(dvp, cnp->cn_hash), nc_hash) {
2348 		if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
2349 			if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) {
2350 				break;
2351 			}
2352 		}
2353 	}
2354 	if (ncp == 0) {
2355 		/*
2356 		 * We failed to find an entry
2357 		 */
2358 		NCHSTAT(ncs_miss);
2359 		NC_SMR_STATS(clp_next_fail);
2360 		return NULL;
2361 	}
2362 	NCHSTAT(ncs_goodhits);
2363 
2364 	if (!ncp->nc_vp) {
2365 		return NULL;
2366 	}
2367 
2368 	*vidp = ncp->nc_vid;
2369 	NC_SMR_STATS(clp_next);
2370 
2371 	return ncp->nc_vp;
2372 }
2373 
2374 static vnode_t
2375 cache_lookup_smr(vnode_t dvp, struct componentname *cnp, uint32_t *vidp)
2376 {
2377 	struct namecache *ncp;
2378 	long namelen = cnp->cn_namelen;
2379 	unsigned int hashval = cnp->cn_hash;
2380 	vnode_t vp = NULLVP;
2381 	uint32_t vid = 0;
2382 	uint32_t counter = 1;
2383 
2384 	if (nc_disabled) {
2385 		return NULL;
2386 	}
2387 
2388 	smrq_entered_foreach(ncp, NCHHASH(dvp, cnp->cn_hash), nc_hash) {
2389 		counter = os_atomic_load(&ncp->nc_counter, acquire);
2390 		if (!(counter & NC_VALID)) {
2391 			ncp = NULL;
2392 			goto out;
2393 		}
2394 		if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
2395 			const char *nc_name =
2396 			    os_atomic_load(&ncp->nc_name, relaxed);
2397 			if (nc_name &&
2398 			    strncmp(nc_name, cnp->cn_nameptr, namelen) == 0 &&
2399 			    nc_name[namelen] == 0) {
2400 				break;
2401 			} else if (!nc_name) {
2402 				ncp = NULL;
2403 				goto out;
2404 			}
2405 		}
2406 	}
2407 
2408 	/* We failed to find an entry */
2409 	if (ncp == 0) {
2410 		goto out;
2411 	}
2412 
2413 	vp = ncp->nc_vp;
2414 	vid = ncp->nc_vid;
2415 
2416 	/*
2417 	 * The validity of vp and vid depends on the value of the counter being
2418 	 * the same when we read it first in the loop and now. Anything else
2419 	 * and we can't use this vp & vid.
2420 	 * Hopefully this ncp wasn't reused 2 billion times between the time
2421 	 * we read it first and when we read the counter value again.
2422 	 */
2423 	if (os_atomic_load(&ncp->nc_counter, acquire) != counter) {
2424 		vp = NULLVP;
2425 		goto out;
2426 	}
2427 
2428 	*vidp = vid;
2429 	NC_SMR_STATS(clp_smr_next);
2430 
2431 	return vp;
2432 
2433 out:
2434 	NC_SMR_STATS(clp_smr_next_fail);
2435 	return NULL;
2436 }
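/*
 * Illustrative sketch (not compiled): the lockless walk above treats
 * nc_counter as a sequence counter. Reading the same odd value before and
 * after the field reads proves the entry stayed valid and unchanged; with
 * 32 bits (one of them the valid flag) the counter only wraps after about
 * 2 billion reuses, which is the ABA window the comment alludes to.
 */
#if 0
	vnode_t  vp;
	uint32_t vid;
	uint32_t c = os_atomic_load(&ncp->nc_counter, acquire);

	if (!(c & NC_VALID)) {
		return NULL;                    /* even counter: entry invalid */
	}
	vp  = ncp->nc_vp;                       /* speculatively read fields */
	vid = ncp->nc_vid;
	if (os_atomic_load(&ncp->nc_counter, acquire) != c) {
		return NULL;                    /* entry changed underneath us */
	}
	/* vp and vid were stable across both counter reads */
#endif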
2437 
2438 
2439 unsigned int hash_string(const char *cp, int len);
2440 //
2441 // Have to take a len argument because we may only need to
2442 // hash part of a componentname.
2443 //
2444 unsigned int
2445 hash_string(const char *cp, int len)
2446 {
2447 	unsigned hash = 0;
2448 
2449 	if (len) {
2450 		while (len--) {
2451 			hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
2452 		}
2453 	} else {
2454 		while (*cp != '\0') {
2455 			hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
2456 		}
2457 	}
2458 	/*
2459 	 * the crc generator can legitimately generate
2460 	 * a 0... however, 0 for us means that we
2461 	 * haven't computed a hash, so use 1 instead
2462 	 */
2463 	if (hash == 0) {
2464 		hash = 1;
2465 	}
2466 	return hash;
2467 }
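/*
 * Illustrative sketch (not compiled): a len of 0 means "hash up to the
 * NUL", so both calls below agree, and the 0 -> 1 remapping guarantees a
 * computed hash is never 0.
 */
#if 0
	unsigned int h1 = hash_string("foo", 3);        /* hash exactly 3 bytes */
	unsigned int h2 = hash_string("foo", 0);        /* hash up to the NUL */

	assert(h1 == h2);
	assert(h1 != 0);
#endif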
2468 
2469 
2470 /*
2471  * Lookup an entry in the cache
2472  *
2473  * We don't do this if the segment name is long, simply so the cache
2474  * can avoid holding long names (which would either waste space, or
2475  * add greatly to the complexity).
2476  *
2477  * Lookup is called with dvp pointing to the directory to search,
2478  * cnp pointing to the name of the entry being sought. If the lookup
2479  * succeeds, the vnode is returned in *vpp, and a status of -1 is
2480  * returned. If the lookup determines that the name does not exist
2481  * (negative caching), a status of ENOENT is returned. If the lookup
2482  * fails, a status of zero is returned.
2483  */
2484 
2485 static int
2486 cache_lookup_fallback(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
2487 {
2488 	struct namecache *ncp;
2489 	long namelen = cnp->cn_namelen;
2490 	unsigned int hashval = cnp->cn_hash;
2491 	boolean_t       have_exclusive = FALSE;
2492 	uint32_t vid;
2493 	vnode_t  vp;
2494 
2495 	NAME_CACHE_LOCK_SHARED();
2496 
2497 relook:
2498 	smrq_serialized_foreach(ncp, NCHHASH(dvp, cnp->cn_hash), nc_hash) {
2499 		if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
2500 			if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) {
2501 				break;
2502 			}
2503 		}
2504 	}
2505 	/* We failed to find an entry */
2506 	if (ncp == 0) {
2507 		NCHSTAT(ncs_miss);
2508 		NAME_CACHE_UNLOCK();
2509 		return 0;
2510 	}
2511 
2512 	/* We don't want to have an entry, so dump it */
2513 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
2514 		if (have_exclusive == TRUE) {
2515 			NCHSTAT(ncs_badhits);
2516 			cache_delete(ncp, 1);
2517 			NAME_CACHE_UNLOCK();
2518 			return 0;
2519 		}
2520 		if (!NAME_CACHE_LOCK_SHARED_TO_EXCLUSIVE()) {
2521 			NAME_CACHE_LOCK();
2522 		}
2523 		have_exclusive = TRUE;
2524 		goto relook;
2525 	}
2526 	vp = ncp->nc_vp;
2527 
2528 	/* We found a "positive" match, return the vnode */
2529 	if (vp) {
2530 		NCHSTAT(ncs_goodhits);
2531 
2532 		vid = ncp->nc_vid;
2533 		vnode_hold(vp);
2534 		NAME_CACHE_UNLOCK();
2535 
2536 		if (vnode_getwithvid(vp, vid)) {
2537 			vnode_drop(vp);
2538 #if COLLECT_STATS
2539 			NAME_CACHE_LOCK();
2540 			NCHSTAT(ncs_badvid);
2541 			NAME_CACHE_UNLOCK();
2542 #endif
2543 			return 0;
2544 		}
2545 		vnode_drop(vp);
2546 		*vpp = vp;
2547 		NC_SMR_STATS(cl_lock_hits);
2548 		return -1;
2549 	}
2550 
2551 	/* We found a negative match, but the caller wants to create the name, so purge it */
2552 	if (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) {
2553 		if (have_exclusive == TRUE) {
2554 			NCHSTAT(ncs_badhits);
2555 			cache_delete(ncp, 1);
2556 			NAME_CACHE_UNLOCK();
2557 			return 0;
2558 		}
2559 		if (!NAME_CACHE_LOCK_SHARED_TO_EXCLUSIVE()) {
2560 			NAME_CACHE_LOCK();
2561 		}
2562 		have_exclusive = TRUE;
2563 		goto relook;
2564 	}
2565 
2566 	/*
2567 	 * We found a "negative" match; ENOENT notifies the client of this match.
2568 	 */
2569 	NCHSTAT(ncs_neghits);
2570 
2571 	NAME_CACHE_UNLOCK();
2572 	return ENOENT;
2573 }
2574 
2575 
2576 
2577 /*
2578  * Lookup an entry in the cache
2579  *
2580  * Lookup is called with dvp pointing to the directory to search,
2581  * cnp pointing to the name of the entry being sought. If the lookup
2582  * succeeds, the vnode is returned in *vpp, and a status of -1 is
2583  * returned. If the lookup determines that the name does not exist
2584  * (negative caching), a status of ENOENT is returned. If the lookup
2585  * fails, a status of zero is returned.
2586  */
2587 int
2588 cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
2589 {
2590 	struct namecache *ncp;
2591 	long namelen = cnp->cn_namelen;
2592 	vnode_t  vp;
2593 	uint32_t vid = 0;
2594 	uint32_t counter = 1;
2595 	unsigned int hashval;
2596 
2597 	*vpp = NULLVP;
2598 
2599 	if (cnp->cn_hash == 0) {
2600 		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2601 	}
2602 	hashval = cnp->cn_hash;
2603 
2604 	if (nc_disabled) {
2605 		return 0;
2606 	}
2607 
2608 	if (!nc_smr_enabled) {
2609 		goto out_fallback;
2610 	}
2611 
2612 	/* We don't want to have an entry, so dump it */
2613 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
2614 		goto out_fallback;
2615 	}
2616 
2617 	vfs_smr_enter();
2618 
2619 	smrq_entered_foreach(ncp, NCHHASH(dvp, cnp->cn_hash), nc_hash) {
2620 		counter = os_atomic_load(&ncp->nc_counter, acquire);
2621 		if (!(counter & NC_VALID)) {
2622 			vfs_smr_leave();
2623 			goto out_fallback;
2624 		}
2625 		if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
2626 			const char *nc_name =
2627 			    os_atomic_load(&ncp->nc_name, relaxed);
2628 			if (nc_name &&
2629 			    strncmp(nc_name, cnp->cn_nameptr, namelen) == 0 &&
2630 			    nc_name[namelen] == 0) {
2631 				break;
2632 			} else if (!nc_name) {
2633 				vfs_smr_leave();
2634 				goto out_fallback;
2635 			}
2636 		}
2637 	}
2638 
2639 	/* We failed to find an entry */
2640 	if (ncp == 0) {
2641 		NCHSTAT(ncs_miss);
2642 		vfs_smr_leave();
2643 		NC_SMR_STATS(cl_smr_miss);
2644 		return 0;
2645 	}
2646 
2647 	vp = ncp->nc_vp;
2648 	vid = ncp->nc_vid;
2649 
2650 	/*
2651 	 * The validity of vp and vid depends on the value of the counter being
2652 	 * the same when we read it first in the loop and now. Anything else
2653 	 * and we can't use this vp & vid.
2654 	 * Hopefully this ncp wasn't reused 2 billion times between the time
2655 	 * we read it first and when we read the counter value again.
2656 	 */
2657 	if (os_atomic_load(&ncp->nc_counter, acquire) != counter) {
2658 		vfs_smr_leave();
2659 		goto out_fallback;
2660 	}
2661 
2662 	if (vp) {
2663 		bool holdcount_acquired = vnode_hold_smr(vp);
2664 
2665 		vfs_smr_leave();
2666 
2667 		if (!holdcount_acquired) {
2668 			goto out_fallback;
2669 		}
2670 
2671 		if (vnode_getwithvid(vp, vid) != 0) {
2672 			vnode_drop(vp);
2673 			goto out_fallback;
2674 		}
2675 		vnode_drop(vp);
2676 		NCHSTAT(ncs_goodhits);
2677 
2678 		*vpp = vp;
2679 		NC_SMR_STATS(cl_smr_hits);
2680 		return -1;
2681 	}
2682 
2683 	vfs_smr_leave();
2684 
2685 	/* We found a negative match, but the caller wants to create the name, so purge it */
2686 	if (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) {
2687 		goto out_fallback;
2688 	}
2689 
2690 	/*
2691 	 * We found a "negative" match; ENOENT notifies the client of this match.
2692 	 */
2693 	NCHSTAT(ncs_neghits);
2694 	NC_SMR_STATS(cl_smr_negative_hits);
2695 	return ENOENT;
2696 
2697 out_fallback:
2698 	NC_SMR_STATS(cl_smr_fallback);
2699 	return cache_lookup_fallback(dvp, vpp, cnp);
2700 }
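/*
 * Illustrative sketch (not compiled): how a filesystem's lookup might
 * consume the three-way return convention documented above;
 * my_fs_lookup_on_disk() is a hypothetical stand-in.
 */
#if 0
	switch (cache_lookup(dvp, vpp, cnp)) {
	case -1:
		return 0;                       /* positive hit: *vpp holds an iocount */
	case ENOENT:
		return ENOENT;                  /* cached negative entry */
	default:
		return my_fs_lookup_on_disk(dvp, vpp, cnp);     /* miss: go to disk */
	}
#endif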
2701 
2702 const char *
2703 cache_enter_create(vnode_t dvp, vnode_t vp, struct componentname *cnp)
2704 {
2705 	const char *strname;
2706 
2707 	if (cnp->cn_hash == 0) {
2708 		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2709 	}
2710 
2711 	/*
2712 	 * grab 2 references on the string entered
2713 	 * one for the cache_enter_locked to consume
2714 	 * and the second to be consumed by v_name (vnode_create call point)
2715 	 */
2716 	strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, TRUE, 0);
2717 
2718 	NAME_CACHE_LOCK();
2719 
2720 	cache_enter_locked(dvp, vp, cnp, strname);
2721 
2722 	NAME_CACHE_UNLOCK();
2723 
2724 	return strname;
2725 }
2726 
2727 
2728 /*
2729  * Add an entry to the cache...
2730  * but first check to see if the directory
2731  * that this entry is to be associated with has
2732  * had any cache_purges applied since we took
2733  * our identity snapshot... this check needs to
2734  * be done behind the name cache lock
2735  */
2736 void
2737 cache_enter_with_gen(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int gen)
2738 {
2739 	if (cnp->cn_hash == 0) {
2740 		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2741 	}
2742 
2743 	NAME_CACHE_LOCK();
2744 
2745 	if (dvp->v_nc_generation == gen) {
2746 		(void)cache_enter_locked(dvp, vp, cnp, NULL);
2747 	}
2748 
2749 	NAME_CACHE_UNLOCK();
2750 }
2751 
2752 
2753 /*
2754  * Add an entry to the cache.
2755  */
2756 void
2757 cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2758 {
2759 	const char *strname;
2760 
2761 	if (cnp->cn_hash == 0) {
2762 		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2763 	}
2764 
2765 	/*
2766 	 * grab 1 reference on the string entered
2767 	 * for the cache_enter_locked to consume
2768 	 */
2769 	strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0);
2770 
2771 	NAME_CACHE_LOCK();
2772 
2773 	cache_enter_locked(dvp, vp, cnp, strname);
2774 
2775 	NAME_CACHE_UNLOCK();
2776 }
2777 
2778 
2779 static void
2780 cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, const char *strname)
2781 {
2782 	struct namecache *ncp, *negp;
2783 	struct smrq_list_head  *ncpp;
2784 
2785 	if (nc_disabled) {
2786 		return;
2787 	}
2788 
2789 	/*
2790 	 * if the entry is for -ve caching vp is null
2791 	 */
2792 	if ((vp != NULLVP) && (LIST_FIRST(&vp->v_nclinks))) {
2793 		/*
2794 		 * someone beat us to the punch..
2795 		 * this vnode is already in the cache
2796 		 */
2797 		if (strname != NULL) {
2798 			vfs_removename(strname);
2799 		}
2800 		return;
2801 	}
2802 	/*
2803 	 * We allocate a new entry if we are less than the maximum
2804 	 * allowed and the one at the front of the list is in use.
2805 	 * Otherwise we use the one at the front of the list.
2806 	 */
2807 	if (numcache < desiredNodes &&
2808 	    ((ncp = nchead.tqh_first) == NULL ||
2809 	    (ncp->nc_counter & NC_VALID))) {
2810 		/*
2811 		 * Allocate one more entry
2812 		 */
2813 		if (nc_smr_enabled) {
2814 			ncp = zalloc_smr(namecache_zone, Z_WAITOK_ZERO_NOFAIL);
2815 		} else {
2816 			ncp = zalloc(namecache_zone);
2817 		}
2818 		ncp->nc_counter = 0;
2819 		numcache++;
2820 	} else {
2821 		/*
2822 		 * reuse an old entry
2823 		 */
2824 		ncp = TAILQ_FIRST(&nchead);
2825 		TAILQ_REMOVE(&nchead, ncp, nc_entry);
2826 
2827 		if (ncp->nc_counter & NC_VALID) {
2828 			/*
2829 			 * still in use... we need to
2830 			 * delete it before re-using it
2831 			 */
2832 			NCHSTAT(ncs_stolen);
2833 			cache_delete(ncp, 0);
2834 		}
2835 	}
2836 	NCHSTAT(ncs_enters);
2837 
2838 	/*
2839 	 * Fill in cache info, if vp is NULL this is a "negative" cache entry.
2840 	 */
2841 	if (vp) {
2842 		ncp->nc_vid = vnode_vid(vp);
2843 		vnode_hold(vp);
2844 	}
2845 	ncp->nc_vp = vp;
2846 	ncp->nc_dvp = dvp;
2847 	ncp->nc_hashval = cnp->cn_hash;
2848 
2849 	if (strname == NULL) {
2850 		ncp->nc_name = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0);
2851 	} else {
2852 		ncp->nc_name = strname;
2853 	}
2854 
2855 	//
2856 	// If the bytes of the name associated with the vnode differ,
2857 	// use the name associated with the vnode since the file system
2858 	// may have set that explicitly in the case of a lookup on a
2859 	// case-insensitive file system where the case of the looked up
2860 	// name differs from what is on disk.  For more details, see:
2861 	//   <rdar://problem/8044697> FSEvents doesn't always decompose diacritical unicode chars in the paths of the changed directories
2862 	//
2863 	const char *vn_name = vp ? vp->v_name : NULL;
2864 	unsigned int len = vn_name ? (unsigned int)strlen(vn_name) : 0;
2865 	if (vn_name && ncp && ncp->nc_name && strncmp(ncp->nc_name, vn_name, len) != 0) {
2866 		unsigned int hash = hash_string(vn_name, len);
2867 
2868 		vfs_removename(ncp->nc_name);
2869 		ncp->nc_name = add_name_internal(vn_name, len, hash, FALSE, 0);
2870 		ncp->nc_hashval = hash;
2871 	}
2872 
2873 	/*
2874 	 * make us the newest entry in the cache
2875 	 * i.e. we'll be the last to be stolen
2876 	 */
2877 	TAILQ_INSERT_TAIL(&nchead, ncp, nc_entry);
2878 
2879 	ncpp = NCHHASH(dvp, cnp->cn_hash);
2880 #if DIAGNOSTIC
2881 	{
2882 		struct namecache *p;
2883 
2884 		smrq_serialized_foreach(p, ncpp, nc_hash) {
2885 			if (p == ncp) {
2886 				panic("cache_enter: duplicate");
2887 			}
2888 		}
2889 	}
2890 #endif
2891 	/*
2892 	 * make us available to be found via lookup
2893 	 */
2894 	smrq_serialized_insert_head(ncpp, &ncp->nc_hash);
2895 
2896 	if (vp) {
2897 		/*
2898 		 * add to the list of name cache entries
2899 		 * that point at vp
2900 		 */
2901 		LIST_INSERT_HEAD(&vp->v_nclinks, ncp, nc_un.nc_link);
2902 	} else {
2903 		/*
2904 		 * this is a negative cache entry (vp == NULL)
2905 		 * stick it on the negative cache list.
2906 		 */
2907 		TAILQ_INSERT_TAIL(&neghead, ncp, nc_un.nc_negentry);
2908 
2909 		ncs_negtotal++;
2910 
2911 		if (ncs_negtotal > desiredNegNodes) {
2912 			/*
2913 			 * if we've reached our desired limit
2914 			 * of negative cache entries, delete
2915 			 * the oldest
2916 			 */
2917 			negp = TAILQ_FIRST(&neghead);
2918 			cache_delete(negp, 1);
2919 		}
2920 	}
2921 
2922 	/*
2923 	 * add us to the list of name cache entries that
2924 	 * are children of dvp
2925 	 */
2926 	if (vp) {
2927 		TAILQ_INSERT_TAIL(&dvp->v_ncchildren, ncp, nc_child);
2928 	} else {
2929 		TAILQ_INSERT_HEAD(&dvp->v_ncchildren, ncp, nc_child);
2930 	}
2931 
2932 	/*
2933 	 * nc_counter represents a sequence counter and 1 bit valid flag.
2934 	 * When the counter value is odd, it represents a valid and in use
2935 	 * namecache structure. We increment the value on every state transition
2936 	 * (invalid to valid here, and valid to invalid in cache_delete()).
2937 	 * Lockless readers have to read the value before reading other fields
2938 	 * and ensure that it is valid and remains the same after the fields
2939 	 * have been read.
2940 	 */
2941 	uint32_t old_count = os_atomic_inc_orig(&ncp->nc_counter, release);
2942 	if (old_count & NC_VALID) {
2943 		/* This should be an invalid to valid transition */
2944 		panic("Incorrect state for old nc_counter(%d), should be even", old_count);
2945 	}
2946 }
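/*
 * Illustrative sketch (not compiled): the nc_counter lifecycle, assuming
 * NC_VALID is the low bit (which the one-increment-per-transition protocol
 * implies):
 *
 *   0 (even, invalid)
 *     cache_enter_locked():  inc -> 1 (odd, valid)
 *     cache_delete():        inc -> 2 (even, invalid)
 *     reuse via enter:       inc -> 3 (odd, valid), and so on
 *
 * A reader that observes the same odd value before and after reading the
 * entry's fields knows the entry was valid and unchanged in between.
 */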
2947 
2948 
2949 /*
2950  * Initialize CRC-32 remainder table.
2951  */
2952 static void
2953 init_crc32(void)
2954 {
2955 	/*
2956 	 * the CRC-32 generator polynomial is:
2957 	 *   x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^10
2958 	 *        + x^8  + x^7  + x^5  + x^4  + x^2  + x + 1
2959 	 */
2960 	unsigned int crc32_polynomial = 0x04c11db7;
2961 	unsigned int i, j;
2962 
2963 	/*
2964 	 * pre-calculate the CRC-32 remainder for each possible octet encoding
2965 	 */
2966 	for (i = 0; i < 256; i++) {
2967 		unsigned int crc_rem = i << 24;
2968 
2969 		for (j = 0; j < 8; j++) {
2970 			if (crc_rem & 0x80000000) {
2971 				crc_rem = (crc_rem << 1) ^ crc32_polynomial;
2972 			} else {
2973 				crc_rem = (crc_rem << 1);
2974 			}
2975 		}
2976 		crc32tab[i] = crc_rem;
2977 	}
2978 }
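/*
 * Illustrative sketch (not compiled): each table entry is just the eight
 * bit-at-a-time CRC steps for one octet precomputed, so a spot check
 * against the long-hand form holds for any byte value b.
 */
#if 0
	unsigned int b = 0xA5;
	unsigned int r = b << 24;

	for (int j = 0; j < 8; j++) {
		r = (r & 0x80000000) ? ((r << 1) ^ 0x04c11db7) : (r << 1);
	}
	assert(crc32tab[b] == r);
#endif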
2979 
2980 
2981 /*
2982  * Name cache initialization, from vfs_init() when we are booting
2983  */
2984 void
2985 nchinit(void)
2986 {
2987 	desiredNegNodes = (desiredvnodes / 10);
2988 	desiredNodes = desiredvnodes + desiredNegNodes;
2989 
2990 	if (nc_smr_enabled) {
2991 		zone_enable_smr(namecache_zone, VFS_SMR(), &namecache_smr_free);
2992 		zone_enable_smr(stringcache_zone, VFS_SMR(), &string_smr_free);
2993 	}
2994 	TAILQ_INIT(&nchead);
2995 	TAILQ_INIT(&neghead);
2996 
2997 	init_crc32();
2998 
2999 	nchashtbl = hashinit(MAX(CONFIG_NC_HASH, (2 * desiredNodes)), M_CACHE, &nchash);
3000 	nchashmask = nchash;
3001 	nchash++;
3002 
3003 	init_string_table();
3004 
3005 	for (int i = 0; i < NUM_STRCACHE_LOCKS; i++) {
3006 		lck_mtx_init(&strcache_mtx_locks[i], &strcache_lck_grp, &strcache_lck_attr);
3007 	}
3008 }
3009 
3010 void
3011 name_cache_lock_shared(void)
3012 {
3013 	lck_rw_lock_shared(&namecache_rw_lock);
3014 	NC_SMR_STATS(nc_lock_shared);
3015 }
3016 
3017 void
3018 name_cache_lock(void)
3019 {
3020 	lck_rw_lock_exclusive(&namecache_rw_lock);
3021 	NC_SMR_STATS(nc_lock);
3022 }
3023 
3024 boolean_t
3025 name_cache_lock_shared_to_exclusive(void)
3026 {
3027 	return lck_rw_lock_shared_to_exclusive(&namecache_rw_lock);
3028 }
3029 
3030 void
3031 name_cache_unlock(void)
3032 {
3033 	lck_rw_done(&namecache_rw_lock);
3034 }
3035 
3036 
3037 int
3038 resize_namecache(int newsize)
3039 {
3040 	struct smrq_list_head   *new_table;
3041 	struct smrq_list_head   *old_table;
3042 	struct smrq_list_head   *old_head;
3043 	struct namecache    *entry;
3044 	uint32_t            i, hashval;
3045 	int                 dNodes, dNegNodes, nelements;
3046 	u_long              new_size, old_size;
3047 
3048 	if (newsize < 0) {
3049 		return EINVAL;
3050 	}
3051 
3052 	dNegNodes = (newsize / 10);
3053 	dNodes = newsize + dNegNodes;
3054 	// we don't support shrinking yet
3055 	if (dNodes <= desiredNodes) {
3056 		return 0;
3057 	}
3058 
3059 	if (os_mul_overflow(dNodes, 2, &nelements)) {
3060 		return EINVAL;
3061 	}
3062 
3063 	new_table = hashinit(nelements, M_CACHE, &nchashmask);
3064 	new_size  = nchashmask + 1;
3065 
3066 	if (new_table == NULL) {
3067 		return ENOMEM;
3068 	}
3069 
3070 	NAME_CACHE_LOCK();
3071 	// do the switch!
3072 	old_table = nchashtbl;
3073 	nchashtbl = new_table;
3074 	old_size  = nchash;
3075 	nchash    = new_size;
3076 
3077 	// walk the old table and insert all the entries into
3078 	// the new table
3079 	//
3080 	for (i = 0; i < old_size; i++) {
3081 		old_head = &old_table[i];
3082 		smrq_serialized_foreach_safe(entry, old_head, nc_hash) {
3083 			//
3084 			// XXXdbg - Beware: this assumes that hash_string() does
3085 			//                  the same thing as what happens in
3086 			//                  lookup() over in vfs_lookup.c
3087 			hashval = hash_string(entry->nc_name, 0);
3088 			entry->nc_hashval = hashval;
3089 
3090 			smrq_serialized_insert_head(NCHHASH(entry->nc_dvp, hashval), &entry->nc_hash);
3091 		}
3092 	}
3093 	desiredNodes = dNodes;
3094 	desiredNegNodes = dNegNodes;
3095 
3096 	NAME_CACHE_UNLOCK();
3097 	hashdestroy(old_table, M_CACHE, old_size - 1);
3098 
3099 	return 0;
3100 }
3101 
3102 static void
3103 namecache_smr_free(void *_ncp, __unused size_t _size)
3104 {
3105 	struct namecache *ncp = _ncp;
3106 
3107 	bzero(ncp, sizeof(*ncp));
3108 }
3109 
3110 static void
3111 cache_delete(struct namecache *ncp, int free_entry)
3112 {
3113 	NCHSTAT(ncs_deletes);
3114 
3115 	/*
3116 	 * See the comment at the end of cache_enter_locked() explaining the usage of
3117 	 * nc_counter.
3118 	 */
3119 	uint32_t old_count = os_atomic_inc_orig(&ncp->nc_counter, release);
3120 	if (!(old_count & NC_VALID)) {
3121 		/* This should be a valid to invalid transition */
3122 		panic("Incorrect state for old nc_counter(%d), should be odd", old_count);
3123 	}
3124 
3125 	if (ncp->nc_vp) {
3126 		LIST_REMOVE(ncp, nc_un.nc_link);
3127 	} else {
3128 		TAILQ_REMOVE(&neghead, ncp, nc_un.nc_negentry);
3129 		ncs_negtotal--;
3130 	}
3131 	TAILQ_REMOVE(&(ncp->nc_dvp->v_ncchildren), ncp, nc_child);
3132 
3133 	smrq_serialized_remove((NCHHASH(ncp->nc_dvp, ncp->nc_hashval)), &ncp->nc_hash);
3134 
3135 	const char *nc_name = ncp->nc_name;
3136 	ncp->nc_name = NULL;
3137 	vfs_removename(nc_name);
3138 	if (ncp->nc_vp) {
3139 		vnode_t vp = ncp->nc_vp;
3140 
3141 		ncp->nc_vp = NULLVP;
3142 		vnode_drop(vp);
3143 	}
3144 
3145 	if (free_entry) {
3146 		TAILQ_REMOVE(&nchead, ncp, nc_entry);
3147 		if (nc_smr_enabled) {
3148 			zfree_smr(namecache_zone, ncp);
3149 		} else {
3150 			zfree(namecache_zone, ncp);
3151 		}
3152 		numcache--;
3153 	}
3154 }
3155 
3156 
3157 /*
3158  * purge the entry associated with the
3159  * specified vnode from the name cache
3160  */
3161 static void
3162 cache_purge_locked(vnode_t vp, kauth_cred_t *credp)
3163 {
3164 	struct namecache *ncp;
3165 
3166 	*credp = NULL;
3167 	if ((LIST_FIRST(&vp->v_nclinks) == NULL) &&
3168 	    (TAILQ_FIRST(&vp->v_ncchildren) == NULL) &&
3169 	    (vnode_cred(vp) == NOCRED) &&
3170 	    (vp->v_parent == NULLVP)) {
3171 		return;
3172 	}
3173 
3174 	if (vp->v_parent) {
3175 		vp->v_parent->v_nc_generation++;
3176 	}
3177 
3178 	while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
3179 		cache_delete(ncp, 1);
3180 	}
3181 
3182 	while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) {
3183 		cache_delete(ncp, 1);
3184 	}
3185 
3186 	/*
3187 	 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
3188 	 */
3189 	*credp = vnode_cred(vp);
3190 	vp->v_cred = NOCRED;
3191 	vp->v_authorized_actions = 0;
3192 }
3193 
3194 void
3195 cache_purge(vnode_t vp)
3196 {
3197 	kauth_cred_t tcred = NULL;
3198 
3199 	if ((LIST_FIRST(&vp->v_nclinks) == NULL) &&
3200 	    (TAILQ_FIRST(&vp->v_ncchildren) == NULL) &&
3201 	    (vnode_cred(vp) == NOCRED) &&
3202 	    (vp->v_parent == NULLVP)) {
3203 		return;
3204 	}
3205 
3206 	NAME_CACHE_LOCK();
3207 
3208 	cache_purge_locked(vp, &tcred);
3209 
3210 	NAME_CACHE_UNLOCK();
3211 
3212 	kauth_cred_set(&tcred, NOCRED);
3213 }
3214 
3215 /*
3216  * Purge all negative cache entries that are children of the
3217  * given vnode.  A case-insensitive file system (or any file
3218  * system that has multiple equivalent names for the same
3219  * directory entry) can use this when creating or renaming
3220  * to remove negative entries that may no longer apply.
3221  */
3222 void
3223 cache_purge_negatives(vnode_t vp)
3224 {
3225 	struct namecache *ncp, *next_ncp;
3226 
3227 	NAME_CACHE_LOCK();
3228 
3229 	TAILQ_FOREACH_SAFE(ncp, &vp->v_ncchildren, nc_child, next_ncp) {
3230 		if (ncp->nc_vp) {
3231 			break;
3232 		}
3233 
3234 		cache_delete(ncp, 1);
3235 	}
3236 
3237 	NAME_CACHE_UNLOCK();
3238 }
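/*
 * Illustrative sketch (not compiled): a case-insensitive filesystem that
 * just created "Foo" must drop cached negative entries under dvp ("foo",
 * "FOO", ...) that the new name now satisfies; my_fs_create() is a
 * hypothetical stand-in.
 */
#if 0
	error = my_fs_create(dvp, &vp, cnp, ctx);
	if (error == 0) {
		cache_purge_negatives(dvp);
	}
#endif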
3239 
3240 /*
3241  * Flush all entries referencing a particular filesystem.
3242  *
3243  * Since we need to check it anyway, we will flush all the invalid
3244  * entries at the same time.
3245  */
3246 void
3247 cache_purgevfs(struct mount *mp)
3248 {
3249 	struct smrq_list_head *ncpp;
3250 	struct namecache *ncp;
3251 
3252 	NAME_CACHE_LOCK();
3253 	/* Scan hash tables for applicable entries */
3254 	for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) {
3255 restart:
3256 		smrq_serialized_foreach(ncp, ncpp, nc_hash) {
3257 			if (ncp->nc_dvp->v_mount == mp) {
3258 				cache_delete(ncp, 0);
3259 				goto restart;
3260 			}
3261 		}
3262 	}
3263 	NAME_CACHE_UNLOCK();
3264 }
3265 
3266 
3267 
3268 //
3269 // String ref routines
3270 //
3271 static LIST_HEAD(stringhead, string_t) * string_ref_table;
3272 static u_long   string_table_mask;
3273 static uint32_t filled_buckets = 0;
3274 
3275 
3276 
3277 
3278 static void
3279 resize_string_ref_table(void)
3280 {
3281 	struct stringhead *new_table;
3282 	struct stringhead *old_table;
3283 	struct stringhead *old_head, *head;
3284 	string_t          *entry, *next;
3285 	uint32_t           i, hashval;
3286 	u_long             new_mask, old_mask;
3287 
3288 	/*
3289 	 * need to hold the table lock exclusively
3290 	 * in order to grow the table... need to recheck
3291 	 * the need to resize again after we've taken
3292 	 * the lock exclusively in case some other thread
3293 	 * beat us to the punch
3294 	 */
3295 	lck_rw_lock_exclusive(&strtable_rw_lock);
3296 
3297 	if (4 * filled_buckets < ((string_table_mask + 1) * 3)) {
3298 		lck_rw_done(&strtable_rw_lock);
3299 		return;
3300 	}
3301 	assert(string_table_mask < INT32_MAX);
3302 	new_table = hashinit((int)(string_table_mask + 1) * 2, M_CACHE, &new_mask);
3303 
3304 	if (new_table == NULL) {
3305 		printf("failed to resize the hash table.\n");
3306 		lck_rw_done(&strtable_rw_lock);
3307 		return;
3308 	}
3309 
3310 	// do the switch!
3311 	old_table         = string_ref_table;
3312 	string_ref_table  = new_table;
3313 	old_mask          = string_table_mask;
3314 	string_table_mask = new_mask;
3315 	filled_buckets    = 0;
3316 
3317 	// walk the old table and insert all the entries into
3318 	// the new table
3319 	//
3320 	for (i = 0; i <= old_mask; i++) {
3321 		old_head = &old_table[i];
3322 		for (entry = old_head->lh_first; entry != NULL; entry = next) {
3323 			hashval = hash_string((const char *)entry->str, 0);
3324 			head = &string_ref_table[hashval & string_table_mask];
3325 			if (head->lh_first == NULL) {
3326 				filled_buckets++;
3327 			}
3328 			next = entry->hash_chain.le_next;
3329 			LIST_INSERT_HEAD(head, entry, hash_chain);
3330 		}
3331 	}
3332 	lck_rw_done(&strtable_rw_lock);
3333 
3334 	hashdestroy(old_table, M_CACHE, old_mask);
3335 }
3336 
3337 
3338 static void
3339 init_string_table(void)
3340 {
3341 	string_ref_table = hashinit(CONFIG_VFS_NAMES, M_CACHE, &string_table_mask);
3342 }
3343 
3344 
3345 const char *
3346 vfs_addname(const char *name, uint32_t len, u_int hashval, u_int flags)
3347 {
3348 	return add_name_internal(name, len, hashval, FALSE, flags);
3349 }
3350 
3351 
3352 static const char *
3353 add_name_internal(const char *name, uint32_t len, u_int hashval, boolean_t need_extra_ref, __unused u_int flags)
3354 {
3355 	struct stringhead *head;
3356 	string_t          *entry;
3357 	uint32_t          chain_len = 0;
3358 	uint32_t          hash_index;
3359 	uint32_t          lock_index;
3360 	char              *ptr;
3361 
3362 	if (len > MAXPATHLEN) {
3363 		len = MAXPATHLEN;
3364 	}
3365 
3366 	/*
3367 	 * if the length already accounts for the null-byte, then
3368 	 * subtract one so later on we don't index past the end
3369 	 * of the string.
3370 	 */
3371 	if (len > 0 && name[len - 1] == '\0') {
3372 		len--;
3373 	}
3374 	if (hashval == 0) {
3375 		hashval = hash_string(name, len);
3376 	}
3377 
3378 	/*
3379 	 * take this lock 'shared' to keep the hash stable
3380 	 * if someone else decides to grow the pool they
3381 	 * will take this lock exclusively
3382 	 */
3383 	lck_rw_lock_shared(&strtable_rw_lock);
3384 
3385 	/*
3386 	 * If the table gets more than 3/4 full, resize it
3387 	 */
3388 	if (4 * filled_buckets >= ((string_table_mask + 1) * 3)) {
3389 		lck_rw_done(&strtable_rw_lock);
3390 
3391 		resize_string_ref_table();
3392 
3393 		lck_rw_lock_shared(&strtable_rw_lock);
3394 	}
3395 	hash_index = hashval & string_table_mask;
3396 	lock_index = hash_index % NUM_STRCACHE_LOCKS;
3397 
3398 	head = &string_ref_table[hash_index];
3399 
3400 	lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]);
3401 
3402 	for (entry = head->lh_first; entry != NULL; chain_len++, entry = entry->hash_chain.le_next) {
3403 		if (strncmp(entry->str, name, len) == 0 && entry->str[len] == 0) {
3404 			entry->refcount++;
3405 			break;
3406 		}
3407 	}
3408 	if (entry == NULL) {
3409 		const uint32_t buflen = len + 1;
3410 
3411 		lck_mtx_convert_spin(&strcache_mtx_locks[lock_index]);
3412 		/*
3413 		 * it wasn't already there so add it.
3414 		 */
3415 		if (nc_smr_enabled) {
3416 			entry = zalloc_smr(stringcache_zone, Z_WAITOK_ZERO_NOFAIL);
3417 		} else {
3418 			entry = zalloc(stringcache_zone);
3419 		}
3420 
3421 		if (head->lh_first == NULL) {
3422 			OSAddAtomic(1, &filled_buckets);
3423 		}
3424 		ptr = kalloc_data(buflen, Z_WAITOK);
3425 		strncpy(ptr, name, len);
3426 		ptr[len] = '\0';
3427 		entry->str = ptr;
3428 		entry->strbuflen = buflen;
3429 		entry->refcount = 1;
3430 		LIST_INSERT_HEAD(head, entry, hash_chain);
3431 	}
3432 	if (need_extra_ref == TRUE) {
3433 		entry->refcount++;
3434 	}
3435 
3436 	lck_mtx_unlock(&strcache_mtx_locks[lock_index]);
3437 	lck_rw_done(&strtable_rw_lock);
3438 
3439 	return (const char *)entry->str;
3440 }
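/*
 * Illustrative sketch (not compiled): interned names are refcounted, so
 * every vfs_addname() must eventually be balanced by a vfs_removename()
 * on the returned pointer.
 */
#if 0
	const char *n = vfs_addname("example", 7, 0, 0);        /* takes a reference */
	/* ... publish n, e.g. as a vnode's v_name ... */
	vfs_removename(n);      /* drop it; the backing string is freed on last release */
#endif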
3441 
3442 static void
3443 string_smr_free(void *_entry, __unused size_t size)
3444 {
3445 	string_t *entry = _entry;
3446 
3447 	kfree_data(entry->str, entry->strbuflen);
3448 	bzero(entry, sizeof(*entry));
3449 }
3450 
3451 int
3452 vfs_removename(const char *nameref)
3453 {
3454 	struct stringhead *head;
3455 	string_t          *entry;
3456 	uint32_t           hashval;
3457 	uint32_t           hash_index;
3458 	uint32_t           lock_index;
3459 	int                retval = ENOENT;
3460 
3461 	hashval = hash_string(nameref, 0);
3462 
3463 	/*
3464 	 * take this lock 'shared' to keep the hash stable
3465 	 * if someone else decides to grow the pool they
3466 	 * will take this lock exclusively
3467 	 */
3468 	lck_rw_lock_shared(&strtable_rw_lock);
3469 	/*
3470 	 * must compute the head behind the table lock
3471 	 * since the size and location of the table
3472 	 * can change on the fly
3473 	 */
3474 	hash_index = hashval & string_table_mask;
3475 	lock_index = hash_index % NUM_STRCACHE_LOCKS;
3476 
3477 	head = &string_ref_table[hash_index];
3478 
3479 	lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]);
3480 
3481 	for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) {
3482 		if (entry->str == nameref) {
3483 			entry->refcount--;
3484 
3485 			if (entry->refcount == 0) {
3486 				LIST_REMOVE(entry, hash_chain);
3487 
3488 				if (head->lh_first == NULL) {
3489 					OSAddAtomic(-1, &filled_buckets);
3490 				}
3491 			} else {
3492 				entry = NULL;
3493 			}
3494 			retval = 0;
3495 			break;
3496 		}
3497 	}
3498 	lck_mtx_unlock(&strcache_mtx_locks[lock_index]);
3499 	lck_rw_done(&strtable_rw_lock);
3500 
3501 	if (entry) {
3502 		assert(entry->refcount == 0);
3503 		if (nc_smr_enabled) {
3504 			zfree_smr(stringcache_zone, entry);
3505 		} else {
3506 			kfree_data(entry->str, entry->strbuflen);
3507 			entry->str = NULL;
3508 			entry->strbuflen = 0;
3509 			zfree(stringcache_zone, entry);
3510 		}
3511 	}
3512 
3513 	return retval;
3514 }
3515 
3516 
3517 #ifdef DUMP_STRING_TABLE
3518 void
3519 dump_string_table(void)
3520 {
3521 	struct stringhead *head;
3522 	string_t          *entry;
3523 	u_long            i;
3524 
3525 	lck_rw_lock_shared(&strtable_rw_lock);
3526 
3527 	for (i = 0; i <= string_table_mask; i++) {
3528 		head = &string_ref_table[i];
3529 		for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) {
3530 			printf("%6d - %s\n", entry->refcount, entry->str);
3531 		}
3532 	}
3533 	lck_rw_done(&strtable_rw_lock);
3534 }
3535 #endif  /* DUMP_STRING_TABLE */
3536