xref: /xnu-11215.1.10/bsd/vfs/vfs_cache.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1989, 1993, 1995
31  *	The Regents of the University of California.  All rights reserved.
32  *
33  * This code is derived from software contributed to Berkeley by
34  * Poul-Henning Kamp of the FreeBSD Project.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. All advertising materials mentioning features or use of this software
45  *    must display the following acknowledgement:
46  *	This product includes software developed by the University of
47  *	California, Berkeley and its contributors.
48  * 4. Neither the name of the University nor the names of its contributors
49  *    may be used to endorse or promote products derived from this software
50  *    without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62  * SUCH DAMAGE.
63  *
64  *
65  *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
66  */
67 /*
68  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69  * support for mandatory and extensible security protections.  This notice
70  * is included in support of clause 2.2 (b) of the Apple Public License,
71  * Version 2.0.
72  */
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/time.h>
76 #include <sys/mount_internal.h>
77 #include <sys/vnode_internal.h>
78 #include <miscfs/specfs/specdev.h>
79 #include <sys/namei.h>
80 #include <sys/errno.h>
81 #include <kern/kalloc.h>
82 #include <sys/kauth.h>
83 #include <sys/user.h>
84 #include <sys/paths.h>
85 #include <os/overflow.h>
86 
87 #if CONFIG_MACF
88 #include <security/mac_framework.h>
89 #endif
90 
91 /*
92  * Name caching works as follows:
93  *
94  * Names found by directory scans are retained in a cache
95  * for future reference.  It is managed LRU, so frequently
96  * used names will hang around.  Cache is indexed by hash value
97  * obtained from (vp, name) where vp refers to the directory
98  * containing name.
99  *
100  * If it is a "negative" entry, (i.e. for a name that is known NOT to
101  * exist) the vnode pointer will be NULL.
102  *
103  * Upon reaching the last segment of a path, if the reference
104  * is for DELETE, or NOCACHE is set (rewrite), and the
105  * name is located in the cache, it will be dropped.
106  */
107 
108 /*
109  * Structures associated with name caching.
110  */
111 
112 ZONE_DEFINE_TYPE(namecache_zone, "namecache", struct namecache, ZC_NONE);
113 
114 struct smrq_list_head *nchashtbl;       /* Hash Table */
115 u_long  nchashmask;
116 u_long  nchash;                         /* size of hash table - 1 */
117 long    numcache;                       /* number of cache entries allocated */
118 int     desiredNodes;
119 int     desiredNegNodes;
120 int     ncs_negtotal;
121 TUNABLE_WRITEABLE(int, nc_disabled, "-novfscache", 0);
122 __options_decl(nc_smr_level_t, uint32_t, {
123 	NC_SMR_DISABLED = 0,
124 	NC_SMR_LOOKUP = 1
125 });
126 TUNABLE(nc_smr_level_t, nc_smr_enabled, "ncsmr", NC_SMR_LOOKUP);
127 TAILQ_HEAD(, namecache) nchead;         /* chain of all name cache entries */
128 TAILQ_HEAD(, namecache) neghead;        /* chain of only negative cache entries */
129 
130 
131 #if COLLECT_STATS
132 
133 struct  nchstats nchstats;              /* cache effectiveness statistics */
134 
135 #define NCHSTAT(v) {            \
136 	nchstats.v++;           \
137 }
138 #define NAME_CACHE_LOCK_SHARED()        name_cache_lock()
139 #define NAME_CACHE_LOCK_SHARED_TO_EXCLUSIVE() TRUE
140 
141 #else
142 
143 #define NCHSTAT(v)
144 #define NAME_CACHE_LOCK_SHARED()        name_cache_lock_shared()
145 #define NAME_CACHE_LOCK_SHARED_TO_EXCLUSIVE()             name_cache_lock_shared_to_exclusive()
146 
147 #endif
148 
149 #define NAME_CACHE_LOCK()               name_cache_lock()
150 #define NAME_CACHE_UNLOCK()             name_cache_unlock()
151 
152 /* vars for name cache list lock */
153 static LCK_GRP_DECLARE(namecache_lck_grp, "Name Cache");
154 static LCK_RW_DECLARE(namecache_rw_lock, &namecache_lck_grp);
155 
156 typedef struct string_t {
157 	LIST_ENTRY(string_t)  hash_chain;
158 	char                  *str;
159 	uint32_t              strbuflen;
160 	uint32_t              refcount;
161 } string_t;
162 
163 ZONE_DEFINE_TYPE(stringcache_zone, "vfsstringcache", string_t, ZC_NONE);
164 
165 static LCK_GRP_DECLARE(strcache_lck_grp, "String Cache");
166 static LCK_ATTR_DECLARE(strcache_lck_attr, 0, 0);
167 LCK_RW_DECLARE_ATTR(strtable_rw_lock, &strcache_lck_grp, &strcache_lck_attr);
168 
169 static LCK_GRP_DECLARE(rootvnode_lck_grp, "rootvnode");
170 LCK_RW_DECLARE(rootvnode_rw_lock, &rootvnode_lck_grp);
171 
172 #define NUM_STRCACHE_LOCKS 1024
173 
174 lck_mtx_t strcache_mtx_locks[NUM_STRCACHE_LOCKS];
175 
176 SYSCTL_NODE(_vfs, OID_AUTO, ncstats, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "vfs name cache stats");
177 
178 SYSCTL_COMPAT_INT(_vfs_ncstats, OID_AUTO, nc_smr_enabled,
179     CTLFLAG_RD | CTLFLAG_LOCKED,
180     &nc_smr_enabled, 0, "");
181 
182 #if COLLECT_NC_SMR_STATS
183 struct ncstats {
184 	uint64_t cl_smr_hits;
185 	uint64_t cl_smr_miss;
186 	uint64_t cl_smr_negative_hits;
187 	uint64_t cl_smr_fallback;
188 	uint64_t cl_lock_hits;
189 	uint64_t clp_next;
190 	uint64_t clp_next_fail;
191 	uint64_t clp_smr_next;
192 	uint64_t clp_smr_next_fail;
193 	uint64_t clp_smr_fallback;
194 	uint64_t nc_lock_shared;
195 	uint64_t nc_lock;
196 } ncstats = {0};
197 
198 SYSCTL_LONG(_vfs_ncstats, OID_AUTO, cl_smr_hits,
199     CTLFLAG_RD | CTLFLAG_LOCKED,
200     &ncstats.cl_smr_hits, "");
201 SYSCTL_LONG(_vfs_ncstats, OID_AUTO, cl_smr_misses,
202     CTLFLAG_RD | CTLFLAG_LOCKED,
203     &ncstats.cl_smr_miss, "");
204 SYSCTL_LONG(_vfs_ncstats, OID_AUTO, cl_smr_negative_hits,
205     CTLFLAG_RD | CTLFLAG_LOCKED,
206     &ncstats.cl_smr_negative_hits, "");
207 SYSCTL_LONG(_vfs_ncstats, OID_AUTO, cl_smr_fallback,
208     CTLFLAG_RD | CTLFLAG_LOCKED,
209     &ncstats.cl_smr_fallback, "");
210 SYSCTL_LONG(_vfs_ncstats, OID_AUTO, cl_lock_hits,
211     CTLFLAG_RD | CTLFLAG_LOCKED,
212     &ncstats.cl_lock_hits, "");
213 SYSCTL_LONG(_vfs_ncstats, OID_AUTO, clp_next,
214     CTLFLAG_RD | CTLFLAG_LOCKED,
215     &ncstats.clp_next, "");
216 SYSCTL_LONG(_vfs_ncstats, OID_AUTO, clp_next_fail,
217     CTLFLAG_RD | CTLFLAG_LOCKED,
218     &ncstats.clp_next_fail, "");
219 SYSCTL_LONG(_vfs_ncstats, OID_AUTO, clp_smr_next,
220     CTLFLAG_RD | CTLFLAG_LOCKED,
221     &ncstats.clp_smr_next, "");
222 SYSCTL_LONG(_vfs_ncstats, OID_AUTO, clp_smr_next_fail,
223     CTLFLAG_RD | CTLFLAG_LOCKED,
224     &ncstats.clp_smr_next_fail, "");
225 SYSCTL_LONG(_vfs_ncstats, OID_AUTO, nc_lock_shared,
226     CTLFLAG_RD | CTLFLAG_LOCKED,
227     &ncstats.nc_lock_shared, "");
228 SYSCTL_LONG(_vfs_ncstats, OID_AUTO, nc_lock,
229     CTLFLAG_RD | CTLFLAG_LOCKED,
230     &ncstats.nc_lock, "");
231 
232 #define NC_SMR_STATS(v)  os_atomic_inc(&ncstats.v, relaxed)
233 #else
234 #define NC_SMR_STATS(v)
235 #endif /* COLLECT_NC_SMR_STATS */
236 
237 static vnode_t cache_lookup_locked(vnode_t dvp, struct componentname *cnp, uint32_t *vidp);
238 static vnode_t cache_lookup_smr(vnode_t dvp, struct componentname *cnp, uint32_t *vidp);
239 static const char *add_name_internal(const char *, uint32_t, u_int, boolean_t, u_int);
240 static void init_string_table(void);
241 static void cache_delete(struct namecache *, int);
242 static void cache_enter_locked(vnode_t dvp, vnode_t vp, struct componentname *cnp, const char *strname);
243 static void cache_purge_locked(vnode_t vp, kauth_cred_t *credp);
244 static void namecache_smr_free(void *, size_t);
245 static void string_smr_free(void *, size_t);
246 
247 
248 #ifdef DUMP_STRING_TABLE
249 /*
250  * Internal dump function used for debugging
251  */
252 void dump_string_table(void);
253 #endif  /* DUMP_STRING_TABLE */
254 
255 static void init_crc32(void);
256 static unsigned int crc32tab[256];
257 
258 
259 #define NCHHASH(dvp, hash_val) \
260 	(&nchashtbl[(dvp->v_id ^ (hash_val)) & nchashmask])
261 
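/*
 * Illustrative sketch: how a single pathname component could be mapped to a
 * hash chain, mirroring the crc32 loop used by cache_lookup_path() and the
 * NCHHASH() macro above.  The helper below is hypothetical and shown for
 * illustration only.
 */
#if 0
static struct smrq_list_head *
example_bucket_for_name(vnode_t dvp, const char *name)
{
	unsigned int hash = 0;

	while (*name && (*name != '/')) {
		hash = crc32tab[((hash >> 24) ^ (unsigned char)*name++)] ^ hash << 8;
	}
	if (hash == 0) {
		hash = 1;       /* 0 means "no hash computed", so substitute 1 */
	}
	return NCHHASH(dvp, hash);
}
#endif
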
262 /*
263  * This function tries to check if a directory vp is a subdirectory of dvp
264  * only from valid v_parent pointers. It is called with the name cache lock
265  * held and does not drop the lock anytime inside the function.
266  *
267  * It returns a boolean that indicates whether or not it was able to
268  * successfully infer the parent/descendant relationship via the v_parent
269  * pointers, or whether it could not infer that relationship and the decision
270  * must be delegated to the owning filesystem.
271  *
272  * If it does not defer the decision, i.e. it was successfully able to determine
273  * the parent/descendant relationship, *is_subdir tells the caller if vp is a
274  * subdirectory of dvp.
275  *
276  * If the decision is deferred, *next_vp is where it stopped, i.e. *next_vp
277  * is the vnode whose parent is to be determined from the filesystem.
278  * *is_subdir, in this case, is not indicative of anything and should be
279  * ignored.
280  *
281  * The return value and output args should be used as follows:
282  *
283  * defer = cache_check_vnode_issubdir(vp, dvp, is_subdir, next_vp);
284  * if (!defer) {
285  *      if (*is_subdir)
286  *              vp is subdirectory;
287  *      else
288  *              vp is not a subdirectory;
289  * } else {
290  *      if (*next_vp)
291  *              check this vnode's parent from the filesystem
292  *      else
293  *              error (likely because of forced unmount).
294  * }
295  *
296  */
297 static boolean_t
298 cache_check_vnode_issubdir(vnode_t vp, vnode_t dvp, boolean_t *is_subdir,
299     vnode_t *next_vp)
300 {
301 	vnode_t tvp = vp;
302 	int defer = FALSE;
303 
304 	*is_subdir = FALSE;
305 	*next_vp = NULLVP;
306 	while (1) {
307 		mount_t tmp;
308 
309 		if (tvp == dvp) {
310 			*is_subdir = TRUE;
311 			break;
312 		} else if (tvp == rootvnode) {
313 			/* *is_subdir = FALSE */
314 			break;
315 		}
316 
317 		tmp = tvp->v_mount;
318 		while ((tvp->v_flag & VROOT) && tmp && tmp->mnt_vnodecovered &&
319 		    tvp != dvp && tvp != rootvnode) {
320 			tvp = tmp->mnt_vnodecovered;
321 			tmp = tvp->v_mount;
322 		}
323 
324 		/*
325 		 * If dvp is not at the top of a mount "stack" then
326 		 * vp is not a subdirectory of dvp either.
327 		 */
328 		if (tvp == dvp || tvp == rootvnode) {
329 			/* *is_subdir = FALSE */
330 			break;
331 		}
332 
333 		if (!tmp) {
334 			defer = TRUE;
335 			*next_vp = NULLVP;
336 			break;
337 		}
338 
339 		if ((tvp->v_flag & VISHARDLINK) || !(tvp->v_parent)) {
340 			defer = TRUE;
341 			*next_vp = tvp;
342 			break;
343 		}
344 
345 		tvp = tvp->v_parent;
346 	}
347 
348 	return defer;
349 }
350 
351 /* maximum number of retries for potentially transient errors in vnode_issubdir */
352 #define MAX_ERROR_RETRY 3
353 
354 /*
355  * This function checks if a given directory (vp) is a subdirectory of dvp.
356  * It walks backwards from vp and if it hits dvp in its parent chain,
357  * it is a subdirectory. If it encounters the root directory, it is not
358  * a subdirectory.
359  *
360  * This function returns an error if it is unsuccessful and 0 on success.
361  *
362  * On entry (and exit) vp has an iocount and if this function has to take
363  * any iocounts on other vnodes in the parent chain traversal, it releases them.
364  */
365 int
366 vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx)
367 {
368 	vnode_t start_vp, tvp;
369 	vnode_t vp_with_iocount;
370 	int error = 0;
371 	char dotdotbuf[] = "..";
372 	int error_retry_count = 0; /* retry count for potentially transient
373 	                            *  errors */
374 
375 	*is_subdir = FALSE;
376 	tvp = start_vp = vp;
377 	/*
378 	 * Anytime we acquire an iocount in this function, we save the vnode
379 	 * in this variable and release it before exiting.
380 	 */
381 	vp_with_iocount = NULLVP;
382 
383 	while (1) {
384 		boolean_t defer;
385 		vnode_t pvp;
386 		uint32_t vid = 0;
387 		struct componentname cn;
388 		boolean_t is_subdir_locked = FALSE;
389 
390 		if (tvp == dvp) {
391 			*is_subdir = TRUE;
392 			break;
393 		} else if (tvp == rootvnode) {
394 			/* *is_subdir = FALSE */
395 			break;
396 		}
397 
398 		NAME_CACHE_LOCK_SHARED();
399 
400 		defer = cache_check_vnode_issubdir(tvp, dvp, &is_subdir_locked,
401 		    &tvp);
402 
403 		if (defer && tvp) {
404 			vid = vnode_vid(tvp);
405 			vnode_hold(tvp);
406 		}
407 
408 		NAME_CACHE_UNLOCK();
409 
410 		if (!defer) {
411 			*is_subdir = is_subdir_locked;
412 			break;
413 		}
414 
415 		if (!tvp) {
416 			if (error_retry_count++ < MAX_ERROR_RETRY) {
417 				tvp = vp;
418 				continue;
419 			}
420 			error = ENOENT;
421 			break;
422 		}
423 
424 		if (tvp != start_vp) {
425 			if (vp_with_iocount) {
426 				vnode_put(vp_with_iocount);
427 				vp_with_iocount = NULLVP;
428 			}
429 
430 			error = vnode_getwithvid(tvp, vid);
431 			vnode_drop(tvp);
432 			if (error) {
433 				if (error_retry_count++ < MAX_ERROR_RETRY) {
434 					tvp = vp;
435 					error = 0;
436 					continue;
437 				}
438 				break;
439 			}
440 			vp_with_iocount = tvp;
441 		} else {
442 			tvp = vnode_drop(tvp);
443 		}
444 
445 		bzero(&cn, sizeof(cn));
446 		cn.cn_nameiop = LOOKUP;
447 		cn.cn_flags = ISLASTCN | ISDOTDOT;
448 		cn.cn_context = ctx;
449 		cn.cn_pnbuf = &dotdotbuf[0];
450 		cn.cn_pnlen = sizeof(dotdotbuf);
451 		cn.cn_nameptr = cn.cn_pnbuf;
452 		cn.cn_namelen = 2;
453 
454 		pvp = NULLVP;
455 		if ((error = VNOP_LOOKUP(tvp, &pvp, &cn, ctx))) {
456 			break;
457 		}
458 
459 		if (!(tvp->v_flag & VISHARDLINK) && tvp->v_parent != pvp) {
460 			(void)vnode_update_identity(tvp, pvp, NULL, 0, 0,
461 			    VNODE_UPDATE_PARENT);
462 		}
463 
464 		if (vp_with_iocount) {
465 			vnode_put(vp_with_iocount);
466 		}
467 
468 		vp_with_iocount = tvp = pvp;
469 	}
470 
471 	if (vp_with_iocount) {
472 		vnode_put(vp_with_iocount);
473 	}
474 
475 	return error;
476 }
477 
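/*
 * Illustrative sketch: a hypothetical caller using vnode_issubdir() to reject
 * an operation that would move a directory underneath one of its own
 * descendants.  The helper name and policy are assumptions for illustration
 * only; both vnodes are expected to already hold iocounts.
 */
#if 0
static int
example_reject_move_into_self(vnode_t source_dir, vnode_t dest_dir, vfs_context_t ctx)
{
	int is_subdir = 0;
	int error;

	error = vnode_issubdir(dest_dir, source_dir, &is_subdir, ctx);
	if (error) {
		return error;
	}
	return is_subdir ? EINVAL : 0;
}
#endif
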
478 /*
479  * This function builds the path in "buff" from the supplied vnode.
480  * The length of the buffer *INCLUDING* the trailing zero byte is
481  * returned in outlen.  NOTE: the length includes the trailing zero
482  * byte and thus the length is one greater than what strlen would
483  * return.  This is important and lots of code elsewhere in the kernel
484  * assumes this behavior.
485  *
486  * This function can call a VNOP in the file system if the parent vnode
487  * does not exist or when called for hardlinks via the volfs path.
488  * If BUILDPATH_NO_FS_ENTER is set in flags, it only uses values present
489  * in the name cache and does not enter the file system.
490  *
491  * If BUILDPATH_CHECK_MOVED is set in flags, we return EAGAIN when
492  * we encounter ENOENT during path reconstruction.  ENOENT means that
493  * one of the parents moved while we were building the path.  The
494  * caller can special handle this case by calling build_path again.
495  *
496  * If BUILDPATH_VOLUME_RELATIVE is set in flags, we return path
497  * that is relative to the nearest mount point, i.e. do not
498  * cross over mount points during building the path.
499  *
500  * passed in vp must have a valid io_count reference
501  *
502  * If parent vnode is non-NULL it also must have an io count.  This
503  * allows build_path_with_parent to be safely called for operations
504  * unlink, rmdir and rename that already have io counts on the target
505  * and the directory. In this way build_path_with_parent does not have
506  * to try and obtain an additional io count on the parent.  Taking an
507  * io count on the parent can lead to deadlock if a forced unmount
508  * occurs at the right moment.  For a fuller explanation of how this
509  * can occur, see the comment for vn_getpath_with_parent.
510  *
511  */
512 int
513 build_path_with_parent(vnode_t first_vp, vnode_t parent_vp, char *buff, int buflen,
514     int *outlen, size_t *mntpt_outlen, int flags, vfs_context_t ctx)
515 {
516 	vnode_t vp, tvp;
517 	vnode_t vp_with_iocount;
518 	vnode_t proc_root_dir_vp;
519 	char *end;
520 	char *mntpt_end;
521 	const char *str;
522 	unsigned int  len;
523 	int  ret = 0;
524 	int  fixhardlink;
525 
526 	if (first_vp == NULLVP) {
527 		return EINVAL;
528 	}
529 
530 	if (buflen <= 1) {
531 		return ENOSPC;
532 	}
533 
534 	/*
535 	 * Grab the process fd so we can evaluate fd_rdir.
536 	 */
537 	if (!(flags & BUILDPATH_NO_PROCROOT)) {
538 		proc_root_dir_vp = vfs_context_proc(ctx)->p_fd.fd_rdir;
539 	} else {
540 		proc_root_dir_vp = NULL;
541 	}
542 
543 	vp_with_iocount = NULLVP;
544 again:
545 	vp = first_vp;
546 
547 	end = &buff[buflen - 1];
548 	*end = '\0';
549 	mntpt_end = NULL;
550 
551 	/*
552 	 * Catch a special corner case here: chroot to /full/path/to/dir, chdir to
553 	 * it, then open it. Without this check, the path to it will be
554 	 * /full/path/to/dir instead of "/".
555 	 */
556 	if (proc_root_dir_vp == first_vp) {
557 		*--end = '/';
558 		goto out;
559 	}
560 
561 	/*
562 	 * holding the NAME_CACHE_LOCK in shared mode is
563 	 * sufficient to stabilize both the vp->v_parent chain
564 	 * and the 'vp->v_mount->mnt_vnodecovered' chain
565 	 *
566 	 * if we need to drop this lock, we must first grab the v_id
567 	 * from the vnode we're currently working with... if that
568 	 * vnode doesn't already have an io_count reference (the vp
569 	 * passed in comes with one), we must grab a reference
570 	 * after we drop the NAME_CACHE_LOCK via vnode_getwithvid...
571 	 * deadlocks may result if you call vnode_get while holding
572 	 * the NAME_CACHE_LOCK... we lazily release the reference
573 	 * we pick up the next time we encounter a need to drop
574 	 * the NAME_CACHE_LOCK or before we return from this routine
575 	 */
576 	NAME_CACHE_LOCK_SHARED();
577 
578 #if CONFIG_FIRMLINKS
579 	if (!(flags & BUILDPATH_NO_FIRMLINK) &&
580 	    (vp->v_flag & VFMLINKTARGET) && vp->v_fmlink && (vp->v_fmlink->v_type == VDIR)) {
581 		vp = vp->v_fmlink;
582 	}
583 #endif
584 
585 	/*
586 	 * Check if this is the root of a file system.
587 	 */
588 	while (vp && vp->v_flag & VROOT) {
589 		if (vp->v_mount == NULL) {
590 			ret = EINVAL;
591 			goto out_unlock;
592 		}
593 		if ((vp->v_mount->mnt_flag & MNT_ROOTFS) || (vp == proc_root_dir_vp)) {
594 			/*
595 			 * It's the root of the root file system, so it's
596 			 * just "/".
597 			 */
598 			*--end = '/';
599 
600 			goto out_unlock;
601 		} else {
602 			/*
603 			 * This is the root of the volume and the caller does not
604 			 * want to cross mount points.  Therefore just return
605 			 * '/' as the relative path.
606 			 */
607 #if CONFIG_FIRMLINKS
608 			if (!(flags & BUILDPATH_NO_FIRMLINK) &&
609 			    (vp->v_flag & VFMLINKTARGET) && vp->v_fmlink && (vp->v_fmlink->v_type == VDIR)) {
610 				vp = vp->v_fmlink;
611 			} else
612 #endif
613 			if (flags & BUILDPATH_VOLUME_RELATIVE) {
614 				*--end = '/';
615 				goto out_unlock;
616 			} else {
617 				vp = vp->v_mount->mnt_vnodecovered;
618 				if (!mntpt_end && vp) {
619 					mntpt_end = end;
620 				}
621 			}
622 		}
623 	}
624 
625 	while ((vp != NULLVP) && (vp->v_parent != vp)) {
626 		int  vid;
627 
628 		/*
629 		 * For hardlinks the v_name may be stale, so if it's OK
630 		 * to enter a file system, ask the file system for the
631 		 * name and parent (below).
632 		 */
633 		fixhardlink = (vp->v_flag & VISHARDLINK) &&
634 		    (vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
635 		    !(flags & BUILDPATH_NO_FS_ENTER);
636 
637 		if (!fixhardlink) {
638 			str = vp->v_name;
639 
640 			if (str == NULL || *str == '\0') {
641 				if (vp->v_parent != NULL) {
642 					ret = EINVAL;
643 				} else {
644 					ret = ENOENT;
645 				}
646 				goto out_unlock;
647 			}
648 			len = (unsigned int)strlen(str);
649 			/*
650 			 * Check that there's enough space (including space for the '/')
651 			 */
652 			if ((unsigned int)(end - buff) < (len + 1)) {
653 				ret = ENOSPC;
654 				goto out_unlock;
655 			}
656 			/*
657 			 * Copy the name backwards.
658 			 */
659 			str += len;
660 
661 			for (; len > 0; len--) {
662 				*--end = *--str;
663 			}
664 			/*
665 			 * Add a path separator.
666 			 */
667 			*--end = '/';
668 		}
669 
670 		/*
671 		 * Walk up the parent chain.
672 		 */
673 		if (((vp->v_parent != NULLVP) && !fixhardlink) ||
674 		    (flags & BUILDPATH_NO_FS_ENTER)) {
675 			/*
676 			 * In this if () block we are not allowed to enter the filesystem
677 			 * to conclusively get the most accurate parent identifier.
678 			 * As a result, if 'vp' does not identify '/' and it
679 			 * does not have a valid v_parent, then error out
680 			 * and disallow further path construction
681 			 */
682 			if ((vp->v_parent == NULLVP) && (rootvnode != vp)) {
683 				/*
684 				 * Only '/' is allowed to have a NULL parent
685 				 * pointer. Upper level callers should ideally
686 				 * re-drive name lookup on receiving a ENOENT.
687 				 */
688 				ret = ENOENT;
689 
690 				/* The code below will exit early if 'tvp = vp' == NULL */
691 			}
692 			vp = vp->v_parent;
693 
694 			/*
695 			 * if the vnode we have in hand isn't a directory and it
696 			 * has a v_parent, then we started with the resource fork
697 			 * so skip up to avoid getting a duplicate copy of the
698 			 * file name in the path.
699 			 */
700 			if (vp && !vnode_isdir(vp) && vp->v_parent) {
701 				vp = vp->v_parent;
702 			}
703 		} else {
704 			/*
705 			 * No parent, go get it if supported.
706 			 */
707 			struct vnode_attr  va;
708 			vnode_t  dvp;
709 
710 			/*
711 			 * Make sure file system supports obtaining a path from id.
712 			 */
713 			if (!(vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
714 				ret = ENOENT;
715 				goto out_unlock;
716 			}
717 			vid = vp->v_id;
718 
719 			vnode_hold(vp);
720 			NAME_CACHE_UNLOCK();
721 
722 			if (vp != first_vp && vp != parent_vp && vp != vp_with_iocount) {
723 				if (vp_with_iocount) {
724 					vnode_put(vp_with_iocount);
725 					vp_with_iocount = NULLVP;
726 				}
727 				if (vnode_getwithvid(vp, vid)) {
728 					vnode_drop(vp);
729 					goto again;
730 				}
731 				vp_with_iocount = vp;
732 			}
733 
734 			vnode_drop(vp);
735 
736 			VATTR_INIT(&va);
737 			VATTR_WANTED(&va, va_parentid);
738 
739 			if (fixhardlink) {
740 				VATTR_WANTED(&va, va_name);
741 				va.va_name = zalloc(ZV_NAMEI);
742 			} else {
743 				va.va_name = NULL;
744 			}
745 			/*
746 			 * Ask the file system for its parent id and for its name (optional).
747 			 */
748 			ret = vnode_getattr(vp, &va, ctx);
749 
750 			if (ret || !VATTR_IS_SUPPORTED(&va, va_parentid)) {
751 				ret = ENOENT;
752 				goto out;
753 			}
754 
755 			/*
756 			 * Ask the file system for the parent vnode.
757 			 */
758 			if ((ret = VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx))) {
759 				goto out;
760 			}
761 
762 			/* No exit from here before switching vp_with_iocount to dvp */
763 
764 			if (fixhardlink) {
765 				if (VATTR_IS_SUPPORTED(&va, va_name)) {
766 					str = va.va_name;
767 				} else {
768 					ret = ENOENT;
769 					goto bad_news;
770 				}
771 				len = (unsigned int)strlen(str);
772 
773 				vnode_update_identity(vp, dvp, str, len, 0, VNODE_UPDATE_NAME | VNODE_UPDATE_PARENT);
774 
775 				/*
776 				 * Check that there's enough space.
777 				 */
778 				if ((unsigned int)(end - buff) < (len + 1)) {
779 					ret = ENOSPC;
780 				} else {
781 					/* Copy the name backwards. */
782 					str += len;
783 
784 					for (; len > 0; len--) {
785 						*--end = *--str;
786 					}
787 					/*
788 					 * Add a path separator.
789 					 */
790 					*--end = '/';
791 				}
792 bad_news:
793 				zfree(ZV_NAMEI, va.va_name);
794 			} else if (vp->v_parent != dvp) {
795 				vnode_update_identity(vp, dvp, NULL, 0, 0, VNODE_UPDATE_PARENT);
796 			}
797 
798 			if (vp_with_iocount) {
799 				vnode_put(vp_with_iocount);
800 			}
801 			vp = dvp;
802 			vp_with_iocount = vp;
803 
804 			NAME_CACHE_LOCK_SHARED();
805 
806 			/*
807 			 * if the vnode we have in hand isn't a directory and it
808 			 * has a v_parent, then we started with the resource fork
809 			 * so skip up to avoid getting a duplicate copy of the
810 			 * file name in the path.
811 			 */
812 			if (vp && !vnode_isdir(vp) && vp->v_parent) {
813 				vp = vp->v_parent;
814 			}
815 		}
816 
817 		if (vp && (flags & BUILDPATH_CHECKACCESS)) {
818 			vid = vp->v_id;
819 
820 			vnode_hold(vp);
821 			NAME_CACHE_UNLOCK();
822 
823 			if (vp != first_vp && vp != parent_vp && vp != vp_with_iocount) {
824 				if (vp_with_iocount) {
825 					vnode_put(vp_with_iocount);
826 					vp_with_iocount = NULLVP;
827 				}
828 				if (vnode_getwithvid(vp, vid)) {
829 					vnode_drop(vp);
830 					goto again;
831 				}
832 				vp_with_iocount = vp;
833 			}
834 			vnode_drop(vp);
835 
836 			if ((ret = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx))) {
837 				goto out;       /* no peeking */
838 			}
839 			NAME_CACHE_LOCK_SHARED();
840 		}
841 
842 		/*
843 		 * When a mount point is crossed switch the vp.
844 		 * Continue until we find the root or we find
845 		 * a vnode that's not the root of a mounted
846 		 * file system.
847 		 */
848 		tvp = vp;
849 
850 		while (tvp) {
851 			if (tvp == proc_root_dir_vp) {
852 				goto out_unlock;        /* encountered the root */
853 			}
854 
855 #if CONFIG_FIRMLINKS
856 			if (!(flags & BUILDPATH_NO_FIRMLINK) &&
857 			    (tvp->v_flag & VFMLINKTARGET) && tvp->v_fmlink && (tvp->v_fmlink->v_type == VDIR)) {
858 				tvp = tvp->v_fmlink;
859 				break;
860 			}
861 #endif
862 
863 			if (!(tvp->v_flag & VROOT) || !tvp->v_mount) {
864 				break;                  /* not the root of a mounted FS */
865 			}
866 			if (flags & BUILDPATH_VOLUME_RELATIVE) {
867 				/* Do not cross over mount points */
868 				tvp = NULL;
869 			} else {
870 				tvp = tvp->v_mount->mnt_vnodecovered;
871 				if (!mntpt_end && tvp) {
872 					mntpt_end = end;
873 				}
874 			}
875 		}
876 		if (tvp == NULLVP) {
877 			goto out_unlock;
878 		}
879 		vp = tvp;
880 	}
881 out_unlock:
882 	NAME_CACHE_UNLOCK();
883 out:
884 	if (vp_with_iocount) {
885 		vnode_put(vp_with_iocount);
886 	}
887 	/*
888 	 * Slide the name down to the beginning of the buffer.
889 	 */
890 	memmove(buff, end, &buff[buflen] - end);
891 
892 	/*
893 	 * length includes the trailing zero byte
894 	 */
895 	*outlen = (int)(&buff[buflen] - end);
896 	if (mntpt_outlen && mntpt_end) {
897 		*mntpt_outlen = (size_t)*outlen - (size_t)(&buff[buflen] - mntpt_end);
898 	}
899 
900 	/* One of the parents was moved during path reconstruction.
901 	 * The caller is interested in knowing whether any of the
902 	 * parents moved via BUILDPATH_CHECK_MOVED, so return EAGAIN.
903 	 */
904 	if ((ret == ENOENT) && (flags & BUILDPATH_CHECK_MOVED)) {
905 		ret = EAGAIN;
906 	}
907 
908 	return ret;
909 }
910 
911 int
912 build_path(vnode_t first_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx)
913 {
914 	return build_path_with_parent(first_vp, NULL, buff, buflen, outlen, NULL, flags, ctx);
915 }
916 
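/*
 * Illustrative sketch: a hypothetical caller of build_path().  The length
 * returned in 'len' includes the trailing NUL byte, so strlen(path) is
 * len - 1 on success.  'vp' must already carry an iocount; the helper name
 * is an assumption for illustration only.
 */
#if 0
static int
example_log_vnode_path(vnode_t vp, vfs_context_t ctx)
{
	char *path = zalloc(ZV_NAMEI);          /* MAXPATHLEN-sized buffer */
	int len = 0;
	int error;

	error = build_path(vp, path, MAXPATHLEN, &len, 0, ctx);
	if (error == 0) {
		printf("path (%d bytes including NUL): %s\n", len, path);
	}
	zfree(ZV_NAMEI, path);
	return error;
}
#endif
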
917 /*
918  * Combined version of vnode_getparent() and vnode_getname() to acquire both vnode name and parent
919  * without releasing the name cache lock in the interim.
920  */
921 void
922 vnode_getparent_and_name(vnode_t vp, vnode_t *out_pvp, const char **out_name)
923 {
924 	vnode_t pvp = NULLVP;
925 	int     locked = 0;
926 	int     pvid;
927 
928 	NAME_CACHE_LOCK_SHARED();
929 	locked = 1;
930 
931 	if (out_name) {
932 		const char *name = NULL;
933 		if (vp->v_name) {
934 			name = vfs_addname(vp->v_name, (unsigned int)strlen(vp->v_name), 0, 0);
935 		}
936 		*out_name = name;
937 	}
938 
939 	if (!out_pvp) {
940 		goto out;
941 	}
942 
943 	pvp = vp->v_parent;
944 
945 	/*
946 	 * v_parent is stable behind the name_cache lock
947 	 * however, the only thing we can really guarantee
948 	 * is that we've grabbed a valid iocount on the
949 	 * parent of 'vp' at the time we took the name_cache lock...
950 	 * once we drop the lock, vp could get re-parented
951 	 */
952 	if (pvp != NULLVP) {
953 		pvid = pvp->v_id;
954 
955 		vnode_hold(pvp);
956 		NAME_CACHE_UNLOCK();
957 		locked = 0;
958 
959 		if (vnode_getwithvid(pvp, pvid) != 0) {
960 			vnode_drop(pvp);
961 			pvp = NULL;
962 		} else {
963 			vnode_drop(pvp);
964 		}
965 	}
966 	*out_pvp = pvp;
967 
968 out:
969 	if (locked) {
970 		NAME_CACHE_UNLOCK();
971 	}
972 }
973 
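/*
 * Illustrative sketch: a hypothetical caller of vnode_getparent_and_name().
 * A returned parent carries an iocount that must be dropped with vnode_put(),
 * and a returned name holds a string-table reference that is released with
 * vnode_putname().  The helper name is an assumption for illustration only.
 */
#if 0
static void
example_report_parent_and_name(vnode_t vp)
{
	vnode_t pvp = NULLVP;
	const char *name = NULL;

	vnode_getparent_and_name(vp, &pvp, &name);

	if (name != NULL) {
		printf("name: %s\n", name);
		vnode_putname(name);
	}
	if (pvp != NULLVP) {
		vnode_put(pvp);
	}
}
#endif
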
974 /*
975  * return NULLVP if vp's parent doesn't
976  * exist, or we can't get a valid iocount
977  * else return the parent of vp
978  */
979 vnode_t
980 vnode_getparent(vnode_t vp)
981 {
982 	vnode_t pvp = NULLVP;
983 	vnode_getparent_and_name(vp, &pvp, NULL);
984 
985 	return pvp;
986 }
987 
988 /*
989  * Similar to vnode_getparent() but only returns the parent vnode (with an iocount
990  * held) if the actual parent vnode is different from the given 'pvp'.
991  */
992 __private_extern__ vnode_t
993 vnode_getparent_if_different(vnode_t vp, vnode_t pvp)
994 {
995 	vnode_t real_pvp = NULLVP;
996 	int     pvid;
997 
998 	if (vp->v_parent == pvp) {
999 		goto out;
1000 	}
1001 
1002 	NAME_CACHE_LOCK_SHARED();
1003 
1004 	real_pvp = vp->v_parent;
1005 	if (real_pvp == NULLVP) {
1006 		NAME_CACHE_UNLOCK();
1007 		goto out;
1008 	}
1009 
1010 	/*
1011 	 * Do the check again after namecache lock is acquired as the parent vnode
1012 	 * could have changed.
1013 	 */
1014 	if (real_pvp != pvp) {
1015 		pvid = real_pvp->v_id;
1016 
1017 		vnode_hold(real_pvp);
1018 		NAME_CACHE_UNLOCK();
1019 
1020 		if (vnode_getwithvid(real_pvp, pvid) != 0) {
1021 			vnode_drop(real_pvp);
1022 			real_pvp = NULLVP;
1023 		} else {
1024 			vnode_drop(real_pvp);
1025 		}
1026 	} else {
1027 		real_pvp = NULLVP;
1028 		NAME_CACHE_UNLOCK();
1029 	}
1030 
1031 out:
1032 	return real_pvp;
1033 }
1034 
1035 const char *
1036 vnode_getname(vnode_t vp)
1037 {
1038 	const char *name = NULL;
1039 	vnode_getparent_and_name(vp, NULL, &name);
1040 
1041 	return name;
1042 }
1043 
1044 void
1045 vnode_putname(const char *name)
1046 {
1047 	if (name) {
1048 		vfs_removename(name);
1049 	}
1050 }
1051 
1052 static const char unknown_vnodename[] = "(unknown vnode name)";
1053 
1054 const char *
1055 vnode_getname_printable(vnode_t vp)
1056 {
1057 	const char *name = vnode_getname(vp);
1058 	if (name != NULL) {
1059 		return name;
1060 	}
1061 
1062 	switch (vp->v_type) {
1063 	case VCHR:
1064 	case VBLK:
1065 	{
1066 		/*
1067 		 * Create an artificial dev name from
1068 		 * major and minor device number
1069 		 */
1070 		char dev_name[64];
1071 		(void) snprintf(dev_name, sizeof(dev_name),
1072 		    "%c(%u, %u)", VCHR == vp->v_type ? 'c':'b',
1073 		    major(vp->v_rdev), minor(vp->v_rdev));
1074 		/*
1075 		 * Add the newly created dev name to the name
1076 		 * cache to allow easier cleanup. Also,
1077 		 * vfs_addname allocates memory for the new name
1078 		 * and returns it.
1079 		 */
1080 		NAME_CACHE_LOCK_SHARED();
1081 		name = vfs_addname(dev_name, (unsigned int)strlen(dev_name), 0, 0);
1082 		NAME_CACHE_UNLOCK();
1083 		return name;
1084 	}
1085 	default:
1086 		return unknown_vnodename;
1087 	}
1088 }
1089 
1090 void
1091 vnode_putname_printable(const char *name)
1092 {
1093 	if (name == unknown_vnodename) {
1094 		return;
1095 	}
1096 	vnode_putname(name);
1097 }
1098 
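/*
 * Illustrative sketch: vnode_getname_printable() always returns something
 * printable, so it pairs with vnode_putname_printable() rather than
 * vnode_putname().  The helper name is hypothetical and for illustration only.
 */
#if 0
static void
example_log_printable_name(vnode_t vp)
{
	const char *name = vnode_getname_printable(vp);

	printf("vnode %p name: %s\n", vp, name);
	vnode_putname_printable(name);
}
#endif
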
1099 
1100 /*
1101  * if VNODE_UPDATE_PARENT, and we can take
1102  * a reference on dvp, then update vp with
1103  * its new parent... if vp already has a parent,
1104  * then drop the reference vp held on it
1105  *
1106  * if VNODE_UPDATE_NAME,
1107  * then drop string ref on v_name if it exists, and if name is non-NULL
1108  * then pick up a string reference on name and record it in v_name...
1109  * optionally pass in the length and hashval of name if known
1110  *
1111  * if VNODE_UPDATE_CACHE, flush the name cache entries associated with vp
1112  */
1113 void
1114 vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags)
1115 {
1116 	struct  namecache *ncp;
1117 	vnode_t old_parentvp = NULLVP;
1118 	int isstream = (vp->v_flag & VISNAMEDSTREAM);
1119 	int kusecountbumped = 0;
1120 	kauth_cred_t tcred = NULL;
1121 	const char *vname = NULL;
1122 	const char *tname = NULL;
1123 
1124 	if (name_len < 0) {
1125 		return;
1126 	}
1127 
1128 	if (flags & VNODE_UPDATE_PARENT) {
1129 		if (dvp && vnode_ref(dvp) != 0) {
1130 			dvp = NULLVP;
1131 		}
1132 		/* Don't count a stream's parent ref during unmounts */
1133 		if (isstream && dvp && (dvp != vp) && (dvp != vp->v_parent) && (dvp->v_type == VREG)) {
1134 			vnode_lock_spin(dvp);
1135 			++dvp->v_kusecount;
1136 			kusecountbumped = 1;
1137 			vnode_unlock(dvp);
1138 		}
1139 	} else {
1140 		dvp = NULLVP;
1141 	}
1142 	if ((flags & VNODE_UPDATE_NAME)) {
1143 		if (name != vp->v_name) {
1144 			if (name && *name) {
1145 				if (name_len == 0) {
1146 					name_len = (int)strlen(name);
1147 				}
1148 				tname = vfs_addname(name, name_len, name_hashval, 0);
1149 			}
1150 		} else {
1151 			flags &= ~VNODE_UPDATE_NAME;
1152 		}
1153 	}
1154 	if ((flags & (VNODE_UPDATE_PURGE | VNODE_UPDATE_PARENT | VNODE_UPDATE_CACHE | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGEFIRMLINK))) {
1155 		NAME_CACHE_LOCK();
1156 
1157 #if CONFIG_FIRMLINKS
1158 		if (flags & VNODE_UPDATE_PURGEFIRMLINK) {
1159 			vnode_t old_fvp = vp->v_fmlink;
1160 			if (old_fvp) {
1161 				vnode_lock_spin(vp);
1162 				vp->v_flag &= ~VFMLINKTARGET;
1163 				vp->v_fmlink = NULLVP;
1164 				vnode_unlock(vp);
1165 				NAME_CACHE_UNLOCK();
1166 
1167 				/*
1168 				 * vnode_rele can result in cascading series of
1169 				 * usecount releases. The combination of calling
1170 				 * vnode_recycle and dont_reenter (3rd arg to
1171 				 * vnode_rele_internal) ensures we don't have
1172 				 * that issue.
1173 				 */
1174 				vnode_recycle(old_fvp);
1175 				vnode_rele_internal(old_fvp, O_EVTONLY, 1, 0);
1176 
1177 				NAME_CACHE_LOCK();
1178 			}
1179 		}
1180 #endif
1181 
1182 		if ((flags & VNODE_UPDATE_PURGE)) {
1183 			if (vp->v_parent) {
1184 				vp->v_parent->v_nc_generation++;
1185 			}
1186 
1187 			while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
1188 				cache_delete(ncp, 1);
1189 			}
1190 
1191 			while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) {
1192 				cache_delete(ncp, 1);
1193 			}
1194 
1195 			/*
1196 			 * Use a temp variable to avoid kauth_cred_drop() while NAME_CACHE_LOCK is held
1197 			 */
1198 			tcred = vnode_cred(vp);
1199 			vp->v_cred = NOCRED;
1200 			vp->v_authorized_actions = 0;
1201 			vp->v_cred_timestamp = 0;
1202 		}
1203 		if ((flags & VNODE_UPDATE_NAME)) {
1204 			vname = vp->v_name;
1205 			vp->v_name = tname;
1206 		}
1207 		if (flags & VNODE_UPDATE_PARENT) {
1208 			if (dvp != vp && dvp != vp->v_parent) {
1209 				old_parentvp = vp->v_parent;
1210 				vp->v_parent = dvp;
1211 				dvp = NULLVP;
1212 
1213 				if (old_parentvp) {
1214 					flags |= VNODE_UPDATE_CACHE;
1215 				}
1216 			}
1217 		}
1218 		if (flags & VNODE_UPDATE_CACHE) {
1219 			while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
1220 				cache_delete(ncp, 1);
1221 			}
1222 		}
1223 		NAME_CACHE_UNLOCK();
1224 
1225 		if (vname != NULL) {
1226 			vfs_removename(vname);
1227 		}
1228 
1229 		if (IS_VALID_CRED(tcred)) {
1230 			kauth_cred_unref(&tcred);
1231 		}
1232 	}
1233 	if (dvp != NULLVP) {
1234 		/* Back-out the ref we took if we lost a race for vp->v_parent. */
1235 		if (kusecountbumped) {
1236 			vnode_lock_spin(dvp);
1237 			if (dvp->v_kusecount > 0) {
1238 				--dvp->v_kusecount;
1239 			}
1240 			vnode_unlock(dvp);
1241 		}
1242 		vnode_rele(dvp);
1243 	}
1244 	if (old_parentvp) {
1245 		struct  uthread *ut;
1246 		vnode_t vreclaims = NULLVP;
1247 
1248 		if (isstream) {
1249 			vnode_lock_spin(old_parentvp);
1250 			if ((old_parentvp->v_type != VDIR) && (old_parentvp->v_kusecount > 0)) {
1251 				--old_parentvp->v_kusecount;
1252 			}
1253 			vnode_unlock(old_parentvp);
1254 		}
1255 		ut = current_uthread();
1256 
1257 		/*
1258 		 * indicate to vnode_rele that it shouldn't do a
1259 		 * vnode_reclaim at this time... instead it will
1260 		 * chain the vnode to the uu_vreclaims list...
1261 		 * we'll be responsible for calling vnode_reclaim
1262 		 * on each of the vnodes in this list...
1263 		 */
1264 		ut->uu_defer_reclaims = 1;
1265 		ut->uu_vreclaims = NULLVP;
1266 
1267 		while ((vp = old_parentvp) != NULLVP) {
1268 			vnode_hold(vp);
1269 			vnode_lock_spin(vp);
1270 			vnode_rele_internal(vp, 0, 0, 1);
1271 
1272 			/*
1273 			 * check to see if the vnode is now in the state
1274 			 * that would have triggered a vnode_reclaim in vnode_rele
1275 			 * if it is, we save its parent pointer and then NULL
1276 			 * out the v_parent field... we'll drop the reference
1277 			 * that was held on the next iteration of this loop...
1278 			 * this short circuits a potential deep recursion if we
1279 			 * have a long chain of parents in this state...
1280 			 * we'll sit in this loop until we run into
1281 			 * a parent in this chain that is not in this state
1282 			 *
1283 			 * make our check and the vnode_rele atomic
1284 			 * with respect to the current vnode we're working on
1285 			 * by holding the vnode lock
1286 			 * if vnode_rele deferred the vnode_reclaim and has put
1287 			 * this vnode on the list to be reaped by us, then
1288 			 * it has left this vnode with an iocount == 1
1289 			 */
1290 			if (ut->uu_vreclaims == vp) {
1291 				/*
1292 				 * This vnode is on the head of the uu_vreclaims chain
1293 				 * which means vnode_rele wanted to do a vnode_reclaim
1294 				 * on this vnode. Pull the parent pointer now so that when we do the
1295 				 * vnode_reclaim for each of the vnodes in the uu_vreclaims
1296 				 * list, we won't recurse back through here
1297 				 *
1298 				 * need to do a convert here in case vnode_rele_internal
1299 				 * returns with the lock held in the spin mode... it
1300 				 * can drop and retake the lock under certain circumstances
1301 				 */
1302 				vnode_lock_convert(vp);
1303 
1304 				NAME_CACHE_LOCK();
1305 				old_parentvp = vp->v_parent;
1306 				vp->v_parent = NULLVP;
1307 				NAME_CACHE_UNLOCK();
1308 			} else {
1309 				/*
1310 				 * we're done... we ran into a vnode that isn't
1311 				 * being terminated
1312 				 */
1313 				old_parentvp = NULLVP;
1314 			}
1315 			vnode_drop_and_unlock(vp);
1316 		}
1317 		vreclaims = ut->uu_vreclaims;
1318 		ut->uu_vreclaims = NULLVP;
1319 		ut->uu_defer_reclaims = 0;
1320 
1321 		while ((vp = vreclaims) != NULLVP) {
1322 			vreclaims = vp->v_defer_reclaimlist;
1323 
1324 			/*
1325 			 * vnode_put will drive the vnode_reclaim if
1326 			 * we are still the only reference on this vnode
1327 			 */
1328 			vnode_put(vp);
1329 		}
1330 	}
1331 }
1332 
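/*
 * Illustrative sketch: how a hypothetical post-rename fixup might point a
 * vnode at its new parent and name.  Passing 0 for the length and hash lets
 * vnode_update_identity() compute them; VNODE_UPDATE_CACHE flushes the name
 * cache entries that referenced the old identity.  The helper name is an
 * assumption for illustration only.
 */
#if 0
static void
example_fixup_after_rename(vnode_t vp, vnode_t new_dvp, const char *new_name)
{
	vnode_update_identity(vp, new_dvp, new_name, 0, 0,
	    VNODE_UPDATE_NAME | VNODE_UPDATE_PARENT | VNODE_UPDATE_CACHE);
}
#endif
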
1333 #if CONFIG_FIRMLINKS
1334 errno_t
1335 vnode_setasfirmlink(vnode_t vp, vnode_t target_vp)
1336 {
1337 	int error = 0;
1338 	vnode_t old_target_vp = NULLVP;
1339 	vnode_t old_target_vp_v_fmlink = NULLVP;
1340 	kauth_cred_t target_vp_cred = NULL;
1341 	kauth_cred_t old_target_vp_cred = NULL;
1342 
1343 	if (!vp) {
1344 		return EINVAL;
1345 	}
1346 
1347 	if (target_vp) {
1348 		if (vp->v_fmlink == target_vp) { /* Will be checked again under the name cache lock */
1349 			return 0;
1350 		}
1351 
1352 		/*
1353 		 * Firmlink source and target will take both a usecount
1354 		 * and kusecount on each other.
1355 		 */
1356 		if ((error = vnode_ref_ext(target_vp, O_EVTONLY, VNODE_REF_FORCE))) {
1357 			return error;
1358 		}
1359 
1360 		if ((error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE))) {
1361 			vnode_rele_ext(target_vp, O_EVTONLY, 1);
1362 			return error;
1363 		}
1364 	}
1365 
1366 	NAME_CACHE_LOCK();
1367 
1368 	old_target_vp = vp->v_fmlink;
1369 	if (target_vp && (target_vp == old_target_vp)) {
1370 		NAME_CACHE_UNLOCK();
1371 		return 0;
1372 	}
1373 	vp->v_fmlink = target_vp;
1374 
1375 	vnode_lock_spin(vp);
1376 	vp->v_flag &= ~VFMLINKTARGET;
1377 	vnode_unlock(vp);
1378 
1379 	if (target_vp) {
1380 		target_vp->v_fmlink = vp;
1381 		vnode_lock_spin(target_vp);
1382 		target_vp->v_flag |= VFMLINKTARGET;
1383 		vnode_unlock(target_vp);
1384 		cache_purge_locked(vp, &target_vp_cred);
1385 	}
1386 
1387 	if (old_target_vp) {
1388 		old_target_vp_v_fmlink = old_target_vp->v_fmlink;
1389 		old_target_vp->v_fmlink = NULLVP;
1390 		vnode_lock_spin(old_target_vp);
1391 		old_target_vp->v_flag &= ~VFMLINKTARGET;
1392 		vnode_unlock(old_target_vp);
1393 		cache_purge_locked(vp, &old_target_vp_cred);
1394 	}
1395 
1396 	NAME_CACHE_UNLOCK();
1397 
1398 	if (IS_VALID_CRED(target_vp_cred)) {
1399 		kauth_cred_unref(&target_vp_cred);
1400 	}
1401 
1402 	if (old_target_vp) {
1403 		if (IS_VALID_CRED(old_target_vp_cred)) {
1404 			kauth_cred_unref(&old_target_vp_cred);
1405 		}
1406 
1407 		vnode_rele_ext(old_target_vp, O_EVTONLY, 1);
1408 		if (old_target_vp_v_fmlink) {
1409 			vnode_rele_ext(old_target_vp_v_fmlink, O_EVTONLY, 1);
1410 		}
1411 	}
1412 
1413 	return 0;
1414 }
1415 
1416 errno_t
1417 vnode_getfirmlink(vnode_t vp, vnode_t *target_vp)
1418 {
1419 	int error;
1420 
1421 	if (!vp->v_fmlink) {
1422 		return ENODEV;
1423 	}
1424 
1425 	NAME_CACHE_LOCK_SHARED();
1426 	if (vp->v_fmlink && !(vp->v_flag & VFMLINKTARGET) &&
1427 	    (vnode_get(vp->v_fmlink) == 0)) {
1428 		vnode_t tvp = vp->v_fmlink;
1429 
1430 		vnode_lock_spin(tvp);
1431 		if (tvp->v_lflag & (VL_TERMINATE | VL_DEAD)) {
1432 			vnode_unlock(tvp);
1433 			NAME_CACHE_UNLOCK();
1434 			vnode_put(tvp);
1435 			return ENOENT;
1436 		}
1437 		if (!(tvp->v_flag & VFMLINKTARGET)) {
1438 			panic("firmlink target for vnode %p does not have flag set", vp);
1439 		}
1440 		vnode_unlock(tvp);
1441 		*target_vp = tvp;
1442 		error = 0;
1443 	} else {
1444 		*target_vp = NULLVP;
1445 		error = ENODEV;
1446 	}
1447 	NAME_CACHE_UNLOCK();
1448 	return error;
1449 }
1450 
1451 #else /* CONFIG_FIRMLINKS */
1452 
1453 errno_t
1454 vnode_setasfirmlink(__unused vnode_t vp, __unused vnode_t src_vp)
1455 {
1456 	return ENOTSUP;
1457 }
1458 
1459 errno_t
1460 vnode_getfirmlink(__unused vnode_t vp, __unused vnode_t *target_vp)
1461 {
1462 	return ENOTSUP;
1463 }
1464 
1465 #endif
1466 
1467 /*
1468  * Mark a vnode as having multiple hard links.  HFS makes use of this
1469  * because it keeps track of each link separately, and wants to know
1470  * which link was actually used.
1471  *
1472  * This will cause the name cache to force a VNOP_LOOKUP on the vnode
1473  * so that HFS can post-process the lookup.  Also, volfs will call
1474  * VNOP_GETATTR2 to determine the parent, instead of using v_parent.
1475  */
1476 void
1477 vnode_setmultipath(vnode_t vp)
1478 {
1479 	vnode_lock_spin(vp);
1480 
1481 	/*
1482 	 * In theory, we're changing the vnode's identity as far as the
1483 	 * name cache is concerned, so we ought to grab the name cache lock
1484 	 * here.  However, there is already a race, and grabbing the name
1485 	 * cache lock only makes the race window slightly smaller.
1486 	 *
1487 	 * The race happens because the vnode already exists in the name
1488 	 * cache, and could be found by one thread before another thread
1489 	 * can set the hard link flag.
1490 	 */
1491 
1492 	vp->v_flag |= VISHARDLINK;
1493 
1494 	vnode_unlock(vp);
1495 }
1496 
1497 
1498 
1499 /*
1500  * backwards compatibility
1501  */
1502 void
1503 vnode_uncache_credentials(vnode_t vp)
1504 {
1505 	vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
1506 }
1507 
1508 
1509 /*
1510  * use the exclusive form of NAME_CACHE_LOCK to protect the update of the
1511  * following fields in the vnode: v_cred_timestamp, v_cred, v_authorized_actions
1512  * we use this lock so that we can look at the v_cred and v_authorized_actions
1513  * atomically while behind the NAME_CACHE_LOCK in shared mode in 'cache_lookup_path',
1514  * which is the super-hot path... if we are updating the authorized actions for this
1515  * vnode, we are already in the super-slow and far less frequented path so it's not
1516  * that bad that we take the lock exclusive for this case... of course we strive
1517  * to hold it for the minimum amount of time possible
1518  */
1519 
1520 void
1521 vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action)
1522 {
1523 	kauth_cred_t tcred = NOCRED;
1524 
1525 	NAME_CACHE_LOCK();
1526 
1527 	vp->v_authorized_actions &= ~action;
1528 
1529 	if (action == KAUTH_INVALIDATE_CACHED_RIGHTS &&
1530 	    IS_VALID_CRED(vp->v_cred)) {
1531 		/*
1532 		 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
1533 		 */
1534 		tcred = vnode_cred(vp);
1535 		vp->v_cred = NOCRED;
1536 	}
1537 	NAME_CACHE_UNLOCK();
1538 
1539 	if (IS_VALID_CRED(tcred)) {
1540 		kauth_cred_unref(&tcred);
1541 	}
1542 }
1543 
1544 
1545 /* disable vnode_cache_is_authorized() by setting vnode_cache_defeat */
1546 static TUNABLE(int, bootarg_vnode_cache_defeat, "-vnode_cache_defeat", 0);
1547 
1548 boolean_t
1549 vnode_cache_is_authorized(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
1550 {
1551 	kauth_cred_t    ucred;
1552 	boolean_t       retval = FALSE;
1553 
1554 	/* Boot argument to defeat rights caching */
1555 	if (bootarg_vnode_cache_defeat) {
1556 		return FALSE;
1557 	}
1558 
1559 	if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
1560 		/*
1561 		 * a TTL is enabled on the rights cache... handle it here
1562 		 * a TTL of 0 indicates that no rights should be cached
1563 		 */
1564 		if (vp->v_mount->mnt_authcache_ttl) {
1565 			if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) {
1566 				/*
1567 				 * For filesystems marked only MNTK_AUTH_OPAQUE (generally network ones),
1568 				 * we will only allow a SEARCH right on a directory to be cached...
1569 				 * that cached right always has a default TTL associated with it
1570 				 */
1571 				if (action != KAUTH_VNODE_SEARCH || vp->v_type != VDIR) {
1572 					vp = NULLVP;
1573 				}
1574 			}
1575 			if (vp != NULLVP && vnode_cache_is_stale(vp) == TRUE) {
1576 				vnode_uncache_authorized_action(vp, vp->v_authorized_actions);
1577 				vp = NULLVP;
1578 			}
1579 		} else {
1580 			vp = NULLVP;
1581 		}
1582 	}
1583 	if (vp != NULLVP) {
1584 		ucred = vfs_context_ucred(ctx);
1585 
1586 		NAME_CACHE_LOCK_SHARED();
1587 
1588 		if (vnode_cred(vp) == ucred && (vp->v_authorized_actions & action) == action) {
1589 			retval = TRUE;
1590 		}
1591 
1592 		NAME_CACHE_UNLOCK();
1593 	}
1594 	return retval;
1595 }
1596 
1597 
1598 void
1599 vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t action)
1600 {
1601 	kauth_cred_t tcred = NOCRED;
1602 	kauth_cred_t ucred;
1603 	struct timeval tv;
1604 	boolean_t ttl_active = FALSE;
1605 
1606 	ucred = vfs_context_ucred(ctx);
1607 
1608 	if (!IS_VALID_CRED(ucred) || action == 0) {
1609 		return;
1610 	}
1611 
1612 	if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
1613 		/*
1614 		 * a TTL is enabled on the rights cache... handle it here
1615 		 * a TTL of 0 indicates that no rights should be cached
1616 		 */
1617 		if (vp->v_mount->mnt_authcache_ttl == 0) {
1618 			return;
1619 		}
1620 
1621 		if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) {
1622 			/*
1623 			 * only cache SEARCH action for filesystems marked
1624 			 * MNTK_AUTH_OPAQUE on VDIRs...
1625 			 * the lookup_path code will time these out
1626 			 */
1627 			if ((action & ~KAUTH_VNODE_SEARCH) || vp->v_type != VDIR) {
1628 				return;
1629 			}
1630 		}
1631 		ttl_active = TRUE;
1632 
1633 		microuptime(&tv);
1634 	}
1635 	NAME_CACHE_LOCK();
1636 
1637 	tcred = vnode_cred(vp);
1638 	if (tcred == ucred) {
1639 		tcred = NOCRED;
1640 	} else {
1641 		/*
1642 		 * Use a temp variable to avoid kauth_cred_drop() while NAME_CACHE_LOCK is held
1643 		 */
1644 		kauth_cred_ref(ucred);
1645 		vp->v_cred = ucred;
1646 		vp->v_authorized_actions = 0;
1647 	}
1648 	if (ttl_active == TRUE && vp->v_authorized_actions == 0) {
1649 		/*
1650 		 * only reset the timestamp on the
1651 		 * first authorization cached after the previous
1652 		 * timer has expired or we're switching creds...
1653 		 * 'vnode_cache_is_authorized' will clear the
1654 		 * authorized actions if the TTL is active and
1655 		 * it has expired
1656 		 */
1657 		vp->v_cred_timestamp = (int)tv.tv_sec;
1658 	}
1659 	vp->v_authorized_actions |= action;
1660 
1661 	NAME_CACHE_UNLOCK();
1662 
1663 	if (IS_VALID_CRED(tcred)) {
1664 		kauth_cred_unref(&tcred);
1665 	}
1666 }
1667 
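/*
 * Illustrative sketch: the intended pairing of the rights-cache helpers.  A
 * hypothetical authorization path consults the cache first and, after a
 * successful full check, records the granted action for later lookups.  The
 * helper name is an assumption for illustration only.
 */
#if 0
static int
example_authorize_search(vnode_t vp, vfs_context_t ctx)
{
	int error;

	if (vnode_cache_is_authorized(vp, ctx, KAUTH_VNODE_SEARCH)) {
		return 0;       /* cached grant, skip the full check */
	}
	error = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx);
	if (error == 0) {
		vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCH);
	}
	return error;
}
#endif
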
1668 
1669 boolean_t
1670 vnode_cache_is_stale(vnode_t vp)
1671 {
1672 	struct timeval  tv;
1673 	boolean_t       retval;
1674 
1675 	microuptime(&tv);
1676 
1677 	if ((tv.tv_sec - vp->v_cred_timestamp) > vp->v_mount->mnt_authcache_ttl) {
1678 		retval = TRUE;
1679 	} else {
1680 		retval = FALSE;
1681 	}
1682 
1683 	return retval;
1684 }
1685 
1686 VFS_SMR_DECLARE;
1687 
1688 /*
1689  * Components of nameidata (or objects it can point to) which may
1690  * need restoring in case fast path lookup fails.
1691  */
1692 struct nameidata_state {
1693 	u_long  ni_loopcnt;
1694 	char *ni_next;
1695 	u_int ni_pathlen;
1696 	int32_t ni_flag;
1697 	char *cn_nameptr;
1698 	int cn_namelen;
1699 	int cn_flags;
1700 	uint32_t cn_hash;
1701 };
1702 
1703 static void
1704 save_ndp_state(struct nameidata *ndp, struct componentname *cnp, struct nameidata_state *saved_statep)
1705 {
1706 	saved_statep->ni_loopcnt = ndp->ni_loopcnt;
1707 	saved_statep->ni_next = ndp->ni_next;
1708 	saved_statep->ni_pathlen = ndp->ni_pathlen;
1709 	saved_statep->ni_flag = ndp->ni_flag;
1710 	saved_statep->cn_nameptr = cnp->cn_nameptr;
1711 	saved_statep->cn_namelen = cnp->cn_namelen;
1712 	saved_statep->cn_flags = cnp->cn_flags;
1713 	saved_statep->cn_hash = cnp->cn_hash;
1714 }
1715 
1716 static void
1717 restore_ndp_state(struct nameidata *ndp, struct componentname *cnp, struct nameidata_state *saved_statep)
1718 {
1719 	ndp->ni_loopcnt = saved_statep->ni_loopcnt;
1720 	ndp->ni_next = saved_statep->ni_next;
1721 	ndp->ni_pathlen = saved_statep->ni_pathlen;
1722 	ndp->ni_flag = saved_statep->ni_flag;
1723 	cnp->cn_nameptr = saved_statep->cn_nameptr;
1724 	cnp->cn_namelen = saved_statep->cn_namelen;
1725 	cnp->cn_flags = saved_statep->cn_flags;
1726 	cnp->cn_hash = saved_statep->cn_hash;
1727 }
1728 
1729 static inline bool
1730 vid_is_same(vnode_t vp, uint32_t vid)
1731 {
1732 	return !(os_atomic_load(&vp->v_lflag, relaxed) & (VL_DRAIN | VL_TERMINATE | VL_DEAD)) && (vnode_vid(vp) == vid);
1733 }
1734 
1735 static inline bool
1736 can_check_v_mountedhere(vnode_t vp)
1737 {
1738 	return (os_atomic_load(&vp->v_usecount, relaxed) > 0) &&
1739 	       (os_atomic_load(&vp->v_flag, relaxed) & VMOUNTEDHERE) &&
1740 	       !(os_atomic_load(&vp->v_lflag, relaxed) & (VL_TERMINATE | VL_DEAD) &&
1741 	       (vp->v_type == VDIR));
1742 }
1743 
1744 /*
1745  * Returns:	0			Success
1746  *		ERECYCLE		vnode was recycled from underneath us.  Force lookup to be re-driven from namei.
1747  *                                              This errno value should not be seen by anyone outside of the kernel.
1748  */
1749 int
1750 cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp,
1751     vfs_context_t ctx, int *dp_authorized, vnode_t last_dp)
1752 {
1753 	struct nameidata_state saved_state;
1754 	char            *cp;            /* pointer into pathname argument */
1755 	uint32_t        vid;
1756 	uint32_t        vvid = 0;       /* protected by vp != NULLVP */
1757 	vnode_t         vp = NULLVP;
1758 	vnode_t         tdp = NULLVP;
1759 	vnode_t         start_dp = dp;
1760 	kauth_cred_t    ucred;
1761 	boolean_t       ttl_enabled = FALSE;
1762 	struct timeval  tv;
1763 	mount_t         mp;
1764 	mount_t         dmp;
1765 	unsigned int    hash;
1766 	int             error = 0;
1767 	boolean_t       dotdotchecked = FALSE;
1768 	bool            locked = false;
1769 	bool            needs_lock = false;
1770 	bool            dp_iocount_taken = false;
1771 
1772 #if CONFIG_TRIGGERS
1773 	vnode_t         trigger_vp;
1774 #endif /* CONFIG_TRIGGERS */
1775 
1776 	ucred = vfs_context_ucred(ctx);
1777 retry:
1778 	if (nc_smr_enabled && !needs_lock) {
1779 		save_ndp_state(ndp, cnp, &saved_state);
1780 		vfs_smr_enter();
1781 	} else {
1782 		NAME_CACHE_LOCK_SHARED();
1783 		locked = true;
1784 	}
1785 	ndp->ni_flag &= ~(NAMEI_TRAILINGSLASH);
1786 
1787 	dmp = dp->v_mount;
1788 	vid = dp->v_id;
1789 	if (dmp && (dmp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
1790 		ttl_enabled = TRUE;
1791 		microuptime(&tv);
1792 	}
1793 	for (;;) {
1794 		/*
1795 		 * Search a directory.
1796 		 *
1797 		 * The cn_hash value is for use by cache_lookup
1798 		 * The last component of the filename is left accessible via
1799 		 * cnp->cn_nameptr for callers that need the name.
1800 		 */
1801 		hash = 0;
1802 		cp = cnp->cn_nameptr;
1803 
1804 		while (*cp && (*cp != '/')) {
1805 			hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
1806 		}
1807 		/*
1808 		 * the crc generator can legitimately generate
1809 		 * a 0... however, 0 for us means that we
1810 		 * haven't computed a hash, so use 1 instead
1811 		 */
1812 		if (hash == 0) {
1813 			hash = 1;
1814 		}
1815 		cnp->cn_hash = hash;
1816 		cnp->cn_namelen = (int)(cp - cnp->cn_nameptr);
1817 
1818 		ndp->ni_pathlen -= cnp->cn_namelen;
1819 		ndp->ni_next = cp;
1820 
1821 		/*
1822 		 * Replace multiple slashes by a single slash and trailing slashes
1823 		 * by a null.  This must be done before VNOP_LOOKUP() because some
1824 		 * fs's don't know about trailing slashes.  Remember if there were
1825 		 * trailing slashes to handle symlinks, existing non-directories
1826 		 * and non-existing files that won't be directories specially later.
1827 		 */
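		/*
		 * For example, "a//b//" is consumed as the components "a" and then
		 * "b": the extra slashes are skipped, and the trailing slashes are
		 * replaced with a null byte with NAMEI_TRAILINGSLASH recorded.
		 */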
1828 		while (*cp == '/' && (cp[1] == '/' || cp[1] == '\0')) {
1829 			cp++;
1830 			ndp->ni_pathlen--;
1831 
1832 			if (*cp == '\0') {
1833 				ndp->ni_flag |= NAMEI_TRAILINGSLASH;
1834 				*ndp->ni_next = '\0';
1835 			}
1836 		}
1837 		ndp->ni_next = cp;
1838 
1839 		cnp->cn_flags &= ~(MAKEENTRY | ISLASTCN | ISDOTDOT);
1840 
1841 		if (*cp == '\0') {
1842 			cnp->cn_flags |= ISLASTCN;
1843 		}
1844 
1845 		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.') {
1846 			cnp->cn_flags |= ISDOTDOT;
1847 		}
1848 
1849 #if NAMEDRSRCFORK
1850 		/*
1851 		 * Process a request for a file's resource fork.
1852 		 *
1853 		 * Consume the _PATH_RSRCFORKSPEC suffix and tag the path.
1854 		 */
1855 		if ((ndp->ni_pathlen == sizeof(_PATH_RSRCFORKSPEC)) &&
1856 		    (cp[1] == '.' && cp[2] == '.') &&
1857 		    bcmp(cp, _PATH_RSRCFORKSPEC, sizeof(_PATH_RSRCFORKSPEC)) == 0) {
1858 			/* Skip volfs file systems that don't support native streams. */
1859 			if ((dmp != NULL) &&
1860 			    (dmp->mnt_flag & MNT_DOVOLFS) &&
1861 			    (dmp->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) {
1862 				goto skiprsrcfork;
1863 			}
1864 			cnp->cn_flags |= CN_WANTSRSRCFORK;
1865 			cnp->cn_flags |= ISLASTCN;
1866 			ndp->ni_next[0] = '\0';
1867 			ndp->ni_pathlen = 1;
1868 		}
1869 skiprsrcfork:
1870 #endif
1871 
1872 		*dp_authorized = 0;
1873 
1874 #if CONFIG_FIRMLINKS
1875 		if ((cnp->cn_flags & ISDOTDOT) && (dp->v_flag & VFMLINKTARGET) && dp->v_fmlink) {
1876 			/*
1877 			 * If this is a firmlink target then dp has to be switched to the
1878 			 * firmlink "source" before exiting this loop.
1879 			 *
1880 			 * For a firmlink "target", the policy is to pick the parent of the
1881 			 * firmlink "source" as the parent. This means that you can never
1882 			 * get to the "real" parent of firmlink target via a dotdot lookup.
1883 			 */
1884 			vnode_t v_fmlink = dp->v_fmlink;
1885 			uint32_t old_vid = vid;
1886 			mp = dmp;
1887 			if (v_fmlink) {
1888 				vid = v_fmlink->v_id;
1889 				dmp = v_fmlink->v_mount;
1890 				if ((dp->v_fmlink == v_fmlink) && dmp) {
1891 					dp = v_fmlink;
1892 				} else {
1893 					vid = old_vid;
1894 					dmp = mp;
1895 				}
1896 			}
1897 		}
1898 #endif
1899 
1900 
1901 		if (ttl_enabled &&
1902 		    (dmp->mnt_authcache_ttl == 0 ||
1903 		    ((tv.tv_sec - dp->v_cred_timestamp) > dmp->mnt_authcache_ttl))) {
1904 			break;
1905 		}
1906 
1907 		/*
1908 		 * NAME_CACHE_LOCK holds these fields stable
1909 		 *
1910 		 * We can't cache KAUTH_VNODE_SEARCHBYANYONE for root correctly
1911 		 * so we make an ugly check for root here. root is always
1912 		 * allowed and breaking out of here only to find out that it is
1913 		 * authorized by virtue of being root is very very expensive.
1914 		 * However, the check for not root is valid only for filesystems
1915 		 * which use local authorization.
1916 		 *
1917 		 * XXX: Remove the check for root when we can reliably set
1918 		 * KAUTH_VNODE_SEARCHBYANYONE as root.
1919 		 */
1920 		int v_authorized_actions = os_atomic_load(&dp->v_authorized_actions, relaxed);
1921 		if ((vnode_cred(dp) != ucred || !(v_authorized_actions & KAUTH_VNODE_SEARCH)) &&
1922 		    !(v_authorized_actions & KAUTH_VNODE_SEARCHBYANYONE) &&
1923 		    (ttl_enabled || !vfs_context_issuser(ctx))) {
1924 			break;
1925 		}
1926 
1927 		/*
1928 		 * indicate that we're allowed to traverse this directory...
1929 		 * even if we fail the cache lookup or decide to bail for
1930 		 * some other reason, this information is valid and is used
1931 		 * to avoid doing a vnode_authorize before the call to VNOP_LOOKUP
1932 		 */
1933 		*dp_authorized = 1;
1934 
1935 		if ((cnp->cn_flags & (ISLASTCN | ISDOTDOT))) {
1936 			if (cnp->cn_nameiop != LOOKUP) {
1937 				break;
1938 			}
1939 			if (cnp->cn_flags & LOCKPARENT) {
1940 				break;
1941 			}
1942 			if (cnp->cn_flags & NOCACHE) {
1943 				break;
1944 			}
1945 
1946 			if (cnp->cn_flags & ISDOTDOT) {
1947 				/*
1948 				 * Force directory hardlinks to go to
1949 				 * file system for ".." requests.
1950 				 */
1951 				if ((dp->v_flag & VISHARDLINK)) {
1952 					break;
1953 				}
1954 				/*
1955 				 * Quit here only if we can't use
1956 				 * the parent directory pointer or
1957 				 * don't have one.  Otherwise, we'll
1958 				 * use it below.
1959 				 */
1960 				if ((dp->v_flag & VROOT) ||
1961 				    dp == ndp->ni_rootdir ||
1962 				    dp->v_parent == NULLVP) {
1963 					break;
1964 				}
1965 			}
1966 		}
1967 
1968 		if ((cnp->cn_flags & CN_SKIPNAMECACHE)) {
1969 			/*
1970 			 * Force lookup to go to the filesystem with
1971 			 * all cnp fields set up.
1972 			 */
1973 			break;
1974 		}
1975 
1976 		/*
1977 		 * "." and ".." aren't supposed to be cached, so check
1978 		 * for them before checking the cache.
1979 		 */
1980 		if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
1981 			vp = dp;
1982 			vvid = vid;
1983 		} else if ((cnp->cn_flags & ISDOTDOT)) {
1984 			/*
1985 			 * If this is a chrooted process, we need to check if
1986 			 * the process is trying to break out of its chrooted
1987 			 * jail. We do that by trying to determine if dp is
1988 			 * a subdirectory of ndp->ni_rootdir. If we aren't
1989 			 * able to determine that by the v_parent pointers, we
1990 			 * will leave the fast path.
1991 			 *
1992 			 * Since this function may see dotdot components
1993 			 * many times and it has the name cache lock held for
1994 			 * the entire duration, we optimise this by doing this
1995 			 * check only once per cache_lookup_path call.
1996 			 * If dotdotchecked is set, it means we've done this
1997 			 * check once already and don't need to do it again.
1998 			 */
1999 			if (!locked && (ndp->ni_rootdir != rootvnode)) {
2000 				vfs_smr_leave();
2001 				needs_lock = true;
2002 				goto prep_lock_retry;
2003 			} else if (locked && !dotdotchecked && (ndp->ni_rootdir != rootvnode)) {
2004 				vnode_t tvp = dp;
2005 				boolean_t defer = FALSE;
2006 				boolean_t is_subdir = FALSE;
2007 
2008 				defer = cache_check_vnode_issubdir(tvp,
2009 				    ndp->ni_rootdir, &is_subdir, &tvp);
2010 
2011 				if (defer) {
2012 					/* defer to Filesystem */
2013 					break;
2014 				} else if (!is_subdir) {
2015 					/*
2016 					 * This process is trying to break out
2017 					 * of its chrooted jail, so all its
2018 					 * dotdot accesses will be translated to
2019 					 * its root directory.
2020 					 */
2021 					vp = ndp->ni_rootdir;
2022 				} else {
2023 					/*
2024 					 * All good, let this dotdot access
2025 					 * proceed normally
2026 					 */
2027 					vp = dp->v_parent;
2028 				}
2029 				dotdotchecked = TRUE;
2030 			} else {
2031 				vp = dp->v_parent;
2032 			}
2033 			if (!vp) {
2034 				break;
2035 			}
2036 			vvid = vp->v_id;
2037 		} else {
2038 			if (!locked) {
2039 				vp = cache_lookup_smr(dp, cnp, &vvid);
2040 				if (!vid_is_same(dp, vid)) {
2041 					vp = NULLVP;
2042 					needs_lock = true;
2043 					vfs_smr_leave();
2044 					goto prep_lock_retry;
2045 				}
2046 			} else {
2047 				vp = cache_lookup_locked(dp, cnp, &vvid);
2048 			}
2049 
2050 
2051 			if (!vp) {
2052 				break;
2053 			}
2054 
2055 			if ((vp->v_flag & VISHARDLINK)) {
2056 				/*
2057 				 * The file system wants a VNOP_LOOKUP on this vnode
2058 				 */
2059 				vp = NULL;
2060 				break;
2061 			}
2062 
2063 #if CONFIG_FIRMLINKS
2064 			vnode_t v_fmlink = vp->v_fmlink;
2065 			if (v_fmlink && !(vp->v_flag & VFMLINKTARGET)) {
2066 				if (cnp->cn_flags & CN_FIRMLINK_NOFOLLOW ||
2067 				    ((vp->v_type != VDIR) && (vp->v_type != VLNK))) {
2068 					/* Leave it to the filesystem */
2069 					vp = NULLVP;
2070 					break;
2071 				}
2072 
2073 				/*
2074 				 * Always switch to the target unless it is a VLNK
2075 				 * and it is the last component and we have NOFOLLOW
2076 				 * semantics
2077 				 */
2078 				if (vp->v_type == VDIR) {
2079 					vp = v_fmlink;
2080 					vvid = vnode_vid(vp);
2081 				} else if ((cnp->cn_flags & FOLLOW) ||
2082 				    (ndp->ni_flag & NAMEI_TRAILINGSLASH) || *ndp->ni_next == '/') {
2083 					if (ndp->ni_loopcnt >= MAXSYMLINKS - 1) {
2084 						vp = NULLVP;
2085 						break;
2086 					}
2087 					ndp->ni_loopcnt++;
2088 					vp = v_fmlink;
2089 					vvid = vnode_vid(vp);
2090 				}
2091 			}
2092 #endif
2093 		}
2094 		if ((cnp->cn_flags & ISLASTCN)) {
2095 			break;
2096 		}
2097 
2098 		if (vp->v_type != VDIR) {
2099 			if (vp->v_type != VLNK) {
2100 				vp = NULL;
2101 			}
2102 			break;
2103 		}
2104 
2105 		/*
2106 		 * v_mountedhere is PAC protected which means vp has to be a VDIR
2107 		 * to access that pointer as v_mountedhere. However, if we don't
2108 		 * have the name cache lock or an iocount (which we won't in the
2109 		 * !locked case) we can't guarantee that. So we try to detect it
2110 		 * via other fields to avoid having to dereference v_mountedhere
2111 		 * when we don't need to. Note that, in theory, if an entire
2112 		 * reclaim happens between the time we check
2113 		 * can_check_v_mountedhere() and the subsequent access, this can
2114 		 * still fail, but the fields we check make that exceedingly
2115 		 * unlikely; the chances of it happening are practically
2116 		 * (but not quite) zero.
2117 		 */
2118 		if ((locked || can_check_v_mountedhere(vp)) &&
2119 		    (mp = vp->v_mountedhere) && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) {
2120 			vnode_t tmp_vp;
2121 			int tmp_vid;
2122 
2123 			if (!(locked || vid_is_same(vp, vvid))) {
2124 				vp = NULL;
2125 				break;
2126 			}
2127 			tmp_vp = mp->mnt_realrootvp;
2128 			tmp_vid = mp->mnt_realrootvp_vid;
2129 			if (tmp_vp == NULLVP || mp->mnt_generation != mount_generation ||
2130 			    tmp_vid != tmp_vp->v_id) {
2131 				break;
2132 			}
2133 
2134 			if ((mp = tmp_vp->v_mount) == NULL) {
2135 				break;
2136 			}
2137 
2138 			vp = tmp_vp;
2139 			vvid = tmp_vid;
2140 			dmp = mp;
2141 			if (dmp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) {
2142 				ttl_enabled = TRUE;
2143 				microuptime(&tv);
2144 			} else {
2145 				ttl_enabled = FALSE;
2146 			}
2147 		}
2148 
2149 #if CONFIG_TRIGGERS
2150 		/*
2151 		 * After traversing all mountpoints stacked here, if we have a
2152 		 * trigger in hand, resolve it.  Note that we don't need to
2153 		 * leave the fast path if the mount has already happened.
2154 		 */
2155 		if (vp->v_resolve) {
2156 			break;
2157 		}
2158 #endif /* CONFIG_TRIGGERS */
2159 
2160 		if (!(locked || vid_is_same(vp, vvid))) {
2161 			vp = NULL;
2162 			break;
2163 		}
2164 
2165 		dp = vp;
2166 		vid = vvid;
2167 		vp = NULLVP;
2168 		vvid = 0;
2169 
2170 		cnp->cn_nameptr = ndp->ni_next + 1;
2171 		ndp->ni_pathlen--;
2172 		while (*cnp->cn_nameptr == '/') {
2173 			cnp->cn_nameptr++;
2174 			ndp->ni_pathlen--;
2175 		}
2176 	}
2177 	if (!locked) {
2178 		if (vp && !vnode_hold_smr(vp)) {
2179 			vp = NULLVP;
2180 			vvid = 0;
2181 		}
2182 		if (!vnode_hold_smr(dp)) {
2183 			vfs_smr_leave();
2184 			if (vp) {
2185 				vnode_drop(vp);
2186 				vp = NULLVP;
2187 				vvid = 0;
2188 			}
2189 			goto prep_lock_retry;
2190 		}
2191 		vfs_smr_leave();
2192 	} else {
2193 		if (vp != NULLVP) {
2194 			vvid = vp->v_id;
2195 			vnode_hold(vp);
2196 		}
2197 		vid = dp->v_id;
2198 
2199 		vnode_hold(dp);
2200 		NAME_CACHE_UNLOCK();
2201 	}
2202 
2203 	tdp = NULLVP;
2204 	if (!(cnp->cn_flags & DONOTAUTH) &&
2205 	    (vp != NULLVP) && (vp->v_type != VLNK) &&
2206 	    ((cnp->cn_flags & (ISLASTCN | LOCKPARENT | WANTPARENT | SAVESTART)) == ISLASTCN)) {
2207 		/*
2208 		 * if we've got a child and it's the last component, and
2209 		 * the lookup doesn't need to return the parent then we
2210 		 * can skip grabbing an iocount on the parent, since all
2211 		 * we're going to do with it is a vnode_put just before
2212 		 * we return from 'lookup'.  If it's a symbolic link,
2213 		 * we need the parent in case the link happens to be
2214 		 * a relative pathname.
2215 		 *
2216 		 * However, we can't make this optimisation if we have to call
2217 		 * a MAC hook.
2218 		 */
2219 		tdp = dp;
2220 		dp = NULLVP;
2221 	} else {
2222 need_dp:
2223 		/*
2224 		 * return the last directory we looked at
2225 		 * with an io reference held. If it was the one passed
2226 		 * in as a result of the last iteration of VNOP_LOOKUP,
2227 		 * it should already hold an io ref. No need to increase ref.
2228 		 */
2229 		if (last_dp != dp) {
2230 			if (dp == ndp->ni_usedvp) {
2231 				/*
2232 				 * if this vnode matches the one passed in via USEDVP
2233 				 * than this context already holds an io_count... just
2234 				 * then this context already holds an io_count... just
2235 				 * with... can't use the getwithvid variant here because
2236 				 * it will block behind a vnode_drain which would result
2237 				 * in a deadlock (since we already own an io_count that the
2238 				 * vnode_drain is waiting on)... vnode_get grabs the io_count
2239 				 * immediately w/o waiting... it always succeeds
2240 				 */
2241 				vnode_get(dp);
2242 			} else if ((error = vnode_getwithvid_drainok(dp, vid))) {
2243 				/*
2244 				 * failure indicates the vnode
2245 				 * changed identity or is being
2246 				 * TERMINATED... in either case
2247 				 * punt this lookup.
2248 				 *
2249 				 * don't necessarily return ENOENT, though, because
2250 				 * we really want to go back to disk and make sure it's
2251 				 * there or not if someone else is changing this
2252 				 * vnode. That being said, the one case where we do want
2253 				 * to return ENOENT is when the vnode's mount point is
2254 				 * in the process of unmounting and we might cause a deadlock
2255 				 * in our attempt to take an iocount. An ENODEV error return
2256 				 * is from vnode_get* is an indication this but we change that
2257 				 * from vnode_get* is an indication of this, but we change that to
2258 				 */
2259 				if (error == ENODEV) {
2260 					error = ENOENT;
2261 				} else {
2262 					error = ERECYCLE;
2263 				}
2264 				vnode_drop(dp);
2265 				if (vp) {
2266 					vnode_drop(vp);
2267 				}
2268 				goto errorout;
2269 			}
2270 			dp_iocount_taken = true;
2271 		}
2272 		vnode_drop(dp);
2273 	}
2274 
2275 #if CONFIG_MACF
2276 	/*
2277 	 * Name cache provides authorization caching (see below)
2278 	 * that will short circuit MAC checks in lookup().
2279 	 * We must perform MAC check here.  On denial
2280 	 * dp_authorized will remain 0 and second check will
2281 	 * be performed in lookup().
2282 	 */
2283 	if (!(cnp->cn_flags & DONOTAUTH)) {
2284 		error = mac_vnode_check_lookup(ctx, dp, cnp);
2285 		if (error) {
2286 			*dp_authorized = 0;
2287 			if (dp_iocount_taken) {
2288 				vnode_put(dp);
2289 			}
2290 			if (vp) {
2291 				vnode_drop(vp);
2292 				vp = NULLVP;
2293 			}
2294 			goto errorout;
2295 		}
2296 	}
2297 #endif /* MAC */
2298 
2299 	if (vp != NULLVP) {
2300 		if ((vnode_getwithvid_drainok(vp, vvid))) {
2301 			vnode_drop(vp);
2302 			vp = NULLVP;
2303 
2304 			/*
2305 			 * can't get reference on the vp we'd like
2306 			 * to return... if we didn't grab a reference
2307 			 * on the directory (due to fast path bypass),
2308 			 * then we need to do it now... we can't return
2309 			 * with both ni_dvp and ni_vp NULL, and no
2310 			 * error condition
2311 			 */
2312 			if (dp == NULLVP) {
2313 				dp = tdp;
2314 				tdp = NULLVP;
2315 				goto need_dp;
2316 			}
2317 		} else {
2318 			vnode_drop(vp);
2319 		}
2320 		if (dp_iocount_taken && vp && (vp->v_type != VLNK) &&
2321 		    ((cnp->cn_flags & (ISLASTCN | LOCKPARENT | WANTPARENT | SAVESTART)) == ISLASTCN)) {
2322 			vnode_put(dp);
2323 			dp = NULLVP;
2324 		}
2325 	}
2326 
2327 	if (tdp) {
2328 		vnode_drop(tdp);
2329 		tdp = NULLVP;
2330 	}
2331 
2332 	ndp->ni_dvp = dp;
2333 	ndp->ni_vp  = vp;
2334 
2335 #if CONFIG_TRIGGERS
2336 	trigger_vp = vp ? vp : dp;
2337 	if ((error == 0) && (trigger_vp != NULLVP) && vnode_isdir(trigger_vp)) {
2338 		error = vnode_trigger_resolve(trigger_vp, ndp, ctx);
2339 		if (error) {
2340 			if (vp) {
2341 				vnode_put(vp);
2342 			}
2343 			if (dp) {
2344 				vnode_put(dp);
2345 			}
2346 			goto errorout;
2347 		}
2348 	}
2349 #endif /* CONFIG_TRIGGERS */
2350 
2351 errorout:
2352 	/*
2353 	 * If we came into cache_lookup_path after an iteration of the lookup loop that
2354 	 * resulted in a call to VNOP_LOOKUP, then VNOP_LOOKUP returned a vnode with a io ref
2355 	 * resulted in a call to VNOP_LOOKUP, then VNOP_LOOKUP returned a vnode with an io ref
2356 	 * when it is no longer needed.  If we get to this point, and last_dp is not NULL
2357 	 * and it is ALSO not the dvp we want to return to caller of this function, it MUST be
2358 	 * the case that we got to a subsequent path component and this previous vnode is
2359 	 * no longer needed.  We can then drop the io ref on it.
2360 	 */
2361 	if ((last_dp != NULLVP) && (last_dp != ndp->ni_dvp)) {
2362 		vnode_put(last_dp);
2363 	}
2364 
2365 	// error was initialized to 0 and should still be 0 if no error cases occurred.
2366 	return error;
2367 
2368 prep_lock_retry:
2369 	restore_ndp_state(ndp, cnp, &saved_state);
2370 	dp = start_dp;
2371 	goto retry;
2372 }
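
/*
 * Illustrative sketch (not part of the build): roughly how a caller such as
 * lookup() in vfs_lookup.c is expected to consume cache_lookup_path().  The
 * real caller does considerably more work; the label and error handling
 * below are a simplified, hypothetical rendering of the contract documented
 * above the function.
 *
 *	int dp_authorized = 0;
 *
 *	error = cache_lookup_path(ndp, cnp, dp, ctx, &dp_authorized, last_dp);
 *	if (error == ERECYCLE) {
 *		// a vnode was recycled underneath us: re-drive the whole
 *		// translation from namei() rather than failing the lookup
 *		goto restart_from_namei;
 *	}
 *	if (error) {
 *		return error;   // e.g. ENOENT when the volume is unmounting
 *	}
 *	// on success, ndp->ni_dvp and ndp->ni_vp carry whatever iocounts were
 *	// granted, and dp_authorized says whether the directory search was
 *	// already authorized from the cache
 */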
2373 
2374 
2375 static vnode_t
2376 cache_lookup_locked(vnode_t dvp, struct componentname *cnp, uint32_t *vidp)
2377 {
2378 	struct namecache *ncp;
2379 	long namelen = cnp->cn_namelen;
2380 	unsigned int hashval = cnp->cn_hash;
2381 
2382 	if (nc_disabled) {
2383 		return NULL;
2384 	}
2385 
2386 	smrq_serialized_foreach(ncp, NCHHASH(dvp, cnp->cn_hash), nc_hash) {
2387 		if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
2388 			if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) {
2389 				break;
2390 			}
2391 		}
2392 	}
2393 	if (ncp == 0) {
2394 		/*
2395 		 * We failed to find an entry
2396 		 */
2397 		NCHSTAT(ncs_miss);
2398 		NC_SMR_STATS(clp_next_fail);
2399 		return NULL;
2400 	}
2401 	NCHSTAT(ncs_goodhits);
2402 
2403 	if (!ncp->nc_vp) {
2404 		return NULL;
2405 	}
2406 
2407 	*vidp = ncp->nc_vid;
2408 	NC_SMR_STATS(clp_next);
2409 
2410 	return ncp->nc_vp;
2411 }
2412 
2413 static vnode_t
2414 cache_lookup_smr(vnode_t dvp, struct componentname *cnp, uint32_t *vidp)
2415 {
2416 	struct namecache *ncp;
2417 	long namelen = cnp->cn_namelen;
2418 	unsigned int hashval = cnp->cn_hash;
2419 	vnode_t vp = NULLVP;
2420 	uint32_t vid = 0;
2421 	uint32_t counter = 1;
2422 
2423 	if (nc_disabled) {
2424 		return NULL;
2425 	}
2426 
2427 	smrq_entered_foreach(ncp, NCHHASH(dvp, cnp->cn_hash), nc_hash) {
2428 		counter = os_atomic_load(&ncp->nc_counter, acquire);
2429 		if (!(counter & NC_VALID)) {
2430 			ncp = NULL;
2431 			goto out;
2432 		}
2433 		if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
2434 			const char *nc_name =
2435 			    os_atomic_load(&ncp->nc_name, relaxed);
2436 			if (nc_name &&
2437 			    strncmp(nc_name, cnp->cn_nameptr, namelen) == 0 &&
2438 			    nc_name[namelen] == 0) {
2439 				break;
2440 			} else if (!nc_name) {
2441 				ncp = NULL;
2442 				goto out;
2443 			}
2444 		}
2445 	}
2446 
2447 	/* We failed to find an entry */
2448 	if (ncp == 0) {
2449 		goto out;
2450 	}
2451 
2452 	vp = ncp->nc_vp;
2453 	vid = ncp->nc_vid;
2454 
2455 	/*
2456 	 * The validity of vp and vid depends on the value of the counter being
2457 	 * the same when we read it first in the loop and now. Anything else
2458 	 * and we can't use this vp & vid.
2459 	 * Hopefully this ncp wasn't reused 2 billion times between the time
2460 	 * we read it first and when we read the counter value again.
2461 	 */
2462 	if (os_atomic_load(&ncp->nc_counter, acquire) != counter) {
2463 		vp = NULLVP;
2464 		goto out;
2465 	}
2466 
2467 	*vidp = vid;
2468 	NC_SMR_STATS(clp_smr_next);
2469 
2470 	return vp;
2471 
2472 out:
2473 	NC_SMR_STATS(clp_smr_next_fail);
2474 	return NULL;
2475 }
2476 
2477 
2478 unsigned int hash_string(const char *cp, int len);
2479 //
2480 // Have to take a len argument because we may only need to
2481 // hash part of a componentname.
2482 //
2483 unsigned int
2484 hash_string(const char *cp, int len)
2485 {
2486 	unsigned hash = 0;
2487 
2488 	if (len) {
2489 		while (len--) {
2490 			hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
2491 		}
2492 	} else {
2493 		while (*cp != '\0') {
2494 			hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8;
2495 		}
2496 	}
2497 	/*
2498 	 * the crc generator can legitimately generate
2499 	 * a 0... however, 0 for us means that we
2500 	 * haven't computed a hash, so use 1 instead
2501 	 */
2502 	if (hash == 0) {
2503 		hash = 1;
2504 	}
2505 	return hash;
2506 }
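
/*
 * Usage sketch (illustrative only): hash_string() hashes either an
 * explicit-length prefix of a buffer or, when len is 0, a full
 * NUL-terminated string.  Both call styles appear elsewhere in this file:
 *
 *	// hash just the current component of a path
 *	cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
 *
 *	// hash an entire NUL-terminated name (as resize_namecache() and
 *	// vfs_removename() do)
 *	hashval = hash_string(entry->nc_name, 0);
 */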
2507 
2508 
2509 /*
2510  * Lookup an entry in the cache
2511  *
2512  * We don't do this if the segment name is long, simply so the cache
2513  * can avoid holding long names (which would either waste space, or
2514  * add greatly to the complexity).
2515  *
2516  * Lookup is called with dvp pointing to the directory to search,
2517  * cnp pointing to the name of the entry being sought. If the lookup
2518  * succeeds, the vnode is returned in *vpp, and a status of -1 is
2519  * returned. If the lookup determines that the name does not exist
2520  * (negative caching), a status of ENOENT is returned. If the lookup
2521  * fails, a status of zero is returned.
2522  */
2523 
2524 static int
2525 cache_lookup_fallback(struct vnode *dvp, struct vnode **vpp,
2526     struct componentname *cnp, int flags)
2527 {
2528 	struct namecache *ncp;
2529 	long namelen = cnp->cn_namelen;
2530 	unsigned int hashval = cnp->cn_hash;
2531 	boolean_t       have_exclusive = FALSE;
2532 	uint32_t vid;
2533 	vnode_t  vp;
2534 
2535 	NAME_CACHE_LOCK_SHARED();
2536 
2537 relook:
2538 	smrq_serialized_foreach(ncp, NCHHASH(dvp, cnp->cn_hash), nc_hash) {
2539 		if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
2540 			if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) {
2541 				break;
2542 			}
2543 		}
2544 	}
2545 	/* We failed to find an entry */
2546 	if (ncp == 0) {
2547 		NCHSTAT(ncs_miss);
2548 		NAME_CACHE_UNLOCK();
2549 		return 0;
2550 	}
2551 
2552 	/* We don't want to have an entry, so dump it */
2553 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
2554 		if (have_exclusive == TRUE) {
2555 			NCHSTAT(ncs_badhits);
2556 			cache_delete(ncp, 1);
2557 			NAME_CACHE_UNLOCK();
2558 			return 0;
2559 		}
2560 		if (!NAME_CACHE_LOCK_SHARED_TO_EXCLUSIVE()) {
2561 			NAME_CACHE_LOCK();
2562 		}
2563 		have_exclusive = TRUE;
2564 		goto relook;
2565 	}
2566 	vp = ncp->nc_vp;
2567 
2568 	/* We found a "positive" match, return the vnode */
2569 	if (vp) {
2570 		NCHSTAT(ncs_goodhits);
2571 
2572 		vid = ncp->nc_vid;
2573 		vnode_hold(vp);
2574 		NAME_CACHE_UNLOCK();
2575 
2576 		if (vnode_getwithvid(vp, vid)) {
2577 			vnode_drop(vp);
2578 #if COLLECT_STATS
2579 			NAME_CACHE_LOCK();
2580 			NCHSTAT(ncs_badvid);
2581 			NAME_CACHE_UNLOCK();
2582 #endif
2583 			return 0;
2584 		}
2585 		vnode_drop(vp);
2586 		*vpp = vp;
2587 		NC_SMR_STATS(cl_lock_hits);
2588 		return -1;
2589 	}
2590 
2591 	/* We found a negative match, and want to create it, so purge */
2592 	if (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) {
2593 		if (have_exclusive == TRUE) {
2594 			NCHSTAT(ncs_badhits);
2595 			cache_delete(ncp, 1);
2596 			NAME_CACHE_UNLOCK();
2597 			/*
2598 			 * Even though we're purging the entry, it
2599 			 * may be useful to the caller to know that
2600 			 * we got a neg hit (to, for example, avoid
2601 			 * an expensive IPC/RPC).
2602 			 */
2603 			return (flags & CACHE_LOOKUP_ALLHITS) ? ENOENT : 0;
2604 		}
2605 		if (!NAME_CACHE_LOCK_SHARED_TO_EXCLUSIVE()) {
2606 			NAME_CACHE_LOCK();
2607 		}
2608 		have_exclusive = TRUE;
2609 		goto relook;
2610 	}
2611 
2612 	/*
2613 	 * We found a "negative" match, ENOENT notifies client of this match.
2614 	 */
2615 	NCHSTAT(ncs_neghits);
2616 
2617 	NAME_CACHE_UNLOCK();
2618 	return ENOENT;
2619 }
2620 
2621 
2622 
2623 /*
2624  * Lookup an entry in the cache
2625  *
2626  * Lookup is called with dvp pointing to the directory to search,
2627  * cnp pointing to the name of the entry being sought. If the lookup
2628  * succeeds, the vnode is returned in *vpp, and a status of -1 is
2629  * returned. If the lookup determines that the name does not exist
2630  * (negative caching), a status of ENOENT is returned. If the lookup
2631  * fails, a status of zero is returned.
2632  */
2633 int
2634 cache_lookup_ext(struct vnode *dvp, struct vnode **vpp,
2635     struct componentname *cnp, int flags)
2636 {
2637 	struct namecache *ncp;
2638 	long namelen = cnp->cn_namelen;
2639 	vnode_t  vp;
2640 	uint32_t vid = 0;
2641 	uint32_t counter = 1;
2642 	unsigned int hashval;
2643 
2644 	*vpp = NULLVP;
2645 
2646 	if (cnp->cn_hash == 0) {
2647 		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2648 	}
2649 	hashval = cnp->cn_hash;
2650 
2651 	if (nc_disabled) {
2652 		return 0;
2653 	}
2654 
2655 	if (!nc_smr_enabled) {
2656 		goto out_fallback;
2657 	}
2658 
2659 	/* We don't want to have an entry, so dump it */
2660 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
2661 		goto out_fallback;
2662 	}
2663 
2664 	vfs_smr_enter();
2665 
2666 	smrq_entered_foreach(ncp, NCHHASH(dvp, cnp->cn_hash), nc_hash) {
2667 		counter = os_atomic_load(&ncp->nc_counter, acquire);
2668 		if (!(counter & NC_VALID)) {
2669 			vfs_smr_leave();
2670 			goto out_fallback;
2671 		}
2672 		if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) {
2673 			const char *nc_name =
2674 			    os_atomic_load(&ncp->nc_name, relaxed);
2675 			if (nc_name &&
2676 			    strncmp(nc_name, cnp->cn_nameptr, namelen) == 0 &&
2677 			    nc_name[namelen] == 0) {
2678 				break;
2679 			} else if (!nc_name) {
2680 				vfs_smr_leave();
2681 				goto out_fallback;
2682 			}
2683 		}
2684 	}
2685 
2686 	/* We failed to find an entry */
2687 	if (ncp == 0) {
2688 		NCHSTAT(ncs_miss);
2689 		vfs_smr_leave();
2690 		NC_SMR_STATS(cl_smr_miss);
2691 		return 0;
2692 	}
2693 
2694 	vp = ncp->nc_vp;
2695 	vid = ncp->nc_vid;
2696 
2697 	/*
2698 	 * The validity of vp and vid depends on the value of the counter being
2699 	 * the same when we read it first in the loop and now. Anything else
2700 	 * and we can't use this vp & vid.
2701 	 * Hopefully this ncp wasn't reused 2 billion times between the time
2702 	 * we read it first and when we read the counter value again.
2703 	 */
2704 	if (os_atomic_load(&ncp->nc_counter, acquire) != counter) {
2705 		vfs_smr_leave();
2706 		goto out_fallback;
2707 	}
2708 
2709 	if (vp) {
2710 		bool holdcount_acquired = vnode_hold_smr(vp);
2711 
2712 		vfs_smr_leave();
2713 
2714 		if (!holdcount_acquired) {
2715 			goto out_fallback;
2716 		}
2717 
2718 		if (vnode_getwithvid(vp, vid) != 0) {
2719 			vnode_drop(vp);
2720 			goto out_fallback;
2721 		}
2722 		vnode_drop(vp);
2723 		NCHSTAT(ncs_goodhits);
2724 
2725 		*vpp = vp;
2726 		NC_SMR_STATS(cl_smr_hits);
2727 		return -1;
2728 	}
2729 
2730 	vfs_smr_leave();
2731 
2732 	/* We found a negative match, and want to create it, so purge */
2733 	if (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) {
2734 		goto out_fallback;
2735 	}
2736 
2737 	/*
2738 	 * We found a "negative" match, ENOENT notifies client of this match.
2739 	 */
2740 	NCHSTAT(ncs_neghits);
2741 	NC_SMR_STATS(cl_smr_negative_hits);
2742 	return ENOENT;
2743 
2744 out_fallback:
2745 	NC_SMR_STATS(cl_smr_fallback);
2746 	return cache_lookup_fallback(dvp, vpp, cnp, flags);
2747 }
2748 
2749 int
2750 cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
2751 {
2752 	return cache_lookup_ext(dvp, vpp, cnp, 0);
2753 }
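
/*
 * Illustrative sketch (not part of the build): how a filesystem's lookup
 * path might interpret the three cache_lookup() results documented above.
 * The fall-through to an on-disk directory scan is hypothetical and
 * filesystem specific.
 *
 *	switch (cache_lookup(dvp, vpp, cnp)) {
 *	case -1:
 *		return 0;          // positive hit, *vpp holds an iocount
 *	case ENOENT:
 *		return ENOENT;     // cached negative entry
 *	default:
 *		break;             // miss: fall through to the on-disk lookup
 *	}
 */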
2754 
2755 const char *
2756 cache_enter_create(vnode_t dvp, vnode_t vp, struct componentname *cnp)
2757 {
2758 	const char *strname;
2759 
2760 	if (cnp->cn_hash == 0) {
2761 		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2762 	}
2763 
2764 	/*
2765 	 * grab 2 references on the string entered
2766 	 * one for the cache_enter_locked to consume
2767 	 * and the second to be consumed by v_name (vnode_create call point)
2768 	 */
2769 	strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, TRUE, 0);
2770 
2771 	NAME_CACHE_LOCK();
2772 
2773 	cache_enter_locked(dvp, vp, cnp, strname);
2774 
2775 	NAME_CACHE_UNLOCK();
2776 
2777 	return strname;
2778 }
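
/*
 * Illustrative sketch (not part of the build): the string returned by
 * cache_enter_create() carries the extra reference destined for v_name, so
 * a hypothetical caller that ends up not handing the name to a newly
 * created vnode would be expected to drop that reference itself.  The
 * adopted_as_v_name flag below is purely illustrative.
 *
 *	const char *strname = cache_enter_create(dvp, vp, cnp);
 *	...
 *	if (strname != NULL && !adopted_as_v_name) {
 *		vfs_removename(strname);   // drop the extra reference
 *	}
 */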
2779 
2780 
2781 /*
2782  * Add an entry to the cache...
2783  * but first check to see if the directory
2784  * that this entry is to be associated with has
2785  * had any cache_purges applied since we took
2786  * our identity snapshot... this check needs to
2787  * be done behind the name cache lock
2788  */
2789 void
2790 cache_enter_with_gen(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int gen)
2791 {
2792 	if (cnp->cn_hash == 0) {
2793 		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2794 	}
2795 
2796 	NAME_CACHE_LOCK();
2797 
2798 	if (dvp->v_nc_generation == gen) {
2799 		(void)cache_enter_locked(dvp, vp, cnp, NULL);
2800 	}
2801 
2802 	NAME_CACHE_UNLOCK();
2803 }
2804 
2805 
2806 /*
2807  * Add an entry to the cache.
2808  */
2809 void
2810 cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2811 {
2812 	const char *strname;
2813 
2814 	if (cnp->cn_hash == 0) {
2815 		cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen);
2816 	}
2817 
2818 	/*
2819 	 * grab 1 reference on the string entered
2820 	 * for the cache_enter_locked to consume
2821 	 */
2822 	strname = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0);
2823 
2824 	NAME_CACHE_LOCK();
2825 
2826 	cache_enter_locked(dvp, vp, cnp, strname);
2827 
2828 	NAME_CACHE_UNLOCK();
2829 }
2830 
2831 
2832 static void
2833 cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, const char *strname)
2834 {
2835 	struct namecache *ncp, *negp;
2836 	struct smrq_list_head  *ncpp;
2837 
2838 	if (nc_disabled) {
2839 		return;
2840 	}
2841 
2842 	/*
2843 	 * if the entry is for negative caching, vp is NULL
2844 	 */
2845 	if ((vp != NULLVP) && (LIST_FIRST(&vp->v_nclinks))) {
2846 		/*
2847 		 * someone beat us to the punch..
2848 		 * this vnode is already in the cache
2849 		 */
2850 		if (strname != NULL) {
2851 			vfs_removename(strname);
2852 		}
2853 		return;
2854 	}
2855 	/*
2856 	 * We allocate a new entry if we are less than the maximum
2857 	 * allowed and the one at the front of the list is in use.
2858 	 * Otherwise we use the one at the front of the list.
2859 	 */
2860 	if (numcache < desiredNodes &&
2861 	    ((ncp = nchead.tqh_first) == NULL ||
2862 	    (ncp->nc_counter & NC_VALID))) {
2863 		/*
2864 		 * Allocate one more entry
2865 		 */
2866 		if (nc_smr_enabled) {
2867 			ncp = zalloc_smr(namecache_zone, Z_WAITOK_ZERO_NOFAIL);
2868 		} else {
2869 			ncp = zalloc(namecache_zone);
2870 		}
2871 		ncp->nc_counter = 0;
2872 		numcache++;
2873 	} else {
2874 		/*
2875 		 * reuse an old entry
2876 		 */
2877 		ncp = TAILQ_FIRST(&nchead);
2878 		TAILQ_REMOVE(&nchead, ncp, nc_entry);
2879 
2880 		if (ncp->nc_counter & NC_VALID) {
2881 			/*
2882 			 * still in use... we need to
2883 			 * delete it before re-using it
2884 			 */
2885 			NCHSTAT(ncs_stolen);
2886 			cache_delete(ncp, 0);
2887 		}
2888 	}
2889 	NCHSTAT(ncs_enters);
2890 
2891 	/*
2892 	 * Fill in cache info, if vp is NULL this is a "negative" cache entry.
2893 	 */
2894 	if (vp) {
2895 		ncp->nc_vid = vnode_vid(vp);
2896 		vnode_hold(vp);
2897 	}
2898 	ncp->nc_vp = vp;
2899 	ncp->nc_dvp = dvp;
2900 	ncp->nc_hashval = cnp->cn_hash;
2901 
2902 	if (strname == NULL) {
2903 		ncp->nc_name = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0);
2904 	} else {
2905 		ncp->nc_name = strname;
2906 	}
2907 
2908 	//
2909 	// If the bytes of the name associated with the vnode differ,
2910 	// use the name associated with the vnode since the file system
2911 	// may have set that explicitly in the case of a lookup on a
2912 	// case-insensitive file system where the case of the looked up
2913 	// name differs from what is on disk.  For more details, see:
2914 	//   <rdar://problem/8044697> FSEvents doesn't always decompose diacritical unicode chars in the paths of the changed directories
2915 	//
2916 	const char *vn_name = vp ? vp->v_name : NULL;
2917 	unsigned int len = vn_name ? (unsigned int)strlen(vn_name) : 0;
2918 	if (vn_name && ncp && ncp->nc_name && strncmp(ncp->nc_name, vn_name, len) != 0) {
2919 		unsigned int hash = hash_string(vn_name, len);
2920 
2921 		vfs_removename(ncp->nc_name);
2922 		ncp->nc_name = add_name_internal(vn_name, len, hash, FALSE, 0);
2923 		ncp->nc_hashval = hash;
2924 	}
2925 
2926 	/*
2927 	 * make us the newest entry in the cache
2928 	 * i.e. we'll be the last to be stolen
2929 	 */
2930 	TAILQ_INSERT_TAIL(&nchead, ncp, nc_entry);
2931 
2932 	ncpp = NCHHASH(dvp, cnp->cn_hash);
2933 #if DIAGNOSTIC
2934 	{
2935 		struct namecache *p;
2936 
2937 		smrq_serialized_foreach(p, ncpp, nc_hash) {
2938 			if (p == ncp) {
2939 				panic("cache_enter: duplicate");
2940 			}
2941 		}
2942 	}
2943 #endif
2944 	/*
2945 	 * make us available to be found via lookup
2946 	 */
2947 	smrq_serialized_insert_head(ncpp, &ncp->nc_hash);
2948 
2949 	if (vp) {
2950 		/*
2951 		 * add to the list of name cache entries
2952 		 * that point at vp
2953 		 */
2954 		LIST_INSERT_HEAD(&vp->v_nclinks, ncp, nc_un.nc_link);
2955 	} else {
2956 		/*
2957 		 * this is a negative cache entry (vp == NULL)
2958 		 * stick it on the negative cache list.
2959 		 */
2960 		TAILQ_INSERT_TAIL(&neghead, ncp, nc_un.nc_negentry);
2961 
2962 		ncs_negtotal++;
2963 
2964 		if (ncs_negtotal > desiredNegNodes) {
2965 			/*
2966 			 * if we've reached our desired limit
2967 			 * of negative cache entries, delete
2968 			 * the oldest
2969 			 */
2970 			negp = TAILQ_FIRST(&neghead);
2971 			cache_delete(negp, 1);
2972 		}
2973 	}
2974 
2975 	/*
2976 	 * add us to the list of name cache entries that
2977 	 * are children of dvp
2978 	 */
2979 	if (vp) {
2980 		TAILQ_INSERT_TAIL(&dvp->v_ncchildren, ncp, nc_child);
2981 	} else {
2982 		TAILQ_INSERT_HEAD(&dvp->v_ncchildren, ncp, nc_child);
2983 	}
2984 
2985 	/*
2986 	 * nc_counter represents a sequence counter and 1 bit valid flag.
2987 	 * nc_counter represents a sequence counter and a 1-bit valid flag.
2988 	 * namecache structure. We increment the value on every state transition
2989 	 * (invalid to valid (here) and valid to invalid (in cache delete).
2990 	 * (invalid to valid here, and valid to invalid in cache_delete()).
2991 	 * and ensure that the field is valid and remains the same after the fields
2992 	 * have been read.
2993 	 */
2994 	uint32_t old_count = os_atomic_inc_orig(&ncp->nc_counter, release);
2995 	if (old_count & NC_VALID) {
2996 		/* This is an invalid to valid transition */
2997 		panic("Incorrect state for old nc_counter(%d), should be even", old_count);
2998 	}
2999 }
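
/*
 * Illustrative sketch (not part of the build): the nc_counter protocol
 * described above, from a lockless reader's point of view.  This is
 * essentially the sequence used by cache_lookup_smr().
 *
 *	uint32_t c = os_atomic_load(&ncp->nc_counter, acquire);
 *	if (!(c & NC_VALID)) {
 *		// entry is invalid (or being torn down); fall back to the lock
 *	}
 *	vp  = ncp->nc_vp;       // speculative reads of the payload
 *	vid = ncp->nc_vid;
 *	if (os_atomic_load(&ncp->nc_counter, acquire) != c) {
 *		// entry changed state underneath us; discard vp and vid
 *	}
 */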
3000 
3001 
3002 /*
3003  * Initialize CRC-32 remainder table.
3004  */
3005 static void
3006 init_crc32(void)
3007 {
3008 	/*
3009 	 * the CRC-32 generator polynomial is:
3010 	 *   x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^10
3011 	 *        + x^8  + x^7  + x^5  + x^4  + x^2  + x + 1
3012 	 */
3013 	unsigned int crc32_polynomial = 0x04c11db7;
3014 	unsigned int i, j;
3015 
3016 	/*
3017 	 * pre-calculate the CRC-32 remainder for each possible octet encoding
3018 	 */
3019 	for (i = 0; i < 256; i++) {
3020 		unsigned int crc_rem = i << 24;
3021 
3022 		for (j = 0; j < 8; j++) {
3023 			if (crc_rem & 0x80000000) {
3024 				crc_rem = (crc_rem << 1) ^ crc32_polynomial;
3025 			} else {
3026 				crc_rem = (crc_rem << 1);
3027 			}
3028 		}
3029 		crc32tab[i] = crc_rem;
3030 	}
3031 }
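
/*
 * Worked example (illustrative only): crc32tab[i] holds the remainder of
 * (i << 24) after the eight reduction steps above, so a caller such as
 * hash_string() can fold one byte per table lookup:
 *
 *	hash = crc32tab[(hash >> 24) ^ (unsigned char)c] ^ (hash << 8);
 *
 * which is equivalent to XORing the byte into the top of the running
 * remainder and performing the eight conditional polynomial XORs bit by bit.
 */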
3032 
3033 
3034 /*
3035  * Name cache initialization, from vfs_init() when we are booting
3036  */
3037 void
3038 nchinit(void)
3039 {
3040 	desiredNegNodes = (desiredvnodes / 10);
3041 	desiredNodes = desiredvnodes + desiredNegNodes;
3042 
3043 	if (nc_smr_enabled) {
3044 		zone_enable_smr(namecache_zone, VFS_SMR(), &namecache_smr_free);
3045 		zone_enable_smr(stringcache_zone, VFS_SMR(), &string_smr_free);
3046 	}
3047 	TAILQ_INIT(&nchead);
3048 	TAILQ_INIT(&neghead);
3049 
3050 	init_crc32();
3051 
3052 	nchashtbl = hashinit(MAX(CONFIG_NC_HASH, (2 * desiredNodes)), M_CACHE, &nchash);
3053 	nchashmask = nchash;
3054 	nchash++;
3055 
3056 	init_string_table();
3057 
3058 	for (int i = 0; i < NUM_STRCACHE_LOCKS; i++) {
3059 		lck_mtx_init(&strcache_mtx_locks[i], &strcache_lck_grp, &strcache_lck_attr);
3060 	}
3061 }
3062 
3063 void
3064 name_cache_lock_shared(void)
3065 {
3066 	lck_rw_lock_shared(&namecache_rw_lock);
3067 	NC_SMR_STATS(nc_lock_shared);
3068 }
3069 
3070 void
3071 name_cache_lock(void)
3072 {
3073 	lck_rw_lock_exclusive(&namecache_rw_lock);
3074 	NC_SMR_STATS(nc_lock);
3075 }
3076 
3077 boolean_t
3078 name_cache_lock_shared_to_exclusive(void)
3079 {
3080 	return lck_rw_lock_shared_to_exclusive(&namecache_rw_lock);
3081 }
3082 
3083 void
3084 name_cache_unlock(void)
3085 {
3086 	lck_rw_done(&namecache_rw_lock);
3087 }
3088 
3089 
3090 int
3091 resize_namecache(int newsize)
3092 {
3093 	struct smrq_list_head   *new_table;
3094 	struct smrq_list_head   *old_table;
3095 	struct smrq_list_head   *old_head;
3096 	struct namecache    *entry;
3097 	uint32_t            i, hashval;
3098 	int                 dNodes, dNegNodes, nelements;
3099 	u_long              new_size, old_size;
3100 
3101 	if (newsize < 0) {
3102 		return EINVAL;
3103 	}
3104 
3105 	dNegNodes = (newsize / 10);
3106 	dNodes = newsize + dNegNodes;
3107 	// we don't support shrinking yet
3108 	if (dNodes <= desiredNodes) {
3109 		return 0;
3110 	}
3111 
3112 	if (os_mul_overflow(dNodes, 2, &nelements)) {
3113 		return EINVAL;
3114 	}
3115 
3116 	new_table = hashinit(nelements, M_CACHE, &nchashmask);
3117 	new_size  = nchashmask + 1;
3118 
3119 	if (new_table == NULL) {
3120 		return ENOMEM;
3121 	}
3122 
3123 	NAME_CACHE_LOCK();
3124 
3125 	/* No need to switch if the hash table size hasn't changed. */
3126 	if (new_size == nchash) {
3127 		NAME_CACHE_UNLOCK();
3128 		hashdestroy(new_table, M_CACHE, new_size - 1);
3129 		return 0;
3130 	}
3131 
3132 	// do the switch!
3133 	old_table = nchashtbl;
3134 	nchashtbl = new_table;
3135 	old_size  = nchash;
3136 	nchash    = new_size;
3137 
3138 	// walk the old table and insert all the entries into
3139 	// the new table
3140 	//
3141 	for (i = 0; i < old_size; i++) {
3142 		old_head = &old_table[i];
3143 		smrq_serialized_foreach_safe(entry, old_head, nc_hash) {
3144 			//
3145 			// XXXdbg - Beware: this assumes that hash_string() does
3146 			//                  the same thing as what happens in
3147 			//                  lookup() over in vfs_lookup.c
3148 			hashval = hash_string(entry->nc_name, 0);
3149 			entry->nc_hashval = hashval;
3150 
3151 			smrq_serialized_insert_head(NCHHASH(entry->nc_dvp, hashval), &entry->nc_hash);
3152 		}
3153 	}
3154 	desiredNodes = dNodes;
3155 	desiredNegNodes = dNegNodes;
3156 
3157 	NAME_CACHE_UNLOCK();
3158 	hashdestroy(old_table, M_CACHE, old_size - 1);
3159 
3160 	return 0;
3161 }
3162 
3163 static void
3164 namecache_smr_free(void *_ncp, __unused size_t _size)
3165 {
3166 	struct namecache *ncp = _ncp;
3167 
3168 	bzero(ncp, sizeof(*ncp));
3169 }
3170 
3171 static void
3172 cache_delete(struct namecache *ncp, int free_entry)
3173 {
3174 	NCHSTAT(ncs_deletes);
3175 
3176 	/*
3177 	 * See comment at the end of cache_enter_locked explaining the usage of
3178 	 * nc_counter.
3179 	 */
3180 	uint32_t old_count = os_atomic_inc_orig(&ncp->nc_counter, release);
3181 	if (!(old_count & NC_VALID)) {
3182 		/* This should be a valid to invalid transition */
3183 		panic("Incorrect state for old nc_counter(%d), should be odd", old_count);
3184 	}
3185 
3186 	if (ncp->nc_vp) {
3187 		LIST_REMOVE(ncp, nc_un.nc_link);
3188 	} else {
3189 		TAILQ_REMOVE(&neghead, ncp, nc_un.nc_negentry);
3190 		ncs_negtotal--;
3191 	}
3192 	TAILQ_REMOVE(&(ncp->nc_dvp->v_ncchildren), ncp, nc_child);
3193 
3194 	smrq_serialized_remove((NCHHASH(ncp->nc_dvp, ncp->nc_hashval)), &ncp->nc_hash);
3195 
3196 	const char *nc_name = ncp->nc_name;
3197 	ncp->nc_name = NULL;
3198 	vfs_removename(nc_name);
3199 	if (ncp->nc_vp) {
3200 		vnode_t vp = ncp->nc_vp;
3201 
3202 		ncp->nc_vp = NULLVP;
3203 		vnode_drop(vp);
3204 	}
3205 
3206 	if (free_entry) {
3207 		TAILQ_REMOVE(&nchead, ncp, nc_entry);
3208 		if (nc_smr_enabled) {
3209 			zfree_smr(namecache_zone, ncp);
3210 		} else {
3211 			zfree(namecache_zone, ncp);
3212 		}
3213 		numcache--;
3214 	}
3215 }
3216 
3217 
3218 /*
3219  * purge the entry associated with the
3220  * specified vnode from the name cache
3221  */
3222 static void
3223 cache_purge_locked(vnode_t vp, kauth_cred_t *credp)
3224 {
3225 	struct namecache *ncp;
3226 
3227 	*credp = NULL;
3228 	if ((LIST_FIRST(&vp->v_nclinks) == NULL) &&
3229 	    (TAILQ_FIRST(&vp->v_ncchildren) == NULL) &&
3230 	    (vnode_cred(vp) == NOCRED) &&
3231 	    (vp->v_parent == NULLVP)) {
3232 		return;
3233 	}
3234 
3235 	if (vp->v_parent) {
3236 		vp->v_parent->v_nc_generation++;
3237 	}
3238 
3239 	while ((ncp = LIST_FIRST(&vp->v_nclinks))) {
3240 		cache_delete(ncp, 1);
3241 	}
3242 
3243 	while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) {
3244 		cache_delete(ncp, 1);
3245 	}
3246 
3247 	/*
3248 	 * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held
3249 	 */
3250 	*credp = vnode_cred(vp);
3251 	vp->v_cred = NOCRED;
3252 	vp->v_authorized_actions = 0;
3253 }
3254 
3255 void
3256 cache_purge(vnode_t vp)
3257 {
3258 	kauth_cred_t tcred = NULL;
3259 
3260 	if ((LIST_FIRST(&vp->v_nclinks) == NULL) &&
3261 	    (TAILQ_FIRST(&vp->v_ncchildren) == NULL) &&
3262 	    (vnode_cred(vp) == NOCRED) &&
3263 	    (vp->v_parent == NULLVP)) {
3264 		return;
3265 	}
3266 
3267 	NAME_CACHE_LOCK();
3268 
3269 	cache_purge_locked(vp, &tcred);
3270 
3271 	NAME_CACHE_UNLOCK();
3272 
3273 	if (IS_VALID_CRED(tcred)) {
3274 		kauth_cred_unref(&tcred);
3275 	}
3276 }
3277 
3278 /*
3279  * Purge all negative cache entries that are children of the
3280  * given vnode.  A case-insensitive file system (or any file
3281  * system that has multiple equivalent names for the same
3282  * directory entry) can use this when creating or renaming
3283  * to remove negative entries that may no longer apply.
3284  */
3285 void
3286 cache_purge_negatives(vnode_t vp)
3287 {
3288 	struct namecache *ncp, *next_ncp;
3289 
3290 	NAME_CACHE_LOCK();
3291 
3292 	TAILQ_FOREACH_SAFE(ncp, &vp->v_ncchildren, nc_child, next_ncp) {
3293 		if (ncp->nc_vp) {
3294 			break;
3295 		}
3296 
3297 		cache_delete(ncp, 1);
3298 	}
3299 
3300 	NAME_CACHE_UNLOCK();
3301 }
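
/*
 * Illustrative sketch (not part of the build): a case-insensitive filesystem
 * might purge negatives after a successful create of "Foo" so that an older
 * negative entry for "foo" (or any other case variant) stops reporting
 * ENOENT.  The create call shown here is hypothetical.
 *
 *	error = hypothetical_fs_create(dvp, &vp, cnp, vap, ctx);
 *	if (error == 0) {
 *		cache_purge_negatives(dvp);
 *		cache_enter(dvp, vp, cnp);
 *	}
 */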
3302 
3303 /*
3304  * Flush all entries referencing a particular filesystem.
3305  *
3306  * Since we need to check it anyway, we will flush all the invalid
3307  * entries at the same time.
3308  */
3309 void
3310 cache_purgevfs(struct mount *mp)
3311 {
3312 	struct smrq_list_head *ncpp;
3313 	struct namecache *ncp;
3314 
3315 	NAME_CACHE_LOCK();
3316 	/* Scan hash tables for applicable entries */
3317 	for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) {
3318 restart:
3319 		smrq_serialized_foreach(ncp, ncpp, nc_hash) {
3320 			if (ncp->nc_dvp->v_mount == mp) {
3321 				cache_delete(ncp, 0);
3322 				goto restart;
3323 			}
3324 		}
3325 	}
3326 	NAME_CACHE_UNLOCK();
3327 }
3328 
3329 
3330 
3331 //
3332 // String ref routines
3333 //
3334 static LIST_HEAD(stringhead, string_t) * string_ref_table;
3335 static u_long   string_table_mask;
3336 static uint32_t filled_buckets = 0;
3337 
3338 
3339 
3340 
3341 static void
3342 resize_string_ref_table(void)
3343 {
3344 	struct stringhead *new_table;
3345 	struct stringhead *old_table;
3346 	struct stringhead *old_head, *head;
3347 	string_t          *entry, *next;
3348 	uint32_t           i, hashval;
3349 	u_long             new_mask, old_mask;
3350 
3351 	/*
3352 	 * need to hold the table lock exclusively
3353 	 * in order to grow the table... need to recheck
3354 	 * the need to resize again after we've taken
3355 	 * the lock exclusively in case some other thread
3356 	 * beat us to the punch
3357 	 */
3358 	lck_rw_lock_exclusive(&strtable_rw_lock);
3359 
3360 	if (4 * filled_buckets < ((string_table_mask + 1) * 3)) {
3361 		lck_rw_done(&strtable_rw_lock);
3362 		return;
3363 	}
3364 	assert(string_table_mask < INT32_MAX);
3365 	new_table = hashinit((int)(string_table_mask + 1) * 2, M_CACHE, &new_mask);
3366 
3367 	if (new_table == NULL) {
3368 		printf("failed to resize the hash table.\n");
3369 		lck_rw_done(&strtable_rw_lock);
3370 		return;
3371 	}
3372 
3373 	// do the switch!
3374 	old_table         = string_ref_table;
3375 	string_ref_table  = new_table;
3376 	old_mask          = string_table_mask;
3377 	string_table_mask = new_mask;
3378 	filled_buckets    = 0;
3379 
3380 	// walk the old table and insert all the entries into
3381 	// the new table
3382 	//
3383 	for (i = 0; i <= old_mask; i++) {
3384 		old_head = &old_table[i];
3385 		for (entry = old_head->lh_first; entry != NULL; entry = next) {
3386 			hashval = hash_string((const char *)entry->str, 0);
3387 			head = &string_ref_table[hashval & string_table_mask];
3388 			if (head->lh_first == NULL) {
3389 				filled_buckets++;
3390 			}
3391 			next = entry->hash_chain.le_next;
3392 			LIST_INSERT_HEAD(head, entry, hash_chain);
3393 		}
3394 	}
3395 	lck_rw_done(&strtable_rw_lock);
3396 
3397 	hashdestroy(old_table, M_CACHE, old_mask);
3398 }
3399 
3400 
3401 static void
3402 init_string_table(void)
3403 {
3404 	string_ref_table = hashinit(CONFIG_VFS_NAMES, M_CACHE, &string_table_mask);
3405 }
3406 
3407 
3408 const char *
3409 vfs_addname(const char *name, uint32_t len, u_int hashval, u_int flags)
3410 {
3411 	return add_name_internal(name, len, hashval, FALSE, flags);
3412 }
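
/*
 * Usage sketch (illustrative only): vfs_addname() returns a reference-counted,
 * interned copy of the name.  Each reference taken this way is expected to be
 * balanced by a vfs_removename() once the holder is done with it.
 *
 *	const char *name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen,
 *	    cnp->cn_hash, 0);
 *	...                       // keep and compare the shared string by pointer
 *	vfs_removename(name);     // drop our reference
 */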
3413 
3414 
3415 static const char *
3416 add_name_internal(const char *name, uint32_t len, u_int hashval, boolean_t need_extra_ref, __unused u_int flags)
3417 {
3418 	struct stringhead *head;
3419 	string_t          *entry;
3420 	uint32_t          chain_len = 0;
3421 	uint32_t          hash_index;
3422 	uint32_t          lock_index;
3423 	char              *ptr;
3424 
3425 	if (len > MAXPATHLEN) {
3426 		len = MAXPATHLEN;
3427 	}
3428 
3429 	/*
3430 	 * if the length already accounts for the null-byte, then
3431 	 * subtract one so later on we don't index past the end
3432 	 * of the string.
3433 	 */
3434 	if (len > 0 && name[len - 1] == '\0') {
3435 		len--;
3436 	}
3437 	if (hashval == 0) {
3438 		hashval = hash_string(name, len);
3439 	}
3440 
3441 	/*
3442 	 * take this lock 'shared' to keep the hash stable
3443 	 * if someone else decides to grow the pool they
3444 	 * will take this lock exclusively
3445 	 */
3446 	lck_rw_lock_shared(&strtable_rw_lock);
3447 
3448 	/*
3449 	 * If the table gets more than 3/4 full, resize it
3450 	 */
3451 	if (4 * filled_buckets >= ((string_table_mask + 1) * 3)) {
3452 		lck_rw_done(&strtable_rw_lock);
3453 
3454 		resize_string_ref_table();
3455 
3456 		lck_rw_lock_shared(&strtable_rw_lock);
3457 	}
3458 	hash_index = hashval & string_table_mask;
3459 	lock_index = hash_index % NUM_STRCACHE_LOCKS;
3460 
3461 	head = &string_ref_table[hash_index];
3462 
3463 	lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]);
3464 
3465 	for (entry = head->lh_first; entry != NULL; chain_len++, entry = entry->hash_chain.le_next) {
3466 		if (strncmp(entry->str, name, len) == 0 && entry->str[len] == 0) {
3467 			entry->refcount++;
3468 			break;
3469 		}
3470 	}
3471 	if (entry == NULL) {
3472 		const uint32_t buflen = len + 1;
3473 
3474 		lck_mtx_convert_spin(&strcache_mtx_locks[lock_index]);
3475 		/*
3476 		 * it wasn't already there so add it.
3477 		 */
3478 		if (nc_smr_enabled) {
3479 			entry = zalloc_smr(stringcache_zone, Z_WAITOK_ZERO_NOFAIL);
3480 		} else {
3481 			entry = zalloc(stringcache_zone);
3482 		}
3483 
3484 		if (head->lh_first == NULL) {
3485 			OSAddAtomic(1, &filled_buckets);
3486 		}
3487 		ptr = kalloc_data(buflen, Z_WAITOK);
3488 		strncpy(ptr, name, len);
3489 		ptr[len] = '\0';
3490 		entry->str = ptr;
3491 		entry->strbuflen = buflen;
3492 		entry->refcount = 1;
3493 		LIST_INSERT_HEAD(head, entry, hash_chain);
3494 	}
3495 	if (need_extra_ref == TRUE) {
3496 		entry->refcount++;
3497 	}
3498 
3499 	lck_mtx_unlock(&strcache_mtx_locks[lock_index]);
3500 	lck_rw_done(&strtable_rw_lock);
3501 
3502 	return (const char *)entry->str;
3503 }
3504 
3505 static void
3506 string_smr_free(void *_entry, __unused size_t size)
3507 {
3508 	string_t *entry = _entry;
3509 
3510 	kfree_data(entry->str, entry->strbuflen);
3511 	bzero(entry, sizeof(*entry));
3512 }
3513 
3514 int
3515 vfs_removename(const char *nameref)
3516 {
3517 	struct stringhead *head;
3518 	string_t          *entry;
3519 	uint32_t           hashval;
3520 	uint32_t           hash_index;
3521 	uint32_t           lock_index;
3522 	int                retval = ENOENT;
3523 
3524 	hashval = hash_string(nameref, 0);
3525 
3526 	/*
3527 	 * take this lock 'shared' to keep the hash stable
3528 	 * if someone else decides to grow the pool they
3529 	 * will take this lock exclusively
3530 	 */
3531 	lck_rw_lock_shared(&strtable_rw_lock);
3532 	/*
3533 	 * must compute the head behind the table lock
3534 	 * since the size and location of the table
3535 	 * can change on the fly
3536 	 */
3537 	hash_index = hashval & string_table_mask;
3538 	lock_index = hash_index % NUM_STRCACHE_LOCKS;
3539 
3540 	head = &string_ref_table[hash_index];
3541 
3542 	lck_mtx_lock_spin(&strcache_mtx_locks[lock_index]);
3543 
3544 	for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) {
3545 		if (entry->str == nameref) {
3546 			entry->refcount--;
3547 
3548 			if (entry->refcount == 0) {
3549 				LIST_REMOVE(entry, hash_chain);
3550 
3551 				if (head->lh_first == NULL) {
3552 					OSAddAtomic(-1, &filled_buckets);
3553 				}
3554 			} else {
3555 				entry = NULL;
3556 			}
3557 			retval = 0;
3558 			break;
3559 		}
3560 	}
3561 	lck_mtx_unlock(&strcache_mtx_locks[lock_index]);
3562 	lck_rw_done(&strtable_rw_lock);
3563 
3564 	if (entry) {
3565 		assert(entry->refcount == 0);
3566 		if (nc_smr_enabled) {
3567 			zfree_smr(stringcache_zone, entry);
3568 		} else {
3569 			kfree_data(entry->str, entry->strbuflen);
3570 			entry->str = NULL;
3571 			entry->strbuflen = 0;
3572 			zfree(stringcache_zone, entry);
3573 		}
3574 	}
3575 
3576 	return retval;
3577 }
3578 
3579 
3580 #ifdef DUMP_STRING_TABLE
3581 void
3582 dump_string_table(void)
3583 {
3584 	struct stringhead *head;
3585 	string_t          *entry;
3586 	u_long            i;
3587 
3588 	lck_rw_lock_shared(&strtable_rw_lock);
3589 
3590 	for (i = 0; i <= string_table_mask; i++) {
3591 		head = &string_ref_table[i];
3592 		for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) {
3593 			printf("%6d - %s\n", entry->refcount, entry->str);
3594 		}
3595 	}
3596 	lck_rw_done(&strtable_rw_lock);
3597 }
3598 #endif  /* DUMP_STRING_TABLE */
3599