xref: /xnu-8019.80.24/bsd/nfs/nfs_bio.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1989, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  *
33  * This code is derived from software contributed to Berkeley by
34  * Rick Macklem at The University of Guelph.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. All advertising materials mentioning features or use of this software
45  *    must display the following acknowledgement:
46  *	This product includes software developed by the University of
47  *	California, Berkeley and its contributors.
48  * 4. Neither the name of the University nor the names of its contributors
49  *    may be used to endorse or promote products derived from this software
50  *    without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62  * SUCH DAMAGE.
63  *
64  *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
65  * FreeBSD-Id: nfs_bio.c,v 1.44 1997/09/10 19:52:25 phk Exp $
66  */
67 
68 #include <nfs/nfs_conf.h>
69 #if CONFIG_NFS_CLIENT
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/resourcevar.h>
74 #include <sys/signalvar.h>
75 #include <sys/proc_internal.h>
76 #include <sys/kauth.h>
77 #include <sys/malloc.h>
78 #include <sys/vnode.h>
79 #include <sys/dirent.h>
80 #include <sys/mount_internal.h>
81 #include <sys/kernel.h>
82 #include <sys/ubc_internal.h>
83 #include <sys/uio_internal.h>
84 #include <sys/kpi_mbuf.h>
85 
86 #include <sys/vm.h>
87 #include <sys/vmparam.h>
88 
89 #include <sys/time.h>
90 #include <kern/clock.h>
91 #include <libkern/OSAtomic.h>
92 #include <kern/kalloc.h>
93 #include <kern/thread_call.h>
94 
95 #include <nfs/rpcv2.h>
96 #include <nfs/nfsproto.h>
97 #include <nfs/nfs.h>
98 #include <nfs/nfs_gss.h>
99 #include <nfs/nfsmount.h>
100 #include <nfs/nfsnode.h>
101 #include <sys/buf_internal.h>
102 #include <libkern/OSAtomic.h>
103 #include <os/refcnt.h>
104 
105 #define NFS_BIO_DBG(...) NFSCLNT_DBG(NFSCLNT_FAC_BIO, 7, ## __VA_ARGS__)
106 
107 kern_return_t   thread_terminate(thread_t); /* XXX */
108 
109 #define NFSBUFHASH(np, lbn)     \
110 	(&nfsbufhashtbl[((long)(np) / sizeof(*(np)) + (int)(lbn)) & nfsbufhash])
111 LIST_HEAD(nfsbufhashhead, nfsbuf) * nfsbufhashtbl;
112 struct nfsbuffreehead nfsbuffree, nfsbuffreemeta, nfsbufdelwri;
113 u_long nfsbufhash;
114 int nfsbufcnt, nfsbufmin, nfsbufmax, nfsbufmetacnt, nfsbufmetamax;
115 int nfsbuffreecnt, nfsbuffreemetacnt, nfsbufdelwricnt, nfsneedbuffer;
116 int nfs_nbdwrite;
117 int nfs_buf_timer_on = 0;
118 thread_t nfsbufdelwrithd = NULL;
119 
120 static ZONE_DECLARE(nfsbuf_zone, "NFS bio", sizeof(struct nfsbuf), ZC_NONE);
121 
122 static LCK_GRP_DECLARE(nfs_buf_lck_grp, "nfs buf");
123 LCK_MTX_DECLARE(nfs_buf_mutex, &nfs_buf_lck_grp);
124 
125 #define NFSBUF_FREE_PERIOD      30      /* seconds */
126 #define NFSBUF_LRU_STALE        120
127 #define NFSBUF_META_STALE       240
128 
129 /* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list */
130 #define LRU_TO_FREEUP                   6
131 /* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list */
132 #define META_TO_FREEUP                  3
133 /* total number of nfsbufs nfs_buf_freeup() should attempt to free */
134 #define TOTAL_TO_FREEUP                 (LRU_TO_FREEUP+META_TO_FREEUP)
135 /* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list when called from timer */
136 #define LRU_FREEUP_FRAC_ON_TIMER        8
137 /* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list when called from timer */
138 #define META_FREEUP_FRAC_ON_TIMER       16
139 /* fraction of total nfsbufs that nfsbuffreecnt should exceed before bothering to call nfs_buf_freeup() */
140 #define LRU_FREEUP_MIN_FRAC             4
141 /* fraction of total nfsbufs that nfsbuffreemetacnt should exceed before bothering to call nfs_buf_freeup() */
142 #define META_FREEUP_MIN_FRAC            2
143 
144 #define NFS_ROUND_BLOCK(p, blksize)         ((((uint64_t)(p) + blksize - 1) & ~((uint64_t)blksize - 1)) / blksize)
145 
146 #define NFS_BUF_FREEUP() \
147 	do { \
148 	/* only call nfs_buf_freeup() if it has work to do: */ \
149 	        if (((nfsbuffreecnt > nfsbufcnt/LRU_FREEUP_MIN_FRAC) || \
150 	             (nfsbuffreemetacnt > nfsbufcnt/META_FREEUP_MIN_FRAC)) && \
151 	            ((nfsbufcnt - TOTAL_TO_FREEUP) > nfsbufmin)) \
152 	                nfs_buf_freeup(0); \
153 	} while (0)
154 
/*
 * Build a page mask covering pages [0, page): every page bit below "page"
 * is set, bit "page" itself and everything above it is clear.
 * If "page" is beyond the capacity of the mask, return an all-ones mask.
 */
void
nfs_buf_pgs_get_page_mask(nfsbufpgs *nfsbp, off_t page)
{
	off_t page_pos = page / NBPGS_ELEMENT_PAGES; /* index of the element that holds bit "page" */
	off_t max_page = NBPGS_STRUCT_SIZE * 8;      /* total number of bits the mask can represent */
	NBPGS_ERASE(nfsbp);

	if (page >= max_page) {
		/* "page" doesn't fit in the mask: flip the erased mask to all ones */
		nfs_buf_pgs_bit_not(nfsbp);
		return;
	}

	/*
	 * Set bit "page" in its (otherwise zero) element, then decrement that
	 * element: this clears bit "page" and sets all the lower bits of the
	 * same element in one step.
	 */
	NBPGS_SET(nfsbp, page);
	nfsbp->pages[page_pos]--;
	/* all elements below page_pos lie entirely below "page": set every bit */
	for (off_t i = page_pos - 1; i >= 0; i--) {
		nfsbp->pages[i] = ~0;
	}
}
173 
174 void
nfs_buf_pgs_bit_not(nfsbufpgs * nfsbp)175 nfs_buf_pgs_bit_not(nfsbufpgs *nfsbp)
176 {
177 	for (uint32_t i = 0; i < NBPGS_ELEMENTS; i++) {
178 		nfsbp->pages[i] = ~nfsbp->pages[i];
179 	}
180 }
181 
182 void
nfs_buf_pgs_bit_and(nfsbufpgs * nfsbp_src1,nfsbufpgs * nfsbp_src2,nfsbufpgs * nfsbp_dst)183 nfs_buf_pgs_bit_and(nfsbufpgs *nfsbp_src1, nfsbufpgs *nfsbp_src2, nfsbufpgs *nfsbp_dst)
184 {
185 	for (uint32_t i = 0; i < NBPGS_ELEMENTS; i++) {
186 		nfsbp_dst->pages[i] = nfsbp_src1->pages[i] & nfsbp_src2->pages[i];
187 	}
188 }
189 
190 void
nfs_buf_pgs_set_pages_between(nfsbufpgs * nfsbp,off_t firstpg,off_t lastpg)191 nfs_buf_pgs_set_pages_between(nfsbufpgs *nfsbp, off_t firstpg, off_t lastpg)
192 {
193 	nfsbufpgs pagemaskfirst, pagemasklast;
194 
195 	nfs_buf_pgs_get_page_mask(&pagemasklast, lastpg);
196 	nfs_buf_pgs_get_page_mask(&pagemaskfirst, firstpg);
197 	nfs_buf_pgs_bit_not(&pagemaskfirst);
198 	nfs_buf_pgs_bit_and(&pagemaskfirst, &pagemasklast, nfsbp);
199 }
200 
201 int
nfs_buf_pgs_is_set(nfsbufpgs * nfsbp)202 nfs_buf_pgs_is_set(nfsbufpgs *nfsbp)
203 {
204 	for (uint32_t i = 0; i < NBPGS_ELEMENTS; i++) {
205 		if (nfsbp->pages[i] != 0) {
206 			return 1;
207 		}
208 	}
209 	return 0;
210 }
211 
212 /*
213  * Initialize nfsbuf lists
214  */
215 void
nfs_nbinit(void)216 nfs_nbinit(void)
217 {
218 	nfsbufcnt = nfsbufmetacnt =
219 	    nfsbuffreecnt = nfsbuffreemetacnt = nfsbufdelwricnt = 0;
220 	nfsbufmin = 128;
221 	/* size nfsbufmax to cover at most half sane_size (w/default buf size) */
222 	nfsbufmax = (int)(sane_size >> PAGE_SHIFT) / (2 * (NFS_RWSIZE >> PAGE_SHIFT));
223 	nfsbufmetamax = nfsbufmax / 4;
224 	nfsneedbuffer = 0;
225 	nfs_nbdwrite = 0;
226 
227 	nfsbufhashtbl = hashinit(nfsbufmax / 4, M_NFSBIO, &nfsbufhash);
228 	TAILQ_INIT(&nfsbuffree);
229 	TAILQ_INIT(&nfsbuffreemeta);
230 	TAILQ_INIT(&nfsbufdelwri);
231 }
232 
233 /*
234  * Check periodically for stale/unused nfs bufs
235  */
236 void
nfs_buf_timer(__unused void * param0,__unused void * param1)237 nfs_buf_timer(__unused void *param0, __unused void *param1)
238 {
239 	nfs_buf_freeup(1);
240 
241 	lck_mtx_lock(&nfs_buf_mutex);
242 	if (nfsbufcnt <= nfsbufmin) {
243 		nfs_buf_timer_on = 0;
244 		lck_mtx_unlock(&nfs_buf_mutex);
245 		return;
246 	}
247 	lck_mtx_unlock(&nfs_buf_mutex);
248 
249 	nfs_interval_timer_start(nfs_buf_timer_call,
250 	    NFSBUF_FREE_PERIOD * 1000);
251 }
252 
253 /*
254  * try to free up some excess, unused nfsbufs
255  */
256 void
nfs_buf_freeup(int timer)257 nfs_buf_freeup(int timer)
258 {
259 	struct nfsbuf *fbp;
260 	struct timeval now;
261 	int count;
262 	struct nfsbuffreehead nfsbuffreeup;
263 
264 	TAILQ_INIT(&nfsbuffreeup);
265 
266 	lck_mtx_lock(&nfs_buf_mutex);
267 
268 	microuptime(&now);
269 
270 	FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);
271 
272 	count = timer ? nfsbuffreecnt / LRU_FREEUP_FRAC_ON_TIMER : LRU_TO_FREEUP;
273 	while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
274 		fbp = TAILQ_FIRST(&nfsbuffree);
275 		if (!fbp) {
276 			break;
277 		}
278 		if (os_ref_get_count(&fbp->nb_refs) > 1) {
279 			break;
280 		}
281 		if (NBUFSTAMPVALID(fbp) &&
282 		    (fbp->nb_timestamp + (2 * NFSBUF_LRU_STALE)) > now.tv_sec) {
283 			break;
284 		}
285 		nfs_buf_remfree(fbp);
286 		/* disassociate buffer from any nfsnode */
287 		if (fbp->nb_np) {
288 			if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
289 				LIST_REMOVE(fbp, nb_vnbufs);
290 				fbp->nb_vnbufs.le_next = NFSNOLIST;
291 			}
292 			fbp->nb_np = NULL;
293 		}
294 		LIST_REMOVE(fbp, nb_hash);
295 		TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
296 		nfsbufcnt--;
297 	}
298 
299 	count = timer ? nfsbuffreemetacnt / META_FREEUP_FRAC_ON_TIMER : META_TO_FREEUP;
300 	while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
301 		fbp = TAILQ_FIRST(&nfsbuffreemeta);
302 		if (!fbp) {
303 			break;
304 		}
305 		if (os_ref_get_count(&fbp->nb_refs) > 1) {
306 			break;
307 		}
308 		if (NBUFSTAMPVALID(fbp) &&
309 		    (fbp->nb_timestamp + (2 * NFSBUF_META_STALE)) > now.tv_sec) {
310 			break;
311 		}
312 		nfs_buf_remfree(fbp);
313 		/* disassociate buffer from any nfsnode */
314 		if (fbp->nb_np) {
315 			if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
316 				LIST_REMOVE(fbp, nb_vnbufs);
317 				fbp->nb_vnbufs.le_next = NFSNOLIST;
318 			}
319 			fbp->nb_np = NULL;
320 		}
321 		LIST_REMOVE(fbp, nb_hash);
322 		TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
323 		nfsbufcnt--;
324 		nfsbufmetacnt--;
325 	}
326 
327 	FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);
328 	NFSBUFCNTCHK();
329 
330 	lck_mtx_unlock(&nfs_buf_mutex);
331 
332 	while ((fbp = TAILQ_FIRST(&nfsbuffreeup))) {
333 		TAILQ_REMOVE(&nfsbuffreeup, fbp, nb_free);
334 		/* nuke any creds */
335 		if (IS_VALID_CRED(fbp->nb_rcred)) {
336 			kauth_cred_unref(&fbp->nb_rcred);
337 		}
338 		if (IS_VALID_CRED(fbp->nb_wcred)) {
339 			kauth_cred_unref(&fbp->nb_wcred);
340 		}
341 		/* if buf was NB_META, dump buffer */
342 		if (ISSET(fbp->nb_flags, NB_META) && fbp->nb_data) {
343 			kfree_data(fbp->nb_data, fbp->nb_bufsize);
344 		}
345 		NFS_ZFREE(nfsbuf_zone, fbp);
346 	}
347 }
348 
349 /*
350  * remove a buffer from the freelist
351  * (must be called with nfs_buf_mutex held)
352  */
353 void
nfs_buf_remfree(struct nfsbuf * bp)354 nfs_buf_remfree(struct nfsbuf *bp)
355 {
356 	if (bp->nb_free.tqe_next == NFSNOLIST) {
357 		panic("nfsbuf not on free list");
358 	}
359 	if (ISSET(bp->nb_flags, NB_DELWRI)) {
360 		nfsbufdelwricnt--;
361 		TAILQ_REMOVE(&nfsbufdelwri, bp, nb_free);
362 	} else if (ISSET(bp->nb_flags, NB_META)) {
363 		nfsbuffreemetacnt--;
364 		TAILQ_REMOVE(&nfsbuffreemeta, bp, nb_free);
365 	} else {
366 		nfsbuffreecnt--;
367 		TAILQ_REMOVE(&nfsbuffree, bp, nb_free);
368 	}
369 	bp->nb_free.tqe_next = NFSNOLIST;
370 	NFSBUFCNTCHK();
371 }
372 
373 /*
374  * check for existence of nfsbuf in cache
375  */
376 boolean_t
nfs_buf_is_incore(nfsnode_t np,daddr64_t blkno)377 nfs_buf_is_incore(nfsnode_t np, daddr64_t blkno)
378 {
379 	boolean_t rv;
380 	lck_mtx_lock(&nfs_buf_mutex);
381 	if (nfs_buf_incore(np, blkno)) {
382 		rv = TRUE;
383 	} else {
384 		rv = FALSE;
385 	}
386 	lck_mtx_unlock(&nfs_buf_mutex);
387 	return rv;
388 }
389 
390 /*
391  * return incore buffer (must be called with nfs_buf_mutex held)
392  */
393 struct nfsbuf *
nfs_buf_incore(nfsnode_t np,daddr64_t blkno)394 nfs_buf_incore(nfsnode_t np, daddr64_t blkno)
395 {
396 	/* Search hash chain */
397 	struct nfsbuf * bp = NFSBUFHASH(np, blkno)->lh_first;
398 	for (; bp != NULL; bp = bp->nb_hash.le_next) {
399 		if ((bp->nb_lblkno == blkno) && (bp->nb_np == np)) {
400 			if (!ISSET(bp->nb_flags, NB_INVAL)) {
401 				FSDBG(547, bp, blkno, bp->nb_flags, bp->nb_np);
402 				return bp;
403 			}
404 		}
405 	}
406 	return NULL;
407 }
408 
/*
 * Check if it's OK to drop a page.
 *
 * Called by vnode_pager() on pageout request of non-dirty page.
 * We need to make sure that it's not part of a delayed write.
 * If it is, we can't let the VM drop it because we may need it
 * later when/if we need to write the data (again).
 *
 * Returns 0 if the page may be dropped, EBUSY if it must be kept,
 * ENXIO if the mount is gone.
 */
int
nfs_buf_page_inval_internal(vnode_t vp, off_t offset)
{
	struct nfsmount *nmp = VTONMP(vp);
	struct nfsbuf *bp;
	int error = 0;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	lck_mtx_lock(&nfs_buf_mutex);
	/* look up the cached buffer covering this offset (blocks are nm_biosize bytes) */
	bp = nfs_buf_incore(VTONFS(vp), (daddr64_t)(offset / nmp->nm_biosize));
	if (!bp) {
		/* no buffer for this page: safe to drop */
		goto out;
	}
	FSDBG(325, bp, bp->nb_flags, bp->nb_dirtyoff, bp->nb_dirtyend);
	if (ISSET(bp->nb_lflags, NBL_BUSY)) {
		/* buffer is in use elsewhere: can't let the page go now */
		error = EBUSY;
		goto out;
	}
	/*
	 * If there's a dirty range in the buffer, check to
	 * see if this page intersects with the dirty range.
	 * If it does, we can't let the pager drop the page.
	 */
	if (bp->nb_dirtyend > 0) {
		off_t start = offset - NBOFF(bp);
		if ((bp->nb_dirtyend > start) &&
		    (bp->nb_dirtyoff < (start + PAGE_SIZE))) {
			/*
			 * Before returning the bad news, move the
			 * buffer to the start of the delwri list and
			 * give the list a push to try to flush the
			 * buffer out.
			 */
			error = EBUSY;
			nfs_buf_remfree(bp);
			TAILQ_INSERT_HEAD(&nfsbufdelwri, bp, nb_free);
			nfsbufdelwricnt++;
			nfs_buf_delwri_push(1);
		}
	}
out:
	lck_mtx_unlock(&nfs_buf_mutex);
	return error;
}
464 
465 /*
466  * set up the UPL for a buffer
467  * (must NOT be called with nfs_buf_mutex held)
468  */
469 int
nfs_buf_upl_setup(struct nfsbuf * bp)470 nfs_buf_upl_setup(struct nfsbuf *bp)
471 {
472 	kern_return_t kret;
473 	upl_t upl;
474 	int upl_flags;
475 
476 	if (ISSET(bp->nb_flags, NB_PAGELIST)) {
477 		return 0;
478 	}
479 
480 	upl_flags = UPL_PRECIOUS;
481 	if (!ISSET(bp->nb_flags, NB_READ)) {
482 		/*
483 		 * We're doing a "write", so we intend to modify
484 		 * the pages we're gathering.
485 		 */
486 		upl_flags |= UPL_WILL_MODIFY;
487 	}
488 	kret = ubc_create_upl_kernel(NFSTOV(bp->nb_np), NBOFF(bp), bp->nb_bufsize,
489 	    &upl, NULL, upl_flags, VM_KERN_MEMORY_FILE);
490 	if (kret == KERN_INVALID_ARGUMENT) {
491 		/* vm object probably doesn't exist any more */
492 		bp->nb_pagelist = NULL;
493 		return EINVAL;
494 	}
495 	if (kret != KERN_SUCCESS) {
496 		printf("nfs_buf_upl_setup(): failed to get pagelist %d\n", kret);
497 		bp->nb_pagelist = NULL;
498 		return EIO;
499 	}
500 
501 	FSDBG(538, bp, NBOFF(bp), bp->nb_bufsize, bp->nb_np);
502 
503 	bp->nb_pagelist = upl;
504 	SET(bp->nb_flags, NB_PAGELIST);
505 	return 0;
506 }
507 
/*
 * update buffer's valid/dirty info from UBC
 * (must NOT be called with nfs_buf_mutex held)
 */
void
nfs_buf_upl_check(struct nfsbuf *bp)
{
	upl_page_info_t *pl;
	off_t filesize, fileoffset;
	int i, npages;

	if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
		/* no UPL to consult */
		return;
	}

	npages = round_page_32(bp->nb_bufsize) / PAGE_SIZE;
	filesize = ubc_getsize(NFSTOV(bp->nb_np));
	fileoffset = NBOFF(bp);
	/* start by assuming fully cached if the buffer begins before EOF */
	if (fileoffset < filesize) {
		SET(bp->nb_flags, NB_CACHE);
	} else {
		CLR(bp->nb_flags, NB_CACHE);
	}

	/* rebuild the per-page valid/dirty masks from the UPL's page info */
	pl = ubc_upl_pageinfo(bp->nb_pagelist);
	NBPGS_ERASE(&bp->nb_valid);
	NBPGS_ERASE(&bp->nb_dirty);

	for (i = 0; i < npages; i++, fileoffset += PAGE_SIZE_64) {
		/* anything beyond the end of the file is not valid or dirty */
		if (fileoffset >= filesize) {
			break;
		}
		if (!upl_valid_page(pl, i)) {
			/* any invalid page means the buffer is not fully cached */
			CLR(bp->nb_flags, NB_CACHE);
			continue;
		}
		NBPGVALID_SET(bp, i);
		if (upl_dirty_page(pl, i)) {
			NBPGDIRTY_SET(bp, i);
		}
	}
	fileoffset = NBOFF(bp);
	if (ISSET(bp->nb_flags, NB_CACHE)) {
		/* fully cached: valid byte range is the whole buffer, clipped to EOF */
		bp->nb_validoff = 0;
		bp->nb_validend = bp->nb_bufsize;
		if (fileoffset + bp->nb_validend > filesize) {
			bp->nb_validend = filesize - fileoffset;
		}
	} else {
		/* not fully cached: no contiguous valid byte range */
		bp->nb_validoff = bp->nb_validend = -1;
	}
	FSDBG(539, bp, fileoffset, bp->nb_valid, bp->nb_dirty);
	FSDBG(539, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
}
563 
564 /*
565  * make sure that a buffer is mapped
566  * (must NOT be called with nfs_buf_mutex held)
567  */
568 int
nfs_buf_map(struct nfsbuf * bp)569 nfs_buf_map(struct nfsbuf *bp)
570 {
571 	kern_return_t kret;
572 
573 	if (bp->nb_data) {
574 		return 0;
575 	}
576 	if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
577 		return EINVAL;
578 	}
579 
580 	kret = ubc_upl_map(bp->nb_pagelist, (vm_offset_t *)&(bp->nb_data));
581 	if (kret != KERN_SUCCESS) {
582 		panic("nfs_buf_map: ubc_upl_map() failed with (%d)", kret);
583 	}
584 	if (bp->nb_data == 0) {
585 		panic("ubc_upl_map mapped 0");
586 	}
587 	FSDBG(540, bp, bp->nb_flags, NBOFF(bp), bp->nb_data);
588 	return 0;
589 }
590 
591 /*
592  * normalize an nfsbuf's valid range
593  *
594  * the read/write code guarantees that we'll always have a valid
595  * region that is an integral number of pages.  If either end
596  * of the valid range isn't page-aligned, it gets corrected
597  * here as we extend the valid range through all of the
598  * contiguous valid pages.
599  */
600 void
nfs_buf_normalize_valid_range(nfsnode_t np,struct nfsbuf * bp)601 nfs_buf_normalize_valid_range(nfsnode_t np, struct nfsbuf *bp)
602 {
603 	off_t pg, npg;
604 	/* pull validoff back to start of contiguous valid page range */
605 	pg = bp->nb_validoff / PAGE_SIZE;
606 	while (pg >= 0 && NBPGVALID(bp, pg)) {
607 		pg--;
608 	}
609 	bp->nb_validoff = (pg + 1) * PAGE_SIZE;
610 	/* push validend forward to end of contiguous valid page range */
611 	npg = bp->nb_bufsize / PAGE_SIZE;
612 	pg = bp->nb_validend / PAGE_SIZE;
613 	while (pg < npg && NBPGVALID(bp, pg)) {
614 		pg++;
615 	}
616 	bp->nb_validend = pg * PAGE_SIZE;
617 	/* clip to EOF */
618 	if (NBOFF(bp) + bp->nb_validend > (off_t)np->n_size) {
619 		bp->nb_validend = np->n_size % bp->nb_bufsize;
620 	}
621 }
622 
/*
 * process some entries on the delayed write queue
 * (must be called with nfs_buf_mutex held)
 *
 * The mutex is dropped around the commit/write calls and reacquired
 * before each subsequent iteration.
 */
void
nfs_buf_delwri_service(void)
{
	struct nfsbuf *bp;
	nfsnode_t np;
	int error, i = 0;

	/* process at most 8 buffers per call */
	while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) {
		np = bp->nb_np;
		nfs_buf_remfree(bp);
		/* take a reference while spinning to acquire the buffer */
		nfs_buf_refget(bp);
		while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN) {
			;
		}
		nfs_buf_refrele(bp);
		if (error) {
			break;
		}
		if (!bp->nb_np) {
			/* buffer is no longer valid */
			nfs_buf_drop(bp);
			continue;
		}
		if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
			/* may clear NB_NEEDCOMMIT if the server's write verifier changed */
			nfs_buf_check_write_verifier(np, bp);
		}
		if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
			/* put buffer at end of delwri list */
			TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
			nfsbufdelwricnt++;
			nfs_buf_drop(bp);
			/* drop the mutex across the commit */
			lck_mtx_unlock(&nfs_buf_mutex);
			nfs_flushcommits(np, 1);
		} else {
			SET(bp->nb_flags, NB_ASYNC);
			/* drop the mutex across the async write */
			lck_mtx_unlock(&nfs_buf_mutex);
			nfs_buf_write(bp);
		}
		i++;
		lck_mtx_lock(&nfs_buf_mutex);
	}
}
669 
670 /*
671  * thread to service the delayed write queue when asked
672  */
673 void
nfs_buf_delwri_thread(__unused void * arg,__unused wait_result_t wr)674 nfs_buf_delwri_thread(__unused void *arg, __unused wait_result_t wr)
675 {
676 	struct timespec ts = { .tv_sec = 30, .tv_nsec = 0 };
677 	int error = 0;
678 
679 	lck_mtx_lock(&nfs_buf_mutex);
680 	while (!error) {
681 		nfs_buf_delwri_service();
682 		error = msleep(&nfsbufdelwrithd, &nfs_buf_mutex, 0, "nfsbufdelwri", &ts);
683 	}
684 	nfsbufdelwrithd = NULL;
685 	lck_mtx_unlock(&nfs_buf_mutex);
686 	thread_terminate(nfsbufdelwrithd);
687 }
688 
/*
 * try to push out some delayed/uncommitted writes
 * ("locked" indicates whether nfs_buf_mutex is already held)
 *
 * Prefers to hand the work to the delwri service thread, starting one
 * if needed; only does the work inline when no thread could be started.
 */
void
nfs_buf_delwri_push(int locked)
{
	if (TAILQ_EMPTY(&nfsbufdelwri)) {
		return;
	}
	if (!locked) {
		lck_mtx_lock(&nfs_buf_mutex);
	}
	/* wake up the delayed write service thread */
	if (nfsbufdelwrithd) {
		wakeup(&nfsbufdelwrithd);
	} else if (kernel_thread_start(nfs_buf_delwri_thread, NULL, &nfsbufdelwrithd) == KERN_SUCCESS) {
		/* thread started; drop the reference kernel_thread_start returned */
		thread_deallocate(nfsbufdelwrithd);
	}
	/* otherwise, try to do some of the work ourselves */
	if (!nfsbufdelwrithd) {
		nfs_buf_delwri_service();
	}
	if (!locked) {
		lck_mtx_unlock(&nfs_buf_mutex);
	}
}
716 
717 /*
718  * Get an nfs buffer.
719  *
720  * Returns errno on error, 0 otherwise.
721  * Any buffer is returned in *bpp.
722  *
723  * If NBLK_ONLYVALID is set, only return buffer if found in cache.
724  * If NBLK_NOWAIT is set, don't wait for the buffer if it's marked BUSY.
725  *
726  * Check for existence of buffer in cache.
727  * Or attempt to reuse a buffer from one of the free lists.
728  * Or allocate a new buffer if we haven't already hit max allocation.
729  * Or wait for a free buffer.
730  *
731  * If available buffer found, prepare it, and return it.
732  *
733  * If the calling process is interrupted by a signal for
734  * an interruptible mount point, return EINTR.
735  */
736 int
nfs_buf_get(nfsnode_t np,daddr64_t blkno,uint32_t size,thread_t thd,int flags,struct nfsbuf ** bpp)737 nfs_buf_get(
738 	nfsnode_t np,
739 	daddr64_t blkno,
740 	uint32_t size,
741 	thread_t thd,
742 	int flags,
743 	struct nfsbuf **bpp)
744 {
745 	vnode_t vp = NFSTOV(np);
746 	struct nfsmount *nmp = VTONMP(vp);
747 	struct nfsbuf *bp;
748 	uint32_t bufsize;
749 	int slpflag = PCATCH;
750 	int operation = (flags & NBLK_OPMASK);
751 	int error = 0;
752 	struct timespec ts;
753 
754 	FSDBG_TOP(541, np, blkno, size, flags);
755 	*bpp = NULL;
756 
757 	bufsize = size;
758 	if (bufsize > NFS_MAXBSIZE) {
759 		panic("nfs_buf_get: buffer larger than NFS_MAXBSIZE requested");
760 	}
761 
762 	if (nfs_mount_gone(nmp)) {
763 		FSDBG_BOT(541, np, blkno, 0, ENXIO);
764 		return ENXIO;
765 	}
766 
767 	if (!UBCINFOEXISTS(vp)) {
768 		operation = NBLK_META;
769 	} else if (bufsize < (uint32_t)nmp->nm_biosize) {
770 		/* reg files should always have biosize blocks */
771 		bufsize = nmp->nm_biosize;
772 	}
773 
774 	/* if NBLK_WRITE, check for too many delayed/uncommitted writes */
775 	if ((operation == NBLK_WRITE) && (nfs_nbdwrite > NFS_A_LOT_OF_DELAYED_WRITES)) {
776 		FSDBG_TOP(542, np, blkno, nfs_nbdwrite, NFS_A_LOT_OF_DELAYED_WRITES);
777 
778 		/* poke the delwri list */
779 		nfs_buf_delwri_push(0);
780 
781 		/* sleep to let other threads run... */
782 		tsleep(&nfs_nbdwrite, PCATCH, "nfs_nbdwrite", 1);
783 		FSDBG_BOT(542, np, blkno, nfs_nbdwrite, NFS_A_LOT_OF_DELAYED_WRITES);
784 	}
785 
786 loop:
787 	lck_mtx_lock(&nfs_buf_mutex);
788 
789 	/* wait for any buffer invalidation/flushing to complete */
790 	while (np->n_bflag & NBINVALINPROG) {
791 		np->n_bflag |= NBINVALWANT;
792 		ts.tv_sec = 2;
793 		ts.tv_nsec = 0;
794 		msleep(&np->n_bflag, &nfs_buf_mutex, slpflag, "nfs_buf_get_invalwait", &ts);
795 		if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
796 			lck_mtx_unlock(&nfs_buf_mutex);
797 			FSDBG_BOT(541, np, blkno, 0, error);
798 			return error;
799 		}
800 		if (np->n_bflag & NBINVALINPROG) {
801 			slpflag = 0;
802 		}
803 	}
804 
805 	/* check for existence of nfsbuf in cache */
806 	if ((bp = nfs_buf_incore(np, blkno))) {
807 		/* if busy, set wanted and wait */
808 		if (ISSET(bp->nb_lflags, NBL_BUSY)) {
809 			if (flags & NBLK_NOWAIT) {
810 				lck_mtx_unlock(&nfs_buf_mutex);
811 				FSDBG_BOT(541, np, blkno, bp, 0xbcbcbcbc);
812 				return 0;
813 			}
814 			FSDBG_TOP(543, np, blkno, bp, bp->nb_flags);
815 			SET(bp->nb_lflags, NBL_WANTED);
816 
817 			ts.tv_sec = 2;
818 			ts.tv_nsec = 0;
819 			msleep(bp, &nfs_buf_mutex, slpflag | (PRIBIO + 1) | PDROP,
820 			    "nfsbufget", (slpflag == PCATCH) ? NULL : &ts);
821 			slpflag = 0;
822 			FSDBG_BOT(543, np, blkno, bp, bp->nb_flags);
823 			if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
824 				FSDBG_BOT(541, np, blkno, 0, error);
825 				return error;
826 			}
827 			goto loop;
828 		}
829 		if (bp->nb_bufsize != bufsize) {
830 			panic("nfsbuf size mismatch");
831 		}
832 		SET(bp->nb_lflags, NBL_BUSY);
833 		SET(bp->nb_flags, NB_CACHE);
834 		nfs_buf_remfree(bp);
835 		/* additional paranoia: */
836 		if (ISSET(bp->nb_flags, NB_PAGELIST)) {
837 			panic("pagelist buffer was not busy");
838 		}
839 		goto buffer_setup;
840 	}
841 
842 	if (flags & NBLK_ONLYVALID) {
843 		lck_mtx_unlock(&nfs_buf_mutex);
844 		FSDBG_BOT(541, np, blkno, 0, 0x0000cace);
845 		return 0;
846 	}
847 
848 	/*
849 	 * where to get a free buffer:
850 	 * - if meta and maxmeta reached, must reuse meta
851 	 * - alloc new if we haven't reached min bufs
852 	 * - if free lists are NOT empty
853 	 *   - if free list is stale, use it
854 	 *   - else if freemeta list is stale, use it
855 	 *   - else if max bufs allocated, use least-time-to-stale
856 	 * - alloc new if we haven't reached max allowed
857 	 * - start clearing out delwri list and try again
858 	 */
859 
860 	if ((operation == NBLK_META) && (nfsbufmetacnt >= nfsbufmetamax)) {
861 		/* if we've hit max meta buffers, must reuse a meta buffer */
862 		bp = TAILQ_FIRST(&nfsbuffreemeta);
863 	} else if ((nfsbufcnt > nfsbufmin) &&
864 	    (!TAILQ_EMPTY(&nfsbuffree) || !TAILQ_EMPTY(&nfsbuffreemeta))) {
865 		/* try to pull an nfsbuf off a free list */
866 		struct nfsbuf *lrubp, *metabp;
867 		struct timeval now;
868 		microuptime(&now);
869 
870 		/* if the next LRU or META buffer is invalid or stale, use it */
871 		lrubp = TAILQ_FIRST(&nfsbuffree);
872 		if (lrubp && (!NBUFSTAMPVALID(lrubp) ||
873 		    ((lrubp->nb_timestamp + NFSBUF_LRU_STALE) < now.tv_sec))) {
874 			bp = lrubp;
875 		}
876 		metabp = TAILQ_FIRST(&nfsbuffreemeta);
877 		if (!bp && metabp && (!NBUFSTAMPVALID(metabp) ||
878 		    ((metabp->nb_timestamp + NFSBUF_META_STALE) < now.tv_sec))) {
879 			bp = metabp;
880 		}
881 
882 		if (!bp && (nfsbufcnt >= nfsbufmax)) {
883 			/* we've already allocated all bufs, so */
884 			/* choose the buffer that'll go stale first */
885 			if (!metabp) {
886 				bp = lrubp;
887 			} else if (!lrubp) {
888 				bp = metabp;
889 			} else {
890 				time_t lru_stale_time, meta_stale_time;
891 				lru_stale_time = lrubp->nb_timestamp + NFSBUF_LRU_STALE;
892 				meta_stale_time = metabp->nb_timestamp + NFSBUF_META_STALE;
893 				if (lru_stale_time <= meta_stale_time) {
894 					bp = lrubp;
895 				} else {
896 					bp = metabp;
897 				}
898 			}
899 		}
900 	}
901 
902 	if (bp) {
903 		/* we have a buffer to reuse */
904 		FSDBG(544, np, blkno, bp, bp->nb_flags);
905 		nfs_buf_remfree(bp);
906 		if (ISSET(bp->nb_flags, NB_DELWRI)) {
907 			panic("nfs_buf_get: delwri");
908 		}
909 		SET(bp->nb_lflags, NBL_BUSY);
910 		/* disassociate buffer from previous nfsnode */
911 		if (bp->nb_np) {
912 			if (bp->nb_vnbufs.le_next != NFSNOLIST) {
913 				LIST_REMOVE(bp, nb_vnbufs);
914 				bp->nb_vnbufs.le_next = NFSNOLIST;
915 			}
916 			bp->nb_np = NULL;
917 		}
918 		LIST_REMOVE(bp, nb_hash);
919 		/* nuke any creds we're holding */
920 		if (IS_VALID_CRED(bp->nb_rcred)) {
921 			kauth_cred_unref(&bp->nb_rcred);
922 		}
923 		if (IS_VALID_CRED(bp->nb_wcred)) {
924 			kauth_cred_unref(&bp->nb_wcred);
925 		}
926 		/* if buf will no longer be NB_META, dump old buffer */
927 		if (operation == NBLK_META) {
928 			if (!ISSET(bp->nb_flags, NB_META)) {
929 				nfsbufmetacnt++;
930 			}
931 		} else if (ISSET(bp->nb_flags, NB_META)) {
932 			if (bp->nb_data) {
933 				kfree_data(bp->nb_data, bp->nb_bufsize);
934 				bp->nb_data = NULL;
935 			}
936 			nfsbufmetacnt--;
937 		}
938 		/* re-init buf fields */
939 		bp->nb_error = 0;
940 		bp->nb_validoff = bp->nb_validend = -1;
941 		bp->nb_dirtyoff = bp->nb_dirtyend = 0;
942 		NBPGS_ERASE(&bp->nb_valid);
943 		NBPGS_ERASE(&bp->nb_dirty);
944 		bp->nb_verf = 0;
945 	} else {
946 		/* no buffer to reuse */
947 		if ((nfsbufcnt < nfsbufmax) &&
948 		    ((operation != NBLK_META) || (nfsbufmetacnt < nfsbufmetamax))) {
949 			/* just alloc a new one */
950 			bp = zalloc_flags(nfsbuf_zone, Z_WAITOK | Z_ZERO);
951 			nfsbufcnt++;
952 
953 			/*
954 			 * If any excess bufs, make sure the timer
955 			 * is running to free them up later.
956 			 */
957 			if (nfsbufcnt > nfsbufmin && !nfs_buf_timer_on) {
958 				nfs_buf_timer_on = 1;
959 				nfs_interval_timer_start(nfs_buf_timer_call,
960 				    NFSBUF_FREE_PERIOD * 1000);
961 			}
962 
963 			if (operation == NBLK_META) {
964 				nfsbufmetacnt++;
965 			}
966 			NFSBUFCNTCHK();
967 			/* init nfsbuf */
968 			os_ref_init(&bp->nb_refs, NULL);
969 
970 			bp->nb_free.tqe_next = NFSNOLIST;
971 			bp->nb_validoff = bp->nb_validend = -1;
972 			FSDBG(545, np, blkno, bp, 0);
973 		} else {
974 			/* too many bufs... wait for buffers to free up */
975 			FSDBG_TOP(546, np, blkno, nfsbufcnt, nfsbufmax);
976 
977 			/* poke the delwri list */
978 			nfs_buf_delwri_push(1);
979 
980 			nfsneedbuffer = 1;
981 			msleep(&nfsneedbuffer, &nfs_buf_mutex, PCATCH | PDROP, "nfsbufget", NULL);
982 			FSDBG_BOT(546, np, blkno, nfsbufcnt, nfsbufmax);
983 			if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
984 				FSDBG_BOT(541, np, blkno, 0, error);
985 				return error;
986 			}
987 			goto loop;
988 		}
989 	}
990 
991 	/* set up nfsbuf */
992 	SET(bp->nb_lflags, NBL_BUSY);
993 	bp->nb_flags = 0;
994 	bp->nb_lblkno = blkno;
995 	/* insert buf in hash */
996 	LIST_INSERT_HEAD(NFSBUFHASH(np, blkno), bp, nb_hash);
997 	/* associate buffer with new nfsnode */
998 	bp->nb_np = np;
999 	LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
1000 
1001 buffer_setup:
1002 
1003 	/* unlock hash */
1004 	lck_mtx_unlock(&nfs_buf_mutex);
1005 
1006 	switch (operation) {
1007 	case NBLK_META:
1008 		SET(bp->nb_flags, NB_META);
1009 		if ((bp->nb_bufsize != bufsize) && bp->nb_data) {
1010 			kfree_data(bp->nb_data, bp->nb_bufsize);
1011 			bp->nb_data = NULL;
1012 			bp->nb_validoff = bp->nb_validend = -1;
1013 			bp->nb_dirtyoff = bp->nb_dirtyend = 0;
1014 			NBPGS_ERASE(&bp->nb_valid);
1015 			NBPGS_ERASE(&bp->nb_dirty);
1016 			CLR(bp->nb_flags, NB_CACHE);
1017 		}
1018 		if (!bp->nb_data) {
1019 			bp->nb_data = kalloc_data(bufsize, Z_WAITOK);
1020 		}
1021 		if (!bp->nb_data) {
1022 			/* Ack! couldn't allocate the data buffer! */
1023 			/* clean up buffer and return error */
1024 			lck_mtx_lock(&nfs_buf_mutex);
1025 			LIST_REMOVE(bp, nb_vnbufs);
1026 			bp->nb_vnbufs.le_next = NFSNOLIST;
1027 			bp->nb_np = NULL;
1028 			/* invalidate usage timestamp to allow immediate freeing */
1029 			NBUFSTAMPINVALIDATE(bp);
1030 			if (bp->nb_free.tqe_next != NFSNOLIST) {
1031 				panic("nfsbuf on freelist");
1032 			}
1033 			TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
1034 			nfsbuffreecnt++;
1035 			lck_mtx_unlock(&nfs_buf_mutex);
1036 			FSDBG_BOT(541, np, blkno, 0xb00, ENOMEM);
1037 			return ENOMEM;
1038 		}
1039 		bp->nb_bufsize = bufsize;
1040 		break;
1041 
1042 	case NBLK_READ:
1043 	case NBLK_WRITE:
1044 		/*
1045 		 * Set or clear NB_READ now to let the UPL subsystem know
1046 		 * if we intend to modify the pages or not.
1047 		 */
1048 		if (operation == NBLK_READ) {
1049 			SET(bp->nb_flags, NB_READ);
1050 		} else {
1051 			CLR(bp->nb_flags, NB_READ);
1052 		}
1053 		if (bufsize < PAGE_SIZE) {
1054 			bufsize = PAGE_SIZE;
1055 		}
1056 		bp->nb_bufsize = bufsize;
1057 		bp->nb_validoff = bp->nb_validend = -1;
1058 
1059 		if (UBCINFOEXISTS(vp)) {
1060 			/* set up upl */
1061 			if (nfs_buf_upl_setup(bp)) {
1062 				/* unable to create upl */
1063 				/* vm object must no longer exist */
1064 				/* clean up buffer and return error */
1065 				lck_mtx_lock(&nfs_buf_mutex);
1066 				LIST_REMOVE(bp, nb_vnbufs);
1067 				bp->nb_vnbufs.le_next = NFSNOLIST;
1068 				bp->nb_np = NULL;
1069 				/* invalidate usage timestamp to allow immediate freeing */
1070 				NBUFSTAMPINVALIDATE(bp);
1071 				if (bp->nb_free.tqe_next != NFSNOLIST) {
1072 					panic("nfsbuf on freelist");
1073 				}
1074 				TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
1075 				nfsbuffreecnt++;
1076 				lck_mtx_unlock(&nfs_buf_mutex);
1077 				FSDBG_BOT(541, np, blkno, 0x2bc, EIO);
1078 				return EIO;
1079 			}
1080 			nfs_buf_upl_check(bp);
1081 		}
1082 		break;
1083 
1084 	default:
1085 		panic("nfs_buf_get: %d unknown operation", operation);
1086 	}
1087 
1088 	*bpp = bp;
1089 
1090 	FSDBG_BOT(541, np, blkno, bp, bp->nb_flags);
1091 
1092 	return 0;
1093 }
1094 
/*
 * Release a buffer the caller is done with.
 *
 * If the buffer is backed by a UPL, unmap it and commit or abort its
 * pages back to the VM system as appropriate, then requeue the buffer
 * on the proper list (free, free-meta, or delayed-write) and wake up
 * anyone waiting for a buffer.  If "freeup" is set (and the buffer did
 * not go onto the delwri list), NFS_BUF_FREEUP() is invoked to trim
 * excess free buffers.
 */
void
nfs_buf_release(struct nfsbuf *bp, int freeup)
{
	nfsnode_t np = bp->nb_np;
	vnode_t vp;
	struct timeval now;
	int wakeup_needbuffer, wakeup_buffer, wakeup_nbdwrite;

	FSDBG_TOP(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);
	FSDBG(548, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
	FSDBG(548, bp->nb_valid, 0, bp->nb_dirty, 0);

	vp = np ? NFSTOV(np) : NULL;
	if (vp && UBCINFOEXISTS(vp) && bp->nb_bufsize) {
		int upl_flags, rv;
		upl_t upl;
		uint32_t i;

		/*
		 * If the buffer has no pagelist yet (and isn't being
		 * invalidated), create one now so its pages can be
		 * committed back to the VM system below.
		 */
		if (!ISSET(bp->nb_flags, NB_PAGELIST) && !ISSET(bp->nb_flags, NB_INVAL)) {
			rv = nfs_buf_upl_setup(bp);
			if (rv) {
				printf("nfs_buf_release: upl create failed %d\n", rv);
			} else {
				nfs_buf_upl_check(bp);
			}
		}
		upl = bp->nb_pagelist;
		if (!upl) {
			goto pagelist_cleanup_done;
		}
		if (bp->nb_data) {
			/* drop our kernel mapping of the pagelist */
			if (ubc_upl_unmap(upl) != KERN_SUCCESS) {
				panic("ubc_upl_unmap failed");
			}
			bp->nb_data = NULL;
		}
		/*
		 * Abort the pages on error or: if this is an invalid or
		 * non-needcommit nocache buffer AND no pages are dirty.
		 */
		if (ISSET(bp->nb_flags, NB_ERROR) || (!nfs_buf_pgs_is_set(&bp->nb_dirty) && (ISSET(bp->nb_flags, NB_INVAL) ||
		    (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))))) {
			/* dump pages whose contents must not be cached */
			if (ISSET(bp->nb_flags, (NB_READ | NB_INVAL | NB_NOCACHE))) {
				upl_flags = UPL_ABORT_DUMP_PAGES;
			} else {
				upl_flags = 0;
			}
			ubc_upl_abort(upl, upl_flags);
			goto pagelist_cleanup_done;
		}
		/* commit or abort each page of the buffer individually */
		for (i = 0; i <= (bp->nb_bufsize - 1) / PAGE_SIZE; i++) {
			if (!NBPGVALID(bp, i)) {
				/* page holds no valid data - toss it */
				ubc_upl_abort_range(upl,
				    i * PAGE_SIZE, PAGE_SIZE,
				    UPL_ABORT_DUMP_PAGES |
				    UPL_ABORT_FREE_ON_EMPTY);
			} else {
				/* keep VM's per-page dirty state in sync with ours */
				if (NBPGDIRTY(bp, i)) {
					upl_flags = UPL_COMMIT_SET_DIRTY;
				} else {
					upl_flags = UPL_COMMIT_CLEAR_DIRTY;
				}

				if (!ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI))) {
					upl_flags |= UPL_COMMIT_CLEAR_PRECIOUS;
				}

				ubc_upl_commit_range(upl,
				    i * PAGE_SIZE, PAGE_SIZE,
				    upl_flags |
				    UPL_COMMIT_INACTIVATE |
				    UPL_COMMIT_FREE_ON_EMPTY);
			}
		}
pagelist_cleanup_done:
		/* invalidate any pages past EOF */
		if (NBOFF(bp) + bp->nb_bufsize > (off_t)(np->n_size)) {
			off_t start, end;
			/* first page after the one containing the EOF offset */
			start = trunc_page_64(np->n_size) + PAGE_SIZE_64;
			end = trunc_page_64(NBOFF(bp) + bp->nb_bufsize);
			if (start < NBOFF(bp)) {
				start = NBOFF(bp);
			}
			if (end > start) {
				if ((rv = ubc_msync(vp, start, end, NULL, UBC_INVALIDATE))) {
					printf("nfs_buf_release(): ubc_msync failed!, error %d\n", rv);
				}
			}
		}
		CLR(bp->nb_flags, NB_PAGELIST);
		bp->nb_pagelist = NULL;
	}

	lck_mtx_lock(&nfs_buf_mutex);

	/* record which wakeups are needed; issue them after dropping the mutex */
	wakeup_needbuffer = wakeup_buffer = wakeup_nbdwrite = 0;

	/* Wake up any processes waiting for any buffer to become free. */
	if (nfsneedbuffer) {
		nfsneedbuffer = 0;
		wakeup_needbuffer = 1;
	}
	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->nb_lflags, NBL_WANTED)) {
		CLR(bp->nb_lflags, NBL_WANTED);
		wakeup_buffer = 1;
	}

	/* If it's non-needcommit nocache, or an error, mark it invalid. */
	if (ISSET(bp->nb_flags, NB_ERROR) ||
	    (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))) {
		SET(bp->nb_flags, NB_INVAL);
	}

	if ((bp->nb_bufsize <= 0) || ISSET(bp->nb_flags, NB_INVAL)) {
		/* If it's invalid or empty, dissociate it from its nfsnode */
		if (bp->nb_vnbufs.le_next != NFSNOLIST) {
			LIST_REMOVE(bp, nb_vnbufs);
			bp->nb_vnbufs.le_next = NFSNOLIST;
		}
		bp->nb_np = NULL;
		/* if this was a delayed write, wakeup anyone */
		/* waiting for delayed writes to complete */
		if (ISSET(bp->nb_flags, NB_DELWRI)) {
			CLR(bp->nb_flags, NB_DELWRI);
			nfs_nbdwrite--;
			NFSBUFCNTCHK();
			wakeup_nbdwrite = 1;
		}
		/* invalidate usage timestamp to allow immediate freeing */
		NBUFSTAMPINVALIDATE(bp);
		/* put buffer at head of free list */
		if (bp->nb_free.tqe_next != NFSNOLIST) {
			panic("nfsbuf on freelist");
		}
		SET(bp->nb_flags, NB_INVAL);
		if (ISSET(bp->nb_flags, NB_META)) {
			TAILQ_INSERT_HEAD(&nfsbuffreemeta, bp, nb_free);
			nfsbuffreemetacnt++;
		} else {
			TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
			nfsbuffreecnt++;
		}
	} else if (ISSET(bp->nb_flags, NB_DELWRI)) {
		/* put buffer at end of delwri list */
		if (bp->nb_free.tqe_next != NFSNOLIST) {
			panic("nfsbuf on freelist");
		}
		TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
		nfsbufdelwricnt++;
		freeup = 0; /* buffer still holds data to write; don't trim free lists */
	} else {
		/* update usage timestamp */
		microuptime(&now);
		bp->nb_timestamp = now.tv_sec;
		/* put buffer at end of free list */
		if (bp->nb_free.tqe_next != NFSNOLIST) {
			panic("nfsbuf on freelist");
		}
		if (ISSET(bp->nb_flags, NB_META)) {
			TAILQ_INSERT_TAIL(&nfsbuffreemeta, bp, nb_free);
			nfsbuffreemetacnt++;
		} else {
			TAILQ_INSERT_TAIL(&nfsbuffree, bp, nb_free);
			nfsbuffreecnt++;
		}
	}

	NFSBUFCNTCHK();

	/* Unlock the buffer. */
	CLR(bp->nb_flags, (NB_ASYNC | NB_STABLE));
	CLR(bp->nb_lflags, NBL_BUSY);

	FSDBG_BOT(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);

	lck_mtx_unlock(&nfs_buf_mutex);

	/* now that the mutex is dropped, issue any deferred wakeups */
	if (wakeup_needbuffer) {
		wakeup(&nfsneedbuffer);
	}
	if (wakeup_buffer) {
		wakeup(bp);
	}
	if (wakeup_nbdwrite) {
		wakeup(&nfs_nbdwrite);
	}
	if (freeup) {
		NFS_BUF_FREEUP();
	}
}
1286 
1287 /*
1288  * Wait for operations on the buffer to complete.
1289  * When they do, extract and return the I/O's error value.
1290  */
1291 int
nfs_buf_iowait(struct nfsbuf * bp)1292 nfs_buf_iowait(struct nfsbuf *bp)
1293 {
1294 	FSDBG_TOP(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
1295 
1296 	lck_mtx_lock(&nfs_buf_mutex);
1297 
1298 	while (!ISSET(bp->nb_flags, NB_DONE)) {
1299 		msleep(bp, &nfs_buf_mutex, PRIBIO + 1, "nfs_buf_iowait", NULL);
1300 	}
1301 
1302 	lck_mtx_unlock(&nfs_buf_mutex);
1303 
1304 	FSDBG_BOT(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
1305 
1306 	/* check for interruption of I/O, then errors. */
1307 	if (ISSET(bp->nb_flags, NB_EINTR)) {
1308 		CLR(bp->nb_flags, NB_EINTR);
1309 		return EINTR;
1310 	} else if (ISSET(bp->nb_flags, NB_ERROR)) {
1311 		return bp->nb_error ? bp->nb_error : EIO;
1312 	}
1313 	return 0;
1314 }
1315 
1316 /*
1317  * Mark I/O complete on a buffer.
1318  */
1319 void
nfs_buf_iodone(struct nfsbuf * bp)1320 nfs_buf_iodone(struct nfsbuf *bp)
1321 {
1322 	FSDBG_TOP(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
1323 
1324 	if (ISSET(bp->nb_flags, NB_DONE)) {
1325 		panic("nfs_buf_iodone already");
1326 	}
1327 
1328 	if (!ISSET(bp->nb_flags, NB_READ)) {
1329 		CLR(bp->nb_flags, NB_WRITEINPROG);
1330 		/*
1331 		 * vnode_writedone() takes care of waking up
1332 		 * any throttled write operations
1333 		 */
1334 		vnode_writedone(NFSTOV(bp->nb_np));
1335 		nfs_node_lock_force(bp->nb_np);
1336 		bp->nb_np->n_numoutput--;
1337 		nfs_node_unlock(bp->nb_np);
1338 	}
1339 	if (ISSET(bp->nb_flags, NB_ASYNC)) {    /* if async, release it */
1340 		SET(bp->nb_flags, NB_DONE);             /* note that it's done */
1341 		nfs_buf_release(bp, 1);
1342 	} else {                                        /* or just wakeup the buffer */
1343 		lck_mtx_lock(&nfs_buf_mutex);
1344 		SET(bp->nb_flags, NB_DONE);             /* note that it's done */
1345 		CLR(bp->nb_lflags, NBL_WANTED);
1346 		lck_mtx_unlock(&nfs_buf_mutex);
1347 		wakeup(bp);
1348 	}
1349 
1350 	FSDBG_BOT(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
1351 }
1352 
1353 void
nfs_buf_write_delayed(struct nfsbuf * bp)1354 nfs_buf_write_delayed(struct nfsbuf *bp)
1355 {
1356 	nfsnode_t np = bp->nb_np;
1357 
1358 	FSDBG_TOP(551, bp, NBOFF(bp), bp->nb_flags, 0);
1359 	FSDBG(551, bp, bp->nb_dirtyoff, bp->nb_dirtyend, bp->nb_dirty);
1360 
1361 	/*
1362 	 * If the block hasn't been seen before:
1363 	 *	(1) Mark it as having been seen,
1364 	 *	(2) Make sure it's on its node's correct block list,
1365 	 */
1366 	if (!ISSET(bp->nb_flags, NB_DELWRI)) {
1367 		SET(bp->nb_flags, NB_DELWRI);
1368 		/* move to dirty list */
1369 		lck_mtx_lock(&nfs_buf_mutex);
1370 		nfs_nbdwrite++;
1371 		NFSBUFCNTCHK();
1372 		if (bp->nb_vnbufs.le_next != NFSNOLIST) {
1373 			LIST_REMOVE(bp, nb_vnbufs);
1374 		}
1375 		LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
1376 		lck_mtx_unlock(&nfs_buf_mutex);
1377 	}
1378 
1379 	/*
1380 	 * If the vnode has "too many" write operations in progress
1381 	 * wait for them to finish the IO
1382 	 */
1383 	vnode_waitforwrites(NFSTOV(np), VNODE_ASYNC_THROTTLE, 0, 0, "nfs_buf_write_delayed");
1384 
1385 	/* the file is in a modified state, so make sure the flag's set */
1386 	nfs_node_lock_force(np);
1387 	np->n_flag |= NMODIFIED;
1388 	nfs_node_unlock(np);
1389 
1390 	/*
1391 	 * If we have too many delayed write buffers,
1392 	 * just fall back to doing the async write.
1393 	 */
1394 	if (nfs_nbdwrite < 0) {
1395 		panic("nfs_buf_write_delayed: Negative nfs_nbdwrite");
1396 	}
1397 	if (nfs_nbdwrite > NFS_A_LOT_OF_DELAYED_WRITES) {
1398 		/* issue async write */
1399 		SET(bp->nb_flags, NB_ASYNC);
1400 		nfs_buf_write(bp);
1401 		FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
1402 		return;
1403 	}
1404 
1405 	/* Otherwise, the "write" is done, so mark and release the buffer. */
1406 	SET(bp->nb_flags, NB_DONE);
1407 	nfs_buf_release(bp, 1);
1408 	FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, 0);
1409 	return;
1410 }
1411 
1412 /*
1413  * Check that a "needcommit" buffer can still be committed.
1414  * If the write verifier has changed, we need to clear the
1415  * the needcommit flag.
1416  */
1417 void
nfs_buf_check_write_verifier(nfsnode_t np,struct nfsbuf * bp)1418 nfs_buf_check_write_verifier(nfsnode_t np, struct nfsbuf *bp)
1419 {
1420 	struct nfsmount *nmp;
1421 
1422 	if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
1423 		return;
1424 	}
1425 
1426 	nmp = NFSTONMP(np);
1427 	if (nfs_mount_gone(nmp)) {
1428 		return;
1429 	}
1430 	if (!ISSET(bp->nb_flags, NB_STALEWVERF) && (bp->nb_verf == nmp->nm_verf)) {
1431 		return;
1432 	}
1433 
1434 	/* write verifier changed, clear commit/wverf flags */
1435 	CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_STALEWVERF));
1436 	bp->nb_verf = 0;
1437 	nfs_node_lock_force(np);
1438 	np->n_needcommitcnt--;
1439 	CHECK_NEEDCOMMITCNT(np);
1440 	nfs_node_unlock(np);
1441 }
1442 
1443 /*
1444  * add a reference to a buffer so it doesn't disappear while being used
1445  * (must be called with nfs_buf_mutex held)
1446  */
void
nfs_buf_refget(struct nfsbuf *bp)
{
	/* caller holds nfs_buf_mutex, so the locked retain variant is safe */
	os_ref_retain_locked(&bp->nb_refs);
}
1452 /*
1453  * release a reference on a buffer
1454  * (must be called with nfs_buf_mutex held)
1455  */
void
nfs_buf_refrele(struct nfsbuf *bp)
{
	/* drop a reference; the returned count is intentionally ignored */
	(void) os_ref_release_locked(&bp->nb_refs);
}
1461 
1462 /*
1463  * mark a particular buffer as BUSY
1464  * (must be called with nfs_buf_mutex held)
1465  */
1466 errno_t
nfs_buf_acquire(struct nfsbuf * bp,int flags,int slpflag,int slptimeo)1467 nfs_buf_acquire(struct nfsbuf *bp, int flags, int slpflag, int slptimeo)
1468 {
1469 	errno_t error;
1470 	struct timespec ts;
1471 
1472 	if (ISSET(bp->nb_lflags, NBL_BUSY)) {
1473 		/*
1474 		 * since the lck_mtx_lock may block, the buffer
1475 		 * may become BUSY, so we need to recheck for
1476 		 * a NOWAIT request
1477 		 */
1478 		if (flags & NBAC_NOWAIT) {
1479 			return EBUSY;
1480 		}
1481 		SET(bp->nb_lflags, NBL_WANTED);
1482 
1483 		ts.tv_sec = (slptimeo / 100);
1484 		/* the hz value is 100; which leads to 10ms */
1485 		ts.tv_nsec = (slptimeo % 100) * 10  * NSEC_PER_USEC * 1000;
1486 
1487 		error = msleep(bp, &nfs_buf_mutex, slpflag | (PRIBIO + 1),
1488 		    "nfs_buf_acquire", &ts);
1489 		if (error) {
1490 			return error;
1491 		}
1492 		return EAGAIN;
1493 	}
1494 	if (flags & NBAC_REMOVE) {
1495 		nfs_buf_remfree(bp);
1496 	}
1497 	SET(bp->nb_lflags, NBL_BUSY);
1498 
1499 	return 0;
1500 }
1501 
1502 /*
1503  * simply drop the BUSY status of a buffer
1504  * (must be called with nfs_buf_mutex held)
1505  */
1506 void
nfs_buf_drop(struct nfsbuf * bp)1507 nfs_buf_drop(struct nfsbuf *bp)
1508 {
1509 	int need_wakeup = 0;
1510 
1511 	if (!ISSET(bp->nb_lflags, NBL_BUSY)) {
1512 		panic("nfs_buf_drop: buffer not busy!");
1513 	}
1514 	if (ISSET(bp->nb_lflags, NBL_WANTED)) {
1515 		/* delay the actual wakeup until after we clear NBL_BUSY */
1516 		need_wakeup = 1;
1517 	}
1518 	/* Unlock the buffer. */
1519 	CLR(bp->nb_lflags, (NBL_BUSY | NBL_WANTED));
1520 
1521 	if (need_wakeup) {
1522 		wakeup(bp);
1523 	}
1524 }
1525 
1526 /*
1527  * prepare for iterating over an nfsnode's buffer list
1528  * this lock protects the queue manipulation
1529  * (must be called with nfs_buf_mutex held)
1530  */
int
nfs_buf_iterprepare(nfsnode_t np, struct nfsbuflists *iterheadp, int flags)
{
	struct nfsbuflists *listheadp;

	/* NBI_DIRTY selects the dirty-buffer list; otherwise use the clean list */
	if (flags & NBI_DIRTY) {
		listheadp = &np->n_dirtyblkhd;
	} else {
		listheadp = &np->n_cleanblkhd;
	}

	/* with NBI_NOWAIT, don't block behind another in-progress iteration */
	if ((flags & NBI_NOWAIT) && (np->n_bufiterflags & NBI_ITER)) {
		LIST_INIT(iterheadp);
		return EWOULDBLOCK;
	}

	/* only one iteration at a time per node: wait for any current iterator */
	while (np->n_bufiterflags & NBI_ITER) {
		np->n_bufiterflags |= NBI_ITERWANT;
		msleep(&np->n_bufiterflags, &nfs_buf_mutex, 0, "nfs_buf_iterprepare", NULL);
	}
	if (LIST_EMPTY(listheadp)) {
		/* nothing to iterate over */
		LIST_INIT(iterheadp);
		return EINVAL;
	}
	np->n_bufiterflags |= NBI_ITER;

	/*
	 * Transplant the entire list onto iterheadp: point the iteration
	 * head at the first buffer, repair that buffer's back-pointer to
	 * reference the new head, and leave the node's own list empty.
	 * nfs_buf_itercomplete() moves any remaining buffers back.
	 */
	iterheadp->lh_first = listheadp->lh_first;
	listheadp->lh_first->nb_vnbufs.le_prev = &iterheadp->lh_first;
	LIST_INIT(listheadp);

	return 0;
}
1563 
1564 /*
1565  * clean up after iterating over an nfsnode's buffer list
1566  * this lock protects the queue manipulation
1567  * (must be called with nfs_buf_mutex held)
1568  */
1569 void
nfs_buf_itercomplete(nfsnode_t np,struct nfsbuflists * iterheadp,int flags)1570 nfs_buf_itercomplete(nfsnode_t np, struct nfsbuflists *iterheadp, int flags)
1571 {
1572 	struct nfsbuflists * listheadp;
1573 	struct nfsbuf *bp;
1574 
1575 	if (flags & NBI_DIRTY) {
1576 		listheadp = &np->n_dirtyblkhd;
1577 	} else {
1578 		listheadp = &np->n_cleanblkhd;
1579 	}
1580 
1581 	while (!LIST_EMPTY(iterheadp)) {
1582 		bp = LIST_FIRST(iterheadp);
1583 		LIST_REMOVE(bp, nb_vnbufs);
1584 		LIST_INSERT_HEAD(listheadp, bp, nb_vnbufs);
1585 	}
1586 
1587 	np->n_bufiterflags &= ~NBI_ITER;
1588 	if (np->n_bufiterflags & NBI_ITERWANT) {
1589 		np->n_bufiterflags &= ~NBI_ITERWANT;
1590 		wakeup(&np->n_bufiterflags);
1591 	}
1592 }
1593 
1594 
1595 /*
1596  * Read an NFS buffer for a file.
1597  */
1598 int
nfs_buf_read(struct nfsbuf * bp)1599 nfs_buf_read(struct nfsbuf *bp)
1600 {
1601 	int error = 0;
1602 	nfsnode_t np;
1603 	thread_t thd;
1604 	kauth_cred_t cred;
1605 
1606 	np = bp->nb_np;
1607 	cred = bp->nb_rcred;
1608 	if (IS_VALID_CRED(cred)) {
1609 		kauth_cred_ref(cred);
1610 	}
1611 	thd = ISSET(bp->nb_flags, NB_ASYNC) ? NULL : current_thread();
1612 
1613 	/* sanity checks */
1614 	if (!ISSET(bp->nb_flags, NB_READ)) {
1615 		panic("nfs_buf_read: !NB_READ");
1616 	}
1617 	if (ISSET(bp->nb_flags, NB_DONE)) {
1618 		CLR(bp->nb_flags, NB_DONE);
1619 	}
1620 
1621 	NFS_BUF_MAP(bp);
1622 
1623 	OSAddAtomic64(1, &nfsclntstats.read_bios);
1624 
1625 	error = nfs_buf_read_rpc(bp, thd, cred);
1626 	/*
1627 	 * For async I/O, the callbacks will finish up the
1628 	 * read.  Otherwise, the read has already been finished.
1629 	 */
1630 
1631 	if (IS_VALID_CRED(cred)) {
1632 		kauth_cred_unref(&cred);
1633 	}
1634 	return error;
1635 }
1636 
1637 /*
1638  * finish the reading of a buffer
1639  */
void
nfs_buf_read_finish(struct nfsbuf *bp)
{
	nfsnode_t np = bp->nb_np;
	struct nfsmount *nmp;

	if (!ISSET(bp->nb_flags, NB_ERROR)) {
		/* update valid range */
		bp->nb_validoff = 0;
		bp->nb_validend = bp->nb_endio;
		if (bp->nb_endio < bp->nb_bufsize) {
			/*
			 * The read may be short because we have unflushed writes
			 * that are extending the file size and the reads hit the
			 * (old) EOF on the server.  So, just make sure nb_validend
			 * correctly tracks EOF.
			 * Note that the missing data should have already been zeroed
			 * in nfs_buf_read_rpc_finish().
			 */
			off_t boff = NBOFF(bp);
			if ((off_t)np->n_size >= (boff + bp->nb_bufsize)) {
				/* whole buffer lies within the file */
				bp->nb_validend = bp->nb_bufsize;
			} else if ((off_t)np->n_size >= boff) {
				/* EOF falls inside the buffer */
				bp->nb_validend = np->n_size - boff;
			} else {
				/* buffer starts past EOF; nothing is valid */
				bp->nb_validend = 0;
			}
		}
		/* NFSv2 can't represent offsets beyond 4GB; clamp the valid range */
		if ((nmp = NFSTONMP(np)) && (nmp->nm_vers == NFS_VER2) &&
		    ((NBOFF(bp) + bp->nb_validend) > 0x100000000LL)) {
			bp->nb_validend = 0x100000000LL - NBOFF(bp);
		}
		/* mark every page covered by the valid range as valid */
		nfs_buf_pgs_get_page_mask(&bp->nb_valid, round_page_64(bp->nb_validend) / PAGE_SIZE);
		if (bp->nb_validend & PAGE_MASK) {
			/* zero-fill remainder of last page */
			bzero(bp->nb_data + bp->nb_validend, PAGE_SIZE - (bp->nb_validend & PAGE_MASK));
		}
	}
	nfs_buf_iodone(bp);
}
1680 
1681 /*
1682  * initiate the NFS READ RPC(s) for a buffer
1683  */
int
nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred)
{
	struct nfsmount *nmp;
	nfsnode_t np = bp->nb_np;
	int error = 0, nfsvers, async;
	int offset;
	uint64_t length, nrpcs;
	uint32_t nmrsize;
	size_t len;
	off_t boff;
	struct nfsreq *req;
	struct nfsreq_cbinfo cb;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		/* mount is gone: fail the buffer immediately */
		bp->nb_error = error = ENXIO;
		SET(bp->nb_flags, NB_ERROR);
		nfs_buf_iodone(bp);
		return error;
	}
	nfsvers = nmp->nm_vers;
	nmrsize = nmp->nm_rsize;	/* max bytes per READ RPC for this mount */

	boff = NBOFF(bp);
	offset = 0;
	length = bp->nb_bufsize;

	if (nfsvers == NFS_VER2) {
		/* NFSv2 offsets are 32-bit; reject/trim reads past 4GB */
		if (boff > 0xffffffffLL) {
			bp->nb_error = error = EFBIG;
			SET(bp->nb_flags, NB_ERROR);
			nfs_buf_iodone(bp);
			return error;
		}
		if ((boff + length - 1) > 0xffffffffLL) {
			length = 0x100000000LL - boff;
		}
	}

	/* Note: Can only do async I/O if nfsiods are configured. */
	async = (bp->nb_flags & NB_ASYNC);
	cb.rcb_func = async ? nfs_buf_read_rpc_finish : NULL;
	cb.rcb_bp = bp;

	/* the buffer may need several RPCs if it's larger than the rsize */
	bp->nb_offio = bp->nb_endio = 0;
	bp->nb_rpcs = nrpcs = (length + nmrsize - 1) / nmrsize;
	if (async && (nrpcs > 1)) {
		SET(bp->nb_flags, NB_MULTASYNCRPC);
	} else {
		CLR(bp->nb_flags, NB_MULTASYNCRPC);
	}

	/* issue one READ RPC per rsize-sized chunk of the buffer */
	while (length > 0) {
		if (ISSET(bp->nb_flags, NB_ERROR)) {
			error = bp->nb_error;
			break;
		}
		len = (length > nmrsize) ? nmrsize : (uint32_t)length;
		cb.rcb_args.offset = offset;
		cb.rcb_args.length = len;
#if CONFIG_NFS4
		/* remember the state generation so recovery can be detected later */
		if (nmp->nm_vers >= NFS_VER4) {
			cb.rcb_args.stategenid = nmp->nm_stategenid;
		}
#endif
		req = NULL;
		error = nmp->nm_funcs->nf_read_rpc_async(np, boff + offset, len, thd, cred, &cb, &req);
		if (error) {
			break;
		}
		offset += len;
		length -= len;
		if (async) {
			/* async: callback finishes this chunk; move on to the next */
			continue;
		}
		/* synchronous: finish this chunk before issuing the next */
		nfs_buf_read_rpc_finish(req);
		if (ISSET(bp->nb_flags, NB_ERROR)) {
			error = bp->nb_error;
			break;
		}
	}

	if (length > 0) {
		/*
		 * Something bad happened while trying to send the RPC(s).
		 * Wait for any outstanding requests to complete.
		 */
		bp->nb_error = error;
		SET(bp->nb_flags, NB_ERROR);
		if (ISSET(bp->nb_flags, NB_MULTASYNCRPC)) {
			/* subtract the RPCs that were never issued from the count */
			nrpcs = (length + nmrsize - 1) / nmrsize;
			lck_mtx_lock(&nfs_buf_mutex);
			bp->nb_rpcs -= nrpcs;
			if (bp->nb_rpcs == 0) {
				/* No RPCs left, so the buffer's done */
				lck_mtx_unlock(&nfs_buf_mutex);
				nfs_buf_iodone(bp);
			} else {
				/* wait for the last RPC to mark it done */
				while (bp->nb_rpcs > 0) {
					msleep(&bp->nb_rpcs, &nfs_buf_mutex, 0,
					    "nfs_buf_read_rpc_cancel", NULL);
				}
				lck_mtx_unlock(&nfs_buf_mutex);
			}
		} else {
			nfs_buf_iodone(bp);
		}
	}

	return error;
}
1797 
1798 /*
1799  * finish up an NFS READ RPC on a buffer
1800  */
void
nfs_buf_read_rpc_finish(struct nfsreq *req)
{
	struct nfsmount *nmp;
	size_t rlen, length;
	struct nfsreq_cbinfo cb;
	struct nfsbuf *bp;
	int error = 0, nfsvers, eof = 0, multasyncrpc, finished;
	off_t offset;
	void *wakeme = NULL;
	struct nfsreq *rreq = NULL;
	nfsnode_t np;
	thread_t thd;
	kauth_cred_t cred;
	uio_t auio;

	/* synchronous callers loop back here to finish a follow-up short-read RPC */
finish:
	np = req->r_np;
	thd = req->r_thread;
	cred = req->r_cred;
	if (IS_VALID_CRED(cred)) {
		kauth_cred_ref(cred);
	}
	cb = req->r_callback;
	bp = cb.rcb_bp;
	if (cb.rcb_func) { /* take an extra reference on the nfsreq in case we want to resend it later due to grace error */
		nfs_request_ref(req, 0);
	}

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error = ENXIO;
	}
	if (error || ISSET(bp->nb_flags, NB_ERROR)) {
		/* just drop it */
		nfs_request_async_cancel(req);
		goto out;
	}

	nfsvers = nmp->nm_vers;
	offset = cb.rcb_args.offset;
	rlen = length = cb.rcb_args.length;

	/* the reply data lands directly in the buffer at this RPC's offset */
	auio = uio_create(1, NBOFF(bp) + offset, UIO_SYSSPACE, UIO_READ);
	uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length);

	/* finish the RPC */
	error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, auio, &rlen, &eof);

	/* Free allocated uio buffer */
	uio_free(auio);

	if ((error == EINPROGRESS) && cb.rcb_func) {
		/* async request restarted */
		if (cb.rcb_func) {
			nfs_request_rele(req);
		}
		if (IS_VALID_CRED(cred)) {
			kauth_cred_unref(&cred);
		}
		return;
	}
#if CONFIG_NFS4
	/* NFSv4: a state error may require client recovery before retrying */
	if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) {
		lck_mtx_lock(&nmp->nm_lock);
		if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args.stategenid == nmp->nm_stategenid)) {
			/* recovery hasn't started since this RPC was issued; kick it off */
			NP(np, "nfs_buf_read_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery",
			    error, NBOFF(bp) + offset, cb.rcb_args.stategenid, nmp->nm_stategenid);
			nfs_need_recover(nmp, error);
		}
		lck_mtx_unlock(&nmp->nm_lock);
		if (np->n_flag & NREVOKE) {
			error = EIO;
		} else {
			if (error == NFSERR_GRACE) {
				if (cb.rcb_func) {
					/*
					 * For an async I/O request, handle a grace delay just like
					 * jukebox errors.  Set the resend time and queue it up.
					 */
					struct timeval now;
					if (req->r_nmrep.nmc_mhead) {
						mbuf_freem(req->r_nmrep.nmc_mhead);
						req->r_nmrep.nmc_mhead = NULL;
					}
					req->r_error = 0;
					microuptime(&now);
					lck_mtx_lock(&req->r_mtx);
					req->r_resendtime = now.tv_sec + 2;
					req->r_xid = 0;                 // get a new XID
					req->r_flags |= R_RESTART;
					req->r_start = 0;
					nfs_asyncio_resend(req);
					lck_mtx_unlock(&req->r_mtx);
					if (IS_VALID_CRED(cred)) {
						kauth_cred_unref(&cred);
					}
					/* Note: nfsreq reference taken will be dropped later when finished */
					return;
				}
				/* otherwise, just pause a couple seconds and retry */
				tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
			}
			if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
				/* recovery done; reissue this RPC from scratch */
				rlen = 0;
				goto readagain;
			}
		}
	}
#endif
	if (error) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
		goto out;
	}

	/* extend the buffer's end-of-I/O watermark past this chunk */
	if ((rlen > 0) && (bp->nb_endio < (offset + (int)rlen))) {
		bp->nb_endio = offset + rlen;
	}

	if ((nfsvers == NFS_VER2) || eof || (rlen == 0)) {
		/* zero out the remaining data (up to EOF) */
		off_t rpcrem, eofrem, rem;
		rpcrem = (length - rlen);                         /* bytes this RPC didn't return */
		eofrem = np->n_size - (NBOFF(bp) + offset + rlen); /* bytes left before EOF */
		rem = (rpcrem < eofrem) ? rpcrem : eofrem;
		if (rem > 0) {
			NFS_BZERO(bp->nb_data + offset + rlen, rem);
		}
	} else if ((rlen < length) && !ISSET(bp->nb_flags, NB_ERROR)) {
		/*
		 * short read
		 *
		 * We haven't hit EOF and we didn't get all the data
		 * requested, so we need to issue another read for the rest.
		 * (Don't bother if the buffer already hit an error.)
		 */
#if CONFIG_NFS4
readagain:
#endif
		offset += rlen;
		length -= rlen;
		cb.rcb_args.offset = offset;
		cb.rcb_args.length = length;
#if CONFIG_NFS4
		if (nmp->nm_vers >= NFS_VER4) {
			cb.rcb_args.stategenid = nmp->nm_stategenid;
		}
#endif
		error = nmp->nm_funcs->nf_read_rpc_async(np, NBOFF(bp) + offset, length, thd, cred, &cb, &rreq);
		if (!error) {
			if (IS_VALID_CRED(cred)) {
				kauth_cred_unref(&cred);
			}
			if (!cb.rcb_func) {
				/* if !async we'll need to wait for this RPC to finish */
				req = rreq;
				rreq = NULL;
				goto finish;
			}
			nfs_request_rele(req);
			/*
			 * We're done here.
			 * Outstanding RPC count is unchanged.
			 * Callback will be called when RPC is done.
			 */
			return;
		}
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
	}

out:
	/* drop the extra nfsreq reference taken for async callbacks */
	if (cb.rcb_func) {
		nfs_request_rele(req);
	}
	if (IS_VALID_CRED(cred)) {
		kauth_cred_unref(&cred);
	}

	/*
	 * Decrement outstanding RPC count on buffer
	 * and call nfs_buf_read_finish on last RPC.
	 *
	 * (Note: when there are multiple async RPCs issued for a
	 * buffer we need nfs_buffer_mutex to avoid problems when
	 * aborting a partially-initiated set of RPCs)
	 */

	multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC);
	if (multasyncrpc) {
		lck_mtx_lock(&nfs_buf_mutex);
	}

	bp->nb_rpcs--;
	finished = (bp->nb_rpcs == 0);

	if (multasyncrpc) {
		lck_mtx_unlock(&nfs_buf_mutex);
	}

	if (finished) {
		/* last RPC for this buffer: complete it and wake any canceller */
		if (multasyncrpc) {
			wakeme = &bp->nb_rpcs;
		}
		nfs_buf_read_finish(bp);
		if (wakeme) {
			wakeup(wakeme);
		}
	}
}
2013 
2014 /*
2015  * Do buffer readahead.
2016  * Initiate async I/O to read buffers not in cache.
2017  */
int
nfs_buf_readahead(nfsnode_t np, int ioflag, daddr64_t *rabnp, daddr64_t lastrabn, thread_t thd, kauth_cred_t cred)
{
	struct nfsmount *nmp = NFSTONMP(np);
	struct nfsbuf *bp;
	int error = 0;
	uint32_t nra;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* readahead disabled for this mount */
	if (nmp->nm_readahead <= 0) {
		return 0;
	}
	/* already read ahead past the requested range */
	if (*rabnp > lastrabn) {
		return 0;
	}

	/* read ahead at most nm_readahead blocks, up through lastrabn */
	for (nra = 0; (nra < nmp->nm_readahead) && (*rabnp <= lastrabn); nra++, *rabnp = *rabnp + 1) {
		/* check if block exists and is valid. */
		if ((*rabnp * nmp->nm_biosize) >= (off_t)np->n_size) {
			/* stop reading ahead if we're beyond EOF */
			*rabnp = lastrabn;
			break;
		}
		/* NBLK_NOWAIT: don't block on a busy buffer; bp comes back NULL instead */
		error = nfs_buf_get(np, *rabnp, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
		if (error) {
			break;
		}
		nfs_node_lock_force(np);
		np->n_lastrahead = *rabnp;
		nfs_node_unlock(np);
		if (!bp) {
			continue;
		}
		/*
		 * For IO_NOCACHE, discard a clean cached buffer (unless it
		 * was itself a nocache readahead) so it gets re-read.
		 */
		if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE) &&
		    !nfs_buf_pgs_is_set(&bp->nb_dirty) && !ISSET(bp->nb_flags, (NB_DELWRI | NB_NCRDAHEAD))) {
			CLR(bp->nb_flags, NB_CACHE);
			NBPGS_ERASE(&bp->nb_valid);
			bp->nb_validoff = bp->nb_validend = -1;
		}
		/* only issue a read if the buffer holds no cached or dirty data */
		if ((bp->nb_dirtyend <= 0) && !nfs_buf_pgs_is_set(&bp->nb_dirty) &&
		    !ISSET(bp->nb_flags, (NB_CACHE | NB_DELWRI))) {
			SET(bp->nb_flags, (NB_READ | NB_ASYNC));
			if (ioflag & IO_NOCACHE) {
				/* flag it so the buffer is tossed after the nocache read */
				SET(bp->nb_flags, NB_NCRDAHEAD);
			}
			/* give the buffer a read credential if it doesn't have one */
			if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) {
				kauth_cred_ref(cred);
				bp->nb_rcred = cred;
			}
			/* async read: nfs_buf_read() releases the buffer on completion */
			if ((error = nfs_buf_read(bp))) {
				break;
			}
			continue;
		}
		/* buffer already has data; nothing to read ahead */
		nfs_buf_release(bp, 1);
	}
	return error;
}
2078 
2079 /*
2080  * NFS buffer I/O for reading files.
2081  */
/*
 * nfs_bioread(np, uio, ioflag, ctx)
 *
 * Biocache read path for regular files: satisfy the uio from cached
 * pages/buffers when possible, issuing read RPCs and readahead as needed.
 * Returns 0 on success or an errno-style error.
 */
int
nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx)
{
	vnode_t vp = NFSTOV(np);
	struct nfsbuf *bp = NULL;
	struct nfsmount *nmp = VTONMP(vp);
	daddr64_t lbn, rabn = 0, lastrabn, maxrabn = -1;
	off_t diff, on = 0, n = 0;
	int error = 0, n32;
	int nfsvers, biosize, modified, readaheads = 0;
	thread_t thd;
	kauth_cred_t cred;
	int64_t io_resid;

	FSDBG_TOP(514, np, uio_offset(uio), uio_resid(uio), ioflag);

	nfsvers = nmp->nm_vers;
	biosize = nmp->nm_biosize;
	thd = vfs_context_thread(ctx);
	cred = vfs_context_ucred(ctx);

	/* only regular files go through the biocache read path */
	if (vnode_vtype(vp) != VREG) {
		printf("nfs_bioread: type %x unexpected\n", vnode_vtype(vp));
		FSDBG_BOT(514, np, 0xd1e0016, 0, EINVAL);
		return EINVAL;
	}

	/*
	 * For NFS, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 *
	 * If the file has changed since the last read RPC or you have
	 * written to the file, you may have lost data cache consistency
	 * with the server.  So, check for a change, and flush all of the
	 * file's data out of the cache.
	 * NB: This implies that cache data can be read when up to
	 * NFS_MAXATTRTIMO seconds out of date. If you find that you
	 * need current attributes, nfs_getattr() can be forced to fetch
	 * new attributes (via NATTRINVALIDATE() or NGA_UNCACHED).
	 */

	if (ISSET(np->n_flag, NUPDATESIZE)) {
		/* flush any pending deferred size update before reading */
		nfs_data_update_size(np, 0);
	}

	if ((error = nfs_node_lock(np))) {
		FSDBG_BOT(514, np, 0xd1e0222, 0, error);
		return error;
	}

	/* a previous write error may require flushing all cached buffers */
	if (np->n_flag & NNEEDINVALIDATE) {
		np->n_flag &= ~NNEEDINVALIDATE;
		nfs_node_unlock(np);
		error = nfs_vinvalbuf1(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
		if (!error) {
			error = nfs_node_lock(np);
		}
		if (error) {
			FSDBG_BOT(514, np, 0xd1e0322, 0, error);
			return error;
		}
	}

	modified = (np->n_flag & NMODIFIED);
	nfs_node_unlock(np);
	/* nfs_getattr() will check changed and purge caches */
	error = nfs_getattr(np, NULL, ctx, modified ? NGA_UNCACHED : NGA_CACHED);
	if (error) {
		FSDBG_BOT(514, np, 0xd1e0004, 0, error);
		return error;
	}

	if (uio_resid(uio) == 0) {
		FSDBG_BOT(514, np, 0xd1e0001, 0, 0);
		return 0;
	}
	if (uio_offset(uio) < 0) {
		FSDBG_BOT(514, np, 0xd1e0002, 0, EINVAL);
		return EINVAL;
	}

	/*
	 * set up readahead - which may be limited by:
	 * + current request length (for IO_NOCACHE)
	 * + readahead setting
	 * + file size
	 */
	if (nmp->nm_readahead > 0) {
		off_t end = uio_offset(uio) + uio_resid(uio);
		if (end > (off_t)np->n_size) {
			end = np->n_size;
		}
		rabn = uio_offset(uio) / biosize;
		maxrabn = (end - 1) / biosize;
		nfs_node_lock_force(np);
		/* only extend readahead past the request for sequential, cached reads */
		if (!(ioflag & IO_NOCACHE) &&
		    (!rabn || (rabn == np->n_lastread) || (rabn == (np->n_lastread + 1)))) {
			maxrabn += nmp->nm_readahead;
			if ((maxrabn * biosize) >= (off_t)np->n_size) {
				maxrabn = ((off_t)np->n_size - 1) / biosize;
			}
		}
		if (maxrabn < np->n_lastrahead) {
			np->n_lastrahead = -1;
		}
		/* don't re-issue readahead for blocks already read ahead */
		if (rabn < np->n_lastrahead) {
			rabn = np->n_lastrahead + 1;
		}
		nfs_node_unlock(np);
	} else {
		rabn = maxrabn = 0;
	}

	do {
		nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
		lbn = uio_offset(uio) / biosize;

		/*
		 * Copy directly from any cached pages without grabbing the bufs.
		 * (If we are NOCACHE and we've issued readahead requests, we need
		 * to grab the NB_NCRDAHEAD bufs to drop them.)
		 */
		if ((!(ioflag & IO_NOCACHE) || !readaheads) && uio_isuserspace(uio)) {
			io_resid = uio_resid(uio);
			diff = np->n_size - uio_offset(uio);
			if (diff < io_resid) {
				io_resid = diff;
			}
			if (io_resid > 0) {
				int count = (io_resid > INT_MAX) ? INT_MAX : (int)io_resid;
				error = cluster_copy_ubc_data(vp, uio, &count, 0);
				if (error) {
					nfs_data_unlock(np);
					FSDBG_BOT(514, np, uio_offset(uio), 0xcacefeed, error);
					return error;
				}
			}
			/* count any biocache reads that we just copied directly */
			if (lbn != (uio_offset(uio) / biosize)) {
				OSAddAtomic64(NFS_ROUND_BLOCK(uio_offset(uio), biosize) - lbn, &nfsclntstats.biocache_reads);
				FSDBG(514, np, 0xcacefeed, uio_offset(uio), error);
			}
		}

		/* recompute block number and offset-within-block after the UBC copy */
		lbn = uio_offset(uio) / biosize;
		on = uio_offset(uio) % biosize;
		nfs_node_lock_force(np);
		np->n_lastread = (uio_offset(uio) - 1) / biosize;
		nfs_node_unlock(np);

		/* done if the request is satisfied or we've reached EOF */
		if ((uio_resid(uio) <= 0) || (uio_offset(uio) >= (off_t)np->n_size)) {
			nfs_data_unlock(np);
			FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), 0xaaaaaaaa);
			return 0;
		}

		/* adjust readahead block number, if necessary */
		if (rabn < lbn) {
			rabn = lbn;
		}
		lastrabn = MIN(maxrabn, lbn + nmp->nm_readahead);
		if (rabn <= lastrabn) { /* start readaheads */
			error = nfs_buf_readahead(np, ioflag, &rabn, lastrabn, thd, cred);
			if (error) {
				nfs_data_unlock(np);
				FSDBG_BOT(514, np, 0xd1e000b, 1, error);
				return error;
			}
			readaheads = 1;
			OSAddAtomic64(rabn - lbn, &nfsclntstats.biocache_reads);
		} else {
			OSAddAtomic64(1, &nfsclntstats.biocache_reads);
		}

		/*
		 * If the block is in the cache and has the required data
		 * in a valid region, just copy it out.
		 * Otherwise, get the block and write back/read in,
		 * as required.
		 */
again:
		/* n = number of bytes to transfer from this block, clipped to EOF */
		io_resid = uio_resid(uio);
		n = (io_resid > (biosize - on)) ? (biosize - on) : io_resid;
		diff = np->n_size - uio_offset(uio);
		if (diff < n) {
			n = diff;
		}

		error = nfs_buf_get(np, lbn, biosize, thd, NBLK_READ, &bp);
		if (error) {
			nfs_data_unlock(np);
			FSDBG_BOT(514, np, 0xd1e000c, 0, error);
			return error;
		}

		if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE)) {
			/*
			 * IO_NOCACHE found a cached buffer.
			 * Flush the buffer if it's dirty.
			 * Invalidate the data if it wasn't just read
			 * in as part of a "nocache readahead".
			 */
			if (nfs_buf_pgs_is_set(&bp->nb_dirty) || (bp->nb_dirtyend > 0)) {
				/* so write the buffer out and try again */
				SET(bp->nb_flags, NB_NOCACHE);
				goto flushbuffer;
			}
			if (ISSET(bp->nb_flags, NB_NCRDAHEAD)) {
				CLR(bp->nb_flags, NB_NCRDAHEAD);
				SET(bp->nb_flags, NB_NOCACHE);
			}
		}

		/* if any pages are valid... */
		if (nfs_buf_pgs_is_set(&bp->nb_valid)) {
			/* ...check for any invalid pages in the read range */
			off_t pg, firstpg, lastpg, dirtypg;
			dirtypg = firstpg = lastpg = -1;
			pg = on / PAGE_SIZE;
			while (pg <= (on + n - 1) / PAGE_SIZE) {
				if (!NBPGVALID(bp, pg)) {
					if (firstpg < 0) {
						firstpg = pg;
					}
					lastpg = pg;
				} else if (firstpg >= 0 && dirtypg < 0 && NBPGDIRTY(bp, pg)) {
					dirtypg = pg;
				}
				pg++;
			}

			/* if there are no invalid pages, we're all set */
			if (firstpg < 0) {
				if (bp->nb_validoff < 0) {
					/* valid range isn't set up, so */
					/* set it to what we know is valid */
					bp->nb_validoff = trunc_page_64(on);
					bp->nb_validend = round_page_64(on + n);
					nfs_buf_normalize_valid_range(np, bp);
				}
				goto buffer_ready;
			}

			/* there are invalid pages in the read range */
			if (((dirtypg > firstpg) && (dirtypg < lastpg)) ||
			    (((firstpg * PAGE_SIZE) < bp->nb_dirtyend) && (((lastpg + 1) * PAGE_SIZE) > bp->nb_dirtyoff))) {
				/* there are also dirty page(s) (or range) in the read range, */
				/* so write the buffer out and try again */
flushbuffer:
				CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL));
				SET(bp->nb_flags, NB_ASYNC);
				if (!IS_VALID_CRED(bp->nb_wcred)) {
					/* NOTE(review): unlike nb_rcred below, cred's validity is not
					 * checked here before taking a reference — confirm callers
					 * always supply a valid cred on this path. */
					kauth_cred_ref(cred);
					bp->nb_wcred = cred;
				}
				error = nfs_buf_write(bp);
				if (error) {
					nfs_data_unlock(np);
					FSDBG_BOT(514, np, 0xd1e000d, 0, error);
					return error;
				}
				goto again;
			}
			if (!nfs_buf_pgs_is_set(&bp->nb_dirty) && bp->nb_dirtyend <= 0 &&
			    (lastpg - firstpg + 1) > (biosize / PAGE_SIZE) / 2) {
				/* we need to read in more than half the buffer and the */
				/* buffer's not dirty, so just fetch the whole buffer */
				NBPGS_ERASE(&bp->nb_valid);
			} else {
				/* read the page range in */
				uio_t auio;

				NFS_BUF_MAP(bp);
				auio = uio_create(1, (NBOFF(bp) + firstpg * PAGE_SIZE_64), UIO_SYSSPACE, UIO_READ);
				if (!auio) {
					error = ENOMEM;
				} else {
					NFS_UIO_ADDIOV(auio, CAST_USER_ADDR_T(bp->nb_data + (firstpg * PAGE_SIZE)),
					    ((lastpg - firstpg + 1) * PAGE_SIZE));
					error = nfs_read_rpc(np, auio, ctx);
				}
				if (error) {
					if (ioflag & IO_NOCACHE) {
						SET(bp->nb_flags, NB_NOCACHE);
					}
					/* Free allocated uio buffer */
					uio_free(auio);
					nfs_buf_release(bp, 1);
					nfs_data_unlock(np);
					FSDBG_BOT(514, np, 0xd1e000e, 0, error);
					return error;
				}
				/* Make sure that the valid range is set to cover this read. */
				bp->nb_validoff = trunc_page_64(on);
				bp->nb_validend = round_page_64(on + n);
				nfs_buf_normalize_valid_range(np, bp);
				if (uio_resid(auio) > 0) {
					/* if short read, must have hit EOF, */
					/* so zero the rest of the range */
					bzero(CAST_DOWN(caddr_t, uio_curriovbase(auio)), uio_resid(auio));
				}
				/* mark the pages (successfully read) as valid */
				for (pg = firstpg; pg <= lastpg; pg++) {
					NBPGVALID_SET(bp, pg);
				}

				/* Free allocated uio buffer */
				uio_free(auio);
			}
		}
		/* if no pages are valid, read the whole block */
		if (!nfs_buf_pgs_is_set(&bp->nb_valid)) {
			if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) {
				kauth_cred_ref(cred);
				bp->nb_rcred = cred;
			}
			SET(bp->nb_flags, NB_READ);
			CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL));
			error = nfs_buf_read(bp);
			if (ioflag & IO_NOCACHE) {
				SET(bp->nb_flags, NB_NOCACHE);
			}
			if (error) {
				nfs_data_unlock(np);
				nfs_buf_release(bp, 1);
				FSDBG_BOT(514, np, 0xd1e000f, 0, error);
				return error;
			}
		}
buffer_ready:
		/* validate read range against valid range and clip */
		if (bp->nb_validend > 0) {
			diff = (on >= bp->nb_validend) ? 0 : (bp->nb_validend - on);
			if (diff < n) {
				n = diff;
			}
		}
		if (n > 0) {
			NFS_BUF_MAP(bp);
			/* uiomove takes an int count; split transfers larger than INT_MAX */
			n32 = n > INT_MAX ? INT_MAX : (int)n;
			error = uiomove(bp->nb_data + on, n32, uio);
			if (!error && n > n32) {
				error = uiomove(bp->nb_data + on + n32, (int)(n - n32), uio);
			}
		}


		nfs_buf_release(bp, 1);
		nfs_data_unlock(np);
		nfs_node_lock_force(np);
		np->n_lastread = (uio_offset(uio) - 1) / biosize;
		nfs_node_unlock(np);
	} while (error == 0 && uio_resid(uio) > 0 && n > 0);
	FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), error);
	return error;
}
2439 
2440 /*
2441  * limit the number of outstanding async I/O writes
2442  */
2443 int
nfs_async_write_start(struct nfsmount * nmp)2444 nfs_async_write_start(struct nfsmount *nmp)
2445 {
2446 	int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
2447 	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
2448 
2449 	if (nfs_max_async_writes <= 0) {
2450 		return 0;
2451 	}
2452 	lck_mtx_lock(&nmp->nm_lock);
2453 	while ((nfs_max_async_writes > 0) && (nmp->nm_asyncwrites >= nfs_max_async_writes)) {
2454 		if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) {
2455 			break;
2456 		}
2457 		msleep(&nmp->nm_asyncwrites, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsasyncwrites", &ts);
2458 		slpflag = 0;
2459 	}
2460 	if (!error) {
2461 		nmp->nm_asyncwrites++;
2462 	}
2463 	lck_mtx_unlock(&nmp->nm_lock);
2464 	return error;
2465 }
2466 void
nfs_async_write_done(struct nfsmount * nmp)2467 nfs_async_write_done(struct nfsmount *nmp)
2468 {
2469 	if (nmp->nm_asyncwrites <= 0) {
2470 		return;
2471 	}
2472 	lck_mtx_lock(&nmp->nm_lock);
2473 	if (nmp->nm_asyncwrites-- >= nfs_max_async_writes) {
2474 		wakeup(&nmp->nm_asyncwrites);
2475 	}
2476 	lck_mtx_unlock(&nmp->nm_lock);
2477 }
2478 
2479 /*
2480  * write (or commit) the given NFS buffer
2481  *
2482  * Commit the buffer if we can.
2483  * Write out any dirty range.
2484  * If any dirty pages remain, write them out.
2485  * Mark buffer done.
2486  *
2487  * For async requests, all the work beyond sending the initial
2488  * write RPC is handled in the RPC callback(s).
2489  */
int
nfs_buf_write(struct nfsbuf *bp)
{
	int error = 0, oldflags, async;
	nfsnode_t np;
	thread_t thd;
	kauth_cred_t cred;
	proc_t p = current_proc();
	int iomode;
	off_t doff, dend, firstpg, lastpg;

	FSDBG_TOP(553, bp, NBOFF(bp), bp->nb_flags, 0);

	/* caller must hand us the buffer busy (locked) */
	if (!ISSET(bp->nb_lflags, NBL_BUSY)) {
		panic("nfs_buf_write: buffer is not busy???");
	}

	np = bp->nb_np;
	async = ISSET(bp->nb_flags, NB_ASYNC);
	oldflags = bp->nb_flags;

	CLR(bp->nb_flags, (NB_READ | NB_DONE | NB_ERROR | NB_DELWRI));
	if (ISSET(oldflags, NB_DELWRI)) {
		/* no longer a delayed write; update the count and wake waiters */
		lck_mtx_lock(&nfs_buf_mutex);
		nfs_nbdwrite--;
		NFSBUFCNTCHK();
		lck_mtx_unlock(&nfs_buf_mutex);
		wakeup(&nfs_nbdwrite);
	}

	/* move to clean list */
	if (ISSET(oldflags, (NB_ASYNC | NB_DELWRI))) {
		lck_mtx_lock(&nfs_buf_mutex);
		if (bp->nb_vnbufs.le_next != NFSNOLIST) {
			LIST_REMOVE(bp, nb_vnbufs);
		}
		LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
		lck_mtx_unlock(&nfs_buf_mutex);
	}
	nfs_node_lock_force(np);
	np->n_numoutput++;
	nfs_node_unlock(np);
	vnode_startwrite(NFSTOV(np));

	/* account the write against the issuing process */
	if (p && p->p_stats) {
		OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock);
	}

	cred = bp->nb_wcred;
	if (!IS_VALID_CRED(cred) && ISSET(bp->nb_flags, NB_READ)) {
		/* NOTE(review): NB_READ was cleared above, so this fallback
		 * appears unreachable — confirm before relying on it. */
		cred = bp->nb_rcred;  /* shouldn't really happen, but... */
	}
	if (IS_VALID_CRED(cred)) {
		kauth_cred_ref(cred);
	}
	thd = async ? NULL : current_thread();

	/* We need to make sure the pages are locked before doing I/O.  */
	if (!ISSET(bp->nb_flags, NB_META)) {
		if (UBCINFOEXISTS(NFSTOV(np))) {
			if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
				error = nfs_buf_upl_setup(bp);
				if (error) {
					printf("nfs_buf_write: upl create failed %d\n", error);
					SET(bp->nb_flags, NB_ERROR);
					bp->nb_error = error = EIO;
					nfs_buf_iodone(bp);
					goto out;
				}
				nfs_buf_upl_check(bp);
			}
		} else {
			/* We should never be in nfs_buf_write() with no UBCINFO. */
			printf("nfs_buf_write: ubcinfo already gone\n");
			SET(bp->nb_flags, NB_ERROR);
			bp->nb_error = error = EIO;
			nfs_buf_iodone(bp);
			goto out;
		}
	}

	/* If NB_NEEDCOMMIT is set, a commit RPC may do the trick. */
	if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
		/* may clear NB_NEEDCOMMIT if the server's write verifier changed */
		nfs_buf_check_write_verifier(np, bp);
	}
	if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
		struct nfsmount *nmp = NFSTONMP(np);
		if (nfs_mount_gone(nmp)) {
			SET(bp->nb_flags, NB_ERROR);
			bp->nb_error = error = EIO;
			nfs_buf_iodone(bp);
			goto out;
		}
		SET(bp->nb_flags, NB_WRITEINPROG);
		error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp) + bp->nb_dirtyoff,
		    bp->nb_dirtyend - bp->nb_dirtyoff, bp->nb_wcred, bp->nb_verf);
		CLR(bp->nb_flags, NB_WRITEINPROG);
		if (error) {
			/* a stale write verifier just means we must rewrite, not fail */
			if (error != NFSERR_STALEWRITEVERF) {
				SET(bp->nb_flags, NB_ERROR);
				bp->nb_error = error;
			}
			nfs_buf_iodone(bp);
			goto out;
		}
		/* commit succeeded: the dirty range is now on stable storage */
		bp->nb_dirtyoff = bp->nb_dirtyend = 0;
		CLR(bp->nb_flags, NB_NEEDCOMMIT);
		nfs_node_lock_force(np);
		np->n_needcommitcnt--;
		CHECK_NEEDCOMMITCNT(np);
		nfs_node_unlock(np);
	}
	if (!error && (bp->nb_dirtyend > 0)) {
		/* sanity check the dirty range */
		if (NBOFF(bp) + bp->nb_dirtyend > (off_t) np->n_size) {
			bp->nb_dirtyend = np->n_size - NBOFF(bp);
			if (bp->nb_dirtyoff >= bp->nb_dirtyend) {
				bp->nb_dirtyoff = bp->nb_dirtyend = 0;
			}
		}
	}
	if (!error && (bp->nb_dirtyend > 0)) {
		/* there's a dirty range that needs to be written out */
		nfsbufpgs pagemask, pagemaskand;
		NFS_BUF_MAP(bp);

		doff = bp->nb_dirtyoff;
		dend = bp->nb_dirtyend;

		/* if doff page is dirty, move doff to start of page */
		if (NBPGDIRTY(bp, doff / PAGE_SIZE)) {
			doff -= doff & PAGE_MASK;
		}
		/* try to expand write range to include preceding dirty pages */
		if (!(doff & PAGE_MASK)) {
			while ((doff > 0) && NBPGDIRTY(bp, (doff - 1) / PAGE_SIZE)) {
				doff -= PAGE_SIZE;
			}
		}
		/* if dend page is dirty, move dend to start of next page */
		if ((dend & PAGE_MASK) && NBPGDIRTY(bp, dend / PAGE_SIZE)) {
			dend = round_page_64(dend);
		}
		/* try to expand write range to include trailing dirty pages */
		if (!(dend & PAGE_MASK)) {
			while ((dend < (int)bp->nb_bufsize) && NBPGDIRTY(bp, dend / PAGE_SIZE)) {
				dend += PAGE_SIZE;
			}
		}
		/* make sure to keep dend clipped to EOF */
		if ((NBOFF(bp) + dend) > (off_t) np->n_size) {
			dend = np->n_size - NBOFF(bp);
		}
		/* calculate range of complete pages being written */
		if (dend > doff) {
			firstpg = doff / PAGE_SIZE;
			lastpg = (dend - 1) / PAGE_SIZE;
			/* calculate mask for that page range */
			nfs_buf_pgs_set_pages_between(&pagemask, firstpg, lastpg + 1);
		} else {
			NBPGS_ERASE(&pagemask);
		}

		/*
		 * compare page mask to nb_dirty; if there are other dirty pages
		 * then write FILESYNC; otherwise, write UNSTABLE if async and
		 * not needcommit/stable; otherwise write FILESYNC
		 */
		nfs_buf_pgs_bit_not(&pagemask);
		nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &pagemaskand);
		if (nfs_buf_pgs_is_set(&pagemaskand)) {
			iomode = NFS_WRITE_FILESYNC;
		} else if ((bp->nb_flags & (NB_ASYNC | NB_NEEDCOMMIT | NB_STABLE)) == NB_ASYNC) {
			iomode = NFS_WRITE_UNSTABLE;
		} else {
			iomode = NFS_WRITE_FILESYNC;
		}

		/* write the whole contiguous dirty range */
		bp->nb_offio = doff;
		bp->nb_endio = dend;

		OSAddAtomic64(1, &nfsclntstats.write_bios);

		SET(bp->nb_flags, NB_WRITEINPROG);
		error = nfs_buf_write_rpc(bp, iomode, thd, cred);
		/*
		 * For async I/O, the callbacks will finish up the
		 * write and push out any dirty pages.  Otherwise,
		 * the write has already been finished and any dirty
		 * pages pushed out.
		 */
	} else {
		if (!error && nfs_buf_pgs_is_set(&bp->nb_dirty)) { /* write out any dirty pages */
			error = nfs_buf_write_dirty_pages(bp, thd, cred);
		}
		nfs_buf_iodone(bp);
	}
	/* note: bp is still valid only for !async case */
out:
	if (!async) {
		error = nfs_buf_iowait(bp);
		/* move to clean list */
		if (oldflags & NB_DELWRI) {
			lck_mtx_lock(&nfs_buf_mutex);
			if (bp->nb_vnbufs.le_next != NFSNOLIST) {
				LIST_REMOVE(bp, nb_vnbufs);
			}
			LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
			lck_mtx_unlock(&nfs_buf_mutex);
		}
		FSDBG_BOT(553, bp, NBOFF(bp), bp->nb_flags, error);
		nfs_buf_release(bp, 1);
		/* check if we need to invalidate (and we can) */
		if ((np->n_flag & NNEEDINVALIDATE) &&
		    !(np->n_bflag & (NBINVALINPROG | NBFLUSHINPROG))) {
			int invalidate = 0;
			nfs_node_lock_force(np);
			if (np->n_flag & NNEEDINVALIDATE) {
				invalidate = 1;
				np->n_flag &= ~NNEEDINVALIDATE;
			}
			nfs_node_unlock(np);
			if (invalidate) {
				/*
				 * There was a write error and we need to
				 * invalidate attrs and flush buffers in
				 * order to sync up with the server.
				 * (if this write was extending the file,
				 * we may no longer know the correct size)
				 *
				 * But we couldn't call vinvalbuf while holding
				 * the buffer busy.  So we call vinvalbuf() after
				 * releasing the buffer.
				 */
				nfs_vinvalbuf2(NFSTOV(np), V_SAVE | V_IGNORE_WRITEERR, thd, cred, 1);
			}
		}
	}

	if (IS_VALID_CRED(cred)) {
		kauth_cred_unref(&cred);
	}
	return error;
}
2735 
2736 /*
2737  * finish the writing of a buffer
2738  */
/*
 * nfs_buf_write_finish(bp, thd, cred)
 *
 * Complete a buffer write: clear dirty state for the range written,
 * track unstable-write commit requirements, and handle error/interrupt
 * cases before calling nfs_buf_iodone().
 */
void
nfs_buf_write_finish(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred)
{
	nfsnode_t np = bp->nb_np;
	int error = (bp->nb_flags & NB_ERROR) ? bp->nb_error : 0;
	off_t firstpg, lastpg;

	/* interrupted writes are retried, not treated as hard errors */
	if ((error == EINTR) || (error == ERESTART)) {
		CLR(bp->nb_flags, NB_ERROR);
		SET(bp->nb_flags, NB_EINTR);
	}

	if (!error) {
		nfsbufpgs pagemask;
		/* calculate range of complete pages being written */
		if (bp->nb_endio > bp->nb_offio) {
			firstpg = bp->nb_offio / PAGE_SIZE;
			lastpg = (bp->nb_endio - 1) / PAGE_SIZE;
			/* calculate mask for that page range written */
			nfs_buf_pgs_set_pages_between(&pagemask, firstpg, lastpg + 1);
		} else {
			NBPGS_ERASE(&pagemask);
		}
		/* clear dirty bits for pages we've written */
		nfs_buf_pgs_bit_not(&pagemask);
		nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &bp->nb_dirty);
	}

	/* manage needcommit state */
	if (!error && (bp->nb_commitlevel == NFS_WRITE_UNSTABLE)) {
		if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
			nfs_node_lock_force(np);
			np->n_needcommitcnt++;
			nfs_node_unlock(np);
			SET(bp->nb_flags, NB_NEEDCOMMIT);
		}
		/* make sure nb_dirtyoff/nb_dirtyend reflect actual range written */
		bp->nb_dirtyoff = bp->nb_offio;
		bp->nb_dirtyend = bp->nb_endio;
	} else if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
		/* stable write (or error): commit no longer needed */
		nfs_node_lock_force(np);
		np->n_needcommitcnt--;
		CHECK_NEEDCOMMITCNT(np);
		nfs_node_unlock(np);
		CLR(bp->nb_flags, NB_NEEDCOMMIT);
	}

	CLR(bp->nb_flags, NB_WRITEINPROG);

	/*
	 * For an unstable write, the buffer is still treated as dirty until
	 * a commit (or stable (re)write) is performed.  Buffers needing only
	 * a commit are marked with the NB_DELWRI and NB_NEEDCOMMIT flags.
	 *
	 * If the write was interrupted we set NB_EINTR.  Don't set NB_ERROR
	 * because that would cause the buffer to be dropped.  The buffer is
	 * still valid and simply needs to be written again.
	 */
	if ((error == EINTR) || (error == ERESTART) || (!error && (bp->nb_flags & NB_NEEDCOMMIT))) {
		CLR(bp->nb_flags, NB_INVAL);
		if (!ISSET(bp->nb_flags, NB_DELWRI)) {
			SET(bp->nb_flags, NB_DELWRI);
			lck_mtx_lock(&nfs_buf_mutex);
			nfs_nbdwrite++;
			NFSBUFCNTCHK();
			lck_mtx_unlock(&nfs_buf_mutex);
		}
		/*
		 * Since for the NB_ASYNC case, we've reassigned the buffer to the
		 * clean list, we have to reassign it back to the dirty one. Ugh.
		 */
		if (ISSET(bp->nb_flags, NB_ASYNC)) {
			/* move to dirty list */
			lck_mtx_lock(&nfs_buf_mutex);
			if (bp->nb_vnbufs.le_next != NFSNOLIST) {
				LIST_REMOVE(bp, nb_vnbufs);
			}
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			lck_mtx_unlock(&nfs_buf_mutex);
		}
	} else {
		/* either there's an error or we don't need to commit */
		if (error) {
			/*
			 * There was a write error and we need to invalidate
			 * attrs and flush buffers in order to sync up with the
			 * server.  (if this write was extending the file, we
			 * may no longer know the correct size)
			 *
			 * But we can't call vinvalbuf while holding this
			 * buffer busy.  Set a flag to do it after releasing
			 * the buffer.
			 */
			nfs_node_lock_force(np);
			np->n_error = error;
			np->n_flag |= (NWRITEERR | NNEEDINVALIDATE);
			NATTRINVALIDATE(np);
			nfs_node_unlock(np);
		}
		/* clear the dirty range */
		bp->nb_dirtyoff = bp->nb_dirtyend = 0;
	}

	/* push out any pages still individually marked dirty */
	if (!error && nfs_buf_pgs_is_set(&bp->nb_dirty)) {
		nfs_buf_write_dirty_pages(bp, thd, cred);
	}
	nfs_buf_iodone(bp);
}
2847 
2848 /*
2849  * write out any pages marked dirty in a buffer
2850  *
2851  * We do use unstable writes and follow up with a commit.
2852  * If we catch the write verifier changing we'll restart
2853  * do the writes filesync.
2854  */
2855 int
nfs_buf_write_dirty_pages(struct nfsbuf * bp,thread_t thd,kauth_cred_t cred)2856 nfs_buf_write_dirty_pages(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred)
2857 {
2858 	nfsnode_t np = bp->nb_np;
2859 	struct nfsmount *nmp = NFSTONMP(np);
2860 	int error = 0, commit, iomode, iomode2, len, pg, count, npages, off;
2861 	nfsbufpgs dirty;
2862 	uint64_t wverf;
2863 	uio_t auio;
2864 
2865 	if (!nfs_buf_pgs_is_set(&bp->nb_dirty)) {
2866 		return 0;
2867 	}
2868 
2869 	/* there are pages marked dirty that need to be written out */
2870 	OSAddAtomic64(1, &nfsclntstats.write_bios);
2871 	NFS_BUF_MAP(bp);
2872 	SET(bp->nb_flags, NB_WRITEINPROG);
2873 	npages = bp->nb_bufsize / PAGE_SIZE;
2874 	iomode = NFS_WRITE_UNSTABLE;
2875 
2876 	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_WRITE);
2877 
2878 again:
2879 	NBPGS_COPY(&dirty, &bp->nb_dirty);
2880 	wverf = bp->nb_verf;
2881 	commit = NFS_WRITE_FILESYNC;
2882 	for (pg = 0; pg < npages; pg++) {
2883 		if (!NBPGDIRTY(bp, pg)) {
2884 			continue;
2885 		}
2886 		count = 1;
2887 		while (((pg + count) < npages) && NBPGDIRTY(bp, pg + count)) {
2888 			count++;
2889 		}
2890 		/* write count pages starting with page pg */
2891 		off = pg * PAGE_SIZE;
2892 		len = count * PAGE_SIZE;
2893 		/* clip writes to EOF */
2894 		if (NBOFF(bp) + off + len > (off_t) np->n_size) {
2895 			len -= (NBOFF(bp) + off + len) - np->n_size;
2896 		}
2897 		if (len > 0) {
2898 			iomode2 = iomode;
2899 			uio_reset(auio, NBOFF(bp) + off, UIO_SYSSPACE, UIO_WRITE);
2900 			uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + off), len);
2901 			error = nfs_write_rpc2(np, auio, thd, cred, &iomode2, &bp->nb_verf);
2902 			if (error) {
2903 				break;
2904 			}
2905 			if (iomode2 < commit) { /* Retain the lowest commitment level returned. */
2906 				commit = iomode2;
2907 			}
2908 			if ((commit != NFS_WRITE_FILESYNC) && (wverf != bp->nb_verf)) {
2909 				/* verifier changed, redo all the writes filesync */
2910 				iomode = NFS_WRITE_FILESYNC;
2911 				goto again;
2912 			}
2913 		}
2914 		/* clear dirty bits */
2915 		while (count--) {
2916 			NBPGS_UNSET(&dirty, pg);
2917 			if (count) { /* leave pg on last page */
2918 				pg++;
2919 			}
2920 		}
2921 	}
2922 	CLR(bp->nb_flags, NB_WRITEINPROG);
2923 
2924 	if (!error && (commit != NFS_WRITE_FILESYNC)) {
2925 		error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp), bp->nb_bufsize, cred, wverf);
2926 		if (error == NFSERR_STALEWRITEVERF) {
2927 			/* verifier changed, so we need to restart all the writes */
2928 			iomode = NFS_WRITE_FILESYNC;
2929 			goto again;
2930 		}
2931 	}
2932 	if (!error) {
2933 		NBPGS_COPY(&bp->nb_dirty, &dirty);
2934 	} else {
2935 		SET(bp->nb_flags, NB_ERROR);
2936 		bp->nb_error = error;
2937 	}
2938 
2939 	/* Free allocated uio buffer */
2940 	uio_free(auio);
2941 
2942 	return error;
2943 }
2944 
2945 /*
2946  * initiate the NFS WRITE RPC(s) for a buffer
2947  */
2948 int
nfs_buf_write_rpc(struct nfsbuf * bp,int iomode,thread_t thd,kauth_cred_t cred)2949 nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred)
2950 {
2951 	struct nfsmount *nmp;
2952 	nfsnode_t np = bp->nb_np;
2953 	int error = 0, nfsvers, async;
2954 	int64_t nrpcs;
2955 	size_t len;
2956 	uint32_t nmwsize;
2957 	struct nfsreq *req;
2958 	struct nfsreq_cbinfo cb;
2959 	uio_t auio;
2960 	off_t offset, length;
2961 
2962 	nmp = NFSTONMP(np);
2963 	if (nfs_mount_gone(nmp)) {
2964 		bp->nb_error = error = ENXIO;
2965 		SET(bp->nb_flags, NB_ERROR);
2966 		nfs_buf_iodone(bp);
2967 		return error;
2968 	}
2969 	nfsvers = nmp->nm_vers;
2970 	nmwsize = nmp->nm_wsize;
2971 
2972 	offset = bp->nb_offio;
2973 	length = bp->nb_endio - bp->nb_offio;
2974 
2975 	/* Note: Can only do async I/O if nfsiods are configured. */
2976 	async = (bp->nb_flags & NB_ASYNC) && (NFSIOD_MAX > 0);
2977 	bp->nb_commitlevel = NFS_WRITE_FILESYNC;
2978 	cb.rcb_func = async ? nfs_buf_write_rpc_finish : NULL;
2979 	cb.rcb_bp = bp;
2980 
2981 	if ((nfsvers == NFS_VER2) && ((NBOFF(bp) + bp->nb_endio) > 0xffffffffLL)) {
2982 		bp->nb_error = error = EFBIG;
2983 		SET(bp->nb_flags, NB_ERROR);
2984 		nfs_buf_iodone(bp);
2985 		return error;
2986 	}
2987 
2988 	if (length == 0) {
2989 		/* We should never get here  */
2990 #if DEVELOPMENT
2991 		printf("nfs_buf_write_rpc: Got request with zero length. np %p, bp %p, offset %lld\n", np, bp, offset);
2992 #else
2993 		printf("nfs_buf_write_rpc: Got request with zero length.\n");
2994 #endif /* DEVELOPMENT */
2995 		nfs_buf_iodone(bp);
2996 		return 0;
2997 	}
2998 
2999 	auio = uio_create(1, NBOFF(bp) + offset, UIO_SYSSPACE, UIO_WRITE);
3000 	NFS_UIO_ADDIOV(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length);
3001 
3002 	bp->nb_rpcs = nrpcs = (length + nmwsize - 1) / nmwsize;
3003 	if (async && (nrpcs > 1)) {
3004 		SET(bp->nb_flags, NB_MULTASYNCRPC);
3005 	} else {
3006 		CLR(bp->nb_flags, NB_MULTASYNCRPC);
3007 	}
3008 
3009 	while (length > 0) {
3010 		if (ISSET(bp->nb_flags, NB_ERROR)) {
3011 			error = bp->nb_error;
3012 			break;
3013 		}
3014 		len = (length > nmwsize) ? nmwsize : (uint32_t)length;
3015 		cb.rcb_args.offset = offset;
3016 		cb.rcb_args.length = len;
3017 #if CONFIG_NFS4
3018 		if (nmp->nm_vers >= NFS_VER4) {
3019 			cb.rcb_args.stategenid = nmp->nm_stategenid;
3020 		}
3021 #endif
3022 		if (async && ((error = nfs_async_write_start(nmp)))) {
3023 			break;
3024 		}
3025 		req = NULL;
3026 		error = nmp->nm_funcs->nf_write_rpc_async(np, auio, len, thd, cred,
3027 		    iomode, &cb, &req);
3028 		if (error) {
3029 			if (async) {
3030 				nfs_async_write_done(nmp);
3031 			}
3032 			break;
3033 		}
3034 		offset += len;
3035 		length -= len;
3036 		if (async) {
3037 			continue;
3038 		}
3039 		nfs_buf_write_rpc_finish(req);
3040 	}
3041 
3042 	if (length > 0) {
3043 		/*
3044 		 * Something bad happened while trying to send the RPCs.
3045 		 * Wait for any outstanding requests to complete.
3046 		 */
3047 		bp->nb_error = error;
3048 		SET(bp->nb_flags, NB_ERROR);
3049 		if (ISSET(bp->nb_flags, NB_MULTASYNCRPC)) {
3050 			nrpcs = (length + nmwsize - 1) / nmwsize;
3051 			lck_mtx_lock(&nfs_buf_mutex);
3052 			bp->nb_rpcs -= nrpcs;
3053 			if (bp->nb_rpcs == 0) {
3054 				/* No RPCs left, so the buffer's done */
3055 				lck_mtx_unlock(&nfs_buf_mutex);
3056 				nfs_buf_write_finish(bp, thd, cred);
3057 			} else {
3058 				/* wait for the last RPC to mark it done */
3059 				while (bp->nb_rpcs > 0) {
3060 					msleep(&bp->nb_rpcs, &nfs_buf_mutex, 0,
3061 					    "nfs_buf_write_rpc_cancel", NULL);
3062 				}
3063 				lck_mtx_unlock(&nfs_buf_mutex);
3064 			}
3065 		} else {
3066 			nfs_buf_write_finish(bp, thd, cred);
3067 		}
3068 		/* It may have just been an interrupt... that's OK */
3069 		if (!ISSET(bp->nb_flags, NB_ERROR)) {
3070 			error = 0;
3071 		}
3072 	}
3073 
3074 	/* Free allocated uio buffer */
3075 	uio_free(auio);
3076 
3077 	return error;
3078 }
3079 
3080 /*
3081  * finish up an NFS WRITE RPC on a buffer
3082  */
3083 void
nfs_buf_write_rpc_finish(struct nfsreq * req)3084 nfs_buf_write_rpc_finish(struct nfsreq *req)
3085 {
3086 	int error = 0, nfsvers, multasyncrpc, finished;
3087 	int committed = NFS_WRITE_FILESYNC;
3088 	uint64_t wverf = 0;
3089 	off_t offset;
3090 	size_t rlen, length;
3091 	void *wakeme = NULL;
3092 	struct nfsreq_cbinfo cb;
3093 	struct nfsreq *wreq = NULL;
3094 	struct nfsbuf *bp;
3095 	struct nfsmount *nmp;
3096 	nfsnode_t np;
3097 	thread_t thd;
3098 	kauth_cred_t cred;
3099 	uio_t auio;
3100 
3101 finish:
3102 	np = req->r_np;
3103 	thd = req->r_thread;
3104 	cred = req->r_cred;
3105 	if (IS_VALID_CRED(cred)) {
3106 		kauth_cred_ref(cred);
3107 	}
3108 	cb = req->r_callback;
3109 	bp = cb.rcb_bp;
3110 	if (cb.rcb_func) { /* take an extra reference on the nfsreq in case we want to resend it later due to grace error */
3111 		nfs_request_ref(req, 0);
3112 	}
3113 
3114 	nmp = NFSTONMP(np);
3115 	if (nfs_mount_gone(nmp)) {
3116 		SET(bp->nb_flags, NB_ERROR);
3117 		bp->nb_error = error = ENXIO;
3118 	}
3119 	if (error || ISSET(bp->nb_flags, NB_ERROR)) {
3120 		/* just drop it */
3121 		nfs_request_async_cancel(req);
3122 		goto out;
3123 	}
3124 	nfsvers = nmp->nm_vers;
3125 
3126 	offset = cb.rcb_args.offset;
3127 	rlen = length = cb.rcb_args.length;
3128 
3129 	/* finish the RPC */
3130 	error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &committed, &rlen, &wverf);
3131 	if ((error == EINPROGRESS) && cb.rcb_func) {
3132 		/* async request restarted */
3133 		if (cb.rcb_func) {
3134 			nfs_request_rele(req);
3135 		}
3136 		if (IS_VALID_CRED(cred)) {
3137 			kauth_cred_unref(&cred);
3138 		}
3139 		return;
3140 	}
3141 #if CONFIG_NFS4
3142 	if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) {
3143 		lck_mtx_lock(&nmp->nm_lock);
3144 		if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args.stategenid == nmp->nm_stategenid)) {
3145 			NP(np, "nfs_buf_write_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery",
3146 			    error, NBOFF(bp) + offset, cb.rcb_args.stategenid, nmp->nm_stategenid);
3147 			nfs_need_recover(nmp, error);
3148 		}
3149 		lck_mtx_unlock(&nmp->nm_lock);
3150 		if (np->n_flag & NREVOKE) {
3151 			error = EIO;
3152 		} else {
3153 			if (error == NFSERR_GRACE) {
3154 				if (cb.rcb_func) {
3155 					/*
3156 					 * For an async I/O request, handle a grace delay just like
3157 					 * jukebox errors.  Set the resend time and queue it up.
3158 					 */
3159 					struct timeval now;
3160 					if (req->r_nmrep.nmc_mhead) {
3161 						mbuf_freem(req->r_nmrep.nmc_mhead);
3162 						req->r_nmrep.nmc_mhead = NULL;
3163 					}
3164 					req->r_error = 0;
3165 					microuptime(&now);
3166 					lck_mtx_lock(&req->r_mtx);
3167 					req->r_resendtime = now.tv_sec + 2;
3168 					req->r_xid = 0;                 // get a new XID
3169 					req->r_flags |= R_RESTART;
3170 					req->r_start = 0;
3171 					nfs_asyncio_resend(req);
3172 					lck_mtx_unlock(&req->r_mtx);
3173 					if (IS_VALID_CRED(cred)) {
3174 						kauth_cred_unref(&cred);
3175 					}
3176 					/* Note: nfsreq reference taken will be dropped later when finished */
3177 					return;
3178 				}
3179 				/* otherwise, just pause a couple seconds and retry */
3180 				tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
3181 			}
3182 			if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
3183 				rlen = 0;
3184 				goto writeagain;
3185 			}
3186 		}
3187 	}
3188 #endif
3189 	if (error) {
3190 		SET(bp->nb_flags, NB_ERROR);
3191 		bp->nb_error = error;
3192 	}
3193 	if (error || (nfsvers == NFS_VER2)) {
3194 		goto out;
3195 	}
3196 	if (rlen <= 0) {
3197 		SET(bp->nb_flags, NB_ERROR);
3198 		bp->nb_error = error = EIO;
3199 		goto out;
3200 	}
3201 
3202 	/* save lowest commit level returned */
3203 	if (committed < bp->nb_commitlevel) {
3204 		bp->nb_commitlevel = committed;
3205 	}
3206 
3207 	/* check the write verifier */
3208 	if (!bp->nb_verf) {
3209 		bp->nb_verf = wverf;
3210 	} else if (bp->nb_verf != wverf) {
3211 		/* verifier changed, so buffer will need to be rewritten */
3212 		bp->nb_flags |= NB_STALEWVERF;
3213 		bp->nb_commitlevel = NFS_WRITE_UNSTABLE;
3214 		bp->nb_verf = wverf;
3215 	}
3216 
3217 	if (!ISSET(bp->nb_flags, NB_STALEWVERF) && rlen > 0 && (bp->nb_offio < (offset + (int)rlen))) {
3218 		bp->nb_offio = offset + rlen;
3219 	}
3220 
3221 	/*
3222 	 * check for a short write
3223 	 *
3224 	 * If the server didn't write all the data, then we
3225 	 * need to issue another write for the rest of it.
3226 	 * (Don't bother if the buffer hit an error or stale wverf.)
3227 	 */
3228 	if ((rlen < length) && !(bp->nb_flags & (NB_STALEWVERF | NB_ERROR))) {
3229 #if CONFIG_NFS4
3230 writeagain:
3231 #endif
3232 		offset += rlen;
3233 		length -= rlen;
3234 
3235 		auio = uio_create(1, NBOFF(bp) + offset, UIO_SYSSPACE, UIO_WRITE);
3236 		uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length);
3237 
3238 		cb.rcb_args.offset = offset;
3239 		cb.rcb_args.length = length;
3240 #if CONFIG_NFS4
3241 		if (nmp->nm_vers >= NFS_VER4) {
3242 			cb.rcb_args.stategenid = nmp->nm_stategenid;
3243 		}
3244 #endif
3245 		// XXX iomode should really match the original request
3246 		error = nmp->nm_funcs->nf_write_rpc_async(np, auio, length, thd, cred,
3247 		    NFS_WRITE_FILESYNC, &cb, &wreq);
3248 
3249 		/* Free allocated uio buffer */
3250 		uio_free(auio);
3251 
3252 		if (!error) {
3253 			if (IS_VALID_CRED(cred)) {
3254 				kauth_cred_unref(&cred);
3255 			}
3256 			if (!cb.rcb_func) {
3257 				/* if !async we'll need to wait for this RPC to finish */
3258 				req = wreq;
3259 				wreq = NULL;
3260 				goto finish;
3261 			}
3262 			nfs_request_rele(req);
3263 			/*
3264 			 * We're done here.
3265 			 * Outstanding RPC count is unchanged.
3266 			 * Callback will be called when RPC is done.
3267 			 */
3268 			return;
3269 		}
3270 		SET(bp->nb_flags, NB_ERROR);
3271 		bp->nb_error = error;
3272 	}
3273 
3274 out:
3275 	if (cb.rcb_func) {
3276 		nfs_async_write_done(nmp);
3277 		nfs_request_rele(req);
3278 	}
3279 	/*
3280 	 * Decrement outstanding RPC count on buffer
3281 	 * and call nfs_buf_write_finish on last RPC.
3282 	 *
3283 	 * (Note: when there are multiple async RPCs issued for a
3284 	 * buffer we need nfs_buffer_mutex to avoid problems when
3285 	 * aborting a partially-initiated set of RPCs)
3286 	 */
3287 	multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC);
3288 	if (multasyncrpc) {
3289 		lck_mtx_lock(&nfs_buf_mutex);
3290 	}
3291 
3292 	bp->nb_rpcs--;
3293 	finished = (bp->nb_rpcs == 0);
3294 
3295 	if (multasyncrpc) {
3296 		lck_mtx_unlock(&nfs_buf_mutex);
3297 	}
3298 
3299 	if (finished) {
3300 		if (multasyncrpc) {
3301 			wakeme = &bp->nb_rpcs;
3302 		}
3303 		nfs_buf_write_finish(bp, thd, cred);
3304 		if (wakeme) {
3305 			wakeup(wakeme);
3306 		}
3307 	}
3308 
3309 	if (IS_VALID_CRED(cred)) {
3310 		kauth_cred_unref(&cred);
3311 	}
3312 
3313 	if (cb.rcb_func && np->n_needcommitcnt >= NFS_A_LOT_OF_NEEDCOMMITS) {
3314 		nfs_flushcommits(np, 1);
3315 	}
3316 }
3317 
3318 /*
3319  * Send commit(s) for the given node's "needcommit" buffers
3320  */
3321 int
nfs_flushcommits(nfsnode_t np,int nowait)3322 nfs_flushcommits(nfsnode_t np, int nowait)
3323 {
3324 	struct nfsmount *nmp;
3325 	struct nfsbuf *bp, *prevlbp, *lbp;
3326 	struct nfsbuflists blist, commitlist;
3327 	int error = 0, retv, wcred_set, flags;
3328 	u_quad_t off, endoff, toff;
3329 	uint64_t wverf, count;
3330 	kauth_cred_t wcred = NULL;
3331 	nfsbufpgs dirty;
3332 
3333 	FSDBG_TOP(557, np, 0, 0, 0);
3334 
3335 	/*
3336 	 * A nb_flags == (NB_DELWRI | NB_NEEDCOMMIT) block has been written to the
3337 	 * server, but nas not been committed to stable storage on the server
3338 	 * yet. The byte range is worked out for as many nfsbufs as we can handle
3339 	 * and the commit rpc is done.
3340 	 */
3341 	if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
3342 		error = nfs_node_lock(np);
3343 		if (error) {
3344 			goto done;
3345 		}
3346 		np->n_flag |= NMODIFIED;
3347 		nfs_node_unlock(np);
3348 	}
3349 
3350 	off = (u_quad_t)-1;
3351 	endoff = 0;
3352 	wcred_set = 0;
3353 	LIST_INIT(&commitlist);
3354 
3355 	nmp = NFSTONMP(np);
3356 	if (nfs_mount_gone(nmp)) {
3357 		error = ENXIO;
3358 		goto done;
3359 	}
3360 	if (nmp->nm_vers == NFS_VER2) {
3361 		error = EINVAL;
3362 		goto done;
3363 	}
3364 
3365 	flags = NBI_DIRTY;
3366 	if (nowait) {
3367 		flags |= NBI_NOWAIT;
3368 	}
3369 	lck_mtx_lock(&nfs_buf_mutex);
3370 	wverf = nmp->nm_verf;
3371 	if (!nfs_buf_iterprepare(np, &blist, flags)) {
3372 		while ((bp = LIST_FIRST(&blist))) {
3373 			LIST_REMOVE(bp, nb_vnbufs);
3374 			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
3375 			error = nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0);
3376 			if (error) {
3377 				continue;
3378 			}
3379 			if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
3380 				nfs_buf_check_write_verifier(np, bp);
3381 			}
3382 			if (((bp->nb_flags & (NB_DELWRI | NB_NEEDCOMMIT)) != (NB_DELWRI | NB_NEEDCOMMIT)) ||
3383 			    (bp->nb_verf != wverf)) {
3384 				nfs_buf_drop(bp);
3385 				continue;
3386 			}
3387 			nfs_buf_remfree(bp);
3388 
3389 			/* buffer UPLs will be grabbed *in order* below */
3390 
3391 			FSDBG(557, bp, bp->nb_flags, bp->nb_valid, bp->nb_dirty);
3392 			FSDBG(557, bp->nb_validoff, bp->nb_validend,
3393 			    bp->nb_dirtyoff, bp->nb_dirtyend);
3394 
3395 			/*
3396 			 * Work out if all buffers are using the same cred
3397 			 * so we can deal with them all with one commit.
3398 			 *
3399 			 * Note: creds in bp's must be obtained by kauth_cred_ref
3400 			 * on the same original cred in order for them to be equal.
3401 			 */
3402 			if (wcred_set == 0) {
3403 				wcred = bp->nb_wcred;
3404 				if (!IS_VALID_CRED(wcred)) {
3405 					panic("nfs: needcommit w/out wcred");
3406 				}
3407 				wcred_set = 1;
3408 			} else if ((wcred_set == 1) && wcred != bp->nb_wcred) {
3409 				wcred_set = -1;
3410 			}
3411 			SET(bp->nb_flags, NB_WRITEINPROG);
3412 
3413 			/*
3414 			 * Add this buffer to the list of buffers we are committing.
3415 			 * Buffers are inserted into the list in ascending order so that
3416 			 * we can take the UPLs in order after the list is complete.
3417 			 */
3418 			prevlbp = NULL;
3419 			LIST_FOREACH(lbp, &commitlist, nb_vnbufs) {
3420 				if (bp->nb_lblkno < lbp->nb_lblkno) {
3421 					break;
3422 				}
3423 				prevlbp = lbp;
3424 			}
3425 			LIST_REMOVE(bp, nb_vnbufs);
3426 			if (prevlbp) {
3427 				LIST_INSERT_AFTER(prevlbp, bp, nb_vnbufs);
3428 			} else {
3429 				LIST_INSERT_HEAD(&commitlist, bp, nb_vnbufs);
3430 			}
3431 
3432 			/* update commit range start, end */
3433 			toff = NBOFF(bp) + bp->nb_dirtyoff;
3434 			if (toff < off) {
3435 				off = toff;
3436 			}
3437 			toff += (u_quad_t)(bp->nb_dirtyend - bp->nb_dirtyoff);
3438 			if (toff > endoff) {
3439 				endoff = toff;
3440 			}
3441 		}
3442 		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
3443 	}
3444 	lck_mtx_unlock(&nfs_buf_mutex);
3445 
3446 	if (LIST_EMPTY(&commitlist)) {
3447 		error = ENOBUFS;
3448 		goto done;
3449 	}
3450 
3451 	/*
3452 	 * We need a UPL to prevent others from accessing the buffers during
3453 	 * our commit RPC(s).
3454 	 *
3455 	 * We used to also check for dirty pages here; if there were any we'd
3456 	 * abort the commit and force the entire buffer to be written again.
3457 	 * Instead of doing that, we just go ahead and commit the dirty range,
3458 	 * and then leave the buffer around with dirty pages that will be
3459 	 * written out later.
3460 	 */
3461 	LIST_FOREACH(bp, &commitlist, nb_vnbufs) {
3462 		if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
3463 			retv = nfs_buf_upl_setup(bp);
3464 			if (retv) {
3465 				/* Unable to create the UPL, the VM object probably no longer exists. */
3466 				printf("nfs_flushcommits: upl create failed %d\n", retv);
3467 				NBPGS_ERASE(&bp->nb_valid);
3468 				NBPGS_ERASE(&bp->nb_dirty);
3469 			}
3470 		}
3471 		nfs_buf_upl_check(bp);
3472 	}
3473 
3474 	/*
3475 	 * Commit data on the server, as required.
3476 	 * If all bufs are using the same wcred, then use that with
3477 	 * one call for all of them, otherwise commit each one
3478 	 * separately.
3479 	 */
3480 	if (wcred_set == 1) {
3481 		/*
3482 		 * Note, it's possible the commit range could be >2^32-1.
3483 		 * If it is, we'll send one commit that covers the whole file.
3484 		 */
3485 		if ((endoff - off) > 0xffffffff) {
3486 			count = 0;
3487 		} else {
3488 			count = (endoff - off);
3489 		}
3490 		retv = nmp->nm_funcs->nf_commit_rpc(np, off, count, wcred, wverf);
3491 	} else {
3492 		retv = 0;
3493 		LIST_FOREACH(bp, &commitlist, nb_vnbufs) {
3494 			toff = NBOFF(bp) + bp->nb_dirtyoff;
3495 			count = bp->nb_dirtyend - bp->nb_dirtyoff;
3496 			retv = nmp->nm_funcs->nf_commit_rpc(np, toff, count, bp->nb_wcred, wverf);
3497 			if (retv) {
3498 				break;
3499 			}
3500 		}
3501 	}
3502 
3503 	/*
3504 	 * Now, either mark the blocks I/O done or mark the
3505 	 * blocks dirty, depending on whether the commit
3506 	 * succeeded.
3507 	 */
3508 	while ((bp = LIST_FIRST(&commitlist))) {
3509 		LIST_REMOVE(bp, nb_vnbufs);
3510 		FSDBG(557, bp, retv, bp->nb_flags, bp->nb_dirty);
3511 		nfs_node_lock_force(np);
3512 		CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_WRITEINPROG));
3513 		np->n_needcommitcnt--;
3514 		CHECK_NEEDCOMMITCNT(np);
3515 		nfs_node_unlock(np);
3516 
3517 		if (retv) {
3518 			/* move back to dirty list */
3519 			lck_mtx_lock(&nfs_buf_mutex);
3520 			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
3521 			lck_mtx_unlock(&nfs_buf_mutex);
3522 			nfs_buf_release(bp, 1);
3523 			continue;
3524 		}
3525 
3526 		nfs_node_lock_force(np);
3527 		np->n_numoutput++;
3528 		nfs_node_unlock(np);
3529 		vnode_startwrite(NFSTOV(np));
3530 		if (ISSET(bp->nb_flags, NB_DELWRI)) {
3531 			lck_mtx_lock(&nfs_buf_mutex);
3532 			nfs_nbdwrite--;
3533 			NFSBUFCNTCHK();
3534 			lck_mtx_unlock(&nfs_buf_mutex);
3535 			wakeup(&nfs_nbdwrite);
3536 		}
3537 		CLR(bp->nb_flags, (NB_READ | NB_DONE | NB_ERROR | NB_DELWRI));
3538 		/* if block still has dirty pages, we don't want it to */
3539 		/* be released in nfs_buf_iodone().  So, don't set NB_ASYNC. */
3540 		NBPGS_COPY(&dirty, &bp->nb_dirty);
3541 		if (!nfs_buf_pgs_is_set(&dirty)) {
3542 			SET(bp->nb_flags, NB_ASYNC);
3543 		} else {
3544 			CLR(bp->nb_flags, NB_ASYNC);
3545 		}
3546 
3547 		/* move to clean list */
3548 		lck_mtx_lock(&nfs_buf_mutex);
3549 		LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
3550 		lck_mtx_unlock(&nfs_buf_mutex);
3551 
3552 		bp->nb_dirtyoff = bp->nb_dirtyend = 0;
3553 
3554 		nfs_buf_iodone(bp);
3555 		if (nfs_buf_pgs_is_set(&dirty)) {
3556 			/* throw it back in as a delayed write buffer */
3557 			CLR(bp->nb_flags, NB_DONE);
3558 			nfs_buf_write_delayed(bp);
3559 		}
3560 	}
3561 
3562 done:
3563 	FSDBG_BOT(557, np, 0, 0, error);
3564 	return error;
3565 }
3566 
3567 /*
3568  * Flush all the blocks associated with a vnode.
3569  *      Walk through the buffer pool and push any dirty pages
3570  *	associated with the vnode.
3571  */
3572 int
nfs_flush(nfsnode_t np,int waitfor,thread_t thd,int ignore_writeerr)3573 nfs_flush(nfsnode_t np, int waitfor, thread_t thd, int ignore_writeerr)
3574 {
3575 	struct nfsbuf *bp;
3576 	struct nfsbuflists blist;
3577 	struct nfsmount *nmp = NFSTONMP(np);
3578 	int error = 0, error2, slptimeo = 0, slpflag = 0;
3579 	int nfsvers, flags, passone = 1;
3580 
3581 	FSDBG_TOP(517, np, waitfor, ignore_writeerr, 0);
3582 
3583 	if (nfs_mount_gone(nmp)) {
3584 		error = ENXIO;
3585 		goto out;
3586 	}
3587 	nfsvers = nmp->nm_vers;
3588 	if (NMFLAG(nmp, INTR)) {
3589 		slpflag = PCATCH;
3590 	}
3591 
3592 	if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
3593 		nfs_node_lock_force(np);
3594 		np->n_flag |= NMODIFIED;
3595 		nfs_node_unlock(np);
3596 	}
3597 
3598 	lck_mtx_lock(&nfs_buf_mutex);
3599 	while (np->n_bflag & NBFLUSHINPROG) {
3600 		np->n_bflag |= NBFLUSHWANT;
3601 		error = msleep(&np->n_bflag, &nfs_buf_mutex, slpflag, "nfs_flush", NULL);
3602 		if ((error && (error != EWOULDBLOCK)) ||
3603 		    ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0)))) {
3604 			lck_mtx_unlock(&nfs_buf_mutex);
3605 			goto out;
3606 		}
3607 	}
3608 	np->n_bflag |= NBFLUSHINPROG;
3609 
3610 	/*
3611 	 * On the first pass, start async/unstable writes on all
3612 	 * delayed write buffers.  Then wait for all writes to complete
3613 	 * and call nfs_flushcommits() to commit any uncommitted buffers.
3614 	 * On all subsequent passes, start STABLE writes on any remaining
3615 	 * dirty buffers.  Then wait for all writes to complete.
3616 	 */
3617 again:
3618 	FSDBG(518, LIST_FIRST(&np->n_dirtyblkhd), np->n_flag, 0, 0);
3619 	if (!NFSTONMP(np)) {
3620 		lck_mtx_unlock(&nfs_buf_mutex);
3621 		error = ENXIO;
3622 		goto done;
3623 	}
3624 
3625 	/* Start/do any write(s) that are required. */
3626 	if (!nfs_buf_iterprepare(np, &blist, NBI_DIRTY)) {
3627 		while ((bp = LIST_FIRST(&blist))) {
3628 			LIST_REMOVE(bp, nb_vnbufs);
3629 			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
3630 			flags = (passone || !(waitfor == MNT_WAIT || waitfor == MNT_DWAIT)) ? NBAC_NOWAIT : 0;
3631 			if (flags != NBAC_NOWAIT) {
3632 				nfs_buf_refget(bp);
3633 			}
3634 			while ((error = nfs_buf_acquire(bp, flags, slpflag, slptimeo))) {
3635 				FSDBG(524, bp, flags, bp->nb_lflags, bp->nb_flags);
3636 				if (error == EBUSY) {
3637 					break;
3638 				}
3639 				if (error) {
3640 					error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0);
3641 					if (error2) {
3642 						if (flags != NBAC_NOWAIT) {
3643 							nfs_buf_refrele(bp);
3644 						}
3645 						nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
3646 						lck_mtx_unlock(&nfs_buf_mutex);
3647 						error = error2;
3648 						goto done;
3649 					}
3650 					if (slpflag == PCATCH) {
3651 						slpflag = 0;
3652 						slptimeo = 2 * hz;
3653 					}
3654 				}
3655 			}
3656 			if (flags != NBAC_NOWAIT) {
3657 				nfs_buf_refrele(bp);
3658 			}
3659 			if (error == EBUSY) {
3660 				continue;
3661 			}
3662 			if (!bp->nb_np) {
3663 				/* buffer is no longer valid */
3664 				nfs_buf_drop(bp);
3665 				continue;
3666 			}
3667 			if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
3668 				nfs_buf_check_write_verifier(np, bp);
3669 			}
3670 			if (!ISSET(bp->nb_flags, NB_DELWRI)) {
3671 				/* buffer is no longer dirty */
3672 				nfs_buf_drop(bp);
3673 				continue;
3674 			}
3675 			FSDBG(525, bp, passone, bp->nb_lflags, bp->nb_flags);
3676 			if ((passone || !(waitfor == MNT_WAIT || waitfor == MNT_DWAIT)) &&
3677 			    ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
3678 				nfs_buf_drop(bp);
3679 				continue;
3680 			}
3681 			nfs_buf_remfree(bp);
3682 			lck_mtx_unlock(&nfs_buf_mutex);
3683 			if (ISSET(bp->nb_flags, NB_ERROR)) {
3684 				nfs_node_lock_force(np);
3685 				np->n_error = bp->nb_error ? bp->nb_error : EIO;
3686 				np->n_flag |= NWRITEERR;
3687 				nfs_node_unlock(np);
3688 				nfs_buf_release(bp, 1);
3689 				lck_mtx_lock(&nfs_buf_mutex);
3690 				continue;
3691 			}
3692 			SET(bp->nb_flags, NB_ASYNC);
3693 			if (!passone) {
3694 				/* NB_STABLE forces this to be written FILESYNC */
3695 				SET(bp->nb_flags, NB_STABLE);
3696 			}
3697 			nfs_buf_write(bp);
3698 			lck_mtx_lock(&nfs_buf_mutex);
3699 		}
3700 		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
3701 	}
3702 	lck_mtx_unlock(&nfs_buf_mutex);
3703 
3704 	if (waitfor == MNT_WAIT || waitfor == MNT_DWAIT) {
3705 		while ((error = vnode_waitforwrites(NFSTOV(np), 0, slpflag, slptimeo, "nfsflush"))) {
3706 			error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0);
3707 			if (error2) {
3708 				error = error2;
3709 				goto done;
3710 			}
3711 			if (slpflag == PCATCH) {
3712 				slpflag = 0;
3713 				slptimeo = 2 * hz;
3714 			}
3715 		}
3716 	}
3717 
3718 	if (nfsvers != NFS_VER2) {
3719 		/* loop while it looks like there are still buffers to be */
3720 		/* commited and nfs_flushcommits() seems to be handling them. */
3721 		while (np->n_needcommitcnt) {
3722 			if (nfs_flushcommits(np, 0)) {
3723 				break;
3724 			}
3725 		}
3726 	}
3727 
3728 	if (passone) {
3729 		passone = 0;
3730 		if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
3731 			nfs_node_lock_force(np);
3732 			np->n_flag |= NMODIFIED;
3733 			nfs_node_unlock(np);
3734 		}
3735 		lck_mtx_lock(&nfs_buf_mutex);
3736 		goto again;
3737 	}
3738 
3739 	if (waitfor == MNT_WAIT || waitfor == MNT_DWAIT) {
3740 		if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
3741 			nfs_node_lock_force(np);
3742 			np->n_flag |= NMODIFIED;
3743 			nfs_node_unlock(np);
3744 		}
3745 		lck_mtx_lock(&nfs_buf_mutex);
3746 		if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
3747 			goto again;
3748 		}
3749 		lck_mtx_unlock(&nfs_buf_mutex);
3750 		nfs_node_lock_force(np);
3751 		/*
3752 		 * OK, it looks like there are no dirty blocks.  If we have no
3753 		 * writes in flight and no one in the write code, we can clear
3754 		 * the modified flag.  In order to make sure we see the latest
3755 		 * attributes and size, we also invalidate the attributes and
3756 		 * advance the attribute cache XID to guarantee that attributes
3757 		 * newer than our clearing of NMODIFIED will get loaded next.
3758 		 * (If we don't do this, it's possible for the flush's final
3759 		 * write/commit (xid1) to be executed in parallel with a subsequent
3760 		 * getattr request (xid2).  The getattr could return attributes
3761 		 * from *before* the write/commit completed but the stale attributes
3762 		 * would be preferred because of the xid ordering.)
3763 		 */
3764 		if (!np->n_wrbusy && !np->n_numoutput) {
3765 			np->n_flag &= ~NMODIFIED;
3766 			NATTRINVALIDATE(np);
3767 			nfs_get_xid(&np->n_xid);
3768 		}
3769 	} else {
3770 		nfs_node_lock_force(np);
3771 	}
3772 
3773 	FSDBG(526, np->n_flag, np->n_error, 0, 0);
3774 	if (!ignore_writeerr && (np->n_flag & NWRITEERR)) {
3775 		error = np->n_error;
3776 		np->n_flag &= ~NWRITEERR;
3777 	}
3778 	nfs_node_unlock(np);
3779 done:
3780 	lck_mtx_lock(&nfs_buf_mutex);
3781 	flags = np->n_bflag;
3782 	np->n_bflag &= ~(NBFLUSHINPROG | NBFLUSHWANT);
3783 	lck_mtx_unlock(&nfs_buf_mutex);
3784 	if (flags & NBFLUSHWANT) {
3785 		wakeup(&np->n_bflag);
3786 	}
3787 out:
3788 	FSDBG_BOT(517, np, error, ignore_writeerr, 0);
3789 	return error;
3790 }
3791 
3792 /*
3793  * Flush out and invalidate all buffers associated with a vnode.
3794  * Called with the underlying object locked.
3795  */
3796 int
nfs_vinvalbuf_internal(nfsnode_t np,int flags,thread_t thd,kauth_cred_t cred,int slpflag,int slptimeo)3797 nfs_vinvalbuf_internal(
3798 	nfsnode_t np,
3799 	int flags,
3800 	thread_t thd,
3801 	kauth_cred_t cred,
3802 	int slpflag,
3803 	int slptimeo)
3804 {
3805 	struct nfsbuf *bp;
3806 	struct nfsbuflists blist;
3807 	int list, error = 0;
3808 
3809 	if (flags & V_SAVE) {
3810 		if ((error = nfs_flush(np, MNT_WAIT, thd, (flags & V_IGNORE_WRITEERR)))) {
3811 			return error;
3812 		}
3813 	}
3814 
3815 	lck_mtx_lock(&nfs_buf_mutex);
3816 	for (;;) {
3817 		list = NBI_CLEAN;
3818 		if (nfs_buf_iterprepare(np, &blist, list)) {
3819 			list = NBI_DIRTY;
3820 			if (nfs_buf_iterprepare(np, &blist, list)) {
3821 				break;
3822 			}
3823 		}
3824 		while ((bp = LIST_FIRST(&blist))) {
3825 			LIST_REMOVE(bp, nb_vnbufs);
3826 			if (list == NBI_CLEAN) {
3827 				LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
3828 			} else {
3829 				LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
3830 			}
3831 			nfs_buf_refget(bp);
3832 			while ((error = nfs_buf_acquire(bp, NBAC_REMOVE, slpflag, slptimeo))) {
3833 				FSDBG(556, np, bp, NBOFF(bp), bp->nb_flags);
3834 				if (error != EAGAIN) {
3835 					FSDBG(554, np, bp, -1, error);
3836 					nfs_buf_refrele(bp);
3837 					nfs_buf_itercomplete(np, &blist, list);
3838 					lck_mtx_unlock(&nfs_buf_mutex);
3839 					return error;
3840 				}
3841 			}
3842 			nfs_buf_refrele(bp);
3843 			FSDBG(554, np, bp, NBOFF(bp), bp->nb_flags);
3844 			lck_mtx_unlock(&nfs_buf_mutex);
3845 			if ((flags & V_SAVE) && UBCINFOEXISTS(NFSTOV(np)) && bp->nb_np &&
3846 			    (NBOFF(bp) < (off_t)np->n_size)) {
3847 				/* extra paranoia: make sure we're not */
3848 				/* somehow leaving any dirty data around */
3849 				nfsbufpgs pagemask;
3850 				int mustwrite = 0;
3851 				off_t end = (NBOFF(bp) + bp->nb_bufsize > (off_t)np->n_size) ?
3852 				    (np->n_size - NBOFF(bp)) : bp->nb_bufsize;
3853 				if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
3854 					error = nfs_buf_upl_setup(bp);
3855 					if (error == EINVAL) {
3856 						/* vm object must no longer exist */
3857 						/* hopefully we don't need to do */
3858 						/* anything for this buffer */
3859 					} else if (error) {
3860 						printf("nfs_vinvalbuf_internal: upl setup failed %d\n", error);
3861 					}
3862 					NBPGS_ERASE(&bp->nb_valid);
3863 					NBPGS_ERASE(&bp->nb_dirty);
3864 				}
3865 				nfs_buf_upl_check(bp);
3866 				/* check for any dirty data before the EOF */
3867 				if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end)) {
3868 					/* clip dirty range to EOF */
3869 					if (bp->nb_dirtyend > end) {
3870 						bp->nb_dirtyend = end;
3871 						if (bp->nb_dirtyoff >= bp->nb_dirtyend) {
3872 							bp->nb_dirtyoff = bp->nb_dirtyend = 0;
3873 						}
3874 					}
3875 					if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end)) {
3876 						mustwrite++;
3877 					}
3878 				}
3879 				nfs_buf_pgs_get_page_mask(&pagemask, round_page_64(end) / PAGE_SIZE);
3880 				nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &bp->nb_dirty);
3881 				if (nfs_buf_pgs_is_set(&bp->nb_dirty)) {
3882 					mustwrite++;
3883 				}
3884 				/* also make sure we'll have a credential to do the write */
3885 				if (mustwrite && !IS_VALID_CRED(bp->nb_wcred) && !IS_VALID_CRED(cred)) {
3886 					printf("nfs_vinvalbuf_internal: found dirty buffer with no write creds\n");
3887 					mustwrite = 0;
3888 				}
3889 				if (mustwrite) {
3890 					FSDBG(554, np, bp, 0xd00dee, bp->nb_flags);
3891 					if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
3892 						panic("nfs_vinvalbuf_internal: dirty buffer without upl");
3893 					}
3894 					/* gotta write out dirty data before invalidating */
3895 					/* (NB_STABLE indicates that data writes should be FILESYNC) */
3896 					/* (NB_NOCACHE indicates buffer should be discarded) */
3897 					CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL | NB_ASYNC));
3898 					SET(bp->nb_flags, NB_STABLE | NB_NOCACHE);
3899 					if (!IS_VALID_CRED(bp->nb_wcred)) {
3900 						kauth_cred_ref(cred);
3901 						bp->nb_wcred = cred;
3902 					}
3903 					error = nfs_buf_write(bp);
3904 					// Note: bp has been released
3905 					if (error) {
3906 						FSDBG(554, bp, 0xd00dee, 0xbad, error);
3907 						nfs_node_lock_force(np);
3908 						if ((error != EINTR) && (error != ERESTART)) {
3909 							np->n_error = error;
3910 							np->n_flag |= NWRITEERR;
3911 						}
3912 						/*
3913 						 * There was a write error and we need to
3914 						 * invalidate attrs to sync with server.
3915 						 * (if this write was extending the file,
3916 						 * we may no longer know the correct size)
3917 						 */
3918 						NATTRINVALIDATE(np);
3919 						nfs_node_unlock(np);
3920 						if ((error == EINTR) || (error == ERESTART)) {
3921 							/*
3922 							 * Abort on EINTR.  If we don't, we could
3923 							 * be stuck in this loop forever because
3924 							 * the buffer will continue to stay dirty.
3925 							 */
3926 							lck_mtx_lock(&nfs_buf_mutex);
3927 							nfs_buf_itercomplete(np, &blist, list);
3928 							lck_mtx_unlock(&nfs_buf_mutex);
3929 							return error;
3930 						}
3931 						error = 0;
3932 					}
3933 					lck_mtx_lock(&nfs_buf_mutex);
3934 					continue;
3935 				}
3936 			}
3937 			SET(bp->nb_flags, NB_INVAL);
3938 			// hold off on FREEUPs until we're done here
3939 			nfs_buf_release(bp, 0);
3940 			lck_mtx_lock(&nfs_buf_mutex);
3941 		}
3942 		nfs_buf_itercomplete(np, &blist, list);
3943 	}
3944 	if (!LIST_EMPTY(&(np)->n_dirtyblkhd) || !LIST_EMPTY(&(np)->n_cleanblkhd)) {
3945 		panic("nfs_vinvalbuf_internal: flush/inval failed");
3946 	}
3947 	lck_mtx_unlock(&nfs_buf_mutex);
3948 	nfs_node_lock_force(np);
3949 	if (!(flags & V_SAVE)) {
3950 		np->n_flag &= ~NMODIFIED;
3951 	}
3952 	if (vnode_vtype(NFSTOV(np)) == VREG) {
3953 		np->n_lastrahead = -1;
3954 	}
3955 	nfs_node_unlock(np);
3956 	NFS_BUF_FREEUP();
3957 	return 0;
3958 }
3959 
3960 
3961 /*
3962  * Flush and invalidate all dirty buffers. If another process is already
3963  * doing the flush, just wait for completion.
3964  */
3965 int
nfs_vinvalbuf1(vnode_t vp,int flags,vfs_context_t ctx,int intrflg)3966 nfs_vinvalbuf1(vnode_t vp, int flags, vfs_context_t ctx, int intrflg)
3967 {
3968 	return nfs_vinvalbuf2(vp, flags, vfs_context_thread(ctx), vfs_context_ucred(ctx), intrflg);
3969 }
3970 
/*
 * Flush and invalidate all of an NFS node's buffers, then push/invalidate
 * its VM pages.  Only one invalidation may be in progress per node at a
 * time; concurrent callers serialize on the NBINVALINPROG flag.
 *
 * flags:    V_SAVE to write dirty data before invalidating (cleared if the
 *           mount is gone, since writes would just hang).
 * thd/cred: thread and credential used for any writes and signal checks.
 * intrflg:  nonzero to allow interruption by signals (only honored when the
 *           mount was mounted with the "intr" option).
 *
 * Returns 0 on success or an errno (e.g. EINTR from nfs_sigintr()).
 */
int
nfs_vinvalbuf2(vnode_t vp, int flags, thread_t thd, kauth_cred_t cred, int intrflg)
{
	nfsnode_t np = VTONFS(vp);
	struct nfsmount *nmp = VTONMP(vp);
	int error, slpflag, slptimeo, nflags, retry = 0;
	int ubcflags = UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	off_t size;

	FSDBG_TOP(554, np, flags, intrflg, 0);

	/* Interruptible sleeps are only allowed on "intr" mounts. */
	if (nmp && !NMFLAG(nmp, INTR)) {
		intrflg = 0;
	}
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	/* First wait for any other process doing a flush to complete.  */
	lck_mtx_lock(&nfs_buf_mutex);
	while (np->n_bflag & NBINVALINPROG) {
		np->n_bflag |= NBINVALWANT;
		msleep(&np->n_bflag, &nfs_buf_mutex, slpflag, "nfs_vinvalbuf2", &ts);
		if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
			lck_mtx_unlock(&nfs_buf_mutex);
			return error;
		}
		if (np->n_bflag & NBINVALINPROG) {
			/* still busy after one wakeup: stop catching signals in msleep */
			slpflag = 0;
		}
	}
	/* We own the invalidation now; cleared (and waiters woken) at "done". */
	np->n_bflag |= NBINVALINPROG;
	lck_mtx_unlock(&nfs_buf_mutex);

	/* Now, flush as required.  */
again:
	/* If the mount is gone no sense to try and write anything. and hang trying to do IO. */
	if (nfs_mount_gone(nmp)) {
		flags &= ~V_SAVE;
	}

	error = nfs_vinvalbuf_internal(np, flags, thd, cred, slpflag, 0);
	while (error) {
		FSDBG(554, np, 0, 0, error);
		/* bail out if a signal is pending; otherwise retry with a timeout */
		if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
			goto done;
		}
		error = nfs_vinvalbuf_internal(np, flags, thd, cred, 0, slptimeo);
	}

	/* If the mount is gone no sense to try and write anything. and hang trying to do IO. */
	if (nfs_mount_gone(nmp)) {
		ubcflags &= ~UBC_PUSHALL;
	}

	/* get the pages out of vm also */
	if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) {
		if ((error = ubc_msync(vp, 0, size, NULL, ubcflags))) {
			/* EINVAL means we passed ubc_msync bad arguments — a program bug */
			if (error == EINVAL) {
				panic("nfs_vinvalbuf2: ubc_msync failed!, error %d", error);
			}
			if (retry++ < 10) { /* retry invalidating a few times */
				/* after the first failure (or on ENXIO) stop trying to push dirty pages */
				if (retry > 1 || error == ENXIO) {
					ubcflags &= ~UBC_PUSHALL;
				}
				goto again;
			}
			/* give up */
			printf("nfs_vinvalbuf2: ubc_msync failed!, error %d\n", error);
		}
	}
done:
	/* Release ownership and wake anyone waiting to invalidate this node. */
	lck_mtx_lock(&nfs_buf_mutex);
	nflags = np->n_bflag;
	np->n_bflag &= ~(NBINVALINPROG | NBINVALWANT);
	lck_mtx_unlock(&nfs_buf_mutex);
	if (nflags & NBINVALWANT) {
		wakeup(&np->n_bflag);
	}

	FSDBG_BOT(554, np, flags, intrflg, error);
	return error;
}
4059 
4060 /*
4061  * Wait for any busy buffers to complete.
4062  */
4063 void
nfs_wait_bufs(nfsnode_t np)4064 nfs_wait_bufs(nfsnode_t np)
4065 {
4066 	struct nfsbuf *bp;
4067 	struct nfsbuflists blist;
4068 	int error = 0;
4069 
4070 	lck_mtx_lock(&nfs_buf_mutex);
4071 	if (!nfs_buf_iterprepare(np, &blist, NBI_CLEAN)) {
4072 		while ((bp = LIST_FIRST(&blist))) {
4073 			LIST_REMOVE(bp, nb_vnbufs);
4074 			LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
4075 			nfs_buf_refget(bp);
4076 			while ((error = nfs_buf_acquire(bp, 0, 0, 0))) {
4077 				if (error != EAGAIN) {
4078 					nfs_buf_refrele(bp);
4079 					nfs_buf_itercomplete(np, &blist, NBI_CLEAN);
4080 					lck_mtx_unlock(&nfs_buf_mutex);
4081 					return;
4082 				}
4083 			}
4084 			nfs_buf_refrele(bp);
4085 			nfs_buf_drop(bp);
4086 		}
4087 		nfs_buf_itercomplete(np, &blist, NBI_CLEAN);
4088 	}
4089 	if (!nfs_buf_iterprepare(np, &blist, NBI_DIRTY)) {
4090 		while ((bp = LIST_FIRST(&blist))) {
4091 			LIST_REMOVE(bp, nb_vnbufs);
4092 			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
4093 			nfs_buf_refget(bp);
4094 			while ((error = nfs_buf_acquire(bp, 0, 0, 0))) {
4095 				if (error != EAGAIN) {
4096 					nfs_buf_refrele(bp);
4097 					nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
4098 					lck_mtx_unlock(&nfs_buf_mutex);
4099 					return;
4100 				}
4101 			}
4102 			nfs_buf_refrele(bp);
4103 			nfs_buf_drop(bp);
4104 		}
4105 		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
4106 	}
4107 	lck_mtx_unlock(&nfs_buf_mutex);
4108 }
4109 
4110 
4111 /*
4112  * Add an async I/O request to the mount's async I/O queue and make
4113  * sure that an nfsiod will service it.
4114  */
4115 void
nfs_asyncio_finish(struct nfsreq * req)4116 nfs_asyncio_finish(struct nfsreq *req)
4117 {
4118 	struct nfsmount *nmp;
4119 	struct nfsiod *niod;
4120 	int started = 0;
4121 
4122 	FSDBG_TOP(552, nmp, 0, 0, 0);
4123 again:
4124 	nmp = req->r_nmp;
4125 
4126 	if (nmp == NULL) {
4127 		return;
4128 	}
4129 
4130 	lck_mtx_lock(&nfsiod_mutex);
4131 	niod = nmp->nm_niod;
4132 
4133 	/* grab an nfsiod if we don't have one already */
4134 	if (!niod) {
4135 		niod = TAILQ_FIRST(&nfsiodfree);
4136 		if (niod) {
4137 			TAILQ_REMOVE(&nfsiodfree, niod, niod_link);
4138 			TAILQ_INSERT_TAIL(&nfsiodwork, niod, niod_link);
4139 			niod->niod_nmp = nmp;
4140 		} else if (((nfsiod_thread_count < NFSIOD_MAX) || (nfsiod_thread_count <= 0)) && (started < 4)) {
4141 			/*
4142 			 * Try starting a new thread.
4143 			 * We may try a couple times if other callers
4144 			 * get the new threads before we do.
4145 			 */
4146 			lck_mtx_unlock(&nfsiod_mutex);
4147 			started++;
4148 			if (!nfsiod_start()) {
4149 				goto again;
4150 			}
4151 			lck_mtx_lock(&nfsiod_mutex);
4152 		}
4153 	}
4154 
4155 	/*
4156 	 * If we got here while being on the resendq we need to get off. This
4157 	 * happens when the timer fires and errors out requests from nfs_sigintr
4158 	 * or we receive a reply (UDP case) while being on the resend queue so
4159 	 * we're just finishing up and are not going to be resent.
4160 	 */
4161 	lck_mtx_lock(&req->r_mtx);
4162 	if (req->r_flags & R_RESENDQ) {
4163 		lck_mtx_lock(&nmp->nm_lock);
4164 		if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) {
4165 			NFS_BIO_DBG("Proccessing async request on resendq. Removing");
4166 			TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
4167 			req->r_flags &= ~R_RESENDQ;
4168 			req->r_rchain.tqe_next = NFSREQNOLIST;
4169 			assert(req->r_refs > 1);
4170 			/* Remove resendq reference */
4171 			req->r_refs--;
4172 		}
4173 		lck_mtx_unlock(&nmp->nm_lock);
4174 	}
4175 	lck_mtx_unlock(&req->r_mtx);
4176 
4177 	if (req->r_achain.tqe_next == NFSREQNOLIST) {
4178 		TAILQ_INSERT_TAIL(&nmp->nm_iodq, req, r_achain);
4179 	}
4180 
4181 	/* If this mount doesn't already have an nfsiod working on it... */
4182 	if (!nmp->nm_niod) {
4183 		if (niod) { /* give it the nfsiod we just grabbed */
4184 			nmp->nm_niod = niod;
4185 			lck_mtx_unlock(&nfsiod_mutex);
4186 			wakeup(niod);
4187 		} else if (nfsiod_thread_count > 0) {
4188 			/* just queue it up on nfsiod mounts queue if needed */
4189 			if (nmp->nm_iodlink.tqe_next == NFSNOLIST) {
4190 				TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
4191 			}
4192 			lck_mtx_unlock(&nfsiod_mutex);
4193 		} else {
4194 			printf("nfs_asyncio(): no nfsiods? %d %d (%d)\n", nfsiod_thread_count, NFSIOD_MAX, started);
4195 			lck_mtx_unlock(&nfsiod_mutex);
4196 			/* we have no other option but to be persistent */
4197 			started = 0;
4198 			goto again;
4199 		}
4200 	} else {
4201 		lck_mtx_unlock(&nfsiod_mutex);
4202 	}
4203 
4204 	FSDBG_BOT(552, nmp, 0, 0, 0);
4205 }
4206 
4207 /*
4208  * queue up async I/O request for resend
4209  * Must be called with req->r_mtx locked.
4210  */
4211 void
nfs_asyncio_resend(struct nfsreq * req)4212 nfs_asyncio_resend(struct nfsreq *req)
4213 {
4214 	struct nfsmount *nmp = req->r_nmp;
4215 
4216 	if (nfs_mount_gone(nmp)) {
4217 		return;
4218 	}
4219 
4220 #if CONFIG_NFS_GSS
4221 	nfs_gss_clnt_rpcdone(req);
4222 #endif
4223 	lck_mtx_lock(&nmp->nm_lock);
4224 	if (!(req->r_flags & R_RESENDQ)) {
4225 		TAILQ_INSERT_TAIL(&nmp->nm_resendq, req, r_rchain);
4226 		req->r_flags |= R_RESENDQ;
4227 		/*
4228 		 * We take a reference on this request so that it can't be
4229 		 * destroyed while a resend is queued or in progress.
4230 		 */
4231 		nfs_request_ref(req, 1);
4232 	}
4233 	nfs_mount_sock_thread_wake(nmp);
4234 	lck_mtx_unlock(&nmp->nm_lock);
4235 }
4236 
4237 /*
4238  * Read directory data into a buffer.
4239  *
4240  * Buffer will be filled (unless EOF is hit).
4241  * Buffers after this one may also be completely/partially filled.
4242  */
4243 int
nfs_buf_readdir(struct nfsbuf * bp,vfs_context_t ctx)4244 nfs_buf_readdir(struct nfsbuf *bp, vfs_context_t ctx)
4245 {
4246 	nfsnode_t np = bp->nb_np;
4247 	struct nfsmount *nmp = NFSTONMP(np);
4248 	int error = 0;
4249 
4250 	if (nfs_mount_gone(nmp)) {
4251 		return ENXIO;
4252 	}
4253 
4254 	if (nmp->nm_vers < NFS_VER4) {
4255 		error = nfs3_readdir_rpc(np, bp, ctx);
4256 	}
4257 #if CONFIG_NFS4
4258 	else {
4259 		error = nfs4_readdir_rpc(np, bp, ctx);
4260 	}
4261 #endif
4262 	if (error && (error != NFSERR_DIRBUFDROPPED)) {
4263 		SET(bp->nb_flags, NB_ERROR);
4264 		bp->nb_error = error;
4265 	}
4266 	return error;
4267 }
4268 
4269 #endif /* CONFIG_NFS_CLIENT */
4270