xref: /xnu-8792.81.2/bsd/vfs/vfs_bio.c (revision 19c3b8c28c31cb8130e034cfb5df6bf9ba342d90)
/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/buf_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/trace.h>
#include <kern/kalloc.h>
#include <sys/resourcevar.h>
#include <miscfs/specfs/specdev.h>
#include <sys/ubc.h>
#include <sys/kauth.h>
#if DIAGNOSTIC
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
#include <kern/task.h>
#include <kern/zalloc.h>
#include <kern/locks.h>
#include <kern/thread.h>

#include <sys/fslog.h>          /* fslog_io_error() */
#include <sys/disk.h>           /* dk_error_description_t */

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <kern/sched_prim.h>    /* thread_block() */

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>
#include <sys/ubc_internal.h>

#include <sys/sdt.h>

int     bcleanbuf(buf_t bp, boolean_t discard);
static int      brecover_data(buf_t bp);
static boolean_t incore(vnode_t vp, daddr64_t blkno);
/* timeout is in msecs */
static buf_t    getnewbuf(int slpflag, int slptimeo, int *queue);
static void     bremfree_locked(buf_t bp);
static void     buf_reassign(buf_t bp, vnode_t newvp);
static errno_t  buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
static int      buf_iterprepare(vnode_t vp, struct buflists *, int flags);
static void     buf_itercomplete(vnode_t vp, struct buflists *, int flags);
static boolean_t buffer_cache_gc(int);
static buf_t    buf_brelse_shadow(buf_t bp);
static void     buf_free_meta_store(buf_t bp);

static buf_t    buf_create_shadow_internal(buf_t bp, boolean_t force_copy,
    uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv);


int  bdwrite_internal(buf_t, int);

extern void disk_conditioner_delay(buf_t, int, int, uint64_t);

/* zone allocated buffer headers */
static void     bcleanbuf_thread_init(void);
static void     bcleanbuf_thread(void);

static ZONE_DEFINE_TYPE(buf_hdr_zone, "buf headers", struct buf, ZC_NONE);
static int      buf_hdr_count;


/*
 * Definitions for the buffer hash lists.
 */
#define BUFHASH(dvp, lbn)       \
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) * bufhashtbl, invalhash;
u_long  bufhash;
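/*
 * BUFHASH mixes the vnode pointer (scaled down by the vnode's size so that
 * consecutive vnodes land in different buckets) with the logical block
 * number, then masks with 'bufhash' (the hash-table size minus one, set up
 * at initialization) to select a chain in bufhashtbl.
 */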

static buf_t    incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp);

/* Definitions for the buffer stats. */
struct bufstats bufstats;

/* Number of delayed write buffers */
long nbdwrite = 0;
int blaundrycnt = 0;
static int boot_nbuf_headers = 0;

static TAILQ_HEAD(delayqueue, buf) delaybufqueue;

static TAILQ_HEAD(ioqueue, buf) iobufqueue;
static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
static int needbuffer;
static int need_iobuffer;

static LCK_GRP_DECLARE(buf_mtx_grp, "buffer cache");
static LCK_ATTR_DECLARE(buf_mtx_attr, 0, 0);
static LCK_MTX_DECLARE_ATTR(iobuffer_mtxp, &buf_mtx_grp, &buf_mtx_attr);
static LCK_MTX_DECLARE_ATTR(buf_mtx, &buf_mtx_grp, &buf_mtx_attr);
static LCK_MTX_DECLARE_ATTR(buf_gc_callout, &buf_mtx_grp, &buf_mtx_attr);

static uint32_t buf_busycount;

#define FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE 16
typedef struct {
	void (* callout)(int, void *);
	void *context;
} fs_buffer_cache_gc_callout_t;

fs_buffer_cache_gc_callout_t fs_callouts[FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE] = { {NULL, NULL} };
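/*
 * A small fixed-size registry of garbage-collection callouts, protected by
 * the buf_gc_callout mutex above: when the buffer cache is collected
 * (buffer_cache_gc(), declared earlier) registered filesystems get a
 * chance to trim their own caches as well.
 */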

static __inline__ int
buf_timestamp(void)
{
	struct  timeval         t;
	microuptime(&t);
	return (int)t.tv_sec;
}
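/*
 * Buffers are stamped with buf_timestamp() (uptime, in seconds) when they
 * are queued; the *_IS_STALE thresholds below are compared against that
 * stamp to decide when a queued buffer has gone stale.
 */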

/*
 * Insq/Remq for the buffer free lists.
 */
#define binsheadfree(bp, dp, whichq)    do { \
	                            TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
	                        } while (0)

#define binstailfree(bp, dp, whichq)    do { \
	                            TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
	                        } while (0)
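/*
 * Note that 'whichq' is unused by both macros; callers record the queue
 * themselves in b_whichq before insertion (see bmovelaundry() below).
 */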

#define BHASHENTCHECK(bp)       \
	if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef)  \
	        panic("%p: b_hash.le_prev is not deadbeef", (bp));

#define BLISTNONE(bp)   \
	(bp)->b_hash.le_next = (struct buf *)0; \
	(bp)->b_hash.le_prev = (struct buf **)0xdeadbeef;
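/*
 * 0xdeadbeef is a sentinel in b_hash.le_prev marking a buffer that is NOT
 * on any hash chain: BLISTNONE() plants it, BHASHENTCHECK() panics if a
 * buffer about to be hashed doesn't carry it, and bremhash() panics if it
 * finds it (i.e. an attempt to unhash an unhashed buffer).
 */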

/*
 * Insq/Remq for the vnode usage lists.
 */
#define bufinsvn(bp, dp)        LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define bufremvn(bp) {                                                  \
	LIST_REMOVE(bp, b_vnbufs);                                      \
	(bp)->b_vnbufs.le_next = NOLIST;                                \
}

/*
 * Time in seconds before a buffer on a list is
 * considered as a stale buffer
 */
#define LRU_IS_STALE 120 /* default value for the LRU */
#define AGE_IS_STALE 60  /* default value for the AGE */
#define META_IS_STALE 180 /* default value for the BQ_META */

int lru_is_stale = LRU_IS_STALE;
int age_is_stale = AGE_IS_STALE;
int meta_is_stale = META_IS_STALE;

#define MAXLAUNDRY      10

/* LIST_INSERT_HEAD() with assertions */
static __inline__ void
blistenterhead(struct bufhashhdr * head, buf_t bp)
{
	if ((bp->b_hash.le_next = (head)->lh_first) != NULL) {
		(head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next;
	}
	(head)->lh_first = bp;
	bp->b_hash.le_prev = &(head)->lh_first;
	if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) {
		panic("blistenterhead: le_prev is deadbeef");
	}
}

static __inline__ void
binshash(buf_t bp, struct bufhashhdr *dp)
{
#if DIAGNOSTIC
	buf_t   nbp;
#endif /* DIAGNOSTIC */

	BHASHENTCHECK(bp);

#if DIAGNOSTIC
	nbp = dp->lh_first;
	for (; nbp != NULL; nbp = nbp->b_hash.le_next) {
		if (nbp == bp) {
			panic("buf already in hashlist");
		}
	}
#endif /* DIAGNOSTIC */

	blistenterhead(dp, bp);
}

static __inline__ void
bremhash(buf_t  bp)
{
	if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) {
		panic("bremhash le_prev is deadbeef");
	}
	if (bp->b_hash.le_next == bp) {
		panic("bremhash: next points to self");
	}

	if (bp->b_hash.le_next != NULL) {
		bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev;
	}
	*bp->b_hash.le_prev = (bp)->b_hash.le_next;
}

/*
 * buf_mtx held.
 */
static __inline__ void
bmovelaundry(buf_t bp)
{
	bp->b_whichq = BQ_LAUNDRY;
	bp->b_timestamp = buf_timestamp();
	binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
	blaundrycnt++;
}

static __inline__ void
buf_release_credentials(buf_t bp)
{
	if (IS_VALID_CRED(bp->b_rcred)) {
		kauth_cred_unref(&bp->b_rcred);
	}
	if (IS_VALID_CRED(bp->b_wcred)) {
		kauth_cred_unref(&bp->b_wcred);
	}
}


int
buf_valid(buf_t bp)
{
	if ((bp->b_flags & (B_DONE | B_DELWRI))) {
		return 1;
	}
	return 0;
}

int
buf_fromcache(buf_t bp)
{
	if ((bp->b_flags & B_CACHE)) {
		return 1;
	}
	return 0;
}

void
buf_markinvalid(buf_t bp)
{
	SET(bp->b_flags, B_INVAL);
}
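/*
 * Mark the buffer as a delayed write: the first call moves it to its
 * vnode's dirty list via buf_reassign() and bumps the global nbdwrite
 * count; B_DONE is also set, marking the cached contents valid.
 */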
void
buf_markdelayed(buf_t bp)
{
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);

		OSAddAtomicLong(1, &nbdwrite);
		buf_reassign(bp, bp->b_vp);
	}
	SET(bp->b_flags, B_DONE);
}

void
buf_markclean(buf_t bp)
{
	if (ISSET(bp->b_flags, B_DELWRI)) {
		CLR(bp->b_flags, B_DELWRI);

		OSAddAtomicLong(-1, &nbdwrite);
		buf_reassign(bp, bp->b_vp);
	}
}

void
buf_markeintr(buf_t bp)
{
	SET(bp->b_flags, B_EINTR);
}


void
buf_markaged(buf_t bp)
{
	SET(bp->b_flags, B_AGE);
}

int
buf_fua(buf_t bp)
{
	if ((bp->b_flags & B_FUA) == B_FUA) {
		return 1;
	}
	return 0;
}

void
buf_markfua(buf_t bp)
{
	SET(bp->b_flags, B_FUA);
}

#if CONFIG_PROTECT
cpx_t
bufattr_cpx(bufattr_t bap)
{
	return bap->ba_cpx;
}

void
bufattr_setcpx(bufattr_t bap, cpx_t cpx)
{
	bap->ba_cpx = cpx;
}

void
buf_setcpoff(buf_t bp, uint64_t foffset)
{
	bp->b_attr.ba_cp_file_off = foffset;
}

uint64_t
bufattr_cpoff(bufattr_t bap)
{
	return bap->ba_cp_file_off;
}

void
bufattr_setcpoff(bufattr_t bap, uint64_t foffset)
{
	bap->ba_cp_file_off = foffset;
}
#else // !CONFIG_PROTECT

uint64_t
bufattr_cpoff(bufattr_t bap __unused)
{
	return 0;
}

void
bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset)
{
	return;
}

struct cpx *
bufattr_cpx(__unused bufattr_t bap)
{
	return NULL;
}

void
bufattr_setcpx(__unused bufattr_t bap, __unused struct cpx *cpx)
{
}

#endif /* !CONFIG_PROTECT */

bufattr_t
bufattr_alloc(void)
{
	return kalloc_type(struct bufattr, Z_WAITOK | Z_ZERO);
}

void
bufattr_free(bufattr_t bap)
{
	kfree_type(struct bufattr, bap);
}

bufattr_t
bufattr_dup(bufattr_t bap)
{
	bufattr_t new_bufattr;
	new_bufattr = kalloc_type(struct bufattr, Z_WAITOK | Z_NOFAIL);

	/* Copy the provided one into the new copy */
	memcpy(new_bufattr, bap, sizeof(struct bufattr));
	return new_bufattr;
}

int
bufattr_rawencrypted(bufattr_t bap)
{
	if ((bap->ba_flags & BA_RAW_ENCRYPTED_IO)) {
		return 1;
	}
	return 0;
}

int
bufattr_throttled(bufattr_t bap)
{
	return GET_BUFATTR_IO_TIER(bap);
}

int
bufattr_passive(bufattr_t bap)
{
	if ((bap->ba_flags & BA_PASSIVE)) {
		return 1;
	}
	return 0;
}

int
bufattr_nocache(bufattr_t bap)
{
	if ((bap->ba_flags & BA_NOCACHE)) {
		return 1;
	}
	return 0;
}

int
bufattr_meta(bufattr_t bap)
{
	if ((bap->ba_flags & BA_META)) {
		return 1;
	}
	return 0;
}

void
bufattr_markmeta(bufattr_t bap)
{
	SET(bap->ba_flags, BA_META);
}

int
bufattr_delayidlesleep(bufattr_t bap)
{
	if ((bap->ba_flags & BA_DELAYIDLESLEEP)) {
		return 1;
	}
	return 0;
}

bufattr_t
buf_attr(buf_t bp)
{
	return &bp->b_attr;
}

void
buf_markstatic(buf_t bp __unused)
{
	SET(bp->b_flags, B_STATICCONTENT);
}

int
buf_static(buf_t bp)
{
	if ((bp->b_flags & B_STATICCONTENT)) {
		return 1;
	}
	return 0;
}

void
bufattr_markgreedymode(bufattr_t bap)
{
	SET(bap->ba_flags, BA_GREEDY_MODE);
}

int
bufattr_greedymode(bufattr_t bap)
{
	if ((bap->ba_flags & BA_GREEDY_MODE)) {
		return 1;
	}
	return 0;
}

void
bufattr_markisochronous(bufattr_t bap)
{
	SET(bap->ba_flags, BA_ISOCHRONOUS);
}

int
bufattr_isochronous(bufattr_t bap)
{
	if ((bap->ba_flags & BA_ISOCHRONOUS)) {
		return 1;
	}
	return 0;
}

void
bufattr_markquickcomplete(bufattr_t bap)
{
	SET(bap->ba_flags, BA_QUICK_COMPLETE);
}

int
bufattr_quickcomplete(bufattr_t bap)
{
	if ((bap->ba_flags & BA_QUICK_COMPLETE)) {
		return 1;
	}
	return 0;
}

void
bufattr_markioscheduled(bufattr_t bap)
{
	SET(bap->ba_flags, BA_IO_SCHEDULED);
}


int
bufattr_ioscheduled(bufattr_t bap)
{
	if ((bap->ba_flags & BA_IO_SCHEDULED)) {
		return 1;
	}
	return 0;
}

void
bufattr_markexpeditedmeta(bufattr_t bap)
{
	SET(bap->ba_flags, BA_EXPEDITED_META_IO);
}

int
bufattr_expeditedmeta(bufattr_t bap)
{
	if ((bap->ba_flags & BA_EXPEDITED_META_IO)) {
		return 1;
	}
	return 0;
}

int
bufattr_willverify(bufattr_t bap)
{
	if ((bap->ba_flags & BA_WILL_VERIFY)) {
		return 1;
	}
	return 0;
}

errno_t
buf_error(buf_t bp)
{
	return bp->b_error;
}

void
buf_seterror(buf_t bp, errno_t error)
{
	if ((bp->b_error = error)) {
		SET(bp->b_flags, B_ERROR);
	} else {
		CLR(bp->b_flags, B_ERROR);
	}
}

void
buf_setflags(buf_t bp, int32_t flags)
{
	SET(bp->b_flags, (flags & BUF_X_WRFLAGS));
}

void
buf_clearflags(buf_t bp, int32_t flags)
{
	CLR(bp->b_flags, (flags & BUF_X_WRFLAGS));
}

int32_t
buf_flags(buf_t bp)
{
	return bp->b_flags & BUF_X_RDFLAGS;
}

void
buf_reset(buf_t bp, int32_t io_flags)
{
	CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA));
	SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE)));

	bp->b_error = 0;
}

uint32_t
buf_count(buf_t bp)
{
	return bp->b_bcount;
}

void
buf_setcount(buf_t bp, uint32_t bcount)
{
	bp->b_bcount = bcount;
}

uint32_t
buf_size(buf_t bp)
{
	return bp->b_bufsize;
}

void
buf_setsize(buf_t bp, uint32_t bufsize)
{
	bp->b_bufsize = bufsize;
}

uint32_t
buf_resid(buf_t bp)
{
	return bp->b_resid;
}

void
buf_setresid(buf_t bp, uint32_t resid)
{
	bp->b_resid = resid;
}

uint32_t
buf_dirtyoff(buf_t bp)
{
	return bp->b_dirtyoff;
}

uint32_t
buf_dirtyend(buf_t bp)
{
	return bp->b_dirtyend;
}

void
buf_setdirtyoff(buf_t bp, uint32_t dirtyoff)
{
	bp->b_dirtyoff = dirtyoff;
}

void
buf_setdirtyend(buf_t bp, uint32_t dirtyend)
{
	bp->b_dirtyend = dirtyend;
}

uintptr_t
buf_dataptr(buf_t bp)
{
	return bp->b_datap;
}

void
buf_setdataptr(buf_t bp, uintptr_t data)
{
	bp->b_datap = data;
}

vnode_t
buf_vnode(buf_t bp)
{
	return bp->b_vp;
}

void
buf_setvnode(buf_t bp, vnode_t vp)
{
	bp->b_vp = vp;
}

vnode_t
buf_vnop_vnode(buf_t bp)
{
	return bp->b_vnop_vp ? bp->b_vnop_vp :  bp->b_vp;
}

void *
buf_callback(buf_t bp)
{
	if (!(bp->b_flags & B_CALL)) {
		return (void *) NULL;
	}

	return (void *)bp->b_iodone;
}


errno_t
buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction)
{
	assert(!ISSET(bp->b_flags, B_FILTER) && ISSET(bp->b_lflags, BL_BUSY));

	if (callback) {
		bp->b_flags |= (B_CALL | B_ASYNC);
	} else {
		bp->b_flags &= ~B_CALL;
	}
	bp->b_transaction = transaction;
	bp->b_iodone = callback;

	return 0;
}

errno_t
buf_setupl(buf_t bp, upl_t upl, uint32_t offset)
{
	if (!(bp->b_lflags & BL_IOBUF)) {
		return EINVAL;
	}

	if (upl) {
		bp->b_flags |= B_CLUSTER;
	} else {
		bp->b_flags &= ~B_CLUSTER;
	}
	bp->b_upl = upl;
	bp->b_uploffset = offset;

	return 0;
}

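/*
 * Clone a sub-range [io_offset, io_offset + io_size) of an existing buffer
 * into a freshly allocated I/O buffer.  Both values must be non-negative
 * and the range must fit within b_bcount; for B_CLUSTER (UPL-backed)
 * buffers the start of the range must be page aligned within the UPL, and
 * the range may end on an unaligned boundary only if it reaches the end of
 * the original buffer.  Returns NULL if any of these checks fail.
 */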
buf_t
buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg)
{
	buf_t   io_bp;
	int add1, add2;

	if (io_offset < 0 || io_size < 0) {
		return NULL;
	}

	if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount) {
		return NULL;
	}

	if (bp->b_flags & B_CLUSTER) {
		if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK)) {
			return NULL;
		}

		if (os_add_overflow(io_offset, io_size, &add1) || os_add_overflow(add1, bp->b_uploffset, &add2)) {
			return NULL;
		}
		if ((add2 & PAGE_MASK) && ((uint32_t)add1 < (uint32_t)bp->b_bcount)) {
			return NULL;
		}
	}
	io_bp = alloc_io_buf(bp->b_vp, 0);

	io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA);

	if (iodone) {
		io_bp->b_transaction = arg;
		io_bp->b_iodone = iodone;
		io_bp->b_flags |= B_CALL;
	}
	if (bp->b_flags & B_CLUSTER) {
		io_bp->b_upl = bp->b_upl;
		io_bp->b_uploffset = bp->b_uploffset + io_offset;
	} else {
		io_bp->b_datap  = (uintptr_t)(((char *)bp->b_datap) + io_offset);
	}
	io_bp->b_bcount = io_size;

	return io_bp;
}


int
buf_shadow(buf_t bp)
{
	if (bp->b_lflags & BL_SHADOW) {
		return 1;
	}
	return 0;
}

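/*
 * Shadow buffers: a shadow shares (or copies, when force_copy is set) the
 * data storage of a B_META buffer so the same metadata can be in flight
 * more than once.  The shadow is linked onto the origin's b_shadow chain
 * under buf_mtx and accounted for in b_shadow_ref; the origin is not
 * returned to a freelist while shadows remain (see buf_brelse_shadow()).
 * Only meta buffers that are not themselves I/O buffers can be shadowed.
 */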
buf_t
buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
{
	return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1);
}

buf_t
buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
{
	return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0);
}


static buf_t
buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv)
{
	buf_t   io_bp;

	KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0);

	if (!(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) {
		KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0);
		return NULL;
	}
#ifdef BUF_MAKE_PRIVATE
	if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0) {
		panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref);
	}
#endif
	io_bp = alloc_io_buf(bp->b_vp, priv);

	io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA);
	io_bp->b_blkno = bp->b_blkno;
	io_bp->b_lblkno = bp->b_lblkno;
	io_bp->b_lblksize = bp->b_lblksize;

	if (iodone) {
		io_bp->b_transaction = arg;
		io_bp->b_iodone = iodone;
		io_bp->b_flags |= B_CALL;
	}
	if (force_copy == FALSE) {
		io_bp->b_bcount = bp->b_bcount;
		io_bp->b_bufsize = bp->b_bufsize;

		if (external_storage) {
			io_bp->b_datap = external_storage;
#ifdef BUF_MAKE_PRIVATE
			io_bp->b_data_store = NULL;
#endif
		} else {
			io_bp->b_datap = bp->b_datap;
#ifdef BUF_MAKE_PRIVATE
			io_bp->b_data_store = bp;
#endif
		}
		*(buf_t *)(&io_bp->b_orig) = bp;

		lck_mtx_lock_spin(&buf_mtx);

		io_bp->b_lflags |= BL_SHADOW;
		io_bp->b_shadow = bp->b_shadow;
		bp->b_shadow = io_bp;
		bp->b_shadow_ref++;

#ifdef BUF_MAKE_PRIVATE
		if (external_storage) {
			io_bp->b_lflags |= BL_EXTERNAL;
		} else {
			bp->b_data_ref++;
		}
#endif
		lck_mtx_unlock(&buf_mtx);
	} else {
		if (external_storage) {
#ifdef BUF_MAKE_PRIVATE
			io_bp->b_lflags |= BL_EXTERNAL;
#endif
			io_bp->b_bcount = bp->b_bcount;
			io_bp->b_bufsize = bp->b_bufsize;
			io_bp->b_datap = external_storage;
		} else {
			allocbuf(io_bp, bp->b_bcount);

			io_bp->b_lflags |= BL_IOBUF_ALLOC;
		}
		bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount);

#ifdef BUF_MAKE_PRIVATE
		io_bp->b_data_store = NULL;
#endif
	}
	KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0);

	return io_bp;
}


#ifdef BUF_MAKE_PRIVATE
errno_t
buf_make_private(buf_t bp)
{
	buf_t   ds_bp;
	buf_t   t_bp;
	struct buf my_buf;

	KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0);

	if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) {
		KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
		return EINVAL;
	}
	my_buf.b_flags = B_META;
	my_buf.b_datap = (uintptr_t)NULL;
	allocbuf(&my_buf, bp->b_bcount);

	bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount);

	lck_mtx_lock_spin(&buf_mtx);

	for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
		if (!ISSET(bp->b_lflags, BL_EXTERNAL)) {
			break;
		}
	}
	ds_bp = t_bp;

	if (ds_bp == NULL && bp->b_data_ref) {
		panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL");
	}

	if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0)) {
		panic("buf_make_private: ref_count == 0 && ds_bp != NULL");
	}

	if (ds_bp == NULL) {
		lck_mtx_unlock(&buf_mtx);

		buf_free_meta_store(&my_buf);

		KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
		return EINVAL;
	}
	for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
		if (!ISSET(t_bp->b_lflags, BL_EXTERNAL)) {
			t_bp->b_data_store = ds_bp;
		}
	}
	ds_bp->b_data_ref = bp->b_data_ref;

	bp->b_data_ref = 0;
	bp->b_datap = my_buf.b_datap;

	lck_mtx_unlock(&buf_mtx);

	KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0);
	return 0;
}
#endif

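/*
 * Install 'filter' as the buffer's completion routine, optionally handing
 * back the previous iodone/transaction pair so the caller can chain to it.
 * Unlike buf_setcallback(), this sets B_FILTER rather than B_CALL and does
 * not force the buffer asynchronous.
 */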
void
buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction,
    void(**old_iodone)(buf_t, void *), void **old_transaction)
{
	assert(ISSET(bp->b_lflags, BL_BUSY));

	if (old_iodone) {
		*old_iodone = bp->b_iodone;
	}
	if (old_transaction) {
		*old_transaction = bp->b_transaction;
	}

	bp->b_transaction = transaction;
	bp->b_iodone = filter;
	if (filter) {
		bp->b_flags |= B_FILTER;
	} else {
		bp->b_flags &= ~B_FILTER;
	}
}


daddr64_t
buf_blkno(buf_t bp)
{
	return bp->b_blkno;
}

daddr64_t
buf_lblkno(buf_t bp)
{
	return bp->b_lblkno;
}

uint32_t
buf_lblksize(buf_t bp)
{
	return bp->b_lblksize;
}

void
buf_setblkno(buf_t bp, daddr64_t blkno)
{
	bp->b_blkno = blkno;
}

void
buf_setlblkno(buf_t bp, daddr64_t lblkno)
{
	bp->b_lblkno = lblkno;
}

void
buf_setlblksize(buf_t bp, uint32_t lblksize)
{
	bp->b_lblksize = lblksize;
}

dev_t
buf_device(buf_t bp)
{
	return bp->b_dev;
}

errno_t
buf_setdevice(buf_t bp, vnode_t vp)
{
	if ((vp->v_type != VBLK) && (vp->v_type != VCHR)) {
		return EINVAL;
	}
	bp->b_dev = vp->v_rdev;

	return 0;
}


void *
buf_drvdata(buf_t bp)
{
	return bp->b_drvdata;
}

void
buf_setdrvdata(buf_t bp, void *drvdata)
{
	bp->b_drvdata = drvdata;
}

void *
buf_fsprivate(buf_t bp)
{
	return bp->b_fsprivate;
}

void
buf_setfsprivate(buf_t bp, void *fsprivate)
{
	bp->b_fsprivate = fsprivate;
}

kauth_cred_t
buf_rcred(buf_t bp)
{
	return bp->b_rcred;
}

kauth_cred_t
buf_wcred(buf_t bp)
{
	return bp->b_wcred;
}

void *
buf_upl(buf_t bp)
{
	return bp->b_upl;
}

uint32_t
buf_uploffset(buf_t bp)
{
	return (uint32_t)(bp->b_uploffset);
}

proc_t
buf_proc(buf_t bp)
{
	return bp->b_proc;
}


static errno_t
buf_map_range_internal(buf_t bp, caddr_t *io_addr, boolean_t legacymode)
{
	buf_t           real_bp;
	vm_offset_t     vaddr;
	kern_return_t   kret;

	if (!(bp->b_flags & B_CLUSTER)) {
		*io_addr = (caddr_t)bp->b_datap;
		return 0;
	}
	real_bp = (buf_t)(bp->b_real_bp);

	if (real_bp && real_bp->b_datap) {
		/*
		 * b_real_bp is only valid if B_CLUSTER is SET;
		 * if it's non-zero, then someone did a cluster_bp call.
		 * If the backing physical pages were already mapped
		 * in before the call to cluster_bp (non-zero b_datap),
		 * then we just use that mapping.
		 */
		*io_addr = (caddr_t)real_bp->b_datap;
		return 0;
	}

	if (legacymode) {
		kret = ubc_upl_map(bp->b_upl, &vaddr);    /* Map it in */
		if (kret == KERN_SUCCESS) {
			vaddr += bp->b_uploffset;
		}
	} else {
		kret = ubc_upl_map_range(bp->b_upl, bp->b_uploffset, bp->b_bcount, VM_PROT_DEFAULT, &vaddr);    /* Map it in */
	}

	if (kret != KERN_SUCCESS) {
		*io_addr = NULL;

		return ENOMEM;
	}

	*io_addr = (caddr_t)vaddr;

	return 0;
}

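/*
 * buf_map() maps the buffer's entire backing UPL (the legacy behavior),
 * while buf_map_range() maps only the [b_uploffset, b_uploffset + b_bcount)
 * window; each must be paired with the matching buf_unmap()/
 * buf_unmap_range() below.  For non-clustered buffers both simply return
 * b_datap.
 */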
errno_t
buf_map_range(buf_t bp, caddr_t *io_addr)
{
	return buf_map_range_internal(bp, io_addr, false);
}

errno_t
buf_map(buf_t bp, caddr_t *io_addr)
{
	return buf_map_range_internal(bp, io_addr, true);
}

static errno_t
buf_unmap_range_internal(buf_t bp, boolean_t legacymode)
{
	buf_t           real_bp;
	kern_return_t   kret;

	if (!(bp->b_flags & B_CLUSTER)) {
		return 0;
	}
	/*
	 * see buf_map for the explanation
	 */
	real_bp = (buf_t)(bp->b_real_bp);

	if (real_bp && real_bp->b_datap) {
		return 0;
	}

	if ((bp->b_lflags & BL_IOBUF) &&
	    ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) {
		/*
		 * ignore pageins... the 'right' thing will
		 * happen due to the way we handle speculative
		 * clusters...
		 *
		 * when we commit these pages, we'll hit
		 * it with UPL_COMMIT_INACTIVE which
		 * will clear the reference bit that got
		 * turned on when we touched the mapping
		 */
		bp->b_flags |= B_AGE;
	}

	if (legacymode) {
		kret = ubc_upl_unmap(bp->b_upl);
	} else {
		kret = ubc_upl_unmap_range(bp->b_upl, bp->b_uploffset, bp->b_bcount);
	}

	if (kret != KERN_SUCCESS) {
		return EINVAL;
	}
	return 0;
}

errno_t
buf_unmap_range(buf_t bp)
{
	return buf_unmap_range_internal(bp, false);
}

errno_t
buf_unmap(buf_t bp)
{
	return buf_unmap_range_internal(bp, true);
}


void
buf_clear(buf_t bp)
{
	caddr_t baddr;

	if (buf_map(bp, &baddr) == 0) {
		bzero(baddr, bp->b_bcount);
		buf_unmap(bp);
	}
	bp->b_resid = 0;
}

/*
 * Read or write a buffer that is not contiguous on disk.
 * buffer is marked done/error at the conclusion
 */
static int
buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes)
{
	vnode_t vp = buf_vnode(bp);
	buf_t   io_bp;                   /* For reading or writing a single block */
	int     io_direction;
	int     io_resid;
	size_t  io_contig_bytes;
	daddr64_t io_blkno;
	int     error = 0;
	int     bmap_flags;

	/*
	 * save our starting point... the bp was already mapped
	 * in buf_strategy before we got called
	 * no sense doing it again.
	 */
	io_blkno = bp->b_blkno;
	/*
	 * Make sure we redo this mapping for the next I/O
	 * i.e. this can never be a 'permanent' mapping
	 */
	bp->b_blkno = bp->b_lblkno;

	/*
	 * Get an io buffer to do the deblocking
	 */
	io_bp = alloc_io_buf(devvp, 0);

	io_bp->b_lblkno = bp->b_lblkno;
	io_bp->b_lblksize = bp->b_lblksize;
	io_bp->b_datap  = bp->b_datap;
	io_resid        = bp->b_bcount;
	io_direction    = bp->b_flags & B_READ;
	io_contig_bytes = contig_bytes;

	if (bp->b_flags & B_READ) {
		bmap_flags = VNODE_READ;
	} else {
		bmap_flags = VNODE_WRITE;
	}

	for (;;) {
		if (io_blkno == -1) {
			/*
			 * this is unexpected, but we'll allow for it
			 */
			bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes);
		} else {
			io_bp->b_bcount  = (uint32_t)io_contig_bytes;
			io_bp->b_bufsize = (uint32_t)io_contig_bytes;
			io_bp->b_resid   = (uint32_t)io_contig_bytes;
			io_bp->b_blkno   = io_blkno;

			buf_reset(io_bp, io_direction);

			/*
			 * Call the device to do the I/O and wait for it.  Make sure the appropriate party is charged for write
			 */

			if (!ISSET(bp->b_flags, B_READ)) {
				OSAddAtomic(1, &devvp->v_numoutput);
			}

			if ((error = VNOP_STRATEGY(io_bp))) {
				break;
			}
			if ((error = (int)buf_biowait(io_bp))) {
				break;
			}
			if (io_bp->b_resid) {
				io_resid -= (io_contig_bytes - io_bp->b_resid);
				break;
			}
		}
		if ((io_resid -= io_contig_bytes) == 0) {
			break;
		}
		f_offset       += io_contig_bytes;
		io_bp->b_datap += io_contig_bytes;

		/*
		 * Map the current position to a physical block number
		 */
		if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL))) {
			break;
		}
	}
	buf_free(io_bp);

	if (error) {
		buf_seterror(bp, error);
	}
	bp->b_resid = io_resid;
	/*
	 * This I/O is now complete
	 */
	buf_biodone(bp);

	return error;
}


/*
 * struct vnop_strategy_args {
 *      struct buf *a_bp;
 * } *ap;
 */
errno_t
buf_strategy(vnode_t devvp, void *ap)
{
	buf_t   bp = ((struct vnop_strategy_args *)ap)->a_bp;
	vnode_t vp = bp->b_vp;
	int     bmap_flags;
	errno_t error;
#if CONFIG_DTRACE
	int dtrace_io_start_flag = 0;    /* We only want to trip the io:::start
	                                  * probe once, with the true physical
	                                  * block in place (b_blkno)
	                                  */

#endif

	if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) {
		panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK");
	}
	/*
	 * associate the physical device with
	 * this buf_t even if we don't
	 * end up issuing the I/O...
	 */
	bp->b_dev = devvp->v_rdev;

	if (bp->b_flags & B_READ) {
		bmap_flags = VNODE_READ;
	} else {
		bmap_flags = VNODE_WRITE;
	}

	if (!(bp->b_flags & B_CLUSTER)) {
		if ((bp->b_upl)) {
			/*
			 * we have a UPL associated with this bp
			 * go through cluster_bp which knows how
			 * to deal with filesystem block sizes
			 * that aren't equal to the page size
			 */
			DTRACE_IO1(start, buf_t, bp);
			return cluster_bp(bp);
		}
		if (bp->b_blkno == bp->b_lblkno) {
			off_t       f_offset;
			size_t  contig_bytes;

			if (bp->b_lblksize && bp->b_lblkno >= 0) {
				f_offset = bp->b_lblkno * bp->b_lblksize;
			} else if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) {
				DTRACE_IO1(start, buf_t, bp);
				buf_seterror(bp, error);
				buf_biodone(bp);

				return error;
			}

			if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) {
				DTRACE_IO1(start, buf_t, bp);
				buf_seterror(bp, error);
				buf_biodone(bp);

				return error;
			}

			DTRACE_IO1(start, buf_t, bp);
#if CONFIG_DTRACE
			dtrace_io_start_flag = 1;
#endif /* CONFIG_DTRACE */

			if ((bp->b_blkno == -1) || (contig_bytes == 0)) {
				/* Set block number to force biodone later */
				bp->b_blkno = -1;
				buf_clear(bp);
			} else if (contig_bytes < (size_t)bp->b_bcount) {
				return buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes);
			}
		}

#if CONFIG_DTRACE
		if (dtrace_io_start_flag == 0) {
			DTRACE_IO1(start, buf_t, bp);
			dtrace_io_start_flag = 1;
		}
#endif /* CONFIG_DTRACE */

		if (bp->b_blkno == -1) {
			buf_biodone(bp);
			return 0;
		}
	}

#if CONFIG_DTRACE
	if (dtrace_io_start_flag == 0) {
		DTRACE_IO1(start, buf_t, bp);
	}
#endif /* CONFIG_DTRACE */

#if CONFIG_PROTECT
	/* Capture f_offset in the bufattr*/
	cpx_t cpx = bufattr_cpx(buf_attr(bp));
	if (cpx) {
		/* No need to go here for older EAs */
		if (cpx_use_offset_for_iv(cpx) && !cpx_synthetic_offset_for_iv(cpx)) {
			off_t f_offset;

			/*
			 * this assert should be changed if cluster_io  ever
			 * changes its logical block size.
			 */
			assert((bp->b_lblksize == CLUSTER_IO_BLOCK_SIZE) || !(bp->b_flags & B_CLUSTER));

			if (bp->b_lblksize && bp->b_lblkno >= 0) {
				f_offset = bp->b_lblkno * bp->b_lblksize;
			} else if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset))) {
				return error;
			}

			/*
			 * Attach the file offset to this buffer.  The
			 * bufattr attributes will be passed down the stack
			 * until they reach the storage driver (whether
			 * IOFlashStorage, ASP, or IONVMe). The driver
			 * will retain the offset in a local variable when it
			 * issues its I/Os to the NAND controller.
			 *
			 * Note that LwVM may end up splitting this I/O
			 * into sub-I/Os if it crosses a chunk boundary.  In this
			 * case, LwVM will update this field when it dispatches
			 * each I/O to IOFlashStorage.  But from our perspective
			 * we have only issued a single I/O.
			 *
			 * In the case of APFS we do not bounce through another
			 * intermediate layer (such as CoreStorage). APFS will
			 * issue the I/Os directly to the block device / IOMedia
			 * via buf_strategy on the specfs node.
			 */
			buf_setcpoff(bp, f_offset);
			CP_DEBUG((CPDBG_OFFSET_IO | DBG_FUNC_NONE), (uint32_t) f_offset, (uint32_t) bp->b_lblkno, (uint32_t) bp->b_blkno, (uint32_t) bp->b_bcount, 0);
		}
	}
#endif

	/*
	 * we can issue the I/O because...
	 * either B_CLUSTER is set which
	 * means that the I/O is properly set
	 * up to be a multiple of the page size, or
	 * we were able to successfully set up the
	 * physical block mapping
	 */
	bp->b_vnop_vp = devvp;
	error = VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap);
	bp->b_vnop_vp = NULLVP;
	DTRACE_FSINFO(strategy, vnode_t, vp);
	return error;
}



buf_t
buf_alloc(vnode_t vp)
{
	return alloc_io_buf(vp, is_vm_privileged());
}

void
buf_free(buf_t bp)
{
	free_io_buf(bp);
}


/*
 * iterate buffers for the specified vp.
 *   if BUF_SCAN_DIRTY is set, do the dirty list
 *   if BUF_SCAN_CLEAN is set, do the clean list
 *   if neither flag is set, default to BUF_SCAN_DIRTY
 *   if BUF_NOTIFY_BUSY is set, call the callout function using a NULL bp for busy pages
 */

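/*
 * Example (a minimal sketch, not from this file): a filesystem might count
 * its dirty buffers with a callout such as
 *
 *	static int
 *	count_dirty(buf_t bp, void *arg)
 *	{
 *		if (bp == NULL)              // busy buffer, BUF_NOTIFY_BUSY
 *			return BUF_CLAIMED;
 *		(*(int *)arg)++;
 *		return BUF_RETURNED;         // hand the buffer back via buf_brelse
 *	}
 *
 *	int count = 0;
 *	buf_iterate(vp, count_dirty, BUF_SCAN_DIRTY | BUF_NOTIFY_BUSY, &count);
 *
 * BUF_RETURNED / BUF_RETURNED_DONE release the buffer on the callout's
 * behalf; BUF_CLAIMED / BUF_CLAIMED_DONE mean the callout kept it; the
 * *_DONE variants stop the iteration.
 */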
struct buf_iterate_info_t {
	int flag;
	struct buflists *listhead;
};

void
buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg)
{
	buf_t   bp;
	int     retval;
	struct  buflists local_iterblkhd;
	int     lock_flags = BAC_NOWAIT | BAC_REMOVE;
	int     notify_busy = flags & BUF_NOTIFY_BUSY;
	struct buf_iterate_info_t list[2];
	int     num_lists, i;

	if (flags & BUF_SKIP_LOCKED) {
		lock_flags |= BAC_SKIP_LOCKED;
	}
	if (flags & BUF_SKIP_NONLOCKED) {
		lock_flags |= BAC_SKIP_NONLOCKED;
	}

	if (!(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN))) {
		flags |= BUF_SCAN_DIRTY;
	}

	num_lists = 0;

	if (flags & BUF_SCAN_DIRTY) {
		list[num_lists].flag = VBI_DIRTY;
		list[num_lists].listhead = &vp->v_dirtyblkhd;
		num_lists++;
	}
	if (flags & BUF_SCAN_CLEAN) {
		list[num_lists].flag = VBI_CLEAN;
		list[num_lists].listhead = &vp->v_cleanblkhd;
		num_lists++;
	}

	for (i = 0; i < num_lists; i++) {
		lck_mtx_lock(&buf_mtx);

		if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) {
			lck_mtx_unlock(&buf_mtx);
			continue;
		}
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);
			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(list[i].listhead, bp, b_vnbufs);

			if (buf_acquire_locked(bp, lock_flags, 0, 0)) {
				if (notify_busy) {
					bp = NULL;
				} else {
					continue;
				}
			}

			lck_mtx_unlock(&buf_mtx);

			retval = callout(bp, arg);

			switch (retval) {
			case BUF_RETURNED:
				if (bp) {
					buf_brelse(bp);
				}
				break;
			case BUF_CLAIMED:
				break;
			case BUF_RETURNED_DONE:
				if (bp) {
					buf_brelse(bp);
				}
				lck_mtx_lock(&buf_mtx);
				goto out;
			case BUF_CLAIMED_DONE:
				lck_mtx_lock(&buf_mtx);
				goto out;
			}
			lck_mtx_lock(&buf_mtx);
		} /* while list has more nodes */
out:
		buf_itercomplete(vp, &local_iterblkhd, list[i].flag);
		lck_mtx_unlock(&buf_mtx);
	} /* for each list */
} /* buf_iterate */


/*
 * Flush out and invalidate all buffers associated with a vnode.
 */
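/*
 * Flags: BUF_SKIP_META leaves metadata buffers (negative lblkno or B_META)
 * alone, BUF_INVALIDATE_LOCKED invalidates even B_LOCKED buffers, and
 * BUF_WRITE_DATA pushes delayed writes to disk via VNOP_BWRITE instead of
 * discarding them.
 */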
int
buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo)
{
	buf_t   bp;
	int     aflags;
	int     error = 0;
	int     must_rescan = 1;
	struct  buflists local_iterblkhd;


	if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) {
		return 0;
	}

	lck_mtx_lock(&buf_mtx);

	for (;;) {
		if (must_rescan == 0) {
			/*
			 * the lists may not be empty, but all that's left at this
			 * point are metadata or B_LOCKED buffers which are being
			 * skipped... we know this because we made it through both
			 * the clean and dirty lists without dropping buf_mtx...
			 * each time we drop buf_mtx we bump "must_rescan"
			 */
			break;
		}
		if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) {
			break;
		}
		must_rescan = 0;
		/*
		 * iterate the clean list
		 */
		if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) {
			goto try_dirty_list;
		}
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);

			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);

			/*
			 * some filesystems distinguish meta data blocks with a negative logical block #
			 */
			if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) {
				continue;
			}

			aflags = BAC_REMOVE;

			if (!(flags & BUF_INVALIDATE_LOCKED)) {
				aflags |= BAC_SKIP_LOCKED;
			}

			if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) {
				if (error == EDEADLK) {
					/*
					 * this buffer was marked B_LOCKED...
					 * we didn't drop buf_mtx, so we
					 * don't need to rescan
					 */
					continue;
				}
				if (error == EAGAIN) {
					/*
					 * found a busy buffer... we blocked and
					 * dropped buf_mtx, so we're going to
					 * need to rescan after this pass is completed
					 */
					must_rescan++;
					continue;
				}
				/*
				 * got some kind of 'real' error out of the msleep
				 * in buf_acquire_locked, terminate the scan and return the error
				 */
				buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);

				lck_mtx_unlock(&buf_mtx);
				return error;
			}
			lck_mtx_unlock(&buf_mtx);

			if (bp->b_flags & B_LOCKED) {
				KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0);
			}

			CLR(bp->b_flags, B_LOCKED);
			SET(bp->b_flags, B_INVAL);
			buf_brelse(bp);

			lck_mtx_lock(&buf_mtx);

			/*
			 * by dropping buf_mtx, we allow new
			 * buffers to be added to the vnode list(s)
			 * we'll have to rescan at least once more
			 * if the queues aren't empty
			 */
			must_rescan++;
		}
		buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);

try_dirty_list:
		/*
		 * Now iterate on dirty blks
		 */
		if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) {
			continue;
		}
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);

			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);

			/*
			 * some filesystems distinguish meta data blocks with a negative logical block #
			 */
			if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) {
				continue;
			}

			aflags = BAC_REMOVE;

			if (!(flags & BUF_INVALIDATE_LOCKED)) {
				aflags |= BAC_SKIP_LOCKED;
			}

			if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) {
				if (error == EDEADLK) {
					/*
					 * this buffer was marked B_LOCKED...
					 * we didn't drop buf_mtx, so we
					 * don't need to rescan
					 */
					continue;
				}
				if (error == EAGAIN) {
					/*
					 * found a busy buffer... we blocked and
					 * dropped buf_mtx, so we're going to
					 * need to rescan after this pass is completed
					 */
					must_rescan++;
					continue;
				}
				/*
				 * got some kind of 'real' error out of the msleep
				 * in buf_acquire_locked, terminate the scan and return the error
				 */
				buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);

				lck_mtx_unlock(&buf_mtx);
				return error;
			}
			lck_mtx_unlock(&buf_mtx);

			if (bp->b_flags & B_LOCKED) {
				KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0);
			}

			CLR(bp->b_flags, B_LOCKED);
			SET(bp->b_flags, B_INVAL);

			if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) {
				(void) VNOP_BWRITE(bp);
			} else {
				buf_brelse(bp);
			}

			lck_mtx_lock(&buf_mtx);
			/*
			 * by dropping buf_mtx, we allow new
			 * buffers to be added to the vnode list(s)
			 * we'll have to rescan at least once more
			 * if the queues aren't empty
			 */
			must_rescan++;
		}
		buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
	}
	lck_mtx_unlock(&buf_mtx);

	return 0;
}

void
buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg)
{
	(void) buf_flushdirtyblks_skipinfo(vp, wait, flags, msg);
	return;
}

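/*
 * Like buf_flushdirtyblks(), but reports back: returns non-zero if any
 * B_LOCKED buffer had to be skipped (buf_acquire_locked returned EDEADLK),
 * so callers that passed BUF_SKIP_LOCKED/BUF_SKIP_NONLOCKED can tell the
 * flush was incomplete.
 */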
int
buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg)
{
	buf_t   bp;
	int     writes_issued = 0;
	errno_t error;
	int     busy = 0;
	struct  buflists local_iterblkhd;
	int     lock_flags = BAC_NOWAIT | BAC_REMOVE;
	int any_locked = 0;

	if (flags & BUF_SKIP_LOCKED) {
		lock_flags |= BAC_SKIP_LOCKED;
	}
	if (flags & BUF_SKIP_NONLOCKED) {
		lock_flags |= BAC_SKIP_NONLOCKED;
	}
loop:
	lck_mtx_lock(&buf_mtx);

	if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) {
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);
			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);

			if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY) {
				busy++;
			}
			if (error) {
				/*
				 * If we passed in BUF_SKIP_LOCKED or BUF_SKIP_NONLOCKED,
				 * we may want to do something differently if a locked or unlocked
				 * buffer was encountered (depending on the arg specified).
				 * In this case, we know that one of those two was set, and the
				 * buf acquisition failed above.
				 *
				 * If it failed with EDEADLK, then save state which can be emitted
				 * later on to the caller.  Most callers should not care.
				 */
1907 				if (error == EDEADLK) {
1908 					any_locked++;
1909 				}
1910 				continue;
1911 			}
1912 			lck_mtx_unlock(&buf_mtx);
1913 
1914 			bp->b_flags &= ~B_LOCKED;
1915 
1916 			/*
1917 			 * Wait for I/O associated with indirect blocks to complete,
1918 			 * since there is no way to quickly wait for them below.
1919 			 */
1920 			if ((bp->b_vp == vp) || (wait == 0)) {
1921 				(void) buf_bawrite(bp);
1922 			} else {
1923 				(void) VNOP_BWRITE(bp);
1924 			}
1925 			writes_issued++;
1926 
1927 			lck_mtx_lock(&buf_mtx);
1928 		}
1929 		buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1930 	}
1931 	lck_mtx_unlock(&buf_mtx);
1932 
1933 	if (wait) {
1934 		(void)vnode_waitforwrites(vp, 0, 0, 0, msg);
1935 
1936 		if (vp->v_dirtyblkhd.lh_first && busy) {
1937 			/*
1938 			 * we had one or more BUSY buffers on
1939 			 * the dirtyblock list... most likely
1940 			 * these are due to delayed writes that
1941 			 * were moved to the bclean queue but
1942 			 * have not yet been 'written'.
1943 			 * if we issued some writes on the
1944 			 * previous pass, we try again immediately
1945 			 * if we didn't, we'll sleep for some time
1946 			 * to allow the state to change...
1947 			 */
1948 			if (writes_issued == 0) {
1949 				(void)tsleep((caddr_t)&vp->v_numoutput,
1950 				    PRIBIO + 1, "vnode_flushdirtyblks", hz / 20);
1951 			}
1952 			writes_issued = 0;
1953 			busy = 0;
1954 
1955 			goto loop;
1956 		}
1957 	}
1958 
1959 	return any_locked;
1960 }
1961 
1962 
1963 /*
1964  * called with buf_mtx held...
1965  * this lock protects the queue manipulation
1966  */
1967 static int
buf_iterprepare(vnode_t vp,struct buflists * iterheadp,int flags)1968 buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags)
1969 {
1970 	struct buflists * listheadp;
1971 
1972 	if (flags & VBI_DIRTY) {
1973 		listheadp = &vp->v_dirtyblkhd;
1974 	} else {
1975 		listheadp = &vp->v_cleanblkhd;
1976 	}
1977 
1978 	while (vp->v_iterblkflags & VBI_ITER) {
1979 		vp->v_iterblkflags |= VBI_ITERWANT;
1980 		msleep(&vp->v_iterblkflags, &buf_mtx, 0, "buf_iterprepare", NULL);
1981 	}
1982 	if (LIST_EMPTY(listheadp)) {
1983 		LIST_INIT(iterheadp);
1984 		return EINVAL;
1985 	}
1986 	vp->v_iterblkflags |= VBI_ITER;
1987 
1988 	iterheadp->lh_first = listheadp->lh_first;
1989 	listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first;
1990 	LIST_INIT(listheadp);
1991 
1992 	return 0;
1993 }
1994 
1995 /*
1996  * called with buf_mtx held...
1997  * this lock protects the queue manipulation
1998  */
1999 static void
buf_itercomplete(vnode_t vp,struct buflists * iterheadp,int flags)2000 buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags)
2001 {
2002 	struct buflists * listheadp;
2003 	buf_t bp;
2004 
2005 	if (flags & VBI_DIRTY) {
2006 		listheadp = &vp->v_dirtyblkhd;
2007 	} else {
2008 		listheadp = &vp->v_cleanblkhd;
2009 	}
2010 
2011 	while (!LIST_EMPTY(iterheadp)) {
2012 		bp = LIST_FIRST(iterheadp);
2013 		LIST_REMOVE(bp, b_vnbufs);
2014 		LIST_INSERT_HEAD(listheadp, bp, b_vnbufs);
2015 	}
2016 	vp->v_iterblkflags &= ~VBI_ITER;
2017 
2018 	if (vp->v_iterblkflags & VBI_ITERWANT) {
2019 		vp->v_iterblkflags &= ~VBI_ITERWANT;
2020 		wakeup(&vp->v_iterblkflags);
2021 	}
2022 }
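
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * pairs buf_iterprepare() with buf_itercomplete() while holding buf_mtx,
 * moving each buf back to the vnode's list before operating on it, just
 * as the dirty-block flush loop above does.
 */
#if 0	/* example only -- not compiled */
static void
example_walk_dirty_bufs(vnode_t vp)
{
	struct buflists local_iterblkhd;
	buf_t bp;

	lck_mtx_lock(&buf_mtx);
	if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) {
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);
			/* return it to the vnode's dirty list before use */
			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
			/* ... inspect or buf_acquire_locked() 'bp' here ... */
		}
		buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
	}
	lck_mtx_unlock(&buf_mtx);
}
#endif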
2023 
2024 
2025 static void
2026 bremfree_locked(buf_t bp)
2027 {
2028 	struct bqueues *dp = NULL;
2029 	int whichq;
2030 
2031 	whichq = bp->b_whichq;
2032 
2033 	if (whichq == -1) {
2034 		if (bp->b_shadow_ref == 0) {
2035 			panic("bremfree_locked: %p not on freelist", bp);
2036 		}
2037 		/*
2038 		 * there are clones pointing to 'bp'...
2039 		 * therefore, it was not put on a freelist
2040 		 * when buf_brelse was last called on 'bp'
2041 		 */
2042 		return;
2043 	}
2044 	/*
2045 	 * We only calculate the head of the freelist when removing
2046 	 * the last element of the list as that is the only time that
2047 	 * it is needed (e.g. to reset the tail pointer).
2048 	 *
2049 	 * NB: This makes an assumption about how tailq's are implemented.
2050 	 */
2051 	if (bp->b_freelist.tqe_next == NULL) {
2052 		dp = &bufqueues[whichq];
2053 
2054 		if (dp->tqh_last != &bp->b_freelist.tqe_next) {
2055 			panic("bremfree: lost tail");
2056 		}
2057 	}
2058 	TAILQ_REMOVE(dp, bp, b_freelist);
2059 
2060 	if (whichq == BQ_LAUNDRY) {
2061 		blaundrycnt--;
2062 	}
2063 
2064 	bp->b_whichq = -1;
2065 	bp->b_timestamp = 0;
2066 	bp->b_shadow = 0;
2067 }
2068 
2069 /*
2070  * Associate a buffer with a vnode.
2071  * buf_mtx must be locked on entry
2072  */
2073 static void
2074 bgetvp_locked(vnode_t vp, buf_t bp)
2075 {
2076 	if (bp->b_vp != vp) {
2077 		panic("bgetvp_locked: not free");
2078 	}
2079 
2080 	if (vp->v_type == VBLK || vp->v_type == VCHR) {
2081 		bp->b_dev = vp->v_rdev;
2082 	} else {
2083 		bp->b_dev = NODEV;
2084 	}
2085 	/*
2086 	 * Insert onto list for new vnode.
2087 	 */
2088 	bufinsvn(bp, &vp->v_cleanblkhd);
2089 }
2090 
2091 /*
2092  * Disassociate a buffer from a vnode.
2093  * buf_mtx must be locked on entry
2094  */
2095 static void
2096 brelvp_locked(buf_t bp)
2097 {
2098 	/*
2099 	 * Delete from old vnode list, if on one.
2100 	 */
2101 	if (bp->b_vnbufs.le_next != NOLIST) {
2102 		bufremvn(bp);
2103 	}
2104 
2105 	bp->b_vp = (vnode_t)NULL;
2106 }
2107 
2108 /*
2109  * Reassign a buffer from one vnode to another.
2110  * Used to assign file specific control information
2111  * (indirect blocks) to the vnode to which they belong.
2112  */
2113 static void
2114 buf_reassign(buf_t bp, vnode_t newvp)
2115 {
2116 	struct buflists *listheadp;
2117 
2118 	if (newvp == NULL) {
2119 		printf("buf_reassign: NULL");
2120 		return;
2121 	}
2122 	lck_mtx_lock_spin(&buf_mtx);
2123 
2124 	/*
2125 	 * Delete from old vnode list, if on one.
2126 	 */
2127 	if (bp->b_vnbufs.le_next != NOLIST) {
2128 		bufremvn(bp);
2129 	}
2130 	/*
2131 	 * If dirty, put on list of dirty buffers;
2132 	 * otherwise insert onto list of clean buffers.
2133 	 */
2134 	if (ISSET(bp->b_flags, B_DELWRI)) {
2135 		listheadp = &newvp->v_dirtyblkhd;
2136 	} else {
2137 		listheadp = &newvp->v_cleanblkhd;
2138 	}
2139 	bufinsvn(bp, listheadp);
2140 
2141 	lck_mtx_unlock(&buf_mtx);
2142 }
2143 
2144 static __inline__ void
2145 bufhdrinit(buf_t bp)
2146 {
2147 	bzero((char *)bp, sizeof *bp);
2148 	bp->b_dev = NODEV;
2149 	bp->b_rcred = NOCRED;
2150 	bp->b_wcred = NOCRED;
2151 	bp->b_vnbufs.le_next = NOLIST;
2152 	bp->b_flags = B_INVAL;
2153 
2154 	return;
2155 }
2156 
2157 /*
2158  * Initialize buffers and hash links for buffers.
2159  */
2160 __private_extern__ void
2161 bufinit(void)
2162 {
2163 	buf_t   bp;
2164 	struct bqueues *dp;
2165 	int     i;
2166 
2167 	nbuf_headers = 0;
2168 	/* Initialize the buffer queues ('freelists') and the hash table */
2169 	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
2170 		TAILQ_INIT(dp);
2171 	}
2172 	bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash);
2173 
2174 	buf_busycount = 0;
2175 
2176 	/* Initialize the buffer headers */
2177 	for (i = 0; i < max_nbuf_headers; i++) {
2178 		nbuf_headers++;
2179 		bp = &buf_headers[i];
2180 		bufhdrinit(bp);
2181 
2182 		BLISTNONE(bp);
2183 		dp = &bufqueues[BQ_EMPTY];
2184 		bp->b_whichq = BQ_EMPTY;
2185 		bp->b_timestamp = buf_timestamp();
2186 		binsheadfree(bp, dp, BQ_EMPTY);
2187 		binshash(bp, &invalhash);
2188 	}
2189 	boot_nbuf_headers = nbuf_headers;
2190 
2191 	TAILQ_INIT(&iobufqueue);
2192 	TAILQ_INIT(&delaybufqueue);
2193 
2194 	for (; i < nbuf_headers + niobuf_headers; i++) {
2195 		bp = &buf_headers[i];
2196 		bufhdrinit(bp);
2197 		bp->b_whichq = -1;
2198 		binsheadfree(bp, &iobufqueue, -1);
2199 	}
2200 
2201 	/*
2202 	 * allocate and initialize cluster specific global locks...
2203 	 */
2204 	cluster_init();
2205 
2206 	printf("using %d buffer headers and %d cluster IO buffer headers\n",
2207 	    nbuf_headers, niobuf_headers);
2208 
2209 	/* start the bcleanbuf() thread */
2210 	bcleanbuf_thread_init();
2211 
2212 	/* Register a callout for relieving vm pressure */
2213 	if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) {
2214 		panic("Couldn't register buffer cache callout for vm pressure!");
2215 	}
2216 }
2217 
2218 /*
2219  * Zones for the meta data buffers
2220  */
2221 
2222 #define MINMETA 512
2223 #define MAXMETA 16384
2224 
2225 KALLOC_HEAP_DEFINE(KHEAP_VFS_BIO, "vfs_bio", KHEAP_ID_DATA_BUFFERS);
2226 
2227 static struct buf *
2228 bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype)
2229 {
2230 	buf_t   bp;
2231 
2232 	bp = buf_getblk(vp, blkno, size, 0, 0, queuetype);
2233 
2234 	/*
2235 	 * If buffer does not have data valid, start a read.
2236 	 * Note that if buffer is B_INVAL, buf_getblk() won't return it.
2237 	 * Therefore, it's valid if its I/O has completed or been delayed.
2238 	 */
2239 	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
2240 		struct proc *p;
2241 
2242 		p = current_proc();
2243 
2244 		/* Start I/O for the buffer (keeping credentials). */
2245 		SET(bp->b_flags, B_READ | async);
2246 		if (IS_VALID_CRED(cred) && !IS_VALID_CRED(bp->b_rcred)) {
2247 			kauth_cred_ref(cred);
2248 			bp->b_rcred = cred;
2249 		}
2250 
2251 		VNOP_STRATEGY(bp);
2252 
2253 		trace(TR_BREADMISS, pack(vp, size), blkno);
2254 
2255 		/* Pay for the read. */
2256 		if (p && p->p_stats) {
2257 			OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock);            /* XXX */
2258 		}
2259 
2260 		if (async) {
2261 			/*
2262 			 * since we asked for an ASYNC I/O
2263 			 * the biodone will do the brelse
2264 			 * we don't want to pass back a bp
2265 			 * that we don't 'own'
2266 			 */
2267 			bp = NULL;
2268 		}
2269 	} else if (async) {
2270 		buf_brelse(bp);
2271 		bp = NULL;
2272 	}
2273 
2274 	trace(TR_BREADHIT, pack(vp, size), blkno);
2275 
2276 	return bp;
2277 }
2278 
2279 /*
2280  * Perform the reads for buf_breadn() and buf_meta_breadn().
2281  * Trivial modification to the breada algorithm presented in Bach (p.55).
2282  */
2283 static errno_t
2284 do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes,
2285     int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype)
2286 {
2287 	buf_t   bp;
2288 	int     i;
2289 
2290 	bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype);
2291 
2292 	/*
2293 	 * For each of the read-ahead blocks, start a read, if necessary.
2294 	 */
2295 	for (i = 0; i < nrablks; i++) {
2296 		/* If it's in the cache, just go on to next one. */
2297 		if (incore(vp, rablks[i])) {
2298 			continue;
2299 		}
2300 
2301 		/* Get a buffer for the read-ahead block */
2302 		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype);
2303 	}
2304 
2305 	/* Otherwise, we had to start a read for it; wait until it's valid. */
2306 	return buf_biowait(bp);
2307 }
2308 
2309 
2310 /*
2311  * Read a disk block.
2312  * This algorithm described in Bach (p.54).
2313  */
2314 errno_t
2315 buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
2316 {
2317 	buf_t   bp;
2318 
2319 	/* Get buffer for block. */
2320 	bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ);
2321 
2322 	/* Wait for the read to complete, and return result. */
2323 	return buf_biowait(bp);
2324 }
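
/*
 * Usage sketch (illustrative, not part of the original source): a file
 * system reads one logical block synchronously and releases it when done.
 * MYFS_BLOCKSIZE is a hypothetical constant.
 */
#if 0	/* example only -- not compiled */
static errno_t
example_read_block(vnode_t vp, daddr64_t blkno)
{
	buf_t   bp;
	errno_t error;

	error = buf_bread(vp, blkno, MYFS_BLOCKSIZE, NOCRED, &bp);
	if (error) {
		buf_brelse(bp);		/* a buffer is returned even on error */
		return error;
	}
	/* ... consume the data via (char *)buf_dataptr(bp) ... */
	buf_brelse(bp);
	return 0;
}
#endif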
2325 
2326 /*
2327  * Read a disk block. [bread() for meta-data]
2328  * This algorithm described in Bach (p.54).
2329  */
2330 errno_t
2331 buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
2332 {
2333 	buf_t   bp;
2334 
2335 	/* Get buffer for block. */
2336 	bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META);
2337 
2338 	/* Wait for the read to complete, and return result. */
2339 	return buf_biowait(bp);
2340 }
2341 
2342 /*
2343  * Read-ahead multiple disk blocks. The first is sync, the rest async.
2344  */
2345 errno_t
2346 buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
2347 {
2348 	return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ);
2349 }
2350 
2351 /*
2352  * Read-ahead multiple disk blocks. The first is sync, the rest async.
2353  * [buf_breadn() for meta-data]
2354  */
2355 errno_t
2356 buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
2357 {
2358 	return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META);
2359 }
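
/*
 * Usage sketch (illustrative, not part of the original source): issue a
 * synchronous read of 'blkno' with asynchronous read-ahead of the next two
 * blocks, per the Bach breada scheme referenced above. Block numbers and
 * sizes here are hypothetical.
 */
#if 0	/* example only -- not compiled */
static errno_t
example_readahead(vnode_t vp, daddr64_t blkno, int bsize)
{
	daddr64_t rablks[2] = { blkno + 1, blkno + 2 };
	int       rasizes[2] = { bsize, bsize };
	buf_t     bp;
	errno_t   error;

	error = buf_breadn(vp, blkno, bsize, rablks, rasizes, 2, NOCRED, &bp);
	if (error == 0) {
		/*
		 * only 'bp' is owned by the caller; the read-ahead
		 * buffers are async and release themselves in biodone
		 */
		buf_brelse(bp);
	}
	return error;
}
#endif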
2360 
2361 /*
2362  * Block write.  Described in Bach (p.56)
2363  */
2364 errno_t
2365 buf_bwrite(buf_t bp)
2366 {
2367 	int     sync, wasdelayed;
2368 	errno_t rv;
2369 	proc_t  p = current_proc();
2370 	vnode_t vp = bp->b_vp;
2371 
2372 	if (bp->b_datap == 0) {
2373 		if (brecover_data(bp) == 0) {
2374 			return 0;
2375 		}
2376 	}
2377 	/* Remember buffer type, to switch on it later. */
2378 	sync = !ISSET(bp->b_flags, B_ASYNC);
2379 	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
2380 	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
2381 
2382 	if (wasdelayed) {
2383 		OSAddAtomicLong(-1, &nbdwrite);
2384 	}
2385 
2386 	if (!sync) {
2387 		/*
2388 		 * If not synchronous, pay for the I/O operation and make
2389 		 * sure the buf is on the correct vnode queue.  We have
2390 		 * to do this now, because if we don't, the vnode may not
2391 		 * be properly notified that its I/O has completed.
2392 		 */
2393 		if (wasdelayed) {
2394 			buf_reassign(bp, vp);
2395 		} else if (p && p->p_stats) {
2396 			OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock);            /* XXX */
2397 		}
2398 	}
2399 	trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
2400 
2401 	/* Initiate disk write.  Make sure the appropriate party is charged. */
2402 
2403 	OSAddAtomic(1, &vp->v_numoutput);
2404 
2405 	VNOP_STRATEGY(bp);
2406 
2407 	if (sync) {
2408 		/*
2409 		 * If I/O was synchronous, wait for it to complete.
2410 		 */
2411 		rv = buf_biowait(bp);
2412 
2413 		/*
2414 		 * Pay for the I/O operation, if it's not been paid for, and
2415 		 * make sure it's on the correct vnode queue. (async operations
2416 		 * were paid for above.)
2417 		 */
2418 		if (wasdelayed) {
2419 			buf_reassign(bp, vp);
2420 		} else if (p && p->p_stats) {
2421 			OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock);            /* XXX */
2422 		}
2423 
2424 		/* Release the buffer. */
2425 		buf_brelse(bp);
2426 
2427 		return rv;
2428 	} else {
2429 		return 0;
2430 	}
2431 }
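
/*
 * Usage sketch (illustrative, not part of the original source): with
 * B_ASYNC clear, buf_bwrite() waits for completion and releases the
 * buffer internally, so the caller only checks the returned status.
 */
#if 0	/* example only -- not compiled */
static errno_t
example_sync_write(buf_t bp)
{
	/* bp is consumed: brelse'd on the sync path, biodone'd on async */
	return buf_bwrite(bp);
}
#endif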
2432 
2433 int
2434 vn_bwrite(struct vnop_bwrite_args *ap)
2435 {
2436 	return buf_bwrite(ap->a_bp);
2437 }
2438 
2439 /*
2440  * Delayed write.
2441  *
2442  * The buffer is marked dirty, but is not queued for I/O.
2443  * This routine should be used when the buffer is expected
2444  * to be modified again soon, typically a small write that
2445  * partially fills a buffer.
2446  *
2447  * NB: magnetic tapes cannot be delayed; they must be
2448  * written in the order that the writes are requested.
2449  *
2450  * Described in Leffler, et al. (pp. 208-213).
2451  *
2452  * Note: With the ability to allocate additional buffer
2453  * headers, we can get into a situation where "too many"
2454  * buf_bdwrite()s let the kernel create buffers faster than
2455  * the disks can service them. Doing a buf_bawrite() in
2456  * cases where we have "too many" outstanding buf_bdwrite()s avoids that.
2457  */
2458 int
2459 bdwrite_internal(buf_t bp, int return_error)
2460 {
2461 	proc_t  p  = current_proc();
2462 	vnode_t vp = bp->b_vp;
2463 
2464 	/*
2465 	 * If the block hasn't been seen before:
2466 	 *	(1) Mark it as having been seen,
2467 	 *	(2) Charge for the write.
2468 	 *	(3) Make sure it's on its vnode's correct block list,
2469 	 */
2470 	if (!ISSET(bp->b_flags, B_DELWRI)) {
2471 		SET(bp->b_flags, B_DELWRI);
2472 		if (p && p->p_stats) {
2473 			OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock);    /* XXX */
2474 		}
2475 		OSAddAtomicLong(1, &nbdwrite);
2476 		buf_reassign(bp, vp);
2477 	}
2478 
2479 	/*
2480 	 * if we're not LOCKED, but the total number of delayed writes
2481 	 * has climbed above 75% of the total buffers in the system,
2482 	 * return an error if the caller has indicated that it can
2483 	 * handle one in this case; otherwise schedule the I/O now.
2484 	 * this is done to prevent us from allocating tons of extra
2485 	 * buffers when dealing with virtual disks (i.e. DiskImages),
2486 	 * because additional buffers are dynamically allocated to prevent
2487 	 * deadlocks from occurring
2488 	 *
2489 	 * however, can't do a buf_bawrite() if the LOCKED bit is set because the
2490 	 * buffer is part of a transaction and can't go to disk until
2491 	 * the LOCKED bit is cleared.
2492 	 */
2493 	if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers / 4) * 3)) {
2494 		if (return_error) {
2495 			return EAGAIN;
2496 		}
2497 		/*
2498 		 * If the vnode has "too many" write operations in progress
2499 		 * wait for them to finish the IO
2500 		 */
2501 		(void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite");
2502 
2503 		return buf_bawrite(bp);
2504 	}
2505 
2506 	/* Otherwise, the "write" is done, so mark and release the buffer. */
2507 	SET(bp->b_flags, B_DONE);
2508 	buf_brelse(bp);
2509 	return 0;
2510 }
2511 
2512 errno_t
2513 buf_bdwrite(buf_t bp)
2514 {
2515 	return bdwrite_internal(bp, 0);
2516 }
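
/*
 * Usage sketch (illustrative, not part of the original source): a small
 * partial update marks the buffer delayed-dirty instead of writing it
 * immediately; the laundry thread or a later sync pushes it to disk.
 */
#if 0	/* example only -- not compiled */
static void
example_partial_update(buf_t bp, const void *src, size_t off, size_t len)
{
	bcopy(src, (char *)buf_dataptr(bp) + off, len);
	(void) buf_bdwrite(bp);		/* sets B_DELWRI and releases bp */
}
#endif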
2517 
2518 
2519 /*
2520  * Asynchronous block write; just an asynchronous buf_bwrite().
2521  *
2522  * Note: With the ability to allocate additional buffer
2523  * headers, we can get into a situation where "too many"
2524  * buf_bawrite()s let the kernel create buffers faster than
2525  * the disks can service.
2526  * We limit the number of "in flight" writes a vnode can have to
2527  * avoid this.
2528  */
2529 static int
2530 bawrite_internal(buf_t bp, int throttle)
2531 {
2532 	vnode_t vp = bp->b_vp;
2533 
2534 	if (vp) {
2535 		if (throttle) {
2536 			/*
2537 			 * If the vnode has "too many" write operations in progress
2538 			 * wait for them to finish the IO
2539 			 */
2540 			(void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite");
2541 		} else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE) {
2542 			/*
2543 			 * return to the caller and
2544 			 * let him decide what to do
2545 			 */
2546 			return EWOULDBLOCK;
2547 		}
2548 	}
2549 	SET(bp->b_flags, B_ASYNC);
2550 
2551 	return VNOP_BWRITE(bp);
2552 }
2553 
2554 errno_t
2555 buf_bawrite(buf_t bp)
2556 {
2557 	return bawrite_internal(bp, 1);
2558 }
2559 
2560 
2561 
2562 static void
2563 buf_free_meta_store(buf_t bp)
2564 {
2565 	if (bp->b_bufsize) {
2566 		uintptr_t datap = bp->b_datap;
2567 		int bufsize = bp->b_bufsize;
2568 
2569 		bp->b_datap = (uintptr_t)NULL;
2570 		bp->b_bufsize = 0;
2571 
2572 		/*
2573 		 * Ensure the assignment of b_datap has global visibility
2574 		 * before we free the region.
2575 		 */
2576 		OSMemoryBarrier();
2577 
2578 		if (ISSET(bp->b_flags, B_ZALLOC)) {
2579 			kheap_free(KHEAP_VFS_BIO, datap, bufsize);
2580 		} else {
2581 			kmem_free(kernel_map, datap, bufsize);
2582 		}
2583 	}
2584 }
2585 
2586 
2587 static buf_t
2588 buf_brelse_shadow(buf_t bp)
2589 {
2590 	buf_t   bp_head;
2591 	buf_t   bp_temp;
2592 	buf_t   bp_return = NULL;
2593 #ifdef BUF_MAKE_PRIVATE
2594 	buf_t   bp_data;
2595 	int     data_ref = 0;
2596 #endif
2597 	int need_wakeup = 0;
2598 
2599 	lck_mtx_lock_spin(&buf_mtx);
2600 
2601 	__IGNORE_WCASTALIGN(bp_head = (buf_t)bp->b_orig);
2602 
2603 	if (bp_head->b_whichq != -1) {
2604 		panic("buf_brelse_shadow: bp_head on freelist %d", bp_head->b_whichq);
2605 	}
2606 
2607 #ifdef BUF_MAKE_PRIVATE
2608 	if ((bp_data = bp->b_data_store)) {
2609 		bp_data->b_data_ref--;
2610 		/*
2611 		 * snapshot the ref count so that we can check it
2612 		 * outside of the lock... we only want the guy going
2613 		 * from 1 -> 0 to try and release the storage
2614 		 */
2615 		data_ref = bp_data->b_data_ref;
2616 	}
2617 #endif
2618 	KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0);
2619 
2620 	bp_head->b_shadow_ref--;
2621 
2622 	for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow) {
2623 		;
2624 	}
2625 
2626 	if (bp_temp == NULL) {
2627 		panic("buf_brelse_shadow: bp not on list %p", bp_head);
2628 	}
2629 
2630 	bp_temp->b_shadow = bp_temp->b_shadow->b_shadow;
2631 
2632 #ifdef BUF_MAKE_PRIVATE
2633 	/*
2634 	 * we're about to free the current 'owner' of the data buffer and
2635 	 * there is at least one other shadow buf_t still pointing at it
2636 	 * so transfer it to the first shadow buf left in the chain
2637 	 */
2638 	if (bp == bp_data && data_ref) {
2639 		if ((bp_data = bp_head->b_shadow) == NULL) {
2640 			panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp);
2641 		}
2642 
2643 		for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) {
2644 			bp_temp->b_data_store = bp_data;
2645 		}
2646 		bp_data->b_data_ref = data_ref;
2647 	}
2648 #endif
2649 	if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) {
2650 		panic("buf_brelse_shadow: b_shadow != NULL && b_shadow_ref == 0  bp(%p)", bp);
2651 	}
2652 	if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) {
2653 		panic("buf_brelse_shadow: b_shadow == NULL && b_shadow_ref != 0  bp(%p)", bp);
2654 	}
2655 
2656 	if (bp_head->b_shadow_ref == 0) {
2657 		if (!ISSET(bp_head->b_lflags, BL_BUSY)) {
2658 			CLR(bp_head->b_flags, B_AGE);
2659 			bp_head->b_timestamp = buf_timestamp();
2660 
2661 			if (ISSET(bp_head->b_flags, B_LOCKED)) {
2662 				bp_head->b_whichq = BQ_LOCKED;
2663 				binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED);
2664 			} else {
2665 				bp_head->b_whichq = BQ_META;
2666 				binstailfree(bp_head, &bufqueues[BQ_META], BQ_META);
2667 			}
2668 		} else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) {
2669 			CLR(bp_head->b_lflags, BL_WAITSHADOW);
2670 
2671 			bp_return = bp_head;
2672 		}
2673 		if (ISSET(bp_head->b_lflags, BL_WANTED_REF)) {
2674 			CLR(bp_head->b_lflags, BL_WANTED_REF);
2675 			need_wakeup = 1;
2676 		}
2677 	}
2678 	lck_mtx_unlock(&buf_mtx);
2679 
2680 	if (need_wakeup) {
2681 		wakeup(bp_head);
2682 	}
2683 
2684 #ifdef BUF_MAKE_PRIVATE
2685 	if (bp == bp_data && data_ref == 0) {
2686 		buf_free_meta_store(bp);
2687 	}
2688 
2689 	bp->b_data_store = NULL;
2690 #endif
2691 	KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0);
2692 
2693 	return bp_return;
2694 }
2695 
2696 
2697 /*
2698  * Release a buffer on to the free lists.
2699  * Described in Bach (p. 46).
2700  */
2701 void
2702 buf_brelse(buf_t bp)
2703 {
2704 	struct bqueues *bufq;
2705 	int    whichq;
2706 	upl_t   upl;
2707 	int need_wakeup = 0;
2708 	int need_bp_wakeup = 0;
2709 
2710 
2711 	if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY)) {
2712 		panic("buf_brelse: bad buffer = %p", bp);
2713 	}
2714 
2715 #ifdef JOE_DEBUG
2716 	(void) OSBacktrace(&bp->b_stackbrelse[0], 6);
2717 
2718 	bp->b_lastbrelse = current_thread();
2719 	bp->b_tag = 0;
2720 #endif
2721 	if (bp->b_lflags & BL_IOBUF) {
2722 		buf_t   shadow_master_bp = NULL;
2723 
2724 		if (ISSET(bp->b_lflags, BL_SHADOW)) {
2725 			shadow_master_bp = buf_brelse_shadow(bp);
2726 		} else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC)) {
2727 			buf_free_meta_store(bp);
2728 		}
2729 		free_io_buf(bp);
2730 
2731 		if (shadow_master_bp) {
2732 			bp = shadow_master_bp;
2733 			goto finish_shadow_master;
2734 		}
2735 		return;
2736 	}
2737 
2738 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START,
2739 	    bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap,
2740 	    bp->b_flags, 0);
2741 
2742 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
2743 
2744 	/*
2745 	 * if we're invalidating a buffer that has the B_FILTER bit
2746 	 * set then call the b_iodone function so it gets cleaned
2747 	 * up properly.
2748 	 *
2749 	 * the HFS journal code depends on this
2750 	 */
2751 	if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) {
2752 		if (ISSET(bp->b_flags, B_FILTER)) {     /* if necessary, call out */
2753 			void    (*iodone_func)(struct buf *, void *) = bp->b_iodone;
2754 			void    *arg = bp->b_transaction;
2755 
2756 			CLR(bp->b_flags, B_FILTER);     /* but note callout done */
2757 			bp->b_iodone = NULL;
2758 			bp->b_transaction = NULL;
2759 
2760 			if (iodone_func == NULL) {
2761 				panic("brelse: bp @ %p has NULL b_iodone!", bp);
2762 			}
2763 			(*iodone_func)(bp, arg);
2764 		}
2765 	}
2766 	/*
2767 	 * I/O is done. Cleanup the UPL state
2768 	 */
2769 	upl = bp->b_upl;
2770 
2771 	if (!ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
2772 		kern_return_t kret;
2773 		int           upl_flags;
2774 
2775 		if (upl == NULL) {
2776 			if (!ISSET(bp->b_flags, B_INVAL)) {
2777 				kret = ubc_create_upl_kernel(bp->b_vp,
2778 				    ubc_blktooff(bp->b_vp, bp->b_lblkno),
2779 				    bp->b_bufsize,
2780 				    &upl,
2781 				    NULL,
2782 				    UPL_PRECIOUS,
2783 				    VM_KERN_MEMORY_FILE);
2784 
2785 				if (kret != KERN_SUCCESS) {
2786 					panic("brelse: Failed to create UPL");
2787 				}
2788 #if  UPL_DEBUG
2789 				upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5);
2790 #endif /* UPL_DEBUG */
2791 			}
2792 		} else {
2793 			if (bp->b_datap) {
2794 				kret = ubc_upl_unmap(upl);
2795 
2796 				if (kret != KERN_SUCCESS) {
2797 					panic("ubc_upl_unmap failed");
2798 				}
2799 				bp->b_datap = (uintptr_t)NULL;
2800 			}
2801 		}
2802 		if (upl) {
2803 			if (bp->b_flags & (B_ERROR | B_INVAL)) {
2804 				if (bp->b_flags & (B_READ | B_INVAL)) {
2805 					upl_flags = UPL_ABORT_DUMP_PAGES;
2806 				} else {
2807 					upl_flags = 0;
2808 				}
2809 
2810 				ubc_upl_abort(upl, upl_flags);
2811 			} else {
2812 				if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY)) {
2813 					upl_flags = UPL_COMMIT_SET_DIRTY;
2814 				} else {
2815 					upl_flags = UPL_COMMIT_CLEAR_DIRTY;
2816 				}
2817 
2818 				ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags |
2819 				    UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
2820 			}
2821 			bp->b_upl = NULL;
2822 		}
2823 	} else {
2824 		if ((upl)) {
2825 			panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp);
2826 		}
2827 	}
2828 
2829 	/*
2830 	 * If it's locked, don't report an error; try again later.
2831 	 */
2832 	if (ISSET(bp->b_flags, (B_LOCKED | B_ERROR)) == (B_LOCKED | B_ERROR)) {
2833 		CLR(bp->b_flags, B_ERROR);
2834 	}
2835 	/*
2836 	 * If it's not cacheable, or an error, mark it invalid.
2837 	 */
2838 	if (ISSET(bp->b_flags, (B_NOCACHE | B_ERROR))) {
2839 		SET(bp->b_flags, B_INVAL);
2840 	}
2841 
2842 	if ((bp->b_bufsize <= 0) ||
2843 	    ISSET(bp->b_flags, B_INVAL) ||
2844 	    (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) {
2845 		boolean_t       delayed_buf_free_meta_store = FALSE;
2846 
2847 		/*
2848 		 * If it's invalid or empty, dissociate it from its vnode,
2849 		 * release its storage if B_META, and
2850 		 * clean it up a bit and put it on the EMPTY queue
2851 		 */
2852 		if (ISSET(bp->b_flags, B_DELWRI)) {
2853 			OSAddAtomicLong(-1, &nbdwrite);
2854 		}
2855 
2856 		if (ISSET(bp->b_flags, B_META)) {
2857 			if (bp->b_shadow_ref) {
2858 				delayed_buf_free_meta_store = TRUE;
2859 			} else {
2860 				buf_free_meta_store(bp);
2861 			}
2862 		}
2863 		/*
2864 		 * nuke any credentials we were holding
2865 		 */
2866 		buf_release_credentials(bp);
2867 
2868 		lck_mtx_lock_spin(&buf_mtx);
2869 
2870 		if (bp->b_shadow_ref) {
2871 			SET(bp->b_lflags, BL_WAITSHADOW);
2872 
2873 			lck_mtx_unlock(&buf_mtx);
2874 
2875 			return;
2876 		}
2877 		if (delayed_buf_free_meta_store == TRUE) {
2878 			lck_mtx_unlock(&buf_mtx);
2879 finish_shadow_master:
2880 			buf_free_meta_store(bp);
2881 
2882 			lck_mtx_lock_spin(&buf_mtx);
2883 		}
2884 		CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
2885 
2886 		if (bp->b_vp) {
2887 			brelvp_locked(bp);
2888 		}
2889 
2890 		bremhash(bp);
2891 		BLISTNONE(bp);
2892 		binshash(bp, &invalhash);
2893 
2894 		bp->b_whichq = BQ_EMPTY;
2895 		binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
2896 	} else {
2897 		/*
2898 		 * It has valid data.  Put it on the end of the appropriate
2899 		 * queue, so that it'll stick around for as long as possible.
2900 		 */
2901 		if (ISSET(bp->b_flags, B_LOCKED)) {
2902 			whichq = BQ_LOCKED;             /* locked in core */
2903 		} else if (ISSET(bp->b_flags, B_META)) {
2904 			whichq = BQ_META;               /* meta-data */
2905 		} else if (ISSET(bp->b_flags, B_AGE)) {
2906 			whichq = BQ_AGE;                /* stale but valid data */
2907 		} else {
2908 			whichq = BQ_LRU;                /* valid data */
2909 		}
2910 		bufq = &bufqueues[whichq];
2911 
2912 		bp->b_timestamp = buf_timestamp();
2913 
2914 		lck_mtx_lock_spin(&buf_mtx);
2915 
2916 		/*
2917 		 * the buf_brelse_shadow routine doesn't take 'ownership'
2918 		 * of the parent buf_t... it updates state that is protected by
2919 		 * the buf_mtx, and checks for BL_BUSY to determine whether to
2920 		 * put the buf_t back on a free list.  b_shadow_ref is protected
2921 		 * by the lock, and since we have not yet cleared B_BUSY, we need
2922 		 * to check it while holding the lock to ensure that one of us
2923 		 * puts this buf_t back on a free list when it is safe to do so
2924 		 */
2925 		if (bp->b_shadow_ref == 0) {
2926 			CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE));
2927 			bp->b_whichq = whichq;
2928 			binstailfree(bp, bufq, whichq);
2929 		} else {
2930 			/*
2931 			 * there are still cloned buf_t's pointing
2932 			 * at this guy... need to keep it off the
2933 			 * freelists until a buf_brelse is done on
2934 			 * the last clone
2935 			 */
2936 			CLR(bp->b_flags, (B_ASYNC | B_NOCACHE));
2937 		}
2938 	}
2939 	if (needbuffer) {
2940 		/*
2941 		 * needbuffer is a global
2942 		 * we're currently using buf_mtx to protect it
2943 		 * delay doing the actual wakeup until after
2944 		 * we drop buf_mtx
2945 		 */
2946 		needbuffer = 0;
2947 		need_wakeup = 1;
2948 	}
2949 	if (ISSET(bp->b_lflags, BL_WANTED)) {
2950 		/*
2951 		 * delay the actual wakeup until after we
2952 		 * clear BL_BUSY and we've dropped buf_mtx
2953 		 */
2954 		need_bp_wakeup = 1;
2955 	}
2956 	/*
2957 	 * Unlock the buffer.
2958 	 */
2959 	CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
2960 	buf_busycount--;
2961 
2962 	lck_mtx_unlock(&buf_mtx);
2963 
2964 	if (need_wakeup) {
2965 		/*
2966 		 * Wake up any processes waiting for any buffer to become free.
2967 		 */
2968 		wakeup(&needbuffer);
2969 	}
2970 	if (need_bp_wakeup) {
2971 		/*
2972 		 * Wake up any processes waiting for _this_ buffer to become free.
2973 		 */
2974 		wakeup(bp);
2975 	}
2976 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END,
2977 	    bp, bp->b_datap, bp->b_flags, 0, 0);
2978 }
2979 
2980 /*
2981  * Determine if a block is in the cache.
2982  * Just look on what would be its hash chain.  If it's there, return
2983  * a pointer to it, unless it's marked invalid.  If it's marked invalid,
2984  * we normally don't return the buffer, unless the caller explicitly
2985  * wants us to.
2986  */
2987 static boolean_t
2988 incore(vnode_t vp, daddr64_t blkno)
2989 {
2990 	boolean_t retval;
2991 	struct  bufhashhdr *dp;
2992 
2993 	dp = BUFHASH(vp, blkno);
2994 
2995 	lck_mtx_lock_spin(&buf_mtx);
2996 
2997 	if (incore_locked(vp, blkno, dp)) {
2998 		retval = TRUE;
2999 	} else {
3000 		retval = FALSE;
3001 	}
3002 	lck_mtx_unlock(&buf_mtx);
3003 
3004 	return retval;
3005 }
3006 
3007 
3008 static buf_t
3009 incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp)
3010 {
3011 	struct buf *bp;
3012 
3013 	/* Search hash chain */
3014 	for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) {
3015 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
3016 		    !ISSET(bp->b_flags, B_INVAL)) {
3017 			return bp;
3018 		}
3019 	}
3020 	return NULL;
3021 }
3022 
3023 
3024 void
3025 buf_wait_for_shadow_io(vnode_t vp, daddr64_t blkno)
3026 {
3027 	buf_t bp;
3028 	struct  bufhashhdr *dp;
3029 
3030 	dp = BUFHASH(vp, blkno);
3031 
3032 	lck_mtx_lock_spin(&buf_mtx);
3033 
3034 	for (;;) {
3035 		if ((bp = incore_locked(vp, blkno, dp)) == NULL) {
3036 			break;
3037 		}
3038 
3039 		if (bp->b_shadow_ref == 0) {
3040 			break;
3041 		}
3042 
3043 		SET(bp->b_lflags, BL_WANTED_REF);
3044 
3045 		(void) msleep(bp, &buf_mtx, PSPIN | (PRIBIO + 1), "buf_wait_for_shadow", NULL);
3046 	}
3047 	lck_mtx_unlock(&buf_mtx);
3048 }
3049 
3050 /* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */
3051 /*
3052  * Get a block of requested size that is associated with
3053  * a given vnode and block offset. If it is found in the
3054  * block cache, mark it as having been found, make it busy
3055  * and return it. Otherwise, return an empty block of the
3056  * correct size. It is up to the caller to ensure that the
3057  * cached blocks are of the correct size.
3058  */
3059 buf_t
3060 buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int operation)
3061 {
3062 	buf_t bp;
3063 	int   err;
3064 	upl_t upl;
3065 	upl_page_info_t *pl;
3066 	kern_return_t kret;
3067 	int ret_only_valid;
3068 	struct timespec ts;
3069 	int upl_flags;
3070 	struct  bufhashhdr *dp;
3071 
3072 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START,
3073 	    (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0);
3074 
3075 	ret_only_valid = operation & BLK_ONLYVALID;
3076 	operation &= ~BLK_ONLYVALID;
3077 	dp = BUFHASH(vp, blkno);
3078 start:
3079 	lck_mtx_lock_spin(&buf_mtx);
3080 
3081 	if ((bp = incore_locked(vp, blkno, dp))) {
3082 		/*
3083 		 * Found in the Buffer Cache
3084 		 */
3085 		if (ISSET(bp->b_lflags, BL_BUSY)) {
3086 			/*
3087 			 * but is busy
3088 			 */
3089 			switch (operation) {
3090 			case BLK_READ:
3091 			case BLK_WRITE:
3092 			case BLK_META:
3093 				SET(bp->b_lflags, BL_WANTED);
3094 				bufstats.bufs_busyincore++;
3095 
3096 				/*
3097 				 * don't retake the mutex after being awakened...
3098 				 * the time out is in msecs
3099 				 */
3100 				ts.tv_sec = (slptimeo / 1000);
3101 				ts.tv_nsec = (slptimeo % 1000) * 10  * NSEC_PER_USEC * 1000;
3102 
3103 				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE,
3104 				    (uintptr_t)blkno, size, operation, 0, 0);
3105 
3106 				err = msleep(bp, &buf_mtx, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts);
3107 
3108 				/*
3109 				 * Callers who call with PCATCH or timeout are
3110 				 * willing to deal with the NULL pointer
3111 				 */
3112 				if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo))) {
3113 					return NULL;
3114 				}
3115 				goto start;
3116 			/*NOTREACHED*/
3117 
3118 			default:
3119 				/*
3120 				 * unknown operation requested
3121 				 */
3122 				panic("getblk: paging or unknown operation for incore busy buffer - %x", operation);
3123 				/*NOTREACHED*/
3124 				break;
3125 			}
3126 		} else {
3127 			int clear_bdone;
3128 
3129 			/*
3130 			 * buffer in core and not busy
3131 			 */
3132 			SET(bp->b_lflags, BL_BUSY);
3133 			SET(bp->b_flags, B_CACHE);
3134 			buf_busycount++;
3135 
3136 			bremfree_locked(bp);
3137 			bufstats.bufs_incore++;
3138 
3139 			lck_mtx_unlock(&buf_mtx);
3140 #ifdef JOE_DEBUG
3141 			bp->b_owner = current_thread();
3142 			bp->b_tag   = 1;
3143 #endif
3144 			if ((bp->b_upl)) {
3145 				panic("buffer has UPL, but not marked BUSY: %p", bp);
3146 			}
3147 
3148 			clear_bdone = FALSE;
3149 			if (!ret_only_valid) {
3150 				/*
3151 				 * If the number bytes that are valid is going
3152 				 * to increase (even if we end up not doing a
3153 				 * reallocation through allocbuf) we have to read
3154 				 * the new size first.
3155 				 *
3156 				 * This is required in cases where we are doing a read-
3157 				 * modify-write of already valid data on disk, but
3158 				 * in cases where the data on disk beyond (blkno + b_bcount)
3159 				 * is invalid, we may end up doing extra I/O.
3160 				 */
3161 				if (operation == BLK_META && bp->b_bcount < (uint32_t)size) {
3162 					/*
3163 					 * Since we are going to read in the whole size first
3164 					 * we first have to ensure that any pending delayed write
3165 					 * is flushed to disk first.
3166 					 */
3167 					if (ISSET(bp->b_flags, B_DELWRI)) {
3168 						CLR(bp->b_flags, B_CACHE);
3169 						buf_bwrite(bp);
3170 						goto start;
3171 					}
3172 					/*
3173 					 * clear B_DONE before returning from
3174 					 * this function so that the caller
3175 					 * can issue a read for the new size.
3176 					 */
3177 					clear_bdone = TRUE;
3178 				}
3179 
3180 				if (bp->b_bufsize != (uint32_t)size) {
3181 					allocbuf(bp, size);
3182 				}
3183 			}
3184 
3185 			upl_flags = 0;
3186 			switch (operation) {
3187 			case BLK_WRITE:
3188 				/*
3189 				 * "write" operation:  let the UPL subsystem
3190 				 * know that we intend to modify the buffer
3191 				 * cache pages we're gathering.
3192 				 */
3193 				upl_flags |= UPL_WILL_MODIFY;
3194 				OS_FALLTHROUGH;
3195 			case BLK_READ:
3196 				upl_flags |= UPL_PRECIOUS;
3197 				if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
3198 					kret = ubc_create_upl_kernel(vp,
3199 					    ubc_blktooff(vp, bp->b_lblkno),
3200 					    bp->b_bufsize,
3201 					    &upl,
3202 					    &pl,
3203 					    upl_flags,
3204 					    VM_KERN_MEMORY_FILE);
3205 					if (kret != KERN_SUCCESS) {
3206 						panic("Failed to create UPL");
3207 					}
3208 
3209 					bp->b_upl = upl;
3210 
3211 					if (upl_valid_page(pl, 0)) {
3212 						if (upl_dirty_page(pl, 0)) {
3213 							SET(bp->b_flags, B_WASDIRTY);
3214 						} else {
3215 							CLR(bp->b_flags, B_WASDIRTY);
3216 						}
3217 					} else {
3218 						CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI));
3219 					}
3220 
3221 					kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap));
3222 
3223 					if (kret != KERN_SUCCESS) {
3224 						panic("getblk: ubc_upl_map() failed with (%d)", kret);
3225 					}
3226 				}
3227 				break;
3228 
3229 			case BLK_META:
3230 				/*
3231 				 * VM is not involved in IO for the meta data
3232 				 * buffer already has valid data
3233 				 */
3234 				break;
3235 
3236 			default:
3237 				panic("getblk: paging or unknown operation for incore buffer- %d", operation);
3238 				/*NOTREACHED*/
3239 				break;
3240 			}
3241 
3242 			if (clear_bdone) {
3243 				CLR(bp->b_flags, B_DONE);
3244 			}
3245 		}
3246 	} else { /* not incore() */
3247 		int queue = BQ_EMPTY; /* Start with no preference */
3248 
3249 		if (ret_only_valid) {
3250 			lck_mtx_unlock(&buf_mtx);
3251 			return NULL;
3252 		}
3253 		if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/) {
3254 			operation = BLK_META;
3255 		}
3256 
3257 		if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL) {
3258 			goto start;
3259 		}
3260 
3261 		/*
3262 		 * getnewbuf may block for a number of different reasons...
3263 		 * if it does, it's then possible for someone else to
3264 		 * create a buffer for the same block and insert it into
3265 		 * the hash... if we see it incore at this point we dump
3266 		 * the buffer we were working on and start over
3267 		 */
3268 		if (incore_locked(vp, blkno, dp)) {
3269 			SET(bp->b_flags, B_INVAL);
3270 			binshash(bp, &invalhash);
3271 
3272 			lck_mtx_unlock(&buf_mtx);
3273 
3274 			buf_brelse(bp);
3275 			goto start;
3276 		}
3277 		/*
3278 		 * NOTE: YOU CAN NOT BLOCK UNTIL binshash() HAS BEEN
3279 		 *       CALLED!  BE CAREFUL.
3280 		 */
3281 
3282 		/*
3283 		 * mark the buffer as B_META if indicated
3284 		 * so that when buffer is released it will goto META queue
3285 		 */
3286 		if (operation == BLK_META) {
3287 			SET(bp->b_flags, B_META);
3288 		}
3289 
3290 		bp->b_blkno = bp->b_lblkno = blkno;
3291 		bp->b_lblksize = 0; /* Should be set by caller */
3292 		bp->b_vp = vp;
3293 
3294 		/*
3295 		 * Insert in the hash so that incore() can find it
3296 		 */
3297 		binshash(bp, BUFHASH(vp, blkno));
3298 
3299 		bgetvp_locked(vp, bp);
3300 
3301 		lck_mtx_unlock(&buf_mtx);
3302 
3303 		allocbuf(bp, size);
3304 
3305 		upl_flags = 0;
3306 		switch (operation) {
3307 		case BLK_META:
3308 			/*
3309 			 * buffer data is invalid...
3310 			 *
3311 			 * I don't want to have to retake buf_mtx,
3312 			 * so the miss and vmhits counters are done
3313 			 * with Atomic updates... all other counters
3314 			 * in bufstats are protected with either
3315 			 * buf_mtx or iobuffer_mtxp
3316 			 */
3317 			OSAddAtomicLong(1, &bufstats.bufs_miss);
3318 			break;
3319 
3320 		case BLK_WRITE:
3321 			/*
3322 			 * "write" operation:  let the UPL subsystem know
3323 			 * that we intend to modify the buffer cache pages
3324 			 * we're gathering.
3325 			 */
3326 			upl_flags |= UPL_WILL_MODIFY;
3327 			OS_FALLTHROUGH;
3328 		case BLK_READ:
3329 		{     off_t   f_offset;
3330 		      size_t  contig_bytes;
3331 		      int     bmap_flags;
3332 
3333 #if DEVELOPMENT || DEBUG
3334 			/*
3335 			 * Apple-implemented file systems use UBC exclusively; they should
3336 			 * not call in here.
3337 			 */
3338 		      const char* excldfs[] = {"hfs", "afpfs", "smbfs", "acfs",
3339 			                       "exfat", "msdos", "webdav", NULL};
3340 
3341 		      for (int i = 0; excldfs[i] != NULL; i++) {
3342 			      if (vp->v_mount &&
3343 			          !strcmp(vp->v_mount->mnt_vfsstat.f_fstypename,
3344 			          excldfs[i])) {
3345 				      panic("%s %s calls buf_getblk",
3346 				          excldfs[i],
3347 				          operation == BLK_READ ? "BLK_READ" : "BLK_WRITE");
3348 			      }
3349 		      }
3350 #endif
3351 
3352 		      if ((bp->b_upl)) {
3353 			      panic("bp already has UPL: %p", bp);
3354 		      }
3355 
3356 		      f_offset = ubc_blktooff(vp, blkno);
3357 
3358 		      upl_flags |= UPL_PRECIOUS;
3359 		      kret = ubc_create_upl_kernel(vp,
3360 			  f_offset,
3361 			  bp->b_bufsize,
3362 			  &upl,
3363 			  &pl,
3364 			  upl_flags,
3365 			  VM_KERN_MEMORY_FILE);
3366 
3367 		      if (kret != KERN_SUCCESS) {
3368 			      panic("Failed to create UPL");
3369 		      }
3370 #if  UPL_DEBUG
3371 		      upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4);
3372 #endif /* UPL_DEBUG */
3373 		      bp->b_upl = upl;
3374 
3375 		      if (upl_valid_page(pl, 0)) {
3376 			      if (operation == BLK_READ) {
3377 				      bmap_flags = VNODE_READ;
3378 			      } else {
3379 				      bmap_flags = VNODE_WRITE;
3380 			      }
3381 
3382 			      SET(bp->b_flags, B_CACHE | B_DONE);
3383 
3384 			      OSAddAtomicLong(1, &bufstats.bufs_vmhits);
3385 
3386 			      bp->b_validoff = 0;
3387 			      bp->b_dirtyoff = 0;
3388 
3389 			      if (upl_dirty_page(pl, 0)) {
3390 				      /* page is dirty */
3391 				      SET(bp->b_flags, B_WASDIRTY);
3392 
3393 				      bp->b_validend = bp->b_bcount;
3394 				      bp->b_dirtyend = bp->b_bcount;
3395 			      } else {
3396 				      /* page is clean */
3397 				      bp->b_validend = bp->b_bcount;
3398 				      bp->b_dirtyend = 0;
3399 			      }
3400 			      /*
3401 			       * try to recreate the physical block number associated with
3402 			       * this buffer...
3403 			       */
3404 			      if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL)) {
3405 				      panic("getblk: VNOP_BLOCKMAP failed");
3406 			      }
3407 			      /*
3408 			       * if the extent represented by this buffer
3409 			       * is not completely physically contiguous on
3410 			       * disk, then we can't cache the physical mapping
3411 			       * in the buffer header
3412 			       */
3413 			      if ((uint32_t)contig_bytes < bp->b_bcount) {
3414 				      bp->b_blkno = bp->b_lblkno;
3415 			      }
3416 		      } else {
3417 			      OSAddAtomicLong(1, &bufstats.bufs_miss);
3418 		      }
3419 		      kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
3420 
3421 		      if (kret != KERN_SUCCESS) {
3422 			      panic("getblk: ubc_upl_map() failed with (%d)", kret);
3423 		      }
3424 		      break;} // end BLK_READ
3425 		default:
3426 			panic("getblk: paging or unknown operation - %x", operation);
3427 			/*NOTREACHED*/
3428 			break;
3429 		} // end switch
3430 	} //end buf_t !incore
3431 
3432 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END,
3433 	    bp, bp->b_datap, bp->b_flags, 3, 0);
3434 
3435 #ifdef JOE_DEBUG
3436 	(void) OSBacktrace(&bp->b_stackgetblk[0], 6);
3437 #endif
3438 	return bp;
3439 }
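
/*
 * Usage sketch (illustrative, not part of the original source): the classic
 * getblk/bread split -- obtain a buffer for a metadata block and issue a
 * read only when the cached copy is not already valid, mirroring what
 * bio_doread() does above.
 */
#if 0	/* example only -- not compiled */
static errno_t
example_get_meta_block(vnode_t vp, daddr64_t blkno, int size, buf_t *bpp)
{
	buf_t   bp;
	errno_t error;

	bp = buf_getblk(vp, blkno, size, 0, 0, BLK_META);

	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* not valid in the cache... start a synchronous read */
		SET(bp->b_flags, B_READ);
		VNOP_STRATEGY(bp);
		error = buf_biowait(bp);
		if (error) {
			buf_brelse(bp);
			return error;
		}
	}
	*bpp = bp;
	return 0;
}
#endif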
3440 
3441 /*
3442  * Get an empty, disassociated buffer of given size.
3443  */
3444 buf_t
3445 buf_geteblk(int size)
3446 {
3447 	buf_t   bp = NULL;
3448 	int queue = BQ_EMPTY;
3449 
3450 	do {
3451 		lck_mtx_lock_spin(&buf_mtx);
3452 
3453 		bp = getnewbuf(0, 0, &queue);
3454 	} while (bp == NULL);
3455 
3456 	SET(bp->b_flags, (B_META | B_INVAL));
3457 
3458 #if DIAGNOSTIC
3459 	assert(queue == BQ_EMPTY);
3460 #endif /* DIAGNOSTIC */
3461 	/* XXX need to implement logic to deal with other queues */
3462 
3463 	binshash(bp, &invalhash);
3464 	bufstats.bufs_eblk++;
3465 
3466 	lck_mtx_unlock(&buf_mtx);
3467 
3468 	allocbuf(bp, size);
3469 
3470 	return bp;
3471 }
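
/*
 * Usage sketch (illustrative, not part of the original source):
 * buf_geteblk() loops until a header is available and so never returns
 * NULL; the storage can be used immediately as scratch space.
 */
#if 0	/* example only -- not compiled */
static void
example_scratch_buffer(void)
{
	buf_t bp = buf_geteblk(PAGE_SIZE);

	bzero((char *)buf_dataptr(bp), PAGE_SIZE);
	/* ... temporary use of the storage ... */
	buf_brelse(bp);
}
#endif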
3472 
3473 uint32_t
3474 buf_redundancy_flags(buf_t bp)
3475 {
3476 	return bp->b_redundancy_flags;
3477 }
3478 
3479 void
3480 buf_set_redundancy_flags(buf_t bp, uint32_t flags)
3481 {
3482 	SET(bp->b_redundancy_flags, flags);
3483 }
3484 
3485 void
3486 buf_clear_redundancy_flags(buf_t bp, uint32_t flags)
3487 {
3488 	CLR(bp->b_redundancy_flags, flags);
3489 }
3490 
3491 
3492 
3493 static void *
3494 recycle_buf_from_pool(int nsize)
3495 {
3496 	buf_t   bp;
3497 	void    *ptr = NULL;
3498 
3499 	lck_mtx_lock_spin(&buf_mtx);
3500 
3501 	TAILQ_FOREACH(bp, &bufqueues[BQ_META], b_freelist) {
3502 		if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != (uint32_t)nsize) {
3503 			continue;
3504 		}
3505 		ptr = (void *)bp->b_datap;
3506 		bp->b_bufsize = 0;
3507 
3508 		bcleanbuf(bp, TRUE);
3509 		break;
3510 	}
3511 	lck_mtx_unlock(&buf_mtx);
3512 
3513 	return ptr;
3514 }
3515 
3516 
3517 
3518 int zalloc_nopagewait_failed = 0;
3519 int recycle_buf_failed = 0;
3520 
3521 static void *
3522 grab_memory_for_meta_buf(int nsize)
3523 {
3524 	void *ptr;
3525 	boolean_t was_vmpriv;
3526 
3527 
3528 	/*
3529 	 * make sure we're NOT privileged so that
3530 	 * if a vm_page_grab is needed, it won't
3531 	 * block if we're out of free pages... if
3532 	 * it blocks, then we can't honor the
3533 	 * nopagewait request
3534 	 */
3535 	was_vmpriv = set_vm_privilege(FALSE);
3536 
3537 	ptr = kheap_alloc(KHEAP_VFS_BIO, nsize, Z_NOPAGEWAIT);
3538 
3539 	if (was_vmpriv == TRUE) {
3540 		set_vm_privilege(TRUE);
3541 	}
3542 
3543 	if (ptr == NULL) {
3544 		zalloc_nopagewait_failed++;
3545 
3546 		ptr = recycle_buf_from_pool(nsize);
3547 
3548 		if (ptr == NULL) {
3549 			recycle_buf_failed++;
3550 
3551 			if (was_vmpriv == FALSE) {
3552 				set_vm_privilege(TRUE);
3553 			}
3554 
3555 			ptr = kheap_alloc(KHEAP_VFS_BIO, nsize, Z_WAITOK);
3556 
3557 			if (was_vmpriv == FALSE) {
3558 				set_vm_privilege(FALSE);
3559 			}
3560 		}
3561 	}
3562 	return ptr;
3563 }
3564 
3565 /*
3566  * With UBC, there is no need to expand / shrink the file data
3567  * buffer. The VM uses the same pages, hence no waste.
3568  * All the file data buffers can have one size.
3569  * In fact expand / shrink would be an expensive operation.
3570  *
3571  * Only exception to this is meta-data buffers. Most of the
3572  * meta data operations are smaller than PAGE_SIZE. Having the
3573  * meta-data buffers grow and shrink as needed, optimizes use
3574  * of the kernel wired memory.
3575  */
3576 
3577 int
3578 allocbuf(buf_t bp, int size)
3579 {
3580 	vm_size_t desired_size;
3581 
3582 	desired_size = roundup(size, CLBYTES);
3583 
3584 	if (desired_size < PAGE_SIZE) {
3585 		desired_size = PAGE_SIZE;
3586 	}
3587 	if (desired_size > MAXBSIZE) {
3588 		panic("allocbuf: buffer larger than MAXBSIZE requested");
3589 	}
3590 
3591 	if (ISSET(bp->b_flags, B_META)) {
3592 		int    nsize = roundup(size, MINMETA);
3593 
3594 		if (bp->b_datap) {
3595 			void *elem = (void *)bp->b_datap;
3596 
3597 			if (ISSET(bp->b_flags, B_ZALLOC)) {
3598 				if (bp->b_bufsize < (uint32_t)nsize) {
3599 					/* reallocate to a bigger size */
3600 
3601 					if (nsize <= MAXMETA) {
3602 						desired_size = nsize;
3603 
3604 						/* b_datap not really a ptr */
3605 						*(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
3606 					} else {
3607 						bp->b_datap = (uintptr_t)NULL;
3608 						kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size,
3609 						    KMA_KOBJECT | KMA_DATA | KMA_NOFAIL,
3610 						    VM_KERN_MEMORY_FILE);
3611 						CLR(bp->b_flags, B_ZALLOC);
3612 					}
3613 					bcopy(elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3614 					kheap_free(KHEAP_VFS_BIO, elem, bp->b_bufsize);
3615 				} else {
3616 					desired_size = bp->b_bufsize;
3617 				}
3618 			} else {
3619 				if ((vm_size_t)bp->b_bufsize < desired_size) {
3620 					/* reallocate to a bigger size */
3621 					bp->b_datap = (uintptr_t)NULL;
3622 					kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size,
3623 					    KMA_KOBJECT | KMA_DATA | KMA_NOFAIL,
3624 					    VM_KERN_MEMORY_FILE);
3625 					bcopy(elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3626 					kmem_free(kernel_map, (vm_offset_t)elem, bp->b_bufsize);
3627 				} else {
3628 					desired_size = bp->b_bufsize;
3629 				}
3630 			}
3631 		} else {
3632 			/* new allocation */
3633 			if (nsize <= MAXMETA) {
3634 				desired_size = nsize;
3635 
3636 				/* b_datap not really a ptr */
3637 				*(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
3638 				SET(bp->b_flags, B_ZALLOC);
3639 			} else {
3640 				kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size,
3641 				    KMA_KOBJECT | KMA_DATA | KMA_NOFAIL,
3642 				    VM_KERN_MEMORY_FILE);
3643 			}
3644 		}
3645 	}
3646 	bp->b_bufsize = (uint32_t)desired_size;
3647 	bp->b_bcount = size;
3648 
3649 	return 0;
3650 }
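
/*
 * Worked example (illustrative, not part of the original source): for a
 * B_META buffer, a request of size = 1200 rounds up to
 * nsize = roundup(1200, MINMETA) = 1536; since 1536 <= MAXMETA the storage
 * comes from KHEAP_VFS_BIO and b_bufsize becomes 1536 while b_bcount stays
 * 1200. A request above MAXMETA (16384) instead falls through to
 * kmem_alloc() and clears B_ZALLOC.
 */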
3651 
3652 /*
3653  *	Get a new buffer from one of the free lists.
3654  *
3655  *	A request for a queue is passed in. The queue from which the buffer
3656  *	was taken is returned. Out-of-range queue requests get BQ_EMPTY. A request for
3657  *	BQUEUE means no preference. Use heuristics in that case.
3658  *	The heuristics are as follows:
3659  *	Try BQ_AGE, BQ_LRU, BQ_EMPTY, BQ_META in that order.
3660  *	If none available block till one is made available.
3661  *	If buffers available on both BQ_AGE and BQ_LRU, check the timestamps.
3662  *	Pick the most stale buffer.
3663  *	If found buffer was marked delayed write, start the async. write
3664  *	and restart the search.
3665  *	Initialize the fields and disassociate the buffer from the vnode.
3666  *	Remove the buffer from the hash. Return the buffer and the queue
3667  *	on which it was found.
3668  *
3669  *	buf_mtx is held upon entry
3670  *	returns with buf_mtx locked if new buf available
3671  *	returns with buf_mtx UNlocked if new buf NOT available
3672  */
3673 
3674 static buf_t
3675 getnewbuf(int slpflag, int slptimeo, int * queue)
3676 {
3677 	buf_t   bp;
3678 	buf_t   lru_bp;
3679 	buf_t   age_bp;
3680 	buf_t   meta_bp;
3681 	int     age_time, lru_time, bp_time, meta_time;
3682 	int     req = *queue;   /* save it for restarts */
3683 	struct timespec ts;
3684 
3685 start:
3686 	/*
3687 	 * invalid request gets empty queue
3688 	 */
3689 	if ((*queue >= BQUEUES) || (*queue < 0)
3690 	    || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED)) {
3691 		*queue = BQ_EMPTY;
3692 	}
3693 
3694 
3695 	if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first)) {
3696 		goto found;
3697 	}
3698 
3699 	/*
3700 	 * need to grow number of bufs, add another one rather than recycling
3701 	 */
3702 	if (nbuf_headers < max_nbuf_headers) {
3703 		/*
3704 		 * Increment the count now, as the lock
3705 		 * is dropped for allocation.
3706 		 * That avoids overcommits
3707 		 */
3708 		nbuf_headers++;
3709 		goto add_newbufs;
3710 	}
3711 	/* Try for the requested queue first */
3712 	bp = bufqueues[*queue].tqh_first;
3713 	if (bp) {
3714 		goto found;
3715 	}
3716 
3717 	/* Unable to use requested queue */
3718 	age_bp = bufqueues[BQ_AGE].tqh_first;
3719 	lru_bp = bufqueues[BQ_LRU].tqh_first;
3720 	meta_bp = bufqueues[BQ_META].tqh_first;
3721 
3722 	if (!age_bp && !lru_bp && !meta_bp) {
3723 		/*
3724 		 * Unavailable on AGE or LRU or META queues
3725 		 * Try the empty list first
3726 		 */
3727 		bp = bufqueues[BQ_EMPTY].tqh_first;
3728 		if (bp) {
3729 			*queue = BQ_EMPTY;
3730 			goto found;
3731 		}
3732 		/*
3733 		 * We have seen that this is hard to trigger.
3734 		 * This is an overcommit of nbufs but needed
3735 		 * in some scenarios with disk images
3736 		 */
3737 
3738 add_newbufs:
3739 		lck_mtx_unlock(&buf_mtx);
3740 
3741 		/* Create a new temporary buffer header */
3742 		bp = zalloc_flags(buf_hdr_zone, Z_WAITOK | Z_NOFAIL);
3743 		bufhdrinit(bp);
3744 		bp->b_whichq = BQ_EMPTY;
3745 		bp->b_timestamp = buf_timestamp();
3746 		BLISTNONE(bp);
3747 		SET(bp->b_flags, B_HDRALLOC);
3748 		*queue = BQ_EMPTY;
3749 		lck_mtx_lock_spin(&buf_mtx);
3750 
3751 		if (bp) {
3752 			binshash(bp, &invalhash);
3753 			binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3754 			buf_hdr_count++;
3755 			goto found;
3756 		}
3757 		/* subtract already accounted bufcount */
3758 		nbuf_headers--;
3759 
3760 		bufstats.bufs_sleeps++;
3761 
3762 		/* wait for a free buffer of any kind */
3763 		needbuffer = 1;
3764 		/* hz value is 100 */
3765 		ts.tv_sec = (slptimeo / 1000);
3766 		/* the hz value is 100; which leads to 10ms */
3767 		ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10;
3768 
3769 		msleep(&needbuffer, &buf_mtx, slpflag | PDROP | (PRIBIO + 1), "getnewbuf", &ts);
3770 		return NULL;
3771 	}
3772 
3773 	/* Buffer available either on AGE or LRU or META */
3774 	bp = NULL;
3775 	*queue = -1;
3776 
3777 	/* Buffer available either on AGE or LRU */
3778 	if (!age_bp) {
3779 		bp = lru_bp;
3780 		*queue = BQ_LRU;
3781 	} else if (!lru_bp) {
3782 		bp = age_bp;
3783 		*queue = BQ_AGE;
3784 	} else { /* buffer available on both AGE and LRU */
3785 		int             t = buf_timestamp();
3786 
3787 		age_time = t - age_bp->b_timestamp;
3788 		lru_time = t - lru_bp->b_timestamp;
3789 		if ((age_time < 0) || (lru_time < 0)) { /* time set backwards */
3790 			bp = age_bp;
3791 			*queue = BQ_AGE;
3792 			/*
3793 			 * we should probably re-timestamp everything in the
3794 			 * queues at this point with the current time
3795 			 */
3796 		} else {
3797 			if ((lru_time >= lru_is_stale) && (age_time < age_is_stale)) {
3798 				bp = lru_bp;
3799 				*queue = BQ_LRU;
3800 			} else {
3801 				bp = age_bp;
3802 				*queue = BQ_AGE;
3803 			}
3804 		}
3805 	}
3806 
3807 	if (!bp) { /* Neither on AGE nor on LRU */
3808 		bp = meta_bp;
3809 		*queue = BQ_META;
3810 	} else if (meta_bp) {
3811 		int             t = buf_timestamp();
3812 
3813 		bp_time = t - bp->b_timestamp;
3814 		meta_time = t - meta_bp->b_timestamp;
3815 
3816 		if (!(bp_time < 0) && !(meta_time < 0)) {
3817 			/* time not set backwards */
3818 			int bp_is_stale;
3819 			bp_is_stale = (*queue == BQ_LRU) ?
3820 			    lru_is_stale : age_is_stale;
3821 
3822 			if ((meta_time >= meta_is_stale) &&
3823 			    (bp_time < bp_is_stale)) {
3824 				bp = meta_bp;
3825 				*queue = BQ_META;
3826 			}
3827 		}
3828 	}
3829 found:
3830 	if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY)) {
3831 		panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)", bp, bp->b_flags);
3832 	}
3833 
3834 	/* Clean it */
3835 	if (bcleanbuf(bp, FALSE)) {
3836 		/*
3837 		 * moved to the laundry thread, buffer not ready
3838 		 */
3839 		*queue = req;
3840 		goto start;
3841 	}
3842 	return bp;
3843 }
3844 
3845 
3846 /*
3847  * Clean a buffer.
3848  * Returns 0 if buffer is ready to use,
3849  * Returns 1 if issued a buf_bawrite() to indicate
3850  * that the buffer is not ready.
3851  *
3852  * buf_mtx is held upon entry
3853  * returns with buf_mtx locked
3854  */
3855 int
3856 bcleanbuf(buf_t bp, boolean_t discard)
3857 {
3858 	/* Remove from the queue */
3859 	bremfree_locked(bp);
3860 
3861 #ifdef JOE_DEBUG
3862 	bp->b_owner = current_thread();
3863 	bp->b_tag   = 2;
3864 #endif
3865 	/*
3866 	 * If buffer was a delayed write, start the IO by queuing
3867 	 * it on the LAUNDRY queue, and return 1
3868 	 */
3869 	if (ISSET(bp->b_flags, B_DELWRI)) {
3870 		if (discard) {
3871 			SET(bp->b_lflags, BL_WANTDEALLOC);
3872 		}
3873 
3874 		bmovelaundry(bp);
3875 
3876 		lck_mtx_unlock(&buf_mtx);
3877 
3878 		wakeup(&bufqueues[BQ_LAUNDRY]);
3879 		/*
3880 		 * and give it a chance to run
3881 		 */
3882 		(void)thread_block(THREAD_CONTINUE_NULL);
3883 
3884 		lck_mtx_lock_spin(&buf_mtx);
3885 
3886 		return 1;
3887 	}
3888 #ifdef JOE_DEBUG
3889 	bp->b_owner = current_thread();
3890 	bp->b_tag   = 8;
3891 #endif
3892 	/*
3893 	 * Buffer is no longer on any free list... we own it
3894 	 */
3895 	SET(bp->b_lflags, BL_BUSY);
3896 	buf_busycount++;
3897 
3898 	bremhash(bp);
3899 
3900 	/*
3901 	 * disassociate us from our vnode, if we had one...
3902 	 */
3903 	if (bp->b_vp) {
3904 		brelvp_locked(bp);
3905 	}
3906 
3907 	lck_mtx_unlock(&buf_mtx);
3908 
3909 	BLISTNONE(bp);
3910 
3911 	if (ISSET(bp->b_flags, B_META)) {
3912 		buf_free_meta_store(bp);
3913 	}
3914 
3915 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
3916 
3917 	buf_release_credentials(bp);
3918 
3919 	/* If discarding, just move to the empty queue */
3920 	if (discard) {
3921 		lck_mtx_lock_spin(&buf_mtx);
3922 		CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
3923 		bp->b_whichq = BQ_EMPTY;
3924 		binshash(bp, &invalhash);
3925 		binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3926 		CLR(bp->b_lflags, BL_BUSY);
3927 		buf_busycount--;
3928 	} else {
3929 		/* Not discarding: clean up and prepare for reuse */
3930 		bp->b_bufsize = 0;
3931 		bp->b_datap = (uintptr_t)NULL;
3932 		bp->b_upl = (void *)NULL;
3933 		bp->b_fsprivate = (void *)NULL;
3934 		/*
3935 		 * preserve the state of whether this buffer
3936 		 * was allocated on the fly or not...
3937 		 * the only other flag that should be set at
3938 		 * this point is BL_BUSY...
3939 		 */
3940 #ifdef JOE_DEBUG
3941 		bp->b_owner = current_thread();
3942 		bp->b_tag   = 3;
3943 #endif
3944 		bp->b_lflags = BL_BUSY;
3945 		bp->b_flags = (bp->b_flags & B_HDRALLOC);
3946 		bp->b_redundancy_flags = 0;
3947 		bp->b_dev = NODEV;
3948 		bp->b_blkno = bp->b_lblkno = 0;
3949 		bp->b_lblksize = 0;
3950 		bp->b_iodone = NULL;
3951 		bp->b_error = 0;
3952 		bp->b_resid = 0;
3953 		bp->b_bcount = 0;
3954 		bp->b_dirtyoff = bp->b_dirtyend = 0;
3955 		bp->b_validoff = bp->b_validend = 0;
3956 		bzero(&bp->b_attr, sizeof(struct bufattr));
3957 
3958 		lck_mtx_lock_spin(&buf_mtx);
3959 	}
3960 	return 0;
3961 }
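
/*
 * Illustrative sketch (editorial, not part of the build): the retry
 * protocol a caller such as getnewbuf() follows around bcleanbuf().
 * The victim-selection step is elided; the rest mirrors the contract
 * documented above (buf_mtx held on entry and on return).
 *
 *	lck_mtx_lock_spin(&buf_mtx);
 *	for (;;) {
 *		buf_t bp = <pick a buffer from a free queue>;
 *		if (bcleanbuf(bp, FALSE) == 0)
 *			break;	// bp is clean, BL_BUSY, and ours
 *		// returned 1: the delayed write went to the laundry
 *		// thread; select another candidate and retry
 *	}
 */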
3962 
3963 
3964 
3965 errno_t
3966 buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags)
3967 {
3968 	buf_t   bp;
3969 	errno_t error;
3970 	struct bufhashhdr *dp;
3971 
3972 	dp = BUFHASH(vp, lblkno);
3973 
3974 relook:
3975 	lck_mtx_lock_spin(&buf_mtx);
3976 
3977 	if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) {
3978 		lck_mtx_unlock(&buf_mtx);
3979 		return 0;
3980 	}
3981 	if (ISSET(bp->b_lflags, BL_BUSY)) {
3982 		if (!ISSET(flags, BUF_WAIT)) {
3983 			lck_mtx_unlock(&buf_mtx);
3984 			return EBUSY;
3985 		}
3986 		SET(bp->b_lflags, BL_WANTED);
3987 
3988 		error = msleep((caddr_t)bp, &buf_mtx, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL);
3989 
3990 		if (error) {
3991 			return error;
3992 		}
3993 		goto relook;
3994 	}
3995 	bremfree_locked(bp);
3996 	SET(bp->b_lflags, BL_BUSY);
3997 	SET(bp->b_flags, B_INVAL);
3998 	buf_busycount++;
3999 #ifdef JOE_DEBUG
4000 	bp->b_owner = current_thread();
4001 	bp->b_tag   = 4;
4002 #endif
4003 	lck_mtx_unlock(&buf_mtx);
4004 	buf_brelse(bp);
4005 
4006 	return 0;
4007 }
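
/*
 * Hypothetical usage sketch (vp and lblkno are assumed caller state;
 * buf_invalblkno() and BUF_WAIT come from this file): a filesystem
 * discarding any cached buffer for a logical block, waiting out a
 * busy one.
 *
 *	errno_t err = buf_invalblkno(vp, lblkno, BUF_WAIT);
 *	if (err)	// only possible if the msleep was interrupted
 *		return err;
 */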
4008 
4009 
4010 void
4011 buf_drop(buf_t bp)
4012 {
4013 	int need_wakeup = 0;
4014 
4015 	lck_mtx_lock_spin(&buf_mtx);
4016 
4017 	if (ISSET(bp->b_lflags, BL_WANTED)) {
4018 		/*
4019 		 * delay the actual wakeup until after we
4020 		 * clear BL_BUSY and we've dropped buf_mtx
4021 		 */
4022 		need_wakeup = 1;
4023 	}
4024 #ifdef JOE_DEBUG
4025 	bp->b_owner = current_thread();
4026 	bp->b_tag   = 9;
4027 #endif
4028 	/*
4029 	 * Unlock the buffer.
4030 	 */
4031 	CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
4032 	buf_busycount--;
4033 
4034 	lck_mtx_unlock(&buf_mtx);
4035 
4036 	if (need_wakeup) {
4037 		/*
4038 		 * Wake up any processes waiting for _this_ buffer to become free.
4039 		 */
4040 		wakeup(bp);
4041 	}
4042 }
4043 
4044 
4045 errno_t
4046 buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo)
4047 {
4048 	errno_t error;
4049 
4050 	lck_mtx_lock_spin(&buf_mtx);
4051 
4052 	error = buf_acquire_locked(bp, flags, slpflag, slptimeo);
4053 
4054 	lck_mtx_unlock(&buf_mtx);
4055 
4056 	return error;
4057 }
4058 
4059 
4060 static errno_t
4061 buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo)
4062 {
4063 	errno_t error;
4064 	struct timespec ts;
4065 
4066 	if (ISSET(bp->b_flags, B_LOCKED)) {
4067 		if ((flags & BAC_SKIP_LOCKED)) {
4068 			return EDEADLK;
4069 		}
4070 	} else {
4071 		if ((flags & BAC_SKIP_NONLOCKED)) {
4072 			return EDEADLK;
4073 		}
4074 	}
4075 	if (ISSET(bp->b_lflags, BL_BUSY)) {
4076 		/*
4077 		 * since the lck_mtx_lock may block, the buffer
4078 		 * may become BUSY, so we need to
4079 		 * recheck for a NOWAIT request
4080 		 */
4081 		if (flags & BAC_NOWAIT) {
4082 			return EBUSY;
4083 		}
4084 		SET(bp->b_lflags, BL_WANTED);
4085 
4086 		/* the hz value is 100, so each tick is 10ms */
4087 		ts.tv_sec = (slptimeo / 100);
4088 		ts.tv_nsec = (slptimeo % 100) * 10  * NSEC_PER_USEC * 1000;
4089 		error = msleep((caddr_t)bp, &buf_mtx, slpflag | (PRIBIO + 1), "buf_acquire", &ts);
4090 
4091 		if (error) {
4092 			return error;
4093 		}
4094 		return EAGAIN;
4095 	}
4096 	if (flags & BAC_REMOVE) {
4097 		bremfree_locked(bp);
4098 	}
4099 	SET(bp->b_lflags, BL_BUSY);
4100 	buf_busycount++;
4101 
4102 #ifdef JOE_DEBUG
4103 	bp->b_owner = current_thread();
4104 	bp->b_tag   = 5;
4105 #endif
4106 	return 0;
4107 }
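
/*
 * Sketch of the acquire/drop pairing (the surrounding caller is an
 * assumption; the return-value handling follows the code above):
 *
 *	errno_t err = buf_acquire(bp, BAC_REMOVE, PCATCH, 0);
 *	if (err == 0) {
 *		// we own bp (BL_BUSY set, removed from its free queue)
 *		...
 *		buf_drop(bp);	// clears BL_BUSY, wakes BL_WANTED sleepers
 *	} else if (err == EAGAIN) {
 *		// buffer was busy and we slept; re-lookup bp and retry
 *	}
 */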
4108 
4109 
4110 /*
4111  * Wait for operations on the buffer to complete.
4112  * When they do, extract and return the I/O's error value.
4113  */
4114 errno_t
4115 buf_biowait(buf_t bp)
4116 {
4117 	while (!ISSET(bp->b_flags, B_DONE)) {
4118 		lck_mtx_lock_spin(&buf_mtx);
4119 
4120 		if (!ISSET(bp->b_flags, B_DONE)) {
4121 			DTRACE_IO1(wait__start, buf_t, bp);
4122 			(void) msleep(bp, &buf_mtx, PDROP | (PRIBIO + 1), "buf_biowait", NULL);
4123 			DTRACE_IO1(wait__done, buf_t, bp);
4124 		} else {
4125 			lck_mtx_unlock(&buf_mtx);
4126 		}
4127 	}
4128 	/* check for interruption of I/O (e.g. via NFS), then errors. */
4129 	if (ISSET(bp->b_flags, B_EINTR)) {
4130 		CLR(bp->b_flags, B_EINTR);
4131 		return EINTR;
4132 	} else if (ISSET(bp->b_flags, B_ERROR)) {
4133 		return bp->b_error ? bp->b_error : EIO;
4134 	} else {
4135 		return 0;
4136 	}
4137 }
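
/*
 * Sketch of the synchronous I/O pattern this supports (the caller
 * shown is an assumption, not taken from this file):
 *
 *	VNOP_STRATEGY(bp);		// driver completes via buf_biodone()
 *	errno_t err = buf_biowait(bp);	// EINTR, the I/O's error, or 0
 */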
4138 
4139 
4140 /*
4141  * Mark I/O complete on a buffer.
4142  *
4143  * If a callback has been requested, e.g. the pageout
4144  * daemon, do so. Otherwise, awaken waiting processes.
4145  *
4146  * [ Leffler, et al., says on p.247:
4147  *	"This routine wakes up the blocked process, frees the buffer
4148  *	for an asynchronous write, or, for a request by the pagedaemon
4149  *	process, invokes a procedure specified in the buffer structure" ]
4150  *
4151  * In real life, the pagedaemon (or other system processes) wants
4152  * to do async stuff too, and doesn't want the buffer buf_brelse()'d.
4153  * (for swap pager, that puts swap buffers on the free lists (!!!),
4154  * for the vn device, that puts malloc'd buffers on the free lists!)
4155  */
4156 
4157 void
4158 buf_biodone(buf_t bp)
4159 {
4160 	mount_t mp;
4161 	struct bufattr *bap;
4162 	struct timeval real_elapsed;
4163 	uint64_t real_elapsed_usec = 0;
4164 
4165 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START,
4166 	    bp, bp->b_datap, bp->b_flags, 0, 0);
4167 
4168 	/* Record our progress. */
4169 	vfs_update_last_completion_time();
4170 
4171 	if (ISSET(bp->b_flags, B_DONE)) {
4172 		panic("biodone already");
4173 	}
4174 
4175 	bap = &bp->b_attr;
4176 
4177 	if (bp->b_vp && bp->b_vp->v_mount) {
4178 		mp = bp->b_vp->v_mount;
4179 	} else {
4180 		mp = NULL;
4181 	}
4182 
4183 	if (ISSET(bp->b_flags, B_ERROR)) {
4184 		if (mp && (MNT_ROOTFS & mp->mnt_flag)) {
4185 			dk_error_description_t desc;
4186 			bzero(&desc, sizeof(desc));
4187 			desc.description      = panic_disk_error_description;
4188 			desc.description_size = panic_disk_error_description_size;
4189 			VNOP_IOCTL(mp->mnt_devvp, DKIOCGETERRORDESCRIPTION, (caddr_t)&desc, 0, vfs_context_kernel());
4190 		}
4191 	}
4192 
4193 	if (mp && (bp->b_flags & B_READ) == 0) {
4194 		update_last_io_time(mp);
4195 		INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size);
4196 	} else if (mp) {
4197 		INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size);
4198 	}
4199 
4200 	throttle_info_end_io(bp);
4201 
4202 	if (kdebug_enable) {
4203 		int code    = DKIO_DONE;
4204 		int io_tier = GET_BUFATTR_IO_TIER(bap);
4205 
4206 		if (bp->b_flags & B_READ) {
4207 			code |= DKIO_READ;
4208 		}
4209 		if (bp->b_flags & B_ASYNC) {
4210 			code |= DKIO_ASYNC;
4211 		}
4212 
4213 		if (bp->b_flags & B_META) {
4214 			code |= DKIO_META;
4215 		} else if (bp->b_flags & B_PAGEIO) {
4216 			code |= DKIO_PAGING;
4217 		}
4218 
4219 		if (io_tier != 0) {
4220 			code |= DKIO_THROTTLE;
4221 		}
4222 
4223 		code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK);
4224 
4225 		if (bp->b_flags & B_PASSIVE) {
4226 			code |= DKIO_PASSIVE;
4227 		}
4228 
4229 		if (bap->ba_flags & BA_NOCACHE) {
4230 			code |= DKIO_NOCACHE;
4231 		}
4232 
4233 		if (bap->ba_flags & BA_IO_TIER_UPGRADE) {
4234 			code |= DKIO_TIER_UPGRADE;
4235 		}
4236 
4237 		KDBG_RELEASE_NOPROCFILT(FSDBG_CODE(DBG_DKRW, code),
4238 		    buf_kernel_addrperm_addr(bp),
4239 		    (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid,
4240 		    bp->b_error);
4241 	}
4242 
4243 	microuptime(&real_elapsed);
4244 	timevalsub(&real_elapsed, &bp->b_timestamp_tv);
4245 	real_elapsed_usec = real_elapsed.tv_sec * USEC_PER_SEC + real_elapsed.tv_usec;
4246 	disk_conditioner_delay(bp, 1, bp->b_bcount, real_elapsed_usec);
4247 
4248 	/*
4249 	 * I/O was done, so don't believe
4250 	 * the DIRTY state from VM anymore...
4251 	 * and we need to reset the THROTTLED/PASSIVE
4252 	 * indicators
4253 	 */
4254 	CLR(bp->b_flags, (B_WASDIRTY | B_PASSIVE));
4255 	CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP | BA_IO_TIER_UPGRADE));
4256 
4257 	SET_BUFATTR_IO_TIER(bap, 0);
4258 
4259 	DTRACE_IO1(done, buf_t, bp);
4260 
4261 	if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW)) {
4262 		/*
4263 		 * wake up any writers blocked
4264 		 * on throttle or waiting for I/O
4265 		 * to drain
4266 		 */
4267 		vnode_writedone(bp->b_vp);
4268 	}
4269 
4270 	if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) {  /* if necessary, call out */
4271 		void    (*iodone_func)(struct buf *, void *) = bp->b_iodone;
4272 		void    *arg = bp->b_transaction;
4273 		int     callout = ISSET(bp->b_flags, B_CALL);
4274 
4275 		if (iodone_func == NULL) {
4276 			panic("biodone: bp @ %p has NULL b_iodone!", bp);
4277 		}
4278 
4279 		CLR(bp->b_flags, (B_CALL | B_FILTER));  /* filters and callouts are one-shot */
4280 		bp->b_iodone = NULL;
4281 		bp->b_transaction = NULL;
4282 
4283 		if (callout) {
4284 			SET(bp->b_flags, B_DONE);       /* note that it's done */
4285 		}
4286 		(*iodone_func)(bp, arg);
4287 
4288 		if (callout) {
4289 			/*
4290 			 * assumes that the callback function takes
4291 			 * ownership of the bp and deals with releasing it if necessary
4292 			 */
4293 			goto biodone_done;
4294 		}
4295 		/*
4296 		 * in this case the callback function is acting
4297 		 * strictly as a filter... it does not take
4298 		 * ownership of the bp and is expecting us
4299 		 * to finish cleaning up... this is currently used
4300 		 * by the HFS journaling code
4301 		 */
4302 	}
4303 	if (ISSET(bp->b_flags, B_ASYNC)) {      /* if async, release it */
4304 		SET(bp->b_flags, B_DONE);       /* note that it's done */
4305 
4306 		buf_brelse(bp);
4307 	} else {                                /* or just wakeup the buffer */
4308 		/*
4309 		 * by taking the mutex, we serialize
4310 		 * the buf owner calling buf_biowait so that we'll
4311 		 * only see him in one of 2 states...
4312 		 * state 1: B_DONE wasn't set and he's
4313 		 * blocked in msleep
4314 		 * state 2: he's blocked trying to take the
4315 		 * mutex before looking at B_DONE
4316 		 * BL_WANTED is cleared in case anyone else
4317 		 * is blocked waiting for the buffer... note
4318 		 * that we haven't cleared BL_BUSY yet, so if
4319 		 * they do get to run, they're going to re-set
4320 		 * BL_WANTED and go back to sleep
4321 		 */
4322 		lck_mtx_lock_spin(&buf_mtx);
4323 
4324 		CLR(bp->b_lflags, BL_WANTED);
4325 		SET(bp->b_flags, B_DONE);               /* note that it's done */
4326 
4327 		lck_mtx_unlock(&buf_mtx);
4328 
4329 		wakeup(bp);
4330 	}
4331 biodone_done:
4332 	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END,
4333 	    (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0);
4334 }
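
/*
 * Sketch of the B_CALL path described above, assuming the standard
 * buf_setcallback() registration (my_iodone and my_arg are
 * illustrative names):
 *
 *	buf_setcallback(bp, my_iodone, my_arg);	// sets B_CALL
 *	VNOP_STRATEGY(bp);			// ends in buf_biodone(bp)
 *	// my_iodone() runs with B_DONE set and owns bp from then on
 */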
4335 
4336 /*
4337  * Obfuscate buf pointers.
4338  */
4339 vm_offset_t
4340 buf_kernel_addrperm_addr(void * addr)
4341 {
4342 	if ((vm_offset_t)addr == 0) {
4343 		return 0;
4344 	} else {
4345 		return (vm_offset_t)addr + buf_kernel_addrperm;
4346 	}
4347 }
4348 
4349 /*
4350  * Return a count of buffers on the "locked" queue.
4351  */
4352 int
4353 count_lock_queue(void)
4354 {
4355 	buf_t   bp;
4356 	int     n = 0;
4357 
4358 	lck_mtx_lock_spin(&buf_mtx);
4359 
4360 	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
4361 	    bp = bp->b_freelist.tqe_next) {
4362 		n++;
4363 	}
4364 	lck_mtx_unlock(&buf_mtx);
4365 
4366 	return n;
4367 }
4368 
4369 /*
4370  * Return a count of 'busy' buffers. Used at the time of shutdown.
4371  * note: This is also called from the mach side in debug context in kdp.c
4372  */
4373 uint32_t
4374 count_busy_buffers(void)
4375 {
4376 	return buf_busycount + bufstats.bufs_iobufinuse;
4377 }
4378 
4379 #if DIAGNOSTIC
4380 /*
4381  * Print out statistics on the current allocation of the buffer pool.
4382  * Can be enabled to print out on every ``sync'' by setting "syncprt"
4383  * in vfs_syscalls.c using sysctl.
4384  */
4385 void
4386 vfs_bufstats()
4387 {
4388 	int i, j, count;
4389 	struct buf *bp;
4390 	struct bqueues *dp;
4391 	int counts[MAXBSIZE / CLBYTES + 1];
4392 	static char *bname[BQUEUES] =
4393 	{ "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
4394 
4395 	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
4396 		count = 0;
4397 		for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
4398 			counts[j] = 0;
4399 		}
4400 
4401 		lck_mtx_lock(&buf_mtx);
4402 
4403 		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
4404 			counts[bp->b_bufsize / CLBYTES]++;
4405 			count++;
4406 		}
4407 		lck_mtx_unlock(&buf_mtx);
4408 
4409 		printf("%s: total-%d", bname[i], count);
4410 		for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
4411 			if (counts[j] != 0) {
4412 				printf(", %d-%d", j * CLBYTES, counts[j]);
4413 			}
4414 		}
4415 		printf("\n");
4416 	}
4417 }
4418 #endif /* DIAGNOSTIC */
4419 
4420 #define NRESERVEDIOBUFS 128
4421 
4422 #define MNT_VIRTUALDEV_MAX_IOBUFS 128
4423 #define VIRTUALDEV_MAX_IOBUFS ((40*niobuf_headers)/100)
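/* i.e. diskimage mounts may collectively hold at most 40% of all iobuf headers */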
4424 
4425 buf_t
4426 alloc_io_buf(vnode_t vp, int priv)
4427 {
4428 	buf_t   bp;
4429 	mount_t mp = NULL;
4430 	int alloc_for_virtualdev = FALSE;
4431 
4432 	lck_mtx_lock_spin(&iobuffer_mtxp);
4433 
4434 	/*
4435 	 * We subject iobuf requests for diskimages to additional restrictions.
4436 	 *
4437 	 * a) A single diskimage mount cannot use up more than
4438 	 * MNT_VIRTUALDEV_MAX_IOBUFS. However, vm-privileged (pageout) requests
4439 	 * are not subject to this restriction.
4440 	 * b) iobuf headers used by all diskimage mounts combined cannot
4441 	 * exceed VIRTUALDEV_MAX_IOBUFS.
4442 	 */
4443 	if (vp && ((mp = vp->v_mount)) && mp != dead_mountp &&
4444 	    mp->mnt_kern_flag & MNTK_VIRTUALDEV) {
4445 		alloc_for_virtualdev = TRUE;
4446 		while ((!priv && mp->mnt_iobufinuse > MNT_VIRTUALDEV_MAX_IOBUFS) ||
4447 		    bufstats.bufs_iobufinuse_vdev > VIRTUALDEV_MAX_IOBUFS) {
4448 			bufstats.bufs_iobufsleeps++;
4449 
4450 			need_iobuffer = 1;
4451 			(void)msleep(&need_iobuffer, &iobuffer_mtxp,
4452 			    PSPIN | (PRIBIO + 1), (const char *)"alloc_io_buf (1)",
4453 			    NULL);
4454 		}
4455 	}
4456 
4457 	while ((((uint32_t)(niobuf_headers - NRESERVEDIOBUFS) < bufstats.bufs_iobufinuse) && !priv) ||
4458 	    (bp = iobufqueue.tqh_first) == NULL) {
4459 		bufstats.bufs_iobufsleeps++;
4460 
4461 		need_iobuffer = 1;
4462 		(void)msleep(&need_iobuffer, &iobuffer_mtxp, PSPIN | (PRIBIO + 1),
4463 		    (const char *)"alloc_io_buf (2)", NULL);
4464 	}
4465 	TAILQ_REMOVE(&iobufqueue, bp, b_freelist);
4466 
4467 	bufstats.bufs_iobufinuse++;
4468 	if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax) {
4469 		bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse;
4470 	}
4471 
4472 	if (alloc_for_virtualdev) {
4473 		mp->mnt_iobufinuse++;
4474 		bufstats.bufs_iobufinuse_vdev++;
4475 	}
4476 
4477 	lck_mtx_unlock(&iobuffer_mtxp);
4478 
4479 	/*
4480 	 * initialize various fields
4481 	 * we don't need to hold the mutex since the buffer
4482 	 * is now private... the vp should have a reference
4483 	 * on it and is not protected by this mutex in any event
4484 	 */
4485 	bp->b_timestamp = 0;
4486 	bp->b_proc = NULL;
4487 
4488 	bp->b_datap = 0;
4489 	bp->b_flags = 0;
4490 	bp->b_lflags = BL_BUSY | BL_IOBUF;
4491 	if (alloc_for_virtualdev) {
4492 		bp->b_lflags |= BL_IOBUF_VDEV;
4493 	}
4494 	bp->b_redundancy_flags = 0;
4495 	bp->b_blkno = bp->b_lblkno = 0;
4496 	bp->b_lblksize = 0;
4497 #ifdef JOE_DEBUG
4498 	bp->b_owner = current_thread();
4499 	bp->b_tag   = 6;
4500 #endif
4501 	bp->b_iodone = NULL;
4502 	bp->b_error = 0;
4503 	bp->b_resid = 0;
4504 	bp->b_bcount = 0;
4505 	bp->b_bufsize = 0;
4506 	bp->b_upl = NULL;
4507 	bp->b_fsprivate = (void *)NULL;
4508 	bp->b_vp = vp;
4509 	bzero(&bp->b_attr, sizeof(struct bufattr));
4510 
4511 	if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) {
4512 		bp->b_dev = vp->v_rdev;
4513 	} else {
4514 		bp->b_dev = NODEV;
4515 	}
4516 
4517 	return bp;
4518 }
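
/*
 * Hypothetical lifecycle of an iobuf header from this pool:
 *
 *	buf_t bp = alloc_io_buf(vp, 0);	// may sleep; priv == 0 honors the reserve
 *	// fill in b_datap, b_bcount, etc., then issue and complete the I/O
 *	free_io_buf(bp);		// return the header, wake any waiters
 */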
4519 
4520 
4521 void
4522 free_io_buf(buf_t bp)
4523 {
4524 	int need_wakeup = 0;
4525 	int free_for_virtualdev = FALSE;
4526 	mount_t mp = NULL;
4527 
4528 	/* Was this iobuf for a diskimage? */
4529 	if (bp->b_lflags & BL_IOBUF_VDEV) {
4530 		free_for_virtualdev = TRUE;
4531 		if (bp->b_vp) {
4532 			mp = bp->b_vp->v_mount;
4533 		}
4534 	}
4535 
4536 	/*
4537 	 * put buffer back on the head of the iobufqueue
4538 	 */
4539 	bp->b_vp = NULL;
4540 	bp->b_flags = B_INVAL;
4541 
4542 	/* Zero out the bufattr and its flags before relinquishing this iobuf */
4543 	bzero(&bp->b_attr, sizeof(struct bufattr));
4544 
4545 	lck_mtx_lock_spin(&iobuffer_mtxp);
4546 
4547 	binsheadfree(bp, &iobufqueue, -1);
4548 
4549 	if (need_iobuffer) {
4550 		/*
4551 		 * Wake up any processes waiting because they need an io buffer
4552 		 *
4553 		 * do the wakeup after we drop the mutex... it's possible that the
4554 		 * wakeup will be superfluous if need_iobuffer gets set again and
4555 		 * another thread runs this path, but it's highly unlikely, doesn't
4556 		 * hurt, and it means we don't hold up I/O progress if the wakeup blocks
4557 		 * trying to grab a task related lock...
4558 		 * trying to grab a task-related lock...
4559 		need_iobuffer = 0;
4560 		need_wakeup = 1;
4561 	}
4562 	if (bufstats.bufs_iobufinuse <= 0) {
4563 		panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse <= 0", bp);
4564 	}
4565 
4566 	bufstats.bufs_iobufinuse--;
4567 
4568 	if (free_for_virtualdev) {
4569 		bufstats.bufs_iobufinuse_vdev--;
4570 		if (mp && mp != dead_mountp) {
4571 			mp->mnt_iobufinuse--;
4572 		}
4573 	}
4574 
4575 	lck_mtx_unlock(&iobuffer_mtxp);
4576 
4577 	if (need_wakeup) {
4578 		wakeup(&need_iobuffer);
4579 	}
4580 }
4581 
4582 
4583 void
4584 buf_list_lock(void)
4585 {
4586 	lck_mtx_lock_spin(&buf_mtx);
4587 }
4588 
4589 void
4590 buf_list_unlock(void)
4591 {
4592 	lck_mtx_unlock(&buf_mtx);
4593 }
4594 
4595 /*
4596  * If getnewbuf() calls bcleanbuf() on the same thread
4597  * there is a potential for stack overrun and deadlocks.
4598  * So we always hand off the work to a worker thread for completion.
4599  */
4600 
4601 
4602 static void
4603 bcleanbuf_thread_init(void)
4604 {
4605 	thread_t        thread = THREAD_NULL;
4606 
4607 	/* create worker thread */
4608 	kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread);
4609 	thread_deallocate(thread);
4610 }
4611 
4612 typedef int (*bcleanbufcontinuation)(int);
4613 
4614 __attribute__((noreturn))
4615 static void
4616 bcleanbuf_thread(void)
4617 {
4618 	struct buf *bp;
4619 	int error = 0;
4620 	int loopcnt = 0;
4621 
4622 	for (;;) {
4623 		lck_mtx_lock_spin(&buf_mtx);
4624 
4625 		while ((bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) {
4626 			(void)msleep0(&bufqueues[BQ_LAUNDRY], &buf_mtx, PRIBIO | PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread);
4627 		}
4628 
4629 		/*
4630 		 * Remove from the queue
4631 		 */
4632 		bremfree_locked(bp);
4633 
4634 		/*
4635 		 * Buffer is no longer on any free list
4636 		 */
4637 		SET(bp->b_lflags, BL_BUSY);
4638 		buf_busycount++;
4639 
4640 #ifdef JOE_DEBUG
4641 		bp->b_owner = current_thread();
4642 		bp->b_tag   = 10;
4643 #endif
4644 
4645 		lck_mtx_unlock(&buf_mtx);
4646 		/*
4647 		 * do the IO
4648 		 */
4649 		error = bawrite_internal(bp, 0);
4650 
4651 		if (error) {
4652 			bp->b_whichq = BQ_LAUNDRY;
4653 			bp->b_timestamp = buf_timestamp();
4654 
4655 			lck_mtx_lock_spin(&buf_mtx);
4656 
4657 			binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
4658 			blaundrycnt++;
4659 
4660 			/* we never leave a busy buffer on the laundry queue */
4661 			CLR(bp->b_lflags, BL_BUSY);
4662 			buf_busycount--;
4663 #ifdef JOE_DEBUG
4664 			bp->b_owner = current_thread();
4665 			bp->b_tag   = 11;
4666 #endif
4667 
4668 			lck_mtx_unlock(&buf_mtx);
4669 
4670 			if (loopcnt > MAXLAUNDRY) {
4671 				/*
4672 				 * bawrite_internal() can return errors if we're throttled. If we've
4673 				 * done several I/Os and failed, give the system some time to unthrottle
4674 				 * the vnode
4675 				 */
4676 				(void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry", 1);
4677 				loopcnt = 0;
4678 			} else {
4679 				/* give other threads a chance to run */
4680 				(void)thread_block(THREAD_CONTINUE_NULL);
4681 				loopcnt++;
4682 			}
4683 		}
4684 	}
4685 }
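
/*
 * Producer side of the laundry handoff (see bcleanbuf() above); a
 * sketch of what wakes this thread:
 *
 *	bmovelaundry(bp);			// queue delayed write on BQ_LAUNDRY
 *	lck_mtx_unlock(&buf_mtx);
 *	wakeup(&bufqueues[BQ_LAUNDRY]);		// rouse bcleanbuf_thread()
 */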
4686 
4687 
4688 static int
4689 brecover_data(buf_t bp)
4690 {
4691 	int     upl_offset;
4692 	upl_t   upl;
4693 	upl_page_info_t *pl;
4694 	kern_return_t kret;
4695 	vnode_t vp = bp->b_vp;
4696 	int upl_flags;
4697 
4698 
4699 	if (!UBCINFOEXISTS(vp) || bp->b_bufsize == 0) {
4700 		goto dump_buffer;
4701 	}
4702 
4703 	upl_flags = UPL_PRECIOUS;
4704 	if (!(buf_flags(bp) & B_READ)) {
4705 		/*
4706 		 * "write" operation:  let the UPL subsystem know
4707 		 * that we intend to modify the buffer cache pages we're
4708 		 * gathering.
4709 		 */
4710 		upl_flags |= UPL_WILL_MODIFY;
4711 	}
4712 
4713 	kret = ubc_create_upl_kernel(vp,
4714 	    ubc_blktooff(vp, bp->b_lblkno),
4715 	    bp->b_bufsize,
4716 	    &upl,
4717 	    &pl,
4718 	    upl_flags,
4719 	    VM_KERN_MEMORY_FILE);
4720 	if (kret != KERN_SUCCESS) {
4721 		panic("Failed to create UPL");
4722 	}
4723 
4724 	for (upl_offset = 0; (uint32_t)upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) {
4725 		if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) {
4726 			ubc_upl_abort(upl, 0);
4727 			goto dump_buffer;
4728 		}
4729 	}
4730 	bp->b_upl = upl;
4731 
4732 	kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
4733 
4734 	if (kret != KERN_SUCCESS) {
4735 		panic("getblk: ubc_upl_map() failed with (%d)", kret);
4736 	}
4737 	return 1;
4738 
4739 dump_buffer:
4740 	bp->b_bufsize = 0;
4741 	SET(bp->b_flags, B_INVAL);
4742 	buf_brelse(bp);
4743 
4744 	return 0;
4745 }
4746 
4747 int
4748 fs_buffer_cache_gc_register(void (* callout)(int, void *), void *context)
4749 {
4750 	lck_mtx_lock(&buf_gc_callout);
4751 	for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
4752 		if (fs_callouts[i].callout == NULL) {
4753 			fs_callouts[i].callout = callout;
4754 			fs_callouts[i].context = context;
4755 			lck_mtx_unlock(&buf_gc_callout);
4756 			return 0;
4757 		}
4758 	}
4759 
4760 	lck_mtx_unlock(&buf_gc_callout);
4761 	return ENOMEM;
4762 }
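
/*
 * Hypothetical registration from a filesystem (my_fs_gc and my_mount
 * are illustrative names, not part of this file):
 *
 *	static void my_fs_gc(int all, void *ctx) { ... shed cached metadata ... }
 *	...
 *	if (fs_buffer_cache_gc_register(my_fs_gc, my_mount) == ENOMEM)
 *		printf("buffer_cache_gc: no free callout slots\n");
 *	// ... later, before unmount:
 *	fs_buffer_cache_gc_unregister(my_fs_gc, my_mount);
 */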
4763 
4764 int
4765 fs_buffer_cache_gc_unregister(void (* callout)(int, void *), void *context)
4766 {
4767 	lck_mtx_lock(&buf_gc_callout);
4768 	for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
4769 		if (fs_callouts[i].callout == callout &&
4770 		    fs_callouts[i].context == context) {
4771 			fs_callouts[i].callout = NULL;
4772 			fs_callouts[i].context = NULL;
4773 		}
4774 	}
4775 	lck_mtx_unlock(&buf_gc_callout);
4776 	return 0;
4777 }
4778 
4779 static void
4780 fs_buffer_cache_gc_dispatch_callouts(int all)
4781 {
4782 	lck_mtx_lock(&buf_gc_callout);
4783 	for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
4784 		if (fs_callouts[i].callout != NULL) {
4785 			fs_callouts[i].callout(all, fs_callouts[i].context);
4786 		}
4787 	}
4788 	lck_mtx_unlock(&buf_gc_callout);
4789 }
4790 
4791 static boolean_t
4792 buffer_cache_gc(int all)
4793 {
4794 	buf_t bp;
4795 	boolean_t did_large_zfree = FALSE;
4796 	boolean_t need_wakeup = FALSE;
4797 	int now = buf_timestamp();
4798 	uint32_t found = 0;
4799 	struct bqueues privq;
4800 	int thresh_hold = BUF_STALE_THRESHHOLD;
4801 
4802 	if (all) {
4803 		thresh_hold = 0;
4804 	}
4805 	/*
4806 	 * We only care about metadata (incore storage comes from zalloc()).
4807 	 * Unless "all" is set (used to evict meta data buffers in preparation
4808 	 * Unless "all" is set (used to evict metadata buffers in preparation
4809 	 * for deep sleep), we only evict up to BUF_MAX_GC_BATCH_SIZE buffers
4810 	 * that have not been accessed in the last BUF_STALE_THRESHHOLD seconds.
4811 	 * BUF_MAX_GC_BATCH_SIZE controls both the hold time of the global lock
4812 	 * "buf_mtx" and the length of time we spend compute bound in the GC
4813 	 * thread which calls this function.
4814 	lck_mtx_lock(&buf_mtx);
4815 
4816 	do {
4817 		found = 0;
4818 		TAILQ_INIT(&privq);
4819 		need_wakeup = FALSE;
4820 
4821 		while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) &&
4822 		    (now > bp->b_timestamp) &&
4823 		    (now - bp->b_timestamp > thresh_hold) &&
4824 		    (found < BUF_MAX_GC_BATCH_SIZE)) {
4825 			/* Remove from free list */
4826 			bremfree_locked(bp);
4827 			found++;
4828 
4829 #ifdef JOE_DEBUG
4830 			bp->b_owner = current_thread();
4831 			bp->b_tag   = 12;
4832 #endif
4833 
4834 			/* If dirty, move to laundry queue and remember to do wakeup */
4835 			if (ISSET(bp->b_flags, B_DELWRI)) {
4836 				SET(bp->b_lflags, BL_WANTDEALLOC);
4837 
4838 				bmovelaundry(bp);
4839 				need_wakeup = TRUE;
4840 
4841 				continue;
4842 			}
4843 
4844 			/*
4845 			 * Mark busy and put on private list.  We could technically get
4846 			 * away without setting BL_BUSY here.
4847 			 */
4848 			SET(bp->b_lflags, BL_BUSY);
4849 			buf_busycount++;
4850 
4851 			/*
4852 			 * Remove from hash and dissociate from vp.
4853 			 */
4854 			bremhash(bp);
4855 			if (bp->b_vp) {
4856 				brelvp_locked(bp);
4857 			}
4858 
4859 			TAILQ_INSERT_TAIL(&privq, bp, b_freelist);
4860 		}
4861 
4862 		if (found == 0) {
4863 			break;
4864 		}
4865 
4866 		/* Drop lock for batch processing */
4867 		lck_mtx_unlock(&buf_mtx);
4868 
4869 		/* Wakeup and yield for laundry if need be */
4870 		if (need_wakeup) {
4871 			wakeup(&bufqueues[BQ_LAUNDRY]);
4872 			(void)thread_block(THREAD_CONTINUE_NULL);
4873 		}
4874 
4875 		/* Clean up every buffer on private list */
4876 		TAILQ_FOREACH(bp, &privq, b_freelist) {
4877 			/* Take note if we've definitely freed at least a page to a zone */
4878 			if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) {
4879 				did_large_zfree = TRUE;
4880 			}
4881 
4882 			trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
4883 
4884 			/* Free Storage */
4885 			buf_free_meta_store(bp);
4886 
4887 			/* Release credentials */
4888 			buf_release_credentials(bp);
4889 
4890 			/* Prepare for moving to empty queue */
4891 			CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED
4892 			    | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
4893 			bp->b_whichq = BQ_EMPTY;
4894 			BLISTNONE(bp);
4895 		}
4896 		lck_mtx_lock(&buf_mtx);
4897 
4898 		/* Back under lock, move them all to invalid hash and clear busy */
4899 		TAILQ_FOREACH(bp, &privq, b_freelist) {
4900 			binshash(bp, &invalhash);
4901 			CLR(bp->b_lflags, BL_BUSY);
4902 			buf_busycount--;
4903 
4904 #ifdef JOE_DEBUG
4905 			if (bp->b_owner != current_thread()) {
4906 				panic("Buffer stolen from buffer_cache_gc()");
4907 			}
4908 			bp->b_owner = current_thread();
4909 			bp->b_tag   = 13;
4910 #endif
4911 		}
4912 
4913 		/* And do a big bulk move to the empty queue */
4914 		TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist);
4915 	} while (all && (found == BUF_MAX_GC_BATCH_SIZE));
4916 
4917 	lck_mtx_unlock(&buf_mtx);
4918 
4919 	fs_buffer_cache_gc_dispatch_callouts(all);
4920 
4921 	return did_large_zfree;
4922 }
4923 
4924 
4925 /*
4926  * disabled for now
4927  */
4928 
4929 #if FLUSH_QUEUES
4930 
4931 #define NFLUSH 32
4932 
4933 static int
4934 bp_cmp(void *a, void *b)
4935 {
4936 	buf_t *bp_a = *(buf_t **)a,
4937 	    *bp_b = *(buf_t **)b;
4938 	daddr64_t res;
4939 
4940 	// don't have to worry about negative block
4941 	// numbers so this is ok to do.
4942 	//
4943 	res = (bp_a->b_blkno - bp_b->b_blkno);
4944 
4945 	return (int)res;
4946 }
4947 
4948 
4949 int
4950 bflushq(int whichq, mount_t mp)
4951 {
4952 	buf_t   bp, next;
4953 	int     i, buf_count;
4954 	int     total_writes = 0;
4955 	static buf_t flush_table[NFLUSH];
4956 
4957 	if (whichq < 0 || whichq >= BQUEUES) {
4958 		return 0;
4959 	}
4960 
4961 restart:
4962 	lck_mtx_lock(&buf_mtx);
4963 
4964 	bp = TAILQ_FIRST(&bufqueues[whichq]);
4965 
4966 	for (buf_count = 0; bp; bp = next) {
4967 		next = bp->b_freelist.tqe_next;
4968 
4969 		if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
4970 			continue;
4971 		}
4972 
4973 		if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
4974 			bremfree_locked(bp);
4975 #ifdef JOE_DEBUG
4976 			bp->b_owner = current_thread();
4977 			bp->b_tag   = 7;
4978 #endif
4979 			SET(bp->b_lflags, BL_BUSY);
4980 			buf_busycount++;
4981 
4982 			flush_table[buf_count] = bp;
4983 			buf_count++;
4984 			total_writes++;
4985 
4986 			if (buf_count >= NFLUSH) {
4987 				lck_mtx_unlock(&buf_mtx);
4988 
4989 				qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
4990 
4991 				for (i = 0; i < buf_count; i++) {
4992 					buf_bawrite(flush_table[i]);
4993 				}
4994 				goto restart;
4995 			}
4996 		}
4997 	}
4998 	lck_mtx_unlock(&buf_mtx);
4999 
5000 	if (buf_count > 0) {
5001 		qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
5002 
5003 		for (i = 0; i < buf_count; i++) {
5004 			buf_bawrite(flush_table[i]);
5005 		}
5006 	}
5007 
5008 	return total_writes;
5009 }
5010 #endif
5011