/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/buf_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/trace.h>
#include <kern/kalloc.h>
#include <sys/resourcevar.h>
#include <miscfs/specfs/specdev.h>
#include <sys/ubc.h>
#include <sys/kauth.h>
#if DIAGNOSTIC
#include <kern/assert.h>
#endif /* DIAGNOSTIC */
#include <kern/task.h>
#include <kern/zalloc.h>
#include <kern/locks.h>
#include <kern/thread.h>

#include <sys/fslog.h>          /* fslog_io_error() */
#include <sys/disk.h>           /* dk_error_description_t */

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <kern/sched_prim.h>    /* thread_block() */

#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>
#include <sys/ubc_internal.h>

#include <sys/sdt.h>

int bcleanbuf(buf_t bp, boolean_t discard);
static int brecover_data(buf_t bp);
static boolean_t incore(vnode_t vp, daddr64_t blkno);
/* timeout is in msecs */
static buf_t getnewbuf(int slpflag, int slptimeo, int *queue);
static void bremfree_locked(buf_t bp);
static void buf_reassign(buf_t bp, vnode_t newvp);
static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
static int buf_iterprepare(vnode_t vp, struct buflists *, int flags);
static void buf_itercomplete(vnode_t vp, struct buflists *, int flags);
static boolean_t buffer_cache_gc(int);
static buf_t buf_brelse_shadow(buf_t bp);
static void buf_free_meta_store(buf_t bp);

static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy,
    uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv);


int bdwrite_internal(buf_t, int);

extern void disk_conditioner_delay(buf_t, int, int, uint64_t);

/* zone allocated buffer headers */
static void bcleanbuf_thread_init(void);
static void bcleanbuf_thread(void);

static ZONE_DEFINE_TYPE(buf_hdr_zone, "buf headers", struct buf, ZC_NONE);
static int buf_hdr_count;


/*
 * Definitions for the buffer hash lists.
 */
#define BUFHASH(dvp, lbn) \
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) * bufhashtbl, invalhash;
u_long bufhash;
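
/*
 * BUFHASH hashes a (vnode, logical block) pair by dividing the vnode
 * pointer by the size of a vnode (so the always-zero low-order alignment
 * bits don't weaken the hash), adding the block number, and masking with
 * 'bufhash' (the table size minus one, as set up by hashinit()).  A
 * lookup is then roughly (illustrative sketch):
 *
 *	struct bufhashhdr *dp = BUFHASH(vp, blkno);
 *	buf_t bp = incore_locked(vp, blkno, dp);
 */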

static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp);

/* Definitions for the buffer stats. */
struct bufstats bufstats;

/* Number of delayed write buffers */
long nbdwrite = 0;
int blaundrycnt = 0;
static int boot_nbuf_headers = 0;

static TAILQ_HEAD(delayqueue, buf) delaybufqueue;

static TAILQ_HEAD(ioqueue, buf) iobufqueue;
static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
static int needbuffer;
static int need_iobuffer;

static LCK_GRP_DECLARE(buf_mtx_grp, "buffer cache");
static LCK_ATTR_DECLARE(buf_mtx_attr, 0, 0);
static LCK_MTX_DECLARE_ATTR(iobuffer_mtxp, &buf_mtx_grp, &buf_mtx_attr);
static LCK_MTX_DECLARE_ATTR(buf_mtx, &buf_mtx_grp, &buf_mtx_attr);
static LCK_MTX_DECLARE_ATTR(buf_gc_callout, &buf_mtx_grp, &buf_mtx_attr);

static uint32_t buf_busycount;

#define FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE 16
typedef struct {
	void (* callout)(int, void *);
	void *context;
} fs_buffer_cache_gc_callout_t;

fs_buffer_cache_gc_callout_t fs_callouts[FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE] = { {NULL, NULL} };

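/*
 * Return the current uptime in whole seconds.  Buffers are stamped with
 * this when queued on a free list, so the reclaim paths can compare a
 * buffer's age against the *_IS_STALE thresholds defined below.
 */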
static __inline__ int
buf_timestamp(void)
{
	struct timeval t;
	microuptime(&t);
	return (int)t.tv_sec;
}

/*
 * Insq/Remq for the buffer free lists.
 */
#define binsheadfree(bp, dp, whichq) do { \
	TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
} while (0)

#define binstailfree(bp, dp, whichq) do { \
	TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
} while (0)

#define BHASHENTCHECK(bp) \
	if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \
	        panic("%p: b_hash.le_prev is not deadbeef", (bp));

#define BLISTNONE(bp) \
	(bp)->b_hash.le_next = (struct buf *)0; \
	(bp)->b_hash.le_prev = (struct buf **)0xdeadbeef;
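
/*
 * A buffer that is on no hash chain carries the sentinel 0xdeadbeef in
 * b_hash.le_prev (planted by BLISTNONE).  BHASHENTCHECK and the panics
 * in blistenterhead()/bremhash() rely on that sentinel to catch a buffer
 * being hashed twice or unhashed while not on a chain.
 */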

/*
 * Insq/Remq for the vnode usage lists.
 */
#define bufinsvn(bp, dp)        LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define bufremvn(bp) { \
	LIST_REMOVE(bp, b_vnbufs); \
	(bp)->b_vnbufs.le_next = NOLIST; \
}

/*
 * Time in seconds before a buffer on a list is
 * considered as a stale buffer
 */
#define LRU_IS_STALE 120 /* default value for the LRU */
#define AGE_IS_STALE 60  /* default value for the AGE */
#define META_IS_STALE 180 /* default value for the BQ_META */

int lru_is_stale = LRU_IS_STALE;
int age_is_stale = AGE_IS_STALE;
int meta_is_stale = META_IS_STALE;

#define MAXLAUNDRY 10

/* LIST_INSERT_HEAD() with assertions */
static __inline__ void
blistenterhead(struct bufhashhdr * head, buf_t bp)
{
	if ((bp->b_hash.le_next = (head)->lh_first) != NULL) {
		(head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next;
	}
	(head)->lh_first = bp;
	bp->b_hash.le_prev = &(head)->lh_first;
	if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) {
		panic("blistenterhead: le_prev is deadbeef");
	}
}

static __inline__ void
binshash(buf_t bp, struct bufhashhdr *dp)
{
#if DIAGNOSTIC
	buf_t nbp;
#endif /* DIAGNOSTIC */

	BHASHENTCHECK(bp);

#if DIAGNOSTIC
	nbp = dp->lh_first;
	for (; nbp != NULL; nbp = nbp->b_hash.le_next) {
		if (nbp == bp) {
			panic("buf already in hashlist");
		}
	}
#endif /* DIAGNOSTIC */

	blistenterhead(dp, bp);
}

static __inline__ void
bremhash(buf_t bp)
{
	if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) {
		panic("bremhash le_prev is deadbeef");
	}
	if (bp->b_hash.le_next == bp) {
		panic("bremhash: next points to self");
	}

	if (bp->b_hash.le_next != NULL) {
		bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev;
	}
	*bp->b_hash.le_prev = (bp)->b_hash.le_next;
}

/*
 * buf_mtx held.
 */
static __inline__ void
bmovelaundry(buf_t bp)
{
	bp->b_whichq = BQ_LAUNDRY;
	bp->b_timestamp = buf_timestamp();
	binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
	blaundrycnt++;
}

static __inline__ void
buf_release_credentials(buf_t bp)
{
	if (IS_VALID_CRED(bp->b_rcred)) {
		kauth_cred_unref(&bp->b_rcred);
	}
	if (IS_VALID_CRED(bp->b_wcred)) {
		kauth_cred_unref(&bp->b_wcred);
	}
}


int
buf_valid(buf_t bp)
{
	if ((bp->b_flags & (B_DONE | B_DELWRI))) {
		return 1;
	}
	return 0;
}

int
buf_fromcache(buf_t bp)
{
	if ((bp->b_flags & B_CACHE)) {
		return 1;
	}
	return 0;
}

void
buf_markinvalid(buf_t bp)
{
	SET(bp->b_flags, B_INVAL);
}

void
buf_markdelayed(buf_t bp)
{
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);

		OSAddAtomicLong(1, &nbdwrite);
		buf_reassign(bp, bp->b_vp);
	}
	SET(bp->b_flags, B_DONE);
}

void
buf_markclean(buf_t bp)
{
	if (ISSET(bp->b_flags, B_DELWRI)) {
		CLR(bp->b_flags, B_DELWRI);

		OSAddAtomicLong(-1, &nbdwrite);
		buf_reassign(bp, bp->b_vp);
	}
}

void
buf_markeintr(buf_t bp)
{
	SET(bp->b_flags, B_EINTR);
}


void
buf_markaged(buf_t bp)
{
	SET(bp->b_flags, B_AGE);
}

int
buf_fua(buf_t bp)
{
	if ((bp->b_flags & B_FUA) == B_FUA) {
		return 1;
	}
	return 0;
}

void
buf_markfua(buf_t bp)
{
	SET(bp->b_flags, B_FUA);
}

#if CONFIG_PROTECT
cpx_t
bufattr_cpx(bufattr_t bap)
{
	return bap->ba_cpx;
}

void
bufattr_setcpx(bufattr_t bap, cpx_t cpx)
{
	bap->ba_cpx = cpx;
}

void
buf_setcpoff(buf_t bp, uint64_t foffset)
{
	bp->b_attr.ba_cp_file_off = foffset;
}

uint64_t
bufattr_cpoff(bufattr_t bap)
{
	return bap->ba_cp_file_off;
}

void
bufattr_setcpoff(bufattr_t bap, uint64_t foffset)
{
	bap->ba_cp_file_off = foffset;
}

#else // !CONFIG_PROTECT

uint64_t
bufattr_cpoff(bufattr_t bap __unused)
{
	return 0;
}

void
bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset)
{
	return;
}

struct cpx *
bufattr_cpx(__unused bufattr_t bap)
{
	return NULL;
}

void
bufattr_setcpx(__unused bufattr_t bap, __unused struct cpx *cpx)
{
}

#endif /* !CONFIG_PROTECT */

bufattr_t
bufattr_alloc(void)
{
	return kalloc_type(struct bufattr, Z_WAITOK | Z_ZERO);
}

void
bufattr_free(bufattr_t bap)
{
	kfree_type(struct bufattr, bap);
}

bufattr_t
bufattr_dup(bufattr_t bap)
{
	bufattr_t new_bufattr;
	new_bufattr = kalloc_type(struct bufattr, Z_WAITOK | Z_NOFAIL);

	/* Copy the provided one into the new copy */
	memcpy(new_bufattr, bap, sizeof(struct bufattr));
	return new_bufattr;
}

int
bufattr_rawencrypted(bufattr_t bap)
{
	if ((bap->ba_flags & BA_RAW_ENCRYPTED_IO)) {
		return 1;
	}
	return 0;
}

int
bufattr_throttled(bufattr_t bap)
{
	return GET_BUFATTR_IO_TIER(bap);
}

int
bufattr_passive(bufattr_t bap)
{
	if ((bap->ba_flags & BA_PASSIVE)) {
		return 1;
	}
	return 0;
}

int
bufattr_nocache(bufattr_t bap)
{
	if ((bap->ba_flags & BA_NOCACHE)) {
		return 1;
	}
	return 0;
}

int
bufattr_meta(bufattr_t bap)
{
	if ((bap->ba_flags & BA_META)) {
		return 1;
	}
	return 0;
}

void
bufattr_markmeta(bufattr_t bap)
{
	SET(bap->ba_flags, BA_META);
}

int
bufattr_delayidlesleep(bufattr_t bap)
{
	if ((bap->ba_flags & BA_DELAYIDLESLEEP)) {
		return 1;
	}
	return 0;
}

bufattr_t
buf_attr(buf_t bp)
{
	return &bp->b_attr;
}

void
buf_markstatic(buf_t bp __unused)
{
	SET(bp->b_flags, B_STATICCONTENT);
}

int
buf_static(buf_t bp)
{
	if ((bp->b_flags & B_STATICCONTENT)) {
		return 1;
	}
	return 0;
}

void
bufattr_markgreedymode(bufattr_t bap)
{
	SET(bap->ba_flags, BA_GREEDY_MODE);
}

int
bufattr_greedymode(bufattr_t bap)
{
	if ((bap->ba_flags & BA_GREEDY_MODE)) {
		return 1;
	}
	return 0;
}

void
bufattr_markisochronous(bufattr_t bap)
{
	SET(bap->ba_flags, BA_ISOCHRONOUS);
}

int
bufattr_isochronous(bufattr_t bap)
{
	if ((bap->ba_flags & BA_ISOCHRONOUS)) {
		return 1;
	}
	return 0;
}

void
bufattr_markquickcomplete(bufattr_t bap)
{
	SET(bap->ba_flags, BA_QUICK_COMPLETE);
}

int
bufattr_quickcomplete(bufattr_t bap)
{
	if ((bap->ba_flags & BA_QUICK_COMPLETE)) {
		return 1;
	}
	return 0;
}

void
bufattr_markioscheduled(bufattr_t bap)
{
	SET(bap->ba_flags, BA_IO_SCHEDULED);
}


int
bufattr_ioscheduled(bufattr_t bap)
{
	if ((bap->ba_flags & BA_IO_SCHEDULED)) {
		return 1;
	}
	return 0;
}

void
bufattr_markexpeditedmeta(bufattr_t bap)
{
	SET(bap->ba_flags, BA_EXPEDITED_META_IO);
}

int
bufattr_expeditedmeta(bufattr_t bap)
{
	if ((bap->ba_flags & BA_EXPEDITED_META_IO)) {
		return 1;
	}
	return 0;
}

int
bufattr_willverify(bufattr_t bap)
{
	if ((bap->ba_flags & BA_WILL_VERIFY)) {
		return 1;
	}
	return 0;
}

errno_t
buf_error(buf_t bp)
{
	return bp->b_error;
}

void
buf_seterror(buf_t bp, errno_t error)
{
	if ((bp->b_error = error)) {
		SET(bp->b_flags, B_ERROR);
	} else {
		CLR(bp->b_flags, B_ERROR);
	}
}

void
buf_setflags(buf_t bp, int32_t flags)
{
	SET(bp->b_flags, (flags & BUF_X_WRFLAGS));
}

void
buf_clearflags(buf_t bp, int32_t flags)
{
	CLR(bp->b_flags, (flags & BUF_X_WRFLAGS));
}

int32_t
buf_flags(buf_t bp)
{
	return bp->b_flags & BUF_X_RDFLAGS;
}

void
buf_reset(buf_t bp, int32_t io_flags)
{
	CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA));
	SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE)));

	bp->b_error = 0;
}

uint32_t
buf_count(buf_t bp)
{
	return bp->b_bcount;
}

void
buf_setcount(buf_t bp, uint32_t bcount)
{
	bp->b_bcount = bcount;
}

uint32_t
buf_size(buf_t bp)
{
	return bp->b_bufsize;
}

void
buf_setsize(buf_t bp, uint32_t bufsize)
{
	bp->b_bufsize = bufsize;
}

uint32_t
buf_resid(buf_t bp)
{
	return bp->b_resid;
}

void
buf_setresid(buf_t bp, uint32_t resid)
{
	bp->b_resid = resid;
}

uint32_t
buf_dirtyoff(buf_t bp)
{
	return bp->b_dirtyoff;
}

uint32_t
buf_dirtyend(buf_t bp)
{
	return bp->b_dirtyend;
}

void
buf_setdirtyoff(buf_t bp, uint32_t dirtyoff)
{
	bp->b_dirtyoff = dirtyoff;
}

void
buf_setdirtyend(buf_t bp, uint32_t dirtyend)
{
	bp->b_dirtyend = dirtyend;
}

uintptr_t
buf_dataptr(buf_t bp)
{
	return bp->b_datap;
}

void
buf_setdataptr(buf_t bp, uintptr_t data)
{
	bp->b_datap = data;
}

vnode_t
buf_vnode(buf_t bp)
{
	return bp->b_vp;
}

void
buf_setvnode(buf_t bp, vnode_t vp)
{
	bp->b_vp = vp;
}


void *
buf_callback(buf_t bp)
{
	if (!(bp->b_flags & B_CALL)) {
		return (void *) NULL;
	}

	return (void *)bp->b_iodone;
}


errno_t
buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction)
{
	assert(!ISSET(bp->b_flags, B_FILTER) && ISSET(bp->b_lflags, BL_BUSY));

	if (callback) {
		bp->b_flags |= (B_CALL | B_ASYNC);
	} else {
		bp->b_flags &= ~B_CALL;
	}
	bp->b_transaction = transaction;
	bp->b_iodone = callback;

	return 0;
}

errno_t
buf_setupl(buf_t bp, upl_t upl, uint32_t offset)
{
	if (!(bp->b_lflags & BL_IOBUF)) {
		return EINVAL;
	}

	if (upl) {
		bp->b_flags |= B_CLUSTER;
	} else {
		bp->b_flags &= ~B_CLUSTER;
	}
	bp->b_upl = upl;
	bp->b_uploffset = offset;

	return 0;
}

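/*
 * Clone a byte sub-range of 'bp' as its own buf_t for a partial I/O.
 * For example (names here are purely illustrative), to issue 'io_size'
 * bytes starting 'io_offset' bytes into an existing buffer:
 *
 *	io_bp = buf_clone(bp, io_offset, io_size, my_iodone, my_arg);
 *
 * The clone shares bp's UPL or data pointer; NULL is returned when the
 * range is out of bounds or, for B_CLUSTER buffers, not page aligned.
 */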
buf_t
buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg)
{
	buf_t io_bp;
	int add1, add2;

	if (io_offset < 0 || io_size < 0) {
		return NULL;
	}

	if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount) {
		return NULL;
	}

	if (bp->b_flags & B_CLUSTER) {
		if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK)) {
			return NULL;
		}

		if (os_add_overflow(io_offset, io_size, &add1) || os_add_overflow(add1, bp->b_uploffset, &add2)) {
			return NULL;
		}
		if ((add2 & PAGE_MASK) && ((uint32_t)add1 < (uint32_t)bp->b_bcount)) {
			return NULL;
		}
	}
	io_bp = alloc_io_buf(bp->b_vp, 0);

	io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA);

	if (iodone) {
		io_bp->b_transaction = arg;
		io_bp->b_iodone = iodone;
		io_bp->b_flags |= B_CALL;
	}
	if (bp->b_flags & B_CLUSTER) {
		io_bp->b_upl = bp->b_upl;
		io_bp->b_uploffset = bp->b_uploffset + io_offset;
	} else {
		io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset);
	}
	io_bp->b_bcount = io_size;

	return io_bp;
}


int
buf_shadow(buf_t bp)
{
	if (bp->b_lflags & BL_SHADOW) {
		return 1;
	}
	return 0;
}


buf_t
buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
{
	return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1);
}

buf_t
buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
{
	return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0);
}


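/*
 * Common implementation for the two entry points above: build a shadow
 * buf_t that aliases the meta-data buffer 'bp'.  Unless force_copy or
 * external_storage dictates otherwise, the shadow shares bp's data;
 * shadows are chained through b_shadow and counted in b_shadow_ref,
 * all under buf_mtx.
 */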
static buf_t
buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv)
{
	buf_t io_bp;

	KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0);

	if (!(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) {
		KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0);
		return NULL;
	}
#ifdef BUF_MAKE_PRIVATE
	if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0) {
		panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref);
	}
#endif
	io_bp = alloc_io_buf(bp->b_vp, priv);

	io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA);
	io_bp->b_blkno = bp->b_blkno;
	io_bp->b_lblkno = bp->b_lblkno;
	io_bp->b_lblksize = bp->b_lblksize;

	if (iodone) {
		io_bp->b_transaction = arg;
		io_bp->b_iodone = iodone;
		io_bp->b_flags |= B_CALL;
	}
	if (force_copy == FALSE) {
		io_bp->b_bcount = bp->b_bcount;
		io_bp->b_bufsize = bp->b_bufsize;

		if (external_storage) {
			io_bp->b_datap = external_storage;
#ifdef BUF_MAKE_PRIVATE
			io_bp->b_data_store = NULL;
#endif
		} else {
			io_bp->b_datap = bp->b_datap;
#ifdef BUF_MAKE_PRIVATE
			io_bp->b_data_store = bp;
#endif
		}
		*(buf_t *)(&io_bp->b_orig) = bp;

		lck_mtx_lock_spin(&buf_mtx);

		io_bp->b_lflags |= BL_SHADOW;
		io_bp->b_shadow = bp->b_shadow;
		bp->b_shadow = io_bp;
		bp->b_shadow_ref++;

#ifdef BUF_MAKE_PRIVATE
		if (external_storage) {
			io_bp->b_lflags |= BL_EXTERNAL;
		} else {
			bp->b_data_ref++;
		}
#endif
		lck_mtx_unlock(&buf_mtx);
	} else {
		if (external_storage) {
#ifdef BUF_MAKE_PRIVATE
			io_bp->b_lflags |= BL_EXTERNAL;
#endif
			io_bp->b_bcount = bp->b_bcount;
			io_bp->b_bufsize = bp->b_bufsize;
			io_bp->b_datap = external_storage;
		} else {
			allocbuf(io_bp, bp->b_bcount);

			io_bp->b_lflags |= BL_IOBUF_ALLOC;
		}
		bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount);

#ifdef BUF_MAKE_PRIVATE
		io_bp->b_data_store = NULL;
#endif
	}
	KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0);

	return io_bp;
}


#ifdef BUF_MAKE_PRIVATE
errno_t
buf_make_private(buf_t bp)
{
	buf_t ds_bp;
	buf_t t_bp;
	struct buf my_buf;

	KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0);

	if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) {
		KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
		return EINVAL;
	}
	my_buf.b_flags = B_META;
	my_buf.b_datap = (uintptr_t)NULL;
	allocbuf(&my_buf, bp->b_bcount);

	bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount);

	lck_mtx_lock_spin(&buf_mtx);

	for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
		if (!ISSET(bp->b_lflags, BL_EXTERNAL)) {
			break;
		}
	}
	ds_bp = t_bp;

	if (ds_bp == NULL && bp->b_data_ref) {
		panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL");
	}

	if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0)) {
		panic("buf_make_private: ref_count == 0 && ds_bp != NULL");
	}

	if (ds_bp == NULL) {
		lck_mtx_unlock(&buf_mtx);

		buf_free_meta_store(&my_buf);

		KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
		return EINVAL;
	}
	for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
		if (!ISSET(t_bp->b_lflags, BL_EXTERNAL)) {
			t_bp->b_data_store = ds_bp;
		}
	}
	ds_bp->b_data_ref = bp->b_data_ref;

	bp->b_data_ref = 0;
	bp->b_datap = my_buf.b_datap;

	lck_mtx_unlock(&buf_mtx);

	KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0);
	return 0;
}
#endif


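/*
 * Install (or, when 'filter' is NULL, remove) an I/O completion filter on
 * a busy buffer, optionally returning the previously installed callback
 * and transaction so the caller can chain to them from its own filter.
 */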
void
buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction,
    void (**old_iodone)(buf_t, void *), void **old_transaction)
{
	assert(ISSET(bp->b_lflags, BL_BUSY));

	if (old_iodone) {
		*old_iodone = bp->b_iodone;
	}
	if (old_transaction) {
		*old_transaction = bp->b_transaction;
	}

	bp->b_transaction = transaction;
	bp->b_iodone = filter;
	if (filter) {
		bp->b_flags |= B_FILTER;
	} else {
		bp->b_flags &= ~B_FILTER;
	}
}


daddr64_t
buf_blkno(buf_t bp)
{
	return bp->b_blkno;
}

daddr64_t
buf_lblkno(buf_t bp)
{
	return bp->b_lblkno;
}

uint32_t
buf_lblksize(buf_t bp)
{
	return bp->b_lblksize;
}

void
buf_setblkno(buf_t bp, daddr64_t blkno)
{
	bp->b_blkno = blkno;
}

void
buf_setlblkno(buf_t bp, daddr64_t lblkno)
{
	bp->b_lblkno = lblkno;
}

void
buf_setlblksize(buf_t bp, uint32_t lblksize)
{
	bp->b_lblksize = lblksize;
}

dev_t
buf_device(buf_t bp)
{
	return bp->b_dev;
}

errno_t
buf_setdevice(buf_t bp, vnode_t vp)
{
	if ((vp->v_type != VBLK) && (vp->v_type != VCHR)) {
		return EINVAL;
	}
	bp->b_dev = vp->v_rdev;

	return 0;
}


void *
buf_drvdata(buf_t bp)
{
	return bp->b_drvdata;
}

void
buf_setdrvdata(buf_t bp, void *drvdata)
{
	bp->b_drvdata = drvdata;
}

void *
buf_fsprivate(buf_t bp)
{
	return bp->b_fsprivate;
}

void
buf_setfsprivate(buf_t bp, void *fsprivate)
{
	bp->b_fsprivate = fsprivate;
}

kauth_cred_t
buf_rcred(buf_t bp)
{
	return bp->b_rcred;
}

kauth_cred_t
buf_wcred(buf_t bp)
{
	return bp->b_wcred;
}

void *
buf_upl(buf_t bp)
{
	return bp->b_upl;
}

uint32_t
buf_uploffset(buf_t bp)
{
	return (uint32_t)(bp->b_uploffset);
}

proc_t
buf_proc(buf_t bp)
{
	return bp->b_proc;
}


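/*
 * Common helper for buf_map()/buf_map_range(): hand back a kernel
 * virtual address for the buffer's data.  Non-cluster buffers are
 * already mapped (b_datap); B_CLUSTER buffers get their UPL mapped
 * here, either in full (legacy mode) or just the
 * [b_uploffset, b_uploffset + b_bcount) range.
 */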
static errno_t
buf_map_range_internal(buf_t bp, caddr_t *io_addr, boolean_t legacymode)
{
	buf_t real_bp;
	vm_offset_t vaddr;
	kern_return_t kret;

	if (!(bp->b_flags & B_CLUSTER)) {
		*io_addr = (caddr_t)bp->b_datap;
		return 0;
	}
	real_bp = (buf_t)(bp->b_real_bp);

	if (real_bp && real_bp->b_datap) {
		/*
		 * b_real_bp is only valid if B_CLUSTER is SET
		 * if it's non-zero, then someone did a cluster_bp call
		 * if the backing physical pages were already mapped
		 * in before the call to cluster_bp (non-zero b_datap),
		 * then we just use that mapping
		 */
		*io_addr = (caddr_t)real_bp->b_datap;
		return 0;
	}

	if (legacymode) {
		kret = ubc_upl_map(bp->b_upl, &vaddr); /* Map it in */
		if (kret == KERN_SUCCESS) {
			vaddr += bp->b_uploffset;
		}
	} else {
		kret = ubc_upl_map_range(bp->b_upl, bp->b_uploffset, bp->b_bcount, VM_PROT_DEFAULT, &vaddr); /* Map it in */
	}

	if (kret != KERN_SUCCESS) {
		*io_addr = NULL;

		return ENOMEM;
	}

	*io_addr = (caddr_t)vaddr;

	return 0;
}

errno_t
buf_map_range(buf_t bp, caddr_t *io_addr)
{
	return buf_map_range_internal(bp, io_addr, false);
}

errno_t
buf_map(buf_t bp, caddr_t *io_addr)
{
	return buf_map_range_internal(bp, io_addr, true);
}

static errno_t
buf_unmap_range_internal(buf_t bp, boolean_t legacymode)
{
	buf_t real_bp;
	kern_return_t kret;

	if (!(bp->b_flags & B_CLUSTER)) {
		return 0;
	}
	/*
	 * see buf_map for the explanation
	 */
	real_bp = (buf_t)(bp->b_real_bp);

	if (real_bp && real_bp->b_datap) {
		return 0;
	}

	if ((bp->b_lflags & BL_IOBUF) &&
	    ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) {
		/*
		 * ignore pageins... the 'right' thing will
		 * happen due to the way we handle speculative
		 * clusters...
		 *
		 * when we commit these pages, we'll hit
		 * it with UPL_COMMIT_INACTIVE which
		 * will clear the reference bit that got
		 * turned on when we touched the mapping
		 */
		bp->b_flags |= B_AGE;
	}

	if (legacymode) {
		kret = ubc_upl_unmap(bp->b_upl);
	} else {
		kret = ubc_upl_unmap_range(bp->b_upl, bp->b_uploffset, bp->b_bcount);
	}

	if (kret != KERN_SUCCESS) {
		return EINVAL;
	}
	return 0;
}

errno_t
buf_unmap_range(buf_t bp)
{
	return buf_unmap_range_internal(bp, false);
}

errno_t
buf_unmap(buf_t bp)
{
	return buf_unmap_range_internal(bp, true);
}


void
buf_clear(buf_t bp)
{
	caddr_t baddr;

	if (buf_map(bp, &baddr) == 0) {
		bzero(baddr, bp->b_bcount);
		buf_unmap(bp);
	}
	bp->b_resid = 0;
}

/*
 * Read or write a buffer that is not contiguous on disk.
 * buffer is marked done/error at the conclusion
 */
static int
buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes)
{
	vnode_t vp = buf_vnode(bp);
	buf_t io_bp;                     /* For reading or writing a single block */
	int io_direction;
	int io_resid;
	size_t io_contig_bytes;
	daddr64_t io_blkno;
	int error = 0;
	int bmap_flags;

	/*
	 * save our starting point... the bp was already mapped
	 * in buf_strategy before we got called
	 * no sense doing it again.
	 */
	io_blkno = bp->b_blkno;
	/*
	 * Make sure we redo this mapping for the next I/O
	 * i.e. this can never be a 'permanent' mapping
	 */
	bp->b_blkno = bp->b_lblkno;

	/*
	 * Get an io buffer to do the deblocking
	 */
	io_bp = alloc_io_buf(devvp, 0);

	io_bp->b_lblkno = bp->b_lblkno;
	io_bp->b_lblksize = bp->b_lblksize;
	io_bp->b_datap = bp->b_datap;
	io_resid = bp->b_bcount;
	io_direction = bp->b_flags & B_READ;
	io_contig_bytes = contig_bytes;

	if (bp->b_flags & B_READ) {
		bmap_flags = VNODE_READ;
	} else {
		bmap_flags = VNODE_WRITE;
	}

	for (;;) {
		if (io_blkno == -1) {
			/*
			 * this is unexpected, but we'll allow for it
			 */
			bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes);
		} else {
			io_bp->b_bcount = (uint32_t)io_contig_bytes;
			io_bp->b_bufsize = (uint32_t)io_contig_bytes;
			io_bp->b_resid = (uint32_t)io_contig_bytes;
			io_bp->b_blkno = io_blkno;

			buf_reset(io_bp, io_direction);

			/*
			 * Call the device to do the I/O and wait for it.  Make sure the appropriate party is charged for write
			 */

			if (!ISSET(bp->b_flags, B_READ)) {
				OSAddAtomic(1, &devvp->v_numoutput);
			}

			if ((error = VNOP_STRATEGY(io_bp))) {
				break;
			}
			if ((error = (int)buf_biowait(io_bp))) {
				break;
			}
			if (io_bp->b_resid) {
				io_resid -= (io_contig_bytes - io_bp->b_resid);
				break;
			}
		}
		if ((io_resid -= io_contig_bytes) == 0) {
			break;
		}
		f_offset += io_contig_bytes;
		io_bp->b_datap += io_contig_bytes;

		/*
		 * Map the current position to a physical block number
		 */
		if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL))) {
			break;
		}
	}
	buf_free(io_bp);

	if (error) {
		buf_seterror(bp, error);
	}
	bp->b_resid = io_resid;
	/*
	 * This I/O is now complete
	 */
	buf_biodone(bp);

	return error;
}


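/*
 * Generic VNOP_STRATEGY for filesystems backed by a disk device: if the
 * buffer hasn't been block-mapped yet (b_blkno == b_lblkno), translate
 * the logical block to a physical one via VNOP_BLKTOOFF/VNOP_BLOCKMAP,
 * then pass the request down to the device's own strategy routine.
 */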
/*
 * struct vnop_strategy_args {
 *      struct buf *a_bp;
 * } *ap;
 */
errno_t
buf_strategy(vnode_t devvp, void *ap)
{
	buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp;
	vnode_t vp = bp->b_vp;
	int bmap_flags;
	errno_t error;
#if CONFIG_DTRACE
	int dtrace_io_start_flag = 0;    /* We only want to trip the io:::start
	                                  * probe once, with the true physical
	                                  * block in place (b_blkno)
	                                  */

#endif

	if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) {
		panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK");
	}
	/*
	 * associate the physical device
	 * with this buf_t even if we don't
	 * end up issuing the I/O...
	 */
	bp->b_dev = devvp->v_rdev;

	if (bp->b_flags & B_READ) {
		bmap_flags = VNODE_READ;
	} else {
		bmap_flags = VNODE_WRITE;
	}

	if (!(bp->b_flags & B_CLUSTER)) {
		if ((bp->b_upl)) {
			/*
			 * we have a UPL associated with this bp
			 * go through cluster_bp which knows how
			 * to deal with filesystem block sizes
			 * that aren't equal to the page size
			 */
			DTRACE_IO1(start, buf_t, bp);
			return cluster_bp(bp);
		}
		if (bp->b_blkno == bp->b_lblkno) {
			off_t f_offset;
			size_t contig_bytes;

			if (bp->b_lblksize && bp->b_lblkno >= 0) {
				f_offset = bp->b_lblkno * bp->b_lblksize;
			} else if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) {
				DTRACE_IO1(start, buf_t, bp);
				buf_seterror(bp, error);
				buf_biodone(bp);

				return error;
			}

			if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) {
				DTRACE_IO1(start, buf_t, bp);
				buf_seterror(bp, error);
				buf_biodone(bp);

				return error;
			}

			DTRACE_IO1(start, buf_t, bp);
#if CONFIG_DTRACE
			dtrace_io_start_flag = 1;
#endif /* CONFIG_DTRACE */

			if ((bp->b_blkno == -1) || (contig_bytes == 0)) {
				/* Set block number to force biodone later */
				bp->b_blkno = -1;
				buf_clear(bp);
			} else if (contig_bytes < (size_t)bp->b_bcount) {
				return buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes);
			}
		}

#if CONFIG_DTRACE
		if (dtrace_io_start_flag == 0) {
			DTRACE_IO1(start, buf_t, bp);
			dtrace_io_start_flag = 1;
		}
#endif /* CONFIG_DTRACE */

		if (bp->b_blkno == -1) {
			buf_biodone(bp);
			return 0;
		}
	}

#if CONFIG_DTRACE
	if (dtrace_io_start_flag == 0) {
		DTRACE_IO1(start, buf_t, bp);
	}
#endif /* CONFIG_DTRACE */

#if CONFIG_PROTECT
	/* Capture f_offset in the bufattr */
	cpx_t cpx = bufattr_cpx(buf_attr(bp));
	if (cpx) {
		/* No need to go here for older EAs */
		if (cpx_use_offset_for_iv(cpx) && !cpx_synthetic_offset_for_iv(cpx)) {
			off_t f_offset;

			/*
			 * this assert should be changed if cluster_io ever
			 * changes its logical block size.
			 */
			assert((bp->b_lblksize == CLUSTER_IO_BLOCK_SIZE) || !(bp->b_flags & B_CLUSTER));

			if (bp->b_lblksize && bp->b_lblkno >= 0) {
				f_offset = bp->b_lblkno * bp->b_lblksize;
			} else if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset))) {
				return error;
			}

			/*
			 * Attach the file offset to this buffer.  The
			 * bufattr attributes will be passed down the stack
			 * until they reach the storage driver (whether
			 * IOFlashStorage, ASP, or IONVMe).  The driver
			 * will retain the offset in a local variable when it
			 * issues its I/Os to the NAND controller.
			 *
			 * Note that LwVM may end up splitting this I/O
			 * into sub-I/Os if it crosses a chunk boundary.  In this
			 * case, LwVM will update this field when it dispatches
			 * each I/O to IOFlashStorage.  But from our perspective
			 * we have only issued a single I/O.
			 *
			 * In the case of APFS we do not bounce through another
			 * intermediate layer (such as CoreStorage).  APFS will
			 * issue the I/Os directly to the block device / IOMedia
			 * via buf_strategy on the specfs node.
			 */
			buf_setcpoff(bp, f_offset);
			CP_DEBUG((CPDBG_OFFSET_IO | DBG_FUNC_NONE), (uint32_t) f_offset, (uint32_t) bp->b_lblkno, (uint32_t) bp->b_blkno, (uint32_t) bp->b_bcount, 0);
		}
	}
#endif

	/*
	 * we can issue the I/O because...
	 * either B_CLUSTER is set which
	 * means that the I/O is properly set
	 * up to be a multiple of the page size, or
	 * we were able to successfully set up the
	 * physical block mapping
	 */
	error = VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap);
	DTRACE_FSINFO(strategy, vnode_t, vp);
	return error;
}



buf_t
buf_alloc(vnode_t vp)
{
	return alloc_io_buf(vp, is_vm_privileged());
}

void
buf_free(buf_t bp)
{
	free_io_buf(bp);
}


/*
 * iterate buffers for the specified vp.
 *   if BUF_SCAN_DIRTY is set, do the dirty list
 *   if BUF_SCAN_CLEAN is set, do the clean list
 *   if neither flag is set, default to BUF_SCAN_DIRTY
 *   if BUF_NOTIFY_BUSY is set, call the callout function using a NULL bp for busy pages
 */
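/*
 * A minimal callout (hypothetical example) just returns one of the BUF_*
 * disposition codes: BUF_RETURNED hands the buffer back to buf_brelse(),
 * BUF_CLAIMED means the callout kept it, and the *_DONE variants also
 * terminate the scan.
 *
 *	static int
 *	drop_buf(buf_t bp, __unused void *arg)
 *	{
 *		return BUF_RETURNED;
 *	}
 *
 *	buf_iterate(vp, drop_buf, BUF_SCAN_CLEAN | BUF_SCAN_DIRTY, NULL);
 */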

struct buf_iterate_info_t {
	int flag;
	struct buflists *listhead;
};

void
buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg)
{
	buf_t bp;
	int retval;
	struct buflists local_iterblkhd;
	int lock_flags = BAC_NOWAIT | BAC_REMOVE;
	int notify_busy = flags & BUF_NOTIFY_BUSY;
	struct buf_iterate_info_t list[2];
	int num_lists, i;

	if (flags & BUF_SKIP_LOCKED) {
		lock_flags |= BAC_SKIP_LOCKED;
	}
	if (flags & BUF_SKIP_NONLOCKED) {
		lock_flags |= BAC_SKIP_NONLOCKED;
	}

	if (!(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN))) {
		flags |= BUF_SCAN_DIRTY;
	}

	num_lists = 0;

	if (flags & BUF_SCAN_DIRTY) {
		list[num_lists].flag = VBI_DIRTY;
		list[num_lists].listhead = &vp->v_dirtyblkhd;
		num_lists++;
	}
	if (flags & BUF_SCAN_CLEAN) {
		list[num_lists].flag = VBI_CLEAN;
		list[num_lists].listhead = &vp->v_cleanblkhd;
		num_lists++;
	}

	for (i = 0; i < num_lists; i++) {
		lck_mtx_lock(&buf_mtx);

		if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) {
			lck_mtx_unlock(&buf_mtx);
			continue;
		}
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);
			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(list[i].listhead, bp, b_vnbufs);

			if (buf_acquire_locked(bp, lock_flags, 0, 0)) {
				if (notify_busy) {
					bp = NULL;
				} else {
					continue;
				}
			}

			lck_mtx_unlock(&buf_mtx);

			retval = callout(bp, arg);

			switch (retval) {
			case BUF_RETURNED:
				if (bp) {
					buf_brelse(bp);
				}
				break;
			case BUF_CLAIMED:
				break;
			case BUF_RETURNED_DONE:
				if (bp) {
					buf_brelse(bp);
				}
				lck_mtx_lock(&buf_mtx);
				goto out;
			case BUF_CLAIMED_DONE:
				lck_mtx_lock(&buf_mtx);
				goto out;
			}
			lck_mtx_lock(&buf_mtx);
		} /* while list has more nodes */
out:
		buf_itercomplete(vp, &local_iterblkhd, list[i].flag);
		lck_mtx_unlock(&buf_mtx);
	} /* for each list */
} /* buf_iterate */


/*
 * Flush out and invalidate all buffers associated with a vnode.
 */
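/*
 * Both the clean and dirty lists are walked.  Any time buf_mtx has to be
 * dropped (to brelse or write a buffer, or to wait on a busy one), new
 * buffers may slip onto the lists, so the walk repeats until a full pass
 * over both lists completes without the mutex ever being released.
 */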
int
buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo)
{
	buf_t bp;
	int aflags;
	int error = 0;
	int must_rescan = 1;
	struct buflists local_iterblkhd;


	if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) {
		return 0;
	}

	lck_mtx_lock(&buf_mtx);

	for (;;) {
		if (must_rescan == 0) {
			/*
			 * the lists may not be empty, but all that's left at this
			 * point are metadata or B_LOCKED buffers which are being
			 * skipped... we know this because we made it through both
			 * the clean and dirty lists without dropping buf_mtx...
			 * each time we drop buf_mtx we bump "must_rescan"
			 */
			break;
		}
		if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) {
			break;
		}
		must_rescan = 0;
		/*
		 * iterate the clean list
		 */
		if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) {
			goto try_dirty_list;
		}
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);

			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);

			/*
			 * some filesystems distinguish meta data blocks with a negative logical block #
			 */
			if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) {
				continue;
			}

			aflags = BAC_REMOVE;

			if (!(flags & BUF_INVALIDATE_LOCKED)) {
				aflags |= BAC_SKIP_LOCKED;
			}

			if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) {
				if (error == EDEADLK) {
					/*
					 * this buffer was marked B_LOCKED...
					 * we didn't drop buf_mtx, so we
					 * don't need to rescan
					 */
					continue;
				}
				if (error == EAGAIN) {
					/*
					 * found a busy buffer... we blocked and
					 * dropped buf_mtx, so we're going to
					 * need to rescan after this pass is completed
					 */
					must_rescan++;
					continue;
				}
				/*
				 * got some kind of 'real' error out of the msleep
				 * in buf_acquire_locked, terminate the scan and return the error
				 */
				buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);

				lck_mtx_unlock(&buf_mtx);
				return error;
			}
			lck_mtx_unlock(&buf_mtx);

			if (bp->b_flags & B_LOCKED) {
				KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0);
			}

			CLR(bp->b_flags, B_LOCKED);
			SET(bp->b_flags, B_INVAL);
			buf_brelse(bp);

			lck_mtx_lock(&buf_mtx);

			/*
			 * by dropping buf_mtx, we allow new
			 * buffers to be added to the vnode list(s)
			 * we'll have to rescan at least once more
			 * if the queues aren't empty
			 */
			must_rescan++;
		}
		buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);

try_dirty_list:
		/*
		 * Now iterate on dirty blks
		 */
		if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) {
			continue;
		}
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);

			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);

			/*
			 * some filesystems distinguish meta data blocks with a negative logical block #
			 */
			if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) {
				continue;
			}

			aflags = BAC_REMOVE;

			if (!(flags & BUF_INVALIDATE_LOCKED)) {
				aflags |= BAC_SKIP_LOCKED;
			}

			if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) {
				if (error == EDEADLK) {
					/*
					 * this buffer was marked B_LOCKED...
					 * we didn't drop buf_mtx, so we
					 * don't need to rescan
					 */
					continue;
				}
				if (error == EAGAIN) {
					/*
					 * found a busy buffer... we blocked and
					 * dropped buf_mtx, so we're going to
					 * need to rescan after this pass is completed
					 */
					must_rescan++;
					continue;
				}
				/*
				 * got some kind of 'real' error out of the msleep
				 * in buf_acquire_locked, terminate the scan and return the error
				 */
				buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);

				lck_mtx_unlock(&buf_mtx);
				return error;
			}
			lck_mtx_unlock(&buf_mtx);

			if (bp->b_flags & B_LOCKED) {
				KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0);
			}

			CLR(bp->b_flags, B_LOCKED);
			SET(bp->b_flags, B_INVAL);

			if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) {
				(void) VNOP_BWRITE(bp);
			} else {
				buf_brelse(bp);
			}

			lck_mtx_lock(&buf_mtx);
			/*
			 * by dropping buf_mtx, we allow new
			 * buffers to be added to the vnode list(s)
			 * we'll have to rescan at least once more
			 * if the queues aren't empty
			 */
			must_rescan++;
		}
		buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
	}
	lck_mtx_unlock(&buf_mtx);

	return 0;
}

void
buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg)
{
	(void) buf_flushdirtyblks_skipinfo(vp, wait, flags, msg);
	return;
}

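/*
 * Like buf_flushdirtyblks(), but the return value reports whether any
 * B_LOCKED buffers had to be skipped during the scan, for callers that
 * care about buffers currently pinned by the filesystem.
 */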
int
buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg)
{
	buf_t bp;
	int writes_issued = 0;
	errno_t error;
	int busy = 0;
	struct buflists local_iterblkhd;
	int lock_flags = BAC_NOWAIT | BAC_REMOVE;
	int any_locked = 0;

	if (flags & BUF_SKIP_LOCKED) {
		lock_flags |= BAC_SKIP_LOCKED;
	}
	if (flags & BUF_SKIP_NONLOCKED) {
		lock_flags |= BAC_SKIP_NONLOCKED;
	}
loop:
	lck_mtx_lock(&buf_mtx);

	if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) {
		while (!LIST_EMPTY(&local_iterblkhd)) {
			bp = LIST_FIRST(&local_iterblkhd);
			LIST_REMOVE(bp, b_vnbufs);
			LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);

			if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY) {
				busy++;
			}
			if (error) {
				/*
				 * If we passed in BUF_SKIP_LOCKED or BUF_SKIP_NONLOCKED,
				 * we may want to do something differently if a locked or unlocked
1893 * buffer was encountered (depending on the arg specified).
1894 * In this case, we know that one of those two was set, and the
1895 * buf acquisition failed above.
1896 *
1897 * If it failed with EDEADLK, then save state which can be emitted
1898 * later on to the caller. Most callers should not care.
1899 */
1900 if (error == EDEADLK) {
1901 any_locked++;
1902 }
1903 continue;
1904 }
1905 lck_mtx_unlock(&buf_mtx);
1906
1907 bp->b_flags &= ~B_LOCKED;
1908
1909 /*
1910 * Wait for I/O associated with indirect blocks to complete,
1911 * since there is no way to quickly wait for them below.
1912 */
1913 if ((bp->b_vp == vp) || (wait == 0)) {
1914 (void) buf_bawrite(bp);
1915 } else {
1916 (void) VNOP_BWRITE(bp);
1917 }
1918 writes_issued++;
1919
1920 lck_mtx_lock(&buf_mtx);
1921 }
1922 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1923 }
1924 lck_mtx_unlock(&buf_mtx);
1925
1926 if (wait) {
1927 (void)vnode_waitforwrites(vp, 0, 0, 0, msg);
1928
1929 if (vp->v_dirtyblkhd.lh_first && busy) {
1930 /*
1931 * we had one or more BUSY buffers on
1932 * the dirtyblock list... most likely
1933 * these are due to delayed writes that
1934 * were moved to the bclean queue but
1935 * have not yet been 'written'.
1936 * if we issued some writes on the
1937 * previous pass, we try again immediately
1938 * if we didn't, we'll sleep for some time
1939 * to allow the state to change...
1940 */
1941 if (writes_issued == 0) {
1942 (void)tsleep((caddr_t)&vp->v_numoutput,
1943 PRIBIO + 1, "vnode_flushdirtyblks", hz / 20);
1944 }
1945 writes_issued = 0;
1946 busy = 0;
1947
1948 goto loop;
1949 }
1950 }
1951
1952 return any_locked;
1953 }
1954
1955
1956 /*
1957 * called with buf_mtx held...
1958 * this lock protects the queue manipulation
1959 */
1960 static int
buf_iterprepare(vnode_t vp,struct buflists * iterheadp,int flags)1961 buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags)
1962 {
1963 struct buflists * listheadp;
1964
1965 if (flags & VBI_DIRTY) {
1966 listheadp = &vp->v_dirtyblkhd;
1967 } else {
1968 listheadp = &vp->v_cleanblkhd;
1969 }
1970
1971 while (vp->v_iterblkflags & VBI_ITER) {
1972 vp->v_iterblkflags |= VBI_ITERWANT;
1973 msleep(&vp->v_iterblkflags, &buf_mtx, 0, "buf_iterprepare", NULL);
1974 }
1975 if (LIST_EMPTY(listheadp)) {
1976 LIST_INIT(iterheadp);
1977 return EINVAL;
1978 }
1979 vp->v_iterblkflags |= VBI_ITER;
1980
1981 iterheadp->lh_first = listheadp->lh_first;
1982 listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first;
1983 LIST_INIT(listheadp);
1984
1985 return 0;
1986 }
1987
1988 /*
1989 * called with buf_mtx held...
1990 * this lock protects the queue manipulation
1991 */
1992 static void
buf_itercomplete(vnode_t vp,struct buflists * iterheadp,int flags)1993 buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags)
1994 {
1995 struct buflists * listheadp;
1996 buf_t bp;
1997
1998 if (flags & VBI_DIRTY) {
1999 listheadp = &vp->v_dirtyblkhd;
2000 } else {
2001 listheadp = &vp->v_cleanblkhd;
2002 }
2003
2004 while (!LIST_EMPTY(iterheadp)) {
2005 bp = LIST_FIRST(iterheadp);
2006 LIST_REMOVE(bp, b_vnbufs);
2007 LIST_INSERT_HEAD(listheadp, bp, b_vnbufs);
2008 }
2009 vp->v_iterblkflags &= ~VBI_ITER;
2010
2011 if (vp->v_iterblkflags & VBI_ITERWANT) {
2012 vp->v_iterblkflags &= ~VBI_ITERWANT;
2013 wakeup(&vp->v_iterblkflags);
2014 }
2015 }
2016
2017
2018 static void
bremfree_locked(buf_t bp)2019 bremfree_locked(buf_t bp)
2020 {
2021 struct bqueues *dp = NULL;
2022 int whichq;
2023
2024 whichq = bp->b_whichq;
2025
2026 if (whichq == -1) {
2027 if (bp->b_shadow_ref == 0) {
2028 panic("bremfree_locked: %p not on freelist", bp);
2029 }
2030 /*
2031 * there are clones pointing to 'bp'...
2032 * therefore, it was not put on a freelist
2033 * when buf_brelse was last called on 'bp'
2034 */
2035 return;
2036 }
2037 /*
2038 * We only calculate the head of the freelist when removing
2039 * the last element of the list as that is the only time that
2040 * it is needed (e.g. to reset the tail pointer).
2041 *
2042 * NB: This makes an assumption about how tailq's are implemented.
2043 */
2044 if (bp->b_freelist.tqe_next == NULL) {
2045 dp = &bufqueues[whichq];
2046
2047 if (dp->tqh_last != &bp->b_freelist.tqe_next) {
2048 panic("bremfree: lost tail");
2049 }
2050 }
2051 TAILQ_REMOVE(dp, bp, b_freelist);
2052
2053 if (whichq == BQ_LAUNDRY) {
2054 blaundrycnt--;
2055 }
2056
2057 bp->b_whichq = -1;
2058 bp->b_timestamp = 0;
2059 bp->b_shadow = 0;
2060 }
2061
2062 /*
2063 * Associate a buffer with a vnode.
2064 * buf_mtx must be locked on entry
2065 */
2066 static void
bgetvp_locked(vnode_t vp,buf_t bp)2067 bgetvp_locked(vnode_t vp, buf_t bp)
2068 {
2069 if (bp->b_vp != vp) {
2070 panic("bgetvp_locked: not free");
2071 }
2072
2073 if (vp->v_type == VBLK || vp->v_type == VCHR) {
2074 bp->b_dev = vp->v_rdev;
2075 } else {
2076 bp->b_dev = NODEV;
2077 }
2078 /*
2079 * Insert onto list for new vnode.
2080 */
2081 bufinsvn(bp, &vp->v_cleanblkhd);
2082 }
2083
2084 /*
2085 * Disassociate a buffer from a vnode.
2086 * buf_mtx must be locked on entry
2087 */
2088 static void
brelvp_locked(buf_t bp)2089 brelvp_locked(buf_t bp)
2090 {
2091 /*
2092 * Delete from old vnode list, if on one.
2093 */
2094 if (bp->b_vnbufs.le_next != NOLIST) {
2095 bufremvn(bp);
2096 }
2097
2098 bp->b_vp = (vnode_t)NULL;
2099 }
2100
2101 /*
2102 * Reassign a buffer from one vnode to another.
2103 * Used to assign file specific control information
2104 * (indirect blocks) to the vnode to which they belong.
2105 */
2106 static void
buf_reassign(buf_t bp,vnode_t newvp)2107 buf_reassign(buf_t bp, vnode_t newvp)
2108 {
2109 struct buflists *listheadp;
2110
2111 if (newvp == NULL) {
2112 printf("buf_reassign: NULL");
2113 return;
2114 }
2115 lck_mtx_lock_spin(&buf_mtx);
2116
2117 /*
2118 * Delete from old vnode list, if on one.
2119 */
2120 if (bp->b_vnbufs.le_next != NOLIST) {
2121 bufremvn(bp);
2122 }
2123 /*
2124 * If dirty, put on list of dirty buffers;
2125 * otherwise insert onto list of clean buffers.
2126 */
2127 if (ISSET(bp->b_flags, B_DELWRI)) {
2128 listheadp = &newvp->v_dirtyblkhd;
2129 } else {
2130 listheadp = &newvp->v_cleanblkhd;
2131 }
2132 bufinsvn(bp, listheadp);
2133
2134 lck_mtx_unlock(&buf_mtx);
2135 }
2136
2137 static __inline__ void
bufhdrinit(buf_t bp)2138 bufhdrinit(buf_t bp)
2139 {
2140 bzero((char *)bp, sizeof *bp);
2141 bp->b_dev = NODEV;
2142 bp->b_rcred = NOCRED;
2143 bp->b_wcred = NOCRED;
2144 bp->b_vnbufs.le_next = NOLIST;
2145 bp->b_flags = B_INVAL;
2146
2147 return;
2148 }
2149
2150 /*
2151 * Initialize buffers and hash links for buffers.
2152 */
2153 __private_extern__ void
2154 bufinit(void)
2155 {
2156 buf_t bp;
2157 struct bqueues *dp;
2158 int i;
2159
2160 nbuf_headers = 0;
2161 /* Initialize the buffer queues ('freelists') and the hash table */
2162 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
2163 TAILQ_INIT(dp);
2164 }
2165 bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash);
2166
2167 buf_busycount = 0;
2168
2169 /* Initialize the buffer headers */
2170 for (i = 0; i < max_nbuf_headers; i++) {
2171 nbuf_headers++;
2172 bp = &buf_headers[i];
2173 bufhdrinit(bp);
2174
2175 BLISTNONE(bp);
2176 dp = &bufqueues[BQ_EMPTY];
2177 bp->b_whichq = BQ_EMPTY;
2178 bp->b_timestamp = buf_timestamp();
2179 binsheadfree(bp, dp, BQ_EMPTY);
2180 binshash(bp, &invalhash);
2181 }
2182 boot_nbuf_headers = nbuf_headers;
2183
2184 TAILQ_INIT(&iobufqueue);
2185 TAILQ_INIT(&delaybufqueue);
2186
2187 for (; i < nbuf_headers + niobuf_headers; i++) {
2188 bp = &buf_headers[i];
2189 bufhdrinit(bp);
2190 bp->b_whichq = -1;
2191 binsheadfree(bp, &iobufqueue, -1);
2192 }
2193
2194 /*
2195 * allocate and initialize cluster specific global locks...
2196 */
2197 cluster_init();
2198
2199 printf("using %d buffer headers and %d cluster IO buffer headers\n",
2200 nbuf_headers, niobuf_headers);
2201
2202 /* start the bcleanbuf() thread */
2203 bcleanbuf_thread_init();
2204
2205 /* Register a callout for relieving vm pressure */
2206 if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) {
2207 panic("Couldn't register buffer cache callout for vm pressure!");
2208 }
2209 }
2210
2211 /*
2212 * Zones for the meta data buffers
2213 */
2214
2215 #define MINMETA 512
2216 #define MAXMETA 16384
2217
2218 KALLOC_HEAP_DEFINE(KHEAP_VFS_BIO, "vfs_bio", KHEAP_ID_DATA_BUFFERS);
2219
2220 static struct buf *
2221 bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype)
2222 {
2223 buf_t bp;
2224
2225 bp = buf_getblk(vp, blkno, size, 0, 0, queuetype);
2226
2227 /*
2228 * If buffer does not have data valid, start a read.
2229 * Note that if buffer is B_INVAL, buf_getblk() won't return it.
2230 * Therefore, it's valid if its I/O has completed or been delayed.
2231 */
2232 if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
2233 struct proc *p;
2234
2235 p = current_proc();
2236
2237 /* Start I/O for the buffer (keeping credentials). */
2238 SET(bp->b_flags, B_READ | async);
2239 if (IS_VALID_CRED(cred) && !IS_VALID_CRED(bp->b_rcred)) {
2240 kauth_cred_ref(cred);
2241 bp->b_rcred = cred;
2242 }
2243
2244 VNOP_STRATEGY(bp);
2245
2246 trace(TR_BREADMISS, pack(vp, size), blkno);
2247
2248 /* Pay for the read. */
2249 if (p && p->p_stats) {
2250 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */
2251 }
2252
2253 if (async) {
2254 /*
2255 * since we asked for an ASYNC I/O
2256 * the biodone will do the brelse
2257 * we don't want to pass back a bp
2258 * that we don't 'own'
2259 */
2260 bp = NULL;
2261 }
2262 } else if (async) {
2263 buf_brelse(bp);
2264 bp = NULL;
2265 }
2266
2267 trace(TR_BREADHIT, pack(vp, size), blkno);
2268
2269 return bp;
2270 }
2271
2272 /*
2273 * Perform the reads for buf_breadn() and buf_meta_breadn().
2274 * Trivial modification to the breada algorithm presented in Bach (p.55).
2275 */
2276 static errno_t
2277 do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes,
2278 int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype)
2279 {
2280 buf_t bp;
2281 int i;
2282
2283 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype);
2284
2285 /*
2286 * For each of the read-ahead blocks, start a read, if necessary.
2287 */
2288 for (i = 0; i < nrablks; i++) {
2289 /* If it's in the cache, just go on to next one. */
2290 if (incore(vp, rablks[i])) {
2291 continue;
2292 }
2293
2294 /* Get a buffer for the read-ahead block */
2295 (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype);
2296 }
2297
2298 /* Otherwise, we had to start a read for it; wait until it's valid. */
2299 return buf_biowait(bp);
2300 }
2301
2302
2303 /*
2304 * Read a disk block.
2305 * This algorithm described in Bach (p.54).
2306 */
2307 errno_t
2308 buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
2309 {
2310 buf_t bp;
2311
2312 /* Get buffer for block. */
2313 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ);
2314
2315 /* Wait for the read to complete, and return result. */
2316 return buf_biowait(bp);
2317 }
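/*
 * Illustrative sketch (not part of the original source): the classic
 * caller pattern for buf_bread().  'my_vp', 'MY_BLKNO' and 'MY_BLKSIZE'
 * are hypothetical placeholders.  Note that a buffer is returned through
 * 'bpp' even when the read fails, so it must be released either way.
 *
 *	buf_t bp;
 *	errno_t err;
 *
 *	err = buf_bread(my_vp, MY_BLKNO, MY_BLKSIZE, NOCRED, &bp);
 *	if (err) {
 *		buf_brelse(bp);
 *		return err;
 *	}
 *	// ... examine the data at buf_dataptr(bp) ...
 *	buf_brelse(bp);
 */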
2318
2319 /*
2320 * Read a disk block. [bread() for meta-data]
2321 * This algorithm described in Bach (p.54).
2322 */
2323 errno_t
2324 buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
2325 {
2326 buf_t bp;
2327
2328 /* Get buffer for block. */
2329 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META);
2330
2331 /* Wait for the read to complete, and return result. */
2332 return buf_biowait(bp);
2333 }
2334
2335 /*
2336 * Read-ahead multiple disk blocks. The first is sync, the rest async.
2337 */
2338 errno_t
2339 buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
2340 {
2341 return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ);
2342 }
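/*
 * Sketch of a read-ahead call, with hypothetical block numbers and
 * sizes: read block 10 synchronously while kicking off async reads
 * for blocks 11 and 12; blocks already incore are skipped.
 *
 *	daddr64_t rablks[]  = { 11, 12 };
 *	int       rasizes[] = { MY_BLKSIZE, MY_BLKSIZE };
 *	buf_t     bp;
 *	errno_t   err;
 *
 *	err = buf_breadn(my_vp, 10, MY_BLKSIZE, rablks, rasizes, 2, NOCRED, &bp);
 */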
2343
2344 /*
2345 * Read-ahead multiple disk blocks. The first is sync, the rest async.
2346 * [buf_breadn() for meta-data]
2347 */
2348 errno_t
2349 buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
2350 {
2351 return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META);
2352 }
2353
2354 /*
2355 * Block write. Described in Bach (p.56)
2356 */
2357 errno_t
2358 buf_bwrite(buf_t bp)
2359 {
2360 int sync, wasdelayed;
2361 errno_t rv;
2362 proc_t p = current_proc();
2363 vnode_t vp = bp->b_vp;
2364
2365 if (bp->b_datap == 0) {
2366 if (brecover_data(bp) == 0) {
2367 return 0;
2368 }
2369 }
2370 /* Remember buffer type, to switch on it later. */
2371 sync = !ISSET(bp->b_flags, B_ASYNC);
2372 wasdelayed = ISSET(bp->b_flags, B_DELWRI);
2373 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
2374
2375 if (wasdelayed) {
2376 OSAddAtomicLong(-1, &nbdwrite);
2377 }
2378
2379 if (!sync) {
2380 /*
2381 * If not synchronous, pay for the I/O operation and make
2382 * sure the buf is on the correct vnode queue. We have
2383 * to do this now, because if we don't, the vnode may not
2384 * be properly notified that its I/O has completed.
2385 */
2386 if (wasdelayed) {
2387 buf_reassign(bp, vp);
2388 } else if (p && p->p_stats) {
2389 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2390 }
2391 }
2392 trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
2393
2394 /* Initiate disk write. Make sure the appropriate party is charged. */
2395
2396 OSAddAtomic(1, &vp->v_numoutput);
2397
2398 VNOP_STRATEGY(bp);
2399
2400 if (sync) {
2401 /*
2402 * If I/O was synchronous, wait for it to complete.
2403 */
2404 rv = buf_biowait(bp);
2405
2406 /*
2407 * Pay for the I/O operation, if it hasn't been paid for, and
2408 * make sure it's on the correct vnode queue. (async operations
2409 * were paid for above.)
2410 */
2411 if (wasdelayed) {
2412 buf_reassign(bp, vp);
2413 } else if (p && p->p_stats) {
2414 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2415 }
2416
2417 /* Release the buffer. */
2418 buf_brelse(bp);
2419
2420 return rv;
2421 } else {
2422 return 0;
2423 }
2424 }
2425
2426 int
2427 vn_bwrite(struct vnop_bwrite_args *ap)
2428 {
2429 return buf_bwrite(ap->a_bp);
2430 }
2431
2432 /*
2433 * Delayed write.
2434 *
2435 * The buffer is marked dirty, but is not queued for I/O.
2436 * This routine should be used when the buffer is expected
2437 * to be modified again soon, typically a small write that
2438 * partially fills a buffer.
2439 *
2440 * NB: magnetic tapes cannot be delayed; they must be
2441 * written in the order that the writes are requested.
2442 *
2443 * Described in Leffler, et al. (pp. 208-213).
2444 *
2445 * Note: With the ability to allocate additional buffer
2446 * headers, we can get into the situation where "too many"
2447 * buf_bdwrite()s let the kernel create buffers faster than the
2448 * disks can service them. Doing a buf_bawrite() in cases where
2449 * we have "too many" outstanding buf_bdwrite()s avoids that.
2450 */
2451 int
2452 bdwrite_internal(buf_t bp, int return_error)
2453 {
2454 proc_t p = current_proc();
2455 vnode_t vp = bp->b_vp;
2456
2457 /*
2458 * If the block hasn't been seen before:
2459 * (1) Mark it as having been seen,
2460 * (2) Charge for the write.
2461 * (3) Make sure it's on its vnode's correct block list,
2462 */
2463 if (!ISSET(bp->b_flags, B_DELWRI)) {
2464 SET(bp->b_flags, B_DELWRI);
2465 if (p && p->p_stats) {
2466 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2467 }
2468 OSAddAtomicLong(1, &nbdwrite);
2469 buf_reassign(bp, vp);
2470 }
2471
2472 /*
2473 * if we're not LOCKED, but the total number of delayed writes
2474 * has climbed above 75% of the total buffers in the system,
2475 * return an error if the caller has indicated that it can
2476 * handle one in this case; otherwise schedule the I/O now.
2477 * this is done to prevent us from allocating tons of extra
2478 * buffers when dealing with virtual disks (i.e. DiskImages),
2479 * because additional buffers are dynamically allocated to prevent
2480 * deadlocks from occurring
2481 *
2482 * however, can't do a buf_bawrite() if the LOCKED bit is set because the
2483 * buffer is part of a transaction and can't go to disk until
2484 * the LOCKED bit is cleared.
2485 */
2486 if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers / 4) * 3)) {
2487 if (return_error) {
2488 return EAGAIN;
2489 }
2490 /*
2491 * If the vnode has "too many" write operations in progress
2492 * wait for them to finish the IO
2493 */
2494 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite");
2495
2496 return buf_bawrite(bp);
2497 }
2498
2499 /* Otherwise, the "write" is done, so mark and release the buffer. */
2500 SET(bp->b_flags, B_DONE);
2501 buf_brelse(bp);
2502 return 0;
2503 }
2504
2505 errno_t
2506 buf_bdwrite(buf_t bp)
2507 {
2508 return bdwrite_internal(bp, 0);
2509 }
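/*
 * Sketch of the delayed-write pattern described above (placeholder
 * names): modify part of a cached block and let the buffer cache
 * schedule the disk write later.
 *
 *	buf_t bp = buf_getblk(my_vp, MY_BLKNO, MY_BLKSIZE, 0, 0, BLK_WRITE);
 *	// ... modify a few bytes at buf_dataptr(bp) ...
 *	buf_bdwrite(bp);	// marks B_DELWRI and releases; no I/O yet
 */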
2510
2511
2512 /*
2513 * Asynchronous block write; just an asynchronous buf_bwrite().
2514 *
2515 * Note: With the ability to allocate additional buffer
2516 * headers, we can get into the situation where "too many"
2517 * buf_bawrite()s let the kernel create buffers faster than
2518 * the disks can service them.
2519 * We limit the number of "in flight" writes a vnode can have to
2520 * avoid this.
2521 */
2522 static int
2523 bawrite_internal(buf_t bp, int throttle)
2524 {
2525 vnode_t vp = bp->b_vp;
2526
2527 if (vp) {
2528 if (throttle) {
2529 /*
2530 * If the vnode has "too many" write operations in progress
2531 * wait for them to finish the IO
2532 */
2533 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite");
2534 } else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE) {
2535 /*
2536 * return to the caller and
2537 * let him decide what to do
2538 */
2539 return EWOULDBLOCK;
2540 }
2541 }
2542 SET(bp->b_flags, B_ASYNC);
2543
2544 return VNOP_BWRITE(bp);
2545 }
2546
2547 errno_t
2548 buf_bawrite(buf_t bp)
2549 {
2550 return bawrite_internal(bp, 1);
2551 }
2552
2553
2554
2555 static void
2556 buf_free_meta_store(buf_t bp)
2557 {
2558 if (bp->b_bufsize) {
2559 uintptr_t datap = bp->b_datap;
2560 int bufsize = bp->b_bufsize;
2561
2562 bp->b_datap = (uintptr_t)NULL;
2563 bp->b_bufsize = 0;
2564
2565 /*
2566 * Ensure the assignment of b_datap has global visibility
2567 * before we free the region.
2568 */
2569 OSMemoryBarrier();
2570
2571 if (ISSET(bp->b_flags, B_ZALLOC)) {
2572 kheap_free(KHEAP_VFS_BIO, datap, bufsize);
2573 } else {
2574 kmem_free(kernel_map, datap, bufsize);
2575 }
2576 }
2577 }
2578
2579
2580 static buf_t
2581 buf_brelse_shadow(buf_t bp)
2582 {
2583 buf_t bp_head;
2584 buf_t bp_temp;
2585 buf_t bp_return = NULL;
2586 #ifdef BUF_MAKE_PRIVATE
2587 buf_t bp_data;
2588 int data_ref = 0;
2589 #endif
2590 int need_wakeup = 0;
2591
2592 lck_mtx_lock_spin(&buf_mtx);
2593
2594 __IGNORE_WCASTALIGN(bp_head = (buf_t)bp->b_orig);
2595
2596 if (bp_head->b_whichq != -1) {
2597 panic("buf_brelse_shadow: bp_head on freelist %d", bp_head->b_whichq);
2598 }
2599
2600 #ifdef BUF_MAKE_PRIVATE
2601 if ((bp_data = bp->b_data_store)) {
2602 bp_data->b_data_ref--;
2603 /*
2604 * snapshot the ref count so that we can check it
2605 * outside of the lock... we only want the guy going
2606 * from 1 -> 0 to try and release the storage
2607 */
2608 data_ref = bp_data->b_data_ref;
2609 }
2610 #endif
2611 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0);
2612
2613 bp_head->b_shadow_ref--;
2614
2615 for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow) {
2616 ;
2617 }
2618
2619 if (bp_temp == NULL) {
2620 panic("buf_brelse_shadow: bp not on list %p", bp_head);
2621 }
2622
2623 bp_temp->b_shadow = bp_temp->b_shadow->b_shadow;
2624
2625 #ifdef BUF_MAKE_PRIVATE
2626 /*
2627 * we're about to free the current 'owner' of the data buffer and
2628 * there is at least one other shadow buf_t still pointing at it
2629 * so transfer it to the first shadow buf left in the chain
2630 */
2631 if (bp == bp_data && data_ref) {
2632 if ((bp_data = bp_head->b_shadow) == NULL) {
2633 panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp);
2634 }
2635
2636 for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) {
2637 bp_temp->b_data_store = bp_data;
2638 }
2639 bp_data->b_data_ref = data_ref;
2640 }
2641 #endif
2642 if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) {
2643 panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp);
2644 }
2645 if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) {
2646 panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp);
2647 }
2648
2649 if (bp_head->b_shadow_ref == 0) {
2650 if (!ISSET(bp_head->b_lflags, BL_BUSY)) {
2651 CLR(bp_head->b_flags, B_AGE);
2652 bp_head->b_timestamp = buf_timestamp();
2653
2654 if (ISSET(bp_head->b_flags, B_LOCKED)) {
2655 bp_head->b_whichq = BQ_LOCKED;
2656 binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED);
2657 } else {
2658 bp_head->b_whichq = BQ_META;
2659 binstailfree(bp_head, &bufqueues[BQ_META], BQ_META);
2660 }
2661 } else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) {
2662 CLR(bp_head->b_lflags, BL_WAITSHADOW);
2663
2664 bp_return = bp_head;
2665 }
2666 if (ISSET(bp_head->b_lflags, BL_WANTED_REF)) {
2667 CLR(bp_head->b_lflags, BL_WANTED_REF);
2668 need_wakeup = 1;
2669 }
2670 }
2671 lck_mtx_unlock(&buf_mtx);
2672
2673 if (need_wakeup) {
2674 wakeup(bp_head);
2675 }
2676
2677 #ifdef BUF_MAKE_PRIVATE
2678 if (bp == bp_data && data_ref == 0) {
2679 buf_free_meta_store(bp);
2680 }
2681
2682 bp->b_data_store = NULL;
2683 #endif
2684 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0);
2685
2686 return bp_return;
2687 }
2688
2689
2690 /*
2691 * Release a buffer on to the free lists.
2692 * Described in Bach (p. 46).
2693 */
2694 void
2695 buf_brelse(buf_t bp)
2696 {
2697 struct bqueues *bufq;
2698 int whichq;
2699 upl_t upl;
2700 int need_wakeup = 0;
2701 int need_bp_wakeup = 0;
2702
2703
2704 if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY)) {
2705 panic("buf_brelse: bad buffer = %p", bp);
2706 }
2707
2708 #ifdef JOE_DEBUG
2709 (void) OSBacktrace(&bp->b_stackbrelse[0], 6);
2710
2711 bp->b_lastbrelse = current_thread();
2712 bp->b_tag = 0;
2713 #endif
2714 if (bp->b_lflags & BL_IOBUF) {
2715 buf_t shadow_master_bp = NULL;
2716
2717 if (ISSET(bp->b_lflags, BL_SHADOW)) {
2718 shadow_master_bp = buf_brelse_shadow(bp);
2719 } else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC)) {
2720 buf_free_meta_store(bp);
2721 }
2722 free_io_buf(bp);
2723
2724 if (shadow_master_bp) {
2725 bp = shadow_master_bp;
2726 goto finish_shadow_master;
2727 }
2728 return;
2729 }
2730
2731 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START,
2732 bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap,
2733 bp->b_flags, 0);
2734
2735 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
2736
2737 /*
2738 * if we're invalidating a buffer that has the B_FILTER bit
2739 * set then call the b_iodone function so it gets cleaned
2740 * up properly.
2741 *
2742 * the HFS journal code depends on this
2743 */
2744 if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) {
2745 if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */
2746 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
2747 void *arg = bp->b_transaction;
2748
2749 CLR(bp->b_flags, B_FILTER); /* but note callout done */
2750 bp->b_iodone = NULL;
2751 bp->b_transaction = NULL;
2752
2753 if (iodone_func == NULL) {
2754 panic("brelse: bp @ %p has NULL b_iodone!", bp);
2755 }
2756 (*iodone_func)(bp, arg);
2757 }
2758 }
2759 /*
2760 * I/O is done. Cleanup the UPL state
2761 */
2762 upl = bp->b_upl;
2763
2764 if (!ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
2765 kern_return_t kret;
2766 int upl_flags;
2767
2768 if (upl == NULL) {
2769 if (!ISSET(bp->b_flags, B_INVAL)) {
2770 kret = ubc_create_upl_kernel(bp->b_vp,
2771 ubc_blktooff(bp->b_vp, bp->b_lblkno),
2772 bp->b_bufsize,
2773 &upl,
2774 NULL,
2775 UPL_PRECIOUS,
2776 VM_KERN_MEMORY_FILE);
2777
2778 if (kret != KERN_SUCCESS) {
2779 panic("brelse: Failed to create UPL");
2780 }
2781 #if UPL_DEBUG
2782 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5);
2783 #endif /* UPL_DEBUG */
2784 }
2785 } else {
2786 if (bp->b_datap) {
2787 kret = ubc_upl_unmap(upl);
2788
2789 if (kret != KERN_SUCCESS) {
2790 panic("ubc_upl_unmap failed");
2791 }
2792 bp->b_datap = (uintptr_t)NULL;
2793 }
2794 }
2795 if (upl) {
2796 if (bp->b_flags & (B_ERROR | B_INVAL)) {
2797 if (bp->b_flags & (B_READ | B_INVAL)) {
2798 upl_flags = UPL_ABORT_DUMP_PAGES;
2799 } else {
2800 upl_flags = 0;
2801 }
2802
2803 ubc_upl_abort(upl, upl_flags);
2804 } else {
2805 if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY)) {
2806 upl_flags = UPL_COMMIT_SET_DIRTY;
2807 } else {
2808 upl_flags = UPL_COMMIT_CLEAR_DIRTY;
2809 }
2810
2811 ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags |
2812 UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
2813 }
2814 bp->b_upl = NULL;
2815 }
2816 } else {
2817 if ((upl)) {
2818 panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp);
2819 }
2820 }
2821
2822 /*
2823 * If it's locked, don't report an error; try again later.
2824 */
2825 if (ISSET(bp->b_flags, (B_LOCKED | B_ERROR)) == (B_LOCKED | B_ERROR)) {
2826 CLR(bp->b_flags, B_ERROR);
2827 }
2828 /*
2829 * If it's not cacheable, or an error, mark it invalid.
2830 */
2831 if (ISSET(bp->b_flags, (B_NOCACHE | B_ERROR))) {
2832 SET(bp->b_flags, B_INVAL);
2833 }
2834
2835 if ((bp->b_bufsize <= 0) ||
2836 ISSET(bp->b_flags, B_INVAL) ||
2837 (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) {
2838 boolean_t delayed_buf_free_meta_store = FALSE;
2839
2840 /*
2841 * If it's invalid or empty, dissociate it from its vnode,
2842 * release its storage if B_META, and
2843 * clean it up a bit and put it on the EMPTY queue
2844 */
2845 if (ISSET(bp->b_flags, B_DELWRI)) {
2846 OSAddAtomicLong(-1, &nbdwrite);
2847 }
2848
2849 if (ISSET(bp->b_flags, B_META)) {
2850 if (bp->b_shadow_ref) {
2851 delayed_buf_free_meta_store = TRUE;
2852 } else {
2853 buf_free_meta_store(bp);
2854 }
2855 }
2856 /*
2857 * nuke any credentials we were holding
2858 */
2859 buf_release_credentials(bp);
2860
2861 lck_mtx_lock_spin(&buf_mtx);
2862
2863 if (bp->b_shadow_ref) {
2864 SET(bp->b_lflags, BL_WAITSHADOW);
2865
2866 lck_mtx_unlock(&buf_mtx);
2867
2868 return;
2869 }
2870 if (delayed_buf_free_meta_store == TRUE) {
2871 lck_mtx_unlock(&buf_mtx);
2872 finish_shadow_master:
2873 buf_free_meta_store(bp);
2874
2875 lck_mtx_lock_spin(&buf_mtx);
2876 }
2877 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
2878
2879 if (bp->b_vp) {
2880 brelvp_locked(bp);
2881 }
2882
2883 bremhash(bp);
2884 BLISTNONE(bp);
2885 binshash(bp, &invalhash);
2886
2887 bp->b_whichq = BQ_EMPTY;
2888 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
2889 } else {
2890 /*
2891 * It has valid data. Put it on the end of the appropriate
2892 * queue, so that it'll stick around for as long as possible.
2893 */
2894 if (ISSET(bp->b_flags, B_LOCKED)) {
2895 whichq = BQ_LOCKED; /* locked in core */
2896 } else if (ISSET(bp->b_flags, B_META)) {
2897 whichq = BQ_META; /* meta-data */
2898 } else if (ISSET(bp->b_flags, B_AGE)) {
2899 whichq = BQ_AGE; /* stale but valid data */
2900 } else {
2901 whichq = BQ_LRU; /* valid data */
2902 }
2903 bufq = &bufqueues[whichq];
2904
2905 bp->b_timestamp = buf_timestamp();
2906
2907 lck_mtx_lock_spin(&buf_mtx);
2908
2909 /*
2910 * the buf_brelse_shadow routine doesn't take 'ownership'
2911 * of the parent buf_t... it updates state that is protected by
2912 * the buf_mtx, and checks for BL_BUSY to determine whether to
2913 * put the buf_t back on a free list. b_shadow_ref is protected
2914 * by the lock, and since we have not yet cleared B_BUSY, we need
2915 * to check it while holding the lock to ensure that one of us
2916 * puts this buf_t back on a free list when it is safe to do so
2917 */
2918 if (bp->b_shadow_ref == 0) {
2919 CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE));
2920 bp->b_whichq = whichq;
2921 binstailfree(bp, bufq, whichq);
2922 } else {
2923 /*
2924 * there are still cloned buf_t's pointing
2925 * at this guy... need to keep it off the
2926 * freelists until a buf_brelse is done on
2927 * the last clone
2928 */
2929 CLR(bp->b_flags, (B_ASYNC | B_NOCACHE));
2930 }
2931 }
2932 if (needbuffer) {
2933 /*
2934 * needbuffer is a global
2935 * we're currently using buf_mtx to protect it
2936 * delay doing the actual wakeup until after
2937 * we drop buf_mtx
2938 */
2939 needbuffer = 0;
2940 need_wakeup = 1;
2941 }
2942 if (ISSET(bp->b_lflags, BL_WANTED)) {
2943 /*
2944 * delay the actual wakeup until after we
2945 * clear BL_BUSY and we've dropped buf_mtx
2946 */
2947 need_bp_wakeup = 1;
2948 }
2949 /*
2950 * Unlock the buffer.
2951 */
2952 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
2953 buf_busycount--;
2954
2955 lck_mtx_unlock(&buf_mtx);
2956
2957 if (need_wakeup) {
2958 /*
2959 * Wake up any processes waiting for any buffer to become free.
2960 */
2961 wakeup(&needbuffer);
2962 }
2963 if (need_bp_wakeup) {
2964 /*
2965 * Wake up any processes waiting for _this_ buffer to become free.
2966 */
2967 wakeup(bp);
2968 }
2969 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END,
2970 bp, bp->b_datap, bp->b_flags, 0, 0);
2971 }
2972
2973 /*
2974 * Determine if a block is in the cache.
2975 * Just look on what would be its hash chain. If it's there, return
2976 * a pointer to it, unless it's marked invalid. If it's marked invalid,
2977 * we normally don't return the buffer, unless the caller explicitly
2978 * wants us to.
2979 */
2980 static boolean_t
2981 incore(vnode_t vp, daddr64_t blkno)
2982 {
2983 boolean_t retval;
2984 struct bufhashhdr *dp;
2985
2986 dp = BUFHASH(vp, blkno);
2987
2988 lck_mtx_lock_spin(&buf_mtx);
2989
2990 if (incore_locked(vp, blkno, dp)) {
2991 retval = TRUE;
2992 } else {
2993 retval = FALSE;
2994 }
2995 lck_mtx_unlock(&buf_mtx);
2996
2997 return retval;
2998 }
2999
3000
3001 static buf_t
3002 incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp)
3003 {
3004 struct buf *bp;
3005
3006 /* Search hash chain */
3007 for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) {
3008 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
3009 !ISSET(bp->b_flags, B_INVAL)) {
3010 return bp;
3011 }
3012 }
3013 return NULL;
3014 }
3015
3016
3017 void
3018 buf_wait_for_shadow_io(vnode_t vp, daddr64_t blkno)
3019 {
3020 buf_t bp;
3021 struct bufhashhdr *dp;
3022
3023 dp = BUFHASH(vp, blkno);
3024
3025 lck_mtx_lock_spin(&buf_mtx);
3026
3027 for (;;) {
3028 if ((bp = incore_locked(vp, blkno, dp)) == NULL) {
3029 break;
3030 }
3031
3032 if (bp->b_shadow_ref == 0) {
3033 break;
3034 }
3035
3036 SET(bp->b_lflags, BL_WANTED_REF);
3037
3038 (void) msleep(bp, &buf_mtx, PSPIN | (PRIBIO + 1), "buf_wait_for_shadow", NULL);
3039 }
3040 lck_mtx_unlock(&buf_mtx);
3041 }
3042
3043 /* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */
3044 /*
3045 * Get a block of requested size that is associated with
3046 * a given vnode and block offset. If it is found in the
3047 * block cache, mark it as having been found, make it busy
3048 * and return it. Otherwise, return an empty block of the
3049 * correct size. It is up to the caller to ensure that the
3050 * cached blocks are of the correct size.
3051 */
3052 buf_t
3053 buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int operation)
3054 {
3055 buf_t bp;
3056 int err;
3057 upl_t upl;
3058 upl_page_info_t *pl;
3059 kern_return_t kret;
3060 int ret_only_valid;
3061 struct timespec ts;
3062 int upl_flags;
3063 struct bufhashhdr *dp;
3064
3065 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START,
3066 (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0);
3067
3068 ret_only_valid = operation & BLK_ONLYVALID;
3069 operation &= ~BLK_ONLYVALID;
3070 dp = BUFHASH(vp, blkno);
3071 start:
3072 lck_mtx_lock_spin(&buf_mtx);
3073
3074 if ((bp = incore_locked(vp, blkno, dp))) {
3075 /*
3076 * Found in the Buffer Cache
3077 */
3078 if (ISSET(bp->b_lflags, BL_BUSY)) {
3079 /*
3080 * but is busy
3081 */
3082 switch (operation) {
3083 case BLK_READ:
3084 case BLK_WRITE:
3085 case BLK_META:
3086 SET(bp->b_lflags, BL_WANTED);
3087 bufstats.bufs_busyincore++;
3088
3089 /*
3090 * don't retake the mutex after being awakened...
3091 * the time out is in msecs
3092 */
3093 ts.tv_sec = (slptimeo / 1000);
3094 ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000;
3095
3096 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE,
3097 (uintptr_t)blkno, size, operation, 0, 0);
3098
3099 err = msleep(bp, &buf_mtx, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts);
3100
3101 /*
3102 * Callers who call with PCATCH or timeout are
3103 * willing to deal with the NULL pointer
3104 */
3105 if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo))) {
3106 return NULL;
3107 }
3108 goto start;
3109 /*NOTREACHED*/
3110
3111 default:
3112 /*
3113 * unknown operation requested
3114 */
3115 panic("getblk: paging or unknown operation for incore busy buffer - %x", operation);
3116 /*NOTREACHED*/
3117 break;
3118 }
3119 } else {
3120 int clear_bdone;
3121
3122 /*
3123 * buffer in core and not busy
3124 */
3125 SET(bp->b_lflags, BL_BUSY);
3126 SET(bp->b_flags, B_CACHE);
3127 buf_busycount++;
3128
3129 bremfree_locked(bp);
3130 bufstats.bufs_incore++;
3131
3132 lck_mtx_unlock(&buf_mtx);
3133 #ifdef JOE_DEBUG
3134 bp->b_owner = current_thread();
3135 bp->b_tag = 1;
3136 #endif
3137 if ((bp->b_upl)) {
3138 panic("buffer has UPL, but not marked BUSY: %p", bp);
3139 }
3140
3141 clear_bdone = FALSE;
3142 if (!ret_only_valid) {
3143 /*
3144 * If the number of bytes that are valid is going
3145 * to increase (even if we end up not doing a
3146 * reallocation through allocbuf) we have to read
3147 * the new size first.
3148 *
3149 * This is required in cases where we are doing a read-
3150 * modify-write of data that is already valid on disk, but
3151 * in cases where the data on disk beyond (blkno + b_bcount)
3152 * is invalid, we may end up doing extra I/O.
3153 */
3154 if (operation == BLK_META && bp->b_bcount < (uint32_t)size) {
3155 /*
3156 * Since we are going to read in the whole size first,
3157 * we have to ensure that any pending delayed write
3158 * is flushed to disk beforehand.
3159 */
3160 if (ISSET(bp->b_flags, B_DELWRI)) {
3161 CLR(bp->b_flags, B_CACHE);
3162 buf_bwrite(bp);
3163 goto start;
3164 }
3165 /*
3166 * clear B_DONE before returning from
3167 * this function so that the caller can
3168 * issue a read for the new size.
3169 */
3170 clear_bdone = TRUE;
3171 }
3172
3173 if (bp->b_bufsize != (uint32_t)size) {
3174 allocbuf(bp, size);
3175 }
3176 }
3177
3178 upl_flags = 0;
3179 switch (operation) {
3180 case BLK_WRITE:
3181 /*
3182 * "write" operation: let the UPL subsystem
3183 * know that we intend to modify the buffer
3184 * cache pages we're gathering.
3185 */
3186 upl_flags |= UPL_WILL_MODIFY;
3187 OS_FALLTHROUGH;
3188 case BLK_READ:
3189 upl_flags |= UPL_PRECIOUS;
3190 if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
3191 kret = ubc_create_upl_kernel(vp,
3192 ubc_blktooff(vp, bp->b_lblkno),
3193 bp->b_bufsize,
3194 &upl,
3195 &pl,
3196 upl_flags,
3197 VM_KERN_MEMORY_FILE);
3198 if (kret != KERN_SUCCESS) {
3199 panic("Failed to create UPL");
3200 }
3201
3202 bp->b_upl = upl;
3203
3204 if (upl_valid_page(pl, 0)) {
3205 if (upl_dirty_page(pl, 0)) {
3206 SET(bp->b_flags, B_WASDIRTY);
3207 } else {
3208 CLR(bp->b_flags, B_WASDIRTY);
3209 }
3210 } else {
3211 CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI));
3212 }
3213
3214 kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap));
3215
3216 if (kret != KERN_SUCCESS) {
3217 panic("getblk: ubc_upl_map() failed with (%d)", kret);
3218 }
3219 }
3220 break;
3221
3222 case BLK_META:
3223 /*
3224 * VM is not involved in I/O for the meta data;
3225 * the buffer already has valid data
3226 */
3227 break;
3228
3229 default:
3230 panic("getblk: paging or unknown operation for incore buffer- %d", operation);
3231 /*NOTREACHED*/
3232 break;
3233 }
3234
3235 if (clear_bdone) {
3236 CLR(bp->b_flags, B_DONE);
3237 }
3238 }
3239 } else { /* not incore() */
3240 int queue = BQ_EMPTY; /* Start with no preference */
3241
3242 if (ret_only_valid) {
3243 lck_mtx_unlock(&buf_mtx);
3244 return NULL;
3245 }
3246 if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/) {
3247 operation = BLK_META;
3248 }
3249
3250 if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL) {
3251 goto start;
3252 }
3253
3254 /*
3255 * getnewbuf may block for a number of different reasons...
3256 * if it does, it's then possible for someone else to
3257 * create a buffer for the same block and insert it into
3258 * the hash... if we see it incore at this point we dump
3259 * the buffer we were working on and start over
3260 */
3261 if (incore_locked(vp, blkno, dp)) {
3262 SET(bp->b_flags, B_INVAL);
3263 binshash(bp, &invalhash);
3264
3265 lck_mtx_unlock(&buf_mtx);
3266
3267 buf_brelse(bp);
3268 goto start;
3269 }
3270 /*
3271 * NOTE: YOU CAN NOT BLOCK UNTIL binshash() HAS BEEN
3272 * CALLED! BE CAREFUL.
3273 */
3274
3275 /*
3276 * mark the buffer as B_META if indicated
3277 * so that when the buffer is released it will go to the META queue
3278 */
3279 if (operation == BLK_META) {
3280 SET(bp->b_flags, B_META);
3281 }
3282
3283 bp->b_blkno = bp->b_lblkno = blkno;
3284 bp->b_lblksize = 0; /* Should be set by caller */
3285 bp->b_vp = vp;
3286
3287 /*
3288 * Insert in the hash so that incore() can find it
3289 */
3290 binshash(bp, BUFHASH(vp, blkno));
3291
3292 bgetvp_locked(vp, bp);
3293
3294 lck_mtx_unlock(&buf_mtx);
3295
3296 allocbuf(bp, size);
3297
3298 upl_flags = 0;
3299 switch (operation) {
3300 case BLK_META:
3301 /*
3302 * buffer data is invalid...
3303 *
3304 * I don't want to have to retake buf_mtx,
3305 * so the miss and vmhits counters are done
3306 * with Atomic updates... all other counters
3307 * in bufstats are protected with either
3308 * buf_mtx or iobuffer_mtxp
3309 */
3310 OSAddAtomicLong(1, &bufstats.bufs_miss);
3311 break;
3312
3313 case BLK_WRITE:
3314 /*
3315 * "write" operation: let the UPL subsystem know
3316 * that we intend to modify the buffer cache pages
3317 * we're gathering.
3318 */
3319 upl_flags |= UPL_WILL_MODIFY;
3320 OS_FALLTHROUGH;
3321 case BLK_READ:
3322 { off_t f_offset;
3323 size_t contig_bytes;
3324 int bmap_flags;
3325
3326 #if DEVELOPMENT || DEBUG
3327 /*
3328 * Apple-implemented file systems use UBC exclusively; they should
3329 * not call in here.
3330 */
3331 const char* excldfs[] = {"hfs", "afpfs", "smbfs", "acfs",
3332 "exfat", "msdos", "webdav", NULL};
3333
3334 for (int i = 0; excldfs[i] != NULL; i++) {
3335 if (vp->v_mount &&
3336 !strcmp(vp->v_mount->mnt_vfsstat.f_fstypename,
3337 excldfs[i])) {
3338 panic("%s %s calls buf_getblk",
3339 excldfs[i],
3340 operation == BLK_READ ? "BLK_READ" : "BLK_WRITE");
3341 }
3342 }
3343 #endif
3344
3345 if ((bp->b_upl)) {
3346 panic("bp already has UPL: %p", bp);
3347 }
3348
3349 f_offset = ubc_blktooff(vp, blkno);
3350
3351 upl_flags |= UPL_PRECIOUS;
3352 kret = ubc_create_upl_kernel(vp,
3353 f_offset,
3354 bp->b_bufsize,
3355 &upl,
3356 &pl,
3357 upl_flags,
3358 VM_KERN_MEMORY_FILE);
3359
3360 if (kret != KERN_SUCCESS) {
3361 panic("Failed to create UPL");
3362 }
3363 #if UPL_DEBUG
3364 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4);
3365 #endif /* UPL_DEBUG */
3366 bp->b_upl = upl;
3367
3368 if (upl_valid_page(pl, 0)) {
3369 if (operation == BLK_READ) {
3370 bmap_flags = VNODE_READ;
3371 } else {
3372 bmap_flags = VNODE_WRITE;
3373 }
3374
3375 SET(bp->b_flags, B_CACHE | B_DONE);
3376
3377 OSAddAtomicLong(1, &bufstats.bufs_vmhits);
3378
3379 bp->b_validoff = 0;
3380 bp->b_dirtyoff = 0;
3381
3382 if (upl_dirty_page(pl, 0)) {
3383 /* page is dirty */
3384 SET(bp->b_flags, B_WASDIRTY);
3385
3386 bp->b_validend = bp->b_bcount;
3387 bp->b_dirtyend = bp->b_bcount;
3388 } else {
3389 /* page is clean */
3390 bp->b_validend = bp->b_bcount;
3391 bp->b_dirtyend = 0;
3392 }
3393 /*
3394 * try to recreate the physical block number associated with
3395 * this buffer...
3396 */
3397 if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL)) {
3398 panic("getblk: VNOP_BLOCKMAP failed");
3399 }
3400 /*
3401 * if the extent represented by this buffer
3402 * is not completely physically contiguous on
3403 * disk, then we can't cache the physical mapping
3404 * in the buffer header
3405 */
3406 if ((uint32_t)contig_bytes < bp->b_bcount) {
3407 bp->b_blkno = bp->b_lblkno;
3408 }
3409 } else {
3410 OSAddAtomicLong(1, &bufstats.bufs_miss);
3411 }
3412 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
3413
3414 if (kret != KERN_SUCCESS) {
3415 panic("getblk: ubc_upl_map() failed with (%d)", kret);
3416 }
3417 break;} // end BLK_READ
3418 default:
3419 panic("getblk: paging or unknown operation - %x", operation);
3420 /*NOTREACHED*/
3421 break;
3422 } // end switch
3423 } //end buf_t !incore
3424
3425 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END,
3426 bp, bp->b_datap, bp->b_flags, 3, 0);
3427
3428 #ifdef JOE_DEBUG
3429 (void) OSBacktrace(&bp->b_stackgetblk[0], 6);
3430 #endif
3431 return bp;
3432 }
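/*
 * Sketch (placeholder names): peek at the cache without allocating.
 * OR'ing BLK_ONLYVALID into the operation makes buf_getblk() return
 * NULL instead of creating a new buffer when the block isn't cached.
 *
 *	buf_t bp = buf_getblk(my_vp, MY_BLKNO, MY_BLKSIZE, 0, 0,
 *	    BLK_META | BLK_ONLYVALID);
 *	if (bp) {
 *		// cached copy found; bp is now busy and must be released
 *		buf_brelse(bp);
 *	}
 */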
3433
3434 /*
3435 * Get an empty, disassociated buffer of given size.
3436 */
3437 buf_t
3438 buf_geteblk(int size)
3439 {
3440 buf_t bp = NULL;
3441 int queue = BQ_EMPTY;
3442
3443 do {
3444 lck_mtx_lock_spin(&buf_mtx);
3445
3446 bp = getnewbuf(0, 0, &queue);
3447 } while (bp == NULL);
3448
3449 SET(bp->b_flags, (B_META | B_INVAL));
3450
3451 #if DIAGNOSTIC
3452 assert(queue == BQ_EMPTY);
3453 #endif /* DIAGNOSTIC */
3454 /* XXX need to implement logic to deal with other queues */
3455
3456 binshash(bp, &invalhash);
3457 bufstats.bufs_eblk++;
3458
3459 lck_mtx_unlock(&buf_mtx);
3460
3461 allocbuf(bp, size);
3462
3463 return bp;
3464 }
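/*
 * Sketch: buf_geteblk() returns a busy buffer that is marked B_INVAL
 * and attached to no vnode, making it usable as scratch storage.
 *
 *	buf_t scratch = buf_geteblk(PAGE_SIZE);
 *	// ... use buf_dataptr(scratch) as temporary storage ...
 *	buf_brelse(scratch);	// B_INVAL sends it back to BQ_EMPTY
 */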
3465
3466 uint32_t
3467 buf_redundancy_flags(buf_t bp)
3468 {
3469 return bp->b_redundancy_flags;
3470 }
3471
3472 void
3473 buf_set_redundancy_flags(buf_t bp, uint32_t flags)
3474 {
3475 SET(bp->b_redundancy_flags, flags);
3476 }
3477
3478 void
3479 buf_clear_redundancy_flags(buf_t bp, uint32_t flags)
3480 {
3481 CLR(bp->b_redundancy_flags, flags);
3482 }
3483
3484
3485
3486 static void *
3487 recycle_buf_from_pool(int nsize)
3488 {
3489 buf_t bp;
3490 void *ptr = NULL;
3491
3492 lck_mtx_lock_spin(&buf_mtx);
3493
3494 TAILQ_FOREACH(bp, &bufqueues[BQ_META], b_freelist) {
3495 if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != (uint32_t)nsize) {
3496 continue;
3497 }
3498 ptr = (void *)bp->b_datap;
3499 bp->b_bufsize = 0;
3500
3501 bcleanbuf(bp, TRUE);
3502 break;
3503 }
3504 lck_mtx_unlock(&buf_mtx);
3505
3506 return ptr;
3507 }
3508
3509
3510
3511 int zalloc_nopagewait_failed = 0;
3512 int recycle_buf_failed = 0;
3513
3514 static void *
3515 grab_memory_for_meta_buf(int nsize)
3516 {
3517 void *ptr;
3518 boolean_t was_vmpriv;
3519
3520
3521 /*
3522 * make sure we're NOT privileged so that
3523 * if a vm_page_grab is needed, it won't
3524 * block if we're out of free pages... if
3525 * it blocks, then we can't honor the
3526 * nopagewait request
3527 */
3528 was_vmpriv = set_vm_privilege(FALSE);
3529
3530 ptr = kheap_alloc(KHEAP_VFS_BIO, nsize, Z_NOPAGEWAIT);
3531
3532 if (was_vmpriv == TRUE) {
3533 set_vm_privilege(TRUE);
3534 }
3535
3536 if (ptr == NULL) {
3537 zalloc_nopagewait_failed++;
3538
3539 ptr = recycle_buf_from_pool(nsize);
3540
3541 if (ptr == NULL) {
3542 recycle_buf_failed++;
3543
3544 if (was_vmpriv == FALSE) {
3545 set_vm_privilege(TRUE);
3546 }
3547
3548 ptr = kheap_alloc(KHEAP_VFS_BIO, nsize, Z_WAITOK);
3549
3550 if (was_vmpriv == FALSE) {
3551 set_vm_privilege(FALSE);
3552 }
3553 }
3554 }
3555 return ptr;
3556 }
3557
3558 /*
3559 * With UBC, there is no need to expand / shrink the file data
3560 * buffer. The VM uses the same pages, hence no waste.
3561 * All the file data buffers can have one size.
3562 * In fact expand / shrink would be an expensive operation.
3563 *
3564 * The only exception to this is meta-data buffers. Most of the
3565 * meta data operations are smaller than PAGE_SIZE. Having the
3566 * meta-data buffers grow and shrink as needed, optimizes use
3567 * of the kernel wired memory.
3568 */
3569
3570 int
3571 allocbuf(buf_t bp, int size)
3572 {
3573 vm_size_t desired_size;
3574
3575 desired_size = roundup(size, CLBYTES);
3576
3577 if (desired_size < PAGE_SIZE) {
3578 desired_size = PAGE_SIZE;
3579 }
3580 if (desired_size > MAXBSIZE) {
3581 panic("allocbuf: buffer larger than MAXBSIZE requested");
3582 }
3583
3584 if (ISSET(bp->b_flags, B_META)) {
3585 int nsize = roundup(size, MINMETA);
3586
3587 if (bp->b_datap) {
3588 void *elem = (void *)bp->b_datap;
3589
3590 if (ISSET(bp->b_flags, B_ZALLOC)) {
3591 if (bp->b_bufsize < (uint32_t)nsize) {
3592 /* reallocate to a bigger size */
3593
3594 if (nsize <= MAXMETA) {
3595 desired_size = nsize;
3596
3597 /* b_datap not really a ptr */
3598 *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
3599 } else {
3600 bp->b_datap = (uintptr_t)NULL;
3601 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE);
3602 CLR(bp->b_flags, B_ZALLOC);
3603 }
3604 bcopy(elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3605 kheap_free(KHEAP_VFS_BIO, elem, bp->b_bufsize);
3606 } else {
3607 desired_size = bp->b_bufsize;
3608 }
3609 } else {
3610 if ((vm_size_t)bp->b_bufsize < desired_size) {
3611 /* reallocate to a bigger size */
3612 bp->b_datap = (uintptr_t)NULL;
3613 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE);
3614 bcopy(elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3615 kmem_free(kernel_map, (vm_offset_t)elem, bp->b_bufsize);
3616 } else {
3617 desired_size = bp->b_bufsize;
3618 }
3619 }
3620 } else {
3621 /* new allocation */
3622 if (nsize <= MAXMETA) {
3623 desired_size = nsize;
3624
3625 /* b_datap not really a ptr */
3626 *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
3627 SET(bp->b_flags, B_ZALLOC);
3628 } else {
3629 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE);
3630 }
3631 }
3632
3633 if (bp->b_datap == 0) {
3634 panic("allocbuf: NULL b_datap");
3635 }
3636 }
3637 bp->b_bufsize = (uint32_t)desired_size;
3638 bp->b_bcount = size;
3639
3640 return 0;
3641 }
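/*
 * Worked example of the meta-buffer sizing above (request sizes
 * assumed for illustration):
 *
 *	allocbuf(bp, 600);	// nsize = roundup(600, MINMETA) = 1024
 *				// <= MAXMETA: kheap-allocated, B_ZALLOC set
 *	allocbuf(bp, 32768);	// 32768 > MAXMETA: falls through to
 *				// kmem_alloc_kobject(), B_ZALLOC clear
 */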
3642
3643 /*
3644 * Get a new buffer from one of the free lists.
3645 *
3646 * A request for a queue is passed in. The queue from which the buffer
3647 * was taken is returned. Out of range queue requests get BQ_EMPTY. A request
3648 * for BQUEUE means no preference. Use heuristics in that case.
3649 * The heuristics are as follows:
3650 * Try BQ_AGE, BQ_LRU, BQ_EMPTY, BQ_META in that order.
3651 * If none available block till one is made available.
3652 * If buffers available on both BQ_AGE and BQ_LRU, check the timestamps.
3653 * Pick the most stale buffer.
3654 * If found buffer was marked delayed write, start the async. write
3655 * and restart the search.
3656 * Initialize the fields and disassociate the buffer from the vnode.
3657 * Remove the buffer from the hash. Return the buffer and the queue
3658 * on which it was found.
3659 *
3660 * buf_mtx is held upon entry
3661 * returns with buf_mtx locked if new buf available
3662 * returns with buf_mtx UNlocked if new buf NOT available
3663 */
3664
3665 static buf_t
3666 getnewbuf(int slpflag, int slptimeo, int * queue)
3667 {
3668 buf_t bp;
3669 buf_t lru_bp;
3670 buf_t age_bp;
3671 buf_t meta_bp;
3672 int age_time, lru_time, bp_time, meta_time;
3673 int req = *queue; /* save it for restarts */
3674 struct timespec ts;
3675
3676 start:
3677 /*
3678 * invalid request gets empty queue
3679 */
3680 if ((*queue >= BQUEUES) || (*queue < 0)
3681 || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED)) {
3682 *queue = BQ_EMPTY;
3683 }
3684
3685
3686 if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first)) {
3687 goto found;
3688 }
3689
3690 /*
3691 * need to grow the number of bufs; add another one rather than recycling
3692 */
3693 if (nbuf_headers < max_nbuf_headers) {
3694 /*
3695 * Increment count now as lock
3696 * is dropped for allocation.
3697 * That avoids overcommits
3698 */
3699 nbuf_headers++;
3700 goto add_newbufs;
3701 }
3702 /* Try for the requested queue first */
3703 bp = bufqueues[*queue].tqh_first;
3704 if (bp) {
3705 goto found;
3706 }
3707
3708 /* Unable to use requested queue */
3709 age_bp = bufqueues[BQ_AGE].tqh_first;
3710 lru_bp = bufqueues[BQ_LRU].tqh_first;
3711 meta_bp = bufqueues[BQ_META].tqh_first;
3712
3713 if (!age_bp && !lru_bp && !meta_bp) {
3714 /*
3715 * Unavailable on AGE or LRU or META queues
3716 * Try the empty list first
3717 */
3718 bp = bufqueues[BQ_EMPTY].tqh_first;
3719 if (bp) {
3720 *queue = BQ_EMPTY;
3721 goto found;
3722 }
3723 /*
3724 * We have seen that this is hard to trigger.
3725 * This is an overcommit of nbufs but is needed
3726 * in some scenarios with disk images
3727 */
3728
3729 add_newbufs:
3730 lck_mtx_unlock(&buf_mtx);
3731
3732 /* Create a new temporary buffer header */
3733 bp = zalloc_flags(buf_hdr_zone, Z_WAITOK | Z_NOFAIL);
3734 bufhdrinit(bp);
3735 bp->b_whichq = BQ_EMPTY;
3736 bp->b_timestamp = buf_timestamp();
3737 BLISTNONE(bp);
3738 SET(bp->b_flags, B_HDRALLOC);
3739 *queue = BQ_EMPTY;
3740 lck_mtx_lock_spin(&buf_mtx);
3741
3742 if (bp) {
3743 binshash(bp, &invalhash);
3744 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3745 buf_hdr_count++;
3746 goto found;
3747 }
3748 /* subtract already accounted bufcount */
3749 nbuf_headers--;
3750
3751 bufstats.bufs_sleeps++;
3752
3753 /* wait for a free buffer of any kind */
3754 needbuffer = 1;
3755 /* hz value is 100 */
3756 ts.tv_sec = (slptimeo / 1000);
3757 /* the hz value is 100; which leads to 10ms */
3758 ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10;
3759
3760 msleep(&needbuffer, &buf_mtx, slpflag | PDROP | (PRIBIO + 1), "getnewbuf", &ts);
3761 return NULL;
3762 }
3763
3764 /* Buffer available either on AGE or LRU or META */
3765 bp = NULL;
3766 *queue = -1;
3767
3768 /* Buffer available either on AGE or LRU */
3769 if (!age_bp) {
3770 bp = lru_bp;
3771 *queue = BQ_LRU;
3772 } else if (!lru_bp) {
3773 bp = age_bp;
3774 *queue = BQ_AGE;
3775 } else { /* buffer available on both AGE and LRU */
3776 int t = buf_timestamp();
3777
3778 age_time = t - age_bp->b_timestamp;
3779 lru_time = t - lru_bp->b_timestamp;
3780 if ((age_time < 0) || (lru_time < 0)) { /* time set backwards */
3781 bp = age_bp;
3782 *queue = BQ_AGE;
3783 /*
3784 * we should probably re-timestamp everything in the
3785 * queues at this point with the current time
3786 */
3787 } else {
3788 if ((lru_time >= lru_is_stale) && (age_time < age_is_stale)) {
3789 bp = lru_bp;
3790 *queue = BQ_LRU;
3791 } else {
3792 bp = age_bp;
3793 *queue = BQ_AGE;
3794 }
3795 }
3796 }
3797
3798 if (!bp) { /* Neither on AGE nor on LRU */
3799 bp = meta_bp;
3800 *queue = BQ_META;
3801 } else if (meta_bp) {
3802 int t = buf_timestamp();
3803
3804 bp_time = t - bp->b_timestamp;
3805 meta_time = t - meta_bp->b_timestamp;
3806
3807 if (!(bp_time < 0) && !(meta_time < 0)) {
3808 /* time not set backwards */
3809 int bp_is_stale;
3810 bp_is_stale = (*queue == BQ_LRU) ?
3811 lru_is_stale : age_is_stale;
3812
3813 if ((meta_time >= meta_is_stale) &&
3814 (bp_time < bp_is_stale)) {
3815 bp = meta_bp;
3816 *queue = BQ_META;
3817 }
3818 }
3819 }
3820 found:
3821 if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY)) {
3822 panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)", bp, bp->b_flags);
3823 }
3824
3825 /* Clean it */
3826 if (bcleanbuf(bp, FALSE)) {
3827 /*
3828 * moved to the laundry thread, buffer not ready
3829 */
3830 *queue = req;
3831 goto start;
3832 }
3833 return bp;
3834 }
3835
3836
3837 /*
3838 * Clean a buffer.
3839 * Returns 0 if buffer is ready to use,
3840 * Returns 1 if issued a buf_bawrite() to indicate
3841 * that the buffer is not ready.
3842 *
3843 * buf_mtx is held upon entry
3844 * returns with buf_mtx locked
3845 */
3846 int
3847 bcleanbuf(buf_t bp, boolean_t discard)
3848 {
3849 /* Remove from the queue */
3850 bremfree_locked(bp);
3851
3852 #ifdef JOE_DEBUG
3853 bp->b_owner = current_thread();
3854 bp->b_tag = 2;
3855 #endif
3856 /*
3857 * If buffer was a delayed write, start the IO by queuing
3858 * it on the LAUNDRY queue, and return 1
3859 */
3860 if (ISSET(bp->b_flags, B_DELWRI)) {
3861 if (discard) {
3862 SET(bp->b_lflags, BL_WANTDEALLOC);
3863 }
3864
3865 bmovelaundry(bp);
3866
3867 lck_mtx_unlock(&buf_mtx);
3868
3869 wakeup(&bufqueues[BQ_LAUNDRY]);
3870 /*
3871 * and give it a chance to run
3872 */
3873 (void)thread_block(THREAD_CONTINUE_NULL);
3874
3875 lck_mtx_lock_spin(&buf_mtx);
3876
3877 return 1;
3878 }
3879 #ifdef JOE_DEBUG
3880 bp->b_owner = current_thread();
3881 bp->b_tag = 8;
3882 #endif
3883 /*
3884 * Buffer is no longer on any free list... we own it
3885 */
3886 SET(bp->b_lflags, BL_BUSY);
3887 buf_busycount++;
3888
3889 bremhash(bp);
3890
3891 /*
3892 * disassociate us from our vnode, if we had one...
3893 */
3894 if (bp->b_vp) {
3895 brelvp_locked(bp);
3896 }
3897
3898 lck_mtx_unlock(&buf_mtx);
3899
3900 BLISTNONE(bp);
3901
3902 if (ISSET(bp->b_flags, B_META)) {
3903 buf_free_meta_store(bp);
3904 }
3905
3906 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
3907
3908 buf_release_credentials(bp);
3909
3910 /* If discarding, just move to the empty queue */
3911 if (discard) {
3912 lck_mtx_lock_spin(&buf_mtx);
3913 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
3914 bp->b_whichq = BQ_EMPTY;
3915 binshash(bp, &invalhash);
3916 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3917 CLR(bp->b_lflags, BL_BUSY);
3918 buf_busycount--;
3919 } else {
3920 /* Not discarding: clean up and prepare for reuse */
3921 bp->b_bufsize = 0;
3922 bp->b_datap = (uintptr_t)NULL;
3923 bp->b_upl = (void *)NULL;
3924 bp->b_fsprivate = (void *)NULL;
3925 /*
3926 * preserve the state of whether this buffer
3927 * was allocated on the fly or not...
3928 * the only other flag that should be set at
3929 * this point is BL_BUSY...
3930 */
3931 #ifdef JOE_DEBUG
3932 bp->b_owner = current_thread();
3933 bp->b_tag = 3;
3934 #endif
3935 bp->b_lflags = BL_BUSY;
3936 bp->b_flags = (bp->b_flags & B_HDRALLOC);
3937 bp->b_redundancy_flags = 0;
3938 bp->b_dev = NODEV;
3939 bp->b_blkno = bp->b_lblkno = 0;
3940 bp->b_lblksize = 0;
3941 bp->b_iodone = NULL;
3942 bp->b_error = 0;
3943 bp->b_resid = 0;
3944 bp->b_bcount = 0;
3945 bp->b_dirtyoff = bp->b_dirtyend = 0;
3946 bp->b_validoff = bp->b_validend = 0;
3947 bzero(&bp->b_attr, sizeof(struct bufattr));
3948
3949 lck_mtx_lock_spin(&buf_mtx);
3950 }
3951 return 0;
3952 }
3953
3954
3955
3956 errno_t
3957 buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags)
3958 {
3959 buf_t bp;
3960 errno_t error;
3961 struct bufhashhdr *dp;
3962
3963 dp = BUFHASH(vp, lblkno);
3964
3965 relook:
3966 lck_mtx_lock_spin(&buf_mtx);
3967
3968 if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) {
3969 lck_mtx_unlock(&buf_mtx);
3970 return 0;
3971 }
3972 if (ISSET(bp->b_lflags, BL_BUSY)) {
3973 if (!ISSET(flags, BUF_WAIT)) {
3974 lck_mtx_unlock(&buf_mtx);
3975 return EBUSY;
3976 }
3977 SET(bp->b_lflags, BL_WANTED);
3978
3979 error = msleep((caddr_t)bp, &buf_mtx, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL);
3980
3981 if (error) {
3982 return error;
3983 }
3984 goto relook;
3985 }
3986 bremfree_locked(bp);
3987 SET(bp->b_lflags, BL_BUSY);
3988 SET(bp->b_flags, B_INVAL);
3989 buf_busycount++;
3990 #ifdef JOE_DEBUG
3991 bp->b_owner = current_thread();
3992 bp->b_tag = 4;
3993 #endif
3994 lck_mtx_unlock(&buf_mtx);
3995 buf_brelse(bp);
3996
3997 return 0;
3998 }
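/*
 * Sketch (placeholder names): discard any cached copy of a logical
 * block, sleeping if another thread currently has it busy.
 *
 *	errno_t err = buf_invalblkno(my_vp, MY_LBLKNO, BUF_WAIT);
 */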
3999
4000
4001 void
4002 buf_drop(buf_t bp)
4003 {
4004 int need_wakeup = 0;
4005
4006 lck_mtx_lock_spin(&buf_mtx);
4007
4008 if (ISSET(bp->b_lflags, BL_WANTED)) {
4009 /*
4010 * delay the actual wakeup until after we
4011 * clear BL_BUSY and we've dropped buf_mtx
4012 */
4013 need_wakeup = 1;
4014 }
4015 #ifdef JOE_DEBUG
4016 bp->b_owner = current_thread();
4017 bp->b_tag = 9;
4018 #endif
4019 /*
4020 * Unlock the buffer.
4021 */
4022 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
4023 buf_busycount--;
4024
4025 lck_mtx_unlock(&buf_mtx);
4026
4027 if (need_wakeup) {
4028 /*
4029 * Wake up any processes waiting for _this_ buffer to become free.
4030 */
4031 wakeup(bp);
4032 }
4033 }
4034
4035
4036 errno_t
4037 buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo)
4038 {
4039 errno_t error;
4040
4041 lck_mtx_lock_spin(&buf_mtx);
4042
4043 error = buf_acquire_locked(bp, flags, slpflag, slptimeo);
4044
4045 lck_mtx_unlock(&buf_mtx);
4046
4047 return error;
4048 }
4049
4050
4051 static errno_t
4052 buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo)
4053 {
4054 errno_t error;
4055 struct timespec ts;
4056
4057 if (ISSET(bp->b_flags, B_LOCKED)) {
4058 if ((flags & BAC_SKIP_LOCKED)) {
4059 return EDEADLK;
4060 }
4061 } else {
4062 if ((flags & BAC_SKIP_NONLOCKED)) {
4063 return EDEADLK;
4064 }
4065 }
4066 if (ISSET(bp->b_lflags, BL_BUSY)) {
4067 /*
4068 * since the lck_mtx_lock may block, the buffer
4069 * may become BUSY, so we need to
4070 * recheck for a NOWAIT request
4071 */
4072 if (flags & BAC_NOWAIT) {
4073 return EBUSY;
4074 }
4075 SET(bp->b_lflags, BL_WANTED);
4076
4077 /* the hz value is 100; which leads to 10ms */
4078 ts.tv_sec = (slptimeo / 100);
4079 ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;
4080 error = msleep((caddr_t)bp, &buf_mtx, slpflag | (PRIBIO + 1), "buf_acquire", &ts);
4081
4082 if (error) {
4083 return error;
4084 }
4085 return EAGAIN;
4086 }
4087 if (flags & BAC_REMOVE) {
4088 bremfree_locked(bp);
4089 }
4090 SET(bp->b_lflags, BL_BUSY);
4091 buf_busycount++;
4092
4093 #ifdef JOE_DEBUG
4094 bp->b_owner = current_thread();
4095 bp->b_tag = 5;
4096 #endif
4097 return 0;
4098 }
4099
4100
4101 /*
4102 * Wait for operations on the buffer to complete.
4103 * When they do, extract and return the I/O's error value.
4104 */
4105 errno_t
4106 buf_biowait(buf_t bp)
4107 {
4108 while (!ISSET(bp->b_flags, B_DONE)) {
4109 lck_mtx_lock_spin(&buf_mtx);
4110
4111 if (!ISSET(bp->b_flags, B_DONE)) {
4112 DTRACE_IO1(wait__start, buf_t, bp);
4113 (void) msleep(bp, &buf_mtx, PDROP | (PRIBIO + 1), "buf_biowait", NULL);
4114 DTRACE_IO1(wait__done, buf_t, bp);
4115 } else {
4116 lck_mtx_unlock(&buf_mtx);
4117 }
4118 }
4119 /* check for interruption of I/O (e.g. via NFS), then errors. */
4120 if (ISSET(bp->b_flags, B_EINTR)) {
4121 CLR(bp->b_flags, B_EINTR);
4122 return EINTR;
4123 } else if (ISSET(bp->b_flags, B_ERROR)) {
4124 return bp->b_error ? bp->b_error : EIO;
4125 } else {
4126 return 0;
4127 }
4128 }
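/*
 * Sketch of a caller that issues its own strategy call and then
 * waits, mirroring what buf_bread() does internally:
 *
 *	SET(bp->b_flags, B_READ);
 *	VNOP_STRATEGY(bp);
 *	error = buf_biowait(bp);	// sleeps until buf_biodone() sets B_DONE
 */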
4129
4130
4131 /*
4132 * Mark I/O complete on a buffer.
4133 *
4134 * If a callback has been requested, e.g. the pageout
4135 * daemon, do so. Otherwise, awaken waiting processes.
4136 *
4137 * [ Leffler, et al., says on p.247:
4138 * "This routine wakes up the blocked process, frees the buffer
4139 * for an asynchronous write, or, for a request by the pagedaemon
4140 * process, invokes a procedure specified in the buffer structure" ]
4141 *
4142 * In real life, the pagedaemon (or other system processes) wants
4143 * to do async stuff too, and doesn't want the buffer buf_brelse()'d.
4144 * (for swap pager, that puts swap buffers on the free lists (!!!),
4145 * for the vn device, that puts malloc'd buffers on the free lists!)
4146 */
4147
4148 void
4149 buf_biodone(buf_t bp)
4150 {
4151 mount_t mp;
4152 struct bufattr *bap;
4153 struct timeval real_elapsed;
4154 uint64_t real_elapsed_usec = 0;
4155
4156 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START,
4157 bp, bp->b_datap, bp->b_flags, 0, 0);
4158
4159 if (ISSET(bp->b_flags, B_DONE)) {
4160 panic("biodone already");
4161 }
4162
4163 bap = &bp->b_attr;
4164
4165 if (bp->b_vp && bp->b_vp->v_mount) {
4166 mp = bp->b_vp->v_mount;
4167 } else {
4168 mp = NULL;
4169 }
4170
4171 if (ISSET(bp->b_flags, B_ERROR)) {
4172 if (mp && (MNT_ROOTFS & mp->mnt_flag)) {
4173 dk_error_description_t desc;
4174 bzero(&desc, sizeof(desc));
4175 desc.description = panic_disk_error_description;
4176 desc.description_size = panic_disk_error_description_size;
4177 VNOP_IOCTL(mp->mnt_devvp, DKIOCGETERRORDESCRIPTION, (caddr_t)&desc, 0, vfs_context_kernel());
4178 }
4179 }
4180
4181 if (mp && (bp->b_flags & B_READ) == 0) {
4182 update_last_io_time(mp);
4183 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size);
4184 } else if (mp) {
4185 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size);
4186 }
4187
4188 throttle_info_end_io(bp);
4189
4190 if (kdebug_enable) {
4191 int code = DKIO_DONE;
4192 int io_tier = GET_BUFATTR_IO_TIER(bap);
4193
4194 if (bp->b_flags & B_READ) {
4195 code |= DKIO_READ;
4196 }
4197 if (bp->b_flags & B_ASYNC) {
4198 code |= DKIO_ASYNC;
4199 }
4200
4201 if (bp->b_flags & B_META) {
4202 code |= DKIO_META;
4203 } else if (bp->b_flags & B_PAGEIO) {
4204 code |= DKIO_PAGING;
4205 }
4206
4207 if (io_tier != 0) {
4208 code |= DKIO_THROTTLE;
4209 }
4210
4211 code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK);
4212
4213 if (bp->b_flags & B_PASSIVE) {
4214 code |= DKIO_PASSIVE;
4215 }
4216
4217 if (bap->ba_flags & BA_NOCACHE) {
4218 code |= DKIO_NOCACHE;
4219 }
4220
4221 if (bap->ba_flags & BA_IO_TIER_UPGRADE) {
4222 code |= DKIO_TIER_UPGRADE;
4223 }
4224
4225 KDBG_RELEASE_NOPROCFILT(FSDBG_CODE(DBG_DKRW, code),
4226 buf_kernel_addrperm_addr(bp),
4227 (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid,
4228 bp->b_error);
4229 }
4230
4231 microuptime(&real_elapsed);
4232 timevalsub(&real_elapsed, &bp->b_timestamp_tv);
4233 real_elapsed_usec = real_elapsed.tv_sec * USEC_PER_SEC + real_elapsed.tv_usec;
4234 disk_conditioner_delay(bp, 1, bp->b_bcount, real_elapsed_usec);
4235
4236 /*
4237 * I/O was done, so don't believe
4238 * the DIRTY state from VM anymore...
4239 * and we need to reset the THROTTLED/PASSIVE
4240 * indicators
4241 */
4242 CLR(bp->b_flags, (B_WASDIRTY | B_PASSIVE));
4243 CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP | BA_IO_TIER_UPGRADE));
4244
4245 SET_BUFATTR_IO_TIER(bap, 0);
4246
4247 DTRACE_IO1(done, buf_t, bp);
4248
4249 if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW)) {
4250 /*
4251 * wake up any writer's blocked
4252 * on throttle or waiting for I/O
4253 * to drain
4254 */
4255 vnode_writedone(bp->b_vp);
4256 }
4257
4258 if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */
4259 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
4260 void *arg = bp->b_transaction;
4261 int callout = ISSET(bp->b_flags, B_CALL);
4262
4263 if (iodone_func == NULL) {
4264 panic("biodone: bp @ %p has NULL b_iodone!", bp);
4265 }
4266
4267 CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */
4268 bp->b_iodone = NULL;
4269 bp->b_transaction = NULL;
4270
4271 if (callout) {
4272 SET(bp->b_flags, B_DONE); /* note that it's done */
4273 }
4274 (*iodone_func)(bp, arg);
4275
4276 if (callout) {
4277 /*
4278 * assumes that the callback function takes
4279 * ownership of the bp and deals with releasing it if necessary
4280 */
4281 goto biodone_done;
4282 }
4283 /*
4284 * in this case the call back function is acting
4285 * strictly as a filter... it does not take
4286 * ownership of the bp and is expecting us
4287 * to finish cleaning up... this is currently used
4288 * by the HFS journaling code
4289 */
4290 }
4291 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */
4292 SET(bp->b_flags, B_DONE); /* note that it's done */
4293
4294 buf_brelse(bp);
4295 } else { /* or just wakeup the buffer */
4296 /*
4297 * by taking the mutex, we serialize
4298 * the buf owner calling buf_biowait so that we'll
4299 * only see him in one of 2 states...
4300 * state 1: B_DONE wasn't set and he's
4301 * blocked in msleep
4302 * state 2: he's blocked trying to take the
4303 * mutex before looking at B_DONE
4304 * BL_WANTED is cleared in case anyone else
4305 * is blocked waiting for the buffer... note
4306 * that we haven't cleared B_BUSY yet, so if
4307 * they do get to run, their going to re-set
4308 * BL_WANTED and go back to sleep
4309 */
4310 lck_mtx_lock_spin(&buf_mtx);
4311
4312 CLR(bp->b_lflags, BL_WANTED);
4313 SET(bp->b_flags, B_DONE); /* note that it's done */
4314
4315 lck_mtx_unlock(&buf_mtx);
4316
4317 wakeup(bp);
4318 }
4319 biodone_done:
4320 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END,
4321 (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0);
4322 }
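
/*
 * Illustrative sketch (not compiled in): an asynchronous caller can have
 * buf_biodone() invoke a completion handler by registering it with
 * buf_setcallback(), which sets B_CALL.  Because B_CALL transfers
 * ownership of the buffer to the handler, the handler must release it
 * itself.  "example_iodone" and "example_issue_async" are hypothetical
 * helpers, not part of this file.
 */
#if 0
static void
example_iodone(buf_t bp, void *arg)
{
	int *resultp = arg;     /* hypothetical completion record */

	*resultp = buf_error(bp);
	buf_brelse(bp);         /* B_CALL: the callback owns the buffer */
}

static void
example_issue_async(buf_t bp, int *resultp)
{
	buf_setcallback(bp, example_iodone, resultp);
	VNOP_STRATEGY(bp);      /* buf_biodone() will call example_iodone */
}
#endif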

/*
 * Obfuscate buf pointers.
 */
vm_offset_t
buf_kernel_addrperm_addr(void * addr)
{
	if ((vm_offset_t)addr == 0) {
		return 0;
	} else {
		return (vm_offset_t)addr + buf_kernel_addrperm;
	}
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue(void)
{
	buf_t bp;
	int n = 0;

	lck_mtx_lock_spin(&buf_mtx);

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next) {
		n++;
	}
	lck_mtx_unlock(&buf_mtx);

	return n;
}

/*
 * Return a count of 'busy' buffers. Used at the time of shutdown.
 * note: This is also called from the mach side in debug context in kdp.c
 */
uint32_t
count_busy_buffers(void)
{
	return buf_busycount + bufstats.bufs_iobufinuse;
}

#if DIAGNOSTIC
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats(void)
{
	int i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[MAXBSIZE / CLBYTES + 1];
	static char *bname[BQUEUES] =
	{ "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
			counts[j] = 0;
		}

		lck_mtx_lock(&buf_mtx);

		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize / CLBYTES]++;
			count++;
		}
		lck_mtx_unlock(&buf_mtx);

		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
			if (counts[j] != 0) {
				printf(", %d-%d", j * CLBYTES, counts[j]);
			}
		}
		printf("\n");
	}
}
#endif /* DIAGNOSTIC */
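
/*
 * Example of the DIAGNOSTIC output above (illustrative values): each line
 * is "<queue>: total-<n>" followed by ", <bufsize>-<count>" pairs for
 * every populated size bucket, e.g.
 *
 *	META: total-512, 4096-480, 8192-32
 */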

#define NRESERVEDIOBUFS 128

#define MNT_VIRTUALDEV_MAX_IOBUFS 128
#define VIRTUALDEV_MAX_IOBUFS ((40*niobuf_headers)/100)
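
/*
 * For example (illustrative numbers): with niobuf_headers == 2048,
 * VIRTUALDEV_MAX_IOBUFS evaluates to 819, i.e. diskimage mounts as a
 * group may hold at most 40% of all iobuf headers, while any single
 * non-privileged diskimage mount is capped at MNT_VIRTUALDEV_MAX_IOBUFS (128).
 */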

buf_t
alloc_io_buf(vnode_t vp, int priv)
{
	buf_t bp;
	mount_t mp = NULL;
	int alloc_for_virtualdev = FALSE;

	lck_mtx_lock_spin(&iobuffer_mtxp);

	/*
	 * We subject iobuf requests for diskimages to additional restrictions.
	 *
	 * a) A single diskimage mount cannot use up more than
	 * MNT_VIRTUALDEV_MAX_IOBUFS. However, VM-privileged (pageout) requests
	 * are not subject to this restriction.
	 * b) iobuf headers used by all diskimage mounts combined
	 * cannot exceed VIRTUALDEV_MAX_IOBUFS.
	 */
	if (vp && ((mp = vp->v_mount)) && mp != dead_mountp &&
	    mp->mnt_kern_flag & MNTK_VIRTUALDEV) {
		alloc_for_virtualdev = TRUE;
		while ((!priv && mp->mnt_iobufinuse > MNT_VIRTUALDEV_MAX_IOBUFS) ||
		    bufstats.bufs_iobufinuse_vdev > VIRTUALDEV_MAX_IOBUFS) {
			bufstats.bufs_iobufsleeps++;

			need_iobuffer = 1;
			(void)msleep(&need_iobuffer, &iobuffer_mtxp,
			    PSPIN | (PRIBIO + 1), (const char *)"alloc_io_buf (1)",
			    NULL);
		}
	}

	while ((((uint32_t)(niobuf_headers - NRESERVEDIOBUFS) < bufstats.bufs_iobufinuse) && !priv) ||
	    (bp = iobufqueue.tqh_first) == NULL) {
		bufstats.bufs_iobufsleeps++;

		need_iobuffer = 1;
		(void)msleep(&need_iobuffer, &iobuffer_mtxp, PSPIN | (PRIBIO + 1),
		    (const char *)"alloc_io_buf (2)", NULL);
	}
	TAILQ_REMOVE(&iobufqueue, bp, b_freelist);

	bufstats.bufs_iobufinuse++;
	if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax) {
		bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse;
	}

	if (alloc_for_virtualdev) {
		mp->mnt_iobufinuse++;
		bufstats.bufs_iobufinuse_vdev++;
	}

	lck_mtx_unlock(&iobuffer_mtxp);

	/*
	 * initialize various fields
	 * we don't need to hold the mutex since the buffer
	 * is now private... the vp should have a reference
	 * on it and is not protected by this mutex in any event
	 */
	bp->b_timestamp = 0;
	bp->b_proc = NULL;

	bp->b_datap = 0;
	bp->b_flags = 0;
	bp->b_lflags = BL_BUSY | BL_IOBUF;
	if (alloc_for_virtualdev) {
		bp->b_lflags |= BL_IOBUF_VDEV;
	}
	bp->b_redundancy_flags = 0;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_lblksize = 0;
#ifdef JOE_DEBUG
	bp->b_owner = current_thread();
	bp->b_tag = 6;
#endif
	bp->b_iodone = NULL;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_upl = NULL;
	bp->b_fsprivate = (void *)NULL;
	bp->b_vp = vp;
	bzero(&bp->b_attr, sizeof(struct bufattr));

	if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		bp->b_dev = vp->v_rdev;
	} else {
		bp->b_dev = NODEV;
	}

	return bp;
}


void
free_io_buf(buf_t bp)
{
	int need_wakeup = 0;
	int free_for_virtualdev = FALSE;
	mount_t mp = NULL;

	/* Was this iobuf for a diskimage ? */
	if (bp->b_lflags & BL_IOBUF_VDEV) {
		free_for_virtualdev = TRUE;
		if (bp->b_vp) {
			mp = bp->b_vp->v_mount;
		}
	}

	/*
	 * put buffer back on the head of the iobufqueue
	 */
	bp->b_vp = NULL;
	bp->b_flags = B_INVAL;

	/* Zero out the bufattr and its flags before relinquishing this iobuf */
	bzero(&bp->b_attr, sizeof(struct bufattr));

	lck_mtx_lock_spin(&iobuffer_mtxp);

	binsheadfree(bp, &iobufqueue, -1);

	if (need_iobuffer) {
		/*
		 * Wake up any processes waiting because they need an io buffer
		 *
		 * do the wakeup after we drop the mutex... it's possible that the
		 * wakeup will be superfluous if need_iobuffer gets set again and
		 * another thread runs this path, but it's highly unlikely, doesn't
		 * hurt, and it means we don't hold up I/O progress if the wakeup blocks
		 * trying to grab a task-related lock...
		 */
		need_iobuffer = 0;
		need_wakeup = 1;
	}
	if (bufstats.bufs_iobufinuse <= 0) {
		panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse <= 0", bp);
	}

	bufstats.bufs_iobufinuse--;

	if (free_for_virtualdev) {
		bufstats.bufs_iobufinuse_vdev--;
		if (mp && mp != dead_mountp) {
			mp->mnt_iobufinuse--;
		}
	}

	lck_mtx_unlock(&iobuffer_mtxp);

	if (need_wakeup) {
		wakeup(&need_iobuffer);
	}
}
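
/*
 * Illustrative sketch (not compiled in): the alloc_io_buf()/free_io_buf()
 * lifecycle for a throw-away I/O header, in the style of the cluster
 * layer.  "example_raw_write" and its parameters are hypothetical; the
 * data pointer is assumed to be already wired/mapped by the caller.
 */
#if 0
static int
example_raw_write(vnode_t devvp, daddr64_t blkno, uintptr_t data, int size)
{
	buf_t bp = alloc_io_buf(devvp, 0);      /* may sleep for a free header */
	int error;

	buf_setblkno(bp, blkno);                /* device block number */
	buf_setlblkno(bp, blkno);
	buf_setcount(bp, size);
	buf_setdataptr(bp, data);               /* b_flags == 0 implies a write */

	error = VNOP_STRATEGY(bp);
	if (error == 0) {
		error = buf_biowait(bp);
	}
	free_io_buf(bp);        /* requeue the header and wake any waiters */
	return error;
}
#endif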


void
buf_list_lock(void)
{
	lck_mtx_lock_spin(&buf_mtx);
}

void
buf_list_unlock(void)
{
	lck_mtx_unlock(&buf_mtx);
}

/*
 * If getnewbuf() calls bcleanbuf() on the same thread
 * there is a potential for stack overrun and deadlocks.
 * So we always hand off the work to a worker thread for completion
 */


static void
bcleanbuf_thread_init(void)
{
	thread_t thread = THREAD_NULL;

	/* create worker thread */
	kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread);
	thread_deallocate(thread);
}

typedef int (*bcleanbufcontinuation)(int);

__attribute__((noreturn))
static void
bcleanbuf_thread(void)
{
	struct buf *bp;
	int error = 0;
	int loopcnt = 0;

	for (;;) {
		lck_mtx_lock_spin(&buf_mtx);

		while ((bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) {
			(void)msleep0(&bufqueues[BQ_LAUNDRY], &buf_mtx, PRIBIO | PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread);
		}

		/*
		 * Remove from the queue
		 */
		bremfree_locked(bp);

		/*
		 * Buffer is no longer on any free list
		 */
		SET(bp->b_lflags, BL_BUSY);
		buf_busycount++;

#ifdef JOE_DEBUG
		bp->b_owner = current_thread();
		bp->b_tag = 10;
#endif

		lck_mtx_unlock(&buf_mtx);
		/*
		 * do the IO
		 */
		error = bawrite_internal(bp, 0);

		if (error) {
			bp->b_whichq = BQ_LAUNDRY;
			bp->b_timestamp = buf_timestamp();

			lck_mtx_lock_spin(&buf_mtx);

			binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
			blaundrycnt++;

			/* we never leave a busy buffer on the laundry queue */
			CLR(bp->b_lflags, BL_BUSY);
			buf_busycount--;
#ifdef JOE_DEBUG
			bp->b_owner = current_thread();
			bp->b_tag = 11;
#endif

			lck_mtx_unlock(&buf_mtx);

			if (loopcnt > MAXLAUNDRY) {
				/*
				 * bawrite_internal() can return errors if we're throttled. If we've
				 * done several I/Os and failed, give the system some time to unthrottle
				 * the vnode
				 */
				(void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry", 1);
				loopcnt = 0;
			} else {
				/* give other threads a chance to run */
				(void)thread_block(THREAD_CONTINUE_NULL);
				loopcnt++;
			}
		}
	}
}


static int
brecover_data(buf_t bp)
{
	int upl_offset;
	upl_t upl;
	upl_page_info_t *pl;
	kern_return_t kret;
	vnode_t vp = bp->b_vp;
	int upl_flags;


	if (!UBCINFOEXISTS(vp) || bp->b_bufsize == 0) {
		goto dump_buffer;
	}

	upl_flags = UPL_PRECIOUS;
	if (!(buf_flags(bp) & B_READ)) {
		/*
		 * "write" operation: let the UPL subsystem know
		 * that we intend to modify the buffer cache pages we're
		 * gathering.
		 */
		upl_flags |= UPL_WILL_MODIFY;
	}

	kret = ubc_create_upl_kernel(vp,
	    ubc_blktooff(vp, bp->b_lblkno),
	    bp->b_bufsize,
	    &upl,
	    &pl,
	    upl_flags,
	    VM_KERN_MEMORY_FILE);
	if (kret != KERN_SUCCESS) {
		panic("Failed to create UPL");
	}

	for (upl_offset = 0; (uint32_t)upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) {
		if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) {
			ubc_upl_abort(upl, 0);
			goto dump_buffer;
		}
	}
	bp->b_upl = upl;

	kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));

	if (kret != KERN_SUCCESS) {
		panic("getblk: ubc_upl_map() failed with (%d)", kret);
	}
	return 1;

dump_buffer:
	bp->b_bufsize = 0;
	SET(bp->b_flags, B_INVAL);
	buf_brelse(bp);

	return 0;
}

int
fs_buffer_cache_gc_register(void (* callout)(int, void *), void *context)
{
	lck_mtx_lock(&buf_gc_callout);
	for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
		if (fs_callouts[i].callout == NULL) {
			fs_callouts[i].callout = callout;
			fs_callouts[i].context = context;
			lck_mtx_unlock(&buf_gc_callout);
			return 0;
		}
	}

	lck_mtx_unlock(&buf_gc_callout);
	return ENOMEM;
}

int
fs_buffer_cache_gc_unregister(void (* callout)(int, void *), void *context)
{
	lck_mtx_lock(&buf_gc_callout);
	for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
		if (fs_callouts[i].callout == callout &&
		    fs_callouts[i].context == context) {
			fs_callouts[i].callout = NULL;
			fs_callouts[i].context = NULL;
		}
	}
	lck_mtx_unlock(&buf_gc_callout);
	return 0;
}
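
/*
 * Illustrative sketch (not compiled in): how a filesystem might hook the
 * buffer-cache GC.  The callout runs each time buffer_cache_gc() finishes
 * a pass; "all" is non-zero when every stale buffer is being evicted
 * (e.g. in preparation for deep sleep).  "example_fs_gc",
 * "example_trim_private_caches" and "example_mp" are hypothetical.
 */
#if 0
static void
example_fs_gc(int all, void *context)
{
	mount_t mp = context;   /* hypothetical per-mount context */

	/* trim private filesystem caches; be aggressive when "all" is set */
	example_trim_private_caches(mp, all);   /* hypothetical helper */
}

/* at mount time */
fs_buffer_cache_gc_register(example_fs_gc, example_mp);

/* at unmount time */
fs_buffer_cache_gc_unregister(example_fs_gc, example_mp);
#endif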

static void
fs_buffer_cache_gc_dispatch_callouts(int all)
{
	lck_mtx_lock(&buf_gc_callout);
	for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
		if (fs_callouts[i].callout != NULL) {
			fs_callouts[i].callout(all, fs_callouts[i].context);
		}
	}
	lck_mtx_unlock(&buf_gc_callout);
}

static boolean_t
buffer_cache_gc(int all)
{
	buf_t bp;
	boolean_t did_large_zfree = FALSE;
	boolean_t need_wakeup = FALSE;
	int now = buf_timestamp();
	uint32_t found = 0;
	struct bqueues privq;
	int thresh_hold = BUF_STALE_THRESHHOLD;

	if (all) {
		thresh_hold = 0;
	}
	/*
	 * We only care about metadata (incore storage comes from zalloc()).
	 * Unless "all" is set (used to evict metadata buffers in preparation
	 * for deep sleep), we only evict up to BUF_MAX_GC_BATCH_SIZE buffers
	 * that have not been accessed in the last BUF_STALE_THRESHHOLD seconds.
	 * BUF_MAX_GC_BATCH_SIZE controls both the hold time of the global lock
	 * "buf_mtx" and the length of time we spend compute bound in the GC
	 * thread which calls this function.
	 */
	lck_mtx_lock(&buf_mtx);

	do {
		found = 0;
		TAILQ_INIT(&privq);
		need_wakeup = FALSE;

		while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) &&
		    (now > bp->b_timestamp) &&
		    (now - bp->b_timestamp > thresh_hold) &&
		    (found < BUF_MAX_GC_BATCH_SIZE)) {
			/* Remove from free list */
			bremfree_locked(bp);
			found++;

#ifdef JOE_DEBUG
			bp->b_owner = current_thread();
			bp->b_tag = 12;
#endif

			/* If dirty, move to laundry queue and remember to do wakeup */
			if (ISSET(bp->b_flags, B_DELWRI)) {
				SET(bp->b_lflags, BL_WANTDEALLOC);

				bmovelaundry(bp);
				need_wakeup = TRUE;

				continue;
			}

			/*
			 * Mark busy and put on private list.  We could technically get
			 * away without setting BL_BUSY here.
			 */
			SET(bp->b_lflags, BL_BUSY);
			buf_busycount++;

			/*
			 * Remove from hash and dissociate from vp.
			 */
			bremhash(bp);
			if (bp->b_vp) {
				brelvp_locked(bp);
			}

			TAILQ_INSERT_TAIL(&privq, bp, b_freelist);
		}

		if (found == 0) {
			break;
		}

		/* Drop lock for batch processing */
		lck_mtx_unlock(&buf_mtx);

		/* Wakeup and yield for laundry if need be */
		if (need_wakeup) {
			wakeup(&bufqueues[BQ_LAUNDRY]);
			(void)thread_block(THREAD_CONTINUE_NULL);
		}

		/* Clean up every buffer on private list */
		TAILQ_FOREACH(bp, &privq, b_freelist) {
			/* Take note if we've definitely freed at least a page to a zone */
			if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) {
				did_large_zfree = TRUE;
			}

			trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);

			/* Free Storage */
			buf_free_meta_store(bp);

			/* Release credentials */
			buf_release_credentials(bp);

			/* Prepare for moving to empty queue */
			CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED
			    | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
			bp->b_whichq = BQ_EMPTY;
			BLISTNONE(bp);
		}
		lck_mtx_lock(&buf_mtx);

		/* Back under lock, move them all to invalid hash and clear busy */
		TAILQ_FOREACH(bp, &privq, b_freelist) {
			binshash(bp, &invalhash);
			CLR(bp->b_lflags, BL_BUSY);
			buf_busycount--;

#ifdef JOE_DEBUG
			if (bp->b_owner != current_thread()) {
				panic("Buffer stolen from buffer_cache_gc()");
			}
			bp->b_owner = current_thread();
			bp->b_tag = 13;
#endif
		}

		/* And do a big bulk move to the empty queue */
		TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist);
	} while (all && (found == BUF_MAX_GC_BATCH_SIZE));

	lck_mtx_unlock(&buf_mtx);

	fs_buffer_cache_gc_dispatch_callouts(all);

	return did_large_zfree;
}


/*
 * disabled for now
 */

#if FLUSH_QUEUES

#define NFLUSH 32

static int
bp_cmp(void *a, void *b)
{
	buf_t bp_a = *(buf_t *)a,
	    bp_b = *(buf_t *)b;
	daddr64_t res;

	// don't have to worry about negative block
	// numbers so this is ok to do.
	//
	res = (bp_a->b_blkno - bp_b->b_blkno);

	return (int)res;
}


int
bflushq(int whichq, mount_t mp)
{
	buf_t bp, next;
	int i, buf_count;
	int total_writes = 0;
	static buf_t flush_table[NFLUSH];

	if (whichq < 0 || whichq >= BQUEUES) {
		return 0;
	}

restart:
	lck_mtx_lock(&buf_mtx);

	bp = TAILQ_FIRST(&bufqueues[whichq]);

	for (buf_count = 0; bp; bp = next) {
		next = bp->b_freelist.tqe_next;

		if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
			continue;
		}

		if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
			bremfree_locked(bp);
#ifdef JOE_DEBUG
			bp->b_owner = current_thread();
			bp->b_tag = 7;
#endif
			SET(bp->b_lflags, BL_BUSY);
			buf_busycount++;

			flush_table[buf_count] = bp;
			buf_count++;
			total_writes++;

			if (buf_count >= NFLUSH) {
				lck_mtx_unlock(&buf_mtx);

				qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);

				for (i = 0; i < buf_count; i++) {
					buf_bawrite(flush_table[i]);
				}
				goto restart;
			}
		}
	}
	lck_mtx_unlock(&buf_mtx);

	if (buf_count > 0) {
		qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);

		for (i = 0; i < buf_count; i++) {
			buf_bawrite(flush_table[i]);
		}
	}

	return total_writes;
}
#endif